Skip to content

Commit 22eee1f

Browse files

Author: Cloud Shell (committed)

chore: attempt to fix linting errors

1 parent: e5a79c0 · commit: 22eee1f

File tree

2 files changed: +70 additions, −70 deletions

generative-ai/snippets/functionCallingStreamChat.js

+16-16
Original file line numberDiff line numberDiff line change
@@ -14,8 +14,8 @@
1414

1515
// [START aiplatform_gemini_function_calling_chat]
1616
const {
17-
VertexAI,
18-
FunctionDeclarationSchemaType
17+
VertexAI,
18+
FunctionDeclarationSchemaType
1919
} = require('@google-cloud/vertexai');
2020

2121
const functionDeclarations = [
@@ -27,27 +27,27 @@ const functionDeclarations = [
2727
parameters: {
2828
type: FunctionDeclarationSchemaType.OBJECT,
2929
properties: {
30-
location: { type: FunctionDeclarationSchemaType.STRING },
30+
location: {type: FunctionDeclarationSchemaType.STRING},
3131
unit: {
3232
type: FunctionDeclarationSchemaType.STRING,
3333
enum: ['celsius', 'fahrenheit'],
34-
},
35-
},
34+
},
35+
},
3636
required: ['location'],
37-
},
38-
},
37+
},
38+
},
3939
],
40-
},
40+
},
4141
];
4242

4343
const functionResponseParts = [
4444
{
4545
functionResponse: {
4646
name: 'get_current_weather',
4747
response:
48-
{ name: 'get_current_weather', content: { weather: 'super nice' } },
49-
},
50-
},
48+
{name: 'get_current_weather', content: {weather: 'super nice'}},
49+
},
50+
},
5151
];
5252

5353
/**
@@ -59,32 +59,32 @@ async function functionCallingStreamChat(
5959
model = 'gemini-pro'
6060
) {
6161
// Initialize Vertex with your Cloud project and location
62-
const vertexAI = new VertexAI({ project: projectId, location: location });
62+
const vertexAI = new VertexAI({project: projectId, location: location});
6363

6464
// Instantiate the model
6565
const generativeModel = vertexAI.preview.getGenerativeModel({
6666
model: model,
67-
});
67+
});
6868

6969
// Create a chat session and pass your function declarations
7070
const chat = generativeModel.startChat({
7171
tools: functionDeclarations,
72-
});
72+
});
7373

7474
const chatInput1 = 'What is the weather in Boston?';
7575

7676
// This should include a functionCall response from the model
7777
const result1 = await chat.sendMessageStream(chatInput1);
7878
for await (const item of result1.stream) {
7979
console.log(item.candidates[0]);
80-
}
80+
}
8181
const response1 = await result1.response;
8282

8383
// Send a follow up message with a FunctionResponse
8484
const result2 = await chat.sendMessageStream(functionResponseParts);
8585
for await (const item of result2.stream) {
8686
console.log(item.candidates[0]);
87-
}
87+
}
8888

8989
// This should include a text response from the model using the response content
9090
// provided above

generative-ai/snippets/functionCallingStreamContent.js

+54-54
Original file line numberDiff line numberDiff line change
@@ -13,73 +13,73 @@
1313
// limitations under the License.
1414

1515
// [START aiplatform_gemini_function_calling_content]
16-
const { VertexAI, FunctionDeclarationSchemaType } = require('@google-cloud/vertexai');
16+
const {VertexAI, FunctionDeclarationSchemaType} = require('@google-cloud/vertexai');
1717

1818
const functionDeclarations = [
19-
{
20-
function_declarations: [
21-
{
22-
name: 'get_current_weather',
23-
description: 'get weather in a given location',
24-
parameters: {
25-
type: FunctionDeclarationSchemaType.OBJECT,
26-
properties: {
27-
location: {type: FunctionDeclarationSchemaType.STRING},
28-
unit: {
29-
type: FunctionDeclarationSchemaType.STRING,
30-
enum: ['celsius', 'fahrenheit'],
31-
},
32-
},
33-
required: ['location'],
34-
},
35-
},
36-
],
37-
},
38-
];
39-
40-
const functionResponseParts = [
41-
{
42-
functionResponse: {
19+
{
20+
function_declarations: [
21+
{
4322
name: 'get_current_weather',
44-
response:
45-
{name: 'get_current_weather', content: {weather: 'super nice'}},
46-
},
47-
},
48-
];
23+
description: 'get weather in a given location',
24+
parameters: {
25+
type: FunctionDeclarationSchemaType.OBJECT,
26+
properties: {
27+
location: {type: FunctionDeclarationSchemaType.STRING},
28+
unit: {
29+
type: FunctionDeclarationSchemaType.STRING,
30+
enum: ['celsius', 'fahrenheit'],
31+
},
32+
},
33+
required: ['location'],
34+
},
35+
},
36+
],
37+
},
38+
];
39+
40+
const functionResponseParts = [
41+
{
42+
functionResponse: {
43+
name: 'get_current_weather',
44+
response:
45+
{name: 'get_current_weather', content: {weather: 'super nice'}},
46+
},
47+
},
48+
];
4949

5050
/**
5151
* TODO(developer): Update these variables before running the sample.
5252
*/
5353
async function functionCallingStreamChat(
54-
projectId = 'PROJECT_ID',
55-
location = 'us-central1',
56-
model = 'gemini-pro'
54+
projectId = 'PROJECT_ID',
55+
location = 'us-central1',
56+
model = 'gemini-pro'
5757
) {
58-
// Initialize Vertex with your Cloud project and location
59-
const vertexAI = new VertexAI({ project: projectId, location: location });
58+
// Initialize Vertex with your Cloud project and location
59+
const vertexAI = new VertexAI({project: projectId, location: location});
6060

61-
// Instantiate the model
62-
const generativeModel = vertexAI.preview.getGenerativeModel({
63-
model: model,
64-
});
61+
// Instantiate the model
62+
const generativeModel = vertexAI.preview.getGenerativeModel({
63+
model: model,
64+
});
6565

66-
const request = {
67-
contents: [
68-
{role: 'user', parts: [{text: 'What is the weather in Boston?'}]},
69-
{role: 'model', parts: [{functionCall: {name: 'get_current_weather', args: {'location': 'Boston'}}}]},
70-
{role: 'function', parts: functionResponseParts}
71-
],
72-
tools: functionDeclarations,
73-
};
74-
const streamingResp =
75-
await generativeModel.generateContentStream(request);
76-
for await (const item of streamingResp.stream) {
77-
console.log(item.candidates[0].content.parts[0].text);
78-
}
66+
const request = {
67+
contents: [
68+
{role: 'user', parts: [{text: 'What is the weather in Boston?'}]},
69+
{role: 'model', parts: [{functionCall: {name: 'get_current_weather', args: {'location': 'Boston'}}}]},
70+
{role: 'function', parts: functionResponseParts}
71+
],
72+
tools: functionDeclarations,
73+
};
74+
const streamingResp =
75+
await generativeModel.generateContentStream(request);
76+
for await (const item of streamingResp.stream) {
77+
console.log(item.candidates[0].content.parts[0].text);
78+
}
7979
}
8080
// [END aiplatform_gemini_function_calling_content]
8181

8282
functionCallingStreamChat(...process.argv.slice(2)).catch(err => {
83-
console.error(err.message);
84-
process.exitCode = 1;
83+
console.error(err.message);
84+
process.exitCode = 1;
8585
});

0 commit comments

Comments (0)