Skip to content

Commit c907c4e

Browse files
pattishin (Cloud Shell) authored and committed
chore: update ai package versions
fix: fix broken tests; fix: update dependency to use GitHub over npm; feat: change assertion to more common output; added function-calling examples; removed double await; fixed linter errors
1 parent 652746c commit c907c4e

11 files changed

+272
-12
lines changed
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,99 @@
1+
// Copyright 2023 Google LLC
2+
//
3+
// Licensed under the Apache License, Version 2.0 (the "License");
4+
// you may not use this file except in compliance with the License.
5+
// You may obtain a copy of the License at
6+
//
7+
// https://www.apache.org/licenses/LICENSE-2.0
8+
//
9+
// Unless required by applicable law or agreed to in writing, software
10+
// distributed under the License is distributed on an "AS IS" BASIS,
11+
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
// See the License for the specific language governing permissions and
13+
// limitations under the License.
14+
15+
// [START aiplatform_gemini_function_calling_chat]
const {
  VertexAI,
  FunctionDeclarationSchemaType,
} = require('@google-cloud/vertexai');

// Tool declaration: exposes a single get_current_weather function the model
// may request via a functionCall part.
const functionDeclarations = [
  {
    function_declarations: [
      {
        name: 'get_current_weather',
        description: 'get weather in a given location',
        parameters: {
          type: FunctionDeclarationSchemaType.OBJECT,
          properties: {
            location: {type: FunctionDeclarationSchemaType.STRING},
            unit: {
              type: FunctionDeclarationSchemaType.STRING,
              enum: ['celsius', 'fahrenheit'],
            },
          },
          required: ['location'],
        },
      },
    ],
  },
];

// Canned function output sent back to the model in the second turn,
// standing in for a real weather-service lookup.
const functionResponseParts = [
  {
    functionResponse: {
      name: 'get_current_weather',
      response: {name: 'get_current_weather', content: {weather: 'super nice'}},
    },
  },
];

/**
 * Demonstrates a streaming chat session that uses function calling.
 *
 * TODO(developer): Update these variables before running the sample.
 * @param {string} projectId Google Cloud project ID.
 * @param {string} location Vertex AI region, e.g. 'us-central1'.
 * @param {string} model Generative model resource name.
 */
async function functionCallingStreamChat(
  projectId = 'PROJECT_ID',
  location = 'us-central1',
  model = 'gemini-pro'
) {
  // Initialize Vertex with your Cloud project and location
  const vertexAI = new VertexAI({project: projectId, location: location});

  // Instantiate the model
  const generativeModel = vertexAI.preview.getGenerativeModel({
    model: model,
  });

  // Create a chat session and pass your function declarations
  const chat = generativeModel.startChat({
    tools: functionDeclarations,
  });

  const chatInput1 = 'What is the weather in Boston?';

  // This should include a functionCall response from the model
  const result1 = await chat.sendMessageStream(chatInput1);
  for await (const item of result1.stream) {
    console.log(item.candidates[0]);
  }
  // Await the aggregated response so the turn completes before the follow-up.
  // (The original bound this to an unused `response1` variable; the binding
  // was dead code and has been removed, the await itself is kept.)
  await result1.response;

  // Send a follow up message with a FunctionResponse
  const result2 = await chat.sendMessageStream(functionResponseParts);
  for await (const item of result2.stream) {
    console.log(item.candidates[0]);
  }

  // This should include a text response from the model using the response
  // content provided above
  const response2 = await result2.response;
  console.log(response2.candidates[0].content.parts[0].text);
}
// [END aiplatform_gemini_function_calling_chat]

functionCallingStreamChat(...process.argv.slice(2)).catch(err => {
  console.error(err.message);
  process.exitCode = 1;
});
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,85 @@
1+
// Copyright 2023 Google LLC
2+
//
3+
// Licensed under the Apache License, Version 2.0 (the "License");
4+
// you may not use this file except in compliance with the License.
5+
// You may obtain a copy of the License at
6+
//
7+
// https://www.apache.org/licenses/LICENSE-2.0
8+
//
9+
// Unless required by applicable law or agreed to in writing, software
10+
// distributed under the License is distributed on an "AS IS" BASIS,
11+
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
// See the License for the specific language governing permissions and
13+
// limitations under the License.
14+
15+
// [START aiplatform_gemini_function_calling_content]
const {
  VertexAI,
  FunctionDeclarationSchemaType,
} = require('@google-cloud/vertexai');

// Tool schema: a single get_current_weather function the model may call.
const functionDeclarations = [
  {
    function_declarations: [
      {
        name: 'get_current_weather',
        description: 'get weather in a given location',
        parameters: {
          type: FunctionDeclarationSchemaType.OBJECT,
          properties: {
            location: {type: FunctionDeclarationSchemaType.STRING},
            unit: {
              type: FunctionDeclarationSchemaType.STRING,
              enum: ['celsius', 'fahrenheit'],
            },
          },
          required: ['location'],
        },
      },
    ],
  },
];

// Pre-baked function result that stands in for a real weather lookup.
const functionResponseParts = [
  {
    functionResponse: {
      name: 'get_current_weather',
      response: {name: 'get_current_weather', content: {weather: 'super nice'}},
    },
  },
];

/**
 * Streams a model reply for a conversation whose function-calling exchange
 * is supplied up front as static content.
 *
 * TODO(developer): Update these variables before running the sample.
 * @param {string} projectId Google Cloud project ID.
 * @param {string} location Vertex AI region, e.g. 'us-central1'.
 * @param {string} model Generative model resource name.
 */
async function functionCallingStreamChat(
  projectId = 'PROJECT_ID',
  location = 'us-central1',
  model = 'gemini-pro'
) {
  // Initialize Vertex with your Cloud project and location
  const vertex = new VertexAI({project: projectId, location: location});

  // Instantiate the model
  const generativeModel = vertex.preview.getGenerativeModel({model: model});

  // The whole exchange — user question, model functionCall, and the
  // function's answer — is passed as fixed history in a single request.
  const request = {
    contents: [
      {role: 'user', parts: [{text: 'What is the weather in Boston?'}]},
      {
        role: 'model',
        parts: [
          {
            functionCall: {
              name: 'get_current_weather',
              args: {location: 'Boston'},
            },
          },
        ],
      },
      {role: 'function', parts: functionResponseParts},
    ],
    tools: functionDeclarations,
  };

  const streamingResp = await generativeModel.generateContentStream(request);
  for await (const chunk of streamingResp.stream) {
    console.log(chunk.candidates[0].content.parts[0].text);
  }
}
// [END aiplatform_gemini_function_calling_content]

functionCallingStreamChat(...process.argv.slice(2)).catch(err => {
  console.error(err.message);
  process.exitCode = 1;
});

generative-ai/snippets/nonStreamingChat.js

+9-6
Original file line numberDiff line numberDiff line change
@@ -37,20 +37,23 @@ async function createNonStreamingChat(
3737
console.log(`User: ${chatInput1}`);
3838

3939
const result1 = await chat.sendMessage(chatInput1);
40-
const response1 = result1.response.candidates[0].content.parts[0].text;
41-
console.log('Chat bot: ', response1);
40+
const response1 = result1.response;
41+
const text1 = response1.candidates[0].content.parts[0].text;
42+
console.log('Chat bot: ', text1);
4243

4344
const chatInput2 = 'Can you tell me a scientific fun fact?';
4445
console.log(`User: ${chatInput2}`);
4546
const result2 = await chat.sendMessage(chatInput2);
46-
const response2 = result2.response.candidates[0].content.parts[0].text;
47-
console.log('Chat bot: ', response2);
47+
const response2 = await result2.response;
48+
const text2 = response2.candidates[0].content.parts[0].text;
49+
console.log('Chat bot: ', text2);
4850

4951
const chatInput3 = 'How can I learn more about that?';
5052
console.log(`User: ${chatInput3}`);
5153
const result3 = await chat.sendMessage(chatInput3);
52-
const response3 = result3.response.candidates[0].content.parts[0].text;
53-
console.log('Chat bot: ', response3);
54+
const response3 = await result3.response;
55+
const text3 = response3.candidates[0].content.parts[0].text;
56+
console.log('Chat bot: ', text3);
5457
}
5558
// [END aiplatform_gemini_multiturn_chat_nonstreaming]
5659

generative-ai/snippets/nonStreamingMultipartContent.js

+1-1
Original file line numberDiff line numberDiff line change
@@ -50,7 +50,7 @@ async function createNonStreamingMultipartContent(
5050
};
5151

5252
console.log('Prompt Text:');
53-
console.log(request.contents[0].parts[0].text);
53+
console.log(request.contents[0].parts[1].text);
5454

5555
console.log('Non-Streaming Response Text:');
5656
// Create the response stream

generative-ai/snippets/package.json

+1-1
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@
1313
"test": "c8 mocha -p -j 2 --timeout 2400000 test/*.test.js"
1414
},
1515
"dependencies": {
16-
"@google-cloud/aiplatform": "^3.0.0",
16+
"@google-cloud/aiplatform": "^3.12.0",
1717
"@google-cloud/vertexai": "github:googleapis/nodejs-vertexai",
1818
"axios": "^1.6.2",
1919
"supertest": "^6.3.3"

generative-ai/snippets/safetySettings.js

+2-1
Original file line numberDiff line numberDiff line change
@@ -59,14 +59,15 @@ async function setSafetySettings(
5959
for await (const item of responseStream.stream) {
6060
if (item.candidates[0].finishReason === 'SAFETY') {
6161
console.log('This response stream terminated due to safety concerns.');
62+
break;
6263
} else {
6364
process.stdout.write(item.candidates[0].content.parts[0].text);
6465
}
6566
}
6667
}
6768
// [END aiplatform_gemini_safety_settings]
6869

69-
setSafetySettings(...process.argv.slice(3)).catch(err => {
70+
setSafetySettings(...process.argv.slice(2)).catch(err => {
7071
console.error(err.message);
7172
process.exitCode = 1;
7273
});

generative-ai/snippets/streamMultipartContent.js

+1-1
Original file line numberDiff line numberDiff line change
@@ -50,7 +50,7 @@ async function createStreamMultipartContent(
5050
};
5151

5252
console.log('Prompt Text:');
53-
console.log(request.contents[0].parts[0].text);
53+
console.log(request.contents[0].parts[1].text);
5454
console.log('Streaming Response Text:');
5555

5656
// Create the response stream
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,36 @@
1+
// Copyright 2023 Google LLC
2+
//
3+
// Licensed under the Apache License, Version 2.0 (the "License");
4+
// you may not use this file except in compliance with the License.
5+
// You may obtain a copy of the License at
6+
//
7+
// https://www.apache.org/licenses/LICENSE-2.0
8+
//
9+
// Unless required by applicable law or agreed to in writing, software
10+
// distributed under the License is distributed on an "AS IS" BASIS,
11+
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
// See the License for the specific language governing permissions and
13+
// limitations under the License.
14+
15+
'use strict';

const {assert} = require('chai');
const {describe, it} = require('mocha');
const cp = require('child_process');

// Run a shell command synchronously and capture its stdout as text.
const execSync = command => cp.execSync(command, {encoding: 'utf-8'});

describe('Generative AI Function Calling Stream Chat', () => {
  const project = 'cloud-llm-preview1';
  const location = 'us-central1';
  const model = 'gemini-pro';

  it('should create stream chat and begin the conversation the same in each instance', async () => {
    const output = execSync(
      `node ./functionCallingStreamChat.js ${project} ${location} ${model}`
    );

    // Assert that the response is what we expect
    assert(output.match(/The weather in Boston is super nice./));
  });
});
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,36 @@
1+
// Copyright 2023 Google LLC
2+
//
3+
// Licensed under the Apache License, Version 2.0 (the "License");
4+
// you may not use this file except in compliance with the License.
5+
// You may obtain a copy of the License at
6+
//
7+
// https://www.apache.org/licenses/LICENSE-2.0
8+
//
9+
// Unless required by applicable law or agreed to in writing, software
10+
// distributed under the License is distributed on an "AS IS" BASIS,
11+
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
// See the License for the specific language governing permissions and
13+
// limitations under the License.
14+
15+
'use strict';

const {assert} = require('chai');
const {describe, it} = require('mocha');
const cp = require('child_process');

// Run a shell command synchronously and capture its stdout as text.
const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});

describe('Generative AI Function Calling Stream Content', () => {
  const project = 'cloud-llm-preview1';
  const location = 'us-central1';
  const model = 'gemini-pro';

  // Title fixed: the original was copy-pasted from the stream-chat test and
  // described the wrong sample; this test exercises the content-based sample.
  it('should stream a text response to content containing a function call exchange', async () => {
    const output = execSync(
      `node ./functionCallingStreamContent.js ${project} ${location} ${model}`
    );

    // Assert that the response is what we expect
    assert(output.match(/The weather in Boston is super nice./));
  });
});

generative-ai/snippets/test/sendMultiModalPromptWithImage.test.js

+1-1
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,6 @@ describe('Generative AI Stream MultiModal with Image', () => {
3030
`node ./sendMultiModalPromptWithImage.js ${project} ${location} ${model}`
3131
);
3232
// Ensure that the conversation is what we expect for these images
33-
assert(output.match(/Paris/));
33+
assert(output.match(/city: Rio de Janeiro, Landmark: Christ the Redeemer/));
3434
});
3535
});

generative-ai/snippets/test/sendMultiModalPromptWithVideo.test.js

+1-1
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,6 @@ describe('Generative AI Stream MultiModal with Video', () => {
3030
`node ./sendMultiModalPromptWithVideo.js ${project} ${location} ${model}`
3131
);
3232
// Ensure that the conversation is what we expect for these images
33-
assert(output.match(/advertisement/));
33+
assert(output.match(/Zootopia/));
3434
});
3535
});

0 commit comments

Comments
 (0)