diff --git a/generative-ai/snippets/countTokens.js b/generative-ai/snippets/countTokens.js
index 492be74da3..40ce023064 100644
--- a/generative-ai/snippets/countTokens.js
+++ b/generative-ai/snippets/countTokens.js
@@ -24,10 +24,10 @@ async function countTokens(
   model = 'gemini-pro'
 ) {
   // Initialize Vertex with your Cloud project and location
-  const vertex_ai = new VertexAI({project: projectId, location: location});
+  const vertexAI = new VertexAI({project: projectId, location: location});
 
   // Instantiate the model
-  const generativeModel = vertex_ai.preview.getGenerativeModel({
+  const generativeModel = vertexAI.getGenerativeModel({
     model: model,
   });
 
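The countTokens.js change above is the pattern repeated throughout this PR: the model handle moves off the preview namespace onto the GA surface. As a minimal sketch of the updated call shape (placeholder project, region, and prompt; not part of the diff):

    const {VertexAI} = require('@google-cloud/vertexai');

    async function main() {
      // Placeholders: substitute your own project ID and region.
      const vertexAI = new VertexAI({project: 'my-project', location: 'us-central1'});
      const generativeModel = vertexAI.getGenerativeModel({model: 'gemini-pro'});

      // countTokens accepts a standard request body and reports the total.
      const {totalTokens} = await generativeModel.countTokens({
        contents: [{role: 'user', parts: [{text: 'How are you doing today?'}]}],
      });
      console.log(totalTokens);
    }

    main().catch(console.error);
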
diff --git a/generative-ai/snippets/functionCallingStreamChat.js b/generative-ai/snippets/functionCallingStreamChat.js
new file mode 100644
index 0000000000..62caf5d12c
--- /dev/null
+++ b/generative-ai/snippets/functionCallingStreamChat.js
@@ -0,0 +1,98 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// [START aiplatform_gemini_function_calling_chat]
+const {
+  VertexAI,
+  FunctionDeclarationSchemaType,
+} = require('@google-cloud/vertexai');
+
+const functionDeclarations = [
+  {
+    function_declarations: [
+      {
+        name: 'get_current_weather',
+        description: 'get weather in a given location',
+        parameters: {
+          type: FunctionDeclarationSchemaType.OBJECT,
+          properties: {
+            location: {type: FunctionDeclarationSchemaType.STRING},
+            unit: {
+              type: FunctionDeclarationSchemaType.STRING,
+              enum: ['celsius', 'fahrenheit'],
+            },
+          },
+          required: ['location'],
+        },
+      },
+    ],
+  },
+];
+
+const functionResponseParts = [
+  {
+    functionResponse: {
+      name: 'get_current_weather',
+      response: {name: 'get_current_weather', content: {weather: 'super nice'}},
+    },
+  },
+];
+
+/**
+ * TODO(developer): Update these variables before running the sample.
+ */
+async function functionCallingStreamChat(
+  projectId = 'PROJECT_ID',
+  location = 'us-central1',
+  model = 'gemini-pro'
+) {
+  // Initialize Vertex with your Cloud project and location
+  const vertexAI = new VertexAI({project: projectId, location: location});
+
+  // Instantiate the model
+  const generativeModel = vertexAI.preview.getGenerativeModel({
+    model: model,
+  });
+
+  // Create a chat session and pass your function declarations
+  const chat = generativeModel.startChat({
+    tools: functionDeclarations,
+  });
+
+  const chatInput1 = 'What is the weather in Boston?';
+
+  // This should include a functionCall response from the model
+  const result1 = await chat.sendMessageStream(chatInput1);
+  for await (const item of result1.stream) {
+    console.log(item.candidates[0]);
+  }
+  await result1.response;
+
+  // Send a follow up message with a FunctionResponse
+  const result2 = await chat.sendMessageStream(functionResponseParts);
+  for await (const item of result2.stream) {
+    console.log(item.candidates[0]);
+  }
+
+  // This should include a text response from the model using the response content
+  // provided above
+  const response2 = await result2.response;
+  console.log(response2.candidates[0].content.parts[0].text);
+}
+// [END aiplatform_gemini_function_calling_chat]
+
+functionCallingStreamChat(...process.argv.slice(2)).catch(err => {
+  console.error(err.message);
+  process.exitCode = 1;
+});
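When reading functionCallingStreamChat.js, it helps to know what the first stream is expected to carry: a functionCall part rather than text. The shape below is illustrative, reconstructed from the declared get_current_weather function (and mirrored in the replayed conversation of the next file), not captured from a live run:

    // Illustrative only: approximate model turn surfaced by the first stream.
    const exampleModelTurn = {
      role: 'model',
      parts: [
        {functionCall: {name: 'get_current_weather', args: {location: 'Boston'}}},
      ],
    };
    console.log(JSON.stringify(exampleModelTurn, null, 2));

The second sendMessageStream call then feeds the canned functionResponseParts back to the model, which is why the tests can assert on "super nice" weather.
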
diff --git a/generative-ai/snippets/functionCallingStreamContent.js b/generative-ai/snippets/functionCallingStreamContent.js
new file mode 100644
index 0000000000..e0b71e2a29
--- /dev/null
+++ b/generative-ai/snippets/functionCallingStreamContent.js
@@ -0,0 +1,96 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// [START aiplatform_gemini_function_calling_content]
+const {
+  VertexAI,
+  FunctionDeclarationSchemaType,
+} = require('@google-cloud/vertexai');
+
+const functionDeclarations = [
+  {
+    function_declarations: [
+      {
+        name: 'get_current_weather',
+        description: 'get weather in a given location',
+        parameters: {
+          type: FunctionDeclarationSchemaType.OBJECT,
+          properties: {
+            location: {type: FunctionDeclarationSchemaType.STRING},
+            unit: {
+              type: FunctionDeclarationSchemaType.STRING,
+              enum: ['celsius', 'fahrenheit'],
+            },
+          },
+          required: ['location'],
+        },
+      },
+    ],
+  },
+];
+
+const functionResponseParts = [
+  {
+    functionResponse: {
+      name: 'get_current_weather',
+      response: {name: 'get_current_weather', content: {weather: 'super nice'}},
+    },
+  },
+];
+
+/**
+ * TODO(developer): Update these variables before running the sample.
+ */
+async function functionCallingStreamContent(
+  projectId = 'PROJECT_ID',
+  location = 'us-central1',
+  model = 'gemini-pro'
+) {
+  // Initialize Vertex with your Cloud project and location
+  const vertexAI = new VertexAI({project: projectId, location: location});
+
+  // Instantiate the model
+  const generativeModel = vertexAI.preview.getGenerativeModel({
+    model: model,
+  });
+
+  const request = {
+    contents: [
+      {role: 'user', parts: [{text: 'What is the weather in Boston?'}]},
+      {
+        role: 'model',
+        parts: [
+          {
+            functionCall: {
+              name: 'get_current_weather',
+              args: {location: 'Boston'},
+            },
+          },
+        ],
+      },
+      {role: 'function', parts: functionResponseParts},
+    ],
+    tools: functionDeclarations,
+  };
+  const streamingResp = await generativeModel.generateContentStream(request);
+  for await (const item of streamingResp.stream) {
+    console.log(item.candidates[0].content.parts[0].text);
+  }
+}
+// [END aiplatform_gemini_function_calling_content]
+
+functionCallingStreamContent(...process.argv.slice(2)).catch(err => {
+  console.error(err.message);
+  process.exitCode = 1;
+});
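functionCallingStreamContent.js makes the same exchange stateless: the user question, the model's functionCall turn, and the function result are replayed verbatim in contents, with role: 'function' carrying the response parts. If only the aggregated (non-streaming) answer is needed, the same request could be sent through generateContent instead; this is a sketch under the assumption that the method is available on the same model handle:

    // Sketch only: aggregated variant of the streamed call in the snippet above.
    async function aggregatedAnswer(generativeModel, request) {
      const result = await generativeModel.generateContent(request);
      const response = await result.response;
      return response.candidates[0].content.parts[0].text;
    }
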
diff --git a/generative-ai/snippets/nonStreamingChat.js b/generative-ai/snippets/nonStreamingChat.js
index 0199074f44..8d2af6609b 100644
--- a/generative-ai/snippets/nonStreamingChat.js
+++ b/generative-ai/snippets/nonStreamingChat.js
@@ -27,7 +27,7 @@ async function createNonStreamingChat(
   const vertexAI = new VertexAI({project: projectId, location: location});
 
   // Instantiate the model
-  const generativeModel = vertexAI.preview.getGenerativeModel({
+  const generativeModel = vertexAI.getGenerativeModel({
     model: model,
   });
 
@@ -37,20 +37,23 @@ async function createNonStreamingChat(
   console.log(`User: ${chatInput1}`);
 
   const result1 = await chat.sendMessage(chatInput1);
-  const response1 = result1.response.candidates[0].content.parts[0].text;
-  console.log('Chat bot: ', response1);
+  const response1 = result1.response;
+  const text1 = response1.candidates[0].content.parts[0].text;
+  console.log('Chat bot: ', text1);
 
   const chatInput2 = 'Can you tell me a scientific fun fact?';
   console.log(`User: ${chatInput2}`);
 
   const result2 = await chat.sendMessage(chatInput2);
-  const response2 = result2.response.candidates[0].content.parts[0].text;
-  console.log('Chat bot: ', response2);
+  const response2 = await result2.response;
+  const text2 = response2.candidates[0].content.parts[0].text;
+  console.log('Chat bot: ', text2);
 
   const chatInput3 = 'How can I learn more about that?';
   console.log(`User: ${chatInput3}`);
 
   const result3 = await chat.sendMessage(chatInput3);
-  const response3 = result3.response.candidates[0].content.parts[0].text;
-  console.log('Chat bot: ', response3);
+  const response3 = await result3.response;
+  const text3 = response3.candidates[0].content.parts[0].text;
+  console.log('Chat bot: ', text3);
 }
 // [END aiplatform_gemini_multiturn_chat_nonstreaming]
diff --git a/generative-ai/snippets/nonStreamingContent.js b/generative-ai/snippets/nonStreamingContent.js
index 1d82a7d7bb..65b4dc80ea 100644
--- a/generative-ai/snippets/nonStreamingContent.js
+++ b/generative-ai/snippets/nonStreamingContent.js
@@ -27,7 +27,7 @@ async function createNonStreamingContent(
   const vertexAI = new VertexAI({project: projectId, location: location});
 
   // Instantiate the model
-  const generativeModel = vertexAI.preview.getGenerativeModel({
+  const generativeModel = vertexAI.getGenerativeModel({
     model: model,
   });
 
diff --git a/generative-ai/snippets/nonStreamingMultipartContent.js b/generative-ai/snippets/nonStreamingMultipartContent.js
index d281919313..962339fd8e 100644
--- a/generative-ai/snippets/nonStreamingMultipartContent.js
+++ b/generative-ai/snippets/nonStreamingMultipartContent.js
@@ -29,7 +29,7 @@ async function createNonStreamingMultipartContent(
   const vertexAI = new VertexAI({project: projectId, location: location});
 
   // Instantiate the model
-  const generativeVisionModel = vertexAI.preview.getGenerativeModel({
+  const generativeVisionModel = vertexAI.getGenerativeModel({
     model: model,
   });
 
@@ -50,7 +50,7 @@ async function createNonStreamingMultipartContent(
   };
 
   console.log('Prompt Text:');
-  console.log(request.contents[0].parts[0].text);
+  console.log(request.contents[0].parts[1].text);
   console.log('Non-Streaming Response Text:');
 
   // Create the response stream
diff --git a/generative-ai/snippets/package.json b/generative-ai/snippets/package.json
index 29a62f5f37..59bb0948c1 100644
--- a/generative-ai/snippets/package.json
+++ b/generative-ai/snippets/package.json
@@ -13,7 +13,7 @@
     "test": "c8 mocha -p -j 2 --timeout 2400000 test/*.test.js"
   },
   "dependencies": {
-    "@google-cloud/aiplatform": "^3.0.0",
+    "@google-cloud/aiplatform": "^3.12.0",
     "@google-cloud/vertexai": "github:googleapis/nodejs-vertexai",
     "axios": "^1.6.2",
     "supertest": "^6.3.3"
diff --git a/generative-ai/snippets/safetySettings.js b/generative-ai/snippets/safetySettings.js
index ac4509f646..306fa54db5 100644
--- a/generative-ai/snippets/safetySettings.js
+++ b/generative-ai/snippets/safetySettings.js
@@ -31,7 +31,7 @@ async function setSafetySettings(
   const vertexAI = new VertexAI({project: projectId, location: location});
 
   // Instantiate the model
-  const generativeModel = vertexAI.preview.getGenerativeModel({
+  const generativeModel = vertexAI.getGenerativeModel({
     model: model,
     // The following parameters are optional
     // They can also be passed to individual content generation requests
@@ -59,6 +59,7 @@ async function setSafetySettings(
   for await (const item of responseStream.stream) {
     if (item.candidates[0].finishReason === 'SAFETY') {
       console.log('This response stream terminated due to safety concerns.');
+      break;
     } else {
       process.stdout.write(item.candidates[0].content.parts[0].text);
     }
@@ -66,7 +67,7 @@
 }
 // [END aiplatform_gemini_safety_settings]
 
-setSafetySettings(...process.argv.slice(3)).catch(err => {
+setSafetySettings(...process.argv.slice(2)).catch(err => {
   console.error(err.message);
   process.exitCode = 1;
 });
diff --git a/generative-ai/snippets/sendMultiModalPromptWithImage.js b/generative-ai/snippets/sendMultiModalPromptWithImage.js
index b6dcb09aa7..acc4514b28 100644
--- a/generative-ai/snippets/sendMultiModalPromptWithImage.js
+++ b/generative-ai/snippets/sendMultiModalPromptWithImage.js
@@ -43,7 +43,7 @@ async function sendMultiModalPromptWithImage(
   // Initialize Vertex with your Cloud project and location
   const vertexAI = new VertexAI({project: projectId, location: location});
 
-  const generativeVisionModel = vertexAI.preview.getGenerativeModel({
+  const generativeVisionModel = vertexAI.getGenerativeModel({
     model: model,
   });
 
diff --git a/generative-ai/snippets/sendMultiModalPromptWithVideo.js b/generative-ai/snippets/sendMultiModalPromptWithVideo.js
index cd777cbf12..a6c4dad1cc 100644
--- a/generative-ai/snippets/sendMultiModalPromptWithVideo.js
+++ b/generative-ai/snippets/sendMultiModalPromptWithVideo.js
@@ -26,7 +26,7 @@ async function sendMultiModalPromptWithVideo(
   // Initialize Vertex with your Cloud project and location
   const vertexAI = new VertexAI({project: projectId, location: location});
 
-  const generativeVisionModel = vertexAI.preview.getGenerativeModel({
+  const generativeVisionModel = vertexAI.getGenerativeModel({
     model: model,
   });
 
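Two fixes in the group above are easy to skim past. In safetySettings.js, the added break stops the read loop at the first chunk whose finishReason is 'SAFETY', presumably because a safety-terminated stream has no further text parts to print. And the process.argv.slice(3) bug silently dropped the first CLI argument, shifting every parameter by one; Node reserves the first two argv slots for the interpreter and the script path:

    // Invocation: node safetySettings.js my-project us-central1 gemini-pro
    // process.argv: [<node binary>, <script path>, 'my-project', 'us-central1', 'gemini-pro']
    console.log(process.argv.slice(3)); // ['us-central1', 'gemini-pro'] -> projectId became 'us-central1'
    console.log(process.argv.slice(2)); // ['my-project', 'us-central1', 'gemini-pro'] -> correct
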
diff --git a/generative-ai/snippets/streamChat.js b/generative-ai/snippets/streamChat.js
index bf6edcfa40..7400774dc8 100644
--- a/generative-ai/snippets/streamChat.js
+++ b/generative-ai/snippets/streamChat.js
@@ -27,7 +27,7 @@ async function createStreamChat(
   const vertexAI = new VertexAI({project: projectId, location: location});
 
   // Instantiate the model
-  const generativeModel = vertexAI.preview.getGenerativeModel({
+  const generativeModel = vertexAI.getGenerativeModel({
     model: model,
   });
 
diff --git a/generative-ai/snippets/streamContent.js b/generative-ai/snippets/streamContent.js
index 9004c6b515..cfaf156e5d 100644
--- a/generative-ai/snippets/streamContent.js
+++ b/generative-ai/snippets/streamContent.js
@@ -27,7 +27,7 @@ async function createStreamContent(
   const vertexAI = new VertexAI({project: projectId, location: location});
 
   // Instantiate the model
-  const generativeModel = vertexAI.preview.getGenerativeModel({
+  const generativeModel = vertexAI.getGenerativeModel({
     model: model,
   });
 
diff --git a/generative-ai/snippets/streamMultipartContent.js b/generative-ai/snippets/streamMultipartContent.js
index b336258c29..670a0b13c2 100644
--- a/generative-ai/snippets/streamMultipartContent.js
+++ b/generative-ai/snippets/streamMultipartContent.js
@@ -29,7 +29,7 @@ async function createStreamMultipartContent(
   const vertexAI = new VertexAI({project: projectId, location: location});
 
   // Instantiate the model
-  const generativeVisionModel = vertexAI.preview.getGenerativeModel({
+  const generativeVisionModel = vertexAI.getGenerativeModel({
     model: model,
   });
 
@@ -50,7 +50,7 @@ async function createStreamMultipartContent(
   };
 
   console.log('Prompt Text:');
-  console.log(request.contents[0].parts[0].text);
+  console.log(request.contents[0].parts[1].text);
   console.log('Streaming Response Text:');
 
   // Create the response stream
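The new tests below follow the repo's snippet-test pattern: shell out to the sample with execSync and assert on its stdout. They hard-code what appears to be an internal CI project (cloud-llm-preview1); a hypothetical tweak like the following (the environment variable is an assumption, not an existing convention in this repo) would make them runnable elsewhere:

    // Hypothetical: fall back to an env var so the tests can run outside CI.
    const project = process.env.GCLOUD_PROJECT || 'cloud-llm-preview1';
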
diff --git a/generative-ai/snippets/test/functionCallingStreamChat.test.js b/generative-ai/snippets/test/functionCallingStreamChat.test.js
new file mode 100644
index 0000000000..482d79bd40
--- /dev/null
+++ b/generative-ai/snippets/test/functionCallingStreamChat.test.js
@@ -0,0 +1,36 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+'use strict';
+
+const {assert} = require('chai');
+const {describe, it} = require('mocha');
+const cp = require('child_process');
+
+const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
+
+describe('Generative AI Function Calling Stream Chat', () => {
+  const project = 'cloud-llm-preview1';
+  const location = 'us-central1';
+  const model = 'gemini-pro';
+
+  it('should create stream chat and begin the conversation the same in each instance', async () => {
+    const output = execSync(
+      `node ./functionCallingStreamChat.js ${project} ${location} ${model}`
+    );
+
+    // Assert that the response is what we expect
+    assert(output.match(/The weather in Boston is super nice./));
+  });
+});
diff --git a/generative-ai/snippets/test/functionCallingStreamContent.test.js b/generative-ai/snippets/test/functionCallingStreamContent.test.js
new file mode 100644
index 0000000000..f80ea029d8
--- /dev/null
+++ b/generative-ai/snippets/test/functionCallingStreamContent.test.js
@@ -0,0 +1,36 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+'use strict';
+
+const {assert} = require('chai');
+const {describe, it} = require('mocha');
+const cp = require('child_process');
+
+const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
+
+describe('Generative AI Function Calling Stream Content', () => {
+  const project = 'cloud-llm-preview1';
+  const location = 'us-central1';
+  const model = 'gemini-pro';
+
+  it('should create stream content and begin the conversation the same in each instance', async () => {
+    const output = execSync(
+      `node ./functionCallingStreamContent.js ${project} ${location} ${model}`
+    );
+
+    // Assert that the response is what we expect
+    assert(output.match(/The weather in Boston is super nice./));
+  });
+});
diff --git a/generative-ai/snippets/test/sendMultiModalPromptWithImage.test.js b/generative-ai/snippets/test/sendMultiModalPromptWithImage.test.js
index a421893228..414fff580f 100644
--- a/generative-ai/snippets/test/sendMultiModalPromptWithImage.test.js
+++ b/generative-ai/snippets/test/sendMultiModalPromptWithImage.test.js
@@ -30,6 +30,6 @@ describe('Generative AI Stream MultiModal with Image', () => {
       `node ./sendMultiModalPromptWithImage.js ${project} ${location} ${model}`
     );
     // Ensure that the conversation is what we expect for these images
-    assert(output.match(/Paris/));
+    assert(output.match(/city: Rio de Janeiro, Landmark: Christ the Redeemer/));
   });
 });
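One caution on the sendMultiModalPromptWithImage assertion above: it pins free-form model output to a single exact phrasing, which tends to be flaky against a live model. A looser check (a suggestion, not part of this PR) keeps the substance while tolerating formatting drift:

    // Suggestion only: match the landmark without pinning exact punctuation.
    assert(output.match(/Rio de Janeiro/) && output.match(/Christ the Redeemer/));
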
diff --git a/generative-ai/snippets/test/sendMultiModalPromptWithVideo.test.js b/generative-ai/snippets/test/sendMultiModalPromptWithVideo.test.js
index 91d6fedb56..17e3e07bef 100644
--- a/generative-ai/snippets/test/sendMultiModalPromptWithVideo.test.js
+++ b/generative-ai/snippets/test/sendMultiModalPromptWithVideo.test.js
@@ -30,6 +30,6 @@ describe('Generative AI Stream MultiModal with Video', () => {
       `node ./sendMultiModalPromptWithVideo.js ${project} ${location} ${model}`
     );
     // Ensure that the conversation is what we expect for these images
-    assert(output.match(/advertisement/));
+    assert(output.match(/Zootopia/));
   });
 });