Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

chore: remove .preview from generative ai methods #3620

Merged
merged 6 commits into from
Feb 15, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions generative-ai/snippets/countTokens.js
Original file line number Diff line number Diff line change
Expand Up @@ -24,10 +24,10 @@ async function countTokens(
model = 'gemini-pro'
) {
// Initialize Vertex with your Cloud project and location
const vertex_ai = new VertexAI({project: projectId, location: location});
const vertexAI = new VertexAI({project: projectId, location: location});

// Instantiate the model
const generativeModel = vertex_ai.preview.getGenerativeModel({
const generativeModel = vertexAI.getGenerativeModel({
model: model,
});

Expand Down
98 changes: 98 additions & 0 deletions generative-ai/snippets/functionCallingStreamChat.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,98 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// [START aiplatform_gemini_function_calling_chat]
const {
  VertexAI,
  FunctionDeclarationSchemaType,
} = require('@google-cloud/vertexai');

// Tool declarations passed to the model: a single `get_current_weather`
// function the model may ask us to call.
const functionDeclarations = [
  {
    function_declarations: [
      {
        name: 'get_current_weather',
        description: 'get weather in a given location',
        parameters: {
          type: FunctionDeclarationSchemaType.OBJECT,
          properties: {
            location: {type: FunctionDeclarationSchemaType.STRING},
            unit: {
              type: FunctionDeclarationSchemaType.STRING,
              enum: ['celsius', 'fahrenheit'],
            },
          },
          required: ['location'],
        },
      },
    ],
  },
];

// Canned FunctionResponse we send back as if we had executed
// `get_current_weather` — this sample does not call a real weather API.
const functionResponseParts = [
  {
    functionResponse: {
      name: 'get_current_weather',
      response: {name: 'get_current_weather', content: {weather: 'super nice'}},
    },
  },
];

/**
 * Demonstrates streaming chat with function calling: the first turn elicits a
 * functionCall from the model, the second turn supplies the function's result
 * and streams the model's final text answer.
 *
 * TODO(developer): Update these variables before running the sample.
 *
 * @param {string} projectId - Google Cloud project ID.
 * @param {string} location - Vertex AI region, e.g. 'us-central1'.
 * @param {string} model - Model resource name, e.g. 'gemini-pro'.
 */
async function functionCallingStreamChat(
  projectId = 'PROJECT_ID',
  location = 'us-central1',
  model = 'gemini-pro'
) {
  // Initialize Vertex with your Cloud project and location
  const vertexAI = new VertexAI({project: projectId, location: location});

  // Instantiate the model. Note: no `.preview` namespace — this change removes
  // `.preview` from the generative AI methods in every other snippet, so this
  // new snippet is kept consistent with them.
  const generativeModel = vertexAI.getGenerativeModel({
    model: model,
  });

  // Create a chat session and pass your function declarations
  const chat = generativeModel.startChat({
    tools: functionDeclarations,
  });

  const chatInput1 = 'What is the weather in Boston?';

  // This should include a functionCall response from the model
  const result1 = await chat.sendMessageStream(chatInput1);
  for await (const item of result1.stream) {
    console.log(item.candidates[0]);
  }
  // Wait for the aggregated response so the turn is fully recorded in the
  // chat history before sending the follow-up message.
  await result1.response;

  // Send a follow up message with a FunctionResponse
  const result2 = await chat.sendMessageStream(functionResponseParts);
  for await (const item of result2.stream) {
    console.log(item.candidates[0]);
  }

  // This should include a text response from the model using the response content
  // provided above
  const response2 = await result2.response;
  console.log(response2.candidates[0].content.parts[0].text);
}
// [END aiplatform_gemini_function_calling_chat]

functionCallingStreamChat(...process.argv.slice(2)).catch(err => {
  console.error(err.message);
  process.exitCode = 1;
});
96 changes: 96 additions & 0 deletions generative-ai/snippets/functionCallingStreamContent.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,96 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// [START aiplatform_gemini_function_calling_content]
const {
  VertexAI,
  FunctionDeclarationSchemaType,
} = require('@google-cloud/vertexai');

// Tool declarations passed to the model: a single `get_current_weather`
// function the model may ask us to call.
const functionDeclarations = [
  {
    function_declarations: [
      {
        name: 'get_current_weather',
        description: 'get weather in a given location',
        parameters: {
          type: FunctionDeclarationSchemaType.OBJECT,
          properties: {
            location: {type: FunctionDeclarationSchemaType.STRING},
            unit: {
              type: FunctionDeclarationSchemaType.STRING,
              enum: ['celsius', 'fahrenheit'],
            },
          },
          required: ['location'],
        },
      },
    ],
  },
];

// Canned FunctionResponse injected into the request history as if we had
// executed `get_current_weather` — this sample does not call a real API.
const functionResponseParts = [
  {
    functionResponse: {
      name: 'get_current_weather',
      response: {name: 'get_current_weather', content: {weather: 'super nice'}},
    },
  },
];

/**
 * Demonstrates function calling via generateContentStream: the full
 * user -> model(functionCall) -> function(response) history is supplied in a
 * single request and the model's final text answer is streamed back.
 *
 * TODO(developer): Update these variables before running the sample.
 *
 * @param {string} projectId - Google Cloud project ID.
 * @param {string} location - Vertex AI region, e.g. 'us-central1'.
 * @param {string} model - Model resource name, e.g. 'gemini-pro'.
 */
async function functionCallingStreamContent(
  // Renamed from `functionCallingStreamChat` (copy-paste from the chat
  // snippet) to match this file and its region tag.
  projectId = 'PROJECT_ID',
  location = 'us-central1',
  model = 'gemini-pro'
) {
  // Initialize Vertex with your Cloud project and location
  const vertexAI = new VertexAI({project: projectId, location: location});

  // Instantiate the model. Note: no `.preview` namespace — this change removes
  // `.preview` from the generative AI methods in every other snippet, so this
  // new snippet is kept consistent with them.
  const generativeModel = vertexAI.getGenerativeModel({
    model: model,
  });

  // The request replays the whole function-calling exchange as content,
  // ending with the function's response for the model to summarize.
  const request = {
    contents: [
      {role: 'user', parts: [{text: 'What is the weather in Boston?'}]},
      {
        role: 'model',
        parts: [
          {
            functionCall: {
              name: 'get_current_weather',
              args: {location: 'Boston'},
            },
          },
        ],
      },
      {role: 'function', parts: functionResponseParts},
    ],
    tools: functionDeclarations,
  };
  const streamingResp = await generativeModel.generateContentStream(request);
  for await (const item of streamingResp.stream) {
    console.log(item.candidates[0].content.parts[0].text);
  }
}
// [END aiplatform_gemini_function_calling_content]

functionCallingStreamContent(...process.argv.slice(2)).catch(err => {
  console.error(err.message);
  process.exitCode = 1;
});
17 changes: 10 additions & 7 deletions generative-ai/snippets/nonStreamingChat.js
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ async function createNonStreamingChat(
const vertexAI = new VertexAI({project: projectId, location: location});

// Instantiate the model
const generativeModel = vertexAI.preview.getGenerativeModel({
const generativeModel = vertexAI.getGenerativeModel({
model: model,
});

Expand All @@ -37,20 +37,23 @@ async function createNonStreamingChat(
console.log(`User: ${chatInput1}`);

const result1 = await chat.sendMessage(chatInput1);
const response1 = result1.response.candidates[0].content.parts[0].text;
console.log('Chat bot: ', response1);
const response1 = result1.response;
const text1 = response1.candidates[0].content.parts[0].text;
console.log('Chat bot: ', text1);

const chatInput2 = 'Can you tell me a scientific fun fact?';
console.log(`User: ${chatInput2}`);
const result2 = await chat.sendMessage(chatInput2);
const response2 = result2.response.candidates[0].content.parts[0].text;
console.log('Chat bot: ', response2);
const response2 = await result2.response;
const text2 = response2.candidates[0].content.parts[0].text;
console.log('Chat bot: ', text2);

const chatInput3 = 'How can I learn more about that?';
console.log(`User: ${chatInput3}`);
const result3 = await chat.sendMessage(chatInput3);
const response3 = result3.response.candidates[0].content.parts[0].text;
console.log('Chat bot: ', response3);
const response3 = await result3.response;
const text3 = response3.candidates[0].content.parts[0].text;
console.log('Chat bot: ', text3);
}
// [END aiplatform_gemini_multiturn_chat_nonstreaming]

Expand Down
2 changes: 1 addition & 1 deletion generative-ai/snippets/nonStreamingContent.js
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ async function createNonStreamingContent(
const vertexAI = new VertexAI({project: projectId, location: location});

// Instantiate the model
const generativeModel = vertexAI.preview.getGenerativeModel({
const generativeModel = vertexAI.getGenerativeModel({
model: model,
});

Expand Down
4 changes: 2 additions & 2 deletions generative-ai/snippets/nonStreamingMultipartContent.js
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ async function createNonStreamingMultipartContent(
const vertexAI = new VertexAI({project: projectId, location: location});

// Instantiate the model
const generativeVisionModel = vertexAI.preview.getGenerativeModel({
const generativeVisionModel = vertexAI.getGenerativeModel({
model: model,
});

Expand All @@ -50,7 +50,7 @@ async function createNonStreamingMultipartContent(
};

console.log('Prompt Text:');
console.log(request.contents[0].parts[0].text);
console.log(request.contents[0].parts[1].text);

console.log('Non-Streaming Response Text:');
// Create the response stream
Expand Down
2 changes: 1 addition & 1 deletion generative-ai/snippets/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@
"test": "c8 mocha -p -j 2 --timeout 2400000 test/*.test.js"
},
"dependencies": {
"@google-cloud/aiplatform": "^3.0.0",
"@google-cloud/aiplatform": "^3.12.0",
"@google-cloud/vertexai": "github:googleapis/nodejs-vertexai",
"axios": "^1.6.2",
"supertest": "^6.3.3"
Expand Down
5 changes: 3 additions & 2 deletions generative-ai/snippets/safetySettings.js
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ async function setSafetySettings(
const vertexAI = new VertexAI({project: projectId, location: location});

// Instantiate the model
const generativeModel = vertexAI.preview.getGenerativeModel({
const generativeModel = vertexAI.getGenerativeModel({
model: model,
// The following parameters are optional
// They can also be passed to individual content generation requests
Expand Down Expand Up @@ -59,14 +59,15 @@ async function setSafetySettings(
for await (const item of responseStream.stream) {
if (item.candidates[0].finishReason === 'SAFETY') {
console.log('This response stream terminated due to safety concerns.');
break;
} else {
process.stdout.write(item.candidates[0].content.parts[0].text);
}
}
}
// [END aiplatform_gemini_safety_settings]

setSafetySettings(...process.argv.slice(3)).catch(err => {
setSafetySettings(...process.argv.slice(2)).catch(err => {
console.error(err.message);
process.exitCode = 1;
});
2 changes: 1 addition & 1 deletion generative-ai/snippets/sendMultiModalPromptWithImage.js
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ async function sendMultiModalPromptWithImage(
// Initialize Vertex with your Cloud project and location
const vertexAI = new VertexAI({project: projectId, location: location});

const generativeVisionModel = vertexAI.preview.getGenerativeModel({
const generativeVisionModel = vertexAI.getGenerativeModel({
model: model,
});

Expand Down
2 changes: 1 addition & 1 deletion generative-ai/snippets/sendMultiModalPromptWithVideo.js
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ async function sendMultiModalPromptWithVideo(
// Initialize Vertex with your Cloud project and location
const vertexAI = new VertexAI({project: projectId, location: location});

const generativeVisionModel = vertexAI.preview.getGenerativeModel({
const generativeVisionModel = vertexAI.getGenerativeModel({
model: model,
});

Expand Down
2 changes: 1 addition & 1 deletion generative-ai/snippets/streamChat.js
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ async function createStreamChat(
const vertexAI = new VertexAI({project: projectId, location: location});

// Instantiate the model
const generativeModel = vertexAI.preview.getGenerativeModel({
const generativeModel = vertexAI.getGenerativeModel({
model: model,
});

Expand Down
2 changes: 1 addition & 1 deletion generative-ai/snippets/streamContent.js
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ async function createStreamContent(
const vertexAI = new VertexAI({project: projectId, location: location});

// Instantiate the model
const generativeModel = vertexAI.preview.getGenerativeModel({
const generativeModel = vertexAI.getGenerativeModel({
model: model,
});

Expand Down
4 changes: 2 additions & 2 deletions generative-ai/snippets/streamMultipartContent.js
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ async function createStreamMultipartContent(
const vertexAI = new VertexAI({project: projectId, location: location});

// Instantiate the model
const generativeVisionModel = vertexAI.preview.getGenerativeModel({
const generativeVisionModel = vertexAI.getGenerativeModel({
model: model,
});

Expand All @@ -50,7 +50,7 @@ async function createStreamMultipartContent(
};

console.log('Prompt Text:');
console.log(request.contents[0].parts[0].text);
console.log(request.contents[0].parts[1].text);
console.log('Streaming Response Text:');

// Create the response stream
Expand Down
Loading
Loading