// limitations under the License.

// [START aiplatform_gemini_function_calling_chat]
const {
  VertexAI,
  FunctionDeclarationSchemaType
} = require('@google-cloud/vertexai');

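// Declare the get_current_weather tool; the schema below tells the model
// which arguments to supply when it emits a functionCall.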
const functionDeclarations = [
  {
    function_declarations: [
      {
        name: 'get_current_weather',
        description: 'get weather in a given location',
        parameters: {
          type: FunctionDeclarationSchemaType.OBJECT,
          properties: {
            location: { type: FunctionDeclarationSchemaType.STRING },
            unit: {
              type: FunctionDeclarationSchemaType.STRING,
              enum: ['celsius', 'fahrenheit'],
            },
          },
          required: ['location'],
        },
      },
    ],
  },
];

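// A canned reply for the declared function; a real application would call a
// weather service here and return its result as the function response.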
const functionResponseParts = [
  {
    functionResponse: {
      name: 'get_current_weather',
      response:
        { name: 'get_current_weather', content: { weather: 'super nice' } },
    },
  },
];

/**
 * TODO(developer): Update these variables before running the sample.
 */
async function functionCallingStreamChat(
  projectId = 'PROJECT_ID',
  location = 'us-central1',
  model = 'gemini-pro'
) {
  // Initialize Vertex with your Cloud project and location
  const vertexAI = new VertexAI({ project: projectId, location: location });

  // Instantiate the model
  const generativeModel = vertexAI.preview.getGenerativeModel({
    model: model,
  });

  // Create a chat session and pass your function declarations
  const chat = generativeModel.startChat({
    tools: functionDeclarations,
  });

  const chatInput1 = 'What is the weather in Boston?';

  // This should include a functionCall response from the model
  const result1 = await chat.sendMessageStream(chatInput1);
  for await (const item of result1.stream) {
    console.log(item.candidates[0]);
  }
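  // Aggregate the streamed chunks into a single response for the first turn
  // (not used further in this sample).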
  const response1 = await result1.response;

  // Send a follow up message with a FunctionResponse
  const result2 = await chat.sendMessageStream(functionResponseParts);
  for await (const item of result2.stream) {
    console.log(item.candidates[0]);
  }

  // This should include a text response from the model using the response content
  // provided above
  const response2 = await result2.response;
  console.log(response2.candidates[0].content.parts[0].text);
}
// [END aiplatform_gemini_function_calling_chat]

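// Run from the CLI; positional args map to (projectId, location, model).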
functionCallingStreamChat(...process.argv.slice(2)).catch(err => {
  console.error(err.message);
  process.exitCode = 1;
});