// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// [START aiplatform_gemini_single_turn_multi_image]
const {VertexAI} = require('@google-cloud/vertexai');
const axios = require('axios');
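
// Downloads an image from a URL and returns its contents as a base64-encoded string.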
async function getBase64(url) {
  const image = await axios.get(url, {responseType: 'arraybuffer'});
  return Buffer.from(image.data).toString('base64');
}

/**
 * TODO(developer): Update these variables before running the sample.
 */
async function sendMultiModalPromptWithImage(
  projectId = 'PROJECT_ID',
  location = 'us-central1',
  model = 'gemini-1.0-pro-vision'
) {
  // For images, the SDK supports base64 strings
  const landmarkImage1 = await getBase64(
    'https://storage.googleapis.com/cloud-samples-data/vertex-ai/llm/prompts/landmark1.png'
  );
  const landmarkImage2 = await getBase64(
    'https://storage.googleapis.com/cloud-samples-data/vertex-ai/llm/prompts/landmark2.png'
  );
  const landmarkImage3 = await getBase64(
    'https://storage.googleapis.com/cloud-samples-data/vertex-ai/llm/prompts/landmark3.png'
  );

  // Initialize Vertex with your Cloud project and location
  const vertexAI = new VertexAI({project: projectId, location: location});
  const generativeVisionModel = vertexAI.getGenerativeModel({
    model: model,
  });

  // Build the multimodal prompt: two labeled example images followed by a
  // third, unlabeled image for the model to describe in the same format
  const request = {
    contents: [
      {
        role: 'user',
        parts: [
          {
            inlineData: {
              data: landmarkImage1,
              mimeType: 'image/png',
            },
          },
          {
            text: 'city: Rome, Landmark: the Colosseum',
          },
          {
            inlineData: {
              data: landmarkImage2,
              mimeType: 'image/png',
            },
          },
          {
            text: 'city: Beijing, Landmark: Forbidden City',
          },
          {
            inlineData: {
              data: landmarkImage3,
              mimeType: 'image/png',
            },
          },
        ],
      },
    ],
  };

  // Send the request to the model and wait for the full response
  const response = await generativeVisionModel.generateContent(request);
  const aggregatedResponse = await response.response;

  // Select the text from the first candidate in the response
  const fullTextResponse =
    aggregatedResponse.candidates[0].content.parts[0].text;
  console.log(fullTextResponse);
}
// [END aiplatform_gemini_single_turn_multi_image]

sendMultiModalPromptWithImage(...process.argv.slice(2)).catch(err => {
  console.error(err.message);
  process.exitCode = 1;
});
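
// Example invocation (the project ID below is an assumed placeholder; the
// positional arguments map to projectId, location, and model):
//   node sendMultiModalPromptWithImage.js my-project us-central1 gemini-1.0-pro-vision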