
Commit 62253bb

Merge pull request #3598 from GoogleCloudPlatform/feature/add-model-name
add model to each function
2 parents 8f00e33 + be61214 commit 62253bb

10 files changed, +65 -117 lines

generative-ai/snippets/countTokens.js

+7 -13

@@ -12,22 +12,17 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+// [START aiplatform_gemini_token_count]
 const {VertexAI} = require('@google-cloud/vertexai');
 
+/**
+ * TODO(developer): Update these variables before running the sample.
+ */
 async function countTokens(
   projectId = 'PROJECT_ID',
-  location = 'LOCATION_ID',
-  model = 'MODEL'
+  location = 'us-central1',
+  model = 'gemini-pro'
 ) {
-  // [START aiplatform_gemini_token_count]
-
-  /**
-   * TODO(developer): Uncomment these variables before running the sample.
-   */
-  // const projectId = 'your-project-id';
-  // const location = 'us-central1';
-  // const model = 'chosen-genai-model';
-
   // Initialize Vertex with your Cloud project and location
   const vertex_ai = new VertexAI({project: projectId, location: location});
 
@@ -42,9 +37,8 @@ async function countTokens(
 
   const countTokensResp = await generativeModel.countTokens(req);
   console.log('count tokens response: ', countTokensResp);
-
-  // [END aiplatform_gemini_token_count]
 }
+// [END aiplatform_gemini_token_count]
 
 countTokens(...process.argv.slice(2)).catch(err => {
   console.error(err.message);
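
The diff only shows the changed hunks, so as orientation here is a hedged sketch of how countTokens.js reads as a whole after this commit. The middle of the function (model instantiation and the request object) is not part of the diff and is approximated from context: the preview.getGenerativeModel accessor and the sample prompt text are assumptions, not something this commit shows.

// [START aiplatform_gemini_token_count]
const {VertexAI} = require('@google-cloud/vertexai');

/**
 * TODO(developer): Update these variables before running the sample.
 */
async function countTokens(
  projectId = 'PROJECT_ID',
  location = 'us-central1',
  model = 'gemini-pro'
) {
  // Initialize Vertex with your Cloud project and location
  const vertex_ai = new VertexAI({project: projectId, location: location});

  // Instantiate the model (approximated: the exact accessor is not in this diff)
  const generativeModel = vertex_ai.preview.getGenerativeModel({model: model});

  // Build a minimal token-count request (approximated prompt text)
  const req = {
    contents: [{role: 'user', parts: [{text: 'How are you doing today?'}]}],
  };

  const countTokensResp = await generativeModel.countTokens(req);
  console.log('count tokens response: ', countTokensResp);
}
// [END aiplatform_gemini_token_count]

countTokens(...process.argv.slice(2)).catch(err => {
  console.error(err.message);
});

With the new defaults, running node countTokens.js with just a project ID is enough: location and model fall back to us-central1 and gemini-pro, and any extra positional arguments still override them because the CLI arguments are spread into the call.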

generative-ai/snippets/nonStreamingChat.js

+7 -12

@@ -12,21 +12,17 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+// [START aiplatform_gemini_multiturn_chat_nonstreaming]
 const {VertexAI} = require('@google-cloud/vertexai');
 
+/**
+ * TODO(developer): Update these variables before running the sample.
+ */
 async function createNonStreamingChat(
   projectId = 'PROJECT_ID',
-  location = 'LOCATION_ID',
-  model = 'MODEL'
+  location = 'us-central1',
+  model = 'gemini-pro'
 ) {
-  // [START aiplatform_gemini_multiturn_chat_nonstreaming]
-  /**
-   * TODO(developer): Uncomment these variables before running the sample.
-   */
-  // const projectId = 'your-project-id';
-  // const location = 'us-central1';
-  // const model = 'chosen-genai-model';
-
   // Initialize Vertex with your Cloud project and location
   const vertexAI = new VertexAI({project: projectId, location: location});
 
@@ -55,9 +51,8 @@ async function createNonStreamingChat(
   const result3 = await chat.sendMessage(chatInput3);
   const response3 = result3.response.candidates[0].content.parts[0].text;
   console.log('Chat bot: ', response3);
-
-  // [END aiplatform_gemini_multiturn_chat_nonstreaming]
 }
+// [END aiplatform_gemini_multiturn_chat_nonstreaming]
 
 createNonStreamingChat(...process.argv.slice(2)).catch(err => {
   console.error(err.message);

generative-ai/snippets/nonStreamingContent.js

+7 -13

@@ -12,22 +12,17 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+// [START aiplatform_gemini_content_nonstreaming]
 const {VertexAI} = require('@google-cloud/vertexai');
 
+/**
+ * TODO(developer): Update these variables before running the sample.
+ */
 async function createNonStreamingContent(
   projectId = 'PROJECT_ID',
-  location = 'LOCATION_ID',
-  model = 'MODEL'
+  location = 'us-central1',
+  model = 'gemini-pro'
 ) {
-  // [START aiplatform_gemini_content_nonstreaming]
-
-  /**
-   * TODO(developer): Uncomment these variables before running the sample.
-   */
-  // const projectId = 'your-project-id';
-  // const location = 'us-central1';
-  // const model = 'chosen-genai-model';
-
   // Initialize Vertex with your Cloud project and location
   const vertexAI = new VertexAI({project: projectId, location: location});
 
@@ -55,9 +50,8 @@ async function createNonStreamingContent(
     aggregatedResponse.candidates[0].content.parts[0].text;
 
   console.log(fullTextResponse);
-
-  // [END aiplatform_gemini_content_nonstreaming]
 }
+// [END aiplatform_gemini_content_nonstreaming]
 
 createNonStreamingContent(...process.argv.slice(2)).catch(err => {
   console.error(err.message);

generative-ai/snippets/nonStreamingMultipartContent.js

+7 -14

@@ -12,25 +12,19 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+// [START aiplatform_gemini_get_started]
 const {VertexAI} = require('@google-cloud/vertexai');
 
+/**
+ * TODO(developer): Update these variables before running the sample.
+ */
 async function createNonStreamingMultipartContent(
   projectId = 'PROJECT_ID',
-  location = 'LOCATION_ID',
-  model = 'MODEL',
+  location = 'us-central1',
+  model = 'gemini-pro-vision',
   image = 'gs://generativeai-downloads/images/scones.jpg',
   mimeType = 'image/jpeg'
 ) {
-  // [START aiplatform_gemini_get_started]
-
-  /**
-   * TODO(developer): Uncomment these variables before running the sample.
-   */
-  // const projectId = 'your-project-id';
-  // const location = 'us-central1';
-  // const image = 'gs://generativeai-downloads/images/scones.jpg'; // Google Cloud Storage image
-  // const mimeType = 'image/jpeg';
-
   // Initialize Vertex with your Cloud project and location
   const vertexAI = new VertexAI({project: projectId, location: location});
 
@@ -71,9 +65,8 @@ async function createNonStreamingMultipartContent(
     aggregatedResponse.candidates[0].content.parts[0].text;
 
   console.log(fullTextResponse);
-
-  // [END aiplatform_gemini_get_started]
 }
+// [END aiplatform_gemini_get_started]
 
 createNonStreamingMultipartContent(...process.argv.slice(2)).catch(err => {
   console.error(err.message);
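
Because the snippet is invoked with createNonStreamingMultipartContent(...process.argv.slice(2)), positional CLI arguments map onto the parameters in order, and with the new defaults only the project ID is strictly required. A small illustration of what the spread expands to (the project ID is a hypothetical placeholder):

// node nonStreamingMultipartContent.js my-project-id
// expands, with the new defaults filled in, to a call equivalent to:
createNonStreamingMultipartContent(
  'my-project-id',                                  // projectId (hypothetical)
  'us-central1',                                    // location (default)
  'gemini-pro-vision',                              // model (default)
  'gs://generativeai-downloads/images/scones.jpg',  // image (default)
  'image/jpeg'                                      // mimeType (default)
).catch(err => console.error(err.message));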

generative-ai/snippets/safetySettings.js

+7 -11

@@ -12,25 +12,21 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+// [START aiplatform_gemini_safety_settings]
 const {
   VertexAI,
   HarmCategory,
   HarmBlockThreshold,
 } = require('@google-cloud/vertexai');
 
+/**
+ * TODO(developer): Update these variables before running the sample.
+ */
 async function setSafetySettings(
   projectId = 'PROJECT_ID',
-  location = 'LOCATION_ID',
-  model = 'MODEL'
+  location = 'us-central1',
+  model = 'gemini-pro'
 ) {
-  // [START aiplatform_gemini_safety_settings]
-  /**
-   * TODO(developer): Uncomment these variables before running the sample.
-   */
-  // const projectId = 'your-project-id';
-  // const location = 'us-central1';
-  // const model = 'chosen-genai-model';
-
   // Initialize Vertex with your Cloud project and location
   const vertexAI = new VertexAI({project: projectId, location: location});
 
@@ -67,8 +63,8 @@ async function setSafetySettings(
       process.stdout.write(item.candidates[0].content.parts[0].text);
     }
   }
-  // [END aiplatform_gemini_safety_settings]
 }
+// [END aiplatform_gemini_safety_settings]
 
 setSafetySettings(...process.argv.slice(3)).catch(err => {
   console.error(err.message);
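
The diff shows HarmCategory and HarmBlockThreshold being imported, but the code that applies them sits outside the changed hunks. The following is a hedged sketch of how such enums are typically wired into getGenerativeModel for a safety-settings sample; the preview accessor and the field casing (safetySettings vs. safety_settings) have varied across SDK releases, so treat them as assumptions rather than what this file literally contains.

// Hedged sketch: applying safety settings when instantiating the model.
// The accessor path and field names are assumptions, not taken from this diff.
const generativeModel = vertexAI.preview.getGenerativeModel({
  model: model,
  safetySettings: [
    {
      category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
      threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
    },
  ],
});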

generative-ai/snippets/sendMultiModalPromptWithImage.js

+2 -2

@@ -26,8 +26,8 @@ async function getBase64(url) {
  */
 async function sendMultiModalPromptWithImage(
   projectId = 'PROJECT_ID',
-  location = 'LOCATION_ID',
-  model = 'MODEL'
+  location = 'us-central1',
+  model = 'gemini-pro-vision'
 ) {
   // For images, the SDK supports base64 strings
   const landmarkImage1 = await getBase64(

generative-ai/snippets/sendMultiModalPromptWithVideo.js

+7 -12

@@ -12,21 +12,17 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+// [START aiplatform_gemini_single_turn_video]
 const {VertexAI} = require('@google-cloud/vertexai');
 
+/**
+ * TODO(developer): Update these variables before running the sample.
+ */
 async function sendMultiModalPromptWithVideo(
   projectId = 'PROJECT_ID',
-  location = 'LOCATION_ID',
-  model = 'MODEL'
+  location = 'us-central1',
+  model = 'gemini-pro-vision'
 ) {
-  // [START aiplatform_gemini_single_turn_video]
-  /**
-   * TODO(developer): Uncomment these variables before running the sample.
-   */
-  // const projectId = 'your-project-id';
-  // const location = 'us-central1';
-  // const model = 'chosen-genai-model';
-
   // Initialize Vertex with your Cloud project and location
   const vertexAI = new VertexAI({project: projectId, location: location});
 
@@ -63,9 +59,8 @@ async function sendMultiModalPromptWithVideo(
     aggregatedResponse.candidates[0].content.parts[0].text;
 
   console.log(fullTextResponse);
-
-  // [END aiplatform_gemini_single_turn_video]
 }
+// [END aiplatform_gemini_single_turn_video]
 
 sendMultiModalPromptWithVideo(...process.argv.slice(2)).catch(err => {
   console.error(err.message);

generative-ai/snippets/streamChat.js

+7 -12

@@ -12,21 +12,17 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+// [START aiplatform_gemini_multiturn_chat]
 const {VertexAI} = require('@google-cloud/vertexai');
 
+/**
+ * TODO(developer): Update these variables before running the sample.
+ */
 async function createStreamChat(
   projectId = 'PROJECT_ID',
-  location = 'LOCATION_ID',
-  model = 'MODEL'
+  location = 'us-central1',
+  model = 'gemini-pro'
 ) {
-  // [START aiplatform_gemini_multiturn_chat]
-  /**
-   * TODO(developer): Uncomment these variables before running the sample.
-   */
-  // const projectId = 'your-project-id';
-  // const location = 'us-central1';
-  // const model = 'chosen-genai-model';
-
   // Initialize Vertex with your Cloud project and location
   const vertexAI = new VertexAI({project: projectId, location: location});
 
@@ -44,9 +40,8 @@ async function createStreamChat(
   for await (const item of result1.stream) {
     console.log(item.candidates[0].content.parts[0].text);
   }
-
-  // [END aiplatform_gemini_multiturn_chat]
 }
+// [END aiplatform_gemini_multiturn_chat]
 
 createStreamChat(...process.argv.slice(2)).catch(err => {
   console.error(err.message);

generative-ai/snippets/streamContent.js

+7 -13

@@ -12,22 +12,17 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+// [START aiplatform_gemini_content]
 const {VertexAI} = require('@google-cloud/vertexai');
 
+/**
+ * TODO(developer): Update these variables before running the sample.
+ */
 async function createStreamContent(
   projectId = 'PROJECT_ID',
-  location = 'LOCATION_ID',
-  model = 'MODEL'
+  location = 'us-central1',
+  model = 'gemini-pro'
 ) {
-  // [START aiplatform_gemini_content]
-
-  /**
-   * TODO(developer): Uncomment these variables before running the sample.
-   */
-  // const projectId = 'your-project-id';
-  // const location = 'us-central1';
-  // const model = 'chosen-genai-model';
-
   // Initialize Vertex with your Cloud project and location
   const vertexAI = new VertexAI({project: projectId, location: location});
 
@@ -51,9 +46,8 @@ async function createStreamContent(
   for await (const item of responseStream.stream) {
     process.stdout.write(item.candidates[0].content.parts[0].text);
   }
-
-  // [END aiplatform_gemini_content]
 }
+// [END aiplatform_gemini_content]
 
 createStreamContent(...process.argv.slice(2)).catch(err => {
   console.error(err.message);

generative-ai/snippets/streamMultipartContent.js

+7 -15

@@ -12,26 +12,19 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+// [START aiplatform_gemini_get_started]
 const {VertexAI} = require('@google-cloud/vertexai');
 
+/**
+ * TODO(developer): Update these variables before running the sample.
+ */
 async function createStreamMultipartContent(
   projectId = 'PROJECT_ID',
-  location = 'LOCATION_ID',
-  model = 'MODEL',
+  location = 'us-central1',
+  model = 'gemini-pro-vision',
   image = 'gs://generativeai-downloads/images/scones.jpg',
   mimeType = 'image/jpeg'
 ) {
-  // [START aiplatform_gemini_get_started]
-
-  /**
-   * TODO(developer): Uncomment these variables before running the sample.
-   */
-  // const projectId = 'your-project-id';
-  // const location = 'us-central1';
-  // const model = 'chosen-genai-model';
-  // const image = 'gs://generativeai-downloads/images/scones.jpg'; // Google Cloud Storage image
-  // const mimeType = 'image/jpeg';
-
   // Initialize Vertex with your Cloud project and location
   const vertexAI = new VertexAI({project: projectId, location: location});
 
@@ -68,9 +61,8 @@ async function createStreamMultipartContent(
   for await (const item of responseStream.stream) {
     process.stdout.write(item.candidates[0].content.parts[0].text);
  }
-
-  // [END aiplatform_gemini_get_started]
 }
+// [END aiplatform_gemini_get_started]
 
 createStreamMultipartContent(...process.argv.slice(2)).catch(err => {
   console.error(err.message);
