Skip to content

Commit 7e889ba

Browse files
authored
Merge branch 'main' into feature/nextjs-helloworld
2 parents 0574efe + 4892dff commit 7e889ba

12 files changed

+155
-67
lines changed

generative-ai/snippets/test/countTokens.test.js

+12-5
Original file line numberDiff line numberDiff line change
@@ -17,17 +17,24 @@
1717
const {assert} = require('chai');
1818
const {describe, it} = require('mocha');
1919
const cp = require('child_process');
20-
2120
const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
2221

22+
const projectId = process.env.CAIP_PROJECT_ID;
23+
const location = process.env.LOCATION;
24+
const model = 'gemini-1.0-pro';
25+
2326
describe('Count tokens', async () => {
24-
const project = 'cloud-llm-preview1';
25-
const location = 'us-central1';
26-
const model = 'gemini-1.0-pro';
27+
/**
28+
* TODO(developer): Uncomment these variables before running the sample.
29+
* (Not necessary if passing values as arguments)
30+
*/
31+
// const projectId = 'YOUR_PROJECT_ID';
32+
// const location = 'YOUR_LOCATION';
33+
// const model = 'gemini-1.0-pro';
2734

2835
it('should count tokens', async () => {
2936
const output = execSync(
30-
`node ./countTokens.js ${project} ${location} ${model}`
37+
`node ./countTokens.js ${projectId} ${location} ${model}`
3138
);
3239

3340
// Expect 6 tokens

generative-ai/snippets/test/functionCallingStreamChat.test.js

+12-5
Original file line numberDiff line numberDiff line change
@@ -17,17 +17,24 @@
1717
const {assert} = require('chai');
1818
const {describe, it} = require('mocha');
1919
const cp = require('child_process');
20-
2120
const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
2221

22+
const projectId = process.env.CAIP_PROJECT_ID;
23+
const location = process.env.LOCATION;
24+
const model = 'gemini-1.0-pro';
25+
2326
describe('Generative AI Function Calling Stream Chat', () => {
24-
const project = 'cloud-llm-preview1';
25-
const location = 'us-central1';
26-
const model = 'gemini-1.0-pro';
27+
/**
28+
* TODO(developer): Uncomment these variables before running the sample.
29+
* (Not necessary if passing values as arguments)
30+
*/
31+
// const projectId = 'YOUR_PROJECT_ID';
32+
// const location = 'YOUR_LOCATION';
33+
// const model = 'gemini-1.0-pro';
2734

2835
it('should create stream chat and begin the conversation the same in each instance', async () => {
2936
const output = execSync(
30-
`node ./functionCallingStreamChat.js ${project} ${location} ${model}`
37+
`node ./functionCallingStreamChat.js ${projectId} ${location} ${model}`
3138
);
3239

3340
// Assert that the response is what we expect

generative-ai/snippets/test/functionCallingStreamContent.test.js

+12-5
Original file line numberDiff line numberDiff line change
@@ -17,17 +17,24 @@
1717
const {assert} = require('chai');
1818
const {describe, it} = require('mocha');
1919
const cp = require('child_process');
20-
2120
const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
2221

22+
const projectId = process.env.CAIP_PROJECT_ID;
23+
const location = process.env.LOCATION;
24+
const model = 'gemini-1.0-pro';
25+
2326
describe('Generative AI Function Calling Stream Content', () => {
24-
const project = 'cloud-llm-preview1';
25-
const location = 'us-central1';
26-
const model = 'gemini-1.0-pro';
27+
/**
28+
* TODO(developer): Uncomment these variables before running the sample.
29+
* (Not necessary if passing values as arguments)
30+
*/
31+
// const projectId = 'YOUR_PROJECT_ID';
32+
// const location = 'YOUR_LOCATION';
33+
// const model = 'gemini-1.0-pro';
2734

2835
it('should create stream chat and begin the conversation the same in each instance', async () => {
2936
const output = execSync(
30-
`node ./functionCallingStreamContent.js ${project} ${location} ${model}`
37+
`node ./functionCallingStreamContent.js ${projectId} ${location} ${model}`
3138
);
3239

3340
// Assert that the response is what we expect

generative-ai/snippets/test/nonStreamingChat.test.js

+21-12
Original file line numberDiff line numberDiff line change
@@ -17,22 +17,31 @@
1717
const {assert} = require('chai');
1818
const {describe, it} = require('mocha');
1919
const cp = require('child_process');
20-
2120
const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
2221

22+
const projectId = process.env.CAIP_PROJECT_ID;
23+
const location = process.env.LOCATION;
24+
const model = 'gemini-1.0-pro';
25+
2326
describe('Generative AI NonStreaming Chat', async () => {
24-
const project = 'cloud-llm-preview1';
25-
const location = 'us-central1';
26-
const model = 'gemini-1.0-pro';
27+
/**
28+
* TODO(developer): Uncomment these variables before running the sample.
29+
* (Not necessary if passing values as arguments)
30+
*/
31+
// const projectId = 'YOUR_PROJECT_ID';
32+
// const location = 'YOUR_LOCATION';
33+
// const model = 'gemini-1.0-pro';
2734

28-
it('should create nonstreaming chat and begin the conversation the same in each instance', async () => {
29-
const output = execSync(
30-
`node ./nonStreamingChat.js ${project} ${location} ${model}`
31-
);
35+
describe('Generative AI NonStreaming Chat', async () => {
36+
it('should create nonstreaming chat and begin the conversation the same in each instance', async () => {
37+
const output = execSync(
38+
`node ./nonStreamingChat.js ${projectId} ${location} ${model}`
39+
);
3240

33-
// Ensure that the beginning of the conversation is consistent
34-
assert(output.match(/User: Hello/));
35-
assert(output.match(/User: Can you tell me a scientific fun fact?/));
36-
assert(output.match(/User: How can I learn more about that?/));
41+
// Ensure that the beginning of the conversation is consistent
42+
assert(output.match(/User: Hello/));
43+
assert(output.match(/User: Can you tell me a scientific fun fact?/));
44+
assert(output.match(/User: How can I learn more about that?/));
45+
});
3746
});
3847
});

generative-ai/snippets/test/nonStreamingContent.test.js

+12-5
Original file line numberDiff line numberDiff line change
@@ -17,17 +17,24 @@
1717
const {assert} = require('chai');
1818
const {describe, it} = require('mocha');
1919
const cp = require('child_process');
20-
2120
const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
2221

22+
const projectId = process.env.CAIP_PROJECT_ID;
23+
const location = process.env.LOCATION;
24+
const model = 'gemini-1.0-pro';
25+
2326
describe('Generative AI NonStreaming Content', () => {
24-
const project = 'cloud-llm-preview1';
25-
const location = 'us-central1';
26-
const model = 'gemini-1.0-pro';
27+
/**
28+
* TODO(developer): Uncomment these variables before running the sample.
29+
* (Not necessary if passing values as arguments)
30+
*/
31+
// const projectId = 'YOUR_PROJECT_ID';
32+
// const location = 'YOUR_LOCATION';
33+
// const model = 'gemini-1.0-pro';
2734

2835
it('should create nonstreaming content and begin the conversation the same in each instance', async () => {
2936
const output = execSync(
30-
`node ./nonStreamingContent.js ${project} ${location} ${model}`
37+
`node ./nonStreamingContent.js ${projectId} ${location} ${model}`
3138
);
3239

3340
// Ensure that the beginning of the conversation is consistent

generative-ai/snippets/test/nonStreamingMultipartContent.test.js

+13-5
Original file line numberDiff line numberDiff line change
@@ -17,18 +17,26 @@
1717
const {assert} = require('chai');
1818
const {describe, it} = require('mocha');
1919
const cp = require('child_process');
20-
2120
const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
2221

22+
const projectId = process.env.CAIP_PROJECT_ID;
23+
const location = process.env.LOCATION;
24+
const model = 'gemini-1.0-pro-vision';
25+
2326
describe('Generative AI NonStreaming Multipart Content', () => {
24-
const project = 'cloud-llm-preview1';
25-
const location = 'us-central1';
26-
const model = 'gemini-1.0-pro-vision';
27+
/**
28+
* TODO(developer): Uncomment these variables before running the sample.
29+
* (Not necessary if passing values as arguments)
30+
*/
31+
// const projectId = 'YOUR_PROJECT_ID';
32+
// const location = 'YOUR_LOCATION';
33+
// const model = 'gemini-1.0-pro-vision';
34+
2735
const image = 'gs://generativeai-downloads/images/scones.jpg';
2836

2937
it('should create nonstreaming multipart content and begin the conversation the same in each instance', async () => {
3038
const output = execSync(
31-
`node ./nonStreamingMultipartContent.js ${project} ${location} ${model} ${image}`
39+
`node ./nonStreamingMultipartContent.js ${projectId} ${location} ${model} ${image}`
3240
);
3341

3442
// Ensure that the conversation is what we expect for this scone image

generative-ai/snippets/test/safetySettings.test.js

+12-5
Original file line numberDiff line numberDiff line change
@@ -17,17 +17,24 @@
1717
const {assert} = require('chai');
1818
const {describe, it} = require('mocha');
1919
const cp = require('child_process');
20-
2120
const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
2221

22+
const projectId = process.env.CAIP_PROJECT_ID;
23+
const location = process.env.LOCATION;
24+
const model = 'gemini-1.0-pro';
25+
2326
describe('Safety settings', async () => {
24-
const project = 'cloud-llm-preview1';
25-
const location = 'us-central1';
26-
const model = 'gemini-1.0-pro';
27+
/**
28+
* TODO(developer): Uncomment these variables before running the sample.\
29+
* (Not necessary if passing values as arguments)
30+
*/
31+
// const projectId = 'YOUR_PROJECT_ID';
32+
// const location = 'YOUR_LOCATION';
33+
// const model = 'gemini-1.0-pro';
2734

2835
it('should reject a dangerous request', async () => {
2936
const output = execSync(
30-
`node ./safetySettings.js ${project} ${location} ${model}`
37+
`node ./safetySettings.js ${projectId} ${location} ${model}`
3138
);
3239

3340
// Expect rejection due to safety concerns

generative-ai/snippets/test/sendMultiModalPromptWithImage.test.js

+12-5
Original file line numberDiff line numberDiff line change
@@ -17,17 +17,24 @@
1717
const {assert} = require('chai');
1818
const {describe, it} = require('mocha');
1919
const cp = require('child_process');
20-
2120
const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
2221

22+
const projectId = process.env.CAIP_PROJECT_ID;
23+
const location = process.env.LOCATION;
24+
const model = 'gemini-1.0-pro-vision';
25+
2326
describe('Generative AI Stream MultiModal with Image', () => {
24-
const project = 'cloud-llm-preview1';
25-
const location = 'us-central1';
26-
const model = 'gemini-1.0-pro-vision';
27+
/**
28+
* TODO(developer): Uncomment these variables before running the sample.\
29+
* (Not necessary if passing values as arguments)
30+
*/
31+
// const projectId = 'YOUR_PROJECT_ID';
32+
// const location = 'YOUR_LOCATION';
33+
// const model = 'gemini-1.0-pro-vision';
2734

2835
it('should create stream multimodal content', async () => {
2936
const output = execSync(
30-
`node ./sendMultiModalPromptWithImage.js ${project} ${location} ${model}`
37+
`node ./sendMultiModalPromptWithImage.js ${projectId} ${location} ${model}`
3138
);
3239
// Ensure that the conversation is what we expect for these images
3340
assert(output.match(/city: Rio de Janeiro, Landmark: Christ the Redeemer/));

generative-ai/snippets/test/sendMultiModalPromptWithVideo.test.js

+12-5
Original file line numberDiff line numberDiff line change
@@ -17,17 +17,24 @@
1717
const {assert} = require('chai');
1818
const {describe, it} = require('mocha');
1919
const cp = require('child_process');
20-
2120
const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
2221

22+
const projectId = process.env.CAIP_PROJECT_ID;
23+
const location = process.env.LOCATION;
24+
const model = 'gemini-1.0-pro-vision';
25+
2326
describe('Generative AI Stream MultiModal with Video', () => {
24-
const project = 'cloud-llm-preview1';
25-
const location = 'us-central1';
26-
const model = 'gemini-1.0-pro-vision';
27+
/**
28+
* TODO(developer): Uncomment these variables before running the sample.\
29+
* (Not necessary if passing values as arguments)
30+
*/
31+
// const projectId = 'YOUR_PROJECT_ID';
32+
// const location = 'YOUR_LOCATION';
33+
// const model = 'gemini-1.0-pro-vision';
2734

2835
it('should create stream multimodal content', async () => {
2936
const output = execSync(
30-
`node ./sendMultiModalPromptWithVideo.js ${project} ${location} ${model}`
37+
`node ./sendMultiModalPromptWithVideo.js ${projectId} ${location} ${model}`
3138
);
3239
// Ensure that the conversation is what we expect for these images
3340
assert(output.match(/Zootopia/));

generative-ai/snippets/test/streamChat.test.js

+12-5
Original file line numberDiff line numberDiff line change
@@ -17,17 +17,24 @@
1717
const {assert} = require('chai');
1818
const {describe, it} = require('mocha');
1919
const cp = require('child_process');
20-
2120
const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
2221

22+
const projectId = process.env.CAIP_PROJECT_ID;
23+
const location = process.env.LOCATION;
24+
const model = 'gemini-1.0-pro';
25+
2326
describe('Generative AI Stream Chat', () => {
24-
const project = 'cloud-llm-preview1';
25-
const location = 'us-central1';
26-
const model = 'gemini-1.0-pro';
27+
/**
28+
* TODO(developer): Uncomment these variables before running the sample.\
29+
* (Not necessary if passing values as arguments)
30+
*/
31+
// const projectId = 'YOUR_PROJECT_ID';
32+
// const location = 'YOUR_LOCATION';
33+
// const model = 'gemini-1.0-pro';
2734

2835
it('should create stream chat and begin the conversation the same in each instance', async () => {
2936
const output = execSync(
30-
`node ./streamChat.js ${project} ${location} ${model}`
37+
`node ./streamChat.js ${projectId} ${location} ${model}`
3138
);
3239

3340
// Assert that the advice given for learning is what we expect

generative-ai/snippets/test/streamContent.test.js

+12-5
Original file line numberDiff line numberDiff line change
@@ -17,17 +17,24 @@
1717
const {assert} = require('chai');
1818
const {describe, it} = require('mocha');
1919
const cp = require('child_process');
20-
2120
const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
2221

22+
const projectId = process.env.CAIP_PROJECT_ID;
23+
const location = process.env.LOCATION;
24+
const model = 'gemini-1.0-pro';
25+
2326
describe('Generative AI Stream Content', () => {
24-
const project = 'cloud-llm-preview1';
25-
const location = 'us-central1';
26-
const model = 'gemini-1.0-pro';
27+
/**
28+
* TODO(developer): Uncomment these variables before running the sample.\
29+
* (Not necessary if passing values as arguments)
30+
*/
31+
// const projectId = 'YOUR_PROJECT_ID';
32+
// const location = 'YOUR_LOCATION';
33+
// const model = 'gemini-1.0-pro';
2734

2835
it('should create stream content', async () => {
2936
const output = execSync(
30-
`node ./streamContent.js ${project} ${location} ${model}`
37+
`node ./streamContent.js ${projectId} ${location} ${model}`
3138
);
3239
// Ensure that the beginning of the conversation is consistent
3340
assert(output.match(/Prompt:/));

generative-ai/snippets/test/streamMultipartContent.test.js

+13-5
Original file line numberDiff line numberDiff line change
@@ -17,18 +17,26 @@
1717
const {assert} = require('chai');
1818
const {describe, it} = require('mocha');
1919
const cp = require('child_process');
20-
2120
const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
2221

22+
const projectId = process.env.CAIP_PROJECT_ID;
23+
const location = process.env.LOCATION;
24+
const model = 'gemini-1.0-pro-vision';
25+
2326
describe('Generative AI Stream Multipart Content', () => {
24-
const project = 'cloud-llm-preview1';
25-
const location = 'us-central1';
26-
const model = 'gemini-1.0-pro-vision';
27+
/**
28+
* TODO(developer): Uncomment these variables before running the sample.\
29+
* (Not necessary if passing values as arguments)
30+
*/
31+
// const projectId = 'YOUR_PROJECT_ID';
32+
// const location = 'YOUR_LOCATION';
33+
// const model = 'gemini-1.0-pro-vision';
34+
2735
const image = 'gs://generativeai-downloads/images/scones.jpg';
2836

2937
it('should create stream multipart content', async () => {
3038
const output = execSync(
31-
`node ./streamMultipartContent.js ${project} ${location} ${model} ${image}`
39+
`node ./streamMultipartContent.js ${projectId} ${location} ${model} ${image}`
3240
);
3341
// Split up conversation output
3442
const conversation = output.split('\n');

0 commit comments

Comments
 (0)