Skip to content

Commit 6119f1e

Browse files
Author: Cloud Shell (committed)
fix: fix broken tests
1 parent 62d5dc4 commit 6119f1e

5 files changed: +14 additions, −10 deletions

generative-ai/snippets/nonStreamingChat.js

+9-6
Original file line numberDiff line numberDiff line change
@@ -37,20 +37,23 @@ async function createNonStreamingChat(
3737
console.log(`User: ${chatInput1}`);
3838

3939
const result1 = await chat.sendMessage(chatInput1);
40-
const response1 = result1.response.candidates[0].content.parts[0].text;
41-
console.log('Chat bot: ', response1);
40+
const response1 = await result1.response;
41+
const text1 = response1.candidates[0].content.parts[0].text;
42+
console.log('Chat bot: ', text1);
4243

4344
const chatInput2 = 'Can you tell me a scientific fun fact?';
4445
console.log(`User: ${chatInput2}`);
4546
const result2 = await chat.sendMessage(chatInput2);
46-
const response2 = result2.response.candidates[0].content.parts[0].text;
47-
console.log('Chat bot: ', response2);
47+
const response2 = await result2.response;
48+
const text2 = response2.candidates[0].content.parts[0].text;
49+
console.log('Chat bot: ', text2);
4850

4951
const chatInput3 = 'How can I learn more about that?';
5052
console.log(`User: ${chatInput3}`);
5153
const result3 = await chat.sendMessage(chatInput3);
52-
const response3 = result3.response.candidates[0].content.parts[0].text;
53-
console.log('Chat bot: ', response3);
54+
const response3 = await result3.response;
55+
const text3 = response3.candidates[0].content.parts[0].text;
56+
console.log('Chat bot: ', text3);
5457
}
5558
// [END aiplatform_gemini_multiturn_chat_nonstreaming]
5659

generative-ai/snippets/nonStreamingMultipartContent.js

+1-1
Original file line numberDiff line numberDiff line change
@@ -50,7 +50,7 @@ async function createNonStreamingMultipartContent(
5050
};
5151

5252
console.log('Prompt Text:');
53-
console.log(request.contents[0].parts[0].text);
53+
console.log(request.contents[0].parts[1].text);
5454

5555
console.log('Non-Streaming Response Text:');
5656
// Create the response stream

generative-ai/snippets/safetySettings.js

+2-1
Original file line numberDiff line numberDiff line change
@@ -59,14 +59,15 @@ async function setSafetySettings(
5959
for await (const item of responseStream.stream) {
6060
if (item.candidates[0].finishReason === 'SAFETY') {
6161
console.log('This response stream terminated due to safety concerns.');
62+
break;
6263
} else {
6364
process.stdout.write(item.candidates[0].content.parts[0].text);
6465
}
6566
}
6667
}
6768
// [END aiplatform_gemini_safety_settings]
6869

69-
setSafetySettings(...process.argv.slice(3)).catch(err => {
70+
setSafetySettings(...process.argv.slice(2)).catch(err => {
7071
console.error(err.message);
7172
process.exitCode = 1;
7273
});

generative-ai/snippets/streamMultipartContent.js

+1-1
Original file line numberDiff line numberDiff line change
@@ -50,7 +50,7 @@ async function createStreamMultipartContent(
5050
};
5151

5252
console.log('Prompt Text:');
53-
console.log(request.contents[0].parts[0].text);
53+
console.log(request.contents[0].parts[1].text);
5454
console.log('Streaming Response Text:');
5555

5656
// Create the response stream

generative-ai/snippets/test/sendMultiModalPromptWithImage.test.js

+1-1
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,6 @@ describe('Generative AI Stream MultiModal with Image', () => {
3030
`node ./sendMultiModalPromptWithImage.js ${project} ${location} ${model}`
3131
);
3232
// Ensure that the conversation is what we expect for these images
33-
assert(output.match(/Paris/));
33+
assert(output.match(/city: Rio de Janeiro, Landmark: Christ the Redeemer/));
3434
});
3535
});

0 commit comments

Comments (0)