Skip to content

Commit a2248a8

Browse files
authored
fix(aiplatform): update Gemini model names (#3626)
- fix(aiplatform): update Gemini model names
- Fix the model name
1 parent 72026d7 commit a2248a8

24 files changed

+24
-24
lines changed

generative-ai/snippets/countTokens.js

+1-1
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ const {VertexAI} = require('@google-cloud/vertexai');
2121
async function countTokens(
2222
projectId = 'PROJECT_ID',
2323
location = 'us-central1',
24-
model = 'gemini-pro'
24+
model = 'gemini-1.0-pro'
2525
) {
2626
// Initialize Vertex with your Cloud project and location
2727
const vertexAI = new VertexAI({project: projectId, location: location});

generative-ai/snippets/functionCallingStreamChat.js

+1-1
Original file line numberDiff line numberDiff line change
@@ -55,7 +55,7 @@ const functionResponseParts = [
5555
async function functionCallingStreamChat(
5656
projectId = 'PROJECT_ID',
5757
location = 'us-central1',
58-
model = 'gemini-pro'
58+
model = 'gemini-1.0-pro'
5959
) {
6060
// Initialize Vertex with your Cloud project and location
6161
const vertexAI = new VertexAI({project: projectId, location: location});

generative-ai/snippets/functionCallingStreamContent.js

+1-1
Original file line numberDiff line numberDiff line change
@@ -55,7 +55,7 @@ const functionResponseParts = [
5555
async function functionCallingStreamChat(
5656
projectId = 'PROJECT_ID',
5757
location = 'us-central1',
58-
model = 'gemini-pro'
58+
model = 'gemini-1.0-pro'
5959
) {
6060
// Initialize Vertex with your Cloud project and location
6161
const vertexAI = new VertexAI({project: projectId, location: location});

generative-ai/snippets/nonStreamingChat.js

+1-1
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ const {VertexAI} = require('@google-cloud/vertexai');
2121
async function createNonStreamingChat(
2222
projectId = 'PROJECT_ID',
2323
location = 'us-central1',
24-
model = 'gemini-pro'
24+
model = 'gemini-1.0-pro'
2525
) {
2626
// Initialize Vertex with your Cloud project and location
2727
const vertexAI = new VertexAI({project: projectId, location: location});

generative-ai/snippets/nonStreamingContent.js

+1-1
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ const {VertexAI} = require('@google-cloud/vertexai');
2121
async function createNonStreamingContent(
2222
projectId = 'PROJECT_ID',
2323
location = 'us-central1',
24-
model = 'gemini-pro'
24+
model = 'gemini-1.0-pro'
2525
) {
2626
// Initialize Vertex with your Cloud project and location
2727
const vertexAI = new VertexAI({project: projectId, location: location});

generative-ai/snippets/nonStreamingMultipartContent.js

+1-1
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ const {VertexAI} = require('@google-cloud/vertexai');
2121
async function createNonStreamingMultipartContent(
2222
projectId = 'PROJECT_ID',
2323
location = 'us-central1',
24-
model = 'gemini-pro-vision',
24+
model = 'gemini-1.0-pro-vision',
2525
image = 'gs://generativeai-downloads/images/scones.jpg',
2626
mimeType = 'image/jpeg'
2727
) {

generative-ai/snippets/safetySettings.js

+1-1
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,7 @@ const {
2525
async function setSafetySettings(
2626
projectId = 'PROJECT_ID',
2727
location = 'us-central1',
28-
model = 'gemini-pro'
28+
model = 'gemini-1.0-pro'
2929
) {
3030
// Initialize Vertex with your Cloud project and location
3131
const vertexAI = new VertexAI({project: projectId, location: location});

generative-ai/snippets/sendMultiModalPromptWithImage.js

+1-1
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@ async function getBase64(url) {
2727
async function sendMultiModalPromptWithImage(
2828
projectId = 'PROJECT_ID',
2929
location = 'us-central1',
30-
model = 'gemini-pro-vision'
30+
model = 'gemini-1.0-pro-vision'
3131
) {
3232
// For images, the SDK supports base64 strings
3333
const landmarkImage1 = await getBase64(

generative-ai/snippets/sendMultiModalPromptWithVideo.js

+1-1
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ const {VertexAI} = require('@google-cloud/vertexai');
2121
async function sendMultiModalPromptWithVideo(
2222
projectId = 'PROJECT_ID',
2323
location = 'us-central1',
24-
model = 'gemini-pro-vision'
24+
model = 'gemini-1.0-pro-vision'
2525
) {
2626
// Initialize Vertex with your Cloud project and location
2727
const vertexAI = new VertexAI({project: projectId, location: location});

generative-ai/snippets/streamChat.js

+1-1
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ const {VertexAI} = require('@google-cloud/vertexai');
2121
async function createStreamChat(
2222
projectId = 'PROJECT_ID',
2323
location = 'us-central1',
24-
model = 'gemini-pro'
24+
model = 'gemini-1.0-pro'
2525
) {
2626
// Initialize Vertex with your Cloud project and location
2727
const vertexAI = new VertexAI({project: projectId, location: location});

generative-ai/snippets/streamContent.js

+1-1
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ const {VertexAI} = require('@google-cloud/vertexai');
2121
async function createStreamContent(
2222
projectId = 'PROJECT_ID',
2323
location = 'us-central1',
24-
model = 'gemini-pro'
24+
model = 'gemini-1.0-pro'
2525
) {
2626
// Initialize Vertex with your Cloud project and location
2727
const vertexAI = new VertexAI({project: projectId, location: location});

generative-ai/snippets/streamMultipartContent.js

+1-1
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ const {VertexAI} = require('@google-cloud/vertexai');
2121
async function createStreamMultipartContent(
2222
projectId = 'PROJECT_ID',
2323
location = 'us-central1',
24-
model = 'gemini-pro-vision',
24+
model = 'gemini-1.0-pro-vision',
2525
image = 'gs://generativeai-downloads/images/scones.jpg',
2626
mimeType = 'image/jpeg'
2727
) {

generative-ai/snippets/test/countTokens.test.js

+1-1
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
2323
describe('Count tokens', async () => {
2424
const project = 'cloud-llm-preview1';
2525
const location = 'us-central1';
26-
const model = 'gemini-pro';
26+
const model = 'gemini-1.0-pro';
2727

2828
it('should count tokens', async () => {
2929
const output = execSync(

generative-ai/snippets/test/functionCallingStreamChat.test.js

+1-1
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
2323
describe('Generative AI Function Calling Stream Chat', () => {
2424
const project = 'cloud-llm-preview1';
2525
const location = 'us-central1';
26-
const model = 'gemini-pro';
26+
const model = 'gemini-1.0-pro';
2727

2828
it('should create stream chat and begin the conversation the same in each instance', async () => {
2929
const output = execSync(

generative-ai/snippets/test/functionCallingStreamContent.test.js

+1-1
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
2323
describe('Generative AI Function Calling Stream Content', () => {
2424
const project = 'cloud-llm-preview1';
2525
const location = 'us-central1';
26-
const model = 'gemini-pro';
26+
const model = 'gemini-1.0-pro';
2727

2828
it('should create stream chat and begin the conversation the same in each instance', async () => {
2929
const output = execSync(

generative-ai/snippets/test/nonStreamingChat.test.js

+1-1
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
2323
describe('Generative AI NonStreaming Chat', async () => {
2424
const project = 'cloud-llm-preview1';
2525
const location = 'us-central1';
26-
const model = 'gemini-pro';
26+
const model = 'gemini-1.0-pro';
2727

2828
it('should create nonstreaming chat and begin the conversation the same in each instance', async () => {
2929
const output = execSync(

generative-ai/snippets/test/nonStreamingContent.test.js

+1-1
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
2323
describe('Generative AI NonStreaming Content', () => {
2424
const project = 'cloud-llm-preview1';
2525
const location = 'us-central1';
26-
const model = 'gemini-pro';
26+
const model = 'gemini-1.0-pro';
2727

2828
it('should create nonstreaming content and begin the conversation the same in each instance', async () => {
2929
const output = execSync(

generative-ai/snippets/test/nonStreamingMultipartContent.test.js

+1-1
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
2323
describe('Generative AI NonStreaming Multipart Content', () => {
2424
const project = 'cloud-llm-preview1';
2525
const location = 'us-central1';
26-
const model = 'gemini-pro-vision';
26+
const model = 'gemini-1.0-pro-vision';
2727
const image = 'gs://generativeai-downloads/images/scones.jpg';
2828

2929
it('should create nonstreaming multipart content and begin the conversation the same in each instance', async () => {

generative-ai/snippets/test/safetySettings.test.js

+1-1
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
2323
describe('Safety settings', async () => {
2424
const project = 'cloud-llm-preview1';
2525
const location = 'us-central1';
26-
const model = 'gemini-pro';
26+
const model = 'gemini-1.0-pro';
2727

2828
it('should reject a dangerous request', async () => {
2929
const output = execSync(

generative-ai/snippets/test/sendMultiModalPromptWithImage.test.js

+1-1
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
2323
describe('Generative AI Stream MultiModal with Image', () => {
2424
const project = 'cloud-llm-preview1';
2525
const location = 'us-central1';
26-
const model = 'gemini-pro-vision';
26+
const model = 'gemini-1.0-pro-vision';
2727

2828
it('should create stream multimodal content', async () => {
2929
const output = execSync(

generative-ai/snippets/test/sendMultiModalPromptWithVideo.test.js

+1-1
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
2323
describe('Generative AI Stream MultiModal with Video', () => {
2424
const project = 'cloud-llm-preview1';
2525
const location = 'us-central1';
26-
const model = 'gemini-pro-vision';
26+
const model = 'gemini-1.0-pro-vision';
2727

2828
it('should create stream multimodal content', async () => {
2929
const output = execSync(

generative-ai/snippets/test/streamChat.test.js

+1-1
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
2323
describe('Generative AI Stream Chat', () => {
2424
const project = 'cloud-llm-preview1';
2525
const location = 'us-central1';
26-
const model = 'gemini-pro';
26+
const model = 'gemini-1.0-pro';
2727

2828
it('should create stream chat and begin the conversation the same in each instance', async () => {
2929
const output = execSync(

generative-ai/snippets/test/streamContent.test.js

+1-1
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
2323
describe('Generative AI Stream Content', () => {
2424
const project = 'cloud-llm-preview1';
2525
const location = 'us-central1';
26-
const model = 'gemini-pro';
26+
const model = 'gemini-1.0-pro';
2727

2828
it('should create stream content', async () => {
2929
const output = execSync(

generative-ai/snippets/test/streamMultipartContent.test.js

+1-1
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
2323
describe('Generative AI Stream Multipart Content', () => {
2424
const project = 'cloud-llm-preview1';
2525
const location = 'us-central1';
26-
const model = 'gemini-pro-vision';
26+
const model = 'gemini-1.0-pro-vision';
2727
const image = 'gs://generativeai-downloads/images/scones.jpg';
2828

2929
it('should create stream multipart content', async () => {

0 commit comments

Comments (0)