Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

fix(aiplatform): update Gemini model names #3626

Merged
merged 2 commits into the base branch from the head branch
Feb 15, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion generative-ai/snippets/countTokens.js
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ const {VertexAI} = require('@google-cloud/vertexai');
async function countTokens(
projectId = 'PROJECT_ID',
location = 'us-central1',
model = 'gemini-pro'
model = 'gemini-1.0-pro'
) {
// Initialize Vertex with your Cloud project and location
const vertexAI = new VertexAI({project: projectId, location: location});
Expand Down
2 changes: 1 addition & 1 deletion generative-ai/snippets/functionCallingStreamChat.js
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,7 @@ const functionResponseParts = [
async function functionCallingStreamChat(
projectId = 'PROJECT_ID',
location = 'us-central1',
model = 'gemini-pro'
model = 'gemini-1.0-pro'
) {
// Initialize Vertex with your Cloud project and location
const vertexAI = new VertexAI({project: projectId, location: location});
Expand Down
2 changes: 1 addition & 1 deletion generative-ai/snippets/functionCallingStreamContent.js
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,7 @@ const functionResponseParts = [
async function functionCallingStreamChat(
projectId = 'PROJECT_ID',
location = 'us-central1',
model = 'gemini-pro'
model = 'gemini-1.0-pro'
) {
// Initialize Vertex with your Cloud project and location
const vertexAI = new VertexAI({project: projectId, location: location});
Expand Down
2 changes: 1 addition & 1 deletion generative-ai/snippets/nonStreamingChat.js
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ const {VertexAI} = require('@google-cloud/vertexai');
async function createNonStreamingChat(
projectId = 'PROJECT_ID',
location = 'us-central1',
model = 'gemini-pro'
model = 'gemini-1.0-pro'
) {
// Initialize Vertex with your Cloud project and location
const vertexAI = new VertexAI({project: projectId, location: location});
Expand Down
2 changes: 1 addition & 1 deletion generative-ai/snippets/nonStreamingContent.js
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ const {VertexAI} = require('@google-cloud/vertexai');
async function createNonStreamingContent(
projectId = 'PROJECT_ID',
location = 'us-central1',
model = 'gemini-pro'
model = 'gemini-1.0-pro'
) {
// Initialize Vertex with your Cloud project and location
const vertexAI = new VertexAI({project: projectId, location: location});
Expand Down
2 changes: 1 addition & 1 deletion generative-ai/snippets/nonStreamingMultipartContent.js
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ const {VertexAI} = require('@google-cloud/vertexai');
async function createNonStreamingMultipartContent(
projectId = 'PROJECT_ID',
location = 'us-central1',
model = 'gemini-pro-vision',
model = 'gemini-1.0-pro-vision',
image = 'gs://generativeai-downloads/images/scones.jpg',
mimeType = 'image/jpeg'
) {
Expand Down
2 changes: 1 addition & 1 deletion generative-ai/snippets/safetySettings.js
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ const {
async function setSafetySettings(
projectId = 'PROJECT_ID',
location = 'us-central1',
model = 'gemini-pro'
model = 'gemini-1.0-pro'
) {
// Initialize Vertex with your Cloud project and location
const vertexAI = new VertexAI({project: projectId, location: location});
Expand Down
2 changes: 1 addition & 1 deletion generative-ai/snippets/sendMultiModalPromptWithImage.js
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ async function getBase64(url) {
async function sendMultiModalPromptWithImage(
projectId = 'PROJECT_ID',
location = 'us-central1',
model = 'gemini-pro-vision'
model = 'gemini-1.0-pro-vision'
) {
// For images, the SDK supports base64 strings
const landmarkImage1 = await getBase64(
Expand Down
2 changes: 1 addition & 1 deletion generative-ai/snippets/sendMultiModalPromptWithVideo.js
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ const {VertexAI} = require('@google-cloud/vertexai');
async function sendMultiModalPromptWithVideo(
projectId = 'PROJECT_ID',
location = 'us-central1',
model = 'gemini-pro-vision'
model = 'gemini-1.0-pro-vision'
) {
// Initialize Vertex with your Cloud project and location
const vertexAI = new VertexAI({project: projectId, location: location});
Expand Down
2 changes: 1 addition & 1 deletion generative-ai/snippets/streamChat.js
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ const {VertexAI} = require('@google-cloud/vertexai');
async function createStreamChat(
projectId = 'PROJECT_ID',
location = 'us-central1',
model = 'gemini-pro'
model = 'gemini-1.0-pro'
) {
// Initialize Vertex with your Cloud project and location
const vertexAI = new VertexAI({project: projectId, location: location});
Expand Down
2 changes: 1 addition & 1 deletion generative-ai/snippets/streamContent.js
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ const {VertexAI} = require('@google-cloud/vertexai');
async function createStreamContent(
projectId = 'PROJECT_ID',
location = 'us-central1',
model = 'gemini-pro'
model = 'gemini-1.0-pro'
) {
// Initialize Vertex with your Cloud project and location
const vertexAI = new VertexAI({project: projectId, location: location});
Expand Down
2 changes: 1 addition & 1 deletion generative-ai/snippets/streamMultipartContent.js
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ const {VertexAI} = require('@google-cloud/vertexai');
async function createStreamMultipartContent(
projectId = 'PROJECT_ID',
location = 'us-central1',
model = 'gemini-pro-vision',
model = 'gemini-1.0-pro-vision',
image = 'gs://generativeai-downloads/images/scones.jpg',
mimeType = 'image/jpeg'
) {
Expand Down
2 changes: 1 addition & 1 deletion generative-ai/snippets/test/countTokens.test.js
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
describe('Count tokens', async () => {
const project = 'cloud-llm-preview1';
const location = 'us-central1';
const model = 'gemini-pro';
const model = 'gemini-1.0-pro';

it('should count tokens', async () => {
const output = execSync(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
describe('Generative AI Function Calling Stream Chat', () => {
const project = 'cloud-llm-preview1';
const location = 'us-central1';
const model = 'gemini-pro';
const model = 'gemini-1.0-pro';

it('should create stream chat and begin the conversation the same in each instance', async () => {
const output = execSync(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
describe('Generative AI Function Calling Stream Content', () => {
const project = 'cloud-llm-preview1';
const location = 'us-central1';
const model = 'gemini-pro';
const model = 'gemini-1.0-pro';

it('should create stream chat and begin the conversation the same in each instance', async () => {
const output = execSync(
Expand Down
2 changes: 1 addition & 1 deletion generative-ai/snippets/test/nonStreamingChat.test.js
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
describe('Generative AI NonStreaming Chat', async () => {
const project = 'cloud-llm-preview1';
const location = 'us-central1';
const model = 'gemini-pro';
const model = 'gemini-1.0-pro';

it('should create nonstreaming chat and begin the conversation the same in each instance', async () => {
const output = execSync(
Expand Down
2 changes: 1 addition & 1 deletion generative-ai/snippets/test/nonStreamingContent.test.js
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
describe('Generative AI NonStreaming Content', () => {
const project = 'cloud-llm-preview1';
const location = 'us-central1';
const model = 'gemini-pro';
const model = 'gemini-1.0-pro';

it('should create nonstreaming content and begin the conversation the same in each instance', async () => {
const output = execSync(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
describe('Generative AI NonStreaming Multipart Content', () => {
const project = 'cloud-llm-preview1';
const location = 'us-central1';
const model = 'gemini-pro-vision';
const model = 'gemini-1.0-pro-vision';
const image = 'gs://generativeai-downloads/images/scones.jpg';

it('should create nonstreaming multipart content and begin the conversation the same in each instance', async () => {
Expand Down
2 changes: 1 addition & 1 deletion generative-ai/snippets/test/safetySettings.test.js
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
describe('Safety settings', async () => {
const project = 'cloud-llm-preview1';
const location = 'us-central1';
const model = 'gemini-pro';
const model = 'gemini-1.0-pro';

it('should reject a dangerous request', async () => {
const output = execSync(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
describe('Generative AI Stream MultiModal with Image', () => {
const project = 'cloud-llm-preview1';
const location = 'us-central1';
const model = 'gemini-pro-vision';
const model = 'gemini-1.0-pro-vision';

it('should create stream multimodal content', async () => {
const output = execSync(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
describe('Generative AI Stream MultiModal with Video', () => {
const project = 'cloud-llm-preview1';
const location = 'us-central1';
const model = 'gemini-pro-vision';
const model = 'gemini-1.0-pro-vision';

it('should create stream multimodal content', async () => {
const output = execSync(
Expand Down
2 changes: 1 addition & 1 deletion generative-ai/snippets/test/streamChat.test.js
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
describe('Generative AI Stream Chat', () => {
const project = 'cloud-llm-preview1';
const location = 'us-central1';
const model = 'gemini-pro';
const model = 'gemini-1.0-pro';

it('should create stream chat and begin the conversation the same in each instance', async () => {
const output = execSync(
Expand Down
2 changes: 1 addition & 1 deletion generative-ai/snippets/test/streamContent.test.js
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
describe('Generative AI Stream Content', () => {
const project = 'cloud-llm-preview1';
const location = 'us-central1';
const model = 'gemini-pro';
const model = 'gemini-1.0-pro';

it('should create stream content', async () => {
const output = execSync(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
describe('Generative AI Stream Multipart Content', () => {
const project = 'cloud-llm-preview1';
const location = 'us-central1';
const model = 'gemini-pro-vision';
const model = 'gemini-1.0-pro-vision';
const image = 'gs://generativeai-downloads/images/scones.jpg';

it('should create stream multipart content', async () => {
Expand Down