Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions google/genai/_live_converters.py
Original file line number Diff line number Diff line change
Expand Up @@ -226,6 +226,11 @@ def _GenerationConfig_to_vertex(
if getv(from_object, ['top_p']) is not None:
setv(to_object, ['topP'], getv(from_object, ['top_p']))

if getv(from_object, ['enable_enhanced_civic_answers']) is not None:
raise ValueError(
'enable_enhanced_civic_answers parameter is not supported in Vertex AI.'
)

return to_object


Expand Down
5 changes: 5 additions & 0 deletions google/genai/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -2398,6 +2398,11 @@ def _GenerationConfig_to_vertex(
if getv(from_object, ['top_p']) is not None:
setv(to_object, ['topP'], getv(from_object, ['top_p']))

if getv(from_object, ['enable_enhanced_civic_answers']) is not None:
raise ValueError(
'enable_enhanced_civic_answers parameter is not supported in Vertex AI.'
)

return to_object


Expand Down
12 changes: 10 additions & 2 deletions google/genai/tests/live/test_live.py
Original file line number Diff line number Diff line change
Expand Up @@ -720,7 +720,11 @@ async def test_bidi_setup_to_api_speech_config(vertexai):
result = await get_connect_message(
mock_api_client(vertexai=vertexai), model='test_model', config=config_dict
)
assert result == expected_result
assert types.LiveClientMessage._from_response(
response=result, kwargs=None
) == types.LiveClientMessage._from_response(
response=expected_result, kwargs=None
)
# Config is a LiveConnectConfig
config = types.LiveConnectConfig(
speech_config=types.SpeechConfig(
Expand All @@ -745,7 +749,11 @@ async def test_bidi_setup_to_api_speech_config(vertexai):
mock_api_client(vertexai=vertexai),
model='test_model', config=config
)
assert result == expected_result
assert types.LiveClientMessage._from_response(
response=result, kwargs=None
) == types.LiveClientMessage._from_response(
response=expected_result, kwargs=None
)


@pytest.mark.parametrize('vertexai', [True, False])
Expand Down
2 changes: 1 addition & 1 deletion google/genai/tests/operations/test_get.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
# Copyright 2025 Google LLC
# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
Expand Down
30 changes: 0 additions & 30 deletions google/genai/tunings.py
Original file line number Diff line number Diff line change
Expand Up @@ -551,36 +551,6 @@ def _TuningJob_from_mldev(
_TunedModel_from_mldev(getv(from_object, ['_self']), to_object),
)

if getv(from_object, ['customBaseModel']) is not None:
setv(
to_object, ['custom_base_model'], getv(from_object, ['customBaseModel'])
)

if getv(from_object, ['experiment']) is not None:
setv(to_object, ['experiment'], getv(from_object, ['experiment']))

if getv(from_object, ['labels']) is not None:
setv(to_object, ['labels'], getv(from_object, ['labels']))

if getv(from_object, ['outputUri']) is not None:
setv(to_object, ['output_uri'], getv(from_object, ['outputUri']))

if getv(from_object, ['pipelineJob']) is not None:
setv(to_object, ['pipeline_job'], getv(from_object, ['pipelineJob']))

if getv(from_object, ['serviceAccount']) is not None:
setv(to_object, ['service_account'], getv(from_object, ['serviceAccount']))

if getv(from_object, ['tunedModelDisplayName']) is not None:
setv(
to_object,
['tuned_model_display_name'],
getv(from_object, ['tunedModelDisplayName']),
)

if getv(from_object, ['veoTuningSpec']) is not None:
setv(to_object, ['veo_tuning_spec'], getv(from_object, ['veoTuningSpec']))

return to_object


Expand Down
149 changes: 82 additions & 67 deletions google/genai/types.py
Original file line number Diff line number Diff line change
Expand Up @@ -118,6 +118,19 @@ class Language(_common.CaseInSensitiveEnum):
"""Python >= 3.10, with numpy and simpy available."""


class FunctionResponseScheduling(_common.CaseInSensitiveEnum):
  """Specifies how the response should be scheduled in the conversation.

  Used on function responses to control whether the result merely updates
  context or also triggers/interrupts model generation.
  """

  # NOTE(review): base class name suggests case-insensitive value matching —
  # actual behavior is defined in _common.CaseInSensitiveEnum; confirm there.

  SCHEDULING_UNSPECIFIED = 'SCHEDULING_UNSPECIFIED'
  """This value is unused."""
  SILENT = 'SILENT'
  """Only add the result to the conversation context, do not interrupt or trigger generation."""
  WHEN_IDLE = 'WHEN_IDLE'
  """Add the result to the conversation context, and prompt to generate output without interrupting ongoing generation."""
  INTERRUPT = 'INTERRUPT'
  """Add the result to the conversation context, interrupt ongoing generation and prompt to generate output."""


class Type(_common.CaseInSensitiveEnum):
"""Optional. The type of the data."""

Expand All @@ -144,14 +157,14 @@ class HarmCategory(_common.CaseInSensitiveEnum):

HARM_CATEGORY_UNSPECIFIED = 'HARM_CATEGORY_UNSPECIFIED'
"""The harm category is unspecified."""
HARM_CATEGORY_HATE_SPEECH = 'HARM_CATEGORY_HATE_SPEECH'
"""The harm category is hate speech."""
HARM_CATEGORY_DANGEROUS_CONTENT = 'HARM_CATEGORY_DANGEROUS_CONTENT'
"""The harm category is dangerous content."""
HARM_CATEGORY_HARASSMENT = 'HARM_CATEGORY_HARASSMENT'
"""The harm category is harassment."""
HARM_CATEGORY_HATE_SPEECH = 'HARM_CATEGORY_HATE_SPEECH'
"""The harm category is hate speech."""
HARM_CATEGORY_SEXUALLY_EXPLICIT = 'HARM_CATEGORY_SEXUALLY_EXPLICIT'
"""The harm category is sexually explicit content."""
HARM_CATEGORY_DANGEROUS_CONTENT = 'HARM_CATEGORY_DANGEROUS_CONTENT'
"""The harm category is dangerous content."""
HARM_CATEGORY_CIVIC_INTEGRITY = 'HARM_CATEGORY_CIVIC_INTEGRITY'
"""Deprecated: Election filter is not longer supported. The harm category is civic integrity."""
HARM_CATEGORY_IMAGE_HATE = 'HARM_CATEGORY_IMAGE_HATE'
Expand Down Expand Up @@ -702,19 +715,6 @@ class MediaModality(_common.CaseInSensitiveEnum):
"""Document, e.g. PDF."""


class FunctionResponseScheduling(_common.CaseInSensitiveEnum):
  """Specifies how the response should be scheduled in the conversation.

  Each member controls whether a function result only updates conversation
  context or also prompts/interrupts model output generation.
  """

  SCHEDULING_UNSPECIFIED = 'SCHEDULING_UNSPECIFIED'
  """This value is unused."""
  SILENT = 'SILENT'
  """Only add the result to the conversation context, do not interrupt or trigger generation."""
  WHEN_IDLE = 'WHEN_IDLE'
  """Add the result to the conversation context, and prompt to generate output without interrupting ongoing generation."""
  INTERRUPT = 'INTERRUPT'
  """Add the result to the conversation context, interrupt ongoing generation and prompt to generate output."""


class StartSensitivity(_common.CaseInSensitiveEnum):
"""Start of speech sensitivity."""

Expand Down Expand Up @@ -2672,8 +2672,7 @@ class GoogleSearch(_common.BaseModel):
)
exclude_domains: Optional[list[str]] = Field(
default=None,
description="""Optional. List of domains to be excluded from the search results.
The default limit is 2000 domains.""",
description="""Optional. List of domains to be excluded from the search results. The default limit is 2000 domains. Example: ["amazon.com", "facebook.com"].""",
)


Expand All @@ -2686,8 +2685,7 @@ class GoogleSearchDict(TypedDict, total=False):
"""

exclude_domains: Optional[list[str]]
"""Optional. List of domains to be excluded from the search results.
The default limit is 2000 domains."""
"""Optional. List of domains to be excluded from the search results. The default limit is 2000 domains. Example: ["amazon.com", "facebook.com"]."""


GoogleSearchOrDict = Union[GoogleSearch, GoogleSearchDict]
Expand Down Expand Up @@ -8304,34 +8302,6 @@ class DeleteModelResponseDict(TypedDict, total=False):
DeleteModelResponseOrDict = Union[DeleteModelResponse, DeleteModelResponseDict]


class GenerationConfigThinkingConfig(_common.BaseModel):
  """Config for thinking features.

  Both fields are optional and default to None (unset), leaving the
  behavior to the service when not provided.
  """

  include_thoughts: Optional[bool] = Field(
      default=None,
      description="""Optional. Indicates whether to include thoughts in the response. If true, thoughts are returned only when available.""",
  )
  thinking_budget: Optional[int] = Field(
      default=None,
      description="""Optional. Indicates the thinking budget in tokens.""",
  )


# TypedDict mirror of GenerationConfigThinkingConfig, for callers that pass
# plain dicts instead of model instances (total=False: all keys optional).
class GenerationConfigThinkingConfigDict(TypedDict, total=False):
  """Config for thinking features."""

  include_thoughts: Optional[bool]
  """Optional. Indicates whether to include thoughts in the response. If true, thoughts are returned only when available."""

  thinking_budget: Optional[int]
  """Optional. Indicates the thinking budget in tokens."""


# Convenience union accepted by APIs that take either the model or its dict form.
GenerationConfigThinkingConfigOrDict = Union[
    GenerationConfigThinkingConfig, GenerationConfigThinkingConfigDict
]


class GenerationConfig(_common.BaseModel):
"""Generation config."""

Expand Down Expand Up @@ -8400,7 +8370,7 @@ class GenerationConfig(_common.BaseModel):
default=None,
description="""Optional. Controls the randomness of predictions.""",
)
thinking_config: Optional[GenerationConfigThinkingConfig] = Field(
thinking_config: Optional[ThinkingConfig] = Field(
default=None,
description="""Optional. Config for thinking features. An error will be returned if this field is set for models that don't support thinking.""",
)
Expand All @@ -8412,6 +8382,10 @@ class GenerationConfig(_common.BaseModel):
default=None,
description="""Optional. If specified, nucleus sampling will be used.""",
)
enable_enhanced_civic_answers: Optional[bool] = Field(
default=None,
description="""Optional. Enables enhanced civic answers. It may not be available for all models.""",
)


class GenerationConfigDict(TypedDict, total=False):
Expand Down Expand Up @@ -8474,7 +8448,7 @@ class GenerationConfigDict(TypedDict, total=False):
temperature: Optional[float]
"""Optional. Controls the randomness of predictions."""

thinking_config: Optional[GenerationConfigThinkingConfigDict]
thinking_config: Optional[ThinkingConfigDict]
"""Optional. Config for thinking features. An error will be returned if this field is set for models that don't support thinking."""

top_k: Optional[float]
Expand All @@ -8483,6 +8457,9 @@ class GenerationConfigDict(TypedDict, total=False):
top_p: Optional[float]
"""Optional. If specified, nucleus sampling will be used."""

enable_enhanced_civic_answers: Optional[bool]
"""Optional. Enables enhanced civic answers. It may not be available for all models."""


GenerationConfigOrDict = Union[GenerationConfig, GenerationConfigDict]

Expand Down Expand Up @@ -9335,14 +9312,22 @@ class TunedModelCheckpointDict(TypedDict, total=False):


class TunedModel(_common.BaseModel):
"""TunedModel for the Tuned Model of a Tuning Job."""

model: Optional[str] = Field(
default=None,
description="""Output only. The resource name of the TunedModel. Format: `projects/{project}/locations/{location}/models/{model}@{version_id}` When tuning from a base model, the version_id will be 1. For continuous tuning, the version id will be incremented by 1 from the last version id in the parent model. E.g., `projects/{project}/locations/{location}/models/{model}@{last_version_id + 1}`""",
description="""Output only. The resource name of the TunedModel.
Format: `projects/{project}/locations/{location}/models/{model}@{version_id}`
When tuning from a base model, the version_id will be 1.
For continuous tuning, the version id will be incremented by 1 from the
last version id in the parent model. E.g., `projects/{project}/locations/{location}/models/{model}@{last_version_id + 1}`
""",
)
endpoint: Optional[str] = Field(
default=None,
description="""Output only. A resource name of an Endpoint. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}`.""",
description="""Output only. A resource name of an Endpoint.
Format: `projects/{project}/locations/{location}/endpoints/{endpoint}`.
""",
)
checkpoints: Optional[list[TunedModelCheckpoint]] = Field(
default=None,
Expand All @@ -9353,12 +9338,20 @@ class TunedModel(_common.BaseModel):


class TunedModelDict(TypedDict, total=False):
"""TunedModel for the Tuned Model of a Tuning Job."""

model: Optional[str]
"""Output only. The resource name of the TunedModel. Format: `projects/{project}/locations/{location}/models/{model}@{version_id}` When tuning from a base model, the version_id will be 1. For continuous tuning, the version id will be incremented by 1 from the last version id in the parent model. E.g., `projects/{project}/locations/{location}/models/{model}@{last_version_id + 1}`"""
"""Output only. The resource name of the TunedModel.
Format: `projects/{project}/locations/{location}/models/{model}@{version_id}`
When tuning from a base model, the version_id will be 1.
For continuous tuning, the version id will be incremented by 1 from the
last version id in the parent model. E.g., `projects/{project}/locations/{location}/models/{model}@{last_version_id + 1}`
"""

endpoint: Optional[str]
"""Output only. A resource name of an Endpoint. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}`."""
"""Output only. A resource name of an Endpoint.
Format: `projects/{project}/locations/{location}/endpoints/{endpoint}`.
"""

checkpoints: Optional[list[TunedModelCheckpointDict]]
"""The checkpoints associated with this TunedModel.
Expand Down Expand Up @@ -10829,22 +10822,24 @@ class _CancelTuningJobParametersDict(TypedDict, total=False):


class TuningExample(_common.BaseModel):
"""A single example for tuning."""

text_input: Optional[str] = Field(
default=None, description="""Text model input."""
)
output: Optional[str] = Field(
default=None, description="""The expected model output."""
default=None, description="""Required. The expected model output."""
)
text_input: Optional[str] = Field(
default=None, description="""Optional. Text model input."""
)


class TuningExampleDict(TypedDict, total=False):

text_input: Optional[str]
"""Text model input."""
"""A single example for tuning."""

output: Optional[str]
"""The expected model output."""
"""Required. The expected model output."""

text_input: Optional[str]
"""Optional. Text model input."""


TuningExampleOrDict = Union[TuningExample, TuningExampleDict]
Expand Down Expand Up @@ -11656,10 +11651,11 @@ class ListFilesResponse(_common.BaseModel):
default=None, description="""Used to retain the full HTTP response."""
)
next_page_token: Optional[str] = Field(
default=None, description="""A token to retrieve next page of results."""
default=None,
description="""A token that can be sent as a `page_token` into a subsequent `ListFiles` call.""",
)
files: Optional[list[File]] = Field(
default=None, description="""The list of files."""
default=None, description="""The list of `File`s."""
)


Expand All @@ -11670,10 +11666,10 @@ class ListFilesResponseDict(TypedDict, total=False):
"""Used to retain the full HTTP response."""

next_page_token: Optional[str]
"""A token to retrieve next page of results."""
"""A token that can be sent as a `page_token` into a subsequent `ListFiles` call."""

files: Optional[list[FileDict]]
"""The list of files."""
"""The list of `File`s."""


ListFilesResponseOrDict = Union[ListFilesResponse, ListFilesResponseDict]
Expand Down Expand Up @@ -12351,6 +12347,25 @@ def done(self) -> bool:
return self.state.name in JOB_STATES_ENDED


# Backward-compatibility alias: subclasses ThinkingConfig with an empty body,
# so it adds no fields or behavior of its own.
class GenerationConfigThinkingConfig(ThinkingConfig):
  """Config for thinking feature.

  This class will be deprecated. Please use `ThinkingConfig` instead.
  """


# Backward-compatibility alias for the TypedDict form; empty body, identical
# keys to ThinkingConfigDict.
class GenerationConfigThinkingConfigDict(ThinkingConfigDict):
  """Config for thinking feature.

  This class will be deprecated. Please use `ThinkingConfig` instead.
  """


# Union kept for callers annotated against the old name; covers both the
# model and dict forms.
GenerationConfigThinkingConfigOrDict = Union[
    GenerationConfigThinkingConfig, GenerationConfigThinkingConfigDict
]


class BatchJobDict(TypedDict, total=False):
"""Config for batches.create return value."""

Expand Down