diff --git a/google/genai/_live_converters.py b/google/genai/_live_converters.py index c7586db57..33ebe106c 100644 --- a/google/genai/_live_converters.py +++ b/google/genai/_live_converters.py @@ -226,6 +226,11 @@ def _GenerationConfig_to_vertex( if getv(from_object, ['top_p']) is not None: setv(to_object, ['topP'], getv(from_object, ['top_p'])) + if getv(from_object, ['enable_enhanced_civic_answers']) is not None: + raise ValueError( + 'enable_enhanced_civic_answers parameter is not supported in Vertex AI.' + ) + return to_object diff --git a/google/genai/models.py b/google/genai/models.py index 267fac2b5..3b8e8fa24 100644 --- a/google/genai/models.py +++ b/google/genai/models.py @@ -2398,6 +2398,11 @@ def _GenerationConfig_to_vertex( if getv(from_object, ['top_p']) is not None: setv(to_object, ['topP'], getv(from_object, ['top_p'])) + if getv(from_object, ['enable_enhanced_civic_answers']) is not None: + raise ValueError( + 'enable_enhanced_civic_answers parameter is not supported in Vertex AI.' 
+ ) + + return to_object diff --git a/google/genai/tests/live/test_live.py b/google/genai/tests/live/test_live.py index 608cd5917..4e5b76a3c 100644 --- a/google/genai/tests/live/test_live.py +++ b/google/genai/tests/live/test_live.py @@ -720,7 +720,11 @@ async def test_bidi_setup_to_api_speech_config(vertexai): result = await get_connect_message( mock_api_client(vertexai=vertexai), model='test_model', config=config_dict ) - assert result == expected_result + assert types.LiveClientMessage._from_response( + response=result, kwargs=None + ) == types.LiveClientMessage._from_response( + response=expected_result, kwargs=None + ) # Config is a LiveConnectConfig config = types.LiveConnectConfig( speech_config=types.SpeechConfig( @@ -745,7 +749,11 @@ async def test_bidi_setup_to_api_speech_config(vertexai): mock_api_client(vertexai=vertexai), model='test_model', config=config ) - assert result == expected_result + assert types.LiveClientMessage._from_response( + response=result, kwargs=None + ) == types.LiveClientMessage._from_response( + response=expected_result, kwargs=None + ) @pytest.mark.parametrize('vertexai', [True, False]) diff --git a/google/genai/tests/operations/test_get.py b/google/genai/tests/operations/test_get.py index 8c3d0df52..36b1e08fa 100644 --- a/google/genai/tests/operations/test_get.py +++ b/google/genai/tests/operations/test_get.py @@ -1,4 +1,4 @@ -# Copyright 2025 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/genai/tunings.py b/google/genai/tunings.py index 0a12124aa..34ff7072e 100644 --- a/google/genai/tunings.py +++ b/google/genai/tunings.py @@ -551,36 +551,6 @@ def _TuningJob_from_mldev( _TunedModel_from_mldev(getv(from_object, ['_self']), to_object), ) - if getv(from_object, ['customBaseModel']) is not None: - setv( - to_object, ['custom_base_model'], getv(from_object, ['customBaseModel']) - ) - - if getv(from_object, ['experiment']) is not None: - setv(to_object, ['experiment'], getv(from_object, ['experiment'])) - - if getv(from_object, ['labels']) is not None: - setv(to_object, ['labels'], getv(from_object, ['labels'])) - - if getv(from_object, ['outputUri']) is not None: - setv(to_object, ['output_uri'], getv(from_object, ['outputUri'])) - - if getv(from_object, ['pipelineJob']) is not None: - setv(to_object, ['pipeline_job'], getv(from_object, ['pipelineJob'])) - - if getv(from_object, ['serviceAccount']) is not None: - setv(to_object, ['service_account'], getv(from_object, ['serviceAccount'])) - - if getv(from_object, ['tunedModelDisplayName']) is not None: - setv( - to_object, - ['tuned_model_display_name'], - getv(from_object, ['tunedModelDisplayName']), - ) - - if getv(from_object, ['veoTuningSpec']) is not None: - setv(to_object, ['veo_tuning_spec'], getv(from_object, ['veoTuningSpec'])) - return to_object diff --git a/google/genai/types.py b/google/genai/types.py index 3f9a07d42..fe3e48361 100644 --- a/google/genai/types.py +++ b/google/genai/types.py @@ -118,6 +118,19 @@ class Language(_common.CaseInSensitiveEnum): """Python >= 3.10, with numpy and simpy available.""" +class FunctionResponseScheduling(_common.CaseInSensitiveEnum): + """Specifies how the response should be scheduled in the conversation.""" + + SCHEDULING_UNSPECIFIED = 'SCHEDULING_UNSPECIFIED' + """This value is unused.""" + SILENT = 'SILENT' + """Only add the result to the conversation context, do not interrupt or trigger generation.""" + WHEN_IDLE = 'WHEN_IDLE' + 
"""Add the result to the conversation context, and prompt to generate output without interrupting ongoing generation.""" + INTERRUPT = 'INTERRUPT' + """Add the result to the conversation context, interrupt ongoing generation and prompt to generate output.""" + + class Type(_common.CaseInSensitiveEnum): """Optional. The type of the data.""" @@ -144,14 +157,14 @@ class HarmCategory(_common.CaseInSensitiveEnum): HARM_CATEGORY_UNSPECIFIED = 'HARM_CATEGORY_UNSPECIFIED' """The harm category is unspecified.""" - HARM_CATEGORY_HATE_SPEECH = 'HARM_CATEGORY_HATE_SPEECH' - """The harm category is hate speech.""" - HARM_CATEGORY_DANGEROUS_CONTENT = 'HARM_CATEGORY_DANGEROUS_CONTENT' - """The harm category is dangerous content.""" HARM_CATEGORY_HARASSMENT = 'HARM_CATEGORY_HARASSMENT' """The harm category is harassment.""" + HARM_CATEGORY_HATE_SPEECH = 'HARM_CATEGORY_HATE_SPEECH' + """The harm category is hate speech.""" HARM_CATEGORY_SEXUALLY_EXPLICIT = 'HARM_CATEGORY_SEXUALLY_EXPLICIT' """The harm category is sexually explicit content.""" + HARM_CATEGORY_DANGEROUS_CONTENT = 'HARM_CATEGORY_DANGEROUS_CONTENT' + """The harm category is dangerous content.""" HARM_CATEGORY_CIVIC_INTEGRITY = 'HARM_CATEGORY_CIVIC_INTEGRITY' """Deprecated: Election filter is not longer supported. The harm category is civic integrity.""" HARM_CATEGORY_IMAGE_HATE = 'HARM_CATEGORY_IMAGE_HATE' @@ -702,19 +715,6 @@ class MediaModality(_common.CaseInSensitiveEnum): """Document, e.g. 
PDF.""" -class FunctionResponseScheduling(_common.CaseInSensitiveEnum): - """Specifies how the response should be scheduled in the conversation.""" - - SCHEDULING_UNSPECIFIED = 'SCHEDULING_UNSPECIFIED' - """This value is unused.""" - SILENT = 'SILENT' - """Only add the result to the conversation context, do not interrupt or trigger generation.""" - WHEN_IDLE = 'WHEN_IDLE' - """Add the result to the conversation context, and prompt to generate output without interrupting ongoing generation.""" - INTERRUPT = 'INTERRUPT' - """Add the result to the conversation context, interrupt ongoing generation and prompt to generate output.""" - - class StartSensitivity(_common.CaseInSensitiveEnum): """Start of speech sensitivity.""" @@ -2672,8 +2672,7 @@ class GoogleSearch(_common.BaseModel): ) exclude_domains: Optional[list[str]] = Field( default=None, - description="""Optional. List of domains to be excluded from the search results. - The default limit is 2000 domains.""", + description="""Optional. List of domains to be excluded from the search results. The default limit is 2000 domains. Example: ["amazon.com", "facebook.com"].""", ) @@ -2686,8 +2685,7 @@ class GoogleSearchDict(TypedDict, total=False): """ exclude_domains: Optional[list[str]] - """Optional. List of domains to be excluded from the search results. - The default limit is 2000 domains.""" + """Optional. List of domains to be excluded from the search results. The default limit is 2000 domains. Example: ["amazon.com", "facebook.com"].""" GoogleSearchOrDict = Union[GoogleSearch, GoogleSearchDict] @@ -8304,34 +8302,6 @@ class DeleteModelResponseDict(TypedDict, total=False): DeleteModelResponseOrDict = Union[DeleteModelResponse, DeleteModelResponseDict] -class GenerationConfigThinkingConfig(_common.BaseModel): - """Config for thinking features.""" - - include_thoughts: Optional[bool] = Field( - default=None, - description="""Optional. Indicates whether to include thoughts in the response. 
If true, thoughts are returned only when available.""", - ) - thinking_budget: Optional[int] = Field( - default=None, - description="""Optional. Indicates the thinking budget in tokens.""", - ) - - -class GenerationConfigThinkingConfigDict(TypedDict, total=False): - """Config for thinking features.""" - - include_thoughts: Optional[bool] - """Optional. Indicates whether to include thoughts in the response. If true, thoughts are returned only when available.""" - - thinking_budget: Optional[int] - """Optional. Indicates the thinking budget in tokens.""" - - -GenerationConfigThinkingConfigOrDict = Union[ - GenerationConfigThinkingConfig, GenerationConfigThinkingConfigDict -] - - class GenerationConfig(_common.BaseModel): """Generation config.""" @@ -8400,7 +8370,7 @@ class GenerationConfig(_common.BaseModel): default=None, description="""Optional. Controls the randomness of predictions.""", ) - thinking_config: Optional[GenerationConfigThinkingConfig] = Field( + thinking_config: Optional[ThinkingConfig] = Field( default=None, description="""Optional. Config for thinking features. An error will be returned if this field is set for models that don't support thinking.""", ) @@ -8412,6 +8382,10 @@ class GenerationConfig(_common.BaseModel): default=None, description="""Optional. If specified, nucleus sampling will be used.""", ) + enable_enhanced_civic_answers: Optional[bool] = Field( + default=None, + description="""Optional. Enables enhanced civic answers. It may not be available for all models.""", + ) class GenerationConfigDict(TypedDict, total=False): @@ -8474,7 +8448,7 @@ class GenerationConfigDict(TypedDict, total=False): temperature: Optional[float] """Optional. Controls the randomness of predictions.""" - thinking_config: Optional[GenerationConfigThinkingConfigDict] + thinking_config: Optional[ThinkingConfigDict] """Optional. Config for thinking features. 
An error will be returned if this field is set for models that don't support thinking.""" top_k: Optional[float] @@ -8483,6 +8457,9 @@ class GenerationConfigDict(TypedDict, total=False): top_p: Optional[float] """Optional. If specified, nucleus sampling will be used.""" + enable_enhanced_civic_answers: Optional[bool] + """Optional. Enables enhanced civic answers. It may not be available for all models.""" + GenerationConfigOrDict = Union[GenerationConfig, GenerationConfigDict] @@ -9335,14 +9312,22 @@ class TunedModelCheckpointDict(TypedDict, total=False): class TunedModel(_common.BaseModel): + """TunedModel for the Tuned Model of a Tuning Job.""" model: Optional[str] = Field( default=None, - description="""Output only. The resource name of the TunedModel. Format: `projects/{project}/locations/{location}/models/{model}@{version_id}` When tuning from a base model, the version_id will be 1. For continuous tuning, the version id will be incremented by 1 from the last version id in the parent model. E.g., `projects/{project}/locations/{location}/models/{model}@{last_version_id + 1}`""", + description="""Output only. The resource name of the TunedModel. + Format: `projects/{project}/locations/{location}/models/{model}@{version_id}` + When tuning from a base model, the version_id will be 1. + For continuous tuning, the version id will be incremented by 1 from the + last version id in the parent model. E.g., `projects/{project}/locations/{location}/models/{model}@{last_version_id + 1}` + """, ) endpoint: Optional[str] = Field( default=None, - description="""Output only. A resource name of an Endpoint. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}`.""", + description="""Output only. A resource name of an Endpoint. + Format: `projects/{project}/locations/{location}/endpoints/{endpoint}`. 
+ """, ) checkpoints: Optional[list[TunedModelCheckpoint]] = Field( default=None, @@ -9353,12 +9338,20 @@ class TunedModel(_common.BaseModel): class TunedModelDict(TypedDict, total=False): + """TunedModel for the Tuned Model of a Tuning Job.""" model: Optional[str] - """Output only. The resource name of the TunedModel. Format: `projects/{project}/locations/{location}/models/{model}@{version_id}` When tuning from a base model, the version_id will be 1. For continuous tuning, the version id will be incremented by 1 from the last version id in the parent model. E.g., `projects/{project}/locations/{location}/models/{model}@{last_version_id + 1}`""" + """Output only. The resource name of the TunedModel. + Format: `projects/{project}/locations/{location}/models/{model}@{version_id}` + When tuning from a base model, the version_id will be 1. + For continuous tuning, the version id will be incremented by 1 from the + last version id in the parent model. E.g., `projects/{project}/locations/{location}/models/{model}@{last_version_id + 1}` + """ endpoint: Optional[str] - """Output only. A resource name of an Endpoint. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}`.""" + """Output only. A resource name of an Endpoint. + Format: `projects/{project}/locations/{location}/endpoints/{endpoint}`. + """ checkpoints: Optional[list[TunedModelCheckpointDict]] """The checkpoints associated with this TunedModel. @@ -10829,22 +10822,24 @@ class _CancelTuningJobParametersDict(TypedDict, total=False): class TuningExample(_common.BaseModel): + """A single example for tuning.""" - text_input: Optional[str] = Field( - default=None, description="""Text model input.""" - ) output: Optional[str] = Field( - default=None, description="""The expected model output.""" + default=None, description="""Required. The expected model output.""" + ) + text_input: Optional[str] = Field( + default=None, description="""Optional. 
Text model input.""" ) class TuningExampleDict(TypedDict, total=False): - - text_input: Optional[str] - """Text model input.""" + """A single example for tuning.""" output: Optional[str] - """The expected model output.""" + """Required. The expected model output.""" + + text_input: Optional[str] + """Optional. Text model input.""" TuningExampleOrDict = Union[TuningExample, TuningExampleDict] @@ -11656,10 +11651,11 @@ class ListFilesResponse(_common.BaseModel): default=None, description="""Used to retain the full HTTP response.""" ) next_page_token: Optional[str] = Field( - default=None, description="""A token to retrieve next page of results.""" + default=None, + description="""A token that can be sent as a `page_token` into a subsequent `ListFiles` call.""", ) files: Optional[list[File]] = Field( - default=None, description="""The list of files.""" + default=None, description="""The list of `File`s.""" ) @@ -11670,10 +11666,10 @@ class ListFilesResponseDict(TypedDict, total=False): """Used to retain the full HTTP response.""" next_page_token: Optional[str] - """A token to retrieve next page of results.""" + """A token that can be sent as a `page_token` into a subsequent `ListFiles` call.""" files: Optional[list[FileDict]] - """The list of files.""" + """The list of `File`s.""" ListFilesResponseOrDict = Union[ListFilesResponse, ListFilesResponseDict] @@ -12351,6 +12347,25 @@ def done(self) -> bool: return self.state.name in JOB_STATES_ENDED +class GenerationConfigThinkingConfig(ThinkingConfig): + """Config for thinking feature. + + This class will be deprecated. Please use `ThinkingConfig` instead. + """ + + +class GenerationConfigThinkingConfigDict(ThinkingConfigDict): + """Config for thinking feature. + + This class will be deprecated. Please use `ThinkingConfig` instead. 
+ """ + + +GenerationConfigThinkingConfigOrDict = Union[ + GenerationConfigThinkingConfig, GenerationConfigThinkingConfigDict +] + + class BatchJobDict(TypedDict, total=False): """Config for batches.create return value."""