diff --git a/python/packages/azure-ai/agent_framework_azure_ai/__init__.py b/python/packages/azure-ai/agent_framework_azure_ai/__init__.py index 6e6ac7a5e5..cf2423693d 100644 --- a/python/packages/azure-ai/agent_framework_azure_ai/__init__.py +++ b/python/packages/azure-ai/agent_framework_azure_ai/__init__.py @@ -3,6 +3,7 @@ import importlib.metadata from ._chat_client import AzureAIAgentClient +from ._client import AzureAIClient from ._shared import AzureAISettings try: @@ -12,6 +13,7 @@ __all__ = [ "AzureAIAgentClient", + "AzureAIClient", "AzureAISettings", "__version__", ] diff --git a/python/packages/azure-ai/agent_framework_azure_ai/_client.py b/python/packages/azure-ai/agent_framework_azure_ai/_client.py new file mode 100644 index 0000000000..774349a85d --- /dev/null +++ b/python/packages/azure-ai/agent_framework_azure_ai/_client.py @@ -0,0 +1,354 @@ +# Copyright (c) Microsoft. All rights reserved. + +import sys +from collections.abc import MutableSequence +from typing import Any, ClassVar, TypeVar + +from agent_framework import ( + AGENT_FRAMEWORK_USER_AGENT, + ChatMessage, + ChatOptions, + HostedMCPTool, + TextContent, + get_logger, + use_chat_middleware, + use_function_invocation, +) +from agent_framework.exceptions import ServiceInitializationError +from agent_framework.observability import use_observability +from agent_framework.openai._responses_client import OpenAIBaseResponsesClient +from azure.ai.projects.aio import AIProjectClient +from azure.ai.projects.models import ( + MCPTool, + PromptAgentDefinition, + PromptAgentDefinitionText, + ResponseTextFormatConfigurationJsonSchema, +) +from azure.core.credentials_async import AsyncTokenCredential +from azure.core.exceptions import ResourceNotFoundError +from openai.types.responses.parsed_response import ( + ParsedResponse, +) +from openai.types.responses.response import Response as OpenAIResponse +from pydantic import BaseModel, ValidationError + +from ._shared import AzureAISettings + +if 
sys.version_info >= (3, 11): + from typing import Self # pragma: no cover +else: + from typing_extensions import Self # pragma: no cover + + +logger = get_logger("agent_framework.azure") + + +TAzureAIClient = TypeVar("TAzureAIClient", bound="AzureAIClient") + + +@use_function_invocation +@use_observability +@use_chat_middleware +class AzureAIClient(OpenAIBaseResponsesClient): + """Azure AI Agent client.""" + + OTEL_PROVIDER_NAME: ClassVar[str] = "azure.ai" # type: ignore[reportIncompatibleVariableOverride, misc] + + def __init__( + self, + *, + project_client: AIProjectClient | None = None, + agent_name: str | None = None, + agent_version: str | None = None, + conversation_id: str | None = None, + project_endpoint: str | None = None, + model_deployment_name: str | None = None, + async_credential: AsyncTokenCredential | None = None, + use_latest_version: bool | None = None, + env_file_path: str | None = None, + env_file_encoding: str | None = None, + **kwargs: Any, + ) -> None: + """Initialize an Azure AI Agent client. + + Keyword Args: + project_client: An existing AIProjectClient to use. If not provided, one will be created. + agent_name: The name to use when creating new agents. + agent_version: The version of the agent to use. + conversation_id: Default conversation ID to use for conversations. Can be overridden by + conversation_id property when making a request. + project_endpoint: The Azure AI Project endpoint URL. + Can also be set via environment variable AZURE_AI_PROJECT_ENDPOINT. + Ignored when a project_client is passed. + model_deployment_name: The model deployment name to use for agent creation. + Can also be set via environment variable AZURE_AI_MODEL_DEPLOYMENT_NAME. + async_credential: Azure async credential to use for authentication. + use_latest_version: Boolean flag that indicates whether to use latest agent version + if it exists in the service. + env_file_path: Path to environment file for loading settings. 
+ env_file_encoding: Encoding of the environment file. + kwargs: Additional keyword arguments passed to the parent class. + + Examples: + .. code-block:: python + + from agent_framework.azure import AzureAIClient + from azure.identity.aio import DefaultAzureCredential + + # Using environment variables + # Set AZURE_AI_PROJECT_ENDPOINT=https://your-project.cognitiveservices.azure.com + # Set AZURE_AI_MODEL_DEPLOYMENT_NAME=gpt-4 + credential = DefaultAzureCredential() + client = AzureAIClient(async_credential=credential) + + # Or passing parameters directly + client = AzureAIClient( + project_endpoint="https://your-project.cognitiveservices.azure.com", + model_deployment_name="gpt-4", + async_credential=credential, + ) + + # Or loading from a .env file + client = AzureAIClient(async_credential=credential, env_file_path="path/to/.env") + """ + try: + azure_ai_settings = AzureAISettings( + project_endpoint=project_endpoint, + model_deployment_name=model_deployment_name, + env_file_path=env_file_path, + env_file_encoding=env_file_encoding, + ) + except ValidationError as ex: + raise ServiceInitializationError("Failed to create Azure AI settings.", ex) from ex + + # If no project_client is provided, create one + should_close_client = False + if project_client is None: + if not azure_ai_settings.project_endpoint: + raise ServiceInitializationError( + "Azure AI project endpoint is required. Set via 'project_endpoint' parameter " + "or 'AZURE_AI_PROJECT_ENDPOINT' environment variable." 
+ ) + + # Use provided credential + if not async_credential: + raise ServiceInitializationError("Azure credential is required when project_client is not provided.") + project_client = AIProjectClient( + endpoint=azure_ai_settings.project_endpoint, + credential=async_credential, + user_agent=AGENT_FRAMEWORK_USER_AGENT, + ) + should_close_client = True + + # Initialize parent + super().__init__( + **kwargs, + ) + + # Initialize instance variables + self.agent_name = agent_name + self.agent_version = agent_version + self.use_latest_version = use_latest_version + self.project_client = project_client + self.credential = async_credential + self.model_id = azure_ai_settings.model_deployment_name + self.conversation_id = conversation_id + self._should_close_client = should_close_client # Track whether we should close client connection + + async def setup_azure_ai_observability(self, enable_sensitive_data: bool | None = None) -> None: + """Use this method to setup tracing in your Azure AI Project. + + This will take the connection string from the project project_client. + It will override any connection string that is set in the environment variables. + It will disable any OTLP endpoint that might have been set. + """ + try: + conn_string = await self.project_client.telemetry.get_application_insights_connection_string() + except ResourceNotFoundError: + logger.warning( + "No Application Insights connection string found for the Azure AI Project, " + "please call setup_observability() manually." 
+ ) + return + from agent_framework.observability import setup_observability + + setup_observability( + applicationinsights_connection_string=conn_string, enable_sensitive_data=enable_sensitive_data + ) + + async def __aenter__(self) -> "Self": + """Async context manager entry.""" + return self + + async def __aexit__(self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: Any) -> None: + """Async context manager exit.""" + await self.close() + + async def close(self) -> None: + """Close the project_client.""" + await self._close_client_if_needed() + + async def _get_agent_reference_or_create( + self, run_options: dict[str, Any], messages_instructions: str | None + ) -> dict[str, str]: + """Determine which agent to use and create if needed. + + Returns: + str: The agent_name to use + """ + agent_name = self.agent_name or "UnnamedAgent" + + # If no agent_version is provided, either use latest version or create a new agent: + if self.agent_version is None: + # Try to use latest version if requested and agent exists + if self.use_latest_version: + try: + existing_agent = await self.project_client.agents.get(agent_name) + self.agent_name = existing_agent.name + self.agent_version = existing_agent.versions.latest.version + return {"name": self.agent_name, "version": self.agent_version, "type": "agent_reference"} + except ResourceNotFoundError: + # Agent doesn't exist, fall through to creation logic + pass + + if "model" not in run_options or not run_options["model"]: + raise ServiceInitializationError( + "Model deployment name is required for agent creation, " + "can also be passed to the get_response methods." 
+ ) + + args: dict[str, Any] = {"model": run_options["model"]} + + if "tools" in run_options: + args["tools"] = run_options["tools"] + + if "response_format" in run_options: + response_format = run_options["response_format"] + args["text"] = PromptAgentDefinitionText( + format=ResponseTextFormatConfigurationJsonSchema( + name=response_format.__name__, + schema=response_format.model_json_schema(), + ) + ) + + # Combine instructions from messages and options + combined_instructions = [ + instructions + for instructions in [messages_instructions, run_options.get("instructions")] + if instructions + ] + if combined_instructions: + args["instructions"] = "".join(combined_instructions) + + created_agent = await self.project_client.agents.create_version( + agent_name=agent_name, definition=PromptAgentDefinition(**args) + ) + + self.agent_name = created_agent.name + self.agent_version = created_agent.version + + return {"name": agent_name, "version": self.agent_version, "type": "agent_reference"} + + async def _close_client_if_needed(self) -> None: + """Close project_client session if we created it.""" + if self._should_close_client: + await self.project_client.close() + + def _prepare_input(self, messages: MutableSequence[ChatMessage]) -> tuple[list[ChatMessage], str | None]: + """Prepare input from messages and convert system/developer messages to instructions.""" + result: list[ChatMessage] = [] + instructions_list: list[str] = [] + instructions: str | None = None + + # System/developer messages are turned into instructions, since there is no such message roles in Azure AI. 
+ for message in messages: + if message.role.value in ["system", "developer"]: + for text_content in [content for content in message.contents if isinstance(content, TextContent)]: + instructions_list.append(text_content.text) + else: + result.append(message) + + if len(instructions_list) > 0: + instructions = "".join(instructions_list) + + return result, instructions + + async def prepare_options( + self, messages: MutableSequence[ChatMessage], chat_options: ChatOptions + ) -> dict[str, Any]: + chat_options.store = bool(chat_options.store or chat_options.store is None) + prepared_messages, instructions = self._prepare_input(messages) + run_options = await super().prepare_options(prepared_messages, chat_options) + agent_reference = await self._get_agent_reference_or_create(run_options, instructions) + + run_options["extra_body"] = {"agent": agent_reference} + + conversation_id = chat_options.conversation_id or self.conversation_id + + # Handle different conversation ID formats + if conversation_id: + if conversation_id.startswith("resp_"): + # For response IDs, set previous_response_id and remove conversation property + run_options.pop("conversation", None) + run_options["previous_response_id"] = conversation_id + elif conversation_id.startswith("conv_"): + # For conversation IDs, set conversation and remove previous_response_id property + run_options.pop("previous_response_id", None) + run_options["conversation"] = conversation_id + + # Remove properties that are not supported on request level + # but were configured on agent level + exclude = ["model", "tools", "response_format"] + + for property in exclude: + run_options.pop(property, None) + + return run_options + + async def initialize_client(self) -> None: + """Initialize OpenAI client asynchronously.""" + self.client = await self.project_client.get_openai_client() # type: ignore + + def _update_agent_name(self, agent_name: str | None) -> None: + """Update the agent name in the chat client. 
+ + Args: + agent_name: The new name for the agent. + """ + # This is a no-op in the base class, but can be overridden by subclasses + # to update the agent name in the client. + if agent_name and not self.agent_name: + self.agent_name = agent_name + + def get_mcp_tool(self, tool: HostedMCPTool) -> Any: + """Get MCP tool from HostedMCPTool.""" + mcp = MCPTool(server_label=tool.name.replace(" ", "_"), server_url=str(tool.url)) + + if tool.allowed_tools: + mcp["allowed_tools"] = list(tool.allowed_tools) + + if tool.approval_mode: + match tool.approval_mode: + case str(): + mcp["require_approval"] = "always" if tool.approval_mode == "always_require" else "never" + case _: + if always_require_approvals := tool.approval_mode.get("always_require_approval"): + mcp["require_approval"] = {"always": {"tool_names": list(always_require_approvals)}} + if never_require_approvals := tool.approval_mode.get("never_require_approval"): + mcp["require_approval"] = {"never": {"tool_names": list(never_require_approvals)}} + + return mcp + + def get_conversation_id( + self, response: OpenAIResponse | ParsedResponse[BaseModel], store: bool | None + ) -> str | None: + """Get the conversation ID from the response if store is True.""" + if store: + # If conversation ID exists, it means that we operate with conversation + # so we use conversation ID as input and output. + if response.conversation and response.conversation.id: + return response.conversation.id + # If conversation ID doesn't exist, we operate with responses + # so we use response ID as input and output. 
+ return response.id + return None diff --git a/python/packages/azure-ai/pyproject.toml b/python/packages/azure-ai/pyproject.toml index fa15e4c074..18a9b5b500 100644 --- a/python/packages/azure-ai/pyproject.toml +++ b/python/packages/azure-ai/pyproject.toml @@ -24,7 +24,7 @@ classifiers = [ ] dependencies = [ "agent-framework-core", - "azure-ai-projects >= 1.0.0b11", + "azure-ai-projects >= 2.0.0b1", "azure-ai-agents == 1.2.0b5", "aiohttp", ] diff --git a/python/packages/azure-ai/tests/test_azure_ai_client.py b/python/packages/azure-ai/tests/test_azure_ai_client.py new file mode 100644 index 0000000000..576218f270 --- /dev/null +++ b/python/packages/azure-ai/tests/test_azure_ai_client.py @@ -0,0 +1,743 @@ +# Copyright (c) Microsoft. All rights reserved. + +from unittest.mock import AsyncMock, MagicMock, patch + +import pytest +from agent_framework import ( + ChatClientProtocol, + ChatMessage, + ChatOptions, + Role, + TextContent, +) +from agent_framework.exceptions import ServiceInitializationError +from azure.ai.projects.models import ( + ResponseTextFormatConfigurationJsonSchema, +) +from openai.types.responses.parsed_response import ParsedResponse +from openai.types.responses.response import Response as OpenAIResponse +from pydantic import BaseModel, ConfigDict, ValidationError + +from agent_framework_azure_ai import AzureAIClient, AzureAISettings + + +def create_test_azure_ai_client( + mock_project_client: MagicMock, + agent_name: str | None = None, + agent_version: str | None = None, + conversation_id: str | None = None, + azure_ai_settings: AzureAISettings | None = None, + should_close_client: bool = False, + use_latest_version: bool | None = None, +) -> AzureAIClient: + """Helper function to create AzureAIClient instances for testing, bypassing normal validation.""" + if azure_ai_settings is None: + azure_ai_settings = AzureAISettings(env_file_path="test.env") + + # Create client instance directly + client = object.__new__(AzureAIClient) + + # Set attributes 
directly + client.project_client = mock_project_client + client.credential = None + client.agent_name = agent_name + client.agent_version = agent_version + client.use_latest_version = use_latest_version + client.model_id = azure_ai_settings.model_deployment_name + client.conversation_id = conversation_id + client._should_close_client = should_close_client # type: ignore + client.additional_properties = {} + client.middleware = None + + # Mock the OpenAI client attribute + mock_openai_client = MagicMock() + mock_openai_client.conversations = MagicMock() + mock_openai_client.conversations.create = AsyncMock() + client.client = mock_openai_client + + return client + + +def test_azure_ai_settings_init(azure_ai_unit_test_env: dict[str, str]) -> None: + """Test AzureAISettings initialization.""" + settings = AzureAISettings() + + assert settings.project_endpoint == azure_ai_unit_test_env["AZURE_AI_PROJECT_ENDPOINT"] + assert settings.model_deployment_name == azure_ai_unit_test_env["AZURE_AI_MODEL_DEPLOYMENT_NAME"] + + +def test_azure_ai_settings_init_with_explicit_values() -> None: + """Test AzureAISettings initialization with explicit values.""" + settings = AzureAISettings( + project_endpoint="https://custom-endpoint.com/", + model_deployment_name="custom-model", + ) + + assert settings.project_endpoint == "https://custom-endpoint.com/" + assert settings.model_deployment_name == "custom-model" + + +def test_azure_ai_client_init_with_project_client(mock_project_client: MagicMock) -> None: + """Test AzureAIClient initialization with existing project_client.""" + with patch("agent_framework_azure_ai._client.AzureAISettings") as mock_settings: + mock_settings.return_value.project_endpoint = None + mock_settings.return_value.model_deployment_name = "test-model" + + client = AzureAIClient( + project_client=mock_project_client, + agent_name="test-agent", + agent_version="1.0", + ) + + assert client.project_client is mock_project_client + assert client.agent_name == 
"test-agent" + assert client.agent_version == "1.0" + assert not client._should_close_client # type: ignore + assert isinstance(client, ChatClientProtocol) + + +def test_azure_ai_client_init_auto_create_client( + azure_ai_unit_test_env: dict[str, str], + mock_azure_credential: MagicMock, +) -> None: + """Test AzureAIClient initialization with auto-created project_client.""" + with patch("agent_framework_azure_ai._client.AIProjectClient") as mock_ai_project_client: + mock_project_client = MagicMock() + mock_ai_project_client.return_value = mock_project_client + + client = AzureAIClient( + project_endpoint=azure_ai_unit_test_env["AZURE_AI_PROJECT_ENDPOINT"], + model_deployment_name=azure_ai_unit_test_env["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + async_credential=mock_azure_credential, + agent_name="test-agent", + ) + + assert client.project_client is mock_project_client + assert client.agent_name == "test-agent" + assert client._should_close_client # type: ignore + + # Verify AIProjectClient was called with correct parameters + mock_ai_project_client.assert_called_once() + + +def test_azure_ai_client_init_missing_project_endpoint() -> None: + """Test AzureAIClient initialization when project_endpoint is missing and no project_client provided.""" + with patch("agent_framework_azure_ai._client.AzureAISettings") as mock_settings: + mock_settings.return_value.project_endpoint = None + mock_settings.return_value.model_deployment_name = "test-model" + + with pytest.raises(ServiceInitializationError, match="Azure AI project endpoint is required"): + AzureAIClient(async_credential=MagicMock()) + + +def test_azure_ai_client_init_missing_credential(azure_ai_unit_test_env: dict[str, str]) -> None: + """Test AzureAIClient.__init__ when async_credential is missing and no project_client provided.""" + with pytest.raises( + ServiceInitializationError, match="Azure credential is required when project_client is not provided" + ): + AzureAIClient( + 
project_endpoint=azure_ai_unit_test_env["AZURE_AI_PROJECT_ENDPOINT"], + model_deployment_name=azure_ai_unit_test_env["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + ) + + +def test_azure_ai_client_init_validation_error(mock_azure_credential: MagicMock) -> None: + """Test that ValidationError in AzureAISettings is properly handled.""" + with patch("agent_framework_azure_ai._client.AzureAISettings") as mock_settings: + mock_settings.side_effect = ValidationError.from_exception_data("test", []) + + with pytest.raises(ServiceInitializationError, match="Failed to create Azure AI settings"): + AzureAIClient(async_credential=mock_azure_credential) + + +async def test_azure_ai_client_get_agent_reference_or_create_existing_version( + mock_project_client: MagicMock, +) -> None: + """Test _get_agent_reference_or_create when agent_version is already provided.""" + client = create_test_azure_ai_client(mock_project_client, agent_name="existing-agent", agent_version="1.0") + + agent_ref = await client._get_agent_reference_or_create({}, None) # type: ignore + + assert agent_ref == {"name": "existing-agent", "version": "1.0", "type": "agent_reference"} + + +async def test_azure_ai_client_get_agent_reference_or_create_new_agent( + mock_project_client: MagicMock, + azure_ai_unit_test_env: dict[str, str], +) -> None: + """Test _get_agent_reference_or_create when creating a new agent.""" + azure_ai_settings = AzureAISettings(model_deployment_name=azure_ai_unit_test_env["AZURE_AI_MODEL_DEPLOYMENT_NAME"]) + client = create_test_azure_ai_client( + mock_project_client, agent_name="new-agent", azure_ai_settings=azure_ai_settings + ) + + # Mock agent creation response + mock_agent = MagicMock() + mock_agent.name = "new-agent" + mock_agent.version = "1.0" + mock_project_client.agents.create_version = AsyncMock(return_value=mock_agent) + + run_options = {"model": azure_ai_settings.model_deployment_name} + agent_ref = await client._get_agent_reference_or_create(run_options, None) # type: ignore + + assert 
agent_ref == {"name": "new-agent", "version": "1.0", "type": "agent_reference"} + assert client.agent_name == "new-agent" + assert client.agent_version == "1.0" + + +async def test_azure_ai_client_get_agent_reference_missing_model( + mock_project_client: MagicMock, +) -> None: + """Test _get_agent_reference_or_create when model is missing for agent creation.""" + client = create_test_azure_ai_client(mock_project_client, agent_name="test-agent") + + with pytest.raises(ServiceInitializationError, match="Model deployment name is required for agent creation"): + await client._get_agent_reference_or_create({}, None) # type: ignore + + +async def test_azure_ai_client_prepare_input_with_system_messages( + mock_project_client: MagicMock, +) -> None: + """Test _prepare_input converts system/developer messages to instructions.""" + client = create_test_azure_ai_client(mock_project_client) + + messages = [ + ChatMessage(role=Role.SYSTEM, contents=[TextContent(text="You are a helpful assistant.")]), + ChatMessage(role=Role.USER, contents=[TextContent(text="Hello")]), + ChatMessage(role=Role.ASSISTANT, contents=[TextContent(text="System response")]), + ] + + result_messages, instructions = client._prepare_input(messages) # type: ignore + + assert len(result_messages) == 2 + assert result_messages[0].role == Role.USER + assert result_messages[1].role == Role.ASSISTANT + assert instructions == "You are a helpful assistant." 
+ + +async def test_azure_ai_client_prepare_input_no_system_messages( + mock_project_client: MagicMock, +) -> None: + """Test _prepare_input with no system/developer messages.""" + client = create_test_azure_ai_client(mock_project_client) + + messages = [ + ChatMessage(role=Role.USER, contents=[TextContent(text="Hello")]), + ChatMessage(role=Role.ASSISTANT, contents=[TextContent(text="Hi there!")]), + ] + + result_messages, instructions = client._prepare_input(messages) # type: ignore + + assert len(result_messages) == 2 + assert instructions is None + + +async def test_azure_ai_client_prepare_options_basic(mock_project_client: MagicMock) -> None: + """Test prepare_options basic functionality.""" + client = create_test_azure_ai_client(mock_project_client, agent_name="test-agent", agent_version="1.0") + + messages = [ChatMessage(role=Role.USER, contents=[TextContent(text="Hello")])] + chat_options = ChatOptions() + + with ( + patch.object(client.__class__.__bases__[0], "prepare_options", return_value={"model": "test-model"}), + patch.object( + client, + "_get_agent_reference_or_create", + return_value={"name": "test-agent", "version": "1.0", "type": "agent_reference"}, + ), + ): + run_options = await client.prepare_options(messages, chat_options) + + assert "extra_body" in run_options + assert run_options["extra_body"]["agent"]["name"] == "test-agent" + + +async def test_azure_ai_client_initialize_client(mock_project_client: MagicMock) -> None: + """Test initialize_client method.""" + client = create_test_azure_ai_client(mock_project_client) + + mock_openai_client = MagicMock() + mock_project_client.get_openai_client = AsyncMock(return_value=mock_openai_client) + + await client.initialize_client() + + assert client.client is mock_openai_client + mock_project_client.get_openai_client.assert_called_once() + + +def test_azure_ai_client_update_agent_name(mock_project_client: MagicMock) -> None: + """Test _update_agent_name method.""" + client = 
create_test_azure_ai_client(mock_project_client) + + # Test updating agent name when current is None + with patch.object(client, "_update_agent_name") as mock_update: + mock_update.return_value = None + client._update_agent_name("new-agent") # type: ignore + mock_update.assert_called_once_with("new-agent") + + # Test behavior when agent name is updated + assert client.agent_name is None # Should remain None since we didn't actually update + client.agent_name = "test-agent" # Manually set for the test + + # Test with None input + with patch.object(client, "_update_agent_name") as mock_update: + mock_update.return_value = None + client._update_agent_name(None) # type: ignore + mock_update.assert_called_once_with(None) + + +async def test_azure_ai_client_async_context_manager(mock_project_client: MagicMock) -> None: + """Test async context manager functionality.""" + client = create_test_azure_ai_client(mock_project_client, should_close_client=True) + + mock_project_client.close = AsyncMock() + + async with client as ctx_client: + assert ctx_client is client + + # Should call close after exiting context + mock_project_client.close.assert_called_once() + + +async def test_azure_ai_client_close_method(mock_project_client: MagicMock) -> None: + """Test close method.""" + client = create_test_azure_ai_client(mock_project_client, should_close_client=True) + + mock_project_client.close = AsyncMock() + + await client.close() + + mock_project_client.close.assert_called_once() + + +async def test_azure_ai_client_close_client_when_should_close_false(mock_project_client: MagicMock) -> None: + """Test _close_client_if_needed when should_close_client is False.""" + client = create_test_azure_ai_client(mock_project_client, should_close_client=False) + + mock_project_client.close = AsyncMock() + + await client._close_client_if_needed() # type: ignore + + # Should not call close when should_close_client is False + mock_project_client.close.assert_not_called() + + +async def 
test_azure_ai_client_agent_creation_with_instructions( + mock_project_client: MagicMock, +) -> None: + """Test agent creation with combined instructions.""" + client = create_test_azure_ai_client(mock_project_client, agent_name="test-agent") + + # Mock agent creation response + mock_agent = MagicMock() + mock_agent.name = "test-agent" + mock_agent.version = "1.0" + mock_project_client.agents.create_version = AsyncMock(return_value=mock_agent) + + run_options = {"model": "test-model", "instructions": "Option instructions. "} + messages_instructions = "Message instructions. " + + await client._get_agent_reference_or_create(run_options, messages_instructions) # type: ignore + + # Verify agent was created with combined instructions + call_args = mock_project_client.agents.create_version.call_args + assert call_args[1]["definition"].instructions == "Message instructions. Option instructions. " + + +async def test_azure_ai_client_agent_creation_with_tools( + mock_project_client: MagicMock, +) -> None: + """Test agent creation with tools.""" + client = create_test_azure_ai_client(mock_project_client, agent_name="test-agent") + + # Mock agent creation response + mock_agent = MagicMock() + mock_agent.name = "test-agent" + mock_agent.version = "1.0" + mock_project_client.agents.create_version = AsyncMock(return_value=mock_agent) + + test_tools = [{"type": "function", "function": {"name": "test_tool"}}] + run_options = {"model": "test-model", "tools": test_tools} + + await client._get_agent_reference_or_create(run_options, None) # type: ignore + + # Verify agent was created with tools + call_args = mock_project_client.agents.create_version.call_args + assert call_args[1]["definition"].tools == test_tools + + +async def test_azure_ai_client_use_latest_version_existing_agent( + mock_project_client: MagicMock, +) -> None: + """Test _get_agent_reference_or_create when use_latest_version=True and agent exists.""" + client = create_test_azure_ai_client(mock_project_client, 
agent_name="existing-agent", use_latest_version=True) + + # Mock existing agent response + mock_existing_agent = MagicMock() + mock_existing_agent.name = "existing-agent" + mock_existing_agent.versions.latest.version = "2.5" + mock_project_client.agents.get = AsyncMock(return_value=mock_existing_agent) + + run_options = {"model": "test-model"} + agent_ref = await client._get_agent_reference_or_create(run_options, None) # type: ignore + + # Verify existing agent was retrieved and used + mock_project_client.agents.get.assert_called_once_with("existing-agent") + mock_project_client.agents.create_version.assert_not_called() + + assert agent_ref == {"name": "existing-agent", "version": "2.5", "type": "agent_reference"} + assert client.agent_name == "existing-agent" + assert client.agent_version == "2.5" + + +async def test_azure_ai_client_use_latest_version_agent_not_found( + mock_project_client: MagicMock, +) -> None: + """Test _get_agent_reference_or_create when use_latest_version=True but agent doesn't exist.""" + from azure.core.exceptions import ResourceNotFoundError + + client = create_test_azure_ai_client(mock_project_client, agent_name="non-existing-agent", use_latest_version=True) + + # Mock ResourceNotFoundError when trying to retrieve agent + mock_project_client.agents.get = AsyncMock(side_effect=ResourceNotFoundError("Agent not found")) + + # Mock agent creation response for fallback + mock_created_agent = MagicMock() + mock_created_agent.name = "non-existing-agent" + mock_created_agent.version = "1.0" + mock_project_client.agents.create_version = AsyncMock(return_value=mock_created_agent) + + run_options = {"model": "test-model"} + agent_ref = await client._get_agent_reference_or_create(run_options, None) # type: ignore + + # Verify retrieval was attempted and creation was used as fallback + mock_project_client.agents.get.assert_called_once_with("non-existing-agent") + mock_project_client.agents.create_version.assert_called_once() + + assert agent_ref == 
{"name": "non-existing-agent", "version": "1.0", "type": "agent_reference"} + assert client.agent_name == "non-existing-agent" + assert client.agent_version == "1.0" + + +async def test_azure_ai_client_use_latest_version_false( + mock_project_client: MagicMock, +) -> None: + """Test _get_agent_reference_or_create when use_latest_version=False (default behavior).""" + client = create_test_azure_ai_client(mock_project_client, agent_name="test-agent", use_latest_version=False) + + # Mock agent creation response + mock_created_agent = MagicMock() + mock_created_agent.name = "test-agent" + mock_created_agent.version = "1.0" + mock_project_client.agents.create_version = AsyncMock(return_value=mock_created_agent) + + run_options = {"model": "test-model"} + agent_ref = await client._get_agent_reference_or_create(run_options, None) # type: ignore + + # Verify retrieval was not attempted and creation was used directly + mock_project_client.agents.get.assert_not_called() + mock_project_client.agents.create_version.assert_called_once() + + assert agent_ref == {"name": "test-agent", "version": "1.0", "type": "agent_reference"} + + +async def test_azure_ai_client_use_latest_version_with_existing_agent_version( + mock_project_client: MagicMock, +) -> None: + """Test that use_latest_version is ignored when agent_version is already provided.""" + client = create_test_azure_ai_client( + mock_project_client, agent_name="test-agent", agent_version="3.0", use_latest_version=True + ) + + agent_ref = await client._get_agent_reference_or_create({}, None) # type: ignore + + # Verify neither retrieval nor creation was attempted since version is already set + mock_project_client.agents.get.assert_not_called() + mock_project_client.agents.create_version.assert_not_called() + + assert agent_ref == {"name": "test-agent", "version": "3.0", "type": "agent_reference"} + + +class ResponseFormatModel(BaseModel): + """Test Pydantic model for response format testing.""" + + name: str + value: int + 
description: str + model_config = ConfigDict(extra="forbid") + + +async def test_azure_ai_client_agent_creation_with_response_format( + mock_project_client: MagicMock, +) -> None: + """Test agent creation with response_format configuration.""" + client = create_test_azure_ai_client(mock_project_client, agent_name="test-agent") + + # Mock agent creation response + mock_agent = MagicMock() + mock_agent.name = "test-agent" + mock_agent.version = "1.0" + mock_project_client.agents.create_version = AsyncMock(return_value=mock_agent) + + run_options = {"model": "test-model", "response_format": ResponseFormatModel} + + await client._get_agent_reference_or_create(run_options, None) # type: ignore + + # Verify agent was created with response format configuration + call_args = mock_project_client.agents.create_version.call_args + created_definition = call_args[1]["definition"] + + # Check that text format configuration was set + assert hasattr(created_definition, "text") + assert created_definition.text is not None + + # Check that the format is a ResponseTextFormatConfigurationJsonSchema + assert hasattr(created_definition.text, "format") + format_config = created_definition.text.format + assert isinstance(format_config, ResponseTextFormatConfigurationJsonSchema) + + # Check the schema name matches the model class name + assert format_config.name == "ResponseFormatModel" + + # Check that schema was generated correctly + assert format_config.schema is not None + schema = format_config.schema + assert "properties" in schema + assert "name" in schema["properties"] + assert "value" in schema["properties"] + assert "description" in schema["properties"] + + +async def test_azure_ai_client_prepare_options_excludes_response_format( + mock_project_client: MagicMock, +) -> None: + """Test that prepare_options excludes response_format from final run options.""" + client = create_test_azure_ai_client(mock_project_client, agent_name="test-agent", agent_version="1.0") + + messages = 
[ChatMessage(role=Role.USER, contents=[TextContent(text="Hello")])] + chat_options = ChatOptions() + + with ( + patch.object( + client.__class__.__bases__[0], + "prepare_options", + return_value={"model": "test-model", "response_format": ResponseFormatModel}, + ), + patch.object( + client, + "_get_agent_reference_or_create", + return_value={"name": "test-agent", "version": "1.0", "type": "agent_reference"}, + ), + ): + run_options = await client.prepare_options(messages, chat_options) + + # response_format should be excluded from final run options + assert "response_format" not in run_options + # But extra_body should contain agent reference + assert "extra_body" in run_options + assert run_options["extra_body"]["agent"]["name"] == "test-agent" + + +async def test_azure_ai_client_prepare_options_with_resp_conversation_id( + mock_project_client: MagicMock, +) -> None: + """Test prepare_options with conversation ID starting with 'resp_'.""" + client = create_test_azure_ai_client(mock_project_client, agent_name="test-agent", agent_version="1.0") + + messages = [ChatMessage(role=Role.USER, contents=[TextContent(text="Hello")])] + chat_options = ChatOptions(conversation_id="resp_12345") + + with ( + patch.object( + client.__class__.__bases__[0], + "prepare_options", + return_value={"model": "test-model", "previous_response_id": "old_value", "conversation": "old_conv"}, + ), + patch.object( + client, + "_get_agent_reference_or_create", + return_value={"name": "test-agent", "version": "1.0", "type": "agent_reference"}, + ), + ): + run_options = await client.prepare_options(messages, chat_options) + + # Should set previous_response_id and remove conversation property + assert run_options["previous_response_id"] == "resp_12345" + assert "conversation" not in run_options + + +async def test_azure_ai_client_prepare_options_with_conv_conversation_id( + mock_project_client: MagicMock, +) -> None: + """Test prepare_options with conversation ID starting with 'conv_'.""" + client 
= create_test_azure_ai_client(mock_project_client, agent_name="test-agent", agent_version="1.0") + + messages = [ChatMessage(role=Role.USER, contents=[TextContent(text="Hello")])] + chat_options = ChatOptions(conversation_id="conv_67890") + + with ( + patch.object( + client.__class__.__bases__[0], + "prepare_options", + return_value={"model": "test-model", "previous_response_id": "old_value", "conversation": "old_conv"}, + ), + patch.object( + client, + "_get_agent_reference_or_create", + return_value={"name": "test-agent", "version": "1.0", "type": "agent_reference"}, + ), + ): + run_options = await client.prepare_options(messages, chat_options) + + # Should set conversation and remove previous_response_id property + assert run_options["conversation"] == "conv_67890" + assert "previous_response_id" not in run_options + + +async def test_azure_ai_client_prepare_options_with_client_conversation_id( + mock_project_client: MagicMock, +) -> None: + """Test prepare_options using client's default conversation ID when chat options don't have one.""" + client = create_test_azure_ai_client( + mock_project_client, agent_name="test-agent", agent_version="1.0", conversation_id="resp_client_default" + ) + + messages = [ChatMessage(role=Role.USER, contents=[TextContent(text="Hello")])] + chat_options = ChatOptions() # No conversation_id specified + + with ( + patch.object( + client.__class__.__bases__[0], + "prepare_options", + return_value={"model": "test-model", "previous_response_id": "old_value", "conversation": "old_conv"}, + ), + patch.object( + client, + "_get_agent_reference_or_create", + return_value={"name": "test-agent", "version": "1.0", "type": "agent_reference"}, + ), + ): + run_options = await client.prepare_options(messages, chat_options) + + # Should use client's default conversation_id and set previous_response_id + assert run_options["previous_response_id"] == "resp_client_default" + assert "conversation" not in run_options + + +def 
test_get_conversation_id_with_store_true_and_conversation_id() -> None: + """Test get_conversation_id returns conversation ID when store is True and conversation exists.""" + client = create_test_azure_ai_client(MagicMock()) + + # Mock OpenAI response with conversation + mock_response = MagicMock(spec=OpenAIResponse) + mock_response.id = "resp_12345" + mock_conversation = MagicMock() + mock_conversation.id = "conv_67890" + mock_response.conversation = mock_conversation + + result = client.get_conversation_id(mock_response, store=True) + + assert result == "conv_67890" + + +def test_get_conversation_id_with_store_true_and_no_conversation() -> None: + """Test get_conversation_id returns response ID when store is True and no conversation exists.""" + client = create_test_azure_ai_client(MagicMock()) + + # Mock OpenAI response without conversation + mock_response = MagicMock(spec=OpenAIResponse) + mock_response.id = "resp_12345" + mock_response.conversation = None + + result = client.get_conversation_id(mock_response, store=True) + + assert result == "resp_12345" + + +def test_get_conversation_id_with_store_true_and_empty_conversation_id() -> None: + """Test get_conversation_id returns response ID when store is True and conversation ID is empty.""" + client = create_test_azure_ai_client(MagicMock()) + + # Mock OpenAI response with conversation but empty ID + mock_response = MagicMock(spec=OpenAIResponse) + mock_response.id = "resp_12345" + mock_conversation = MagicMock() + mock_conversation.id = "" + mock_response.conversation = mock_conversation + + result = client.get_conversation_id(mock_response, store=True) + + assert result == "resp_12345" + + +def test_get_conversation_id_with_store_false() -> None: + """Test get_conversation_id returns None when store is False.""" + client = create_test_azure_ai_client(MagicMock()) + + # Mock OpenAI response with conversation + mock_response = MagicMock(spec=OpenAIResponse) + mock_response.id = "resp_12345" + mock_conversation 
= MagicMock() + mock_conversation.id = "conv_67890" + mock_response.conversation = mock_conversation + + result = client.get_conversation_id(mock_response, store=False) + + assert result is None + + +def test_get_conversation_id_with_parsed_response_and_store_true() -> None: + """Test get_conversation_id works with ParsedResponse when store is True.""" + client = create_test_azure_ai_client(MagicMock()) + + # Mock ParsedResponse with conversation + mock_response = MagicMock(spec=ParsedResponse[BaseModel]) + mock_response.id = "resp_parsed_12345" + mock_conversation = MagicMock() + mock_conversation.id = "conv_parsed_67890" + mock_response.conversation = mock_conversation + + result = client.get_conversation_id(mock_response, store=True) + + assert result == "conv_parsed_67890" + + +def test_get_conversation_id_with_parsed_response_no_conversation() -> None: + """Test get_conversation_id returns response ID with ParsedResponse when no conversation exists.""" + client = create_test_azure_ai_client(MagicMock()) + + # Mock ParsedResponse without conversation + mock_response = MagicMock(spec=ParsedResponse[BaseModel]) + mock_response.id = "resp_parsed_12345" + mock_response.conversation = None + + result = client.get_conversation_id(mock_response, store=True) + + assert result == "resp_parsed_12345" + + +@pytest.fixture +def mock_project_client() -> MagicMock: + """Fixture that provides a mock AIProjectClient.""" + mock_client = MagicMock() + + # Mock agents property + mock_client.agents = MagicMock() + mock_client.agents.create_version = AsyncMock() + + # Mock conversations property + mock_client.conversations = MagicMock() + mock_client.conversations.create = AsyncMock() + + # Mock telemetry property + mock_client.telemetry = MagicMock() + mock_client.telemetry.get_application_insights_connection_string = AsyncMock() + + # Mock get_openai_client method + mock_client.get_openai_client = AsyncMock() + + # Mock close method + mock_client.close = AsyncMock() + + return 
mock_client diff --git a/python/packages/core/agent_framework/_clients.py b/python/packages/core/agent_framework/_clients.py index 116148b80f..630e7f8709 100644 --- a/python/packages/core/agent_framework/_clients.py +++ b/python/packages/core/agent_framework/_clients.py @@ -564,10 +564,6 @@ async def get_response( # Validate that store is True when conversation_id is set if chat_options.conversation_id is not None and chat_options.store is not True: - logger.warning( - "When conversation_id is set, store must be True for service-managed threads. " - "Automatically setting store=True." - ) chat_options.store = True if chat_options.instructions: @@ -663,10 +659,6 @@ async def get_streaming_response( # Validate that store is True when conversation_id is set if chat_options.conversation_id is not None and chat_options.store is not True: - logger.warning( - "When conversation_id is set, store must be True for service-managed threads. " - "Automatically setting store=True." - ) chat_options.store = True if chat_options.instructions: diff --git a/python/packages/core/agent_framework/_tools.py b/python/packages/core/agent_framework/_tools.py index 117c9efe52..6edd258e15 100644 --- a/python/packages/core/agent_framework/_tools.py +++ b/python/packages/core/agent_framework/_tools.py @@ -1636,7 +1636,7 @@ async def function_invocation_wrapper( # this runs in every but the first run # we need to keep track of all function call messages fcc_messages.extend(response.messages) - if getattr(kwargs.get("chat_options"), "store", False): + if response.conversation_id is not None: prepped_messages.clear() prepped_messages.append(result_message) else: @@ -1839,7 +1839,7 @@ async def streaming_function_invocation_wrapper( # this runs in every but the first run # we need to keep track of all function call messages fcc_messages.extend(response.messages) - if getattr(kwargs.get("chat_options"), "store", False): + if response.conversation_id is not None: prepped_messages.clear() 
prepped_messages.append(result_message) else: diff --git a/python/packages/core/agent_framework/azure/__init__.py b/python/packages/core/agent_framework/azure/__init__.py index 5dfab603cb..23b2085cbc 100644 --- a/python/packages/core/agent_framework/azure/__init__.py +++ b/python/packages/core/agent_framework/azure/__init__.py @@ -6,6 +6,7 @@ _IMPORTS: dict[str, tuple[str, str]] = { "AzureAIAgentClient": ("agent_framework_azure_ai", "azure-ai"), + "AzureAIClient": ("agent_framework_azure_ai", "azure-ai"), "AzureOpenAIAssistantsClient": ("agent_framework.azure._assistants_client", "core"), "AzureOpenAIChatClient": ("agent_framework.azure._chat_client", "core"), "AzureAISettings": ("agent_framework_azure_ai", "azure-ai"), diff --git a/python/packages/core/agent_framework/azure/__init__.pyi b/python/packages/core/agent_framework/azure/__init__.pyi index 742325a736..582c7a05be 100644 --- a/python/packages/core/agent_framework/azure/__init__.pyi +++ b/python/packages/core/agent_framework/azure/__init__.pyi @@ -1,6 +1,6 @@ # Copyright (c) Microsoft. All rights reserved. 
-from agent_framework_azure_ai import AzureAIAgentClient, AzureAISettings +from agent_framework_azure_ai import AzureAIAgentClient, AzureAIClient, AzureAISettings from agent_framework.azure._assistants_client import AzureOpenAIAssistantsClient from agent_framework.azure._chat_client import AzureOpenAIChatClient @@ -10,6 +10,7 @@ from agent_framework.azure._shared import AzureOpenAISettings __all__ = [ "AzureAIAgentClient", + "AzureAIClient", "AzureAISettings", "AzureOpenAIAssistantsClient", "AzureOpenAIChatClient", diff --git a/python/packages/core/agent_framework/openai/_assistants_client.py b/python/packages/core/agent_framework/openai/_assistants_client.py index 8a28075e62..6255a6b8db 100644 --- a/python/packages/core/agent_framework/openai/_assistants_client.py +++ b/python/packages/core/agent_framework/openai/_assistants_client.py @@ -161,7 +161,8 @@ async def __aexit__(self, exc_type: type[BaseException] | None, exc_val: BaseExc async def close(self) -> None: """Clean up any assistants we created.""" if self._should_delete_assistant and self.assistant_id is not None: - await self.client.beta.assistants.delete(self.assistant_id) + client = await self.ensure_client() + await client.beta.assistants.delete(self.assistant_id) object.__setattr__(self, "assistant_id", None) object.__setattr__(self, "_should_delete_assistant", False) @@ -215,7 +216,11 @@ async def _get_assistant_id_or_create(self) -> str: """ # If no assistant is provided, create a temporary assistant if self.assistant_id is None: - created_assistant = await self.client.beta.assistants.create(name=self.assistant_name, model=self.model_id) + if not self.model_id: + raise ServiceInitializationError("Parameter 'model_id' is required for assistant creation.") + + client = await self.ensure_client() + created_assistant = await client.beta.assistants.create(name=self.assistant_name, model=self.model_id) self.assistant_id = created_assistant.id self._should_delete_assistant = True @@ -233,6 +238,7 @@ async 
def _create_assistant_stream( Returns: tuple: (stream, final_thread_id) """ + client = await self.ensure_client() # Get any active run for this thread thread_run = await self._get_active_thread_run(thread_id) @@ -240,7 +246,7 @@ async def _create_assistant_stream( if thread_run is not None and tool_run_id is not None and tool_run_id == thread_run.id and tool_outputs: # There's an active run and we have tool results to submit, so submit the results. - stream = self.client.beta.threads.runs.submit_tool_outputs_stream( # type: ignore[reportDeprecated] + stream = client.beta.threads.runs.submit_tool_outputs_stream( # type: ignore[reportDeprecated] run_id=tool_run_id, thread_id=thread_run.thread_id, tool_outputs=tool_outputs ) final_thread_id = thread_run.thread_id @@ -249,7 +255,7 @@ async def _create_assistant_stream( final_thread_id = await self._prepare_thread(thread_id, thread_run, run_options) # Now create a new run and stream the results. - stream = self.client.beta.threads.runs.stream( # type: ignore[reportDeprecated] + stream = client.beta.threads.runs.stream( # type: ignore[reportDeprecated] assistant_id=assistant_id, thread_id=final_thread_id, **run_options ) @@ -257,19 +263,21 @@ async def _create_assistant_stream( async def _get_active_thread_run(self, thread_id: str | None) -> Run | None: """Get any active run for the given thread.""" + client = await self.ensure_client() if thread_id is None: return None - async for run in self.client.beta.threads.runs.list(thread_id=thread_id, limit=1, order="desc"): # type: ignore[reportDeprecated] + async for run in client.beta.threads.runs.list(thread_id=thread_id, limit=1, order="desc"): # type: ignore[reportDeprecated] if run.status not in ["completed", "cancelled", "failed", "expired"]: return run return None async def _prepare_thread(self, thread_id: str | None, thread_run: Run | None, run_options: dict[str, Any]) -> str: """Prepare the thread for a new run, creating or cleaning up as needed.""" + client = await 
self.ensure_client() if thread_id is None: # No thread ID was provided, so create a new thread. - thread = await self.client.beta.threads.create( # type: ignore[reportDeprecated] + thread = await client.beta.threads.create( # type: ignore[reportDeprecated] messages=run_options["additional_messages"], tool_resources=run_options.get("tool_resources"), metadata=run_options.get("metadata"), @@ -280,7 +288,7 @@ async def _prepare_thread(self, thread_id: str | None, thread_run: Run | None, r if thread_run is not None: # There was an active run; we need to cancel it before starting a new run. - await self.client.beta.threads.runs.cancel(run_id=thread_run.id, thread_id=thread_id) # type: ignore[reportDeprecated] + await client.beta.threads.runs.cancel(run_id=thread_run.id, thread_id=thread_id) # type: ignore[reportDeprecated] return thread_id diff --git a/python/packages/core/agent_framework/openai/_chat_client.py b/python/packages/core/agent_framework/openai/_chat_client.py index e6a4087508..02e0743e1b 100644 --- a/python/packages/core/agent_framework/openai/_chat_client.py +++ b/python/packages/core/agent_framework/openai/_chat_client.py @@ -69,10 +69,11 @@ async def _inner_get_response( chat_options: ChatOptions, **kwargs: Any, ) -> ChatResponse: + client = await self.ensure_client() options_dict = self._prepare_options(messages, chat_options) try: return self._create_chat_response( - await self.client.chat.completions.create(stream=False, **options_dict), chat_options + await client.chat.completions.create(stream=False, **options_dict), chat_options ) except BadRequestError as ex: if ex.code == "content_filter": @@ -97,10 +98,11 @@ async def _inner_get_streaming_response( chat_options: ChatOptions, **kwargs: Any, ) -> AsyncIterable[ChatResponseUpdate]: + client = await self.ensure_client() options_dict = self._prepare_options(messages, chat_options) options_dict["stream_options"] = {"include_usage": True} try: - async for chunk in await 
self.client.chat.completions.create(stream=True, **options_dict): + async for chunk in await client.chat.completions.create(stream=True, **options_dict): if len(chunk.choices) == 0 and chunk.usage is None: continue yield self._create_chat_response_update(chunk) diff --git a/python/packages/core/agent_framework/openai/_responses_client.py b/python/packages/core/agent_framework/openai/_responses_client.py index 149fe4bfac..447333447a 100644 --- a/python/packages/core/agent_framework/openai/_responses_client.py +++ b/python/packages/core/agent_framework/openai/_responses_client.py @@ -89,23 +89,24 @@ async def _inner_get_response( chat_options: ChatOptions, **kwargs: Any, ) -> ChatResponse: - options_dict = self._prepare_options(messages, chat_options) + client = await self.ensure_client() + run_options = await self.prepare_options(messages, chat_options) try: - if not chat_options.response_format: - response = await self.client.responses.create( + response_format = run_options.pop("response_format", None) + if not response_format: + response = await client.responses.create( stream=False, - **options_dict, + **run_options, ) - chat_options.conversation_id = response.id if chat_options.store is True else None + chat_options.conversation_id = self.get_conversation_id(response, chat_options.store) return self._create_response_content(response, chat_options=chat_options) # create call does not support response_format, so we need to handle it via parse call - resp_format = chat_options.response_format - parsed_response: ParsedResponse[BaseModel] = await self.client.responses.parse( - text_format=resp_format, + parsed_response: ParsedResponse[BaseModel] = await client.responses.parse( + text_format=response_format, stream=False, - **options_dict, + **run_options, ) - chat_options.conversation_id = parsed_response.id if chat_options.store is True else None + chat_options.conversation_id = self.get_conversation_id(parsed_response, chat_options.store) return 
self._create_response_content(parsed_response, chat_options=chat_options) except BadRequestError as ex: if ex.code == "content_filter": @@ -130,13 +131,15 @@ async def _inner_get_streaming_response( chat_options: ChatOptions, **kwargs: Any, ) -> AsyncIterable[ChatResponseUpdate]: - options_dict = self._prepare_options(messages, chat_options) + client = await self.ensure_client() + run_options = await self.prepare_options(messages, chat_options) function_call_ids: dict[int, tuple[str, str]] = {} # output_index: (call_id, name) try: - if not chat_options.response_format: - response = await self.client.responses.create( + response_format = run_options.pop("response_format", None) + if not response_format: + response = await client.responses.create( stream=True, - **options_dict, + **run_options, ) async for chunk in response: update = self._create_streaming_response_content( @@ -145,9 +148,9 @@ async def _inner_get_streaming_response( yield update return # create call does not support response_format, so we need to handle it via stream call - async with self.client.responses.stream( - text_format=chat_options.response_format, - **options_dict, + async with client.responses.stream( + text_format=response_format, + **run_options, ) as response: async for chunk in response: update = self._create_streaming_response_content( @@ -170,6 +173,12 @@ async def _inner_get_streaming_response( inner_exception=ex, ) from ex + def get_conversation_id( + self, response: OpenAIResponse | ParsedResponse[BaseModel], store: bool | None + ) -> str | None: + """Get the conversation ID from the response if store is True.""" + return response.id if store else None + # region Prep methods def _tools_to_response_tools( @@ -180,31 +189,7 @@ def _tools_to_response_tools( if isinstance(tool, ToolProtocol): match tool: case HostedMCPTool(): - mcp: Mcp = { - "type": "mcp", - "server_label": tool.name.replace(" ", "_"), - "server_url": str(tool.url), - "server_description": tool.description, - 
"headers": tool.headers, - } - if tool.allowed_tools: - mcp["allowed_tools"] = list(tool.allowed_tools) - if tool.approval_mode: - match tool.approval_mode: - case str(): - mcp["require_approval"] = ( - "always" if tool.approval_mode == "always_require" else "never" - ) - case _: - if always_require_approvals := tool.approval_mode.get("always_require_approval"): - mcp["require_approval"] = { - "always": {"tool_names": list(always_require_approvals)} - } - if never_require_approvals := tool.approval_mode.get("never_require_approval"): - mcp["require_approval"] = { - "never": {"tool_names": list(never_require_approvals)} - } - response_tools.append(mcp) + response_tools.append(self.get_mcp_tool(tool)) case HostedCodeInterpreterTool(): tool_args: CodeInterpreterContainerCodeInterpreterToolAuto = {"type": "auto"} if tool.inputs: @@ -306,12 +291,36 @@ def _tools_to_response_tools( response_tools.append(tool_dict) return response_tools - def _prepare_options(self, messages: MutableSequence[ChatMessage], chat_options: ChatOptions) -> dict[str, Any]: + def get_mcp_tool(self, tool: HostedMCPTool) -> Any: + """Get MCP tool from HostedMCPTool.""" + mcp: Mcp = { + "type": "mcp", + "server_label": tool.name.replace(" ", "_"), + "server_url": str(tool.url), + "server_description": tool.description, + "headers": tool.headers, + } + if tool.allowed_tools: + mcp["allowed_tools"] = list(tool.allowed_tools) + if tool.approval_mode: + match tool.approval_mode: + case str(): + mcp["require_approval"] = "always" if tool.approval_mode == "always_require" else "never" + case _: + if always_require_approvals := tool.approval_mode.get("always_require_approval"): + mcp["require_approval"] = {"always": {"tool_names": list(always_require_approvals)}} + if never_require_approvals := tool.approval_mode.get("never_require_approval"): + mcp["require_approval"] = {"never": {"tool_names": list(never_require_approvals)}} + + return mcp + + async def prepare_options( + self, messages: 
MutableSequence[ChatMessage], chat_options: ChatOptions + ) -> dict[str, Any]: """Take ChatOptions and create the specific options for Responses API.""" - options_dict: dict[str, Any] = chat_options.to_dict( + run_options: dict[str, Any] = chat_options.to_dict( exclude={ "type", - "response_format", # handled in inner get methods "presence_penalty", # not supported "frequency_penalty", # not supported "logit_bias", # not supported @@ -320,6 +329,10 @@ def _prepare_options(self, messages: MutableSequence[ChatMessage], chat_options: "instructions", # already added as system message } ) + + if chat_options.response_format: + run_options["response_format"] = chat_options.response_format + translations = { "model_id": "model", "allow_multiple_tool_calls": "parallel_tool_calls", @@ -327,35 +340,37 @@ def _prepare_options(self, messages: MutableSequence[ChatMessage], chat_options: "max_tokens": "max_output_tokens", } for old_key, new_key in translations.items(): - if old_key in options_dict and old_key != new_key: - options_dict[new_key] = options_dict.pop(old_key) + if old_key in run_options and old_key != new_key: + run_options[new_key] = run_options.pop(old_key) # tools if chat_options.tools is None: - options_dict.pop("parallel_tool_calls", None) + run_options.pop("parallel_tool_calls", None) else: - options_dict["tools"] = self._tools_to_response_tools(chat_options.tools) + run_options["tools"] = self._tools_to_response_tools(chat_options.tools) # model id - if not options_dict.get("model"): - options_dict["model"] = self.model_id + if not run_options.get("model"): + if not self.model_id: + raise ValueError("model_id must be a non-empty string") + run_options["model"] = self.model_id # messages request_input = self._prepare_chat_messages_for_request(messages) if not request_input: raise ServiceInvalidRequestError("Messages are required for chat completions") - options_dict["input"] = request_input + run_options["input"] = request_input # additional provider specific 
settings - if additional_properties := options_dict.pop("additional_properties", None): + if additional_properties := run_options.pop("additional_properties", None): for key, value in additional_properties.items(): if value is not None: - options_dict[key] = value - if "store" not in options_dict: - options_dict["store"] = False - if (tool_choice := options_dict.get("tool_choice")) and len(tool_choice.keys()) == 1: - options_dict["tool_choice"] = tool_choice["mode"] - return options_dict + run_options[key] = value + if "store" not in run_options: + run_options["store"] = False + if (tool_choice := run_options.get("tool_choice")) and len(tool_choice.keys()) == 1: + run_options["tool_choice"] = tool_choice["mode"] + return run_options def _prepare_chat_messages_for_request(self, chat_messages: Sequence[ChatMessage]) -> list[dict[str, Any]]: """Prepare the chat messages for a request. @@ -504,7 +519,6 @@ def _openai_content_parser( # call_id for the result needs to be the same as the call_id for the function call args: dict[str, Any] = { "call_id": content.call_id, - "id": call_id_to_id.get(content.call_id), "type": "function_call_output", } if content.result: @@ -734,7 +748,7 @@ def _create_response_content( "raw_representation": response, } if chat_options.store: - args["conversation_id"] = response.id + args["conversation_id"] = self.get_conversation_id(response, chat_options.store) if response.usage and (usage_details := self._usage_details_from_openai(response.usage)): args["usage_details"] = usage_details if structured_response: @@ -834,7 +848,7 @@ def _create_streaming_response_content( contents.append(TextReasoningContent(text=event.text, raw_representation=event)) metadata.update(self._get_metadata_from_response(event)) case "response.completed": - conversation_id = event.response.id if chat_options.store is True else None + conversation_id = self.get_conversation_id(event.response, chat_options.store) model = event.response.model if event.response.usage: 
usage = self._usage_details_from_openai(event.response.usage) diff --git a/python/packages/core/agent_framework/openai/_shared.py b/python/packages/core/agent_framework/openai/_shared.py index bea58786c8..20c719e09e 100644 --- a/python/packages/core/agent_framework/openai/_shared.py +++ b/python/packages/core/agent_framework/openai/_shared.py @@ -127,18 +127,18 @@ class OpenAIBase(SerializationMixin): INJECTABLE: ClassVar[set[str]] = {"client"} - def __init__(self, *, client: AsyncOpenAI, model_id: str, **kwargs: Any) -> None: + def __init__(self, *, model_id: str | None = None, client: AsyncOpenAI | None = None, **kwargs: Any) -> None: """Initialize OpenAIBase. Keyword Args: client: The AsyncOpenAI client instance. - model_id: The AI model ID to use (non-empty, whitespace stripped). + model_id: The AI model ID to use. **kwargs: Additional keyword arguments. """ - if not model_id or not model_id.strip(): - raise ValueError("model_id must be a non-empty string") self.client = client - self.model_id = model_id.strip() + self.model_id = None + if model_id: + self.model_id = model_id.strip() # Call super().__init__() to continue MRO chain (e.g., BaseChatClient) # Extract known kwargs that belong to other base classes @@ -162,6 +162,21 @@ def __init__(self, *, client: AsyncOpenAI, model_id: str, **kwargs: Any) -> None for key, value in kwargs.items(): setattr(self, key, value) + async def initialize_client(self) -> None: + """Initialize OpenAI client asynchronously. + + Override in subclasses to initialize the OpenAI client asynchronously. 
+ """ + pass + + async def ensure_client(self) -> AsyncOpenAI: + """Ensure OpenAI client is initialized.""" + await self.initialize_client() + if self.client is None: + raise ServiceInitializationError("OpenAI client is not initialized") + + return self.client + def _get_api_key( self, api_key: str | SecretStr | Callable[[], str | Awaitable[str]] | None ) -> str | Callable[[], str | Awaitable[str]] | None: diff --git a/python/packages/core/tests/openai/test_openai_responses_client.py b/python/packages/core/tests/openai/test_openai_responses_client.py index 5ff4bb3de3..4700950439 100644 --- a/python/packages/core/tests/openai/test_openai_responses_client.py +++ b/python/packages/core/tests/openai/test_openai_responses_client.py @@ -1407,27 +1407,27 @@ def test_create_response_content_image_generation_fallback(): assert f"data:image/png;base64,{unrecognized_base64}" == content.uri -def test_prepare_options_store_parameter_handling() -> None: +async def test_prepare_options_store_parameter_handling() -> None: client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") messages = [ChatMessage(role="user", text="Test message")] test_conversation_id = "test-conversation-123" chat_options = ChatOptions(store=True, conversation_id=test_conversation_id) - options = client._prepare_options(messages, chat_options) # type: ignore + options = await client.prepare_options(messages, chat_options) assert options["store"] is True assert options["previous_response_id"] == test_conversation_id chat_options = ChatOptions(store=False, conversation_id="") - options = client._prepare_options(messages, chat_options) # type: ignore + options = await client.prepare_options(messages, chat_options) assert options["store"] is False chat_options = ChatOptions(store=None, conversation_id=None) - options = client._prepare_options(messages, chat_options) # type: ignore + options = await client.prepare_options(messages, chat_options) assert options["store"] is False assert 
"previous_response_id" not in options chat_options = ChatOptions() - options = client._prepare_options(messages, chat_options) # type: ignore + options = await client.prepare_options(messages, chat_options) assert options["store"] is False assert "previous_response_id" not in options diff --git a/python/samples/README.md b/python/samples/README.md index 330d6f03fc..d18679d23c 100644 --- a/python/samples/README.md +++ b/python/samples/README.md @@ -21,20 +21,20 @@ This directory contains samples demonstrating the capabilities of Microsoft Agen | File | Description | |------|-------------| -| [`getting_started/agents/azure_ai/azure_ai_basic.py`](./getting_started/agents/azure_ai/azure_ai_basic.py) | Azure AI Agent Basic Example | -| [`getting_started/agents/azure_ai/azure_ai_with_azure_ai_search.py`](./getting_started/agents/azure_ai/azure_ai_with_azure_ai_search.py) | Azure AI Agent with Azure AI Search Example | -| [`getting_started/agents/azure_ai/azure_ai_with_bing_grounding.py`](./getting_started/agents/azure_ai/azure_ai_with_bing_grounding.py) | Azure AI agent with Bing Grounding search for real-time web information | -| [`getting_started/agents/azure_ai/azure_ai_with_code_interpreter.py`](./getting_started/agents/azure_ai/azure_ai_with_code_interpreter.py) | Azure AI Agent with Code Interpreter Example | -| [`getting_started/agents/azure_ai/azure_ai_with_existing_agent.py`](./getting_started/agents/azure_ai/azure_ai_with_existing_agent.py) | Azure AI Agent with Existing Agent Example | -| [`getting_started/agents/azure_ai/azure_ai_with_existing_thread.py`](./getting_started/agents/azure_ai/azure_ai_with_existing_thread.py) | Azure AI Agent with Existing Thread Example | -| [`getting_started/agents/azure_ai/azure_ai_with_explicit_settings.py`](./getting_started/agents/azure_ai/azure_ai_with_explicit_settings.py) | Azure AI Agent with Explicit Settings Example | -| 
[`getting_started/agents/azure_ai/azure_ai_with_file_search.py`](./getting_started/agents/azure_ai/azure_ai_with_file_search.py) | Azure AI agent with File Search capabilities | -| [`getting_started/agents/azure_ai/azure_ai_with_function_tools.py`](./getting_started/agents/azure_ai/azure_ai_with_function_tools.py) | Azure AI Agent with Function Tools Example | -| [`getting_started/agents/azure_ai/azure_ai_with_hosted_mcp.py`](./getting_started/agents/azure_ai/azure_ai_with_hosted_mcp.py) | Azure AI Agent with Hosted MCP Example | -| [`getting_started/agents/azure_ai/azure_ai_with_local_mcp.py`](./getting_started/agents/azure_ai/azure_ai_with_local_mcp.py) | Azure AI Agent with Local MCP Example | -| [`getting_started/agents/azure_ai/azure_ai_with_multiple_tools.py`](./getting_started/agents/azure_ai/azure_ai_with_multiple_tools.py) | Azure AI Agent with Multiple Tools Example | -| [`getting_started/agents/azure_ai/azure_ai_with_openapi_tools.py`](./getting_started/agents/azure_ai/azure_ai_with_openapi_tools.py) | Azure AI agent with OpenAPI tools | -| [`getting_started/agents/azure_ai/azure_ai_with_thread.py`](./getting_started/agents/azure_ai/azure_ai_with_thread.py) | Azure AI Agent with Thread Management Example | +| [`getting_started/agents/azure_ai_agent/azure_ai_basic.py`](./getting_started/agents/azure_ai_agent/azure_ai_basic.py) | Azure AI Agent Basic Example | +| [`getting_started/agents/azure_ai_agent/azure_ai_with_azure_ai_search.py`](./getting_started/agents/azure_ai_agent/azure_ai_with_azure_ai_search.py) | Azure AI Agent with Azure AI Search Example | +| [`getting_started/agents/azure_ai_agent/azure_ai_with_bing_grounding.py`](./getting_started/agents/azure_ai_agent/azure_ai_with_bing_grounding.py) | Azure AI agent with Bing Grounding search for real-time web information | +| [`getting_started/agents/azure_ai_agent/azure_ai_with_code_interpreter.py`](./getting_started/agents/azure_ai_agent/azure_ai_with_code_interpreter.py) | Azure AI Agent with Code
Interpreter Example | +| [`getting_started/agents/azure_ai_agent/azure_ai_with_existing_agent.py`](./getting_started/agents/azure_ai_agent/azure_ai_with_existing_agent.py) | Azure AI Agent with Existing Agent Example | +| [`getting_started/agents/azure_ai_agent/azure_ai_with_existing_thread.py`](./getting_started/agents/azure_ai_agent/azure_ai_with_existing_thread.py) | Azure AI Agent with Existing Thread Example | +| [`getting_started/agents/azure_ai_agent/azure_ai_with_explicit_settings.py`](./getting_started/agents/azure_ai_agent/azure_ai_with_explicit_settings.py) | Azure AI Agent with Explicit Settings Example | +| [`getting_started/agents/azure_ai_agent/azure_ai_with_file_search.py`](./getting_started/agents/azure_ai_agent/azure_ai_with_file_search.py) | Azure AI agent with File Search capabilities | +| [`getting_started/agents/azure_ai_agent/azure_ai_with_function_tools.py`](./getting_started/agents/azure_ai_agent/azure_ai_with_function_tools.py) | Azure AI Agent with Function Tools Example | +| [`getting_started/agents/azure_ai_agent/azure_ai_with_hosted_mcp.py`](./getting_started/agents/azure_ai_agent/azure_ai_with_hosted_mcp.py) | Azure AI Agent with Hosted MCP Example | +| [`getting_started/agents/azure_ai_agent/azure_ai_with_local_mcp.py`](./getting_started/agents/azure_ai_agent/azure_ai_with_local_mcp.py) | Azure AI Agent with Local MCP Example | +| [`getting_started/agents/azure_ai_agent/azure_ai_with_multiple_tools.py`](./getting_started/agents/azure_ai_agent/azure_ai_with_multiple_tools.py) | Azure AI Agent with Multiple Tools Example | +| [`getting_started/agents/azure_ai_agent/azure_ai_with_openapi_tools.py`](./getting_started/agents/azure_ai_agent/azure_ai_with_openapi_tools.py) | Azure AI agent with OpenAPI tools | +| [`getting_started/agents/azure_ai_agent/azure_ai_with_thread.py`](./getting_started/agents/azure_ai_agent/azure_ai_with_thread.py) | Azure AI Agent with Thread Management Example | ### Azure OpenAI diff --git 
a/python/samples/getting_started/agents/azure_ai/README.md b/python/samples/getting_started/agents/azure_ai/README.md index 375f682474..16ebab6838 100644 --- a/python/samples/getting_started/agents/azure_ai/README.md +++ b/python/samples/getting_started/agents/azure_ai/README.md @@ -1,24 +1,21 @@ # Azure AI Agent Examples -This folder contains examples demonstrating different ways to create and use agents with the Azure AI chat client from the `agent_framework.azure` package. +This folder contains examples demonstrating different ways to create and use agents with the Azure AI client from the `agent_framework.azure` package. ## Examples | File | Description | |------|-------------| -| [`azure_ai_basic.py`](azure_ai_basic.py) | The simplest way to create an agent using `ChatAgent` with `AzureAIAgentClient`. It automatically handles all configuration using environment variables. | -| [`azure_ai_with_bing_grounding.py`](azure_ai_with_bing_grounding.py) | Shows how to use Bing Grounding search with Azure AI agents to find real-time information from the web. Demonstrates web search capabilities with proper source citations and comprehensive error handling. | -| [`azure_ai_with_code_interpreter.py`](azure_ai_with_code_interpreter.py) | Shows how to use the HostedCodeInterpreterTool with Azure AI agents to write and execute Python code. Includes helper methods for accessing code interpreter data from response chunks. | -| [`azure_ai_with_existing_agent.py`](azure_ai_with_existing_agent.py) | Shows how to work with a pre-existing agent by providing the agent ID to the Azure AI chat client. This example also demonstrates proper cleanup of manually created agents. | -| [`azure_ai_with_existing_thread.py`](azure_ai_with_existing_thread.py) | Shows how to work with a pre-existing thread by providing the thread ID to the Azure AI chat client. This example also demonstrates proper cleanup of manually created threads. 
| -| [`azure_ai_with_explicit_settings.py`](azure_ai_with_explicit_settings.py) | Shows how to create an agent with explicitly configured `AzureAIAgentClient` settings, including project endpoint, model deployment, credentials, and agent name. | -| [`azure_ai_with_azure_ai_search.py`](azure_ai_with_azure_ai_search.py) | Demonstrates how to use Azure AI Search with Azure AI agents to search through indexed data. Shows how to configure search parameters, query types, and integrate with existing search indexes. | -| [`azure_ai_with_file_search.py`](azure_ai_with_file_search.py) | Demonstrates how to use the HostedFileSearchTool with Azure AI agents to search through uploaded documents. Shows file upload, vector store creation, and querying document content. Includes both streaming and non-streaming examples. | -| [`azure_ai_with_function_tools.py`](azure_ai_with_function_tools.py) | Demonstrates how to use function tools with agents. Shows both agent-level tools (defined when creating the agent) and query-level tools (provided with specific queries). | -| [`azure_ai_with_hosted_mcp.py`](azure_ai_with_hosted_mcp.py) | Shows how to integrate Azure AI agents with hosted Model Context Protocol (MCP) servers for enhanced functionality and tool integration. Demonstrates remote MCP server connections and tool discovery. | -| [`azure_ai_with_local_mcp.py`](azure_ai_with_local_mcp.py) | Shows how to integrate Azure AI agents with local Model Context Protocol (MCP) servers for enhanced functionality and tool integration. Demonstrates both agent-level and run-level tool configuration. | -| [`azure_ai_with_multiple_tools.py`](azure_ai_with_multiple_tools.py) | Demonstrates how to use multiple tools together with Azure AI agents, including web search, MCP servers, and function tools. Shows coordinated multi-tool interactions and approval workflows. 
| -| [`azure_ai_with_openapi_tools.py`](azure_ai_with_openapi_tools.py) | Demonstrates how to use OpenAPI tools with Azure AI agents to integrate external REST APIs. Shows OpenAPI specification loading, anonymous authentication, thread context management, and coordinated multi-API conversations using weather and countries APIs. | +| [`azure_ai_basic.py`](azure_ai_basic.py) | The simplest way to create an agent using `AzureAIClient`. Demonstrates both streaming and non-streaming responses with function tools. Shows automatic agent creation and basic weather functionality. | +| [`azure_ai_use_latest_version.py`](azure_ai_use_latest_version.py) | Demonstrates how to reuse the latest version of an existing agent instead of creating a new agent version on each instantiation using the `use_latest_version=True` parameter. | +| [`azure_ai_with_azure_ai_search.py`](azure_ai_with_azure_ai_search.py) | Shows how to use Azure AI Search with Azure AI agents to search through indexed data and answer user questions with proper citations. Requires an Azure AI Search connection and index configured in your Azure AI project. | +| [`azure_ai_with_code_interpreter.py`](azure_ai_with_code_interpreter.py) | Shows how to use the `HostedCodeInterpreterTool` with Azure AI agents to write and execute Python code for mathematical problem solving and data analysis. | +| [`azure_ai_with_existing_agent.py`](azure_ai_with_existing_agent.py) | Shows how to work with a pre-existing agent by providing the agent name and version to the Azure AI client. Demonstrates agent reuse patterns for production scenarios. | +| [`azure_ai_with_existing_conversation.py`](azure_ai_with_existing_conversation.py) | Demonstrates how to use an existing conversation created on the service side with Azure AI agents. Shows two approaches: specifying conversation ID at the client level and using AgentThread with an existing conversation ID. 
| +| [`azure_ai_with_explicit_settings.py`](azure_ai_with_explicit_settings.py) | Shows how to create an agent with explicitly configured `AzureAIClient` settings, including project endpoint, model deployment, and credentials rather than relying on environment variable defaults. | +| [`azure_ai_with_file_search.py`](azure_ai_with_file_search.py) | Shows how to use the `HostedFileSearchTool` with Azure AI agents to upload files, create vector stores, and enable agents to search through uploaded documents to answer user questions. | +| [`azure_ai_with_hosted_mcp.py`](azure_ai_with_hosted_mcp.py) | Shows how to integrate hosted Model Context Protocol (MCP) tools with Azure AI Agent. | +| [`azure_ai_with_response_format.py`](azure_ai_with_response_format.py) | Shows how to use structured outputs (response format) with Azure AI agents using Pydantic models to enforce specific response schemas. | | [`azure_ai_with_thread.py`](azure_ai_with_thread.py) | Demonstrates thread management with Azure AI agents, including automatic thread creation for stateless conversations and explicit thread management for maintaining conversation context across multiple interactions. | ## Environment Variables @@ -28,27 +25,18 @@ Before running the examples, you need to set up your environment variables. You ### Option 1: Using a .env file (Recommended) 1. Copy the `.env.example` file from the `python` directory to create a `.env` file: + ```bash - cp ../../.env.example ../../.env + cp ../../../../.env.example ../../../../.env ``` 2. Edit the `.env` file and add your values: - ``` + + ```env AZURE_AI_PROJECT_ENDPOINT="your-project-endpoint" AZURE_AI_MODEL_DEPLOYMENT_NAME="your-model-deployment-name" ``` -3. 
For samples using Bing Grounding search (like `azure_ai_with_bing_grounding.py` and `azure_ai_with_multiple_tools.py`), you'll also need: - ``` - BING_CONNECTION_ID="your-bing-connection-id" - ``` - - To get your Bing connection details: - - Go to [Azure AI Foundry portal](https://ai.azure.com) - - Navigate to your project's "Connected resources" section - - Add a new connection for "Grounding with Bing Search" - - Copy the ID - ### Option 2: Using environment variables directly Set the environment variables in your shell: @@ -56,7 +44,6 @@ Set the environment variables in your shell: ```bash export AZURE_AI_PROJECT_ENDPOINT="your-project-endpoint" export AZURE_AI_MODEL_DEPLOYMENT_NAME="your-model-deployment-name" -export BING_CONNECTION_ID="your-bing-connection-id" ``` ### Required Variables @@ -64,6 +51,24 @@ export BING_CONNECTION_ID="your-bing-connection-id" - `AZURE_AI_PROJECT_ENDPOINT`: Your Azure AI project endpoint (required for all examples) - `AZURE_AI_MODEL_DEPLOYMENT_NAME`: The name of your model deployment (required for all examples) -### Optional Variables +## Authentication + +All examples use `AzureCliCredential` for authentication by default. Before running the examples: + +1. Install the Azure CLI +2. Run `az login` to authenticate with your Azure account +3. Ensure you have appropriate permissions to the Azure AI project + +Alternatively, you can replace `AzureCliCredential` with other authentication options like `DefaultAzureCredential` or environment-based credentials. + +## Running the Examples + +Each example can be run independently. Navigate to this directory and run any example: + +```bash +python azure_ai_basic.py +python azure_ai_with_code_interpreter.py +# ... 
etc +``` -- `BING_CONNECTION_ID`: Your Bing connection ID (required for `azure_ai_with_bing_grounding.py` and `azure_ai_with_multiple_tools.py`) +The examples demonstrate various patterns for working with Azure AI agents, from basic usage to advanced scenarios like thread management and structured outputs. diff --git a/python/samples/getting_started/agents/azure_ai/azure_ai_basic.py b/python/samples/getting_started/agents/azure_ai/azure_ai_basic.py index 633b5b9daa..87a121d015 100644 --- a/python/samples/getting_started/agents/azure_ai/azure_ai_basic.py +++ b/python/samples/getting_started/agents/azure_ai/azure_ai_basic.py @@ -4,15 +4,15 @@ from random import randint from typing import Annotated -from agent_framework.azure import AzureAIAgentClient +from agent_framework.azure import AzureAIClient from azure.identity.aio import AzureCliCredential from pydantic import Field """ Azure AI Agent Basic Example -This sample demonstrates basic usage of AzureAIAgentClient to create agents with automatic -lifecycle management. Shows both streaming and non-streaming responses with function tools. +This sample demonstrates basic usage of AzureAIClient. +Shows both streaming and non-streaming responses with function tools. """ @@ -28,14 +28,13 @@ async def non_streaming_example() -> None: """Example of non-streaming response (get the complete result at once).""" print("=== Non-streaming Response Example ===") - # Since no Agent ID is provided, the agent will be automatically created - # and deleted after getting a response + # Since no Agent ID is provided, the agent will be automatically created. # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred # authentication option. 
async with ( AzureCliCredential() as credential, - AzureAIAgentClient(async_credential=credential).create_agent( - name="WeatherAgent", + AzureAIClient(async_credential=credential).create_agent( + name="BasicWeatherAgent", instructions="You are a helpful weather agent.", tools=get_weather, ) as agent, @@ -50,19 +49,18 @@ async def streaming_example() -> None: """Example of streaming response (get results as they are generated).""" print("=== Streaming Response Example ===") - # Since no Agent ID is provided, the agent will be automatically created - # and deleted after getting a response + # Since no Agent ID is provided, the agent will be automatically created. # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred # authentication option. async with ( AzureCliCredential() as credential, - AzureAIAgentClient(async_credential=credential).create_agent( - name="WeatherAgent", + AzureAIClient(async_credential=credential).create_agent( + name="BasicWeatherAgent", instructions="You are a helpful weather agent.", tools=get_weather, ) as agent, ): - query = "What's the weather like in Portland?" + query = "What's the weather like in Tokyo?" print(f"User: {query}") print("Agent: ", end="", flush=True) async for chunk in agent.run_stream(query): diff --git a/python/samples/getting_started/agents/azure_ai/azure_ai_use_latest_version.py b/python/samples/getting_started/agents/azure_ai/azure_ai_use_latest_version.py new file mode 100644 index 0000000000..1875936183 --- /dev/null +++ b/python/samples/getting_started/agents/azure_ai/azure_ai_use_latest_version.py @@ -0,0 +1,67 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +import asyncio +from random import randint +from typing import Annotated + +from agent_framework.azure import AzureAIClient +from azure.identity.aio import AzureCliCredential +from pydantic import Field + +""" +Azure AI Agent Latest Version Example + +This sample demonstrates how to reuse the latest version of an existing agent +instead of creating a new agent version on each instantiation. The first call creates a new agent, +while subsequent calls with `use_latest_version=True` reuse the latest agent version. +""" + + +def get_weather( + location: Annotated[str, Field(description="The location to get the weather for.")], +) -> str: + """Get the weather for a given location.""" + conditions = ["sunny", "cloudy", "rainy", "stormy"] + return f"The weather in {location} is {conditions[randint(0, 3)]} with a high of {randint(10, 30)}°C." + + +async def main() -> None: + # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred + # authentication option. + async with AzureCliCredential() as credential: + async with ( + AzureAIClient( + async_credential=credential, + ).create_agent( + name="MyWeatherAgent", + instructions="You are a helpful weather agent.", + tools=get_weather, + ) as agent, + ): + # First query will create a new agent + query = "What's the weather like in Seattle?" + print(f"User: {query}") + result = await agent.run(query) + print(f"Agent: {result}\n") + + # Create a new agent instance + async with ( + AzureAIClient( + async_credential=credential, + # This parameter will allow to re-use latest agent version + # instead of creating a new one + use_latest_version=True, + ).create_agent( + name="MyWeatherAgent", + instructions="You are a helpful weather agent.", + tools=get_weather, + ) as agent, + ): + query = "What's the weather like in Tokyo?" 
+ print(f"User: {query}") + result = await agent.run(query) + print(f"Agent: {result}\n") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/getting_started/agents/azure_ai/azure_ai_with_azure_ai_search.py b/python/samples/getting_started/agents/azure_ai/azure_ai_with_azure_ai_search.py index 7d094089bc..47dd026c89 100644 --- a/python/samples/getting_started/agents/azure_ai/azure_ai_with_azure_ai_search.py +++ b/python/samples/getting_started/agents/azure_ai/azure_ai_with_azure_ai_search.py @@ -1,120 +1,49 @@ # Copyright (c) Microsoft. All rights reserved. - import asyncio import os -from agent_framework import ChatAgent, CitationAnnotation -from agent_framework.azure import AzureAIAgentClient -from azure.ai.agents.aio import AgentsClient -from azure.ai.projects.aio import AIProjectClient -from azure.ai.projects.models import ConnectionType +from agent_framework.azure import AzureAIClient from azure.identity.aio import AzureCliCredential """ Azure AI Agent with Azure AI Search Example -This sample demonstrates how to create an Azure AI agent that uses Azure AI Search -to search through indexed hotel data and answer user questions about hotels. +This sample demonstrates usage of AzureAIClient with Azure AI Search +to search through indexed data and answer user questions about it. Prerequisites: -1. Set AZURE_AI_PROJECT_ENDPOINT and AZURE_AI_MODEL_DEPLOYMENT_NAME environment variables +1. Set AZURE_AI_PROJECT_ENDPOINT and AZURE_AI_MODEL_DEPLOYMENT_NAME environment variables. 2. Ensure you have an Azure AI Search connection configured in your Azure AI project -3. 
The search index "hotels-sample-index" should exist in your Azure AI Search service - (you can create this using the Azure portal with sample hotel data) - -NOTE: To ensure consistent search tool usage: -- Include explicit instructions for the agent to use the search tool -- Mention the search requirement in your queries -- Use `tool_choice="required"` to force tool usage - -More info on `query type` can be found here: -https://learn.microsoft.com/en-us/python/api/azure-ai-agents/azure.ai.agents.models.aisearchindexresource?view=azure-python-preview + and set AI_SEARCH_PROJECT_CONNECTION_ID and AI_SEARCH_INDEX_NAME environment variable. """ async def main() -> None: - """Main function demonstrating Azure AI agent with raw Azure AI Search tool.""" - print("=== Azure AI Agent with Raw Azure AI Search Tool ===") - - # Create the client and manually create an agent with Azure AI Search tool async with ( AzureCliCredential() as credential, - AIProjectClient(endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], credential=credential) as project_client, - AgentsClient(endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], credential=credential) as agents_client, - ): - ai_search_conn_id = "" - async for connection in project_client.connections.list(): - if connection.type == ConnectionType.AZURE_AI_SEARCH: - ai_search_conn_id = connection.id - break - - # 1. Create Azure AI agent with the search tool - azure_ai_agent = await project_client.agents.create_agent( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], - name="HotelSearchAgent", - instructions=( - "You are a helpful agent that searches hotel information using Azure AI Search. " - "Always use the search tool and index to find hotel data and provide accurate information." - ), - tools=[{"type": "azure_ai_search"}], - tool_resources={ + AzureAIClient(async_credential=credential).create_agent( + name="MySearchAgent", + instructions="""You are a helpful assistant. 
You must always provide citations for + answers using the tool and render them as: `[message_idx:search_idx†source]`.""", + tools={ + "type": "azure_ai_search", "azure_ai_search": { "indexes": [ { - "index_connection_id": ai_search_conn_id, - "index_name": "hotels-sample-index", - "query_type": "vector", + "project_connection_id": os.environ["AI_SEARCH_PROJECT_CONNECTION_ID"], + "index_name": os.environ["AI_SEARCH_INDEX_NAME"], + # For query_type=vector, ensure your index has a field with vectorized data. + "query_type": "simple", } ] - } + }, }, - ) - - # 2. Create chat client with the existing agent - chat_client = AzureAIAgentClient(agents_client=agents_client, agent_id=azure_ai_agent.id) - - try: - async with ChatAgent( - chat_client=chat_client, - # Additional instructions for this specific conversation - instructions=("You are a helpful agent that uses the search tool and index to find hotel information."), - ) as agent: - print("This agent uses raw Azure AI Search tool to search hotel data.\n") - - # 3. Simulate conversation with the agent - user_input = ( - "Use Azure AI search knowledge tool to find detailed information about a winter hotel." - " Use the search tool and index." 
# You can modify prompt to force tool usage - ) - print(f"User: {user_input}") - print("Agent: ", end="", flush=True) - - # Stream the response and collect citations - citations: list[CitationAnnotation] = [] - async for chunk in agent.run_stream(user_input): - if chunk.text: - print(chunk.text, end="", flush=True) - - # Collect citations from Azure AI Search responses - for content in getattr(chunk, "contents", []): - annotations = getattr(content, "annotations", []) - if annotations: - citations.extend(annotations) - - print() - - # Display collected citations - if citations: - print("\n\nCitations:") - for i, citation in enumerate(citations, 1): - print(f"[{i}] Reference: {citation.url}") - - print("\n" + "=" * 50 + "\n") - print("Hotel search conversation completed!") - - finally: - # Clean up the agent manually - await project_client.agents.delete_agent(azure_ai_agent.id) + ) as agent, + ): + query = "Tell me about insurance options" + print(f"User: {query}") + result = await agent.run(query) + print(f"Result: {result}\n") if __name__ == "__main__": diff --git a/python/samples/getting_started/agents/azure_ai/azure_ai_with_code_interpreter.py b/python/samples/getting_started/agents/azure_ai/azure_ai_with_code_interpreter.py index f4bf48bd59..a2ea4aafe3 100644 --- a/python/samples/getting_started/agents/azure_ai/azure_ai_with_code_interpreter.py +++ b/python/samples/getting_started/agents/azure_ai/azure_ai_with_code_interpreter.py @@ -2,57 +2,53 @@ import asyncio -from agent_framework import AgentRunResponse, ChatResponseUpdate, HostedCodeInterpreterTool -from agent_framework.azure import AzureAIAgentClient -from azure.ai.agents.models import ( - RunStepDeltaCodeInterpreterDetailItemObject, -) +from agent_framework import ChatResponse, HostedCodeInterpreterTool +from agent_framework.azure import AzureAIClient from azure.identity.aio import AzureCliCredential +from openai.types.responses.response import Response as OpenAIResponse +from 
openai.types.responses.response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall """ -Azure AI Agent with Code Interpreter Example +Azure AI Agent Code Interpreter Example -This sample demonstrates using HostedCodeInterpreterTool with Azure AI Agents +This sample demonstrates using HostedCodeInterpreterTool with AzureAIClient for Python code execution and mathematical problem solving. """ -def print_code_interpreter_inputs(response: AgentRunResponse) -> None: - """Helper method to access code interpreter data.""" - - print("\nCode Interpreter Inputs during the run:") - if response.raw_representation is None: - return - for chunk in response.raw_representation: - if isinstance(chunk, ChatResponseUpdate) and isinstance( - chunk.raw_representation, RunStepDeltaCodeInterpreterDetailItemObject - ): - print(chunk.raw_representation.input, end="") - print("\n") - - async def main() -> None: - """Example showing how to use the HostedCodeInterpreterTool with Azure AI.""" - print("=== Azure AI Agent with Code Interpreter Example ===") + """Example showing how to use the HostedCodeInterpreterTool with AzureAIClient.""" - # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred - # authentication option. async with ( AzureCliCredential() as credential, - AzureAIAgentClient(async_credential=credential) as chat_client, - ): - agent = chat_client.create_agent( - name="CodingAgent", - instructions=("You are a helpful assistant that can write and execute Python code to solve problems."), + AzureAIClient(async_credential=credential).create_agent( + instructions="You are a helpful assistant that can write and execute Python code to solve problems.", tools=HostedCodeInterpreterTool(), - ) - query = "Generate the factorial of 100 using python code, show the code and execute it." + ) as agent, + ): + query = "Use code to get the factorial of 100?" 
print(f"User: {query}") - response = await AgentRunResponse.from_agent_response_generator(agent.run_stream(query)) - print(f"Agent: {response}") - # To review the code interpreter outputs, you can access - # them from the response raw_representations, just uncomment the next line: - # print_code_interpreter_inputs(response) + result = await agent.run(query) + print(f"Result: {result}\n") + + if ( + isinstance(result.raw_representation, ChatResponse) + and isinstance(result.raw_representation.raw_representation, OpenAIResponse) + and len(result.raw_representation.raw_representation.output) > 0 + ): + # Find the first ResponseCodeInterpreterToolCall item + code_interpreter_item = next( + ( + item + for item in result.raw_representation.raw_representation.output + if isinstance(item, ResponseCodeInterpreterToolCall) + ), + None, + ) + + if code_interpreter_item is not None: + generated_code = code_interpreter_item.code + print(f"Generated code:\n{generated_code}") if __name__ == "__main__": diff --git a/python/samples/getting_started/agents/azure_ai/azure_ai_with_existing_agent.py b/python/samples/getting_started/agents/azure_ai/azure_ai_with_existing_agent.py index f0fc2c79fc..7486b19ec7 100644 --- a/python/samples/getting_started/agents/azure_ai/azure_ai_with_existing_agent.py +++ b/python/samples/getting_started/agents/azure_ai/azure_ai_with_existing_agent.py @@ -4,55 +4,60 @@ import os from agent_framework import ChatAgent -from agent_framework.azure import AzureAIAgentClient -from azure.ai.agents.aio import AgentsClient +from agent_framework.azure import AzureAIClient from azure.ai.projects.aio import AIProjectClient +from azure.ai.projects.models import PromptAgentDefinition from azure.identity.aio import AzureCliCredential """ Azure AI Agent with Existing Agent Example This sample demonstrates working with pre-existing Azure AI Agents by providing -agent IDs, showing agent reuse patterns for production scenarios. 
+agent name and version, showing agent reuse patterns for production scenarios. """ async def main() -> None: - print("=== Azure AI Chat Client with Existing Agent ===") - # Create the client async with ( AzureCliCredential() as credential, AIProjectClient(endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], credential=credential) as project_client, - AgentsClient(endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], credential=credential) as agents_client, ): - azure_ai_agent = await project_client.agents.create_agent( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], - # Create remote agent with default instructions - # These instructions will persist on created agent for every run. - instructions="End each response with [END].", + azure_ai_agent = await project_client.agents.create_version( + agent_name="MyNewTestAgent", + definition=PromptAgentDefinition( + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + # Setting specific requirements to verify that this agent is used. + instructions="End each response with [END].", + ), ) - chat_client = AzureAIAgentClient(agents_client=agents_client, agent_id=azure_ai_agent.id) + chat_client = AzureAIClient( + project_client=project_client, + agent_name=azure_ai_agent.name, + # Property agent_version is required for existing agents. + # If this property is not configured, the client will try to create a new agent using + # provided agent_name. + # It's also possible to leave agent_version empty but set use_latest_version=True. + # This will pull latest available agent version and use that version for operations. + agent_version=azure_ai_agent.version, + ) try: async with ChatAgent( chat_client=chat_client, - # Instructions here are applicable only to this ChatAgent instance - # These instructions will be combined with instructions on existing remote agent. - # The final instructions during the execution will look like: - # "'End each response with [END]. 
Respond with 'Hello World' only'" - instructions="Respond with 'Hello World' only", ) as agent: query = "How are you?" print(f"User: {query}") result = await agent.run(query) - # Based on local and remote instructions, the result will be - # 'Hello World [END]'. + # Response that indicates that previously created agent was used: + # "I'm here and ready to help you! How can I assist you today? [END]" print(f"Agent: {result}\n") finally: # Clean up the agent manually - await project_client.agents.delete_agent(azure_ai_agent.id) + await project_client.agents.delete_version( + agent_name=azure_ai_agent.name, agent_version=azure_ai_agent.version + ) if __name__ == "__main__": diff --git a/python/samples/getting_started/agents/azure_ai/azure_ai_with_existing_conversation.py b/python/samples/getting_started/agents/azure_ai/azure_ai_with_existing_conversation.py new file mode 100644 index 0000000000..a268b0db0e --- /dev/null +++ b/python/samples/getting_started/agents/azure_ai/azure_ai_with_existing_conversation.py @@ -0,0 +1,98 @@ +# Copyright (c) Microsoft. All rights reserved. +import asyncio +import os +from random import randint +from typing import Annotated + +from agent_framework.azure import AzureAIClient +from azure.ai.projects.aio import AIProjectClient +from azure.identity.aio import AzureCliCredential +from pydantic import Field + +""" +Azure AI Agent Existing Conversation Example + +This sample demonstrates usage of AzureAIClient with existing conversation created on service side. +""" + + +def get_weather( + location: Annotated[str, Field(description="The location to get the weather for.")], +) -> str: + """Get the weather for a given location.""" + conditions = ["sunny", "cloudy", "rainy", "stormy"] + return f"The weather in {location} is {conditions[randint(0, 3)]} with a high of {randint(10, 30)}°C." 
+ + +async def example_with_client() -> None: + """Example shows how to specify existing conversation ID when initializing Azure AI Client.""" + print("=== Azure AI Agent With Existing Conversation and Client ===") + async with ( + AzureCliCredential() as credential, + AIProjectClient(endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], credential=credential) as project_client, + ): + # Create a conversation using OpenAI client + openai_client = await project_client.get_openai_client() + conversation = await openai_client.conversations.create() + conversation_id = conversation.id + print(f"Conversation ID: {conversation_id}") + + async with AzureAIClient( + project_client=project_client, + # Specify conversation ID on client level + conversation_id=conversation_id, + ).create_agent( + name="BasicAgent", + instructions="You are a helpful agent.", + tools=get_weather, + ) as agent: + query = "What's the weather like in Seattle?" + print(f"User: {query}") + result = await agent.run(query) + print(f"Agent: {result.text}\n") + + query = "What was my last question?" 
+ print(f"User: {query}") + result = await agent.run(query) + print(f"Agent: {result.text}\n") + + +async def example_with_thread() -> None: + """This example shows how to specify existing conversation ID with AgentThread.""" + print("=== Azure AI Agent With Existing Conversation and Thread ===") + async with ( + AzureCliCredential() as credential, + AIProjectClient(endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], credential=credential) as project_client, + AzureAIClient(project_client=project_client).create_agent( + name="BasicAgent", + instructions="You are a helpful agent.", + tools=get_weather, + ) as agent, + ): + # Create a conversation using OpenAI client + openai_client = await project_client.get_openai_client() + conversation = await openai_client.conversations.create() + conversation_id = conversation.id + print(f"Conversation ID: {conversation_id}") + + # Create a thread with the existing ID + thread = agent.get_new_thread(service_thread_id=conversation_id) + + query = "What's the weather like in Seattle?" + print(f"User: {query}") + result = await agent.run(query, thread=thread) + print(f"Agent: {result.text}\n") + + query = "What was my last question?" 
+ print(f"User: {query}") + result = await agent.run(query, thread=thread) + print(f"Agent: {result.text}\n") + + +async def main() -> None: + await example_with_client() + await example_with_thread() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/getting_started/agents/azure_ai/azure_ai_with_explicit_settings.py b/python/samples/getting_started/agents/azure_ai/azure_ai_with_explicit_settings.py index 0ac2ee620c..7d6c91f731 100644 --- a/python/samples/getting_started/agents/azure_ai/azure_ai_with_explicit_settings.py +++ b/python/samples/getting_started/agents/azure_ai/azure_ai_with_explicit_settings.py @@ -6,7 +6,7 @@ from typing import Annotated from agent_framework import ChatAgent -from agent_framework.azure import AzureAIAgentClient +from agent_framework.azure import AzureAIClient from azure.identity.aio import AzureCliCredential from pydantic import Field @@ -27,28 +27,26 @@ def get_weather( async def main() -> None: - print("=== Azure AI Chat Client with Explicit Settings ===") - - # Since no Agent ID is provided, the agent will be automatically created - # and deleted after getting a response + # Since no Agent ID is provided, the agent will be automatically created. # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred # authentication option. async with ( AzureCliCredential() as credential, ChatAgent( - chat_client=AzureAIAgentClient( + chat_client=AzureAIClient( project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], model_deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], async_credential=credential, agent_name="WeatherAgent", - should_cleanup_agent=True, # Set to False if you want to disable automatic agent cleanup ), instructions="You are a helpful weather agent.", tools=get_weather, ) as agent, ): - result = await agent.run("What's the weather like in New York?") - print(f"Result: {result}\n") + query = "What's the weather like in New York?" 
+ print(f"User: {query}") + result = await agent.run(query) + print(f"Agent: {result}\n") if __name__ == "__main__": diff --git a/python/samples/getting_started/agents/azure_ai/azure_ai_with_file_search.py b/python/samples/getting_started/agents/azure_ai/azure_ai_with_file_search.py index 761f8b9c87..28c47de5ca 100644 --- a/python/samples/getting_started/agents/azure_ai/azure_ai_with_file_search.py +++ b/python/samples/getting_started/agents/azure_ai/azure_ai_with_file_search.py @@ -1,10 +1,12 @@ # Copyright (c) Microsoft. All rights reserved. import asyncio +import os from pathlib import Path from agent_framework import ChatAgent, HostedFileSearchTool, HostedVectorStoreContent -from agent_framework_azure_ai import AzureAIAgentClient +from agent_framework.azure import AzureAIClient +from azure.ai.agents.aio import AgentsClient from azure.ai.agents.models import FileInfo, VectorStore from azure.identity.aio import AzureCliCredential @@ -24,69 +26,50 @@ async def main() -> None: """Main function demonstrating Azure AI agent with file search capabilities.""" - client = AzureAIAgentClient(async_credential=AzureCliCredential()) file: FileInfo | None = None vector_store: VectorStore | None = None - try: - # 1. Upload file and create vector store - pdf_file_path = Path(__file__).parent.parent / "resources" / "employees.pdf" - print(f"Uploading file from: {pdf_file_path}") - - file = await client.project_client.agents.files.upload_and_poll( - file_path=str(pdf_file_path), purpose="assistants" - ) - print(f"Uploaded file, file ID: {file.id}") - - vector_store = await client.project_client.agents.vector_stores.create_and_poll( - file_ids=[file.id], name="my_vectorstore" - ) - print(f"Created vector store, vector store ID: {vector_store.id}") - - # 2. Create file search tool with uploaded resources - file_search_tool = HostedFileSearchTool(inputs=[HostedVectorStoreContent(vector_store_id=vector_store.id)]) - - # 3. 
Create an agent with file search capabilities - # The tool_resources are automatically extracted from HostedFileSearchTool - async with ChatAgent( - chat_client=client, - name="EmployeeSearchAgent", - instructions=( - "You are a helpful assistant that can search through uploaded employee files " - "to answer questions about employees." - ), - tools=file_search_tool, - ) as agent: - # 4. Simulate conversation with the agent - for user_input in USER_INPUTS: - print(f"# User: '{user_input}'") - response = await agent.run(user_input) - print(f"# Agent: {response.text}") - - # 5. Cleanup: Delete the vector store and file - try: - if vector_store: - await client.project_client.agents.vector_stores.delete(vector_store.id) - if file: - await client.project_client.agents.files.delete(file.id) - except Exception: - # Ignore cleanup errors to avoid masking issues - pass - finally: - # 6. Cleanup: Delete the vector store and file in case of eariler failure to prevent orphaned resources. - - # Refreshing the client is required since chat agent closes it - client = AzureAIAgentClient(async_credential=AzureCliCredential()) + async with ( + AzureCliCredential() as credential, + AgentsClient(endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], credential=credential) as agents_client, + AzureAIClient(async_credential=credential) as client, + ): try: + # 1. Upload file and create vector store + pdf_file_path = Path(__file__).parent.parent / "resources" / "employees.pdf" + print(f"Uploading file from: {pdf_file_path}") + + file = await agents_client.files.upload_and_poll(file_path=str(pdf_file_path), purpose="assistants") + print(f"Uploaded file, file ID: {file.id}") + + vector_store = await agents_client.vector_stores.create_and_poll(file_ids=[file.id], name="my_vectorstore") + print(f"Created vector store, vector store ID: {vector_store.id}") + + # 2. 
Create file search tool with uploaded resources + file_search_tool = HostedFileSearchTool(inputs=[HostedVectorStoreContent(vector_store_id=vector_store.id)]) + + # 3. Create an agent with file search capabilities + # The tool_resources are automatically extracted from HostedFileSearchTool + async with ChatAgent( + chat_client=client, + name="EmployeeSearchAgent", + instructions=( + "You are a helpful assistant that can search through uploaded employee files " + "to answer questions about employees." + ), + tools=file_search_tool, + ) as agent: + # 4. Simulate conversation with the agent + for user_input in USER_INPUTS: + print(f"# User: '{user_input}'") + response = await agent.run(user_input) + print(f"# Agent: {response.text}") + finally: + # 5. Cleanup: Delete the vector store and file in case of earlier failure to prevent orphaned resources. if vector_store: - await client.project_client.agents.vector_stores.delete(vector_store.id) + await agents_client.vector_stores.delete(vector_store.id) if file: - await client.project_client.agents.files.delete(file.id) - except Exception: - # Ignore cleanup errors to avoid masking issues - pass - finally: - await client.close() + await agents_client.files.delete(file.id) if __name__ == "__main__": diff --git a/python/samples/getting_started/agents/azure_ai/azure_ai_with_hosted_mcp.py b/python/samples/getting_started/agents/azure_ai/azure_ai_with_hosted_mcp.py index fb9d13323e..bf9d162a2d 100644 --- a/python/samples/getting_started/agents/azure_ai/azure_ai_with_hosted_mcp.py +++ b/python/samples/getting_started/agents/azure_ai/azure_ai_with_hosted_mcp.py @@ -3,23 +3,42 @@ import asyncio from typing import Any -from agent_framework import AgentProtocol, AgentThread, HostedMCPTool -from agent_framework.azure import AzureAIAgentClient +from agent_framework import AgentProtocol, AgentRunResponse, AgentThread, ChatMessage, HostedMCPTool +from agent_framework.azure import AzureAIClient from azure.identity.aio import 
AzureCliCredential """ Azure AI Agent with Hosted MCP Example -This sample demonstrates integration of Azure AI Agents with hosted Model Context Protocol (MCP) -servers, including user approval workflows for function call security. +This sample demonstrates integrating hosted Model Context Protocol (MCP) tools with Azure AI Agent. """ -async def handle_approvals_with_thread(query: str, agent: "AgentProtocol", thread: "AgentThread"): +async def handle_approvals_without_thread(query: str, agent: "AgentProtocol") -> AgentRunResponse: + """When we don't have a thread, we need to ensure we return with the input, approval request and approval.""" + + result = await agent.run(query, store=False) + while len(result.user_input_requests) > 0: + new_inputs: list[Any] = [query] + for user_input_needed in result.user_input_requests: + print( + f"User Input Request for function from {agent.name}: {user_input_needed.function_call.name}" + f" with arguments: {user_input_needed.function_call.arguments}" + ) + new_inputs.append(ChatMessage(role="assistant", contents=[user_input_needed])) + user_approval = input("Approve function call? 
(y/n): ") + new_inputs.append( + ChatMessage(role="user", contents=[user_input_needed.create_response(user_approval.lower() == "y")]) + ) + + result = await agent.run(new_inputs, store=False) + return result + + +async def handle_approvals_with_thread(query: str, agent: "AgentProtocol", thread: "AgentThread") -> AgentRunResponse: """Here we let the thread deal with the previous responses, and we just rerun with the approval.""" - from agent_framework import ChatMessage - result = await agent.run(query, thread=thread, store=True) + result = await agent.run(query, thread=thread) while len(result.user_input_requests) > 0: new_input: list[Any] = [] for user_input_needed in result.user_input_requests: @@ -34,36 +53,64 @@ async def handle_approvals_with_thread(query: str, agent: "AgentProtocol", threa contents=[user_input_needed.create_response(user_approval.lower() == "y")], ) ) - result = await agent.run(new_input, thread=thread, store=True) + result = await agent.run(new_input, thread=thread) return result -async def main() -> None: - """Example showing Hosted MCP tools for a Azure AI Agent.""" +async def run_hosted_mcp_without_approval() -> None: + """Example showing MCP Tools without approval.""" + # Since no Agent ID is provided, the agent will be automatically created. + # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred + # authentication option. 
async with ( AzureCliCredential() as credential, - AzureAIAgentClient(async_credential=credential) as chat_client, - ): - agent = chat_client.create_agent( - name="DocsAgent", - instructions="You are a helpful assistant that can help with microsoft documentation questions.", + AzureAIClient(async_credential=credential).create_agent( + name="MyLearnDocsAgent", + instructions="You are a helpful assistant that can help with Microsoft documentation questions.", tools=HostedMCPTool( name="Microsoft Learn MCP", url="https://learn.microsoft.com/api/mcp", + approval_mode="never_require", + ), + ) as agent, + ): + query = "How to create an Azure storage account using az cli?" + print(f"User: {query}") + result = await handle_approvals_without_thread(query, agent) + print(f"{agent.name}: {result}\n") + + +async def run_hosted_mcp_with_approval_and_thread() -> None: + """Example showing MCP Tools with approvals using a thread.""" + print("=== MCP with approvals and with thread ===") + + # Since no Agent ID is provided, the agent will be automatically created. + # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred + # authentication option. + async with ( + AzureCliCredential() as credential, + AzureAIClient(async_credential=credential).create_agent( + name="MyApiSpecsAgent", + instructions="You are a helpful agent that can use MCP tools to assist users.", + tools=HostedMCPTool( + name="api-specs", + url="https://gitmcp.io/Azure/azure-rest-api-specs", + approval_mode="always_require", ), - ) + ) as agent, + ): thread = agent.get_new_thread() - # First query - query1 = "How to create an Azure storage account using az cli?" - print(f"User: {query1}") - result1 = await handle_approvals_with_thread(query1, agent, thread) - print(f"{agent.name}: {result1}\n") - print("\n=======================================\n") - # Second query - query2 = "What is Microsoft Agent Framework?" 
- print(f"User: {query2}") - result2 = await handle_approvals_with_thread(query2, agent, thread) - print(f"{agent.name}: {result2}\n") + query = "Please summarize the Azure REST API specifications Readme" + print(f"User: {query}") + result = await handle_approvals_with_thread(query, agent, thread) + print(f"{agent.name}: {result}\n") + + +async def main() -> None: + print("=== Azure AI Agent with Hosted MCP Tools Example ===\n") + + await run_hosted_mcp_without_approval() + await run_hosted_mcp_with_approval_and_thread() if __name__ == "__main__": diff --git a/python/samples/getting_started/agents/azure_ai/azure_ai_with_response_format.py b/python/samples/getting_started/agents/azure_ai/azure_ai_with_response_format.py new file mode 100644 index 0000000000..2ffc8a8bad --- /dev/null +++ b/python/samples/getting_started/agents/azure_ai/azure_ai_with_response_format.py @@ -0,0 +1,54 @@ +# Copyright (c) Microsoft. All rights reserved. + +import asyncio + +from agent_framework.azure import AzureAIClient +from azure.identity.aio import AzureCliCredential +from pydantic import BaseModel, ConfigDict + +""" +Azure AI Agent Response Format Example + +This sample demonstrates basic usage of AzureAIClient with response format, +also known as structured outputs. +""" + + +class ReleaseBrief(BaseModel): + feature: str + benefit: str + launch_date: str + model_config = ConfigDict(extra="forbid") + + +async def main() -> None: + """Example of using response_format property.""" + + # Since no Agent ID is provided, the agent will be automatically created. + # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred + # authentication option. + async with ( + AzureCliCredential() as credential, + AzureAIClient(async_credential=credential).create_agent( + name="ProductMarketerAgent", + instructions="Return launch briefs as structured JSON.", + ) as agent, + ): + query = "Draft a launch brief for the Contoso Note app." 
+ print(f"User: {query}") + result = await agent.run( + query, + # Specify type to use as response + response_format=ReleaseBrief, + ) + + if isinstance(result.value, ReleaseBrief): + release_brief = result.value + print("Agent:") + print(f"Feature: {release_brief.feature}") + print(f"Benefit: {release_brief.benefit}") + print(f"Launch date: {release_brief.launch_date}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/getting_started/agents/azure_ai/azure_ai_with_thread.py b/python/samples/getting_started/agents/azure_ai/azure_ai_with_thread.py index e2dd175657..e1cbe8c5e4 100644 --- a/python/samples/getting_started/agents/azure_ai/azure_ai_with_thread.py +++ b/python/samples/getting_started/agents/azure_ai/azure_ai_with_thread.py @@ -4,16 +4,15 @@ from random import randint from typing import Annotated -from agent_framework import AgentThread, ChatAgent -from agent_framework.azure import AzureAIAgentClient +from agent_framework.azure import AzureAIClient from azure.identity.aio import AzureCliCredential from pydantic import Field """ Azure AI Agent with Thread Management Example -This sample demonstrates thread management with Azure AI Agents, comparing -automatic thread creation with explicit thread management for persistent context. +This sample demonstrates thread management with Azure AI Agent, showing +persistent conversation capabilities using service-managed threads as well as storing messages in-memory. """ @@ -26,44 +25,42 @@ def get_weather( async def example_with_automatic_thread_creation() -> None: - """Example showing automatic thread creation (service-managed thread).""" + """Example showing automatic thread creation.""" print("=== Automatic Thread Creation Example ===") - # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred - # authentication option. 
async with ( AzureCliCredential() as credential, - ChatAgent( - chat_client=AzureAIAgentClient(async_credential=credential), + AzureAIClient(async_credential=credential).create_agent( + name="BasicWeatherAgent", instructions="You are a helpful weather agent.", tools=get_weather, ) as agent, ): # First conversation - no thread provided, will be created automatically - first_query = "What's the weather like in Seattle?" - print(f"User: {first_query}") - first_result = await agent.run(first_query) - print(f"Agent: {first_result.text}") + query1 = "What's the weather like in Seattle?" + print(f"User: {query1}") + result1 = await agent.run(query1) + print(f"Agent: {result1.text}") # Second conversation - still no thread provided, will create another new thread - second_query = "What was the last city I asked about?" - print(f"\nUser: {second_query}") - second_result = await agent.run(second_query) - print(f"Agent: {second_result.text}") + query2 = "What was the last city I asked about?" + print(f"\nUser: {query2}") + result2 = await agent.run(query2) + print(f"Agent: {result2.text}") print("Note: Each call creates a separate thread, so the agent doesn't remember previous context.\n") -async def example_with_thread_persistence() -> None: - """Example showing thread persistence across multiple conversations.""" - print("=== Thread Persistence Example ===") - print("Using the same thread across multiple conversations to maintain context.\n") +async def example_with_thread_persistence_in_memory() -> None: + """ + Example showing thread persistence across multiple conversations. + In this example, messages are stored in-memory. + """ + print("=== Thread Persistence Example (In-Memory) ===") - # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred - # authentication option. 
async with ( AzureCliCredential() as credential, - ChatAgent( - chat_client=AzureAIAgentClient(async_credential=credential), + AzureAIClient(async_credential=credential).create_agent( + name="BasicWeatherAgent", instructions="You are a helpful weather agent.", tools=get_weather, ) as agent, @@ -72,81 +69,80 @@ async def example_with_thread_persistence() -> None: thread = agent.get_new_thread() # First conversation - first_query = "What's the weather like in Tokyo?" - print(f"User: {first_query}") - first_result = await agent.run(first_query, thread=thread) - print(f"Agent: {first_result.text}") + query1 = "What's the weather like in Tokyo?" + print(f"User: {query1}") + result1 = await agent.run(query1, thread=thread, store=False) + print(f"Agent: {result1.text}") # Second conversation using the same thread - maintains context - second_query = "How about London?" - print(f"\nUser: {second_query}") - second_result = await agent.run(second_query, thread=thread) - print(f"Agent: {second_result.text}") + query2 = "How about London?" + print(f"\nUser: {query2}") + result2 = await agent.run(query2, thread=thread, store=False) + print(f"Agent: {result2.text}") # Third conversation - agent should remember both previous cities - third_query = "Which of the cities I asked about has better weather?" - print(f"\nUser: {third_query}") - third_result = await agent.run(third_query, thread=thread) - print(f"Agent: {third_result.text}") + query3 = "Which of the cities I asked about has better weather?" + print(f"\nUser: {query3}") + result3 = await agent.run(query3, thread=thread, store=False) + print(f"Agent: {result3.text}") print("Note: The agent remembers context from previous messages in the same thread.\n") async def example_with_existing_thread_id() -> None: - """Example showing how to work with an existing thread ID from the service.""" + """ + Example showing how to work with an existing thread ID from the service. + In this example, messages are stored on the server. 
+ """ print("=== Existing Thread ID Example ===") - print("Using a specific thread ID to continue an existing conversation.\n") # First, create a conversation and capture the thread ID existing_thread_id = None - # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred - # authentication option. async with ( AzureCliCredential() as credential, - ChatAgent( - chat_client=AzureAIAgentClient(async_credential=credential), + AzureAIClient(async_credential=credential).create_agent( + name="BasicWeatherAgent", instructions="You are a helpful weather agent.", tools=get_weather, ) as agent, ): # Start a conversation and get the thread ID thread = agent.get_new_thread() - first_query = "What's the weather in Paris?" - print(f"User: {first_query}") - first_result = await agent.run(first_query, thread=thread) - print(f"Agent: {first_result.text}") + + query1 = "What's the weather in Paris?" + print(f"User: {query1}") + result1 = await agent.run(query1, thread=thread) + print(f"Agent: {result1.text}") # The thread ID is set after the first response existing_thread_id = thread.service_thread_id print(f"Thread ID: {existing_thread_id}") - if existing_thread_id: - print("\n--- Continuing with the same thread ID in a new agent instance ---") + if existing_thread_id: + print("\n--- Continuing with the same thread ID in a new agent instance ---") - # Create a new agent instance but use the existing thread ID - async with ( - AzureCliCredential() as credential, - ChatAgent( - chat_client=AzureAIAgentClient(thread_id=existing_thread_id, async_credential=credential), - instructions="You are a helpful weather agent.", - tools=get_weather, - ) as agent, - ): - # Create a thread with the existing ID - thread = AgentThread(service_thread_id=existing_thread_id) + async with ( + AzureAIClient(async_credential=credential).create_agent( + name="BasicWeatherAgent", + instructions="You are a helpful weather agent.", + tools=get_weather, + ) as agent, + 
): + # Create a thread with the existing ID + thread = agent.get_new_thread(service_thread_id=existing_thread_id) - second_query = "What was the last city I asked about?" - print(f"User: {second_query}") - second_result = await agent.run(second_query, thread=thread) - print(f"Agent: {second_result.text}") - print("Note: The agent continues the conversation from the previous thread.\n") + query2 = "What was the last city I asked about?" + print(f"User: {query2}") + result2 = await agent.run(query2, thread=thread) + print(f"Agent: {result2.text}") + print("Note: The agent continues the conversation from the previous thread by using thread ID.\n") async def main() -> None: - print("=== Azure AI Chat Client Agent Thread Management Examples ===\n") + print("=== Azure AI Agent Thread Management Examples ===\n") await example_with_automatic_thread_creation() - await example_with_thread_persistence() + await example_with_thread_persistence_in_memory() await example_with_existing_thread_id() diff --git a/python/samples/getting_started/agents/azure_ai_agent/README.md b/python/samples/getting_started/agents/azure_ai_agent/README.md new file mode 100644 index 0000000000..375f682474 --- /dev/null +++ b/python/samples/getting_started/agents/azure_ai_agent/README.md @@ -0,0 +1,69 @@ +# Azure AI Agent Examples + +This folder contains examples demonstrating different ways to create and use agents with the Azure AI chat client from the `agent_framework.azure` package. + +## Examples + +| File | Description | +|------|-------------| +| [`azure_ai_basic.py`](azure_ai_basic.py) | The simplest way to create an agent using `ChatAgent` with `AzureAIAgentClient`. It automatically handles all configuration using environment variables. | +| [`azure_ai_with_bing_grounding.py`](azure_ai_with_bing_grounding.py) | Shows how to use Bing Grounding search with Azure AI agents to find real-time information from the web. 
Demonstrates web search capabilities with proper source citations and comprehensive error handling. | +| [`azure_ai_with_code_interpreter.py`](azure_ai_with_code_interpreter.py) | Shows how to use the HostedCodeInterpreterTool with Azure AI agents to write and execute Python code. Includes helper methods for accessing code interpreter data from response chunks. | +| [`azure_ai_with_existing_agent.py`](azure_ai_with_existing_agent.py) | Shows how to work with a pre-existing agent by providing the agent ID to the Azure AI chat client. This example also demonstrates proper cleanup of manually created agents. | +| [`azure_ai_with_existing_thread.py`](azure_ai_with_existing_thread.py) | Shows how to work with a pre-existing thread by providing the thread ID to the Azure AI chat client. This example also demonstrates proper cleanup of manually created threads. | +| [`azure_ai_with_explicit_settings.py`](azure_ai_with_explicit_settings.py) | Shows how to create an agent with explicitly configured `AzureAIAgentClient` settings, including project endpoint, model deployment, credentials, and agent name. | +| [`azure_ai_with_azure_ai_search.py`](azure_ai_with_azure_ai_search.py) | Demonstrates how to use Azure AI Search with Azure AI agents to search through indexed data. Shows how to configure search parameters, query types, and integrate with existing search indexes. | +| [`azure_ai_with_file_search.py`](azure_ai_with_file_search.py) | Demonstrates how to use the HostedFileSearchTool with Azure AI agents to search through uploaded documents. Shows file upload, vector store creation, and querying document content. Includes both streaming and non-streaming examples. | +| [`azure_ai_with_function_tools.py`](azure_ai_with_function_tools.py) | Demonstrates how to use function tools with agents. Shows both agent-level tools (defined when creating the agent) and query-level tools (provided with specific queries). 
| +| [`azure_ai_with_hosted_mcp.py`](azure_ai_with_hosted_mcp.py) | Shows how to integrate Azure AI agents with hosted Model Context Protocol (MCP) servers for enhanced functionality and tool integration. Demonstrates remote MCP server connections and tool discovery. | +| [`azure_ai_with_local_mcp.py`](azure_ai_with_local_mcp.py) | Shows how to integrate Azure AI agents with local Model Context Protocol (MCP) servers for enhanced functionality and tool integration. Demonstrates both agent-level and run-level tool configuration. | +| [`azure_ai_with_multiple_tools.py`](azure_ai_with_multiple_tools.py) | Demonstrates how to use multiple tools together with Azure AI agents, including web search, MCP servers, and function tools. Shows coordinated multi-tool interactions and approval workflows. | +| [`azure_ai_with_openapi_tools.py`](azure_ai_with_openapi_tools.py) | Demonstrates how to use OpenAPI tools with Azure AI agents to integrate external REST APIs. Shows OpenAPI specification loading, anonymous authentication, thread context management, and coordinated multi-API conversations using weather and countries APIs. | +| [`azure_ai_with_thread.py`](azure_ai_with_thread.py) | Demonstrates thread management with Azure AI agents, including automatic thread creation for stateless conversations and explicit thread management for maintaining conversation context across multiple interactions. | + +## Environment Variables + +Before running the examples, you need to set up your environment variables. You can do this in one of two ways: + +### Option 1: Using a .env file (Recommended) + +1. Copy the `.env.example` file from the `python` directory to create a `.env` file: + ```bash + cp ../../.env.example ../../.env + ``` + +2. Edit the `.env` file and add your values: + ``` + AZURE_AI_PROJECT_ENDPOINT="your-project-endpoint" + AZURE_AI_MODEL_DEPLOYMENT_NAME="your-model-deployment-name" + ``` + +3. 
For samples using Bing Grounding search (like `azure_ai_with_bing_grounding.py` and `azure_ai_with_multiple_tools.py`), you'll also need: + ``` + BING_CONNECTION_ID="your-bing-connection-id" + ``` + + To get your Bing connection details: + - Go to [Azure AI Foundry portal](https://ai.azure.com) + - Navigate to your project's "Connected resources" section + - Add a new connection for "Grounding with Bing Search" + - Copy the ID + +### Option 2: Using environment variables directly + +Set the environment variables in your shell: + +```bash +export AZURE_AI_PROJECT_ENDPOINT="your-project-endpoint" +export AZURE_AI_MODEL_DEPLOYMENT_NAME="your-model-deployment-name" +export BING_CONNECTION_ID="your-bing-connection-id" +``` + +### Required Variables + +- `AZURE_AI_PROJECT_ENDPOINT`: Your Azure AI project endpoint (required for all examples) +- `AZURE_AI_MODEL_DEPLOYMENT_NAME`: The name of your model deployment (required for all examples) + +### Optional Variables + +- `BING_CONNECTION_ID`: Your Bing connection ID (required for `azure_ai_with_bing_grounding.py` and `azure_ai_with_multiple_tools.py`) diff --git a/python/samples/getting_started/agents/azure_ai_agent/azure_ai_basic.py b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_basic.py new file mode 100644 index 0000000000..633b5b9daa --- /dev/null +++ b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_basic.py @@ -0,0 +1,82 @@ +# Copyright (c) Microsoft. All rights reserved. + +import asyncio +from random import randint +from typing import Annotated + +from agent_framework.azure import AzureAIAgentClient +from azure.identity.aio import AzureCliCredential +from pydantic import Field + +""" +Azure AI Agent Basic Example + +This sample demonstrates basic usage of AzureAIAgentClient to create agents with automatic +lifecycle management. Shows both streaming and non-streaming responses with function tools. 
+""" + + +def get_weather( + location: Annotated[str, Field(description="The location to get the weather for.")], +) -> str: + """Get the weather for a given location.""" + conditions = ["sunny", "cloudy", "rainy", "stormy"] + return f"The weather in {location} is {conditions[randint(0, 3)]} with a high of {randint(10, 30)}°C." + + +async def non_streaming_example() -> None: + """Example of non-streaming response (get the complete result at once).""" + print("=== Non-streaming Response Example ===") + + # Since no Agent ID is provided, the agent will be automatically created + # and deleted after getting a response + # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred + # authentication option. + async with ( + AzureCliCredential() as credential, + AzureAIAgentClient(async_credential=credential).create_agent( + name="WeatherAgent", + instructions="You are a helpful weather agent.", + tools=get_weather, + ) as agent, + ): + query = "What's the weather like in Seattle?" + print(f"User: {query}") + result = await agent.run(query) + print(f"Agent: {result}\n") + + +async def streaming_example() -> None: + """Example of streaming response (get results as they are generated).""" + print("=== Streaming Response Example ===") + + # Since no Agent ID is provided, the agent will be automatically created + # and deleted after getting a response + # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred + # authentication option. + async with ( + AzureCliCredential() as credential, + AzureAIAgentClient(async_credential=credential).create_agent( + name="WeatherAgent", + instructions="You are a helpful weather agent.", + tools=get_weather, + ) as agent, + ): + query = "What's the weather like in Portland?" 
+ print(f"User: {query}") + print("Agent: ", end="", flush=True) + async for chunk in agent.run_stream(query): + if chunk.text: + print(chunk.text, end="", flush=True) + print("\n") + + +async def main() -> None: + print("=== Basic Azure AI Chat Client Agent Example ===") + + await non_streaming_example() + await streaming_example() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_azure_ai_search.py b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_azure_ai_search.py new file mode 100644 index 0000000000..32fff1fbda --- /dev/null +++ b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_azure_ai_search.py @@ -0,0 +1,121 @@ +# Copyright (c) Microsoft. All rights reserved. + +import asyncio +import os + +from agent_framework import ChatAgent, CitationAnnotation +from agent_framework.azure import AzureAIAgentClient +from azure.ai.agents.aio import AgentsClient +from azure.ai.projects.aio import AIProjectClient +from azure.ai.projects.models import ConnectionType +from azure.identity.aio import AzureCliCredential + +""" +Azure AI Agent with Azure AI Search Example + +This sample demonstrates how to create an Azure AI agent that uses Azure AI Search +to search through indexed hotel data and answer user questions about hotels. + +Prerequisites: +1. Set AZURE_AI_PROJECT_ENDPOINT and AZURE_AI_MODEL_DEPLOYMENT_NAME environment variables +2. Ensure you have an Azure AI Search connection configured in your Azure AI project +3. 
The search index "hotels-sample-index" should exist in your Azure AI Search service + (you can create this using the Azure portal with sample hotel data) + +NOTE: To ensure consistent search tool usage: +- Include explicit instructions for the agent to use the search tool +- Mention the search requirement in your queries +- Use `tool_choice="required"` to force tool usage + +More info on `query type` can be found here: +https://learn.microsoft.com/en-us/python/api/azure-ai-agents/azure.ai.agents.models.aisearchindexresource?view=azure-python-preview +""" + + +async def main() -> None: + """Main function demonstrating Azure AI agent with raw Azure AI Search tool.""" + print("=== Azure AI Agent with Raw Azure AI Search Tool ===") + + # Create the client and manually create an agent with Azure AI Search tool + async with ( + AzureCliCredential() as credential, + AIProjectClient(endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], credential=credential) as project_client, + AgentsClient(endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], credential=credential) as agents_client, + ): + ai_search_conn_id = "" + async for connection in project_client.connections.list(): + if connection.type == ConnectionType.AZURE_AI_SEARCH: + ai_search_conn_id = connection.id + break + + # 1. Create Azure AI agent with the search tool + azure_ai_agent = await agents_client.create_agent( + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + name="HotelSearchAgent", + instructions=( + "You are a helpful agent that searches hotel information using Azure AI Search. " + "Always use the search tool and index to find hotel data and provide accurate information." + ), + tools=[{"type": "azure_ai_search"}], + tool_resources={ + "azure_ai_search": { + "indexes": [ + { + "index_connection_id": ai_search_conn_id, + "index_name": "hotels-sample-index", + "query_type": "vector", + } + ] + } + }, + ) + + # 2. 
Create chat client with the existing agent + chat_client = AzureAIAgentClient(agents_client=agents_client, agent_id=azure_ai_agent.id) + + try: + async with ChatAgent( + chat_client=chat_client, + # Additional instructions for this specific conversation + instructions=("You are a helpful agent that uses the search tool and index to find hotel information."), + ) as agent: + print("This agent uses raw Azure AI Search tool to search hotel data.\n") + + # 3. Simulate conversation with the agent + user_input = ( + "Use Azure AI search knowledge tool to find detailed information about a winter hotel." + " Use the search tool and index." # You can modify prompt to force tool usage + ) + print(f"User: {user_input}") + print("Agent: ", end="", flush=True) + + # Stream the response and collect citations + citations: list[CitationAnnotation] = [] + async for chunk in agent.run_stream(user_input): + if chunk.text: + print(chunk.text, end="", flush=True) + + # Collect citations from Azure AI Search responses + for content in getattr(chunk, "contents", []): + annotations = getattr(content, "annotations", []) + if annotations: + citations.extend(annotations) + + print() + + # Display collected citations + if citations: + print("\n\nCitations:") + for i, citation in enumerate(citations, 1): + print(f"[{i}] Reference: {citation.url}") + + print("\n" + "=" * 50 + "\n") + print("Hotel search conversation completed!") + + finally: + # Clean up the agent manually + await agents_client.delete_agent(azure_ai_agent.id) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/getting_started/agents/azure_ai/azure_ai_with_bing_grounding.py b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_bing_grounding.py similarity index 100% rename from python/samples/getting_started/agents/azure_ai/azure_ai_with_bing_grounding.py rename to python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_bing_grounding.py diff --git 
a/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_code_interpreter.py b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_code_interpreter.py new file mode 100644 index 0000000000..f4bf48bd59 --- /dev/null +++ b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_code_interpreter.py @@ -0,0 +1,59 @@ +# Copyright (c) Microsoft. All rights reserved. + +import asyncio + +from agent_framework import AgentRunResponse, ChatResponseUpdate, HostedCodeInterpreterTool +from agent_framework.azure import AzureAIAgentClient +from azure.ai.agents.models import ( + RunStepDeltaCodeInterpreterDetailItemObject, +) +from azure.identity.aio import AzureCliCredential + +""" +Azure AI Agent with Code Interpreter Example + +This sample demonstrates using HostedCodeInterpreterTool with Azure AI Agents +for Python code execution and mathematical problem solving. +""" + + +def print_code_interpreter_inputs(response: AgentRunResponse) -> None: + """Helper method to access code interpreter data.""" + + print("\nCode Interpreter Inputs during the run:") + if response.raw_representation is None: + return + for chunk in response.raw_representation: + if isinstance(chunk, ChatResponseUpdate) and isinstance( + chunk.raw_representation, RunStepDeltaCodeInterpreterDetailItemObject + ): + print(chunk.raw_representation.input, end="") + print("\n") + + +async def main() -> None: + """Example showing how to use the HostedCodeInterpreterTool with Azure AI.""" + print("=== Azure AI Agent with Code Interpreter Example ===") + + # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred + # authentication option. 
+ async with ( + AzureCliCredential() as credential, + AzureAIAgentClient(async_credential=credential) as chat_client, + ): + agent = chat_client.create_agent( + name="CodingAgent", + instructions=("You are a helpful assistant that can write and execute Python code to solve problems."), + tools=HostedCodeInterpreterTool(), + ) + query = "Generate the factorial of 100 using python code, show the code and execute it." + print(f"User: {query}") + response = await AgentRunResponse.from_agent_response_generator(agent.run_stream(query)) + print(f"Agent: {response}") + # To review the code interpreter outputs, you can access + # them from the response raw_representations, just uncomment the next line: + # print_code_interpreter_inputs(response) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_existing_agent.py b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_existing_agent.py new file mode 100644 index 0000000000..f35ac2412a --- /dev/null +++ b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_existing_agent.py @@ -0,0 +1,57 @@ +# Copyright (c) Microsoft. All rights reserved. + +import asyncio +import os + +from agent_framework import ChatAgent +from agent_framework.azure import AzureAIAgentClient +from azure.ai.agents.aio import AgentsClient +from azure.identity.aio import AzureCliCredential + +""" +Azure AI Agent with Existing Agent Example + +This sample demonstrates working with pre-existing Azure AI Agents by providing +agent IDs, showing agent reuse patterns for production scenarios. 
+""" + + +async def main() -> None: + print("=== Azure AI Chat Client with Existing Agent ===") + + # Create the client + async with ( + AzureCliCredential() as credential, + AgentsClient(endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], credential=credential) as agents_client, + ): + azure_ai_agent = await agents_client.create_agent( + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + # Create remote agent with default instructions + # These instructions will persist on created agent for every run. + instructions="End each response with [END].", + ) + + chat_client = AzureAIAgentClient(agents_client=agents_client, agent_id=azure_ai_agent.id) + + try: + async with ChatAgent( + chat_client=chat_client, + # Instructions here are applicable only to this ChatAgent instance + # These instructions will be combined with instructions on existing remote agent. + # The final instructions during the execution will look like: + # "'End each response with [END]. Respond with 'Hello World' only'" + instructions="Respond with 'Hello World' only", + ) as agent: + query = "How are you?" + print(f"User: {query}") + result = await agent.run(query) + # Based on local and remote instructions, the result will be + # 'Hello World [END]'. 
+ print(f"Agent: {result}\n") + finally: + # Clean up the agent manually + await agents_client.delete_agent(azure_ai_agent.id) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/getting_started/agents/azure_ai/azure_ai_with_existing_thread.py b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_existing_thread.py similarity index 100% rename from python/samples/getting_started/agents/azure_ai/azure_ai_with_existing_thread.py rename to python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_existing_thread.py diff --git a/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_explicit_settings.py b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_explicit_settings.py new file mode 100644 index 0000000000..0ac2ee620c --- /dev/null +++ b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_explicit_settings.py @@ -0,0 +1,55 @@ +# Copyright (c) Microsoft. All rights reserved. + +import asyncio +import os +from random import randint +from typing import Annotated + +from agent_framework import ChatAgent +from agent_framework.azure import AzureAIAgentClient +from azure.identity.aio import AzureCliCredential +from pydantic import Field + +""" +Azure AI Agent with Explicit Settings Example + +This sample demonstrates creating Azure AI Agents with explicit configuration +settings rather than relying on environment variable defaults. +""" + + +def get_weather( + location: Annotated[str, Field(description="The location to get the weather for.")], +) -> str: + """Get the weather for a given location.""" + conditions = ["sunny", "cloudy", "rainy", "stormy"] + return f"The weather in {location} is {conditions[randint(0, 3)]} with a high of {randint(10, 30)}°C." 
+ + +async def main() -> None: + print("=== Azure AI Chat Client with Explicit Settings ===") + + # Since no Agent ID is provided, the agent will be automatically created + # and deleted after getting a response + # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred + # authentication option. + async with ( + AzureCliCredential() as credential, + ChatAgent( + chat_client=AzureAIAgentClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + model_deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + async_credential=credential, + agent_name="WeatherAgent", + should_cleanup_agent=True, # Set to False if you want to disable automatic agent cleanup + ), + instructions="You are a helpful weather agent.", + tools=get_weather, + ) as agent, + ): + result = await agent.run("What's the weather like in New York?") + print(f"Result: {result}\n") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_file_search.py b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_file_search.py new file mode 100644 index 0000000000..b00bfc1f26 --- /dev/null +++ b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_file_search.py @@ -0,0 +1,91 @@ +# Copyright (c) Microsoft. All rights reserved. + +import asyncio +from pathlib import Path + +from agent_framework import ChatAgent, HostedFileSearchTool, HostedVectorStoreContent +from agent_framework.azure import AzureAIAgentClient +from azure.ai.agents.models import FileInfo, VectorStore +from azure.identity.aio import AzureCliCredential + +""" +The following sample demonstrates how to create a simple, Azure AI agent that +uses a file search tool to answer user questions. 
+""" + + +# Simulate a conversation with the agent +USER_INPUTS = [ + "Who is the youngest employee?", + "Who works in sales?", + "I have a customer request, who can help me?", +] + + +async def main() -> None: + """Main function demonstrating Azure AI agent with file search capabilities.""" + client = AzureAIAgentClient(async_credential=AzureCliCredential()) + file: FileInfo | None = None + vector_store: VectorStore | None = None + + try: + # 1. Upload file and create vector store + pdf_file_path = Path(__file__).parent.parent / "resources" / "employees.pdf" + print(f"Uploading file from: {pdf_file_path}") + + file = await client.agents_client.files.upload_and_poll(file_path=str(pdf_file_path), purpose="assistants") + print(f"Uploaded file, file ID: {file.id}") + + vector_store = await client.agents_client.vector_stores.create_and_poll( + file_ids=[file.id], name="my_vectorstore" + ) + print(f"Created vector store, vector store ID: {vector_store.id}") + + # 2. Create file search tool with uploaded resources + file_search_tool = HostedFileSearchTool(inputs=[HostedVectorStoreContent(vector_store_id=vector_store.id)]) + + # 3. Create an agent with file search capabilities + # The tool_resources are automatically extracted from HostedFileSearchTool + async with ChatAgent( + chat_client=client, + name="EmployeeSearchAgent", + instructions=( + "You are a helpful assistant that can search through uploaded employee files " + "to answer questions about employees." + ), + tools=file_search_tool, + ) as agent: + # 4. Simulate conversation with the agent + for user_input in USER_INPUTS: + print(f"# User: '{user_input}'") + response = await agent.run(user_input) + print(f"# Agent: {response.text}") + + # 5. 
Cleanup: Delete the vector store and file + try: + if vector_store: + await client.agents_client.vector_stores.delete(vector_store.id) + if file: + await client.agents_client.files.delete(file.id) + except Exception: + # Ignore cleanup errors to avoid masking issues + pass + finally: + # 6. Cleanup: Delete the vector store and file in case of earlier failure to prevent orphaned resources. + + # Refreshing the client is required since chat agent closes it + client = AzureAIAgentClient(async_credential=AzureCliCredential()) + try: + if vector_store: + await client.agents_client.vector_stores.delete(vector_store.id) + if file: + await client.agents_client.files.delete(file.id) + except Exception: + # Ignore cleanup errors to avoid masking issues + pass + finally: + await client.close() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/getting_started/agents/azure_ai/azure_ai_with_function_tools.py b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_function_tools.py similarity index 100% rename from python/samples/getting_started/agents/azure_ai/azure_ai_with_function_tools.py rename to python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_function_tools.py diff --git a/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_hosted_mcp.py b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_hosted_mcp.py new file mode 100644 index 0000000000..6bed30dd21 --- /dev/null +++ b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_hosted_mcp.py @@ -0,0 +1,70 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +import asyncio +from typing import Any + +from agent_framework import AgentProtocol, AgentRunResponse, AgentThread, HostedMCPTool +from agent_framework.azure import AzureAIAgentClient +from azure.identity.aio import AzureCliCredential + +""" +Azure AI Agent with Hosted MCP Example + +This sample demonstrates integration of Azure AI Agents with hosted Model Context Protocol (MCP) +servers, including user approval workflows for function call security. +""" + + +async def handle_approvals_with_thread(query: str, agent: "AgentProtocol", thread: "AgentThread") -> AgentRunResponse: + """Here we let the thread deal with the previous responses, and we just rerun with the approval.""" + from agent_framework import ChatMessage + + result = await agent.run(query, thread=thread, store=True) + while len(result.user_input_requests) > 0: + new_input: list[Any] = [] + for user_input_needed in result.user_input_requests: + print( + f"User Input Request for function from {agent.name}: {user_input_needed.function_call.name}" + f" with arguments: {user_input_needed.function_call.arguments}" + ) + user_approval = input("Approve function call? (y/n): ") + new_input.append( + ChatMessage( + role="user", + contents=[user_input_needed.create_response(user_approval.lower() == "y")], + ) + ) + result = await agent.run(new_input, thread=thread, store=True) + return result + + +async def main() -> None: + """Example showing Hosted MCP tools for a Azure AI Agent.""" + async with ( + AzureCliCredential() as credential, + AzureAIAgentClient(async_credential=credential) as chat_client, + ): + agent = chat_client.create_agent( + name="DocsAgent", + instructions="You are a helpful assistant that can help with microsoft documentation questions.", + tools=HostedMCPTool( + name="Microsoft Learn MCP", + url="https://learn.microsoft.com/api/mcp", + ), + ) + thread = agent.get_new_thread() + # First query + query1 = "How to create an Azure storage account using az cli?" 
+ print(f"User: {query1}") + result1 = await handle_approvals_with_thread(query1, agent, thread) + print(f"{agent.name}: {result1}\n") + print("\n=======================================\n") + # Second query + query2 = "What is Microsoft Agent Framework?" + print(f"User: {query2}") + result2 = await handle_approvals_with_thread(query2, agent, thread) + print(f"{agent.name}: {result2}\n") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/getting_started/agents/azure_ai/azure_ai_with_local_mcp.py b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_local_mcp.py similarity index 100% rename from python/samples/getting_started/agents/azure_ai/azure_ai_with_local_mcp.py rename to python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_local_mcp.py diff --git a/python/samples/getting_started/agents/azure_ai/azure_ai_with_multiple_tools.py b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_multiple_tools.py similarity index 100% rename from python/samples/getting_started/agents/azure_ai/azure_ai_with_multiple_tools.py rename to python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_multiple_tools.py diff --git a/python/samples/getting_started/agents/azure_ai/azure_ai_with_openapi_tools.py b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_openapi_tools.py similarity index 100% rename from python/samples/getting_started/agents/azure_ai/azure_ai_with_openapi_tools.py rename to python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_openapi_tools.py diff --git a/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_thread.py b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_thread.py new file mode 100644 index 0000000000..e2dd175657 --- /dev/null +++ b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_thread.py @@ -0,0 +1,154 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +import asyncio +from random import randint +from typing import Annotated + +from agent_framework import AgentThread, ChatAgent +from agent_framework.azure import AzureAIAgentClient +from azure.identity.aio import AzureCliCredential +from pydantic import Field + +""" +Azure AI Agent with Thread Management Example + +This sample demonstrates thread management with Azure AI Agents, comparing +automatic thread creation with explicit thread management for persistent context. +""" + + +def get_weather( + location: Annotated[str, Field(description="The location to get the weather for.")], +) -> str: + """Get the weather for a given location.""" + conditions = ["sunny", "cloudy", "rainy", "stormy"] + return f"The weather in {location} is {conditions[randint(0, 3)]} with a high of {randint(10, 30)}°C." + + +async def example_with_automatic_thread_creation() -> None: + """Example showing automatic thread creation (service-managed thread).""" + print("=== Automatic Thread Creation Example ===") + + # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred + # authentication option. + async with ( + AzureCliCredential() as credential, + ChatAgent( + chat_client=AzureAIAgentClient(async_credential=credential), + instructions="You are a helpful weather agent.", + tools=get_weather, + ) as agent, + ): + # First conversation - no thread provided, will be created automatically + first_query = "What's the weather like in Seattle?" + print(f"User: {first_query}") + first_result = await agent.run(first_query) + print(f"Agent: {first_result.text}") + + # Second conversation - still no thread provided, will create another new thread + second_query = "What was the last city I asked about?" 
+ print(f"\nUser: {second_query}") + second_result = await agent.run(second_query) + print(f"Agent: {second_result.text}") + print("Note: Each call creates a separate thread, so the agent doesn't remember previous context.\n") + + +async def example_with_thread_persistence() -> None: + """Example showing thread persistence across multiple conversations.""" + print("=== Thread Persistence Example ===") + print("Using the same thread across multiple conversations to maintain context.\n") + + # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred + # authentication option. + async with ( + AzureCliCredential() as credential, + ChatAgent( + chat_client=AzureAIAgentClient(async_credential=credential), + instructions="You are a helpful weather agent.", + tools=get_weather, + ) as agent, + ): + # Create a new thread that will be reused + thread = agent.get_new_thread() + + # First conversation + first_query = "What's the weather like in Tokyo?" + print(f"User: {first_query}") + first_result = await agent.run(first_query, thread=thread) + print(f"Agent: {first_result.text}") + + # Second conversation using the same thread - maintains context + second_query = "How about London?" + print(f"\nUser: {second_query}") + second_result = await agent.run(second_query, thread=thread) + print(f"Agent: {second_result.text}") + + # Third conversation - agent should remember both previous cities + third_query = "Which of the cities I asked about has better weather?" 
+ print(f"\nUser: {third_query}") + third_result = await agent.run(third_query, thread=thread) + print(f"Agent: {third_result.text}") + print("Note: The agent remembers context from previous messages in the same thread.\n") + + +async def example_with_existing_thread_id() -> None: + """Example showing how to work with an existing thread ID from the service.""" + print("=== Existing Thread ID Example ===") + print("Using a specific thread ID to continue an existing conversation.\n") + + # First, create a conversation and capture the thread ID + existing_thread_id = None + + # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred + # authentication option. + async with ( + AzureCliCredential() as credential, + ChatAgent( + chat_client=AzureAIAgentClient(async_credential=credential), + instructions="You are a helpful weather agent.", + tools=get_weather, + ) as agent, + ): + # Start a conversation and get the thread ID + thread = agent.get_new_thread() + first_query = "What's the weather in Paris?" + print(f"User: {first_query}") + first_result = await agent.run(first_query, thread=thread) + print(f"Agent: {first_result.text}") + + # The thread ID is set after the first response + existing_thread_id = thread.service_thread_id + print(f"Thread ID: {existing_thread_id}") + + if existing_thread_id: + print("\n--- Continuing with the same thread ID in a new agent instance ---") + + # Create a new agent instance but use the existing thread ID + async with ( + AzureCliCredential() as credential, + ChatAgent( + chat_client=AzureAIAgentClient(thread_id=existing_thread_id, async_credential=credential), + instructions="You are a helpful weather agent.", + tools=get_weather, + ) as agent, + ): + # Create a thread with the existing ID + thread = AgentThread(service_thread_id=existing_thread_id) + + second_query = "What was the last city I asked about?" 
+ print(f"User: {second_query}") + second_result = await agent.run(second_query, thread=thread) + print(f"Agent: {second_result.text}") + print("Note: The agent continues the conversation from the previous thread.\n") + + +async def main() -> None: + print("=== Azure AI Chat Client Agent Thread Management Examples ===\n") + + await example_with_automatic_thread_creation() + await example_with_thread_persistence() + await example_with_existing_thread_id() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/uv.lock b/python/uv.lock index 4ac3e71980..2ace8a6982 100644 --- a/python/uv.lock +++ b/python/uv.lock @@ -246,7 +246,7 @@ requires-dist = [ { name = "agent-framework-core", editable = "packages/core" }, { name = "aiohttp" }, { name = "azure-ai-agents", specifier = "==1.2.0b5" }, - { name = "azure-ai-projects", specifier = ">=1.0.0b11" }, + { name = "azure-ai-projects", specifier = ">=2.0.0b1" }, ] [[package]] @@ -1688,11 +1688,11 @@ wheels = [ [[package]] name = "execnet" -version = "2.1.1" +version = "2.1.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/bb/ff/b4c0dc78fbe20c3e59c0c7334de0c27eb4001a2b2017999af398bf730817/execnet-2.1.1.tar.gz", hash = "sha256:5189b52c6121c24feae288166ab41b32549c7e2348652736540b9e6e7d4e72e3", size = 166524 } +sdist = { url = "https://files.pythonhosted.org/packages/bf/89/780e11f9588d9e7128a3f87788354c7946a9cbb1401ad38a48c4db9a4f07/execnet-2.1.2.tar.gz", hash = "sha256:63d83bfdd9a23e35b9c6a3261412324f964c2ec8dcd8d3c6916ee9373e0befcd", size = 166622 } wheels = [ - { url = "https://files.pythonhosted.org/packages/43/09/2aea36ff60d16dd8879bdb2f5b3ee0ba8d08cbbdcdfe870e695ce3784385/execnet-2.1.1-py3-none-any.whl", hash = "sha256:26dee51f1b80cebd6d0ca8e74dd8745419761d3bef34163928cbebbdc4749fdc", size = 40612 }, + { url = 
"https://files.pythonhosted.org/packages/ab/84/02fc1827e8cdded4aa65baef11296a9bbe595c474f0d6d758af082d849fd/execnet-2.1.2-py3-none-any.whl", hash = "sha256:67fba928dd5a544b783f6056f449e5e3931a5c378b128bc18501f7ea79e296ec", size = 40708 }, ] [[package]] @@ -2543,14 +2543,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/aa/51/2cb4468b3448a8385ebcd15059d325c9ce67df4e2758d133ab9442b19834/jiter-0.12.0-cp314-cp314t-win32.whl", hash = "sha256:8bbcfe2791dfdb7c5e48baf646d37a6a3dcb5a97a032017741dea9f817dca183", size = 205110 }, { url = "https://files.pythonhosted.org/packages/b2/c5/ae5ec83dec9c2d1af805fd5fe8f74ebded9c8670c5210ec7820ce0dbeb1e/jiter-0.12.0-cp314-cp314t-win_amd64.whl", hash = "sha256:2fa940963bf02e1d8226027ef461e36af472dea85d36054ff835aeed944dd873", size = 205223 }, { url = "https://files.pythonhosted.org/packages/97/9a/3c5391907277f0e55195550cf3fa8e293ae9ee0c00fb402fec1e38c0c82f/jiter-0.12.0-cp314-cp314t-win_arm64.whl", hash = "sha256:506c9708dd29b27288f9f8f1140c3cb0e3d8ddb045956d7757b1fa0e0f39a473", size = 185564 }, - { url = "https://files.pythonhosted.org/packages/fe/54/5339ef1ecaa881c6948669956567a64d2670941925f245c434f494ffb0e5/jiter-0.12.0-graalpy311-graalpy242_311_native-macosx_10_12_x86_64.whl", hash = "sha256:4739a4657179ebf08f85914ce50332495811004cc1747852e8b2041ed2aab9b8", size = 311144 }, - { url = "https://files.pythonhosted.org/packages/27/74/3446c652bffbd5e81ab354e388b1b5fc1d20daac34ee0ed11ff096b1b01a/jiter-0.12.0-graalpy311-graalpy242_311_native-macosx_11_0_arm64.whl", hash = "sha256:41da8def934bf7bec16cb24bd33c0ca62126d2d45d81d17b864bd5ad721393c3", size = 305877 }, - { url = "https://files.pythonhosted.org/packages/a1/f4/ed76ef9043450f57aac2d4fbeb27175aa0eb9c38f833be6ef6379b3b9a86/jiter-0.12.0-graalpy311-graalpy242_311_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c44ee814f499c082e69872d426b624987dbc5943ab06e9bbaa4f81989fdb79e", size = 340419 }, - { url = 
"https://files.pythonhosted.org/packages/21/01/857d4608f5edb0664aa791a3d45702e1a5bcfff9934da74035e7b9803846/jiter-0.12.0-graalpy311-graalpy242_311_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cd2097de91cf03eaa27b3cbdb969addf83f0179c6afc41bbc4513705e013c65d", size = 347212 }, - { url = "https://files.pythonhosted.org/packages/cb/f5/12efb8ada5f5c9edc1d4555fe383c1fb2eac05ac5859258a72d61981d999/jiter-0.12.0-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:e8547883d7b96ef2e5fe22b88f8a4c8725a56e7f4abafff20fd5272d634c7ecb", size = 309974 }, - { url = "https://files.pythonhosted.org/packages/85/15/d6eb3b770f6a0d332675141ab3962fd4a7c270ede3515d9f3583e1d28276/jiter-0.12.0-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:89163163c0934854a668ed783a2546a0617f71706a2551a4a0666d91ab365d6b", size = 304233 }, - { url = "https://files.pythonhosted.org/packages/8c/3e/e7e06743294eea2cf02ced6aa0ff2ad237367394e37a0e2b4a1108c67a36/jiter-0.12.0-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d96b264ab7d34bbb2312dedc47ce07cd53f06835eacbc16dde3761f47c3a9e7f", size = 338537 }, - { url = "https://files.pythonhosted.org/packages/2f/9c/6753e6522b8d0ef07d3a3d239426669e984fb0eba15a315cdbc1253904e4/jiter-0.12.0-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c24e864cb30ab82311c6425655b0cdab0a98c5d973b065c66a3f020740c2324c", size = 346110 }, ] [[package]] @@ -2729,7 +2721,7 @@ wheels = [ [[package]] name = "langfuse" -version = "3.9.2" +version = "3.9.3" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "backoff", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -2743,9 +2735,9 @@ dependencies = [ { name = "requests", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "wrapt", marker = "sys_platform == 
'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/75/6e/8c3ca648cf1640ac30021bbb18bde629b03a5c44763ed442b44083ed4a36/langfuse-3.9.2.tar.gz", hash = "sha256:4ab2c79c2309d146f207bc65603e0ca26ef540690296cebaec594c9faec899e2", size = 206089 } +sdist = { url = "https://files.pythonhosted.org/packages/dd/fe/80bda024047570ed6d918ee7481888ee572659d04f40ff49b58701e52245/langfuse-3.9.3.tar.gz", hash = "sha256:8081691d40325b7022b07fc4e5884853c4a4ffe03b05c702490788b9a1a125df", size = 206118 } wheels = [ - { url = "https://files.pythonhosted.org/packages/0f/9d/8a6099a271dfeda9f7cb817911c6f87fe84256762d447f11aa2f8efd5809/langfuse-3.9.2-py3-none-any.whl", hash = "sha256:9783af051ec07408b2e97548328b2218c9b7aad22e8bcc3d274fdea3d641b621", size = 374873 }, + { url = "https://files.pythonhosted.org/packages/01/bc/af86791c761b820097e3ec2faa747bc6a507f6bf0cd6d7b7bbe4d087a6d6/langfuse-3.9.3-py3-none-any.whl", hash = "sha256:0afe88773f20fc67636d8a52dd3a5fb4e10f1c6e226f2c99ace8c65190fb5697", size = 374759 }, ] [[package]] @@ -4540,14 +4532,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/5c/96/5fb7d8c3c17bc8c62fdb031c47d77a1af698f1d7a406b0f79aaa1338f9ad/pydantic_core-2.41.5-cp314-cp314t-win32.whl", hash = "sha256:b4ececa40ac28afa90871c2cc2b9ffd2ff0bf749380fbdf57d165fd23da353aa", size = 1988906 }, { url = "https://files.pythonhosted.org/packages/22/ed/182129d83032702912c2e2d8bbe33c036f342cc735737064668585dac28f/pydantic_core-2.41.5-cp314-cp314t-win_amd64.whl", hash = "sha256:80aa89cad80b32a912a65332f64a4450ed00966111b6615ca6816153d3585a8c", size = 1981607 }, { url = "https://files.pythonhosted.org/packages/9f/ed/068e41660b832bb0b1aa5b58011dea2a3fe0ba7861ff38c4d4904c1c1a99/pydantic_core-2.41.5-cp314-cp314t-win_arm64.whl", hash = "sha256:35b44f37a3199f771c3eaa53051bc8a70cd7b54f333531c59e29fd4db5d15008", size = 1974769 }, - { url = 
"https://files.pythonhosted.org/packages/11/72/90fda5ee3b97e51c494938a4a44c3a35a9c96c19bba12372fb9c634d6f57/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_10_12_x86_64.whl", hash = "sha256:b96d5f26b05d03cc60f11a7761a5ded1741da411e7fe0909e27a5e6a0cb7b034", size = 2115441 }, - { url = "https://files.pythonhosted.org/packages/1f/53/8942f884fa33f50794f119012dc6a1a02ac43a56407adaac20463df8e98f/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_11_0_arm64.whl", hash = "sha256:634e8609e89ceecea15e2d61bc9ac3718caaaa71963717bf3c8f38bfde64242c", size = 1930291 }, - { url = "https://files.pythonhosted.org/packages/79/c8/ecb9ed9cd942bce09fc888ee960b52654fbdbede4ba6c2d6e0d3b1d8b49c/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:93e8740d7503eb008aa2df04d3b9735f845d43ae845e6dcd2be0b55a2da43cd2", size = 1948632 }, - { url = "https://files.pythonhosted.org/packages/2e/1b/687711069de7efa6af934e74f601e2a4307365e8fdc404703afc453eab26/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f15489ba13d61f670dcc96772e733aad1a6f9c429cc27574c6cdaed82d0146ad", size = 2138905 }, - { url = "https://files.pythonhosted.org/packages/09/32/59b0c7e63e277fa7911c2fc70ccfb45ce4b98991e7ef37110663437005af/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:7da7087d756b19037bc2c06edc6c170eeef3c3bafcb8f532ff17d64dc427adfd", size = 2110495 }, - { url = "https://files.pythonhosted.org/packages/aa/81/05e400037eaf55ad400bcd318c05bb345b57e708887f07ddb2d20e3f0e98/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:aabf5777b5c8ca26f7824cb4a120a740c9588ed58df9b2d196ce92fba42ff8dc", size = 1915388 }, - { url = 
"https://files.pythonhosted.org/packages/6e/0d/e3549b2399f71d56476b77dbf3cf8937cec5cd70536bdc0e374a421d0599/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c007fe8a43d43b3969e8469004e9845944f1a80e6acd47c150856bb87f230c56", size = 1942879 }, - { url = "https://files.pythonhosted.org/packages/f7/07/34573da085946b6a313d7c42f82f16e8920bfd730665de2d11c0c37a74b5/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76d0819de158cd855d1cbb8fcafdf6f5cf1eb8e470abe056d5d161106e38062b", size = 2139017 }, { url = "https://files.pythonhosted.org/packages/e6/b0/1a2aa41e3b5a4ba11420aba2d091b2d17959c8d1519ece3627c371951e73/pydantic_core-2.41.5-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b5819cd790dbf0c5eb9f82c73c16b39a65dd6dd4d1439dcdea7816ec9adddab8", size = 2103351 }, { url = "https://files.pythonhosted.org/packages/a4/ee/31b1f0020baaf6d091c87900ae05c6aeae101fa4e188e1613c80e4f1ea31/pydantic_core-2.41.5-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:5a4e67afbc95fa5c34cf27d9089bca7fcab4e51e57278d710320a70b956d1b9a", size = 1925363 }, { url = "https://files.pythonhosted.org/packages/e1/89/ab8e86208467e467a80deaca4e434adac37b10a9d134cd2f99b28a01e483/pydantic_core-2.41.5-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ece5c59f0ce7d001e017643d8d24da587ea1f74f6993467d85ae8a5ef9d4f42b", size = 2135615 }, @@ -4664,7 +4648,7 @@ wheels = [ [[package]] name = "pytest" -version = "9.0.0" +version = "9.0.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "colorama", marker = "sys_platform == 'win32'" }, @@ -4675,9 +4659,9 @@ dependencies = [ { name = "pygments", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "tomli", marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' 
and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/da/1d/eb34f286b164c5e431a810a38697409cca1112cee04b287bb56ac486730b/pytest-9.0.0.tar.gz", hash = "sha256:8f44522eafe4137b0f35c9ce3072931a788a21ee40a2ed279e817d3cc16ed21e", size = 1562764 } +sdist = { url = "https://files.pythonhosted.org/packages/07/56/f013048ac4bc4c1d9be45afd4ab209ea62822fb1598f40687e6bf45dcea4/pytest-9.0.1.tar.gz", hash = "sha256:3e9c069ea73583e255c3b21cf46b8d3c56f6e3a1a8f6da94ccb0fcf57b9d73c8", size = 1564125 } wheels = [ - { url = "https://files.pythonhosted.org/packages/72/99/cafef234114a3b6d9f3aaed0723b437c40c57bdb7b3e4c3a575bc4890052/pytest-9.0.0-py3-none-any.whl", hash = "sha256:e5ccdf10b0bac554970ee88fc1a4ad0ee5d221f8ef22321f9b7e4584e19d7f96", size = 373364 }, + { url = "https://files.pythonhosted.org/packages/0b/8b/6300fb80f858cda1c51ffa17075df5d846757081d11ab4aa35cef9e6258b/pytest-9.0.1-py3-none-any.whl", hash = "sha256:67be0030d194df2dfa7b556f2e56fb3c3315bd5c8822c6951162b92b32ce7dad", size = 373668 }, ] [[package]]