diff --git a/sdk/ai/azure-ai-projects/CHANGELOG.md b/sdk/ai/azure-ai-projects/CHANGELOG.md index 44d721bfc338..f7a9bcb75a98 100644 --- a/sdk/ai/azure-ai-projects/CHANGELOG.md +++ b/sdk/ai/azure-ai-projects/CHANGELOG.md @@ -35,7 +35,8 @@ ### Sample updates -* Add and update samples for `WebSearchTool` and `WebSearchPreviewTool` +* Add and update samples for `AzureFunctionTool`, `WebSearchTool`, and `WebSearchPreviewTool` +* All samples for agent tools call `responses.create` API with `agent_reference` instead of `agent` ## 2.0.0b3 (2026-01-06) diff --git a/sdk/ai/azure-ai-projects/README.md b/sdk/ai/azure-ai-projects/README.md index 68dd4424cc0b..adf4d58436d8 100644 --- a/sdk/ai/azure-ai-projects/README.md +++ b/sdk/ai/azure-ai-projects/README.md @@ -8,6 +8,7 @@ resources in your Microsoft Foundry Project. Use it to: * Agent Memory Search * Agent-to-Agent (A2A) * Azure AI Search + * Azure Functions * Bing Custom Search * Bing Grounding * Browser Automation @@ -161,7 +162,7 @@ with project_client.get_openai_client() as openai_client: response = openai_client.responses.create( conversation=conversation.id, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) print(f"Response output: {response.output_text}") @@ -173,7 +174,7 @@ with project_client.get_openai_client() as openai_client: response = openai_client.responses.create( conversation=conversation.id, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) print(f"Response output: {response.output_text}") @@ -287,7 +288,9 @@ Discover up-to-date web content with the GA Web Search tool or try the Web Searc ```python -tool = WebSearchTool(user_location=WebSearchApproximateLocation(country="GB", city="London", region="London")) +tool = WebSearchTool( + user_location=WebSearchApproximateLocation(type="approximate", country="GB", city="London", region="London") +) ``` @@ -413,6 +416,45 @@ tool = FunctionTool( See the full sample in file `\agents\tools\sample_agent_function_tool.py` in the [Samples][samples] folder. +**Azure Functions** + +Integrate Azure Functions with agents to extend capabilities via serverless compute. Functions are invoked through Azure Storage Queue triggers, allowing asynchronous execution of custom logic. + + + +```python +tool = AzureFunctionTool( + azure_function=AzureFunctionDefinition( + input_binding=AzureFunctionBinding( + storage_queue=AzureFunctionStorageQueue( + queue_name=os.environ["STORAGE_INPUT_QUEUE_NAME"], + queue_service_endpoint=os.environ["STORAGE_QUEUE_SERVICE_ENDPOINT"], + ) + ), + output_binding=AzureFunctionBinding( + storage_queue=AzureFunctionStorageQueue( + queue_name=os.environ["STORAGE_OUTPUT_QUEUE_NAME"], + queue_service_endpoint=os.environ["STORAGE_QUEUE_SERVICE_ENDPOINT"], + ) + ), + function=AzureFunctionDefinitionFunction( + name="queue_trigger", + description="Get weather for a given location", + parameters={ + "type": "object", + "properties": {"location": {"type": "string", "description": "location to determine weather for"}}, + }, + ), + ) +) +``` + + + +*After calling `responses.create()`, the agent enqueues function arguments to the input queue. 
Your Azure Function processes the request and returns results via the output queue.* + +See the full sample in file `\agents\tools\sample_agent_azure_function.py` and the Azure Function implementation in `\agents\tools\get_weather_func_app.py` in the [Samples][samples] folder. + * **Memory Search Tool** The Memory Store Tool adds Memory to an Agent, allowing the Agent's AI model to search for past information related to the current user prompt. diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_basic.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_basic.py index 2862b067c208..a3da6b6cc721 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_basic.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_basic.py @@ -59,7 +59,7 @@ response = openai_client.responses.create( conversation=conversation.id, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) print(f"Response output: {response.output_text}") @@ -71,7 +71,7 @@ response = openai_client.responses.create( conversation=conversation.id, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) print(f"Response output: {response.output_text}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_basic_async.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_basic_async.py index fd5f03e1101f..16492bca4bc1 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_basic_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_basic_async.py @@ -61,7 +61,7 @@ async def main() -> None: response = await openai_client.responses.create( conversation=conversation.id, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) print(f"Response output: {response.output_text}") @@ -73,7 +73,7 @@ async def main() -> None: response = await openai_client.responses.create( conversation=conversation.id, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) print(f"Response output: {response.output_text}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_retrieve_basic.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_retrieve_basic.py index 8d8c190c57d5..e4c065f8b9e1 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_retrieve_basic.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_retrieve_basic.py @@ -61,6 +61,8 @@ print(f"Added a user message to the conversation") response = openai_client.responses.create( - conversation=conversation.id, extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, input="" + conversation=conversation.id, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, + input="", ) print(f"Response output: {response.output_text}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_retrieve_basic_async.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_retrieve_basic_async.py index 0c2d4f38b8d4..7925a87f5f8b 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_retrieve_basic_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_retrieve_basic_async.py @@ -64,7 +64,7 @@ async def main(): response = await 
openai_client.responses.create( conversation=conversation.id, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) print(f"Response output: {response.output_text}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_stream_events.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_stream_events.py index f60e85650b83..aee3058b1d13 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_stream_events.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_stream_events.py @@ -59,7 +59,7 @@ with openai_client.responses.create( conversation=conversation.id, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, stream=True, ) as response_stream_events: diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_structured_output.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_structured_output.py index 461ed8ff4e29..c62485742cfa 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_structured_output.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_structured_output.py @@ -88,7 +88,7 @@ class CalendarEvent(BaseModel): response = openai_client.responses.create( conversation=conversation.id, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) print(f"Response output: {response.output_text}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_structured_output_async.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_structured_output_async.py index f7d8b2ce6ae7..ef99258bf8bf 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_structured_output_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_structured_output_async.py @@ -89,7 +89,7 @@ async def main() -> None: response = await openai_client.responses.create( conversation=conversation.id, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) print(f"Response output: {response.output_text}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_workflow_multi_agent.py b/sdk/ai/azure-ai-projects/samples/agents/sample_workflow_multi_agent.py index 6fb3d9f8f065..61fd40e27700 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_workflow_multi_agent.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_workflow_multi_agent.py @@ -148,7 +148,7 @@ stream = openai_client.responses.create( conversation=conversation.id, - extra_body={"agent": {"name": workflow.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": workflow.name, "type": "agent_reference"}}, input="1 + 1 = ?", stream=True, # REMOVE ME? 
metadata={"x-ms-debug-mode-enabled": "1"}, diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_workflow_multi_agent_async.py b/sdk/ai/azure-ai-projects/samples/agents/sample_workflow_multi_agent_async.py index db8919c0f426..9b71d85182c1 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_workflow_multi_agent_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_workflow_multi_agent_async.py @@ -148,7 +148,7 @@ async def main(): stream = await openai_client.responses.create( conversation=conversation.id, - extra_body={"agent": {"name": workflow.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": workflow.name, "type": "agent_reference"}}, input="1 + 1 = ?", stream=True, # Remove me? metadata={"x-ms-debug-mode-enabled": "1"}, diff --git a/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_azure_monitor_tracing.py index e79fab387673..dc8675c9ba59 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_azure_monitor_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_azure_monitor_tracing.py @@ -72,7 +72,7 @@ response = openai_client.responses.create( conversation=conversation.id, - extra_body={"agent": {"name": agent.name, "id": agent.id, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "id": agent.id, "type": "agent_reference"}}, input="What is the size of France in square miles?", ) print(f"Response output: {response.output_text}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_console_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_console_tracing.py index 5abdba346def..6eb1d5f40da0 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_console_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/agents/telemetry/sample_agent_basic_with_console_tracing.py @@ -106,7 +106,7 @@ def display_conversation_item(item: Any) -> None: request = "Hello, tell me a joke." response = openai_client.responses.create( conversation=conversation.id, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, input=request, ) print(f"Answer: {response.output}") @@ -114,7 +114,7 @@ def display_conversation_item(item: Any) -> None: response = openai_client.responses.create( conversation=conversation.id, input="Tell another one about computers.", - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) print(f"Answer: {response.output}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/get_weather_func_app.py b/sdk/ai/azure-ai-projects/samples/agents/tools/get_weather_func_app.py new file mode 100644 index 000000000000..593fcc97de6c --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/get_weather_func_app.py @@ -0,0 +1,38 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +This is a sample Azure Function app used by sample_agent_azure_function.py +to demonstrate how to integrate Azure Functions with AI Agents using queue triggers. 
+""" + +# import azure.functions as func +# import logging +# import json + +# app = func.FunctionApp() + +# @app.queue_trigger(arg_name="msg", queue_name="input", connection="STORAGE_CONNECTION") +# @app.queue_output(arg_name="outputQueue", queue_name="output", connection="STORAGE_CONNECTION") +# def queue_trigger(msg: func.QueueMessage, outputQueue: func.Out[str]): +# try: +# messagepayload: dict = json.loads(msg.get_body().decode("utf-8")) +# logging.info(f"The function receives the following message: {json.dumps(messagepayload)}") + +# function_args: dict = messagepayload.get("function_args", {}) +# location: str = function_args.get("location") + + +# weather_result = f"Weather is {len(location)} degrees and sunny in {location}" +# response_message = { +# "Value": weather_result, +# "CorrelationId": messagepayload["CorrelationId"] +# } +# logging.info(f"The function returns the following message through the {outputQueue} queue: {json.dumps(response_message)}") + +# outputQueue.set(json.dumps(response_message)) + +# except Exception as e: +# logging.error(f"Error processing message: {e}") \ No newline at end of file diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_ai_search.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_ai_search.py index eaa05ad3fda4..8783568fa4f0 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_ai_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_ai_search.py @@ -84,7 +84,7 @@ stream=True, tool_choice="required", input=user_input, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) for event in stream_response: diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_azure_function.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_azure_function.py new file mode 100644 index 000000000000..a56cdb0211cd --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_azure_function.py @@ -0,0 +1,104 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to create an AI agent with Microsoft Fabric capabilities + using the MicrosoftFabricPreviewTool and synchronous Azure AI Projects client. The agent can query + Fabric data sources and provide responses based on data analysis. + +USAGE: + python sample_agent_fabric.py + + Before running the sample: + + pip install "azure-ai-projects>=2.0.0b1" python-dotenv + + Set these environment variables with your own values: + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Microsoft Foundry portal. + 2) AZURE_AI_MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Microsoft Foundry project. + 3) STORAGE_INPUT_QUEUE_NAME - The name of the Azure Storage Queue to use for input and output in the Azure Function tool. + 4) STORAGE_OUTPUT_QUEUE_NAME - The name of the Azure Storage Queue to use for output in the Azure Function tool. + 5) STORAGE_QUEUE_SERVICE_ENDPOINT - The endpoint of the Azure Storage Queue service. 
+""" + +import os +from dotenv import load_dotenv +from azure.identity import DefaultAzureCredential +from azure.ai.projects import AIProjectClient +from azure.ai.projects.models import ( + AzureFunctionBinding, + AzureFunctionDefinition, + AzureFunctionStorageQueue, + AzureFunctionDefinitionFunction, + AzureFunctionTool, + PromptAgentDefinition, +) + +load_dotenv() + +agent = None + +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] + +with ( + DefaultAzureCredential() as credential, + AIProjectClient(endpoint=endpoint, credential=credential) as project_client, + project_client.get_openai_client() as openai_client, +): + + + # [START tool_declaration] + tool = AzureFunctionTool( + azure_function=AzureFunctionDefinition( + input_binding=AzureFunctionBinding( + storage_queue=AzureFunctionStorageQueue( + queue_name=os.environ["STORAGE_INPUT_QUEUE_NAME"], + queue_service_endpoint=os.environ["STORAGE_QUEUE_SERVICE_ENDPOINT"], + ) + ), + output_binding=AzureFunctionBinding( + storage_queue=AzureFunctionStorageQueue( + queue_name=os.environ["STORAGE_OUTPUT_QUEUE_NAME"], + queue_service_endpoint=os.environ["STORAGE_QUEUE_SERVICE_ENDPOINT"], + ) + ), + function=AzureFunctionDefinitionFunction( + name="queue_trigger", + description="Get weather for a given location", + parameters={ + "type": "object", + "properties": {"location": {"type": "string", "description": "location to determine weather for"}}, + }, + ), + ) + ) + # [END tool_declaration] + + agent = project_client.agents.create_version( + agent_name="MyAgent", + definition=PromptAgentDefinition( + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + instructions="You are a helpful assistant.", + tools=[tool], + ), + ) + print(f"Agent created (id: {agent.id}, name: {agent.name}, version: {agent.version})") + + user_input = "What is the weather in Seattle?" 
+ + response = openai_client.responses.create( + tool_choice="required", + input=user_input, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, + ) + + print(f"Response output: {response.output_text}") + + print("\nCleaning up...") + project_client.agents.delete_version(agent_name=agent.name, agent_version=agent.version) + print("Agent deleted") diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_bing_custom_search.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_bing_custom_search.py index d5bbee261047..0ca7be0e8ed5 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_bing_custom_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_bing_custom_search.py @@ -79,7 +79,7 @@ stream_response = openai_client.responses.create( stream=True, input=user_input, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) for event in stream_response: diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_bing_grounding.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_bing_grounding.py index 891250783659..e6a122d4b4db 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_bing_grounding.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_bing_grounding.py @@ -78,7 +78,7 @@ stream=True, tool_choice="required", input="What is today's date and whether in Seattle?", - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) for event in stream_response: diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_browser_automation.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_browser_automation.py index 87d444c6b623..21c111adbde7 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_browser_automation.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_browser_automation.py @@ -79,7 +79,7 @@ Enter the value 'MSFT', to get information about the Microsoft stock price. At the top of the resulting page you will see a default chart of Microsoft stock price. 
Click on 'YTD' at the top of that chart, and report the percent value that shows up just below it.""", - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) for event in stream_response: diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter.py index 0f0c522bc61f..e37c37f4074d 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter.py @@ -73,7 +73,7 @@ response = openai_client.responses.create( conversation=conversation.id, input="Could you please create bar chart in TRANSPORTATION sector for the operating profit from the uploaded csv file and provide file to me?", - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) print(f"Response completed (id: {response.id})") diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter_async.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter_async.py index 3aafc79626e7..9fbbb7f1711e 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_code_interpreter_async.py @@ -75,7 +75,7 @@ async def main() -> None: response = await openai_client.responses.create( conversation=conversation.id, input="Could you please create bar chart in TRANSPORTATION sector for the operating profit from the uploaded csv file and provide file to me?", - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) print(f"Response completed (id: {response.id})") diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_computer_use.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_computer_use.py index e439bd388b3b..dd0dcfd8080a 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_computer_use.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_computer_use.py @@ -68,7 +68,7 @@ # [END tool_declaration] agent = project_client.agents.create_version( - agent_name="ComputerUseAgent", + agent_name="MyAgent", definition=PromptAgentDefinition( model=os.environ.get("COMPUTER_USE_MODEL_DEPLOYMENT_NAME", "computer-use-preview"), instructions=""" @@ -101,7 +101,7 @@ ], } ], - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, truncation="auto", ) @@ -151,7 +151,7 @@ }, } ], - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, truncation="auto", ) diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_computer_use_async.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_computer_use_async.py index 86ae06ce232f..b0123554212e 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_computer_use_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_computer_use_async.py @@ -70,7 +70,7 @@ async def main(): computer_use_tool = ComputerUsePreviewTool(display_width=1026, 
display_height=769, environment="windows") agent = await project_client.agents.create_version( - agent_name="ComputerUseAgent", + agent_name="MyAgent", definition=PromptAgentDefinition( model=os.environ.get("COMPUTER_USE_MODEL_DEPLOYMENT_NAME", "computer-use-preview"), instructions=""" @@ -103,7 +103,7 @@ async def main(): ], } ], - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, truncation="auto", ) @@ -155,7 +155,7 @@ async def main(): }, } ], - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, truncation="auto", ) diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_fabric.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_fabric.py index 4a7639aaa308..771148bc1b05 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_fabric.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_fabric.py @@ -71,7 +71,7 @@ response = openai_client.responses.create( tool_choice="required", input=user_input, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) print(f"Response output: {response.output_text}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search.py index 43c71427f6e9..c008a3f1339e 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search.py @@ -76,7 +76,7 @@ response = openai_client.responses.create( conversation=conversation.id, input="Tell me about Contoso products", - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) print(f"Agent response: {response.output_text}") print("\nCleaning up...") diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search_in_stream.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search_in_stream.py index c1a95213708c..e95d03aa3df9 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search_in_stream.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search_in_stream.py @@ -89,7 +89,7 @@ "content": "Tell me about Contoso products and their features in detail. 
Please search through the available documentation.", }, ], - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) print("Processing streaming file search results...\n") @@ -117,7 +117,7 @@ input=[ {"role": "user", "content": "Tell me about Smart Eyewear and its features."}, ], - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) print("Processing follow-up streaming response...\n") diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search_in_stream_async.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search_in_stream_async.py index 746c83f58837..22a30e9fe085 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search_in_stream_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_file_search_in_stream_async.py @@ -90,7 +90,7 @@ async def main() -> None: }, ], tool_choice="required", - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) print("Processing streaming file search results...\n") @@ -122,7 +122,7 @@ async def main() -> None: }, ], tool_choice="required", - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) print("Processing follow-up streaming response...\n") diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_function_tool.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_function_tool.py index 8c8a7bc4d281..39e3065039c7 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_function_tool.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_function_tool.py @@ -78,7 +78,7 @@ def get_horoscope(sign: str) -> str: # Prompt the model with tools defined response = openai_client.responses.create( input="What is my horoscope? I am an Aquarius.", - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) print(f"Response output: {response.output_text}") @@ -105,7 +105,7 @@ def get_horoscope(sign: str) -> str: response = openai_client.responses.create( input=input_list, previous_response_id=response.id, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) print(f"Agent response: {response.output_text}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_function_tool_async.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_function_tool_async.py index 4fc0ec2756ce..0f76b0d18ebe 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_function_tool_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_function_tool_async.py @@ -79,7 +79,7 @@ async def main(): # Prompt the model with tools defined response = await openai_client.responses.create( input="What is my horoscope? 
I am an Aquarius.", - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) print(f"Response output: {response.output_text}") @@ -106,7 +106,7 @@ async def main(): response = await openai_client.responses.create( input=input_list, previous_response_id=response.id, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) print(f"Agent response: {response.output_text}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_image_generation.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_image_generation.py index 815da33ac701..52b7ca29f8e5 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_image_generation.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_image_generation.py @@ -84,7 +84,7 @@ extra_headers={ "x-ms-oai-image-generation-deployment": image_generation_model }, # this is required at the moment for image generation - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) print(f"Response created: {response.id}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_image_generation_async.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_image_generation_async.py index ae97e3817ccc..5de56d2dacd4 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_image_generation_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_image_generation_async.py @@ -79,7 +79,7 @@ async def main(): extra_headers={ "x-ms-oai-image-generation-deployment": image_generation_model }, # this is required at the moment for image generation - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) print(f"Response created: {response.id}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp.py index f914919afc3f..758abe9ddd44 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp.py @@ -66,7 +66,7 @@ response = openai_client.responses.create( conversation=conversation.id, input="Please summarize the Azure REST API specifications Readme", - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) # Process any MCP approval requests that were generated @@ -92,7 +92,7 @@ response = openai_client.responses.create( input=input_list, previous_response_id=response.id, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) print(f"Agent response: {response.output_text}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_async.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_async.py index 292a22509019..56cb3420308c 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_async.py @@ -70,7 +70,7 @@ async def main(): response = await openai_client.responses.create( 
conversation=conversation.id, input="Please summarize the Azure REST API specifications Readme", - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) # Process any MCP approval requests that were generated @@ -96,7 +96,7 @@ async def main(): response = await openai_client.responses.create( input=input_list, previous_response_id=response.id, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) print(f"Agent response: {response.output_text}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_with_project_connection.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_with_project_connection.py index f0422331df33..9a2a92e9a3a7 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_with_project_connection.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_with_project_connection.py @@ -72,7 +72,7 @@ response = openai_client.responses.create( conversation=conversation.id, input="What is my username in Github profile?", - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) # Process any MCP approval requests that were generated @@ -98,7 +98,7 @@ response = openai_client.responses.create( input=input_list, previous_response_id=response.id, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) print(f"Response: {response.output_text}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_with_project_connection_async.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_with_project_connection_async.py index 28072869aea8..578ccb2d2936 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_with_project_connection_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_mcp_with_project_connection_async.py @@ -74,7 +74,7 @@ async def main(): response = await openai_client.responses.create( conversation=conversation.id, input="What is my username in GitHub profile?", - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) # Process any MCP approval requests that were generated @@ -99,7 +99,7 @@ async def main(): response = await openai_client.responses.create( input=input_list, previous_response_id=response.id, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) print(f"Response: {response.output_text}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_memory_search.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_memory_search.py index 586fd1269ba2..4009037ab8a6 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_memory_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_memory_search.py @@ -116,7 +116,7 @@ response = openai_client.responses.create( input="I prefer dark roast coffee", conversation=conversation.id, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": 
"agent_reference"}}, ) print(f"Response output: {response.output_text}") @@ -132,7 +132,7 @@ new_response = openai_client.responses.create( input="Please order my usual coffee", conversation=new_conversation.id, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) print(f"Response output: {new_response.output_text}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_memory_search_async.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_memory_search_async.py index c5e26caddfce..40c22fc39694 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_memory_search_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_memory_search_async.py @@ -117,7 +117,7 @@ async def main() -> None: response = await openai_client.responses.create( input="I prefer dark roast coffee", conversation=conversation.id, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) print(f"Response output: {response.output_text}") @@ -133,7 +133,7 @@ async def main() -> None: new_response = await openai_client.responses.create( input="Please order my usual coffee", conversation=new_conversation.id, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) print(f"Response output: {new_response.output_text}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_openapi.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_openapi.py index 0d2865edcd3a..bf398a3de250 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_openapi.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_openapi.py @@ -74,7 +74,7 @@ response = openai_client.responses.create( input="Use the OpenAPI tool to print out, what is the weather in Seattle, WA today.", - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) print(f"Agent response: {response.output_text}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_openapi_with_project_connection.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_openapi_with_project_connection.py index 63cbc730908d..b8b9dbb5308c 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_openapi_with_project_connection.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_openapi_with_project_connection.py @@ -84,7 +84,7 @@ response = openai_client.responses.create( input="Recommend me 5 top hotels in paris, France", - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) # The response to the question may contain non ASCII letters. To avoid error, encode and re decode them. 
print(f"Response created: {response.output_text.encode().decode('ascii', errors='ignore')}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_sharepoint.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_sharepoint.py index dec10b425b54..6dcbd4c9a26c 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_sharepoint.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_sharepoint.py @@ -77,7 +77,7 @@ stream_response = openai_client.responses.create( stream=True, input=user_input, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) for event in stream_response: diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_to_agent.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_to_agent.py index 87fa94ab0649..3bc2b36ab620 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_to_agent.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_to_agent.py @@ -76,7 +76,7 @@ stream=True, tool_choice="required", input=user_input, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) for event in stream_response: diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search.py index e522f691c40e..5bd603f0a529 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search.py @@ -44,7 +44,9 @@ project_client.get_openai_client() as openai_client, ): # [START tool_declaration] - tool = WebSearchTool(user_location=WebSearchApproximateLocation(country="GB", city="London", region="London")) + tool = WebSearchTool( + user_location=WebSearchApproximateLocation(type="approximate", country="GB", city="London", region="London") + ) # [END tool_declaration] # Create Agent with web search tool agent = project_client.agents.create_version( @@ -68,7 +70,7 @@ stream=True, input=user_input, tool_choice="required", - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) for event in stream_response: diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search_preview.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search_preview.py index 68c955e0f851..fdc7352baa97 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search_preview.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search_preview.py @@ -64,7 +64,7 @@ stream=True, input=user_input, tool_choice="required", - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) for event in stream_response: diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search_with_custom_search.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search_with_custom_search.py index 7bff7f43527d..6db6a8cfe171 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search_with_custom_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_web_search_with_custom_search.py @@ -80,7 +80,7 @@ stream=True, 
input=user_input, tool_choice="required", - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) for event in stream_response: diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_response_evaluation.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_response_evaluation.py index 2828cb338151..e139c416d856 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_response_evaluation.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_response_evaluation.py @@ -63,7 +63,7 @@ response = openai_client.responses.create( conversation=conversation.id, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) print(f"Response output: {response.output_text} (id: {response.id})") diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_response_evaluation_with_function_tool.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_response_evaluation_with_function_tool.py index ecc232f0ebcb..769c3e84104c 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_response_evaluation_with_function_tool.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_response_evaluation_with_function_tool.py @@ -87,7 +87,7 @@ def get_horoscope(sign: str) -> str: # Prompt the model with tools defined response = openai_client.responses.create( input="What is my horoscope? I am an Aquarius.", - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) print(f"Response output: {response.output_text}") @@ -114,7 +114,7 @@ def get_horoscope(sign: str) -> str: response = openai_client.responses.create( input=input_list, previous_response_id=response.id, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) print(f"Response output: {response.output_text} (id: {response.id})") diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_continuous_evaluation_rule.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_continuous_evaluation_rule.py index fe55482effea..73816d8ab30a 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_continuous_evaluation_rule.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_continuous_evaluation_rule.py @@ -106,7 +106,7 @@ response = openai_client.responses.create( conversation=conversation.id, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) print(f"Response output: {response.output_text}") @@ -122,7 +122,7 @@ response = openai_client.responses.create( conversation=conversation.id, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) print(f"Response output: {response.output_text}") diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor.py index f1aac0ec12fd..409556e5db59 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor.py +++ 
b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor.py @@ -1024,7 +1024,7 @@ def _test_sync_function_tool_with_content_recording_non_streaming_impl( response = client.responses.create( conversation=conversation.id, input="What's the weather in Seattle?", - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, stream=False, ) function_calls = [item for item in response.output if item.type == "function_call"] @@ -1047,7 +1047,7 @@ def _test_sync_function_tool_with_content_recording_non_streaming_impl( response2 = client.responses.create( conversation=conversation.id, input=input_list, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, stream=False, ) assert hasattr(response2, "output") @@ -1283,7 +1283,7 @@ def _test_sync_function_tool_with_content_recording_streaming_impl( stream = client.responses.create( conversation=conversation.id, input="What's the weather in Seattle?", - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, stream=True, ) # Consume the stream and collect function calls @@ -1321,7 +1321,7 @@ def _test_sync_function_tool_with_content_recording_streaming_impl( stream2 = client.responses.create( conversation=conversation.id, input=input_list, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, stream=True, ) # Consume the second stream @@ -1607,7 +1607,7 @@ def _test_sync_function_tool_without_content_recording_non_streaming_impl( response = client.responses.create( conversation=conversation.id, input="What's the weather in Seattle?", - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, stream=False, ) function_calls = [item for item in response.output if item.type == "function_call"] @@ -1630,7 +1630,7 @@ def _test_sync_function_tool_without_content_recording_non_streaming_impl( response2 = client.responses.create( conversation=conversation.id, input=input_list, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, stream=False, ) assert hasattr(response2, "output") @@ -1843,7 +1843,7 @@ def _test_sync_function_tool_without_content_recording_streaming_impl( stream = client.responses.create( conversation=conversation.id, input="What's the weather in Seattle?", - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, stream=True, ) # Consume the stream and collect function calls @@ -1882,7 +1882,7 @@ def _test_sync_function_tool_without_content_recording_streaming_impl( stream2 = client.responses.create( conversation=conversation.id, input=input_list, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, stream=True, ) # Consume the second stream @@ -2161,7 +2161,7 @@ def test_sync_function_tool_list_conversation_items_with_content_recording(self, response = client.responses.create( conversation=conversation.id, input="What's the weather in 
Seattle?", - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, stream=False, ) @@ -2182,7 +2182,7 @@ def test_sync_function_tool_list_conversation_items_with_content_recording(self, response2 = client.responses.create( conversation=conversation.id, input=input_list, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, stream=False, ) @@ -2311,7 +2311,7 @@ def test_sync_function_tool_list_conversation_items_without_content_recording(se response = client.responses.create( conversation=conversation.id, input="What's the weather in Seattle?", - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, stream=False, ) @@ -2332,7 +2332,7 @@ def test_sync_function_tool_list_conversation_items_without_content_recording(se response2 = client.responses.create( conversation=conversation.id, input=input_list, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, stream=False, ) @@ -5055,7 +5055,7 @@ def test_workflow_agent_non_streaming_with_content_recording(self, **kwargs): response = openai_client.responses.create( conversation=conversation.id, - extra_body={"agent": {"name": workflow_agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": workflow_agent.name, "type": "agent_reference"}}, input="1 + 1 = ?", stream=False, ) @@ -5175,7 +5175,7 @@ def test_workflow_agent_non_streaming_without_content_recording(self, **kwargs): response = openai_client.responses.create( conversation=conversation.id, - extra_body={"agent": {"name": workflow_agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": workflow_agent.name, "type": "agent_reference"}}, input="Test workflow", stream=False, ) @@ -5371,7 +5371,7 @@ def test_workflow_agent_streaming_with_content_recording(self, **kwargs): stream = openai_client.responses.create( conversation=conversation.id, - extra_body={"agent": {"name": workflow_agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": workflow_agent.name, "type": "agent_reference"}}, input="1 + 1 = ?", stream=True, ) @@ -5494,7 +5494,7 @@ def test_workflow_agent_streaming_without_content_recording(self, **kwargs): stream = openai_client.responses.create( conversation=conversation.id, - extra_body={"agent": {"name": workflow_agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": workflow_agent.name, "type": "agent_reference"}}, input="Test workflow", stream=True, ) @@ -5605,7 +5605,7 @@ def test_prompt_agent_with_responses_non_streaming(self, **kwargs): # Create response with agent name and id response = client.responses.create( conversation=conversation.id, - extra_body={"agent": {"name": agent.name, "id": agent.id, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "id": agent.id, "type": "agent_reference"}}, input="What is the capital of France?", ) @@ -5675,7 +5675,7 @@ def test_prompt_agent_with_responses_streaming(self, **kwargs): # Create streaming response with agent name and id stream = client.responses.create( conversation=conversation.id, - extra_body={"agent": {"name": agent.name, "id": agent.id, "type": "agent_reference"}}, + 
extra_body={"agent_reference": {"name": agent.name, "id": agent.id, "type": "agent_reference"}}, input="What is the capital of France?", stream=True, ) diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_async.py index 75c082f5930a..b68aa26aeabb 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_async.py @@ -457,7 +457,7 @@ async def _test_async_function_tool_with_content_recording_streaming_impl( stream = await client.responses.create( conversation=conversation.id, input="What's the weather in Seattle?", - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, stream=True, ) # Consume the stream and collect function calls @@ -495,7 +495,7 @@ async def _test_async_function_tool_with_content_recording_streaming_impl( stream2 = await client.responses.create( conversation=conversation.id, input=input_list, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, stream=True, ) # Consume the second stream @@ -718,7 +718,7 @@ async def _test_async_function_tool_without_content_recording_streaming_impl( stream = await client.responses.create( conversation=conversation.id, input="What\\'s the weather in Seattle?", - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, stream=True, ) # Consume the stream and collect function calls @@ -757,7 +757,7 @@ async def _test_async_function_tool_without_content_recording_streaming_impl( stream2 = await client.responses.create( conversation=conversation.id, input=input_list, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, stream=True, ) # Consume the second stream @@ -3241,7 +3241,7 @@ async def test_async_workflow_agent_non_streaming_with_content_recording(self, * response = await openai_client.responses.create( conversation=conversation.id, - extra_body={"agent": {"name": workflow_agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": workflow_agent.name, "type": "agent_reference"}}, input="Test workflow", stream=False, ) @@ -3357,7 +3357,7 @@ async def test_async_workflow_agent_non_streaming_without_content_recording(self response = await openai_client.responses.create( conversation=conversation.id, - extra_body={"agent": {"name": workflow_agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": workflow_agent.name, "type": "agent_reference"}}, input="Test workflow", stream=False, ) @@ -3480,7 +3480,7 @@ async def test_async_workflow_agent_streaming_with_content_recording(self, **kwa stream = await openai_client.responses.create( conversation=conversation.id, - extra_body={"agent": {"name": workflow_agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": workflow_agent.name, "type": "agent_reference"}}, input="Test workflow", stream=True, ) @@ -3601,7 +3601,7 @@ async def test_async_workflow_agent_streaming_without_content_recording(self, ** stream = await openai_client.responses.create( conversation=conversation.id, - 
extra_body={"agent": {"name": workflow_agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": workflow_agent.name, "type": "agent_reference"}}, input="Test workflow", stream=True, ) @@ -3730,7 +3730,7 @@ async def _test_async_prompt_agent_with_responses_non_streaming_impl( # Create response with agent name and id response = await client.responses.create( conversation=conversation.id, - extra_body={"agent": {"name": agent.name, "id": agent.id, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "id": agent.id, "type": "agent_reference"}}, input="What is the capital of France?", ) @@ -3868,7 +3868,7 @@ async def _test_async_prompt_agent_with_responses_streaming_impl( # Create streaming response with agent name and id stream = await client.responses.create( conversation=conversation.id, - extra_body={"agent": {"name": agent.name, "id": agent.id, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "id": agent.id, "type": "agent_reference"}}, input="What is the capital of France?", stream=True, ) diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation.py index 50a74dbeb08a..cb3036dd3ef3 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation.py @@ -101,7 +101,7 @@ def test_sync_browser_automation_non_streaming_with_content_recording(self, **kw news stories and provide a summary of the most recent one. """, stream=False, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) assert response.output is not None @@ -236,7 +236,7 @@ def test_sync_browser_automation_non_streaming_without_content_recording(self, * news stories and provide a summary of the most recent one. """, stream=False, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) self.exporter.force_flush() @@ -359,7 +359,7 @@ def test_sync_browser_automation_streaming_with_content_recording(self, **kwargs news stories and provide a summary of the most recent one. """, stream=True, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) for _ in stream: pass @@ -485,7 +485,7 @@ def test_sync_browser_automation_streaming_without_content_recording(self, **kwa news stories and provide a summary of the most recent one. 
""", stream=True, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) for _ in stream: pass diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation_async.py index d15c9346f77b..a4abc6aa33f9 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation_async.py @@ -104,7 +104,7 @@ async def test_async_browser_automation_non_streaming_with_content_recording(sel news stories and provide a summary of the most recent one. """, stream=False, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) assert response.output is not None @@ -235,7 +235,7 @@ async def test_async_browser_automation_non_streaming_without_content_recording( news stories and provide a summary of the most recent one. """, stream=False, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) self.exporter.force_flush() @@ -354,7 +354,7 @@ async def test_async_browser_automation_streaming_with_content_recording(self, * news stories and provide a summary of the most recent one. """, stream=True, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) async for _ in stream: pass @@ -477,7 +477,7 @@ async def test_async_browser_automation_streaming_without_content_recording(self news stories and provide a summary of the most recent one. 
""", stream=True, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) async for _ in stream: pass diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter.py index 28d496482dad..b01e31055f07 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter.py @@ -106,7 +106,7 @@ def test_sync_code_interpreter_non_streaming_with_content_recording(self, **kwar response = openai_client.responses.create( conversation=conversation.id, input="Calculate the average operating profit from the transportation data", - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) # Explicitly call and iterate through conversation items @@ -293,7 +293,7 @@ def test_sync_code_interpreter_non_streaming_without_content_recording(self, **k response = openai_client.responses.create( conversation=conversation.id, input="Calculate the average operating profit from the transportation data", - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) # Explicitly call and iterate through conversation items @@ -485,7 +485,7 @@ def test_sync_code_interpreter_streaming_with_content_recording(self, **kwargs): conversation=conversation.id, input="Calculate the average operating profit from the transportation data", stream=True, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) # Consume the stream @@ -676,7 +676,7 @@ def test_sync_code_interpreter_streaming_without_content_recording(self, **kwarg conversation=conversation.id, input="Calculate the average operating profit from the transportation data", stream=True, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) # Consume the stream diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter_async.py index 6881f72d160a..0eaa37a37ec2 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter_async.py @@ -106,7 +106,7 @@ async def test_async_code_interpreter_non_streaming_with_content_recording(self, response = await openai_client.responses.create( conversation=conversation.id, input="Calculate the average operating profit from the transportation data", - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) # Explicitly call and iterate through conversation items @@ -293,7 +293,7 @@ async def test_async_code_interpreter_non_streaming_without_content_recording(se response = await openai_client.responses.create( conversation=conversation.id, input="Calculate the average operating 
profit from the transportation data", - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) # Explicitly call and iterate through conversation items @@ -485,7 +485,7 @@ async def test_async_code_interpreter_streaming_with_content_recording(self, **k conversation=conversation.id, input="Calculate the average operating profit from the transportation data", stream=True, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) # Consume the stream @@ -676,7 +676,7 @@ async def test_async_code_interpreter_streaming_without_content_recording(self, conversation=conversation.id, input="Calculate the average operating profit from the transportation data", stream=True, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) # Consume the stream diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search.py index f60a732ebfdf..319c7f7d65e6 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search.py @@ -108,7 +108,7 @@ def test_sync_file_search_non_streaming_with_content_recording(self, **kwargs): conversation=conversation.id, input="Tell me about Contoso products", stream=False, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) assert response.output_text is not None @@ -317,7 +317,7 @@ def test_sync_file_search_non_streaming_without_content_recording(self, **kwargs conversation=conversation.id, input="Tell me about Contoso products", stream=False, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) assert response.output_text is not None @@ -524,7 +524,7 @@ def test_sync_file_search_streaming_with_content_recording(self, **kwargs): conversation=conversation.id, input="Tell me about Contoso products", stream=True, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) # Consume the stream @@ -729,7 +729,7 @@ def test_sync_file_search_streaming_without_content_recording(self, **kwargs): conversation=conversation.id, input="Tell me about Contoso products", stream=True, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) # Consume the stream diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search_async.py index 1fb973211813..c415d627da71 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search_async.py @@ -109,7 +109,7 @@ async def test_async_file_search_non_streaming_with_content_recording(self, **kw conversation=conversation.id, 
input="Tell me about Contoso products", stream=False, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) assert response.output_text is not None @@ -318,7 +318,7 @@ async def test_async_file_search_non_streaming_without_content_recording(self, * conversation=conversation.id, input="Tell me about Contoso products", stream=False, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) assert response.output_text is not None @@ -525,7 +525,7 @@ async def test_async_file_search_streaming_with_content_recording(self, **kwargs conversation=conversation.id, input="Tell me about Contoso products", stream=True, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) # Consume the stream @@ -730,7 +730,7 @@ async def test_async_file_search_streaming_without_content_recording(self, **kwa conversation=conversation.id, input="Tell me about Contoso products", stream=True, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) # Consume the stream diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp.py index 36b1cec4c98c..6d438e026518 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp.py @@ -91,7 +91,7 @@ def _test_sync_mcp_non_streaming_with_content_recording_impl(self, use_events, * conversation=conversation.id, input="Please summarize the Azure REST API specifications Readme", stream=False, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) # Collect approval requests @@ -112,7 +112,7 @@ def _test_sync_mcp_non_streaming_with_content_recording_impl(self, use_events, * conversation=conversation.id, input=input_list, stream=False, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) assert response2.output_text is not None @@ -421,7 +421,7 @@ def _test_sync_mcp_non_streaming_without_content_recording_impl(self, use_events conversation=conversation.id, input="Please summarize the Azure REST API specifications Readme", stream=False, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) # Collect approval requests @@ -442,7 +442,7 @@ def _test_sync_mcp_non_streaming_without_content_recording_impl(self, use_events conversation=conversation.id, input=input_list, stream=False, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) assert response2.output_text is not None @@ -738,7 +738,7 @@ def _test_sync_mcp_streaming_with_content_recording_impl(self, use_events, **kwa conversation=conversation.id, input="Please summarize the Azure REST API specifications Readme", stream=True, - extra_body={"agent": {"name": agent.name, "type": 
"agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) # Collect approval requests from stream @@ -762,7 +762,7 @@ def _test_sync_mcp_streaming_with_content_recording_impl(self, use_events, **kwa conversation=conversation.id, input=input_list, stream=True, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) # Consume second stream @@ -1014,7 +1014,7 @@ def _test_sync_mcp_streaming_without_content_recording_impl(self, use_events, ** conversation=conversation.id, input="Please summarize the Azure REST API specifications Readme", stream=True, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) # Collect approval requests from stream @@ -1038,7 +1038,7 @@ def _test_sync_mcp_streaming_without_content_recording_impl(self, use_events, ** conversation=conversation.id, input=input_list, stream=True, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) # Consume second stream diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp_async.py index 2db3a44ac344..1394018e1950 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp_async.py @@ -92,7 +92,7 @@ async def _test_async_mcp_non_streaming_with_content_recording_impl(self, use_ev conversation=conversation.id, input="Please summarize the Azure REST API specifications Readme", stream=False, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) # Collect approval requests @@ -113,7 +113,7 @@ async def _test_async_mcp_non_streaming_with_content_recording_impl(self, use_ev conversation=conversation.id, input=input_list, stream=False, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) assert response2.output_text is not None @@ -421,7 +421,7 @@ async def _test_async_mcp_non_streaming_without_content_recording_impl(self, use conversation=conversation.id, input="Please summarize the Azure REST API specifications Readme", stream=False, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) # Collect approval requests @@ -442,7 +442,7 @@ async def _test_async_mcp_non_streaming_without_content_recording_impl(self, use conversation=conversation.id, input=input_list, stream=False, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) assert response2.output_text is not None @@ -740,7 +740,7 @@ async def _test_async_mcp_streaming_with_content_recording_impl(self, use_events conversation=conversation.id, input="Please summarize the Azure REST API specifications Readme", stream=True, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, 
) # Collect approval requests from stream @@ -764,7 +764,7 @@ async def _test_async_mcp_streaming_with_content_recording_impl(self, use_events conversation=conversation.id, input=input_list, stream=True, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) # Consume second stream @@ -1019,7 +1019,7 @@ async def _test_async_mcp_streaming_without_content_recording_impl(self, use_eve conversation=conversation.id, input="Please summarize the Azure REST API specifications Readme", stream=True, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) # Collect approval requests from stream @@ -1043,7 +1043,7 @@ async def _test_async_mcp_streaming_without_content_recording_impl(self, use_eve conversation=conversation.id, input=input_list, stream=True, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) # Consume second stream diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow.py index afa4428cb80e..926b384e111b 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow.py @@ -245,7 +245,7 @@ def test_sync_workflow_non_streaming_with_content_recording(self, **kwargs): # Non-streaming request response = openai_client.responses.create( conversation=conversation.id, - extra_body={"agent": {"name": workflow.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": workflow.name, "type": "agent_reference"}}, input="1 + 1 = ?", stream=False, # Remove me? metadata={"x-ms-debug-mode-enabled": "1"}, @@ -410,7 +410,7 @@ def test_sync_workflow_non_streaming_without_content_recording(self, **kwargs): # Non-streaming request response = openai_client.responses.create( conversation=conversation.id, - extra_body={"agent": {"name": workflow.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": workflow.name, "type": "agent_reference"}}, input="1 + 1 = ?", stream=False, # Remove me? metadata={"x-ms-debug-mode-enabled": "1"}, @@ -577,7 +577,7 @@ def test_sync_workflow_streaming_with_content_recording(self, **kwargs): # Streaming request stream = openai_client.responses.create( conversation=conversation.id, - extra_body={"agent": {"name": workflow.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": workflow.name, "type": "agent_reference"}}, input="1 + 1 = ?", stream=True, # Remove me? metadata={"x-ms-debug-mode-enabled": "1"}, @@ -745,7 +745,7 @@ def test_sync_workflow_streaming_without_content_recording(self, **kwargs): # Streaming request stream = openai_client.responses.create( conversation=conversation.id, - extra_body={"agent": {"name": workflow.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": workflow.name, "type": "agent_reference"}}, input="1 + 1 = ?", stream=True, # Remove me? 
metadata={"x-ms-debug-mode-enabled": "1"}, diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow_async.py index c29871c62aa6..b9baf72fe610 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow_async.py @@ -244,7 +244,7 @@ async def test_async_workflow_non_streaming_with_content_recording(self, **kwarg # Non-streaming request response = await openai_client.responses.create( conversation=conversation.id, - extra_body={"agent": {"name": workflow.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": workflow.name, "type": "agent_reference"}}, input="1 + 1 = ?", stream=False, # Remove me? metadata={"x-ms-debug-mode-enabled": "1"}, @@ -405,7 +405,7 @@ async def test_async_workflow_non_streaming_without_content_recording(self, **kw # Non-streaming request response = await openai_client.responses.create( conversation=conversation.id, - extra_body={"agent": {"name": workflow.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": workflow.name, "type": "agent_reference"}}, input="1 + 1 = ?", stream=False, # Remove me? metadata={"x-ms-debug-mode-enabled": "1"}, @@ -570,7 +570,7 @@ async def test_async_workflow_streaming_with_content_recording(self, **kwargs): # Streaming request stream = await openai_client.responses.create( conversation=conversation.id, - extra_body={"agent": {"name": workflow.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": workflow.name, "type": "agent_reference"}}, input="1 + 1 = ?", stream=True, # Remove me? metadata={"x-ms-debug-mode-enabled": "1"}, @@ -736,7 +736,7 @@ async def test_async_workflow_streaming_without_content_recording(self, **kwargs # Streaming request stream = await openai_client.responses.create( conversation=conversation.id, - extra_body={"agent": {"name": workflow.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": workflow.name, "type": "agent_reference"}}, input="1 + 1 = ?", stream=True, # Remove me? 
metadata={"x-ms-debug-mode-enabled": "1"}, diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_agent_responses_crud.py b/sdk/ai/azure-ai-projects/tests/agents/test_agent_responses_crud.py index 6c6ecb594259..fa4cb60012f1 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/test_agent_responses_crud.py +++ b/sdk/ai/azure-ai-projects/tests/agents/test_agent_responses_crud.py @@ -73,7 +73,7 @@ def test_agent_responses_crud(self, **kwargs): response = openai_client.responses.create( conversation=conversation.id, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) print(f"Response id: {response.id}, output text: {response.output_text}") assert "5280" in response.output_text or "5,280" in response.output_text @@ -112,7 +112,7 @@ def test_agent_responses_crud(self, **kwargs): response = openai_client.responses.create( conversation=conversation.id, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) print(f"Response id: {response.id}, output text: {response.output_text}") assert "1609" in response.output_text or "1,609" in response.output_text @@ -139,7 +139,7 @@ def test_agent_responses_crud(self, **kwargs): # response = openai_client.responses.create( # conversation=conversation.id, - # extra_body={"agent": {"name": agent.name, "type": "agent_reference"}} + # extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}} # ) # print(f"Response id: {response.id}, output text: {response.output_text}") @@ -201,7 +201,7 @@ class CalendarEvent(BaseModel): response = openai_client.responses.create( conversation=conversation.id, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) print(f"Response id: {response.id}, output text: {response.output_text}") assert response.output_text == '{"name":"Science Fair","date":"2025-11-07","participants":["Alice","Bob"]}' diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_agent_responses_crud_async.py b/sdk/ai/azure-ai-projects/tests/agents/test_agent_responses_crud_async.py index 6f82cab2eb97..56ffb4820c7b 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/test_agent_responses_crud_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/test_agent_responses_crud_async.py @@ -50,7 +50,7 @@ async def test_agent_responses_crud_async(self, **kwargs): response = await openai_client.responses.create( conversation=conversation.id, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) print(f"Response id: {response.id}, output text: {response.output_text}") assert "5280" in response.output_text or "5,280" in response.output_text @@ -83,7 +83,7 @@ async def test_agent_responses_crud_async(self, **kwargs): response = await openai_client.responses.create( conversation=conversation.id, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) print(f"Response id: {response.id}, output text: {response.output_text}") assert "1609" in response.output_text or "1,609" in response.output_text @@ -110,7 +110,7 @@ async def test_agent_responses_crud_async(self, **kwargs): # response = await project_client.agents.responses.create( # 
conversation=conversation.id, - # extra_body={"agent": {"name": agent.name, "type": "agent_reference"}} + # extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}} # ) # print(f"Response id: {response.id}, output text: {response.output_text}") @@ -176,7 +176,7 @@ class CalendarEvent(BaseModel): response = await openai_client.responses.create( conversation=conversation.id, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) print(f"Response id: {response.id}, output text: {response.output_text}") assert response.output_text == '{"name":"Science Fair","date":"2025-11-07","participants":["Alice","Bob"]}' diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_container_app_agents.py b/sdk/ai/azure-ai-projects/tests/agents/test_container_app_agents.py index c847c1923899..8b172e7fc9c4 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/test_container_app_agents.py +++ b/sdk/ai/azure-ai-projects/tests/agents/test_container_app_agents.py @@ -52,7 +52,7 @@ def test_container_app_agent(self, **kwargs): try: response = openai_client.responses.create( conversation=conversation.id, - extra_body={"agent": {"name": agent_version.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent_version.name, "type": "agent_reference"}}, ) print(f"Response id: {response.id}, output text: {response.output_text}") assert "5280" in response.output_text or "5,280" in response.output_text diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_container_app_agents_async.py b/sdk/ai/azure-ai-projects/tests/agents/test_container_app_agents_async.py index bf4b150db65c..75343bc6b533 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/test_container_app_agents_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/test_container_app_agents_async.py @@ -52,7 +52,7 @@ async def test_container_app_agent_async(self, **kwargs): try: response = await openai_client.responses.create( conversation=conversation.id, - extra_body={"agent": {"name": agent_version.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent_version.name, "type": "agent_reference"}}, ) print(f"Response id: {response.id}, output text: {response.output_text}") assert "5280" in response.output_text or "5,280" in response.output_text diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_code_interpreter_and_function.py b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_code_interpreter_and_function.py index 58d0163f3e14..7e5265244802 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_code_interpreter_and_function.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_code_interpreter_and_function.py @@ -84,7 +84,7 @@ def test_calculate_and_save(self, **kwargs): # 17^4 = 83521 - not something easily computed mentally response = openai_client.responses.create( input="Calculate 17 to the power of 4 using code, then save the result.", - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) self.validate_response(response) print("✓ Code Interpreter + Function Tool works!") @@ -145,7 +145,7 @@ def test_generate_data_and_report(self, **kwargs): # Request data generation and report - use a fixed seed for reproducibility in verification response = openai_client.responses.create( input="Using Python with 
random.seed(42), generate exactly 10 random integers between 1 and 100, calculate their average, and create a report with the results.", - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) self.validate_response(response) diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_and_code_interpreter.py b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_and_code_interpreter.py index b5c1ada0057f..90cd78ac0bf3 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_and_code_interpreter.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_and_code_interpreter.py @@ -103,7 +103,7 @@ def test_find_and_analyze_data(self, **kwargs): # Request that requires both tools: find data AND calculate response = openai_client.responses.create( input="Find the sensor readings file and use code to calculate the average temperature. Show me the result.", - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) self.validate_response(response) assert len(response.output_text) > 20 @@ -173,7 +173,7 @@ def fibonacci(n): # Request that requires both tools: find code AND execute it response = openai_client.responses.create( input="Find the fibonacci code file and run it to calculate fibonacci(15). What is the result?", - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) response_text = response.output_text diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_and_function.py b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_and_function.py index fb7f74c537e9..de33f1db0c5a 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_and_function.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_and_function.py @@ -111,7 +111,7 @@ def test_data_analysis_workflow(self, **kwargs): response = openai_client.responses.create( input="Analyze the sales data and calculate the total revenue for each product. 
Then save the results.", - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) print(f"Initial response completed (id: {response.id})") @@ -148,7 +148,7 @@ def test_data_analysis_workflow(self, **kwargs): response = openai_client.responses.create( input=input_list, previous_response_id=response.id, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) print(f"Final response: {response.output_text[:200]}...") @@ -214,7 +214,7 @@ def test_empty_vector_store_handling(self, **kwargs): response = openai_client.responses.create( input="Find and analyze the quarterly sales report.", - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) response_text = response.output_text @@ -312,7 +312,7 @@ def calculate_sum(numbers): response = openai_client.responses.create( input="Find the Python code file, explain what the calculate_sum function does, and save your code review findings.", - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) # Verify function call is made (both tools should be used) @@ -351,7 +351,7 @@ def calculate_sum(numbers): response = openai_client.responses.create( input=input_list, previous_response_id=response.id, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) response_text = response.output_text print(f"Final response: {response_text[:200] if response_text else '(confirmation)'}...") @@ -458,7 +458,7 @@ def test_multi_turn_search_and_save_workflow(self, **kwargs): print("\n--- Turn 1: Initial search query ---") response_1 = openai_client.responses.create( input="What does the research say about machine learning in healthcare?", - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) response_1_text = response_1.output_text @@ -470,7 +470,7 @@ def test_multi_turn_search_and_save_workflow(self, **kwargs): response_2 = openai_client.responses.create( input="What specific applications are mentioned?", previous_response_id=response_1.id, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) response_2_text = response_2.output_text @@ -483,7 +483,7 @@ def test_multi_turn_search_and_save_workflow(self, **kwargs): response_3 = openai_client.responses.create( input="Please save that finding about ML accuracy.", previous_response_id=response_2.id, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) # Handle function call @@ -514,7 +514,7 @@ def test_multi_turn_search_and_save_workflow(self, **kwargs): response_3 = openai_client.responses.create( input=input_list, previous_response_id=response_3.id, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) print(f"Response 3: {response_3.output_text[:150]}...") @@ -523,7 +523,7 @@ def 
test_multi_turn_search_and_save_workflow(self, **kwargs): response_4 = openai_client.responses.create( input="Now tell me about AI ethics concerns mentioned in the research.", previous_response_id=response_3.id, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) response_4_text = response_4.output_text diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_code_interpreter_function.py b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_code_interpreter_function.py index 29d7caa9b412..4562c426d7fc 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_code_interpreter_function.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_agent_file_search_code_interpreter_function.py @@ -120,7 +120,7 @@ def test_complete_analysis_workflow(self, **kwargs): # Request that requires all three tools response = openai_client.responses.create( input="Find the sales report, use code to calculate the total and average of all monthly sales figures, then save the analysis results.", - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) self.validate_response(response) print("✓ Three-tool combination works!") diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_multitool_with_conversations.py b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_multitool_with_conversations.py index 51b1d9426348..85cd9a7fbf4e 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_multitool_with_conversations.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/multitool/test_multitool_with_conversations.py @@ -105,7 +105,7 @@ def test_file_search_and_function_with_conversation(self, **kwargs): response_1 = openai_client.responses.create( input="What was the total revenue in Q1 2024?", conversation=conversation.id, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) print(f"Response 1: {response_1.output_text[:150]}...") @@ -116,7 +116,7 @@ def test_file_search_and_function_with_conversation(self, **kwargs): response_2 = openai_client.responses.create( input="Which product had the highest sales?", conversation=conversation.id, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) print(f"Response 2: {response_2.output_text[:150]}...") @@ -127,7 +127,7 @@ def test_file_search_and_function_with_conversation(self, **kwargs): response_3 = openai_client.responses.create( input="Save a summary report of these Q1 results", conversation=conversation.id, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) # Handle function call @@ -150,7 +150,7 @@ def test_file_search_and_function_with_conversation(self, **kwargs): response_3 = openai_client.responses.create( input=input_list, conversation=conversation.id, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) print(f"Response 3: {response_3.output_text[:150]}...") diff --git 
a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_ai_search.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_ai_search.py index d97bc133306d..56d9ac3ea579 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_ai_search.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_ai_search.py @@ -151,7 +151,7 @@ def test_agent_ai_search_question_answering(self, **kwargs): stream=True, tool_choice="required", input=f"Answer this question with only 'True' or 'False': {question}", - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) for event in stream_response: diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_ai_search_async.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_ai_search_async.py index 92fda91873c0..184b9ceb402d 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_ai_search_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_ai_search_async.py @@ -77,7 +77,7 @@ async def _ask_question_async( stream=True, tool_choice="required", input=f"Answer this question with only 'True' or 'False': {question}", - extra_body={"agent": {"name": agent_name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent_name, "type": "agent_reference"}}, ) async for event in stream_response: diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_bing_grounding.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_bing_grounding.py index 9cf4076a9be9..951bebf915fd 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_bing_grounding.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_bing_grounding.py @@ -93,7 +93,7 @@ def test_agent_bing_grounding(self, **kwargs): stream=True, tool_choice="required", input="What is the current weather in Seattle?", - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) for event in stream_response: @@ -196,7 +196,7 @@ def test_agent_bing_grounding_multiple_queries(self, **kwargs): stream=True, tool_choice="required", input=query, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) for event in stream_response: diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_code_interpreter.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_code_interpreter.py index 696ac9f4c373..e31506e6df82 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_code_interpreter.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_code_interpreter.py @@ -70,7 +70,7 @@ def test_agent_code_interpreter_simple_math(self, **kwargs): response = openai_client.responses.create( input="Calculate this using Python: First, find the sum of cubes from 1 to 50 (1³ + 2³ + ... + 50³). Then add 12 factorial divided by 8 factorial (12!/8!). What is the final result?", - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) self.validate_response(response) @@ -171,7 +171,7 @@ def test_agent_code_interpreter_file_generation(self, **kwargs): response = openai_client.responses.create( input="Create a bar chart showing operating profit by sector from the uploaded CSV file. 
Save it as a PNG file.", - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) self.validate_response(response) diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_code_interpreter_async.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_code_interpreter_async.py index a4ca2f8837af..0a679f8e2509 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_code_interpreter_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_code_interpreter_async.py @@ -57,7 +57,7 @@ async def test_agent_code_interpreter_simple_math_async(self, **kwargs): response = await openai_client.responses.create( input="Calculate this using Python: First, find the sum of cubes from 1 to 50 (1³ + 2³ + ... + 50³). Then add 12 factorial divided by 8 factorial (12!/8!). What is the final result?", - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) self.validate_response(response) diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search.py index 59c074f565a4..c1a93eee1552 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search.py @@ -96,7 +96,7 @@ def test_agent_file_search(self, **kwargs): response = openai_client.responses.create( input="What products are mentioned in the document?", - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) print(f"Response completed (id: {response.id})") @@ -263,7 +263,7 @@ def test_agent_file_search_multi_turn_conversation(self, **kwargs): print("\n--- Turn 1: Initial query ---") response_1 = openai_client.responses.create( input="What is the price of Widget B?", - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) response_1_text = response_1.output_text @@ -275,7 +275,7 @@ def test_agent_file_search_multi_turn_conversation(self, **kwargs): response_2 = openai_client.responses.create( input="What about its stock level?", previous_response_id=response_1.id, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) response_2_text = response_2.output_text @@ -289,7 +289,7 @@ def test_agent_file_search_multi_turn_conversation(self, **kwargs): response_3 = openai_client.responses.create( input="How does that compare to Widget A's stock?", previous_response_id=response_2.id, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) response_3_text = response_3.output_text @@ -303,7 +303,7 @@ def test_agent_file_search_multi_turn_conversation(self, **kwargs): response_4 = openai_client.responses.create( input="Which widget has the highest rating?", previous_response_id=response_3.id, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) response_4_text = response_4.output_text diff --git 
a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_async.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_async.py index ae30e7e037a3..54be6f9e1188 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_async.py @@ -71,7 +71,7 @@ async def test_agent_file_search_async(self, **kwargs): response = await openai_client.responses.create( input="What products are mentioned in the document?", - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) print(f"Response completed (id: {response.id})") @@ -166,7 +166,7 @@ async def test_agent_file_search_multi_turn_conversation_async(self, **kwargs): print("\n--- Turn 1: Initial query ---") response_1 = await openai_client.responses.create( input="What is the price of Widget B?", - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) response_1_text = response_1.output_text @@ -178,7 +178,7 @@ async def test_agent_file_search_multi_turn_conversation_async(self, **kwargs): response_2 = await openai_client.responses.create( input="What about its stock level?", previous_response_id=response_1.id, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) response_2_text = response_2.output_text @@ -192,7 +192,7 @@ async def test_agent_file_search_multi_turn_conversation_async(self, **kwargs): response_3 = await openai_client.responses.create( input="How does that compare to Widget A's stock?", previous_response_id=response_2.id, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) response_3_text = response_3.output_text @@ -206,7 +206,7 @@ async def test_agent_file_search_multi_turn_conversation_async(self, **kwargs): response_4 = await openai_client.responses.create( input="Which widget has the highest rating?", previous_response_id=response_3.id, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}}, ) response_4_text = response_4.output_text diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_stream.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_stream.py index c0ed973ef2c2..489b3870d6f5 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_stream.py +++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_stream.py @@ -94,7 +94,7 @@ def test_agent_file_search_stream(self, **kwargs): stream_response = openai_client.responses.create( stream=True, input="What products are mentioned in the document? 
Please provide a brief summary.",
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )

         # Collect streamed response
diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_stream_async.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_stream_async.py
index e668ef5474f9..57b8674aeb69 100644
--- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_stream_async.py
+++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_file_search_stream_async.py
@@ -71,7 +71,7 @@ async def test_agent_file_search_stream_async(self, **kwargs):
         stream_response = await openai_client.responses.create(
             stream=True,
             input="What products are mentioned in the document? Please provide a brief summary.",
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )

         # Collect streamed response
diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_function_tool.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_function_tool.py
index 8b215d7560fe..a67b35472845 100644
--- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_function_tool.py
+++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_function_tool.py
@@ -86,7 +86,7 @@ def test_agent_function_tool(self, **kwargs):

         response = openai_client.responses.create(
             input="What's the weather in Seattle?",
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )

         self.validate_response(response, print_message="Initial response completed")
@@ -139,7 +139,7 @@ def test_agent_function_tool(self, **kwargs):
         response = openai_client.responses.create(
             input=input_list,
             previous_response_id=response.id,
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )

         self.validate_response(response, print_message="Final response completed")
@@ -232,7 +232,7 @@ def test_agent_function_tool_multi_turn_with_multiple_calls(self, **kwargs):
         print("\n--- Turn 1: Current weather query ---")
         response_1 = openai_client.responses.create(
             input="What's the weather in New York?",
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )

         # Handle function call
@@ -256,7 +256,7 @@ def test_agent_function_tool_multi_turn_with_multiple_calls(self, **kwargs):
         response_1 = openai_client.responses.create(
             input=input_list,
             previous_response_id=response_1.id,
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )

         response_1_text = response_1.output_text
@@ -268,7 +268,7 @@ def test_agent_function_tool_multi_turn_with_multiple_calls(self, **kwargs):
         response_2 = openai_client.responses.create(
             input="What about the forecast for the next few days?",
             previous_response_id=response_1.id,
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )

         # Handle forecast function call
@@ -303,7 +303,7 @@ def test_agent_function_tool_multi_turn_with_multiple_calls(self, **kwargs):
         response_2 = openai_client.responses.create(
             input=input_list,
             previous_response_id=response_2.id,
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )

         response_2_text = response_2.output_text
@@ -315,7 +315,7 @@ def test_agent_function_tool_multi_turn_with_multiple_calls(self, **kwargs):
         response_3 = openai_client.responses.create(
             input="How does that compare to Seattle's weather?",
             previous_response_id=response_2.id,
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )

         # Handle function calls for Seattle (agent might call both weather and forecast)
@@ -357,7 +357,7 @@ def test_agent_function_tool_multi_turn_with_multiple_calls(self, **kwargs):
         response_3 = openai_client.responses.create(
             input=input_list,
             previous_response_id=response_3.id,
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )

         response_3_text = response_3.output_text
@@ -424,7 +424,7 @@ def test_agent_function_tool_context_dependent_followup(self, **kwargs):
         print("\n--- Turn 1: Get temperature ---")
         response_1 = openai_client.responses.create(
             input="What's the temperature in Boston?",
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )

         # Handle function call
@@ -443,7 +443,7 @@ def test_agent_function_tool_context_dependent_followup(self, **kwargs):
         response_1 = openai_client.responses.create(
             input=input_list,
             previous_response_id=response_1.id,
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )

         response_1_text = response_1.output_text
@@ -455,7 +455,7 @@ def test_agent_function_tool_context_dependent_followup(self, **kwargs):
         response_2 = openai_client.responses.create(
             input="What is that in Celsius?",  # "that" refers to the 72°F from previous response
             previous_response_id=response_1.id,
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )

         response_2_text = response_2.output_text
@@ -476,7 +476,7 @@ def test_agent_function_tool_context_dependent_followup(self, **kwargs):
         response_3 = openai_client.responses.create(
             input="Is that warmer or colder than 25°C?",  # "that" refers to the Celsius value just mentioned
             previous_response_id=response_2.id,
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )

         response_3_text = response_3.output_text
diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_function_tool_async.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_function_tool_async.py
index eb0600fc4c4a..3413d03d8baf 100644
--- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_function_tool_async.py
+++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_function_tool_async.py
@@ -74,7 +74,7 @@ async def test_agent_function_tool_async(self, **kwargs):

         response = await openai_client.responses.create(
             input="What's the weather in Seattle?",
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )

         self.validate_response(response, print_message="Initial response completed")
@@ -127,7 +127,7 @@ async def test_agent_function_tool_async(self, **kwargs):
         response = await openai_client.responses.create(
             input=input_list,
             previous_response_id=response.id,
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )

         self.validate_response(response, print_message="Final response completed")
@@ -221,7 +221,7 @@ async def test_agent_function_tool_multi_turn_with_multiple_calls_async(self, **
         print("\n--- Turn 1: Current weather query ---")
         response_1 = await openai_client.responses.create(
             input="What's the weather in New York?",
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )

         # Handle function call
@@ -245,7 +245,7 @@ async def test_agent_function_tool_multi_turn_with_multiple_calls_async(self, **
         response_1 = await openai_client.responses.create(
             input=input_list,
             previous_response_id=response_1.id,
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )

         response_1_text = response_1.output_text
@@ -257,7 +257,7 @@ async def test_agent_function_tool_multi_turn_with_multiple_calls_async(self, **
         response_2 = await openai_client.responses.create(
             input="What about the forecast for the next few days?",
             previous_response_id=response_1.id,
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )

         # Handle forecast function call
@@ -292,7 +292,7 @@ async def test_agent_function_tool_multi_turn_with_multiple_calls_async(self, **
         response_2 = await openai_client.responses.create(
             input=input_list,
             previous_response_id=response_2.id,
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )

         response_2_text = response_2.output_text
@@ -304,7 +304,7 @@ async def test_agent_function_tool_multi_turn_with_multiple_calls_async(self, **
         response_3 = await openai_client.responses.create(
             input="How does that compare to Seattle's weather?",
             previous_response_id=response_2.id,
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )

         # Handle function calls for Seattle (agent might call both weather and forecast)
@@ -346,7 +346,7 @@ async def test_agent_function_tool_multi_turn_with_multiple_calls_async(self, **
         response_3 = await openai_client.responses.create(
             input=input_list,
             previous_response_id=response_3.id,
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )

         response_3_text = response_3.output_text
@@ -414,7 +414,7 @@ async def test_agent_function_tool_context_dependent_followup_async(self, **kwar
         print("\n--- Turn 1: Get temperature ---")
         response_1 = await openai_client.responses.create(
             input="What's the temperature in Boston?",
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )

         # Handle function call
@@ -433,7 +433,7 @@ async def test_agent_function_tool_context_dependent_followup_async(self, **kwar
         response_1 = await openai_client.responses.create(
             input=input_list,
             previous_response_id=response_1.id,
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )

         response_1_text = response_1.output_text
@@ -445,7 +445,7 @@ async def test_agent_function_tool_context_dependent_followup_async(self, **kwar
         response_2 = await openai_client.responses.create(
             input="What is that in Celsius?",  # "that" refers to the 72°F from previous response
             previous_response_id=response_1.id,
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )

         response_2_text = response_2.output_text
@@ -466,7 +466,7 @@ async def test_agent_function_tool_context_dependent_followup_async(self, **kwar
         response_3 = await openai_client.responses.create(
             input="Is that warmer or colder than 25°C?",  # "that" refers to the Celsius value just mentioned
             previous_response_id=response_2.id,
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )

         response_3_text = response_3.output_text
diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_image_generation.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_image_generation.py
index d34c6e497238..ac29ee0a30d7 100644
--- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_image_generation.py
+++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_image_generation.py
@@ -82,7 +82,7 @@ def test_agent_image_generation(self, **kwargs):
         response = openai_client.responses.create(
             input="Generate an image of a blue circle on a white background.",
             extra_headers={"x-ms-oai-image-generation-deployment": image_model},  # Required for image generation
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )

         print(f"Response created (id: {response.id})")
diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_image_generation_async.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_image_generation_async.py
index 16529f228f65..5c70d312c2ab 100644
--- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_image_generation_async.py
+++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_image_generation_async.py
@@ -63,7 +63,7 @@ async def test_agent_image_generation_async(self, **kwargs):
         response = await openai_client.responses.create(
             input="Generate an image of a blue circle on a white background.",
             extra_headers={"x-ms-oai-image-generation-deployment": image_model},  # Required for image generation
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )

         print(f"Response created (id: {response.id})")
diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_mcp.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_mcp.py
index bb3d9a306907..7029d25f4be7 100644
--- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_mcp.py
+++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_mcp.py
@@ -90,7 +90,7 @@ def test_agent_mcp_basic(self, **kwargs):
         response = openai_client.responses.create(
             conversation=conversation.id,
             input="Please summarize the Azure REST API specifications Readme",
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )

         self.validate_response(response, print_message="Initial response completed")
@@ -128,7 +128,7 @@ def test_agent_mcp_basic(self, **kwargs):
         response = openai_client.responses.create(
             conversation=conversation.id,
             input=input_list,
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )

         self.validate_response(response, print_message="Final response completed")
@@ -233,7 +233,7 @@ def test_agent_mcp_with_project_connection(self, **kwargs):
         response = openai_client.responses.create(
             conversation=conversation.id,
             input="What is my username in Github profile?",
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )

         self.validate_response(response, print_message="Initial response completed")
@@ -271,7 +271,7 @@ def test_agent_mcp_with_project_connection(self, **kwargs):
         response = openai_client.responses.create(
             conversation=conversation.id,
             input=input_list,
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )

         self.validate_response(response, print_message="Final response completed")
diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_mcp_async.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_mcp_async.py
index 8ac7732e4978..5f27d42becd4 100644
--- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_mcp_async.py
+++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_mcp_async.py
@@ -63,7 +63,7 @@ async def test_agent_mcp_basic_async(self, **kwargs):
         response = await openai_client.responses.create(
             conversation=conversation.id,
             input="Please summarize the Azure REST API specifications Readme",
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )

         self.validate_response(response, print_message="Initial response completed")
@@ -101,7 +101,7 @@ async def test_agent_mcp_basic_async(self, **kwargs):
         response = await openai_client.responses.create(
             conversation=conversation.id,
             input=input_list,
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )

         self.validate_response(response, print_message="Final response completed")
diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_memory_search.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_memory_search.py
index 9ffaeaf4b950..fa041dc271a6 100644
--- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_memory_search.py
+++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_memory_search.py
@@ -140,7 +140,7 @@ def test_agent_memory_search(self, **kwargs):
         response = openai_client.responses.create(
             input="I prefer dark roast coffee",
             conversation=conversation.id,
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )

         self.validate_response(response)
@@ -165,7 +165,7 @@ def test_agent_memory_search(self, **kwargs):
         new_response = openai_client.responses.create(
             input="Please order my usual coffee",
             conversation=new_conversation.id,
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )

         self.validate_response(new_response)
diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_memory_search_async.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_memory_search_async.py
index 0209fadd94ef..95dff2280588 100644
--- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_memory_search_async.py
+++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_memory_search_async.py
@@ -115,7 +115,7 @@ async def test_agent_memory_search_async(self, **kwargs):
         response = await openai_client.responses.create(
             input="I prefer dark roast coffee",
             conversation=conversation.id,
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )

         self.validate_response(response)
@@ -140,7 +140,7 @@ async def test_agent_memory_search_async(self, **kwargs):
         new_response = await openai_client.responses.create(
             input="Please order my usual coffee",
             conversation=new_conversation.id,
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )

         self.validate_response(new_response)
diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_openapi.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_openapi.py
index b9c3f1b289a1..de8b85f19723 100644
--- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_openapi.py
+++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_openapi.py
@@ -97,7 +97,7 @@ def test_agent_openapi(self, **kwargs):

         response = openai_client.responses.create(
             input="Use the OpenAPI tool to print out, what is the weather in Seattle, WA today.",
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )

         self.validate_response(response)
diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_openapi_async.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_openapi_async.py
index 5934f77faa4b..1b3e87ef063a 100644
--- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_openapi_async.py
+++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_openapi_async.py
@@ -77,7 +77,7 @@ async def test_agent_openapi_async(self, **kwargs):

         response = await openai_client.responses.create(
             input="Use the OpenAPI tool to print out, what is the weather in Seattle, WA today.",
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )

         self.validate_response(response)
diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_tools_with_conversations.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_tools_with_conversations.py
index c15e0ced6dc1..1d8a94bb44c7 100644
--- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_tools_with_conversations.py
+++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_tools_with_conversations.py
@@ -91,7 +91,7 @@ def test_function_tool_with_conversation(self, **kwargs):
         response_1 = openai_client.responses.create(
             input="What is 15 plus 27?",
             conversation=conversation.id,
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )

         # Handle function call
@@ -120,7 +120,7 @@ def test_function_tool_with_conversation(self, **kwargs):
         response_1 = openai_client.responses.create(
             input=input_list,
             conversation=conversation.id,
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )
         print(f"Response 1: {response_1.output_text[:100]}...")
         assert "42" in response_1.output_text
@@ -130,7 +130,7 @@ def test_function_tool_with_conversation(self, **kwargs):
         response_2 = openai_client.responses.create(
             input="Now multiply that result by 2",
             conversation=conversation.id,
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )

         # Handle function call
@@ -156,7 +156,7 @@ def test_function_tool_with_conversation(self, **kwargs):
         response_2 = openai_client.responses.create(
             input=input_list,
             conversation=conversation.id,
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )
         print(f"Response 2: {response_2.output_text[:100]}...")
         assert "84" in response_2.output_text
@@ -268,7 +268,7 @@ def test_file_search_with_conversation(self, **kwargs):
         response_1 = openai_client.responses.create(
             input="Which widget has the highest rating?",
             conversation=conversation.id,
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )

         response_1_text = response_1.output_text
@@ -280,7 +280,7 @@ def test_file_search_with_conversation(self, **kwargs):
         response_2 = openai_client.responses.create(
             input="What is its price?",  # "its" refers to Widget B from previous turn
             conversation=conversation.id,
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )

         response_2_text = response_2.output_text
@@ -292,7 +292,7 @@ def test_file_search_with_conversation(self, **kwargs):
         response_3 = openai_client.responses.create(
             input="Which widget is in the Home Goods category?",
             conversation=conversation.id,
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )

         response_3_text = response_3.output_text
@@ -348,7 +348,7 @@ def test_code_interpreter_with_conversation(self, **kwargs):
         response_1 = openai_client.responses.create(
             input="Calculate the average of these numbers: 10, 20, 30, 40, 50",
             conversation=conversation.id,
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )

         response_1_text = response_1.output_text
@@ -360,7 +360,7 @@ def test_code_interpreter_with_conversation(self, **kwargs):
         response_2 = openai_client.responses.create(
             input="Now calculate the standard deviation of those same numbers",
             conversation=conversation.id,
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )

         response_2_text = response_2.output_text
@@ -373,7 +373,7 @@ def test_code_interpreter_with_conversation(self, **kwargs):
         response_3 = openai_client.responses.create(
             input="Create a list of squares from 1 to 5",
             conversation=conversation.id,
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )

         response_3_text = response_3.output_text
@@ -447,7 +447,7 @@ def test_code_interpreter_with_file_in_conversation(self, **kwargs):
         response_1 = openai_client.responses.create(
             conversation=conversation.id,
             input="Could you please create bar chart in TRANSPORTATION sector for the operating profit from the uploaded csv file and provide file to me?",
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )

         response_1_text = response_1.output_text
diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_web_search.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_web_search.py
index 501da235ece3..673863ba877a 100644
--- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_web_search.py
+++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_web_search.py
@@ -70,7 +70,7 @@ def test_agent_web_search(self, **kwargs):

         response = openai_client.responses.create(
             input="What is the current weather in Seattle?",
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )

         self.validate_response(response)
diff --git a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_web_search_async.py b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_web_search_async.py
index 99b0bdbfda1b..700196858a1c 100644
--- a/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_web_search_async.py
+++ b/sdk/ai/azure-ai-projects/tests/agents/tools/test_agent_web_search_async.py
@@ -50,7 +50,7 @@ async def test_agent_web_search_async(self, **kwargs):

         response = await openai_client.responses.create(
             input="What is the current weather in Seattle?",
-            extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            extra_body={"agent_reference": {"name": agent.name, "type": "agent_reference"}},
         )

         self.validate_response(response)
diff --git a/sdk/ai/azure-ai-projects/tests/samples/test_samples.py b/sdk/ai/azure-ai-projects/tests/samples/test_samples.py
index 52fc5ae46a37..bbafe4466b89 100644
--- a/sdk/ai/azure-ai-projects/tests/samples/test_samples.py
+++ b/sdk/ai/azure-ai-projects/tests/samples/test_samples.py
@@ -27,8 +27,12 @@ class TestSamples(AzureRecordedTestCase):
     @additionalSampleTests(
         [
             AdditionalSampleTestDetail(
-                sample_filename="sample_agent_computer_use.py",
-                env_vars={"COMPUTER_USE_MODEL_DEPLOYMENT_NAME": "sanitized_model"},
+                test_id="sample_agent_azure_function",
+                sample_filename="sample_agent_azure_function.py",
+                env_vars={"STORAGE_INPUT_QUEUE_NAME": "sanitized_input_queue_name",
+                    "STORAGE_OUTPUT_QUEUE_NAME": "sanitized_output_queue_name",
+                    "STORAGE_QUEUE_SERVICE_ENDPOINT": "sanitized_queue_service_endpoint",
+                },
             ),
         ]
     )
@@ -47,6 +51,7 @@ class TestSamples(AzureRecordedTestCase):
                 "sample_agent_web_search.py",
                 "sample_agent_web_search_preview.py",
                 "sample_agent_web_search_with_custom_search.py",
+                "sample_agent_azure_function.py",
             ],
         ),
     )