From 299db4f2a73ddb38f13f0347800371bbbac258d3 Mon Sep 17 00:00:00 2001 From: Waqas Javed <7674577+w-javed@users.noreply.github.com> Date: Mon, 15 Dec 2025 16:46:24 -0800 Subject: [PATCH 1/5] sample-fix --- sdk/ai/azure-ai-projects/.env.template | 2 +- .../samples/agents/sample_agent_retrieve_basic.py | 4 ++-- .../samples/agents/sample_agent_retrieve_basic_async.py | 4 ++-- .../samples/evaluations/sample_agent_evaluation.py | 6 +++--- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/sdk/ai/azure-ai-projects/.env.template b/sdk/ai/azure-ai-projects/.env.template index 1c18021a4e7d..5449d0303d4b 100644 --- a/sdk/ai/azure-ai-projects/.env.template +++ b/sdk/ai/azure-ai-projects/.env.template @@ -17,7 +17,7 @@ # `https://.services.ai.azure.com/api/projects/` AZURE_AI_PROJECT_ENDPOINT= AZURE_AI_MODEL_DEPLOYMENT_NAME= -AGENT_NAME= +AZURE_AI_AGENT_NAME= CONVERSATION_ID= CONNECTION_NAME= AZURE_AI_PROJECTS_AZURE_SUBSCRIPTION_ID= diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_retrieve_basic.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_retrieve_basic.py index a04bc7c64fe8..1102d326b2f3 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_retrieve_basic.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_retrieve_basic.py @@ -23,7 +23,7 @@ Set these environment variables with your own values: 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AGENT_NAME - The name of an existing Agent in your Microsoft Foundry project. + 2) AZURE_AI_AGENT_NAME - The name of an existing Agent in your Microsoft Foundry project. 
3) CONVERSATION_ID - The ID of an existing Conversation associated with the Agent """ @@ -34,7 +34,7 @@ load_dotenv() -agent_name = os.environ["AGENT_NAME"] +agent_name = os.environ["AZURE_AI_AGENT_NAME"] conversation_id = os.environ["CONVERSATION_ID"] endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_retrieve_basic_async.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_retrieve_basic_async.py index 16988819741c..5cae332aa01f 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agent_retrieve_basic_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agent_retrieve_basic_async.py @@ -23,7 +23,7 @@ Set these environment variables with your own values: 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview page of your Microsoft Foundry portal. - 2) AGENT_NAME - The name of an existing Agent in your Microsoft Foundry project. + 2) AZURE_AI_AGENT_NAME - The name of an existing Agent in your Microsoft Foundry project. 
3) CONVERSATION_ID - The ID of an existing Conversation associated with the Agent """ @@ -36,7 +36,7 @@ load_dotenv() endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] -agent_name = os.environ["AGENT_NAME"] +agent_name = os.environ["AZURE_AI_AGENT_NAME"] conversation_id = os.environ["CONVERSATION_ID"] diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_evaluation.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_evaluation.py index 8dc67f31fa1d..e11a993bd84a 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_evaluation.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_evaluation.py @@ -66,7 +66,7 @@ "type": "azure_ai_evaluator", "name": "violence_detection", "evaluator_name": "builtin.violence", - "data_mapping": {"query": "{{item.query}}", "response": "{{item.response}}"}, + "data_mapping": {"query": "{{item.query}}", "response": "{{sample.output_text}}"}, } ] eval_object = openai_client.evals.create( @@ -123,8 +123,8 @@ else: print("\n✗ Evaluation run failed.") - openai_client.evals.delete(eval_id=eval_object.id) + # openai_client.evals.delete(eval_id=eval_object.id) print("Evaluation deleted") - project_client.agents.delete(agent_name=agent.name) + # project_client.agents.delete(agent_name=agent.name) print("Agent deleted") From b126fe2b7e64d98918f3e25e077db3cd5060f53b Mon Sep 17 00:00:00 2001 From: Waqas Javed <7674577+w-javed@users.noreply.github.com> Date: Mon, 15 Dec 2025 16:49:58 -0800 Subject: [PATCH 2/5] revert --- .../samples/evaluations/sample_agent_evaluation.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_evaluation.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_evaluation.py index e11a993bd84a..4d6cd4a098c0 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_evaluation.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_evaluation.py @@ -123,8 +123,8 
@@ else: print("\n✗ Evaluation run failed.") - # openai_client.evals.delete(eval_id=eval_object.id) + openai_client.evals.delete(eval_id=eval_object.id) print("Evaluation deleted") - # project_client.agents.delete(agent_name=agent.name) + project_client.agents.delete(agent_name=agent.name) print("Agent deleted") From 2c99bacb5e50638c8dbffd144928341594e7deed Mon Sep 17 00:00:00 2001 From: Waqas Javed <7674577+w-javed@users.noreply.github.com> Date: Tue, 23 Dec 2025 14:43:54 -0800 Subject: [PATCH 3/5] model fix --- sdk/ai/azure-ai-projects/README.md | 112 +++++++++--------- .../evaluations/sample_model_evaluation.py | 6 +- 2 files changed, 59 insertions(+), 59 deletions(-) diff --git a/sdk/ai/azure-ai-projects/README.md b/sdk/ai/azure-ai-projects/README.md index a708cb3c9694..5a4ce3973811 100644 --- a/sdk/ai/azure-ai-projects/README.md +++ b/sdk/ai/azure-ai-projects/README.md @@ -122,19 +122,20 @@ See the "responses" folder in the [package samples][samples] for additional samp ```python -with project_client.get_openai_client() as openai_client: - response = openai_client.responses.create( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], - input="What is the size of France in square miles?", - ) - print(f"Response output: {response.output_text}") +openai_client = project_client.get_openai_client() - response = openai_client.responses.create( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], - input="And what is the capital city?", - previous_response_id=response.id, - ) - print(f"Response output: {response.output_text}") +response = openai_client.responses.create( + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + input="What is the size of France in square miles?", +) +print(f"Response output: {response.output_text}") + +response = openai_client.responses.create( + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + input="And what is the capital city?", + previous_response_id=response.id, +) +print(f"Response output: {response.output_text}") ``` @@ 
-150,43 +151,44 @@ See the "agents" folder in the [package samples][samples] for an extensive set o ```python -with project_client.get_openai_client() as openai_client: - agent = project_client.agents.create_version( - agent_name="MyAgent", - definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], - instructions="You are a helpful assistant that answers general questions", - ), - ) - print(f"Agent created (id: {agent.id}, name: {agent.name}, version: {agent.version})") +openai_client = project_client.get_openai_client() - conversation = openai_client.conversations.create( - items=[{"type": "message", "role": "user", "content": "What is the size of France in square miles?"}], - ) - print(f"Created conversation with initial user message (id: {conversation.id})") +agent = project_client.agents.create_version( + agent_name="MyAgent", + definition=PromptAgentDefinition( + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + instructions="You are a helpful assistant that answers general questions", + ), +) +print(f"Agent created (id: {agent.id}, name: {agent.name}, version: {agent.version})") - response = openai_client.responses.create( - conversation=conversation.id, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, - input="", - ) - print(f"Response output: {response.output_text}") +conversation = openai_client.conversations.create( + items=[{"type": "message", "role": "user", "content": "What is the size of France in square miles?"}], +) +print(f"Created conversation with initial user message (id: {conversation.id})") - openai_client.conversations.items.create( - conversation_id=conversation.id, - items=[{"type": "message", "role": "user", "content": "And what is the capital city?"}], - ) - print(f"Added a second user message to the conversation") +response = openai_client.responses.create( + conversation=conversation.id, + extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + input="", +) 
+print(f"Response output: {response.output_text}") - response = openai_client.responses.create( - conversation=conversation.id, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, - input="", - ) - print(f"Response output: {response.output_text}") +openai_client.conversations.items.create( + conversation_id=conversation.id, + items=[{"type": "message", "role": "user", "content": "And what is the capital city?"}], +) +print(f"Added a second user message to the conversation") + +response = openai_client.responses.create( + conversation=conversation.id, + extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + input="", +) +print(f"Response output: {response.output_text}") - openai_client.conversations.delete(conversation_id=conversation.id) - print("Conversation deleted") +openai_client.conversations.delete(conversation_id=conversation.id) +print("Conversation deleted") project_client.agents.delete_version(agent_name=agent.name, agent_version=agent.version) print("Agent deleted") @@ -646,7 +648,7 @@ with ( "type": "azure_ai_evaluator", "name": "violence_detection", "evaluator_name": "builtin.violence", - "data_mapping": {"query": "{{item.query}}", "response": "{{item.response}}"}, + "data_mapping": {"query": "{{item.query}}", "response": "{{sample.output_text}}"}, } ] eval_object = openai_client.evals.create( @@ -823,7 +825,9 @@ folder in the [package samples][samples]. 
```python -print(f"Create Index `{index_name}` with version `{index_version}`, referencing an existing AI Search resource:") +print( + f"Create Index `{index_name}` with version `{index_version}`, referencing an existing AI Search resource:" +) index = project_client.indexes.create_or_update( name=index_name, version=index_version, @@ -861,24 +865,20 @@ with open(file_path, "rb") as f: uploaded_file = openai_client.files.create(file=f, purpose="fine-tune") print(uploaded_file) -print("Waits for the given file to be processed, default timeout is 30 mins") -processed_file = openai_client.files.wait_for_processing(uploaded_file.id) -print(processed_file) - -print(f"Retrieving file metadata with ID: {processed_file.id}") -retrieved_file = openai_client.files.retrieve(processed_file.id) +print(f"Retrieving file metadata with ID: {uploaded_file.id}") +retrieved_file = openai_client.files.retrieve(uploaded_file.id) print(retrieved_file) -print(f"Retrieving file content with ID: {processed_file.id}") -file_content = openai_client.files.content(processed_file.id) +print(f"Retrieving file content with ID: {uploaded_file.id}") +file_content = openai_client.files.content(uploaded_file.id) print(file_content.content) print("Listing all files:") for file in openai_client.files.list(): print(file) -print(f"Deleting file with ID: {processed_file.id}") -deleted_file = openai_client.files.delete(processed_file.id) +print(f"Deleting file with ID: {uploaded_file.id}") +deleted_file = openai_client.files.delete(uploaded_file.id) print(f"Successfully deleted file: {deleted_file.id}") ``` diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_model_evaluation.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_model_evaluation.py index 5fb80829e4db..24ec48493b22 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_model_evaluation.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_model_evaluation.py @@ -56,11 +56,11 @@ "type": "azure_ai_evaluator", 
"name": "violence_detection", "evaluator_name": "builtin.violence", - "data_mapping": {"query": "{{item.query}}", "response": "{{item.response}}"}, + "data_mapping": {"query": "{{item.query}}", "response": "{{sample.output_text}}"}, } ] eval_object = openai_client.evals.create( - name="Agent Evaluation", + name="Model Evaluation", data_source_config=data_source_config, testing_criteria=testing_criteria, # type: ignore ) @@ -116,5 +116,5 @@ else: print("\n✗ Evaluation run failed.") - openai_client.evals.delete(eval_id=eval_object.id) + # openai_client.evals.delete(eval_id=eval_object.id) print("Evaluation deleted") From 653c6f7698f19c708ddd2c0ef05de02231b17b98 Mon Sep 17 00:00:00 2001 From: Waqas Javed <7674577+w-javed@users.noreply.github.com> Date: Tue, 23 Dec 2025 19:04:49 -0800 Subject: [PATCH 4/5] Fix --- sdk/ai/azure-ai-projects/README.md | 19 +++++++++++++++++- .../evaluations/sample_agent_evaluation.py | 20 ++++++++++++++++++- .../evaluations/sample_model_evaluation.py | 4 +++- 3 files changed, 40 insertions(+), 3 deletions(-) diff --git a/sdk/ai/azure-ai-projects/README.md b/sdk/ai/azure-ai-projects/README.md index 5a4ce3973811..e4b20d42b325 100644 --- a/sdk/ai/azure-ai-projects/README.md +++ b/sdk/ai/azure-ai-projects/README.md @@ -632,7 +632,7 @@ with ( agent = project_client.agents.create_version( agent_name=os.environ["AZURE_AI_AGENT_NAME"], definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=model_deployment_name, instructions="You are a helpful assistant that answers general questions", ), ) @@ -643,12 +643,29 @@ with ( item_schema={"type": "object", "properties": {"query": {"type": "string"}}, "required": ["query"]}, include_sample_schema=True, ) + # Notes: for data_mapping: + # sample.output_text is the string output of the agent + # sample.output_items is the structured JSON output of the agent, including tool calls information testing_criteria = [ { "type": "azure_ai_evaluator", "name": 
"violence_detection", "evaluator_name": "builtin.violence", "data_mapping": {"query": "{{item.query}}", "response": "{{sample.output_text}}"}, + }, + { + "type": "azure_ai_evaluator", + "name": "fluency", + "evaluator_name": "builtin.fluency", + "initialization_parameters": {"deployment_name": f"{model_deployment_name}"}, + "data_mapping": {"query": "{{item.query}}", "response": "{{sample.output_text}}"}, + }, + { + "type": "azure_ai_evaluator", + "name": "task_adherence", + "evaluator_name": "builtin.task_adherence", + "initialization_parameters": {"deployment_name": f"{model_deployment_name}"}, + "data_mapping": {"query": "{{item.query}}", "response": "{{sample.output_items}}"}, } ] eval_object = openai_client.evals.create( diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_evaluation.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_evaluation.py index 4d6cd4a098c0..1f4602048645 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_evaluation.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_evaluation.py @@ -40,6 +40,7 @@ load_dotenv() endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] +model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") # Sample : gpt-4o-mini # [START agent_evaluation_basic] with ( @@ -50,7 +51,7 @@ agent = project_client.agents.create_version( agent_name=os.environ["AZURE_AI_AGENT_NAME"], definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=model_deployment_name, instructions="You are a helpful assistant that answers general questions", ), ) @@ -61,12 +62,29 @@ item_schema={"type": "object", "properties": {"query": {"type": "string"}}, "required": ["query"]}, include_sample_schema=True, ) + # Notes: for data_mapping: + # sample.output_text is the string output of the agent + # sample.output_items is the structured JSON output of the agent, including tool calls information testing_criteria = [ { "type": 
"azure_ai_evaluator", "name": "violence_detection", "evaluator_name": "builtin.violence", "data_mapping": {"query": "{{item.query}}", "response": "{{sample.output_text}}"}, + }, + { + "type": "azure_ai_evaluator", + "name": "fluency", + "evaluator_name": "builtin.fluency", + "initialization_parameters": {"deployment_name": f"{model_deployment_name}"}, + "data_mapping": {"query": "{{item.query}}", "response": "{{sample.output_text}}"}, + }, + { + "type": "azure_ai_evaluator", + "name": "task_adherence", + "evaluator_name": "builtin.task_adherence", + "initialization_parameters": {"deployment_name": f"{model_deployment_name}"}, + "data_mapping": {"query": "{{item.query}}", "response": "{{sample.output_items}}"}, } ] eval_object = openai_client.evals.create( diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_model_evaluation.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_model_evaluation.py index 24ec48493b22..91e45ecf3355 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_model_evaluation.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_model_evaluation.py @@ -51,6 +51,8 @@ item_schema={"type": "object", "properties": {"query": {"type": "string"}}, "required": ["query"]}, include_sample_schema=True, ) + # Notes: for data_mapping: + # {{sample.output_text}} is the string output of the provided model target for the given input in {{item.query}} testing_criteria = [ { "type": "azure_ai_evaluator", @@ -116,5 +118,5 @@ else: print("\n✗ Evaluation run failed.") - # openai_client.evals.delete(eval_id=eval_object.id) + openai_client.evals.delete(eval_id=eval_object.id) print("Evaluation deleted") From 074c6566135b6c6628847baa78ba66424ce57df0 Mon Sep 17 00:00:00 2001 From: Waqas Javed <7674577+w-javed@users.noreply.github.com> Date: Wed, 24 Dec 2025 00:50:00 -0800 Subject: [PATCH 5/5] fix --- sdk/ai/azure-ai-projects/README.md | 112 +++++++++--------- .../evaluations/sample_agent_evaluation.py | 2 +- 2 files changed, 57
insertions(+), 57 deletions(-) diff --git a/sdk/ai/azure-ai-projects/README.md b/sdk/ai/azure-ai-projects/README.md index 4adb5edbe4fd..b623eae7e8d1 100644 --- a/sdk/ai/azure-ai-projects/README.md +++ b/sdk/ai/azure-ai-projects/README.md @@ -122,20 +122,19 @@ See the "responses" folder in the [package samples][samples] for additional samp ```python -openai_client = project_client.get_openai_client() - -response = openai_client.responses.create( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], - input="What is the size of France in square miles?", -) -print(f"Response output: {response.output_text}") +with project_client.get_openai_client() as openai_client: + response = openai_client.responses.create( + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + input="What is the size of France in square miles?", + ) + print(f"Response output: {response.output_text}") -response = openai_client.responses.create( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], - input="And what is the capital city?", - previous_response_id=response.id, -) -print(f"Response output: {response.output_text}") + response = openai_client.responses.create( + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + input="And what is the capital city?", + previous_response_id=response.id, + ) + print(f"Response output: {response.output_text}") ``` @@ -151,44 +150,43 @@ See the "agents" folder in the [package samples][samples] for an extensive set o ```python -openai_client = project_client.get_openai_client() - -agent = project_client.agents.create_version( - agent_name="MyAgent", - definition=PromptAgentDefinition( - model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], - instructions="You are a helpful assistant that answers general questions", - ), -) -print(f"Agent created (id: {agent.id}, name: {agent.name}, version: {agent.version})") +with project_client.get_openai_client() as openai_client: + agent = project_client.agents.create_version( + agent_name="MyAgent", + 
definition=PromptAgentDefinition( + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + instructions="You are a helpful assistant that answers general questions", + ), + ) + print(f"Agent created (id: {agent.id}, name: {agent.name}, version: {agent.version})") -conversation = openai_client.conversations.create( - items=[{"type": "message", "role": "user", "content": "What is the size of France in square miles?"}], -) -print(f"Created conversation with initial user message (id: {conversation.id})") + conversation = openai_client.conversations.create( + items=[{"type": "message", "role": "user", "content": "What is the size of France in square miles?"}], + ) + print(f"Created conversation with initial user message (id: {conversation.id})") -response = openai_client.responses.create( - conversation=conversation.id, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, - input="", -) -print(f"Response output: {response.output_text}") + response = openai_client.responses.create( + conversation=conversation.id, + extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, + input="", + ) + print(f"Response output: {response.output_text}") -openai_client.conversations.items.create( - conversation_id=conversation.id, - items=[{"type": "message", "role": "user", "content": "And what is the capital city?"}], -) -print(f"Added a second user message to the conversation") + openai_client.conversations.items.create( + conversation_id=conversation.id, + items=[{"type": "message", "role": "user", "content": "And what is the capital city?"}], + ) + print(f"Added a second user message to the conversation") -response = openai_client.responses.create( - conversation=conversation.id, - extra_body={"agent": {"name": agent.name, "type": "agent_reference"}}, - input="", -) -print(f"Response output: {response.output_text}") + response = openai_client.responses.create( + conversation=conversation.id, + extra_body={"agent": {"name": agent.name, "type": 
"agent_reference"}}, + input="", + ) + print(f"Response output: {response.output_text}") -openai_client.conversations.delete(conversation_id=conversation.id) -print("Conversation deleted") + openai_client.conversations.delete(conversation_id=conversation.id) + print("Conversation deleted") project_client.agents.delete_version(agent_name=agent.name, agent_version=agent.version) print("Agent deleted") @@ -668,7 +666,7 @@ with ( "evaluator_name": "builtin.task_adherence", "initialization_parameters": {"deployment_name": f"{model_deployment_name}"}, "data_mapping": {"query": "{{item.query}}", "response": "{{sample.output_items}}"}, - } + }, ] eval_object = openai_client.evals.create( name="Agent Evaluation", @@ -844,9 +842,7 @@ folder in the [package samples][samples]. ```python -print( - f"Create Index `{index_name}` with version `{index_version}`, referencing an existing AI Search resource:" -) +print(f"Create Index `{index_name}` with version `{index_version}`, referencing an existing AI Search resource:") index = project_client.indexes.create_or_update( name=index_name, version=index_version, @@ -884,20 +880,24 @@ with open(file_path, "rb") as f: uploaded_file = openai_client.files.create(file=f, purpose="fine-tune") print(uploaded_file) -print(f"Retrieving file metadata with ID: {uploaded_file.id}") -retrieved_file = openai_client.files.retrieve(uploaded_file.id) +print("Waits for the given file to be processed, default timeout is 30 mins") +processed_file = openai_client.files.wait_for_processing(uploaded_file.id) +print(processed_file) + +print(f"Retrieving file metadata with ID: {processed_file.id}") +retrieved_file = openai_client.files.retrieve(processed_file.id) print(retrieved_file) -print(f"Retrieving file content with ID: {uploaded_file.id}") -file_content = openai_client.files.content(uploaded_file.id) +print(f"Retrieving file content with ID: {processed_file.id}") +file_content = openai_client.files.content(processed_file.id) print(file_content.content) 
print("Listing all files:") for file in openai_client.files.list(): print(file) -print(f"Deleting file with ID: {uploaded_file.id}") -deleted_file = openai_client.files.delete(uploaded_file.id) +print(f"Deleting file with ID: {processed_file.id}") +deleted_file = openai_client.files.delete(processed_file.id) print(f"Successfully deleted file: {deleted_file.id}") ``` diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_evaluation.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_evaluation.py index 1f4602048645..af587d75b6b7 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_evaluation.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_agent_evaluation.py @@ -85,7 +85,7 @@ "evaluator_name": "builtin.task_adherence", "initialization_parameters": {"deployment_name": f"{model_deployment_name}"}, "data_mapping": {"query": "{{item.query}}", "response": "{{sample.output_items}}"}, - } + }, ] eval_object = openai_client.evals.create( name="Agent Evaluation",