test
2026-01-24 03:05:12 +11:00
parent f78f2388b3
commit 539852f81c
2584 changed files with 287471 additions and 0 deletions

python/.cspell.json

@@ -0,0 +1,81 @@
{
  "version": "0.2",
  "languageSettings": [
    {
      "languageId": "py",
      "allowCompoundWords": true,
      "locale": "en-US"
    }
  ],
  "language": "en-US",
  "patterns": [
    {
      "name": "import",
      "pattern": "import [a-zA-Z0-9_]+"
    },
    {
      "name": "from import",
      "pattern": "from [a-zA-Z0-9_]+ import [a-zA-Z0-9_]+"
    }
  ],
  "ignorePaths": [
    "samples/**",
    "notebooks/**"
  ],
  "words": [
    "aeiou",
    "aiplatform",
    "agui",
    "azuredocindex",
    "azuredocs",
    "azurefunctions",
    "boto",
    "contentvector",
    "contoso",
    "datamodel",
    "desync",
    "dotenv",
    "endregion",
    "entra",
    "faiss",
    "genai",
    "generativeai",
    "hnsw",
    "httpx",
    "huggingface",
    "Instrumentor",
    "logit",
    "logprobs",
    "lowlevel",
    "Magentic",
    "mistralai",
    "mongocluster",
    "nd",
    "ndarray",
    "nopep",
    "NOSQL",
    "ollama",
    "otlp",
    "Onnx",
    "onyourdatatest",
    "OPENAI",
    "opentelemetry",
    "OTEL",
    "powerfx",
    "protos",
    "pydantic",
    "pytestmark",
    "qdrant",
    "retrywrites",
    "streamable",
    "serde",
    "templating",
    "uninstrument",
    "vectordb",
    "vectorizable",
    "vectorizer",
    "vectorstoremodel",
    "vertexai",
    "Weaviate"
  ]
}

python/.env.example

@@ -0,0 +1,38 @@
# Azure AI
AZURE_AI_PROJECT_ENDPOINT=""
AZURE_AI_MODEL_DEPLOYMENT_NAME=""
# Bing connection for web search (optional, used by samples with web search)
BING_CONNECTION_ID=""
# Azure AI Search (optional, used by AzureAISearchContextProvider samples)
AZURE_SEARCH_ENDPOINT=""
AZURE_SEARCH_API_KEY=""
AZURE_SEARCH_INDEX_NAME=""
AZURE_SEARCH_SEMANTIC_CONFIG=""
AZURE_SEARCH_KNOWLEDGE_BASE_NAME=""
# Note: For agentic mode Knowledge Bases, also set AZURE_OPENAI_ENDPOINT below
# (different from AZURE_AI_PROJECT_ENDPOINT - Knowledge Base needs OpenAI endpoint for model calls)
# OpenAI
OPENAI_API_KEY=""
OPENAI_CHAT_MODEL_ID=""
OPENAI_RESPONSES_MODEL_ID=""
# Azure OpenAI
AZURE_OPENAI_ENDPOINT=""
AZURE_OPENAI_CHAT_DEPLOYMENT_NAME=""
AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME=""
# Mem0
MEM0_API_KEY=""
# Copilot Studio
COPILOTSTUDIOAGENT__ENVIRONMENTID=""
COPILOTSTUDIOAGENT__SCHEMANAME=""
COPILOTSTUDIOAGENT__TENANTID=""
COPILOTSTUDIOAGENT__AGENTAPPID=""
# Anthropic
ANTHROPIC_API_KEY=""
ANTHROPIC_MODEL=""
# Ollama
OLLAMA_ENDPOINT=""
OLLAMA_MODEL=""
# Observability
ENABLE_INSTRUMENTATION=true
ENABLE_SENSITIVE_DATA=true
OTEL_EXPORTER_OTLP_ENDPOINT="http://localhost:4317/"
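A note on usage: samples read these values as environment variables. A minimal sketch using python-dotenv (an illustration, not part of the file above):

```python
import os

from dotenv import load_dotenv

load_dotenv()  # Load variables from a .env file in the current directory.
endpoint = os.getenv("AZURE_OPENAI_ENDPOINT", "")
```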


@@ -0,0 +1,26 @@
---
applyTo: '**/agent-framework/python/**'
---
- Use `uv run` as the main entrypoint for running Python commands with all packages available.
- Use `uv run poe <task>` for development tasks like formatting (`fmt`), linting (`lint`), type checking (`pyright`, `mypy`), and testing (`test`).
- Use `uv run --directory packages/<package> poe <task>` to run tasks for a specific package.
- Read [DEV_SETUP.md](../../DEV_SETUP.md) for detailed development environment setup and available poe tasks.
- Read [CODING_STANDARD.md](../../CODING_STANDARD.md) for the project's coding standards and best practices.
- When verifying logic with unit tests, run only the related tests, not the entire test suite.
- For new tests and samples, review existing ones to understand the coding style and reuse it.
- When generating new functions, always specify the function return type and parameter types.
- Do not use `Optional`; use `Type | None` instead.
- Before running any commands to execute or test the code, ensure that all problems, compilation errors, and warnings are resolved.
- When formatting files, format only the files you changed or are currently working on; do not format the entire codebase.
- Do not mark new tests with `@pytest.mark.asyncio`.
- If you need debug information to understand an issue, use print statements as needed and remove them when testing is complete.
- Avoid adding excessive comments.
- When working with samples, make sure to update the associated README files with the latest information. These files are usually located in the same folder as the sample or in one of its parent folders.
Samples should follow this structure (a minimal skeleton follows the list):
1. Copyright header: `# Copyright (c) Microsoft. All rights reserved.`
2. Required imports.
3. Short description about the sample: `"""This sample demonstrates..."""`
4. Helper functions.
5. Main functions that demonstrate the functionality. If it is a single scenario, use a `main` function. If there are multiple scenarios, define separate functions and add a `main` function that invokes all scenarios.
6. Place `if __name__ == "__main__": asyncio.run(main())` at the end of the sample file to make the example executable.
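For illustration, a minimal single-scenario skeleton following this structure might look as follows (the agent construction here is a sketch; check existing samples for the exact API):

```python
# Copyright (c) Microsoft. All rights reserved.

import asyncio

from agent_framework import ChatAgent
from agent_framework.openai import OpenAIChatClient

"""This sample demonstrates the recommended sample file layout."""


async def main() -> None:
    # Single scenario, so one `main` function is enough.
    agent = ChatAgent(chat_client=OpenAIChatClient(), instructions="You are a helpful assistant.")
    print(await agent.run("Say hello."))


if __name__ == "__main__":
    asyncio.run(main())
```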


@@ -0,0 +1,65 @@
files: ^python/
fail_fast: true
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v5.0.0
    hooks:
      - id: check-toml
        name: Check TOML files
        files: \.toml$
        exclude: ^python/packages/lab/cookiecutter-agent-framework-lab/
      - id: check-yaml
        name: Check YAML files
        files: \.yaml$
      - id: check-json
        name: Check JSON files
        files: \.json$
        exclude: ^.*\.vscode\/.*|^python/demos/samples/chatkit-integration/frontend/(tsconfig.*\.json|package-lock\.json)$
      - id: end-of-file-fixer
        name: Fix End of File
        files: \.py$
        exclude: ^python/packages/lab/cookiecutter-agent-framework-lab/
      - id: mixed-line-ending
        name: Check Mixed Line Endings
        files: \.py$
        exclude: ^python/packages/lab/cookiecutter-agent-framework-lab/
      - id: check-ast
        name: Check Valid Python Samples
        types: ["python"]
        exclude: ^python/packages/lab/cookiecutter-agent-framework-lab/
  - repo: https://github.com/nbQA-dev/nbQA
    rev: 1.9.1
    hooks:
      - id: nbqa-check-ast
        name: Check Valid Python Notebooks
        types: ["jupyter"]
  - repo: https://github.com/asottile/pyupgrade
    rev: v3.20.0
    hooks:
      - id: pyupgrade
        name: Upgrade Python syntax
        args: [--py310-plus]
        exclude: ^python/packages/lab/cookiecutter-agent-framework-lab/
  - repo: local
    hooks:
      - id: poe-check
        name: Run checks through Poe
        entry: uv --directory ./python run poe pre-commit-check
        language: system
        files: ^python/
  - repo: https://github.com/astral-sh/uv-pre-commit
    # uv version.
    rev: 0.7.18
    hooks:
      # Update the uv lockfile
      - id: uv-lock
        name: Update uv lockfile
        files: python/pyproject.toml
        args: [--project, python]
  - repo: https://github.com/PyCQA/bandit
    rev: 1.8.5
    hooks:
      - id: bandit
        name: Bandit Security Checks
        args: ["-c", "python/pyproject.toml"]
        additional_dependencies: ["bandit[toml]"]

python/.vscode/launch.json

@@ -0,0 +1,34 @@
{
    // Use IntelliSense to learn about possible attributes.
    // Hover to view descriptions of existing attributes.
    // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
    "version": "0.2.0",
    "configurations": [
        {
            "name": "Python Debugger: Current File",
            "type": "debugpy",
            "request": "launch",
            "program": "${file}",
            "console": "integratedTerminal",
            "justMyCode": false
        },
        {
            "name": "AG-UI Examples Server",
            "type": "debugpy",
            "request": "launch",
            "module": "agent_framework_ag_ui_examples",
            "cwd": "${workspaceFolder}/packages/ag-ui",
            "console": "integratedTerminal",
            "justMyCode": false
        },
        {
            "name": "Python Attach",
            "type": "debugpy",
            "request": "attach",
            "connect": {
                "host": "localhost",
                "port": 5678
            }
        }
    ]
}

python/.vscode/settings.json

@@ -0,0 +1,39 @@
{
    "cSpell.languageSettings": [
        {
            "languageId": "py",
            "allowCompoundWords": true,
            "locale": "en-US"
        }
    ],
    "[python]": {
        "editor.codeActionsOnSave": {
            "source.organizeImports.ruff": "always",
            "source.fixAll.ruff": "always"
        },
        "editor.formatOnSave": true,
        "editor.formatOnPaste": true,
        "editor.formatOnType": true,
        "editor.defaultFormatter": "charliermarsh.ruff"
    },
    "python.analysis.autoFormatStrings": true,
    "python.analysis.importFormat": "relative",
    "python.analysis.packageIndexDepths": [
        {
            "name": "agent_framework",
            "depth": 2
        },
        {
            "name": "extensions",
            "depth": 2
        },
        {
            "name": "openai",
            "depth": 2
        },
        {
            "name": "azure",
            "depth": 2
        }
    ]
}

python/.vscode/tasks.json

@@ -0,0 +1,210 @@
{
    // See https://go.microsoft.com/fwlink/?LinkId=733558
    // for the documentation about the tasks.json format
    "version": "2.0.0",
    "tasks": [
        {
            "label": "Run Checks",
            "type": "shell",
            "command": "uv",
            "args": [
                "run",
                "pre-commit",
                "run",
                "-a"
            ],
            "problemMatcher": {
                "owner": "python",
                "fileLocation": [
                    "relative",
                    "${workspaceFolder}"
                ],
                "pattern": {
                    "regexp": "^(.*):(\\d+):(\\d+):\\s+(.*)$",
                    "file": 1,
                    "line": 2,
                    "column": 3,
                    "message": 4
                }
            },
            "presentation": {
                "panel": "shared"
            }
        },
        {
            "label": "Format",
            "type": "shell",
            "command": "uv",
            "args": [
                "run",
                "poe",
                "fmt"
            ],
            "problemMatcher": {
                "owner": "python",
                "fileLocation": [
                    "relative",
                    "${workspaceFolder}"
                ],
                "pattern": {
                    "regexp": "^(.*):(\\d+):(\\d+):\\s+(.*)$",
                    "file": 1,
                    "line": 2,
                    "column": 3,
                    "message": 4
                }
            },
            "presentation": {
                "panel": "shared"
            }
        },
        {
            "label": "Lint",
            "type": "shell",
            "command": "uv",
            "args": [
                "run",
                "poe",
                "lint"
            ],
            "problemMatcher": {
                "owner": "python",
                "fileLocation": [
                    "relative",
                    "${workspaceFolder}"
                ],
                "pattern": {
                    "regexp": "^(.*):(\\d+):(\\d+):\\s+(.*)$",
                    "file": 1,
                    "line": 2,
                    "column": 3,
                    "message": 4
                }
            },
            "presentation": {
                "panel": "shared"
            }
        },
        {
            "label": "Mypy",
            "type": "shell",
            "command": "uv",
            "args": [
                "run",
                "poe",
                "mypy"
            ],
            "problemMatcher": {
                "owner": "python",
                "fileLocation": [
                    "relative",
                    "${workspaceFolder}"
                ],
                "pattern": {
                    "regexp": "^(.*):(\\d+):(\\d+):\\s+(.*)$",
                    "file": 1,
                    "line": 2,
                    "column": 3,
                    "message": 4
                }
            },
            "presentation": {
                "panel": "shared"
            }
        },
        {
            "label": "Pyright",
            "type": "shell",
            "command": "uv",
            "args": [
                "run",
                "poe",
                "pyright"
            ],
            "problemMatcher": {
                "owner": "python",
                "fileLocation": [
                    "relative",
                    "${workspaceFolder}"
                ],
                "pattern": {
                    "regexp": "^(.*):(\\d+):(\\d+):\\s+(.*)$",
                    "file": 1,
                    "line": 2,
                    "column": 3,
                    "message": 4
                }
            },
            "presentation": {
                "panel": "shared"
            }
        },
        {
            "label": "Test",
            "type": "shell",
            "command": "uv",
            "args": [
                "run",
                "poe",
                "test"
            ],
            "problemMatcher": {
                "owner": "python",
                "fileLocation": [
                    "relative",
                    "${workspaceFolder}"
                ],
                "pattern": {
                    "regexp": "^(.*):(\\d+):(\\d+):\\s+(.*)$",
                    "file": 1,
                    "line": 2,
                    "column": 3,
                    "message": 4
                }
            },
            "presentation": {
                "panel": "shared"
            }
        },
        {
            "label": "Create Venv",
            "type": "shell",
            "command": "uv venv PYTHON=${input:py_version}",
            "presentation": {
                "reveal": "always",
                "panel": "new"
            },
            "problemMatcher": []
        },
        {
            "label": "Install all dependencies",
            "type": "shell",
            "command": "uv",
            "args": [
                "run",
                "poe",
                "setup",
                "--python=${input:py_version}"
            ],
            "presentation": {
                "reveal": "always",
                "panel": "new"
            },
            "problemMatcher": []
        }
    ],
    "inputs": [
        {
            "type": "pickString",
            "options": [
                "3.10",
                "3.11",
                "3.12",
                "3.13"
            ],
            "id": "py_version",
            "description": "Python version",
            "default": "3.10"
        }
    ]
}

python/CHANGELOG.md

@@ -0,0 +1,539 @@
# Changelog
All notable changes to the Agent Framework Python packages will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
## [1.0.0b260116] - 2026-01-16
### Added
- **agent-framework-azure-ai**: Create/Get Agent API for Azure V1 ([#3192](https://github.com/microsoft/agent-framework/pull/3192))
- **agent-framework-core**: Create/Get Agent API for OpenAI Assistants ([#3208](https://github.com/microsoft/agent-framework/pull/3208))
- **agent-framework-ag-ui**: Support service-managed thread on AG-UI ([#3136](https://github.com/microsoft/agent-framework/pull/3136))
- **agent-framework-ag-ui**: Add MCP tool support for AG-UI approval flows ([#3212](https://github.com/microsoft/agent-framework/pull/3212))
- **samples**: Add AzureAI sample for downloading code interpreter generated files ([#3189](https://github.com/microsoft/agent-framework/pull/3189))
### Changed
- **agent-framework-core**: [BREAKING] Rename `create_agent` to `as_agent` ([#3249](https://github.com/microsoft/agent-framework/pull/3249))
- **agent-framework-core**: [BREAKING] Rename `WorkflowOutputEvent.source_executor_id` to `executor_id` for API consistency ([#3166](https://github.com/microsoft/agent-framework/pull/3166))
### Fixed
- **agent-framework-core**: Properly configure structured outputs based on new options dict ([#3213](https://github.com/microsoft/agent-framework/pull/3213))
- **agent-framework-core**: Correct `FunctionResultContent` ordering in `WorkflowAgent.merge_updates` ([#3168](https://github.com/microsoft/agent-framework/pull/3168))
- **agent-framework-azurefunctions**: Update `DurableAIAgent` and fix integration tests ([#3241](https://github.com/microsoft/agent-framework/pull/3241))
- **agent-framework-azure-ai**: Create/Get Agent API fixes and example improvements ([#3246](https://github.com/microsoft/agent-framework/pull/3246))
## [1.0.0b260114] - 2026-01-14
### Added
- **agent-framework-azure-ai**: Create/Get Agent API for Azure V2 ([#3059](https://github.com/microsoft/agent-framework/pull/3059)) by @moonbox3
- **agent-framework-declarative**: Add declarative workflow runtime ([#2815](https://github.com/microsoft/agent-framework/pull/2815)) by @moonbox3
- **agent-framework-ag-ui**: Add dependencies param to ag-ui FastAPI endpoint ([#3191](https://github.com/microsoft/agent-framework/pull/3191)) by @moonbox3
- **agent-framework-ag-ui**: Add Pydantic request model and OpenAPI tags support to AG-UI FastAPI endpoint ([#2522](https://github.com/microsoft/agent-framework/pull/2522)) by @claude89757
- **agent-framework-core**: Add tool call/result content types and update connectors and samples ([#2971](https://github.com/microsoft/agent-framework/pull/2971)) by @moonbox3
- **agent-framework-core**: Add more specific exceptions to Workflow ([#3188](https://github.com/microsoft/agent-framework/pull/3188)) by @TaoChenOSU
### Changed
- **agent-framework-core**: [BREAKING] Refactor orchestrations ([#3023](https://github.com/microsoft/agent-framework/pull/3023)) by @TaoChenOSU
- **agent-framework-core**: [BREAKING] Introducing Options as TypedDict and Generic ([#3140](https://github.com/microsoft/agent-framework/pull/3140)) by @eavanvalkenburg
- **agent-framework-core**: [BREAKING] Removed display_name, renamed context_providers, middleware and AggregateContextProvider ([#3139](https://github.com/microsoft/agent-framework/pull/3139)) by @eavanvalkenburg
- **agent-framework-core**: MCP Improvements: improved connection loss behavior, pagination for loading and a param to control representation ([#3154](https://github.com/microsoft/agent-framework/pull/3154)) by @eavanvalkenburg
- **agent-framework-azure-ai**: Azure AI direct A2A endpoint support ([#3127](https://github.com/microsoft/agent-framework/pull/3127)) by @moonbox3
### Fixed
- **agent-framework-anthropic**: Fix duplicate ToolCallStartEvent in streaming tool calls ([#3051](https://github.com/microsoft/agent-framework/pull/3051)) by @moonbox3
- **agent-framework-anthropic**: Fix Anthropic streaming response bugs ([#3141](https://github.com/microsoft/agent-framework/pull/3141)) by @eavanvalkenburg
- **agent-framework-ag-ui**: Execute tools with approval_mode, fix shared state, code cleanup ([#3079](https://github.com/microsoft/agent-framework/pull/3079)) by @moonbox3
- **agent-framework-azure-ai**: Fix AzureAIClient tool call bug for AG-UI use ([#3148](https://github.com/microsoft/agent-framework/pull/3148)) by @moonbox3
- **agent-framework-core**: Fix MCPStreamableHTTPTool to use new streamable_http_client API ([#3088](https://github.com/microsoft/agent-framework/pull/3088)) by @Copilot
- **agent-framework-core**: Multiple bug fixes ([#3150](https://github.com/microsoft/agent-framework/pull/3150)) by @eavanvalkenburg
## [1.0.0b260107] - 2026-01-07
### Added
- **agent-framework-devui**: Improve DevUI and add Context Inspector view as a new tab under traces ([#2742](https://github.com/microsoft/agent-framework/pull/2742)) by @victordibia
- **samples**: Add streaming sample for Azure Functions ([#3057](https://github.com/microsoft/agent-framework/pull/3057)) by @gavin-aguiar
### Changed
- **repo**: Update templates ([#3106](https://github.com/microsoft/agent-framework/pull/3106)) by @eavanvalkenburg
### Fixed
- **agent-framework-ag-ui**: Fix MCP tool result serialization for list[TextContent] ([#2523](https://github.com/microsoft/agent-framework/pull/2523)) by @claude89757
- **agent-framework-azure-ai**: Fix response_format handling for structured outputs ([#3114](https://github.com/microsoft/agent-framework/pull/3114)) by @moonbox3
## [1.0.0b260106] - 2026-01-06
### Added
- **repo**: Add issue template and additional labeling ([#3006](https://github.com/microsoft/agent-framework/pull/3006)) by @eavanvalkenburg
### Changed
- None
### Fixed
- **agent-framework-core**: Fix max tokens translation and add extra integer test ([#3037](https://github.com/microsoft/agent-framework/pull/3037)) by @eavanvalkenburg
- **agent-framework-azure-ai**: Fix failure when conversation history contains assistant messages ([#3076](https://github.com/microsoft/agent-framework/pull/3076)) by @moonbox3
- **agent-framework-core**: Use HTTP exporter for http/protobuf protocol ([#3070](https://github.com/microsoft/agent-framework/pull/3070)) by @takanori-terai
- **agent-framework-core**: Fix ExecutorInvokedEvent and ExecutorCompletedEvent observability data ([#3090](https://github.com/microsoft/agent-framework/pull/3090)) by @moonbox3
- **agent-framework-core**: Honor tool_choice parameter passed to agent.run() and chat client methods ([#3095](https://github.com/microsoft/agent-framework/pull/3095)) by @moonbox3
- **samples**: AzureAI SharePoint sample fix ([#3108](https://github.com/microsoft/agent-framework/pull/3108)) by @giles17
## [1.0.0b251223] - 2025-12-23
### Added
- **agent-framework-bedrock**: Introducing support for Bedrock-hosted models (Anthropic, Cohere, etc.) ([#2610](https://github.com/microsoft/agent-framework/pull/2610))
- **agent-framework-core**: Added `response.created` and `response.in_progress` event process to `OpenAIBaseResponseClient` ([#2975](https://github.com/microsoft/agent-framework/pull/2975))
- **agent-framework-foundry-local**: Introducing Foundry Local Chat Clients ([#2915](https://github.com/microsoft/agent-framework/pull/2915))
- **samples**: Added GitHub MCP sample with PAT ([#2967](https://github.com/microsoft/agent-framework/pull/2967))
### Changed
- **agent-framework-core**: Preserve reasoning blocks with OpenRouter ([#2950](https://github.com/microsoft/agent-framework/pull/2950))
## [1.0.0b251218] - 2025-12-18
### Added
- **agent-framework-core**: Azure AI Agent with Bing Grounding Citations sample ([#2892](https://github.com/microsoft/agent-framework/pull/2892))
- **agent-framework-core**: Workflow option to visualize internal executors ([#2917](https://github.com/microsoft/agent-framework/pull/2917))
- **agent-framework-core**: Workflow cancellation sample ([#2732](https://github.com/microsoft/agent-framework/pull/2732))
- **agent-framework-core**: Azure Managed Redis support with credential provider ([#2887](https://github.com/microsoft/agent-framework/pull/2887))
- **agent-framework-core**: Additional arguments for Azure AI agent configuration ([#2922](https://github.com/microsoft/agent-framework/pull/2922))
### Changed
- **agent-framework-ollama**: Updated Ollama package version ([#2920](https://github.com/microsoft/agent-framework/pull/2920))
- **agent-framework-ollama**: Move Ollama samples to samples getting started directory ([#2921](https://github.com/microsoft/agent-framework/pull/2921))
- **agent-framework-core**: Cleanup and refactoring of chat clients ([#2937](https://github.com/microsoft/agent-framework/pull/2937))
- **agent-framework-core**: Align Run ID and Thread ID casing with AG-UI TypeScript SDK ([#2948](https://github.com/microsoft/agent-framework/pull/2948))
### Fixed
- **agent-framework-core**: Fix Pydantic error when using Literal types for tool parameters ([#2893](https://github.com/microsoft/agent-framework/pull/2893))
- **agent-framework-core**: Correct MCP image type conversion in `_mcp.py` ([#2901](https://github.com/microsoft/agent-framework/pull/2901))
- **agent-framework-core**: Fix BadRequestError when using Pydantic models in response formatting ([#1843](https://github.com/microsoft/agent-framework/pull/1843))
- **agent-framework-core**: Propagate workflow kwargs to sub-workflows via WorkflowExecutor ([#2923](https://github.com/microsoft/agent-framework/pull/2923))
- **agent-framework-core**: Fix WorkflowAgent event handling and kwargs forwarding ([#2946](https://github.com/microsoft/agent-framework/pull/2946))
## [1.0.0b251216] - 2025-12-16
### Added
- **agent-framework-ollama**: Ollama connector for Agent Framework (#1104)
- **agent-framework-core**: Added custom args and thread object to `ai_function` kwargs (#2769)
- **agent-framework-core**: Enable checkpointing for `WorkflowAgent` (#2774)
### Changed
- **agent-framework-core**: [BREAKING] Observability updates (#2782)
- **agent-framework-core**: Use agent description in `HandoffBuilder` auto-generated tools (#2714)
- **agent-framework-core**: Remove warnings from workflow builder when not using factories (#2808)
### Fixed
- **agent-framework-core**: Fix `WorkflowAgent` to include thread conversation history (#2774)
- **agent-framework-core**: Fix context duplication in handoff workflows when restoring from checkpoint (#2867)
- **agent-framework-core**: Fix middleware terminate flag to exit function calling loop immediately (#2868)
- **agent-framework-core**: Fix `WorkflowAgent` to emit `yield_output` as agent response (#2866)
- **agent-framework-core**: Filter framework kwargs from MCP tool invocations (#2870)
## [1.0.0b251211] - 2025-12-11
### Added
- **agent-framework-core**: Extend HITL support for all orchestration patterns (#2620)
- **agent-framework-core**: Add factory pattern to concurrent orchestration builder (#2738)
- **agent-framework-core**: Add factory pattern to sequential orchestration builder (#2710)
- **agent-framework-azure-ai**: Capture file IDs from code interpreter in streaming responses (#2741)
### Changed
- **agent-framework-azurefunctions**: Change DurableAIAgent log level from warning to debug when invoked without thread (#2736)
### Fixed
- **agent-framework-core**: Added more complete parsing for mcp tool arguments (#2756)
- **agent-framework-core**: Fix GroupChat ManagerSelectionResponse JSON Schema for OpenAI Structured Outputs (#2750)
- **samples**: Standardize OpenAI API key environment variable naming (#2629)
## [1.0.0b251209] - 2025-12-09
### Added
- **agent-framework-core**: Support an autonomous handoff flow (#2497)
- **agent-framework-core**: WorkflowBuilder registry (#2486)
- **agent-framework-a2a**: Add configurable timeout support to A2AAgent (#2432)
- **samples**: Added Azure OpenAI Responses File Search sample + Integration test update (#2645)
- **samples**: Update fan in fan out sample to show concurrency (#2705)
### Changed
- **agent-framework-azure-ai**: [BREAKING] Renamed `async_credential` to `credential` (#2648)
- **samples**: Improve sample logging (#2692)
- **samples**: azureai image gen sample update (#2709)
### Fixed
- **agent-framework-core**: Fix DurableState schema serializations (#2670)
- **agent-framework-core**: Fix context provider lifecycle agentic mode (#2650)
- **agent-framework-devui**: Fix WorkflowFailedEvent error extraction (#2706)
- **agent-framework-devui**: Fix DevUI fails when uploading Pdf file (#2675)
- **agent-framework-devui**: Fix message serialization issue (#2674)
- **observability**: Display system prompt in langfuse (#2653)
## [1.0.0b251204] - 2025-12-04
### Added
- **agent-framework-core**: Add support for Pydantic `BaseModel` as function call result (#2606)
- **agent-framework-core**: Executor events now include I/O data (#2591)
- **samples**: Inline YAML declarative sample (#2582)
- **samples**: Handoff-as-agent with HITL sample (#2534)
### Changed
- **agent-framework-core**: [BREAKING] Support Magentic agent tool call approvals and plan stalling HITL behavior (#2569)
- **agent-framework-core**: [BREAKING] Standardize orchestration outputs as list of `ChatMessage`; allow agent as group chat manager (#2291)
- **agent-framework-core**: [BREAKING] Respond with `AgentRunResponse` including serialized structured output (#2285)
- **observability**: Use `executor_id` and `edge_group_id` as span names for clearer traces (#2538)
- **agent-framework-devui**: Add multimodal input support for workflows and refactor chat input (#2593)
- **docs**: Update Python orchestration documentation (#2087)
### Fixed
- **observability**: Resolve mypy error in observability module (#2641)
- **agent-framework-core**: Fix `AgentRunResponse.created_at` returning local datetime labeled as UTC (#2590)
- **agent-framework-core**: Emit `ExecutorFailedEvent` before `WorkflowFailedEvent` when executor throws (#2537)
- **agent-framework-core**: Fix MagenticAgentExecutor producing `repr` string for tool call content (#2566)
- **agent-framework-core**: Fixed empty text content Pydantic validation failure (#2539)
- **agent-framework-azure-ai**: Added support for application endpoints in Azure AI client (#2460)
- **agent-framework-azurefunctions**: Add MCP tool support (#2385)
- **agent-framework-core**: Preserve MCP array items schema in Pydantic field generation (#2382)
- **agent-framework-devui**: Make tool call view optional and fix links (#2243)
- **agent-framework-core**: Always include output in function call result messages (#2414)
- **agent-framework-redis**: Fix TypeError (#2411)
## [1.0.0b251120] - 2025-11-20
### Added
- **agent-framework-core**: Introducing support for declarative YAML spec ([#2002](https://github.com/microsoft/agent-framework/pull/2002))
- **agent-framework-core**: Use AI Foundry evaluators for self-reflection ([#2250](https://github.com/microsoft/agent-framework/pull/2250))
- **agent-framework-core**: Propagate `as_tool()` kwargs and add runtime context + middleware sample ([#2311](https://github.com/microsoft/agent-framework/pull/2311))
- **agent-framework-anthropic**: Anthropic Foundry integration ([#2302](https://github.com/microsoft/agent-framework/pull/2302))
- **samples**: M365 Agent SDK Hosting sample ([#2292](https://github.com/microsoft/agent-framework/pull/2292))
- **samples**: Foundry Sample for A2A + SharePoint Samples ([#2313](https://github.com/microsoft/agent-framework/pull/2313))
### Changed
- **agent-framework-azurefunctions**: [BREAKING] Schema changes for Azure Functions package ([#2151](https://github.com/microsoft/agent-framework/pull/2151))
- **agent-framework-core**: Move evaluation folders under `evaluations` ([#2355](https://github.com/microsoft/agent-framework/pull/2355))
- **agent-framework-core**: Move red teaming files to their own folder ([#2333](https://github.com/microsoft/agent-framework/pull/2333))
- **agent-framework-core**: "fix all" task now single source of truth ([#2303](https://github.com/microsoft/agent-framework/pull/2303))
- **agent-framework-core**: Improve and clean up exception handling ([#2337](https://github.com/microsoft/agent-framework/pull/2337), [#2319](https://github.com/microsoft/agent-framework/pull/2319))
- **agent-framework-core**: Clean up imports ([#2318](https://github.com/microsoft/agent-framework/pull/2318))
### Fixed
- **agent-framework-azure-ai**: Fix for Azure AI client ([#2358](https://github.com/microsoft/agent-framework/pull/2358))
- **agent-framework-core**: Fix tool execution bleed-over in aiohttp/Bot Framework scenarios ([#2314](https://github.com/microsoft/agent-framework/pull/2314))
- **agent-framework-core**: `@ai_function` now correctly handles `self` parameter ([#2266](https://github.com/microsoft/agent-framework/pull/2266))
- **agent-framework-core**: Resolve string annotations in `FunctionExecutor` ([#2308](https://github.com/microsoft/agent-framework/pull/2308))
- **agent-framework-core**: Langfuse observability captures ChatAgent system instructions ([#2316](https://github.com/microsoft/agent-framework/pull/2316))
- **agent-framework-core**: Incomplete URL substring sanitization fix ([#2274](https://github.com/microsoft/agent-framework/pull/2274))
- **observability**: Handle datetime serialization in tool results ([#2248](https://github.com/microsoft/agent-framework/pull/2248))
## [1.0.0b251117] - 2025-11-17
### Fixed
- **agent-framework-ag-ui**: Fix ag-ui state handling issues ([#2289](https://github.com/microsoft/agent-framework/pull/2289))
## [1.0.0b251114] - 2025-11-14
### Added
- **samples**: Bing Custom Search sample using `HostedWebSearchTool` ([#2226](https://github.com/microsoft/agent-framework/pull/2226))
- **samples**: Fabric and Browser Automation samples ([#2207](https://github.com/microsoft/agent-framework/pull/2207))
- **samples**: Hosted agent samples ([#2205](https://github.com/microsoft/agent-framework/pull/2205))
- **samples**: Azure OpenAI Responses API Hosted MCP sample ([#2108](https://github.com/microsoft/agent-framework/pull/2108))
- **samples**: Bing Grounding and Custom Search samples ([#2200](https://github.com/microsoft/agent-framework/pull/2200))
### Changed
- **agent-framework-azure-ai**: Enhance Azure AI Search citations with complete URL information ([#2066](https://github.com/microsoft/agent-framework/pull/2066))
- **agent-framework-azurefunctions**: Update samples to latest stable Azure Functions Worker packages ([#2189](https://github.com/microsoft/agent-framework/pull/2189))
- **agent-framework-azure-ai**: Agent name now required for `AzureAIClient` ([#2198](https://github.com/microsoft/agent-framework/pull/2198))
- **build**: Use `uv build` for packaging ([#2161](https://github.com/microsoft/agent-framework/pull/2161))
- **tooling**: Pre-commit improvements ([#2222](https://github.com/microsoft/agent-framework/pull/2222))
- **dependencies**: Updated package versions ([#2208](https://github.com/microsoft/agent-framework/pull/2208))
### Fixed
- **agent-framework-core**: Prevent duplicate MCP tools and prompts ([#1876](https://github.com/microsoft/agent-framework/pull/1876)) ([#1890](https://github.com/microsoft/agent-framework/pull/1890))
- **agent-framework-devui**: Fix HIL regression ([#2167](https://github.com/microsoft/agent-framework/pull/2167))
- **agent-framework-chatkit**: ChatKit sample fixes ([#2174](https://github.com/microsoft/agent-framework/pull/2174))
## [1.0.0b251112.post1] - 2025-11-12
### Added
- **agent-framework-azurefunctions**: Merge Azure Functions feature branch (#1916)
### Fixed
- **agent-framework-ag-ui**: fix tool call id mismatch in ag-ui ([#2166](https://github.com/microsoft/agent-framework/pull/2166))
## [1.0.0b251112] - 2025-11-12
### Added
- **agent-framework-azure-ai**: Azure AI client based on new `azure-ai-projects` package ([#1910](https://github.com/microsoft/agent-framework/pull/1910))
- **agent-framework-anthropic**: Add convenience method on data content ([#2083](https://github.com/microsoft/agent-framework/pull/2083))
### Changed
- **agent-framework-core**: Update OpenAI samples to use agents ([#2012](https://github.com/microsoft/agent-framework/pull/2012))
### Fixed
- **agent-framework-anthropic**: Fixed image handling in Anthropic client ([#2083](https://github.com/microsoft/agent-framework/pull/2083))
## [1.0.0b251111] - 2025-11-11
### Added
- **agent-framework-core**: Add OpenAI Responses Image Generation Stream Support with partial images and unit tests ([#1853](https://github.com/microsoft/agent-framework/pull/1853))
- **agent-framework-ag-ui**: Add concrete AGUIChatClient implementation ([#2072](https://github.com/microsoft/agent-framework/pull/2072))
### Fixed
- **agent-framework-a2a**: Use the last entry in the task history to avoid empty responses ([#2101](https://github.com/microsoft/agent-framework/pull/2101))
- **agent-framework-core**: Fix MCP Tool Parameter Descriptions not propagated to LLMs ([#1978](https://github.com/microsoft/agent-framework/pull/1978))
- **agent-framework-core**: Handle agent user input request in AgentExecutor ([#2022](https://github.com/microsoft/agent-framework/pull/2022))
- **agent-framework-core**: Fix Model ID attribute not showing up in `invoke_agent` span ([#2061](https://github.com/microsoft/agent-framework/pull/2061))
- **agent-framework-core**: Fix underlying tool choice bug and enable return to previous Handoff subagent ([#2037](https://github.com/microsoft/agent-framework/pull/2037))
## [1.0.0b251108] - 2025-11-08
### Added
- **agent-framework-devui**: Add OpenAI Responses API proxy support + HIL (Human-in-the-Loop) for Workflows ([#1737](https://github.com/microsoft/agent-framework/pull/1737))
- **agent-framework-purview**: Add Caching and background processing in Python Purview Middleware ([#1844](https://github.com/microsoft/agent-framework/pull/1844))
### Changed
- **agent-framework-devui**: Use metadata.entity_id instead of model field ([#1984](https://github.com/microsoft/agent-framework/pull/1984))
- **agent-framework-devui**: Serialize workflow input as string to maintain conformance with OpenAI Responses format ([#2021](https://github.com/microsoft/agent-framework/pull/2021))
## [1.0.0b251106.post1] - 2025-11-06
### Fixed
- **agent-framework-ag-ui**: Fix ag-ui examples packaging for PyPI publish ([#1953](https://github.com/microsoft/agent-framework/pull/1953))
## [1.0.0b251106] - 2025-11-06
### Changed
- **agent-framework-ag-ui**: export sample ag-ui agents ([#1927](https://github.com/microsoft/agent-framework/pull/1927))
## [1.0.0b251105] - 2025-11-05
### Added
- **agent-framework-ag-ui**: Initial release of AG-UI protocol integration for Agent Framework ([#1826](https://github.com/microsoft/agent-framework/pull/1826))
- **agent-framework-chatkit**: ChatKit integration with a sample application ([#1273](https://github.com/microsoft/agent-framework/pull/1273))
- Added parameter to disable agent cleanup in AzureAIAgentClient ([#1882](https://github.com/microsoft/agent-framework/pull/1882))
- Add support for Python 3.14 ([#1904](https://github.com/microsoft/agent-framework/pull/1904))
### Changed
- [BREAKING] Replaced AIProjectClient with AgentsClient in Foundry ([#1936](https://github.com/microsoft/agent-framework/pull/1936))
- Updates to Tools ([#1835](https://github.com/microsoft/agent-framework/pull/1835))
### Fixed
- Fix missing packaging dependency ([#1929](https://github.com/microsoft/agent-framework/pull/1929))
## [1.0.0b251104] - 2025-11-04
### Added
- Introducing the Anthropic Client ([#1819](https://github.com/microsoft/agent-framework/pull/1819))
### Changed
- [BREAKING] Consolidate workflow run APIs ([#1723](https://github.com/microsoft/agent-framework/pull/1723))
- [BREAKING] Remove request_type param from ctx.request_info() ([#1824](https://github.com/microsoft/agent-framework/pull/1824))
- [BREAKING] Cleanup of dependencies ([#1803](https://github.com/microsoft/agent-framework/pull/1803))
- [BREAKING] Replace `RequestInfoExecutor` with `request_info` API and `@response_handler` ([#1466](https://github.com/microsoft/agent-framework/pull/1466))
- Azure AI Search Support Update + Refactored Samples & Unit Tests ([#1683](https://github.com/microsoft/agent-framework/pull/1683))
- Lab: Updates to GAIA module ([#1763](https://github.com/microsoft/agent-framework/pull/1763))
### Fixed
- Azure AI `top_p` and `temperature` parameters fix ([#1839](https://github.com/microsoft/agent-framework/pull/1839))
- Ensure agent thread is part of checkpoint ([#1756](https://github.com/microsoft/agent-framework/pull/1756))
- Fix middleware and cleanup confusing function ([#1865](https://github.com/microsoft/agent-framework/pull/1865))
- Fix type compatibility check ([#1753](https://github.com/microsoft/agent-framework/pull/1753))
- Fix mcp tool cloning for handoff pattern ([#1883](https://github.com/microsoft/agent-framework/pull/1883))
## [1.0.0b251028] - 2025-10-28
### Added
- Added thread to AgentRunContext ([#1732](https://github.com/microsoft/agent-framework/pull/1732))
- AutoGen migration samples ([#1738](https://github.com/microsoft/agent-framework/pull/1738))
- Add Handoff orchestration pattern support ([#1469](https://github.com/microsoft/agent-framework/pull/1469))
- Added Samples for HostedCodeInterpreterTool with files ([#1583](https://github.com/microsoft/agent-framework/pull/1583))
### Changed
- [BREAKING] Introduce group chat and refactor orchestrations. Fix as_agent(). Standardize orchestration start msg types. ([#1538](https://github.com/microsoft/agent-framework/pull/1538))
- [BREAKING] Update Agent Framework Lab Lightning to use Agent-lightning v0.2.0 API ([#1644](https://github.com/microsoft/agent-framework/pull/1644))
- [BREAKING] Refactor Checkpointing for runner and runner context ([#1645](https://github.com/microsoft/agent-framework/pull/1645))
- Update lab packages and installation instructions ([#1687](https://github.com/microsoft/agent-framework/pull/1687))
- Remove deprecated add_agent() calls from workflow samples ([#1508](https://github.com/microsoft/agent-framework/pull/1508))
### Fixed
- Reject @executor on staticmethod/classmethod with clear error message ([#1719](https://github.com/microsoft/agent-framework/pull/1719))
- DevUI Fix Serialization, Timestamp and Other Issues ([#1584](https://github.com/microsoft/agent-framework/pull/1584))
- MCP Error Handling Fix + Added Unit Tests ([#1621](https://github.com/microsoft/agent-framework/pull/1621))
- InMemoryCheckpointManager is not JSON serializable ([#1639](https://github.com/microsoft/agent-framework/pull/1639))
- Fix gen_ai.operation.name to be invoke_agent ([#1729](https://github.com/microsoft/agent-framework/pull/1729))
## [1.0.0b251016] - 2025-10-16
### Added
- Add Purview Middleware ([#1142](https://github.com/microsoft/agent-framework/pull/1142))
- Added URL Citation Support to Azure AI Agent ([#1397](https://github.com/microsoft/agent-framework/pull/1397))
- Added MCP headers for AzureAI ([#1506](https://github.com/microsoft/agent-framework/pull/1506))
- Add Function Approval UI to DevUI ([#1401](https://github.com/microsoft/agent-framework/pull/1401))
- Added function approval example with streaming ([#1365](https://github.com/microsoft/agent-framework/pull/1365))
- Added A2A AuthInterceptor Support ([#1317](https://github.com/microsoft/agent-framework/pull/1317))
- Added example with MCP and authentication ([#1389](https://github.com/microsoft/agent-framework/pull/1389))
- Added sample with Foundry Redteams ([#1306](https://github.com/microsoft/agent-framework/pull/1306))
- Added AzureAI Agent AI Search Sample ([#1281](https://github.com/microsoft/agent-framework/pull/1281))
- Added AzureAI Bing Connection Name Support ([#1364](https://github.com/microsoft/agent-framework/pull/1364))
### Changed
- Enhanced documentation for dependency injection and serialization features ([#1324](https://github.com/microsoft/agent-framework/pull/1324))
- Update README to list all available examples ([#1394](https://github.com/microsoft/agent-framework/pull/1394))
- Reorganize workflows modules ([#1282](https://github.com/microsoft/agent-framework/pull/1282))
- Improved thread serialization and deserialization with better tests ([#1316](https://github.com/microsoft/agent-framework/pull/1316))
- Included existing agent definition in requests to Azure AI ([#1285](https://github.com/microsoft/agent-framework/pull/1285))
- DevUI - Internal Refactor, Conversations API support, and performance improvements ([#1235](https://github.com/microsoft/agent-framework/pull/1235))
- Refactor `RequestInfoExecutor` ([#1403](https://github.com/microsoft/agent-framework/pull/1403))
### Fixed
- Fix AI Search Tool Sample and improve AI Search Exceptions ([#1206](https://github.com/microsoft/agent-framework/pull/1206))
- Fix Failure with Function Approval Messages in Chat Clients ([#1322](https://github.com/microsoft/agent-framework/pull/1322))
- Fix deadlock in Magentic workflow ([#1325](https://github.com/microsoft/agent-framework/pull/1325))
- Fix tool call content not showing up in workflow events ([#1290](https://github.com/microsoft/agent-framework/pull/1290))
- Fixed instructions duplication in model clients ([#1332](https://github.com/microsoft/agent-framework/pull/1332))
- Agent Name Sanitization ([#1523](https://github.com/microsoft/agent-framework/pull/1523))
## [1.0.0b251007] - 2025-10-07
### Added
- Added method to expose agent as MCP server ([#1248](https://github.com/microsoft/agent-framework/pull/1248))
- Add PDF file support to OpenAI content parser with filename mapping ([#1121](https://github.com/microsoft/agent-framework/pull/1121))
- Sample on integration of Azure OpenAI Responses Client with a local MCP server ([#1215](https://github.com/microsoft/agent-framework/pull/1215))
- Added approval_mode and allowed_tools to local MCP ([#1203](https://github.com/microsoft/agent-framework/pull/1203))
- Introducing AI Function approval ([#1131](https://github.com/microsoft/agent-framework/pull/1131))
- Add name and description to workflows ([#1183](https://github.com/microsoft/agent-framework/pull/1183))
- Add Ollama example using OpenAIChatClient ([#1100](https://github.com/microsoft/agent-framework/pull/1100))
- Add DevUI improvements with color scheme, linking, agent details, and token usage data ([#1091](https://github.com/microsoft/agent-framework/pull/1091))
- Add semantic-kernel to agent-framework migration code samples ([#1045](https://github.com/microsoft/agent-framework/pull/1045))
### Changed
- [BREAKING] Parameter naming and other fixes ([#1255](https://github.com/microsoft/agent-framework/pull/1255))
- [BREAKING] Introduce add_agent functionality and added output_response to AgentExecutor; agent streaming behavior to follow workflow invocation ([#1184](https://github.com/microsoft/agent-framework/pull/1184))
- OpenAI Clients accepting api_key callback ([#1139](https://github.com/microsoft/agent-framework/pull/1139))
- Updated docstrings ([#1225](https://github.com/microsoft/agent-framework/pull/1225))
- Standardize docstrings: Use Keyword Args for Settings classes and add environment variable examples ([#1202](https://github.com/microsoft/agent-framework/pull/1202))
- Update References to Agent2Agent protocol to use correct terminology ([#1162](https://github.com/microsoft/agent-framework/pull/1162))
- Update getting started samples to reflect AF and update unit test ([#1093](https://github.com/microsoft/agent-framework/pull/1093))
- Update Lab Installation instructions to install from source ([#1051](https://github.com/microsoft/agent-framework/pull/1051))
- Update python DEV_SETUP to add brew-based uv installation ([#1173](https://github.com/microsoft/agent-framework/pull/1173))
- Update docstrings of all files and add example code in public interfaces ([#1107](https://github.com/microsoft/agent-framework/pull/1107))
- Clarifications on installing packages in README ([#1036](https://github.com/microsoft/agent-framework/pull/1036))
- DevUI Fixes ([#1035](https://github.com/microsoft/agent-framework/pull/1035))
- Packaging fixes: removed lab from dependencies, setup build/publish tasks, set homepage url ([#1056](https://github.com/microsoft/agent-framework/pull/1056))
- Agents + Chat Client Samples Docstring Updates ([#1028](https://github.com/microsoft/agent-framework/pull/1028))
- Python: Foundry Agent Completeness ([#954](https://github.com/microsoft/agent-framework/pull/954))
### Fixed
- Ollama + azureai openapi samples fix ([#1244](https://github.com/microsoft/agent-framework/pull/1244))
- Fix multimodal input sample: Document required environment variables and configuration options ([#1088](https://github.com/microsoft/agent-framework/pull/1088))
- Fix Azure AI Getting Started samples: Improve documentation and code readability ([#1089](https://github.com/microsoft/agent-framework/pull/1089))
- Fix a2a import ([#1058](https://github.com/microsoft/agent-framework/pull/1058))
- Fix DevUI serialization and agent structured outputs ([#1055](https://github.com/microsoft/agent-framework/pull/1055))
- Default DevUI workflows to string input when start node is auto-wrapped agent ([#1143](https://github.com/microsoft/agent-framework/pull/1143))
- Add missing pre flags on pip packages ([#1130](https://github.com/microsoft/agent-framework/pull/1130))
## [1.0.0b251001] - 2025-10-01
### Added
- First release of Agent Framework for Python
- agent-framework-core: Main abstractions, types and implementations for OpenAI and Azure OpenAI
- agent-framework-azure-ai: Integration with Azure AI Foundry Agents
- agent-framework-copilotstudio: Integration with Microsoft Copilot Studio agents
- agent-framework-a2a: Create A2A agents
- agent-framework-devui: Browser-based UI to chat with agents and workflows, with tracing visualization
- agent-framework-mem0 and agent-framework-redis: Integrations for Mem0 Context Provider and Redis Context Provider/Chat Memory Store
- agent-framework: Meta-package for installing all packages
For more information, see the [announcement blog post](https://devblogs.microsoft.com/foundry/introducing-microsoft-agent-framework-the-open-source-engine-for-agentic-ai-apps/).
[Unreleased]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b260116...HEAD
[1.0.0b260116]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b260114...python-1.0.0b260116
[1.0.0b260114]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b260107...python-1.0.0b260114
[1.0.0b260107]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b260106...python-1.0.0b260107
[1.0.0b260106]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251223...python-1.0.0b260106
[1.0.0b251223]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251218...python-1.0.0b251223
[1.0.0b251218]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251216...python-1.0.0b251218
[1.0.0b251216]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251211...python-1.0.0b251216
[1.0.0b251211]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251209...python-1.0.0b251211
[1.0.0b251209]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251204...python-1.0.0b251209
[1.0.0b251204]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251120...python-1.0.0b251204
[1.0.0b251120]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251117...python-1.0.0b251120
[1.0.0b251117]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251114...python-1.0.0b251117
[1.0.0b251114]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251112.post1...python-1.0.0b251114
[1.0.0b251112.post1]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251112...python-1.0.0b251112.post1
[1.0.0b251112]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251111...python-1.0.0b251112
[1.0.0b251111]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251108...python-1.0.0b251111
[1.0.0b251108]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251106.post1...python-1.0.0b251108
[1.0.0b251106.post1]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251106...python-1.0.0b251106.post1
[1.0.0b251106]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251105...python-1.0.0b251106
[1.0.0b251105]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251104...python-1.0.0b251105
[1.0.0b251104]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251028...python-1.0.0b251104
[1.0.0b251028]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251016...python-1.0.0b251028
[1.0.0b251016]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251007...python-1.0.0b251016
[1.0.0b251007]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251001...python-1.0.0b251007
[1.0.0b251001]: https://github.com/microsoft/agent-framework/releases/tag/python-1.0.0b251001

python/CODING_STANDARD.md

@@ -0,0 +1,402 @@
# Coding Standards
This document describes the coding standards and conventions for the Agent Framework project.
## Code Style and Formatting
We use [ruff](https://github.com/astral-sh/ruff) for both linting and formatting with the following configuration:
- **Line length**: 120 characters
- **Target Python version**: 3.10+
- **Google-style docstrings**: All public functions, classes, and modules should have docstrings following Google conventions
## Function Parameter Guidelines
To make the code easier to use and maintain:
- **Positional parameters**: Use only for up to three parameters whose meaning and order are obvious
- **Keyword parameters**: Use for all other parameters, especially when there are multiple required parameters without obvious ordering
- **Avoid additional imports**: Do not require users to import additional modules just to call a function; provide string-based overrides where applicable. For instance:
```python
def create_agent(name: str, tool_mode: ChatToolMode) -> Agent:
    # Implementation here
    ...
```
Should be:
```python
def create_agent(name: str, tool_mode: Literal['auto', 'required', 'none'] | ChatToolMode) -> Agent:
    # Implementation here
    if isinstance(tool_mode, str):
        tool_mode = ChatToolMode(tool_mode)
```
- **Document kwargs**: Always document how `kwargs` are used, either by referencing external documentation or explaining their purpose
- **Separate kwargs**: When combining kwargs for multiple purposes, use specific parameters like `client_kwargs: dict[str, Any]` instead of mixing everything in `**kwargs`; see the sketch below.
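For instance, a hypothetical signature (the function and parameter names are invented for illustration) that keeps client construction kwargs apart from the rest:

```python
from typing import Any


def create_agent(
    name: str,
    *,
    client_kwargs: dict[str, Any] | None = None,
    **kwargs: Any,
) -> "Agent":
    """Create an agent.

    Keyword Args:
        client_kwargs: Passed through unchanged to the underlying chat client constructor.
        kwargs: Passed through to the agent itself; see the agent documentation.
    """
    ...
```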
## Method Naming Inside Connectors
When naming methods inside connectors, we have a loose preference for using the following conventions:
- Use `_prepare_<object>_for_<purpose>` as the naming pattern for methods that prepare data to send to the external service.
- Use `_parse_<object>_from_<source>` as the naming pattern for methods that process data received from the external service.
This is not a strict rule, but a guideline to help maintain consistency across the codebase.
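As a hypothetical illustration (these method names are invented for the example, not taken from the codebase):

```python
from typing import Any


class ExampleConnector:
    """Hypothetical connector illustrating the naming guideline."""

    def _prepare_messages_for_request(self, messages: list[Any]) -> list[dict[str, Any]]:
        # Translate framework message types into the service's wire format.
        ...

    def _parse_response_from_service(self, payload: dict[str, Any]) -> list[Any]:
        # Translate the service's raw payload back into framework types.
        ...
```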
## Implementation Decisions
### Asynchronous Programming
It's important to note that most of this library is written with asynchronous execution in mind; the developer should assume everything is asynchronous by default. The function signature, `async def` versus `def`, tells you whether or not something is asynchronous.
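A minimal sketch of the convention (the function names here are illustrative):

```python
async def fetch_completion(prompt: str) -> str:
    # `async def`: asynchronous, callers must await this.
    ...


def format_prompt(template: str, **values: str) -> str:
    # Plain `def`: synchronous, safe to call directly.
    ...
```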
### Attributes vs Inheritance
Prefer attributes over inheritance when parameters are mostly the same:
```python
# ✅ Preferred - using attributes
from agent_framework import ChatMessage
user_msg = ChatMessage(role="user", content="Hello, world!")
asst_msg = ChatMessage(role="assistant", content="Hello, world!")
# ❌ Not preferred - unnecessary inheritance
from agent_framework import UserMessage, AssistantMessage
user_msg = UserMessage(content="Hello, world!")
asst_msg = AssistantMessage(content="Hello, world!")
```
### Logging
Use the centralized logging system:
```python
from agent_framework import get_logger
# For main package
logger = get_logger()
# For subpackages
logger = get_logger('agent_framework.azure')
```
**Do not use** direct logging module imports:
```python
# ❌ Avoid this
import logging
logger = logging.getLogger(__name__)
```
### Import Structure
The package follows a flat import structure:
- **Core**: Import directly from `agent_framework`

  ```python
  from agent_framework import ChatAgent, ai_function
  ```

- **Components**: Import from `agent_framework.<component>`

  ```python
  from agent_framework.observability import enable_instrumentation, configure_otel_providers
  ```

- **Connectors**: Import from `agent_framework.<vendor/platform>`

  ```python
  from agent_framework.openai import OpenAIChatClient
  from agent_framework.azure import AzureOpenAIChatClient
  ```
## Package Structure
The project uses a monorepo structure with separate packages for each connector/extension:
```plaintext
python/
├── pyproject.toml              # Root package (agent-framework) depends on agent-framework-core[all]
├── samples/                    # Sample code and examples
├── packages/
│   ├── core/                   # agent-framework-core - Core abstractions and implementations
│   │   ├── pyproject.toml      # Defines [all] extra that includes all connector packages
│   │   ├── tests/              # Tests for core package
│   │   └── agent_framework/
│   │       ├── __init__.py     # Public API exports
│   │       ├── _agents.py      # Agent implementations
│   │       ├── _clients.py     # Chat client protocols and base classes
│   │       ├── _tools.py       # Tool definitions
│   │       ├── _types.py       # Type definitions
│   │       ├── _logging.py     # Logging utilities
│   │       │
│   │       │   # Provider folders - lazy load from connector packages
│   │       ├── openai/         # OpenAI clients (built into core)
│   │       ├── azure/          # Lazy loads from azure-ai, azure-ai-search, azurefunctions
│   │       ├── anthropic/      # Lazy loads from agent-framework-anthropic
│   │       ├── ollama/         # Lazy loads from agent-framework-ollama
│   │       ├── a2a/            # Lazy loads from agent-framework-a2a
│   │       ├── ag_ui/          # Lazy loads from agent-framework-ag-ui
│   │       ├── chatkit/        # Lazy loads from agent-framework-chatkit
│   │       ├── declarative/    # Lazy loads from agent-framework-declarative
│   │       ├── devui/          # Lazy loads from agent-framework-devui
│   │       ├── mem0/           # Lazy loads from agent-framework-mem0
│   │       └── redis/          # Lazy loads from agent-framework-redis
│   │
│   ├── azure-ai/               # agent-framework-azure-ai
│   │   ├── pyproject.toml
│   │   ├── tests/
│   │   └── agent_framework_azure_ai/
│   │       ├── __init__.py     # Public exports
│   │       ├── _chat_client.py # AzureAIClient implementation
│   │       ├── _client.py      # AzureAIAgentClient implementation
│   │       ├── _shared.py      # AzureAISettings and shared utilities
│   │       └── py.typed        # PEP 561 marker
│   ├── anthropic/              # agent-framework-anthropic
│   ├── bedrock/                # agent-framework-bedrock
│   ├── ollama/                 # agent-framework-ollama
│   └── ...                     # Other connector packages
```
### Lazy Loading Pattern
Provider folders in the core package use `__getattr__` to lazy load classes from their respective connector packages. This allows users to import from a consistent location while only loading dependencies when needed:
```python
# In agent_framework/azure/__init__.py
import importlib
from typing import Any

_IMPORTS: dict[str, tuple[str, str]] = {
    "AzureAIAgentClient": ("agent_framework_azure_ai", "agent-framework-azure-ai"),
    # ...
}


def __getattr__(name: str) -> Any:
    if name in _IMPORTS:
        import_path, package_name = _IMPORTS[name]
        try:
            return getattr(importlib.import_module(import_path), name)
        except ModuleNotFoundError as exc:
            raise ModuleNotFoundError(
                f"The package {package_name} is required to use `{name}`. "
                f"Install it with: pip install {package_name}"
            ) from exc
    # Fall through to the standard error for unknown attributes.
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```
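With this in place, the import below only pulls in the connector package at attribute access time, and fails with the install hint when the package is missing:

```python
# Resolved lazily via __getattr__; raises ModuleNotFoundError with an
# install hint if agent-framework-azure-ai is not installed.
from agent_framework.azure import AzureAIAgentClient
```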
### Adding a New Connector Package
**Important:** Do not create a new package unless there is an issue that has been reviewed and approved by the core team.
#### Initial Release (Preview Phase)
For the first release of a new connector package:
1. Create a new directory under `packages/` (e.g., `packages/my-connector/`)
2. Add the package to `tool.uv.sources` in the root `pyproject.toml`
3. Include samples inside the package itself (e.g., `packages/my-connector/samples/`)
4. **Do NOT** add the package to the `[all]` extra in `packages/core/pyproject.toml`
5. **Do NOT** create lazy loading in core yet
#### Promotion to Stable
After the package has been released and gained a measure of confidence:
1. Move samples from the package to the root `samples/` folder
2. Add the package to the `[all]` extra in `packages/core/pyproject.toml`
3. Create a provider folder in `agent_framework/` with lazy loading `__init__.py`
### Installation Options
Connectors are distributed as separate packages and are not imported by default in the core package. Users install the specific connectors they need:
```bash
# Install core only
pip install agent-framework-core
# Install core with all connectors
pip install agent-framework-core[all]
# or (equivalently):
pip install agent-framework
# Install specific connector
pip install agent-framework-azure-ai
```
## Documentation
Each file should have a single first line containing: `# Copyright (c) Microsoft. All rights reserved.`
We follow the [Google Docstring](https://github.com/google/styleguide/blob/gh-pages/pyguide.md#383-functions-and-methods) style guide for functions and methods.
Docstrings are currently not checked for private functions (functions starting with `_`).
They should contain:
- Single line explaining what the function does, ending with a period.
- If necessary to further explain the logic a newline follows the first line and then the explanation is given.
- The following sections are optional and, if used, should be separated by a single empty line.
- Arguments are then specified after a header called `Args:`, with each argument being specified in the following format:
- `arg_name`: Explanation of the argument.
- If a longer explanation is needed for an argument, it should be placed on the next line, indented by 4 spaces.
- Type and default values do not have to be specified, they will be pulled from the definition.
- Returns are specified after a header called `Returns:` or `Yields:`, with the return type and explanation of the return value.
- Keyword arguments are specified after a header called `Keyword Args:`, with each argument being specified in the same format as `Args:`.
- A header for exceptions can be added, called `Raises:`, but should only be used for:
- Agent Framework specific exceptions (e.g., `ServiceInitializationError`)
- Base exceptions that might be unexpected in the context
- Obvious exceptions like `ValueError` or `TypeError` do not need to be documented
- Format: `ExceptionType`: Explanation of the exception.
- If a longer explanation is needed, it should be placed on the next line, indented by 4 spaces.
- Code examples can be added using the `Examples:` header followed by `.. code-block:: python` directive.
Putting them all together gives you, at minimum, this:
```python
def equal(arg1: str, arg2: str) -> bool:
"""Compares two strings and returns True if they are the same."""
...
```
Or a complete version of this:
```python
def equal(arg1: str, arg2: str) -> bool:
"""Compares two strings and returns True if they are the same.
Here is extra explanation of the logic involved.
Args:
arg1: The first string to compare.
arg2: The second string to compare.
Returns:
True if the strings are the same, False otherwise.
"""
```
A more complete example with keyword arguments and code samples:
```python
def create_client(
model_id: str | None = None,
*,
timeout: float | None = None,
env_file_path: str | None = None,
**kwargs: Any,
) -> Client:
"""Create a new client with the specified configuration.
Args:
model_id: The model ID to use. If not provided,
it will be loaded from settings.
Keyword Args:
timeout: Optional timeout for requests.
env_file_path: If provided, settings are read from this file.
kwargs: Additional keyword arguments passed to the underlying client.
Returns:
A configured client instance.
Raises:
ValueError: If the model_id is invalid.
Examples:
.. code-block:: python
# Create a client with default settings:
client = create_client(model_id="gpt-4o")
# Or load from environment:
client = create_client(env_file_path=".env")
"""
...
```
Use Google-style docstrings for all public APIs:
```python
def create_agent(name: str, chat_client: ChatClientProtocol) -> Agent:
"""Create a new agent with the specified configuration.
Args:
name: The name of the agent.
chat_client: The chat client to use for communication.
Returns:
True if the strings are the same, False otherwise.
Raises:
ValueError: If one of the strings is empty.
"""
...
```
If in doubt, use the link above to read much more considerations of what to do and when, or use common sense.
## Performance considerations
### Cache Expensive Computations
Think about caching where appropriate. Cache the results of expensive operations that are called repeatedly with the same inputs:
```python
# ✅ Preferred - cache expensive computations
class AIFunction:
def __init__(self, ...):
self._cached_parameters: dict[str, Any] | None = None
def parameters(self) -> dict[str, Any]:
"""Return the JSON schema for the function's parameters.
The result is cached after the first call for performance.
"""
if self._cached_parameters is None:
self._cached_parameters = self.input_model.model_json_schema()
return self._cached_parameters
# ❌ Avoid - recalculating every time
def parameters(self) -> dict[str, Any]:
return self.input_model.model_json_schema()
```
### Prefer Attribute Access Over isinstance()
When checking types in hot paths, prefer checking a `type` attribute (fast string comparison) over `isinstance()` (slower due to method resolution order traversal):
```python
# ✅ Preferred - use match/case with type attribute (faster)
match content.type:
case "function_call":
# handle function call
case "usage":
# handle usage
case _:
# handle other types
# ❌ Avoid in hot paths - isinstance() is slower
if isinstance(content, FunctionCallContent):
# handle function call
elif isinstance(content, UsageContent):
# handle usage
```
For inline conditionals:
```python
# ✅ Preferred - type attribute comparison
result = value if content.type == "function_call" else other
# ❌ Avoid - isinstance() in hot paths
result = value if isinstance(content, FunctionCallContent) else other
```
### Avoid Redundant Serialization
When the same data needs to be used in multiple places, compute it once and reuse it:
```python
# ✅ Preferred - reuse computed representation
otel_message = _to_otel_message(message)
otel_messages.append(otel_message)
logger.info(otel_message, extra={...})
# ❌ Avoid - computing the same thing twice
otel_messages.append(_to_otel_message(message)) # this already serializes
message_data = message.to_dict(exclude_none=True) # and this does so again!
logger.info(message_data, extra={...})
```
python/DEV_SETUP.md
# Dev Setup
This document describes how to set up your environment with Python and uv,
if you're working on new features or a bug fix for Agent Framework, or simply
want to run the tests included.
For coding standards and conventions, see [CODING_STANDARD.md](CODING_STANDARD.md).
## System setup
We are using a tool called [poethepoet](https://github.com/nat-n/poethepoet) for task management and [uv](https://github.com/astral-sh/uv) for dependency management. At the [end of this document](#available-poe-tasks), you will find the available Poe tasks.
## If you're on WSL
Check that you've cloned the repository to `~/workspace` or a similar folder.
Avoid `/mnt/c/` and prefer using your WSL user's home directory.
Ensure you have the WSL extension for VSCode installed.
## Using uv
uv allows us to use AF from the local files, without worrying about paths, as
if you had the AF pip package installed.
To install AF and all the required tools on your system, first navigate to the directory containing
this DEV_SETUP using your chosen shell.
### For Windows (non-WSL)
Check the [uv documentation](https://docs.astral.sh/uv/getting-started/installation/) for the installation instructions. At the time of writing this is the command to install uv:
```powershell
powershell -c "irm https://astral.sh/uv/install.ps1 | iex"
```
### For WSL, Linux or MacOS
Check the [uv documentation](https://docs.astral.sh/uv/getting-started/installation/) for the installation instructions. At the time of writing this is the command to install uv:
```bash
curl -LsSf https://astral.sh/uv/install.sh | sh
```
### Alternative for MacOS
For MacOS users, Homebrew provides an easy installation of uv with the [uv Formulae](https://formulae.brew.sh/formula/uv)
```bash
brew install uv
```
### After installing uv
You can then run the following commands manually:
```bash
# Install Python 3.10, 3.11, 3.12, and 3.13
uv python install 3.10 3.11 3.12 3.13
# Create a virtual environment with Python 3.10 (you can change this to 3.11, 3.12 or 3.13)
PYTHON_VERSION="3.10"
uv venv --python $PYTHON_VERSION
# Install AF and all dependencies
uv sync --dev
# Install all the tools and dependencies
uv run poe install
# Install pre-commit hooks
uv run poe pre-commit-install
```
Alternatively, you can reinstall the venv, packages, dependencies, and pre-commit hooks with a single command (this requires poe in the current env). This is especially useful if you want to switch Python versions:
```bash
uv run poe setup -p 3.13
```
You can then run different commands through Poe the Poet, use `uv run poe` to discover which ones.
## VSCode Setup
Install the [Python extension](https://marketplace.visualstudio.com/items?itemName=ms-python.python) for VSCode.
Open the `python` folder in [VSCode](https://code.visualstudio.com/docs/editor/workspaces).
> The workspace for python should be rooted in the `./python` folder.
Open any of the `.py` files in the project and run the `Python: Select Interpreter`
command from the command palette. Make sure the virtual env (default path is `.venv`) created by `uv` is selected.
## LLM setup
Make sure you have an
[OpenAI API Key](https://platform.openai.com) or
[Azure OpenAI service key](https://learn.microsoft.com/azure/cognitive-services/openai/quickstart?pivots=rest-api).
There are two methods to manage keys, secrets, and endpoints:
1. Store them in environment variables. AF Python leverages pydantic settings to load keys, secrets, and endpoints from the environment.
> When you are using VSCode and have the python extension setup, it automatically loads environment variables from a `.env` file, so you don't have to manually set them in the terminal.
> During runtime on different platforms, environment settings set as part of the deployments should be used.
2. Store them in a separate `.env` file, like `dev.env`; you can then pass that file name to the `env_file_path` parameter of the constructor for most services, see below.
> Make sure to add `*.env` to your `.gitignore` file.
### Example for file-based setup with OpenAI Chat Completions
To configure a `.env` file with just the keys needed for OpenAI Chat Completions, you can create an `openai.env` file (this name is just an example; a single `.env` file with all required keys is more common) in the root of the `python` folder with the following content:
Content of `.env` or `openai.env`:
```env
OPENAI_API_KEY=""
OPENAI_CHAT_MODEL_ID="gpt-4o-mini"
```
You will then configure the ChatClient class with the keyword argument `env_file_path`:
```python
from agent_framework.openai import OpenAIChatClient
chat_client = OpenAIChatClient(env_file_path="openai.env")
```
## Tests
All the tests are located in the `tests` folder of each package. Tests marked with a `@skip_if_..._integration_tests_disabled` decorator are integration tests that require an external service to be running, like OpenAI or Azure OpenAI.
If you want to run these tests, you need to set the environment variable `RUN_INTEGRATION_TESTS` to `true` and have the appropriate key for each service set in your environment or in a `.env` file.
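For example, to enable them for a single run from your shell:
```bash
RUN_INTEGRATION_TESTS=true uv run poe test
```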
Alternatively, you can run them using VSCode Tasks. Open the command palette
(`Ctrl+Shift+P`) and type `Tasks: Run Task`. Select `Test` from the list.
If you want to run the tests for a single package, you can run the `test` task from that package's directory. For example, to run the tests for the core package, you can use:
```bash
uv run --directory packages/core poe test
```
These commands also output the coverage report.
## Code quality checks
To run the same checks that run during a commit and the GitHub Action `Python Code Quality`, you can use this command, from the [python](../python) folder:
```bash
uv run poe check
```
Ideally you should run these checks before committing any changes; when you install using the instructions above, the pre-commit hooks are already installed.
## Code Coverage
We try to maintain a high code coverage for the project. To run the code coverage on the unit tests, you can use the following command:
```bash
uv run poe test
```
This will show you which files are not covered by the tests, including the specific lines not covered. Make sure to consider the untested lines from the code you are working on, but feel free to add other tests as well; that is always welcome!
## Catching up with the latest changes
There are many people committing to Agent Framework, so it is important to keep your local repository up to date. To do this, you can run the following commands:
```bash
git fetch upstream main
git rebase upstream/main
git push --force-with-lease
```
or:
```bash
git fetch upstream main
git merge upstream/main
git push
```
This assumes the `upstream` remote points to the main repository. If your remote has a different name, you can replace `upstream` with that name.
After running the rebase command, you may need to resolve any conflicts that arise. If you are unsure how to resolve a conflict, please refer to the [GitHub's documentation on resolving conflicts](https://docs.github.com/en/get-started/using-git/resolving-merge-conflicts-after-a-git-rebase), or for [VSCode](https://code.visualstudio.com/docs/sourcecontrol/overview#_merge-conflicts).
# Task automation
## Available Poe Tasks
This project uses [poethepoet](https://github.com/nat-n/poethepoet) for task management and [uv](https://github.com/astral-sh/uv) for dependency management.
### Setup and Installation
Once uv is installed, and you do not yet have a virtual environment setup:
```bash
uv venv
```
and then you can run the following tasks:
```bash
uv sync --all-extras --dev
```
After this initial setup, you can use the following tasks to manage your development environment. It is advised to use the following setup command since that also installs the pre-commit hooks.
#### `setup`
Set up the development environment with a virtual environment, install dependencies and pre-commit hooks:
```bash
uv run poe setup
# or with specific Python version
uv run poe setup --python 3.12
```
#### `install`
Install all dependencies including extras and dev dependencies, including updates:
```bash
uv run poe install
```
#### `venv`
Create a virtual environment with a specified Python version, or switch Python versions:
```bash
uv run poe venv
# or with specific Python version
uv run poe venv --python 3.12
```
#### `pre-commit-install`
Install pre-commit hooks:
```bash
uv run poe pre-commit-install
```
### Code Quality and Formatting
Each of the following tasks is designed to run against both the main `agent-framework` package and the extension packages, ensuring consistent code quality across the project.
#### `fmt` (format)
Format code using ruff:
```bash
uv run poe fmt
```
#### `lint`
Run linting checks and fix issues:
```bash
uv run poe lint
```
#### `pyright`
Run Pyright type checking:
```bash
uv run poe pyright
```
#### `mypy`
Run MyPy type checking:
```bash
uv run poe mypy
```
### Code Validation
#### `markdown-code-lint`
Lint markdown code blocks:
```bash
uv run poe markdown-code-lint
```
### Comprehensive Checks
#### `check`
Run all quality checks (format, lint, pyright, mypy, test, markdown lint):
```bash
uv run poe check
```
### Testing
#### `test`
Run unit tests with coverage by invoking the `test` task in each package sequentially:
```bash
uv run poe test
```
To run tests for a specific package only, use the `--directory` flag:
```bash
# Run tests for the core package
uv run --directory packages/core poe test
# Run tests for the azure-ai package
uv run --directory packages/azure-ai poe test
```
#### `all-tests`
Run all tests in a single pytest invocation across all packages in parallel (excluding lab and devui). This is faster than `test` as it uses pytest's parallel execution:
```bash
uv run poe all-tests
```
#### `all-tests-cov`
Same as `all-tests` but with coverage reporting enabled:
```bash
uv run poe all-tests-cov
```
### Building and Publishing
#### `build`
Build all packages:
```bash
uv run poe build
```
#### `clean-dist`
Clean the dist directories:
```bash
uv run poe clean-dist
```
#### `publish`
Publish packages to PyPI:
```bash
uv run poe publish
```
## Pre-commit Hooks
Pre-commit hooks run automatically on commit and execute a subset of the checks on changed files only. You can also run all checks using pre-commit directly:
```bash
uv run pre-commit run -a
```
python/LICENSE
MIT License
Copyright (c) Microsoft Corporation.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE
python/README.md
# Get Started with Microsoft Agent Framework for Python Developers
## Quick Install
We recommend two common installation paths depending on your use case.
### 1. Development mode
If you are exploring or developing locally, install the entire framework with all sub-packages:
```bash
pip install agent-framework --pre
```
This installs the core and every integration package, making sure that all features are available without additional steps. The `--pre` flag is required while Agent Framework is in preview. This is the simplest way to get started.
### 2. Selective install
If you only need specific integrations, you can install at a more granular level. This keeps dependencies lighter and focuses on what you actually plan to use. Some examples:
```bash
# Core only
# includes Azure OpenAI and OpenAI support by default
# also includes workflows and orchestrations
pip install agent-framework-core --pre
# Core + Azure AI integration
pip install agent-framework-azure-ai --pre
# Core + Microsoft Copilot Studio integration
pip install agent-framework-copilotstudio --pre
# Core + both Microsoft Copilot Studio and Azure AI integration
pip install agent-framework-copilotstudio agent-framework-azure-ai --pre
```
This selective approach is useful when you know which integrations you need, and it is the recommended way to set up lightweight environments.
Supported Platforms:
- Python: 3.10+
- OS: Windows, macOS, Linux
## 1. Setup API Keys
Set as environment variables, or create a .env file at your project root:
```bash
OPENAI_API_KEY=sk-...
OPENAI_CHAT_MODEL_ID=...
...
AZURE_OPENAI_API_KEY=...
AZURE_OPENAI_ENDPOINT=...
AZURE_OPENAI_CHAT_DEPLOYMENT_NAME=...
...
AZURE_AI_PROJECT_ENDPOINT=...
AZURE_AI_MODEL_DEPLOYMENT_NAME=...
```
You can also override environment variables by explicitly passing configuration parameters to the chat client constructor:
```python
from agent_framework.azure import AzureOpenAIChatClient
chat_client = AzureOpenAIChatClient(
api_key='',
endpoint='',
deployment_name='',
api_version='',
)
```
See the following [setup guide](https://github.com/microsoft/agent-framework/tree/main/python/samples/getting_started) for more information.
## 2. Create a Simple Agent
Create agents and invoke them directly:
```python
import asyncio
from agent_framework import ChatAgent
from agent_framework.openai import OpenAIChatClient
async def main():
agent = ChatAgent(
chat_client=OpenAIChatClient(),
instructions="""
1) A robot may not injure a human being...
2) A robot must obey orders given it by human beings...
3) A robot must protect its own existence...
Give me the TLDR in exactly 5 words.
"""
)
result = await agent.run("Summarize the Three Laws of Robotics")
print(result)
asyncio.run(main())
# Output: Protect humans, obey, self-preserve, prioritized.
```
## 3. Directly Use Chat Clients (No Agent Required)
You can use the chat client classes directly for advanced workflows:
```python
import asyncio
from agent_framework import ChatMessage
from agent_framework.openai import OpenAIChatClient
async def main():
client = OpenAIChatClient()
messages = [
ChatMessage(role="system", text="You are a helpful assistant."),
ChatMessage(role="user", text="Write a haiku about Agent Framework.")
]
response = await client.get_response(messages)
print(response.messages[0].text)
"""
Output:
Agents work in sync,
Framework threads through each task—
Code sparks collaboration.
"""
asyncio.run(main())
```
## 4. Build an Agent with Tools and Functions
Enhance your agent with custom tools and function calling:
```python
import asyncio
from typing import Annotated
from random import randint
from pydantic import Field
from agent_framework import ChatAgent
from agent_framework.openai import OpenAIChatClient
def get_weather(
location: Annotated[str, Field(description="The location to get the weather for.")],
) -> str:
"""Get the weather for a given location."""
conditions = ["sunny", "cloudy", "rainy", "stormy"]
return f"The weather in {location} is {conditions[randint(0, 3)]} with a high of {randint(10, 30)}°C."
def get_menu_specials() -> str:
"""Get today's menu specials."""
return """
Special Soup: Clam Chowder
Special Salad: Cobb Salad
Special Drink: Chai Tea
"""
async def main():
agent = ChatAgent(
chat_client=OpenAIChatClient(),
instructions="You are a helpful assistant that can provide weather and restaurant information.",
tools=[get_weather, get_menu_specials]
)
response = await agent.run("What's the weather in Amsterdam and what are today's specials?")
print(response)
"""
Output:
The weather in Amsterdam is sunny with a high of 22°C. Today's specials include
Clam Chowder soup, Cobb Salad, and Chai Tea as the special drink.
"""
if __name__ == "__main__":
asyncio.run(main())
```
You can explore additional agent samples [here](https://github.com/microsoft/agent-framework/tree/main/python/samples/getting_started/agents).
## 5. Multi-Agent Orchestration
Coordinate multiple agents to collaborate on complex tasks using orchestration patterns:
```python
import asyncio
from agent_framework import ChatAgent
from agent_framework.openai import OpenAIChatClient
async def main():
# Create specialized agents
writer = ChatAgent(
chat_client=OpenAIChatClient(),
name="Writer",
instructions="You are a creative content writer. Generate and refine slogans based on feedback."
)
reviewer = ChatAgent(
chat_client=OpenAIChatClient(),
name="Reviewer",
instructions="You are a critical reviewer. Provide detailed feedback on proposed slogans."
)
# Sequential workflow: Writer creates, Reviewer provides feedback
task = "Create a slogan for a new electric SUV that is affordable and fun to drive."
# Step 1: Writer creates initial slogan
initial_result = await writer.run(task)
print(f"Writer: {initial_result}")
# Step 2: Reviewer provides feedback
feedback_request = f"Please review this slogan: {initial_result}"
feedback = await reviewer.run(feedback_request)
print(f"Reviewer: {feedback}")
# Step 3: Writer refines based on feedback
refinement_request = f"Please refine this slogan based on the feedback: {initial_result}\nFeedback: {feedback}"
final_result = await writer.run(refinement_request)
print(f"Final Slogan: {final_result}")
# Example Output:
# Writer: "Charge Forward: Affordable Adventure Awaits!"
# Reviewer: "Good energy, but 'Charge Forward' is overused in EV marketing..."
# Final Slogan: "Power Up Your Adventure: Premium Feel, Smart Price!"
if __name__ == "__main__":
asyncio.run(main())
```
For more advanced orchestration patterns including Sequential, GroupChat, Concurrent, Magentic, and Handoff orchestrations, see the [orchestration samples](samples/getting_started/workflows/orchestration).
## More Examples & Samples
- [Getting Started with Agents](https://github.com/microsoft/agent-framework/tree/main/python/samples/getting_started/agents): Basic agent creation and tool usage
- [Chat Client Examples](https://github.com/microsoft/agent-framework/tree/main/python/samples/getting_started/chat_client): Direct chat client usage patterns
- [Azure AI Integration](https://github.com/microsoft/agent-framework/tree/main/python/packages/azure-ai): Azure AI integration
- [Workflow Samples](https://github.com/microsoft/agent-framework/tree/main/python/samples/getting_started/workflows): Advanced multi-agent patterns
## Agent Framework Documentation
- [Agent Framework Repository](https://github.com/microsoft/agent-framework)
- [Python Package Documentation](https://github.com/microsoft/agent-framework/tree/main/python)
- [.NET Package Documentation](https://github.com/microsoft/agent-framework/tree/main/dotnet)
- [Design Documents](https://github.com/microsoft/agent-framework/tree/main/docs/design)
- Learn docs are coming soon.
# Copyright (c) Microsoft. All rights reserved.
from importlib import metadata as _metadata
from pathlib import Path as _Path
from typing import Any, cast
try:
import tomllib as _toml # type: ignore # Python 3.11+
except ModuleNotFoundError: # Python 3.10
import tomli as _toml # type: ignore
def _load_pyproject() -> dict[str, Any]:
pyproject = (_Path(__file__).resolve().parents[1] / "pyproject.toml").read_text("utf-8")
return cast(dict[str, Any], _toml.loads(pyproject)) # type: ignore
def _version() -> str:
try:
return _metadata.version("agent-framework")
except _metadata.PackageNotFoundError as ex:
data = _load_pyproject()
project = cast(dict[str, Any], data.get("project", {}))
version = project.get("version")
if isinstance(version, str):
return version
raise RuntimeError("pyproject.toml missing project.version") from ex
__version__ = _version()
__all__ = ["__version__"]
python/check_md_code_blocks.py
# Copyright (c) Microsoft. All rights reserved.
"""Check code blocks in Markdown files for syntax errors."""
import argparse
from enum import Enum
import glob
import logging
import tempfile
import subprocess # nosec
from pygments import highlight # type: ignore
from pygments.formatters import TerminalFormatter
from pygments.lexers import PythonLexer
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.INFO)
class Colors(str, Enum):
CEND = "\33[0m"
CRED = "\33[31m"
CREDBG = "\33[41m"
CGREEN = "\33[32m"
CGREENBG = "\33[42m"
CVIOLET = "\33[35m"
CGREY = "\33[90m"
def with_color(text: str, color: Colors) -> str:
"""Prints a string with the specified color."""
return f"{color.value}{text}{Colors.CEND.value}"
def expand_file_patterns(patterns: list[str], skip_glob: bool = False) -> list[str]:
"""Expand glob patterns to actual file paths."""
all_files: list[str] = []
for pattern in patterns:
if skip_glob:
# When skip_glob is True, treat patterns as literal file paths
# Only include if it's a markdown file
if pattern.endswith('.md'):
matches = glob.glob(pattern, recursive=False)
all_files.extend(matches)
else:
# Handle both relative and absolute paths with glob expansion
matches = glob.glob(pattern, recursive=True)
all_files.extend(matches)
return sorted(set(all_files)) # Remove duplicates and sort
def extract_python_code_blocks(markdown_file_path: str) -> list[tuple[str, int]]:
"""Extract Python code blocks from a Markdown file."""
with open(markdown_file_path, encoding="utf-8") as file:
lines = file.readlines()
code_blocks: list[tuple[str, int]] = []
in_code_block = False
current_block: list[str] = []
for i, line in enumerate(lines):
if line.strip().startswith("```python"):
in_code_block = True
current_block = []
elif line.strip().startswith("```"):
in_code_block = False
code_blocks.append(("\n".join(current_block), i - len(current_block) + 1))
elif in_code_block:
current_block.append(line)
return code_blocks
def check_code_blocks(markdown_file_paths: list[str], exclude_patterns: list[str] | None = None) -> None:
"""Check Python code blocks in a Markdown file for syntax errors."""
files_with_errors: list[str] = []
exclude_patterns = exclude_patterns or []
for markdown_file_path in markdown_file_paths:
# Skip files that match any exclude pattern
if any(pattern in markdown_file_path for pattern in exclude_patterns):
logger.info(f"Skipping {markdown_file_path} (matches exclude pattern)")
continue
code_blocks = extract_python_code_blocks(markdown_file_path)
had_errors = False
for code_block, line_no in code_blocks:
markdown_file_path_with_line_no = f"{markdown_file_path}:{line_no}"
logger.info("Checking a code block in %s...", markdown_file_path_with_line_no)
# Skip blocks that don't import agent_framework modules or import lab modules
if (all(
all(import_code not in code_block for import_code in [f"import {module}", f"from {module}"])
for module in ["agent_framework"]
) or "agent_framework.lab" in code_block):
logger.info(f' {with_color("OK[ignored]", Colors.CGREENBG)}')
continue
with tempfile.NamedTemporaryFile(suffix=".py", delete=False) as temp_file:
temp_file.write(code_block.encode("utf-8"))
temp_file.flush()
# Run pyright on the temporary file using subprocess.run
result = subprocess.run(["uv", "run", "pyright", temp_file.name], capture_output=True, text=True, cwd=".") # nosec
if result.returncode != 0:
highlighted_code = highlight(code_block, PythonLexer(), TerminalFormatter()) # type: ignore
logger.info(
f" {with_color('FAIL', Colors.CREDBG)}\n"
f"{with_color('========================================================', Colors.CGREY)}\n"
f"{with_color('Error', Colors.CRED)}: Pyright found issues in {with_color(markdown_file_path_with_line_no, Colors.CVIOLET)}:\n"
f"{with_color('--------------------------------------------------------', Colors.CGREY)}\n"
f"{highlighted_code}\n"
f"{with_color('--------------------------------------------------------', Colors.CGREY)}\n"
"\n"
f"{with_color('pyright output:', Colors.CVIOLET)}\n"
f"{with_color(result.stdout, Colors.CRED)}"
f"{with_color('========================================================', Colors.CGREY)}\n"
)
had_errors = True
else:
logger.info(f" {with_color('OK', Colors.CGREENBG)}")
if had_errors:
files_with_errors.append(markdown_file_path)
if files_with_errors:
raise RuntimeError("Syntax errors found in the following files:\n" + "\n".join(files_with_errors))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Check code blocks in Markdown files for syntax errors.")
# Argument is a list of markdown files containing glob patterns
parser.add_argument("markdown_files", nargs="+", help="Markdown files to check (supports glob patterns).")
parser.add_argument("--exclude", action="append", help="Exclude files containing this pattern.")
parser.add_argument("--no-glob", action="store_true", help="Treat file arguments as literal paths (no glob expansion).")
args = parser.parse_args()
# Expand glob patterns to actual file paths (or skip if --no-glob)
expanded_files = expand_file_patterns(args.markdown_files, skip_glob=args.no_glob)
check_code_blocks(expanded_files, args.exclude)
python/devsetup.sh
#!/usr/bin/env bash
# Install Python 3.10, 3.11, 3.12, and 3.13
uv python install 3.10 3.11 3.12 3.13
# Create a virtual environment with Python 3.13 (you can change this to 3.10, 3.11 or 3.12)
PYTHON_VERSION="3.13"
uv venv --python $PYTHON_VERSION
# Install AF and all dependencies
uv sync --dev
# Install all the tools and dependencies
uv run poe install
# Install pre-commit hooks
uv run poe pre-commit-install
# Copyright (c) Microsoft. All rights reserved.
import debugpy
import asyncio
import json
import os
from pathlib import Path
from dotenv import load_dotenv
from py2docfx.__main__ import main as py2docfx_main
load_dotenv()
async def generate_af_docs(root_path: Path):
"""Generate documentation for the Agent Framework using py2docfx.
This function runs the py2docfx command with the specified parameters.
"""
package = {
"packages": [
{
"package_info": {
"name": "agent-framework-core",
"version": "1.0.0b251001",
"install_type": "pypi",
"extras": ["all"]
},
"sphinx_extensions": [
"sphinxcontrib.autodoc_pydantic",
"sphinx-pydantic",
"sphinx.ext.autosummary"
],
"extension_config": {
"napoleon_google_docstring": 1,
"napoleon_preprocess_types": 1,
"napoleon_use_param": 0,
"autodoc_pydantic_field_doc_policy": "both",
"autodoc_pydantic_model_show_json": 0,
"autodoc_pydantic_model_show_config_summary": 1,
"autodoc_pydantic_model_show_field_summary": 1,
"autodoc_pydantic_model_hide_paramlist": 0,
"autodoc_pydantic_model_show_json_error_strategy": "coerce",
"autodoc_pydantic_settings_show_config_summary": 1,
"autodoc_pydantic_settings_show_field_summary": 1,
"python_use_unqualified_type_names": 1,
"autodoc_preserve_defaults": 1,
"autodoc_class_signature": "separated",
"autodoc_typehints": "description",
"autodoc_typehints_format": "fully-qualified",
"autodoc_default_options": {
"members": 1,
"member-order": "alphabetical",
"undoc-members": 1,
"show-inheritance": 1,
"imported-members": 1,
},
},
}
],
"required_packages": [
{
"install_type": "pypi",
"name": "autodoc_pydantic",
"version": ">=2.0.0",
},
{
"install_type": "pypi",
"name": "sphinx-pydantic",
}
],
}
args = [
"-o",
str((root_path / "docs" / "build").absolute()),
"-j",
json.dumps(package),
"--verbose"
]
try:
await py2docfx_main(args)
except Exception as e:
print(f"Error generating documentation: {e}")
if __name__ == "__main__":
    # Set debug = True to attach a debugpy debugger before generating docs
    debug = False
if debug:
debugpy.listen(("localhost", 5678))
debugpy.wait_for_client()
debugpy.breakpoint()
current_path = Path(__file__).parent.parent.resolve()
print(f"Current path: {current_path}")
# ensure the dist folder exists
dist_path = current_path / "dist"
if not dist_path.exists():
print(" Please run `poe build` to generate the dist folder.")
exit(1)
if os.getenv("PIP_FIND_LINKS") != str(dist_path.absolute()):
print(f"Setting PIP_FIND_LINKS to {dist_path.absolute()}")
os.environ["PIP_FIND_LINKS"] = str(dist_path.absolute())
print(f"Generating documentation in: {current_path / 'docs' / 'build'}")
# Generate the documentation
asyncio.run(generate_af_docs(current_path))
python/pyproject.toml
[project]
name = "agent-framework"
description = "Microsoft Agent Framework for building AI Agents with Python. This package contains all the core and optional packages."
authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}]
readme = "README.md"
requires-python = ">=3.10"
version = "1.0.0b260116"
license-files = ["LICENSE"]
urls.homepage = "https://aka.ms/agent-framework"
urls.source = "https://github.com/microsoft/agent-framework/tree/main/python"
urls.release_notes = "https://github.com/microsoft/agent-framework/releases?q=tag%3Apython-1&expanded=true"
urls.issues = "https://github.com/microsoft/agent-framework/issues"
classifiers = [
"License :: OSI Approved :: MIT License",
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Programming Language :: Python :: 3.13",
"Programming Language :: Python :: 3.14",
"Typing :: Typed",
]
dependencies = [
"agent-framework-core[all]==1.0.0b260116",
]
[dependency-groups]
dev = [
"uv>=0.9,<1.0.0",
"flit>=3.12.0",
"pre-commit >= 3.7",
"ruff>=0.11.8",
"pytest>=8.4.1",
"pytest-asyncio>=1.0.0",
"pytest-cov>=6.2.1",
"pytest-env>=1.1.5",
"pytest-xdist[psutil]>=3.8.0",
"pytest-timeout>=2.3.1",
"pytest-retry>=1",
"mypy>=1.16.1",
"pyright>=1.1.402",
    # tasks
"poethepoet>=0.36.0",
"rich",
"tomli",
"tomli-w",
# AutoGen migration samples
"autogen-agentchat",
"autogen-ext[openai]",
]
docs = [
# Documentation
"debugpy>=1.8.16",
"py2docfx>=0.1.22.dev2259826",
"pip",
]
[tool.uv]
package = false
prerelease = "if-necessary-or-explicit"
environments = [
"sys_platform == 'darwin'",
"sys_platform == 'linux'",
"sys_platform == 'win32'"
]
override-dependencies = [
# A conflict between the dependency of litellm[proxy] < 0.30.0, which is a dependency of agent-lightning
# and uvicorn >= 0.34.0, which is a dependency of tau2
"uvicorn==0.38.0",
# Similar problem with websockets, which is a dependency conflict between litellm[proxy] and mcp
"websockets==15.0.1",
# grpcio 1.67.x has no Python 3.14 wheels; grpcio 1.76.0+ supports Python 3.14
# litellm constrains grpcio<1.68.0 due to resource exhaustion bug (https://github.com/grpc/grpc/issues/38290)
# Use version-specific overrides to satisfy both constraints
"grpcio>=1.76.0; python_version >= '3.14'",
"grpcio>=1.62.3,<1.68.0; python_version < '3.14'",
]
[tool.uv.workspace]
members = [ "packages/*" ]
[tool.uv.sources]
agent-framework = { workspace = true }
agent-framework-core = { workspace = true }
agent-framework-a2a = { workspace = true }
agent-framework-ag-ui = { workspace = true }
agent-framework-azure-ai-search = { workspace = true }
agent-framework-anthropic = { workspace = true }
agent-framework-azure-ai = { workspace = true }
agent-framework-azurefunctions = { workspace = true }
agent-framework-bedrock = { workspace = true }
agent-framework-chatkit = { workspace = true }
agent-framework-copilotstudio = { workspace = true }
agent-framework-declarative = { workspace = true }
agent-framework-devui = { workspace = true }
agent-framework-foundry-local = { workspace = true }
agent-framework-lab = { workspace = true }
agent-framework-mem0 = { workspace = true }
agent-framework-ollama = { workspace = true }
agent-framework-purview = { workspace = true }
agent-framework-redis = { workspace = true }
[tool.ruff]
line-length = 120
target-version = "py310"
fix = true
include = ["*.py", "*.pyi", "**/pyproject.toml", "*.ipynb"]
exclude = ["docs/*", "run_tasks_in_packages_if_exists.py", "check_md_code_blocks.py"]
extend-exclude = [
"[{][{]cookiecutter.package_name[}][}]",
]
preview = true
[tool.ruff.lint]
fixable = ["ALL"]
unfixable = []
select = [
"ASYNC", # async checks
"B", # bugbear checks
"CPY", # copyright
"D", # pydocstyle checks
"E", # pycodestyle error checks
"ERA", # remove connected out code
"F", # pyflakes checks
"FIX", # fixme checks
"I", # isort
"INP", # implicit namespace package
"ISC", # implicit string concat
"Q", # flake8-quotes checks
"RET", # flake8-return check
"RSE", # raise exception parantheses check
"RUF", # RUF specific rules
"SIM", # flake8-simplify check
"T20", # typing checks
"TD", # todos
"W", # pycodestyle warning checks
"T100", # Debugger,
"S", # Bandit checks
]
ignore = [
"D100", # allow missing docstring in public module
"D104", # allow missing docstring in public package
"D418", # allow overload to have a docstring
"TD003", # allow missing link to todo issue
"FIX002", # allow todo
"B027", # allow empty non-abstract method in ABC
"B905", # `zip()` without an explicit `strict=` parameter
"RUF067", # allow version detection in __init__.py
]
[tool.ruff.lint.per-file-ignores]
# Ignore all directories named `tests` and `samples`.
"**/tests/**" = ["D", "INP", "TD", "ERA001", "RUF", "S"]
"samples/**" = ["D", "INP", "ERA001", "RUF", "S", "T201"]
"*.ipynb" = ["CPY", "E501"]
[tool.ruff.format]
docstring-code-format = true
[tool.ruff.lint.pydocstyle]
convention = "google"
[tool.ruff.lint.flake8-copyright]
notice-rgx = "^# Copyright \\(c\\) Microsoft\\. All rights reserved\\."
min-file-size = 1
[tool.pytest.ini_options]
testpaths = 'packages/**/tests'
norecursedirs = '**/lab/**'
addopts = "-ra -q -r fEX"
asyncio_mode = "auto"
asyncio_default_fixture_loop_scope = "function"
filterwarnings = []
timeout = 120
markers = [
"azure: marks tests as Azure provider specific",
"azure-ai: marks tests as Azure AI provider specific",
"openai: marks tests as OpenAI provider specific",
]
[tool.coverage.run]
omit = [
"**/__init__.py"
]
[tool.pyright]
include = ["agent_framework*"]
exclude = ["**/tests/**", "docs", "**/.venv/**", "packages/devui/frontend/**"]
typeCheckingMode = "strict"
reportUnnecessaryIsInstance = false
reportMissingTypeStubs = false
[tool.mypy]
plugins = ['pydantic.mypy']
strict = true
python_version = "3.10"
ignore_missing_imports = true
disallow_untyped_defs = true
no_implicit_optional = true
check_untyped_defs = true
warn_return_any = true
show_error_codes = true
warn_unused_ignores = false
disallow_incomplete_defs = true
disallow_untyped_decorators = true
[tool.bandit]
targets = ["agent_framework"]
exclude_dirs = ["tests", "./run_tasks_in_packages_if_exists.py", "./check_md_code_blocks.py", "docs", "samples"]
[tool.poe]
executor.type = "uv"
[tool.poe.tasks]
markdown-code-lint = "uv run python check_md_code_blocks.py 'README.md' './packages/**/README.md' './samples/**/*.md' --exclude cookiecutter-agent-framework-lab --exclude tau2 --exclude 'packages/devui/frontend'"
pre-commit-install = "uv run pre-commit install --install-hooks --overwrite"
install = "uv sync --all-packages --all-extras --dev -U --prerelease=if-necessary-or-explicit --no-group=docs"
test = "python run_tasks_in_packages_if_exists.py test"
fmt = "python run_tasks_in_packages_if_exists.py fmt"
format.ref = "fmt"
lint = "python run_tasks_in_packages_if_exists.py lint"
pyright = "python run_tasks_in_packages_if_exists.py pyright"
mypy = "python run_tasks_in_packages_if_exists.py mypy"
# cleaning
clean-dist-packages = "python run_tasks_in_packages_if_exists.py clean-dist"
clean-dist-meta = "rm -rf dist"
clean-dist = ["clean-dist-packages", "clean-dist-meta"]
# build and publish
build-packages = "python run_tasks_in_packages_if_exists.py build"
build-meta = "python -m flit build"
build = ["build-packages", "build-meta"]
publish = "uv publish"
# combined checks
check = ["fmt", "lint", "pyright", "mypy", "test", "markdown-code-lint"]
[tool.poe.tasks.all-tests-cov]
cmd = """
pytest --import-mode=importlib
--cov=agent_framework
--cov=agent_framework_core
--cov=agent_framework_a2a
--cov=agent_framework_ag_ui
--cov=agent_framework_anthropic
--cov=agent_framework_azure_ai
--cov=agent_framework_azurefunctions
--cov=agent_framework_chatkit
--cov=agent_framework_copilotstudio
--cov=agent_framework_mem0
--cov=agent_framework_purview
--cov=agent_framework_redis
--cov-config=pyproject.toml
--cov-report=term-missing:skip-covered
--ignore-glob=packages/lab/**
--ignore-glob=packages/devui/**
-rs
-n logical --dist loadfile --dist worksteal
packages/**/tests
"""
[tool.poe.tasks.all-tests]
cmd = """
pytest --import-mode=importlib
--ignore-glob=packages/lab/**
--ignore-glob=packages/devui/**
-rs
-n logical --dist loadfile --dist worksteal
packages/**/tests
"""
[tool.poe.tasks.venv]
cmd = "uv venv --clear --python $python"
args = [{ name = "python", default = "3.13", options = ['-p', '--python'] }]
[tool.poe.tasks.setup]
sequence = [
{ ref = "venv --python $python"},
{ ref = "install" },
{ ref = "pre-commit-install" }
]
args = [{ name = "python", default = "3.13", options = ['-p', '--python'] }]
[tool.poe.tasks.pre-commit-markdown-code-lint]
cmd = "uv run python check_md_code_blocks.py ${files} --no-glob --exclude cookiecutter-agent-framework-lab --exclude tau2 --exclude 'packages/devui/frontend'"
args = [{ name = "files", default = ".", positional = true, multiple = true }]
[tool.poe.tasks.pre-commit-pyright]
cmd = "uv run python run_tasks_in_changed_packages.py pyright ${files}"
args = [{ name = "files", default = ".", positional = true, multiple = true }]
[tool.poe.tasks.ci-mypy]
shell = """
# Try multiple strategies to get changed files
if [ -n "$GITHUB_BASE_REF" ]; then
# In GitHub Actions PR context
git fetch origin $GITHUB_BASE_REF --depth=1 2>/dev/null || true
CHANGED_FILES=$(git diff --name-only origin/$GITHUB_BASE_REF...HEAD -- . 2>/dev/null || \
git diff --name-only FETCH_HEAD...HEAD -- . 2>/dev/null || \
git diff --name-only HEAD^...HEAD -- . 2>/dev/null || \
echo ".")
else
# Local development
CHANGED_FILES=$(git diff --name-only origin/main...HEAD -- . 2>/dev/null || \
git diff --name-only main...HEAD -- . 2>/dev/null || \
git diff --name-only HEAD~1 -- . 2>/dev/null || \
echo ".")
fi
echo "Changed files: $CHANGED_FILES"
uv run python run_tasks_in_changed_packages.py mypy $CHANGED_FILES
"""
interpreter = "bash"
[tool.poe.tasks.pre-commit-check]
sequence = [
{ ref = "fmt" },
{ ref = "lint" },
{ ref = "pre-commit-pyright ${files}" },
{ ref = "pre-commit-markdown-code-lint ${files}" }
]
args = [{ name = "files", default = ".", positional = true, multiple = true }]
[tool.setuptools.packages.find]
where = ["packages"]
include = ["agent_framework**"]
namespaces = true
[[tool.uv.index]]
name = "testpypi"
url = "https://test.pypi.org/simple/"
publish-url = "https://test.pypi.org/legacy/"
explicit = true
[tool.flit.module]
name = "agent_framework_meta"
[build-system]
requires = ["flit-core >= 3.11,<4.0"]
build-backend = "flit_core.buildapi"
python/run_tasks_in_changed_packages.py
# Copyright (c) Microsoft. All rights reserved.
"""Run a task only in packages that have changed files."""
import argparse
import glob
import sys
from pathlib import Path
import tomli
from poethepoet.app import PoeThePoet
from rich import print
def discover_projects(workspace_pyproject_file: Path) -> list[Path]:
with workspace_pyproject_file.open("rb") as f:
data = tomli.load(f)
projects = data["tool"]["uv"]["workspace"]["members"]
exclude = data["tool"]["uv"]["workspace"].get("exclude", [])
all_projects: list[Path] = []
for project in projects:
if "*" in project:
globbed = glob.glob(str(project), root_dir=workspace_pyproject_file.parent)
globbed_paths = [Path(p) for p in globbed]
all_projects.extend(globbed_paths)
else:
all_projects.append(Path(project))
for project in exclude:
if "*" in project:
globbed = glob.glob(str(project), root_dir=workspace_pyproject_file.parent)
globbed_paths = [Path(p) for p in globbed]
all_projects = [p for p in all_projects if p not in globbed_paths]
else:
all_projects = [p for p in all_projects if p != Path(project)]
return all_projects
def extract_poe_tasks(file: Path) -> set[str]:
with file.open("rb") as f:
data = tomli.load(f)
tasks = set(data.get("tool", {}).get("poe", {}).get("tasks", {}).keys())
# Check if there is an include too
include: str | None = data.get("tool", {}).get("poe", {}).get("include", None)
if include:
include_file = file.parent / include
if include_file.exists():
tasks = tasks.union(extract_poe_tasks(include_file))
return tasks
def get_changed_packages(projects: list[Path], changed_files: list[str], workspace_root: Path) -> set[Path]:
"""Determine which packages have changed files."""
changed_packages: set[Path] = set()
core_package_changed = False
for file_path in changed_files:
# Strip 'python/' prefix if present (when git diff is run from repo root)
file_path_str = str(file_path)
if file_path_str.startswith("python/"):
file_path_str = file_path_str[7:] # Remove 'python/' prefix
# Convert to absolute path if relative
abs_path = Path(file_path_str)
if not abs_path.is_absolute():
abs_path = workspace_root / file_path_str
# Check which package this file belongs to
for project in projects:
project_abs = workspace_root / project
try:
# Check if the file is within this project directory
abs_path.relative_to(project_abs)
changed_packages.add(project)
# Check if the core package was changed
if project == Path("packages/core"):
core_package_changed = True
break
except ValueError:
# File is not in this project
continue
# If core package changed, check all packages
if core_package_changed:
print("[yellow]Core package changed - checking all packages[/yellow]")
return set(projects)
return changed_packages
def main() -> None:
parser = argparse.ArgumentParser(description="Run a task only in packages with changed files.")
parser.add_argument("task", help="The task name to run")
parser.add_argument("files", nargs="*", help="Changed files to determine which packages to run")
args = parser.parse_args()
pyproject_file = Path(__file__).parent / "pyproject.toml"
workspace_root = pyproject_file.parent
projects = discover_projects(pyproject_file)
# If no files specified, run in all packages (default behavior)
if not args.files or args.files == ["."]:
print(f"[yellow]No specific files provided, running {args.task} in all packages[/yellow]")
changed_packages = set(projects)
else:
changed_packages = get_changed_packages(projects, args.files, workspace_root)
if changed_packages:
print(f"[cyan]Detected changes in packages: {', '.join(str(p) for p in sorted(changed_packages))}[/cyan]")
else:
print(f"[yellow]No changes detected in any package, skipping {args.task}[/yellow]")
return
# Run the task in changed packages
for project in sorted(changed_packages):
tasks = extract_poe_tasks(project / "pyproject.toml")
if args.task in tasks:
print(f"Running task {args.task} in {project}")
app = PoeThePoet(cwd=project)
result = app(cli_args=[args.task])
if result:
sys.exit(result)
else:
print(f"Task {args.task} not found in {project}")
if __name__ == "__main__":
main()
python/run_tasks_in_packages_if_exists.py
# Copyright (c) Microsoft. All rights reserved.
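"""Run a poe task in every workspace package that defines it."""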
import glob
import sys
from pathlib import Path
import tomli
from poethepoet.app import PoeThePoet
from rich import print
def discover_projects(workspace_pyproject_file: Path) -> list[Path]:
with workspace_pyproject_file.open("rb") as f:
data = tomli.load(f)
projects = data["tool"]["uv"]["workspace"]["members"]
exclude = data["tool"]["uv"]["workspace"].get("exclude", [])
all_projects: list[Path] = []
for project in projects:
if "*" in project:
globbed = glob.glob(str(project), root_dir=workspace_pyproject_file.parent)
globbed_paths = [Path(p) for p in globbed]
all_projects.extend(globbed_paths)
else:
all_projects.append(Path(project))
for project in exclude:
if "*" in project:
globbed = glob.glob(str(project), root_dir=workspace_pyproject_file.parent)
globbed_paths = [Path(p) for p in globbed]
all_projects = [p for p in all_projects if p not in globbed_paths]
else:
all_projects = [p for p in all_projects if p != Path(project)]
return all_projects
def extract_poe_tasks(file: Path) -> set[str]:
with file.open("rb") as f:
data = tomli.load(f)
tasks = set(data.get("tool", {}).get("poe", {}).get("tasks", {}).keys())
# Check if there is an include too
include: str | None = data.get("tool", {}).get("poe", {}).get("include", None)
if include:
include_file = file.parent / include
if include_file.exists():
tasks = tasks.union(extract_poe_tasks(include_file))
return tasks
def main() -> None:
pyproject_file = Path(__file__).parent / "pyproject.toml"
projects = discover_projects(pyproject_file)
if len(sys.argv) < 2:
print("Please provide a task name")
sys.exit(1)
task_name = sys.argv[1]
for project in projects:
tasks = extract_poe_tasks(project / "pyproject.toml")
if task_name in tasks:
print(f"Running task {task_name} in {project}")
app = PoeThePoet(cwd=project)
result = app(cli_args=sys.argv[1:])
if result:
sys.exit(result)
else:
print(f"Task {task_name} not found in {project}")
if __name__ == "__main__":
main()
python/samples/README.md
# Python Samples
This directory contains samples demonstrating the capabilities of Microsoft Agent Framework for Python.
## Agents
### A2A (Agent-to-Agent)
| File | Description |
|------|-------------|
| [`getting_started/agents/a2a/agent_with_a2a.py`](./getting_started/agents/a2a/agent_with_a2a.py) | Agent2Agent (A2A) Protocol Integration Sample |
### Anthropic
| File | Description |
|------|-------------|
| [`getting_started/agents/anthropic/anthropic_basic.py`](./getting_started/agents/anthropic/anthropic_basic.py) | Agent with Anthropic Client |
| [`getting_started/agents/anthropic/anthropic_advanced.py`](./getting_started/agents/anthropic/anthropic_advanced.py) | Advanced sample with `thinking` and hosted tools. |
### Azure AI (based on `azure-ai-agents` V1 package)
| File | Description |
|------|-------------|
| [`getting_started/agents/azure_ai_agent/azure_ai_basic.py`](./getting_started/agents/azure_ai_agent/azure_ai_basic.py) | Azure AI Agent Basic Example |
| [`getting_started/agents/azure_ai_agent/azure_ai_with_azure_ai_search.py`](./getting_started/agents/azure_ai_agent/azure_ai_with_azure_ai_search.py) | Azure AI Agent with Azure AI Search Example |
| [`getting_started/agents/azure_ai_agent/azure_ai_with_bing_grounding.py`](./getting_started/agents/azure_ai_agent/azure_ai_with_bing_grounding.py) | Azure AI agent with Bing Grounding search for real-time web information |
| [`getting_started/agents/azure_ai_agent/azure_ai_with_code_interpreter.py`](./getting_started/agents/azure_ai_agent/azure_ai_with_code_interpreter.py) | Azure AI Agent with Code Interpreter Example |
| [`getting_started/agents/azure_ai_agent/azure_ai_with_code_interpreter_file_generation.py`](./getting_started/agents/azure_ai_agent/azure_ai_with_code_interpreter_file_generation.py) | Azure AI Agent with Code Interpreter File Generation Example |
| [`getting_started/agents/azure_ai_agent/azure_ai_with_existing_agent.py`](./getting_started/agents/azure_ai_agent/azure_ai_with_existing_agent.py) | Azure AI Agent with Existing Agent Example |
| [`getting_started/agents/azure_ai_agent/azure_ai_with_existing_thread.py`](./getting_started/agents/azure_ai_agent/azure_ai_with_existing_thread.py) | Azure AI Agent with Existing Thread Example |
| [`getting_started/agents/azure_ai_agent/azure_ai_with_explicit_settings.py`](./getting_started/agents/azure_ai_agent/azure_ai_with_explicit_settings.py) | Azure AI Agent with Explicit Settings Example |
| [`getting_started/agents/azure_ai_agent/azure_ai_with_file_search.py`](./getting_started/agents/azure_ai_agent/azure_ai_with_file_search.py) | Azure AI agent with File Search capabilities |
| [`getting_started/agents/azure_ai_agent/azure_ai_with_function_tools.py`](./getting_started/agents/azure_ai_agent/azure_ai_with_function_tools.py) | Azure AI Agent with Function Tools Example |
| [`getting_started/agents/azure_ai_agent/azure_ai_with_hosted_mcp.py`](./getting_started/agents/azure_ai_agent/azure_ai_with_hosted_mcp.py) | Azure AI Agent with Hosted MCP Example |
| [`getting_started/agents/azure_ai_agent/azure_ai_with_local_mcp.py`](./getting_started/agents/azure_ai_agent/azure_ai_with_local_mcp.py) | Azure AI Agent with Local MCP Example |
| [`getting_started/agents/azure_ai_agent/azure_ai_with_multiple_tools.py`](./getting_started/agents/azure_ai_agent/azure_ai_with_multiple_tools.py) | Azure AI Agent with Multiple Tools Example |
| [`getting_started/agents/azure_ai_agent/azure_ai_with_openapi_tools.py`](./getting_started/agents/azure_ai_agent/azure_ai_with_openapi_tools.py) | Azure AI agent with OpenAPI tools |
| [`getting_started/agents/azure_ai_agent/azure_ai_with_thread.py`](./getting_started/agents/azure_ai_agent/azure_ai_with_thread.py) | Azure AI Agent with Thread Management Example |
### Azure AI (based on `azure-ai-projects` V2 package)
| File | Description |
|------|-------------|
| [`getting_started/agents/azure_ai/azure_ai_basic.py`](./getting_started/agents/azure_ai/azure_ai_basic.py) | Azure AI Agent Basic Example |
| [`getting_started/agents/azure_ai/azure_ai_use_latest_version.py`](./getting_started/agents/azure_ai/azure_ai_use_latest_version.py) | Azure AI Agent latest version reuse example |
| [`getting_started/agents/azure_ai/azure_ai_with_azure_ai_search.py`](./getting_started/agents/azure_ai/azure_ai_with_azure_ai_search.py) | Azure AI Agent with Azure AI Search Example |
| [`getting_started/agents/azure_ai/azure_ai_with_bing_grounding.py`](./getting_started/agents/azure_ai/azure_ai_with_bing_grounding.py) | Azure AI Agent with Bing Grounding Example |
| [`getting_started/agents/azure_ai/azure_ai_with_bing_custom_search.py`](./getting_started/agents/azure_ai/azure_ai_with_bing_custom_search.py) | Azure AI Agent with Bing Custom Search Example |
| [`getting_started/agents/azure_ai/azure_ai_with_browser_automation.py`](./getting_started/agents/azure_ai/azure_ai_with_browser_automation.py) | Azure AI Agent with Browser Automation Example |
| [`getting_started/agents/azure_ai/azure_ai_with_code_interpreter.py`](./getting_started/agents/azure_ai/azure_ai_with_code_interpreter.py) | Azure AI Agent with Code Interpreter Example |
| [`getting_started/agents/azure_ai/azure_ai_with_code_interpreter_file_generation.py`](./getting_started/agents/azure_ai/azure_ai_with_code_interpreter_file_generation.py) | Azure AI Agent with Code Interpreter File Generation Example |
| [`getting_started/agents/azure_ai/azure_ai_with_existing_agent.py`](./getting_started/agents/azure_ai/azure_ai_with_existing_agent.py) | Azure AI Agent with Existing Agent Example |
| [`getting_started/agents/azure_ai/azure_ai_with_existing_conversation.py`](./getting_started/agents/azure_ai/azure_ai_with_existing_conversation.py) | Azure AI Agent with Existing Conversation Example |
| [`getting_started/agents/azure_ai/azure_ai_with_explicit_settings.py`](./getting_started/agents/azure_ai/azure_ai_with_explicit_settings.py) | Azure AI Agent with Explicit Settings Example |
| [`getting_started/agents/azure_ai/azure_ai_with_file_search.py`](./getting_started/agents/azure_ai/azure_ai_with_file_search.py) | Azure AI Agent with File Search Example |
| [`getting_started/agents/azure_ai/azure_ai_with_hosted_mcp.py`](./getting_started/agents/azure_ai/azure_ai_with_hosted_mcp.py) | Azure AI Agent with Hosted MCP Example |
| [`getting_started/agents/azure_ai/azure_ai_with_response_format.py`](./getting_started/agents/azure_ai/azure_ai_with_response_format.py) | Azure AI Agent with Structured Output Example |
| [`getting_started/agents/azure_ai/azure_ai_with_thread.py`](./getting_started/agents/azure_ai/azure_ai_with_thread.py) | Azure AI Agent with Thread Management Example |
| [`getting_started/agents/azure_ai/azure_ai_with_image_generation.py`](./getting_started/agents/azure_ai/azure_ai_with_image_generation.py) | Azure AI Agent with Image Generation Example |
| [`getting_started/agents/azure_ai/azure_ai_with_microsoft_fabric.py`](./getting_started/agents/azure_ai/azure_ai_with_microsoft_fabric.py) | Azure AI Agent with Microsoft Fabric Example |
| [`getting_started/agents/azure_ai/azure_ai_with_web_search.py`](./getting_started/agents/azure_ai/azure_ai_with_web_search.py) | Azure AI Agent with Web Search Example |
### Azure OpenAI
| File | Description |
|------|-------------|
| [`getting_started/agents/azure_openai/azure_assistants_basic.py`](./getting_started/agents/azure_openai/azure_assistants_basic.py) | Azure OpenAI Assistants Basic Example |
| [`getting_started/agents/azure_openai/azure_assistants_with_code_interpreter.py`](./getting_started/agents/azure_openai/azure_assistants_with_code_interpreter.py) | Azure OpenAI Assistants with Code Interpreter Example |
| [`getting_started/agents/azure_openai/azure_assistants_with_existing_assistant.py`](./getting_started/agents/azure_openai/azure_assistants_with_existing_assistant.py) | Azure OpenAI Assistants with Existing Assistant Example |
| [`getting_started/agents/azure_openai/azure_assistants_with_explicit_settings.py`](./getting_started/agents/azure_openai/azure_assistants_with_explicit_settings.py) | Azure OpenAI Assistants with Explicit Settings Example |
| [`getting_started/agents/azure_openai/azure_assistants_with_function_tools.py`](./getting_started/agents/azure_openai/azure_assistants_with_function_tools.py) | Azure OpenAI Assistants with Function Tools Example |
| [`getting_started/agents/azure_openai/azure_assistants_with_thread.py`](./getting_started/agents/azure_openai/azure_assistants_with_thread.py) | Azure OpenAI Assistants with Thread Management Example |
| [`getting_started/agents/azure_openai/azure_chat_client_basic.py`](./getting_started/agents/azure_openai/azure_chat_client_basic.py) | Azure OpenAI Chat Client Basic Example |
| [`getting_started/agents/azure_openai/azure_chat_client_with_explicit_settings.py`](./getting_started/agents/azure_openai/azure_chat_client_with_explicit_settings.py) | Azure OpenAI Chat Client with Explicit Settings Example |
| [`getting_started/agents/azure_openai/azure_chat_client_with_function_tools.py`](./getting_started/agents/azure_openai/azure_chat_client_with_function_tools.py) | Azure OpenAI Chat Client with Function Tools Example |
| [`getting_started/agents/azure_openai/azure_chat_client_with_thread.py`](./getting_started/agents/azure_openai/azure_chat_client_with_thread.py) | Azure OpenAI Chat Client with Thread Management Example |
| [`getting_started/agents/azure_openai/azure_responses_client_basic.py`](./getting_started/agents/azure_openai/azure_responses_client_basic.py) | Azure OpenAI Responses Client Basic Example |
| [`getting_started/agents/azure_openai/azure_responses_client_image_analysis.py`](./getting_started/agents/azure_openai/azure_responses_client_image_analysis.py) | Azure OpenAI Responses Client with Image Analysis Example |
| [`getting_started/agents/azure_openai/azure_responses_client_with_code_interpreter.py`](./getting_started/agents/azure_openai/azure_responses_client_with_code_interpreter.py) | Azure OpenAI Responses Client with Code Interpreter Example |
| [`getting_started/agents/azure_openai/azure_responses_client_with_explicit_settings.py`](./getting_started/agents/azure_openai/azure_responses_client_with_explicit_settings.py) | Azure OpenAI Responses Client with Explicit Settings Example |
| [`getting_started/agents/azure_openai/azure_responses_client_with_function_tools.py`](./getting_started/agents/azure_openai/azure_responses_client_with_function_tools.py) | Azure OpenAI Responses Client with Function Tools Example |
| [`getting_started/agents/azure_openai/azure_responses_client_with_hosted_mcp.py`](./getting_started/agents/azure_openai/azure_responses_client_with_hosted_mcp.py) | Azure OpenAI Responses Client with Hosted Model Context Protocol (MCP) Example |
| [`getting_started/agents/azure_openai/azure_responses_client_with_local_mcp.py`](./getting_started/agents/azure_openai/azure_responses_client_with_local_mcp.py) | Azure OpenAI Responses Client with local Model Context Protocol (MCP) Example |
| [`getting_started/agents/azure_openai/azure_responses_client_with_thread.py`](./getting_started/agents/azure_openai/azure_responses_client_with_thread.py) | Azure OpenAI Responses Client with Thread Management Example |
### Copilot Studio
| File | Description |
|------|-------------|
| [`getting_started/agents/copilotstudio/copilotstudio_basic.py`](./getting_started/agents/copilotstudio/copilotstudio_basic.py) | Copilot Studio Agent Basic Example |
| [`getting_started/agents/copilotstudio/copilotstudio_with_explicit_settings.py`](./getting_started/agents/copilotstudio/copilotstudio_with_explicit_settings.py) | Copilot Studio Agent with Explicit Settings Example |
### Custom
| File | Description |
|------|-------------|
| [`getting_started/agents/custom/custom_agent.py`](./getting_started/agents/custom/custom_agent.py) | Custom Agent Implementation Example |
| [`getting_started/agents/custom/custom_chat_client.py`](./getting_started/agents/custom/custom_chat_client.py) | Custom Chat Client Implementation Example |
### Ollama
The recommended way to use Ollama is via the native `OllamaChatClient` from the `agent-framework-ollama` package; a minimal sketch follows the table below.
| File | Description |
|------|-------------|
| [`getting_started/agents/ollama/ollama_agent_basic.py`](./getting_started/agents/ollama/ollama_agent_basic.py) | Basic Ollama Agent with native Ollama Chat Client |
| [`getting_started/agents/ollama/ollama_agent_reasoning.py`](./getting_started/agents/ollama/ollama_agent_reasoning.py) | Ollama Agent with reasoning capabilities |
| [`getting_started/agents/ollama/ollama_chat_client.py`](./getting_started/agents/ollama/ollama_chat_client.py) | Direct usage of Ollama Chat Client |
| [`getting_started/agents/ollama/ollama_chat_multimodal.py`](./getting_started/agents/ollama/ollama_chat_multimodal.py) | Ollama Chat Client with multimodal (image) input |
| [`getting_started/agents/ollama/ollama_with_openai_chat_client.py`](./getting_started/agents/ollama/ollama_with_openai_chat_client.py) | Alternative: Ollama via OpenAI Chat Client |
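
For quick orientation, here is a minimal, hedged sketch of the native client in use. The import path and the zero-argument constructor (reading `OLLAMA_ENDPOINT`/`OLLAMA_MODEL` from the environment, as in `.env.example`) are assumptions inferred from the package name, so treat the runnable samples above as the source of truth.

```python
# Minimal sketch, not a verified sample: the import path and constructor
# defaults are assumptions based on the `agent-framework-ollama` package name.
import asyncio

from agent_framework.ollama import OllamaChatClient  # assumed import path


async def main() -> None:
    # Assumes OLLAMA_ENDPOINT and OLLAMA_MODEL are set in the environment.
    agent = OllamaChatClient().as_agent(
        name="assistant",
        instructions="You are a helpful assistant.",
    )
    result = await agent.run("Say hello in one sentence.")
    print(result.text)


if __name__ == "__main__":
    asyncio.run(main())
```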
### OpenAI
| File | Description |
|------|-------------|
| [`getting_started/agents/openai/openai_assistants_basic.py`](./getting_started/agents/openai/openai_assistants_basic.py) | OpenAI Assistants Basic Example |
| [`getting_started/agents/openai/openai_assistants_with_code_interpreter.py`](./getting_started/agents/openai/openai_assistants_with_code_interpreter.py) | OpenAI Assistants with Code Interpreter Example |
| [`getting_started/agents/openai/openai_assistants_with_existing_assistant.py`](./getting_started/agents/openai/openai_assistants_with_existing_assistant.py) | OpenAI Assistants with Existing Assistant Example |
| [`getting_started/agents/openai/openai_assistants_with_explicit_settings.py`](./getting_started/agents/openai/openai_assistants_with_explicit_settings.py) | OpenAI Assistants with Explicit Settings Example |
| [`getting_started/agents/openai/openai_assistants_with_file_search.py`](./getting_started/agents/openai/openai_assistants_with_file_search.py) | OpenAI Assistants with File Search Example |
| [`getting_started/agents/openai/openai_assistants_with_function_tools.py`](./getting_started/agents/openai/openai_assistants_with_function_tools.py) | OpenAI Assistants with Function Tools Example |
| [`getting_started/agents/openai/openai_assistants_with_thread.py`](./getting_started/agents/openai/openai_assistants_with_thread.py) | OpenAI Assistants with Thread Management Example |
| [`getting_started/agents/openai/openai_chat_client_basic.py`](./getting_started/agents/openai/openai_chat_client_basic.py) | OpenAI Chat Client Basic Example |
| [`getting_started/agents/openai/openai_chat_client_with_explicit_settings.py`](./getting_started/agents/openai/openai_chat_client_with_explicit_settings.py) | OpenAI Chat Client with Explicit Settings Example |
| [`getting_started/agents/openai/openai_chat_client_with_function_tools.py`](./getting_started/agents/openai/openai_chat_client_with_function_tools.py) | OpenAI Chat Client with Function Tools Example |
| [`getting_started/agents/openai/openai_chat_client_with_local_mcp.py`](./getting_started/agents/openai/openai_chat_client_with_local_mcp.py) | OpenAI Chat Client with Local MCP Example |
| [`getting_started/agents/openai/openai_chat_client_with_thread.py`](./getting_started/agents/openai/openai_chat_client_with_thread.py) | OpenAI Chat Client with Thread Management Example |
| [`getting_started/agents/openai/openai_chat_client_with_web_search.py`](./getting_started/agents/openai/openai_chat_client_with_web_search.py) | OpenAI Chat Client with Web Search Example |
| [`getting_started/agents/openai/openai_chat_client_with_runtime_json_schema.py`](./getting_started/agents/openai/openai_chat_client_with_runtime_json_schema.py) | OpenAI Chat Client with runtime JSON Schema for structured output without a Pydantic model |
| [`getting_started/agents/openai/openai_responses_client_basic.py`](./getting_started/agents/openai/openai_responses_client_basic.py) | OpenAI Responses Client Basic Example |
| [`getting_started/agents/openai/openai_responses_client_image_analysis.py`](./getting_started/agents/openai/openai_responses_client_image_analysis.py) | OpenAI Responses Client Image Analysis Example |
| [`getting_started/agents/openai/openai_responses_client_image_generation.py`](./getting_started/agents/openai/openai_responses_client_image_generation.py) | OpenAI Responses Client Image Generation Example |
| [`getting_started/agents/openai/openai_responses_client_reasoning.py`](./getting_started/agents/openai/openai_responses_client_reasoning.py) | OpenAI Responses Client Reasoning Example |
| [`getting_started/agents/openai/openai_responses_client_with_code_interpreter.py`](./getting_started/agents/openai/openai_responses_client_with_code_interpreter.py) | OpenAI Responses Client with Code Interpreter Example |
| [`getting_started/agents/openai/openai_responses_client_with_explicit_settings.py`](./getting_started/agents/openai/openai_responses_client_with_explicit_settings.py) | OpenAI Responses Client with Explicit Settings Example |
| [`getting_started/agents/openai/openai_responses_client_with_file_search.py`](./getting_started/agents/openai/openai_responses_client_with_file_search.py) | OpenAI Responses Client with File Search Example |
| [`getting_started/agents/openai/openai_responses_client_with_function_tools.py`](./getting_started/agents/openai/openai_responses_client_with_function_tools.py) | OpenAI Responses Client with Function Tools Example |
| [`getting_started/agents/openai/openai_responses_client_with_hosted_mcp.py`](./getting_started/agents/openai/openai_responses_client_with_hosted_mcp.py) | OpenAI Responses Client with Hosted MCP Example |
| [`getting_started/agents/openai/openai_responses_client_with_local_mcp.py`](./getting_started/agents/openai/openai_responses_client_with_local_mcp.py) | OpenAI Responses Client with Local MCP Example |
| [`getting_started/agents/openai/openai_responses_client_with_structured_output.py`](./getting_started/agents/openai/openai_responses_client_with_structured_output.py) | OpenAI Responses Client with Structured Output Example |
| [`getting_started/agents/openai/openai_responses_client_with_thread.py`](./getting_started/agents/openai/openai_responses_client_with_thread.py) | OpenAI Responses Client with Thread Management Example |
| [`getting_started/agents/openai/openai_responses_client_with_web_search.py`](./getting_started/agents/openai/openai_responses_client_with_web_search.py) | OpenAI Responses Client with Web Search Example |
## Chat Client
| File | Description |
|------|-------------|
| [`getting_started/chat_client/azure_ai_chat_client.py`](./getting_started/chat_client/azure_ai_chat_client.py) | Azure AI Chat Client Direct Usage Example |
| [`getting_started/chat_client/azure_assistants_client.py`](./getting_started/chat_client/azure_assistants_client.py) | Azure OpenAI Assistants Client Direct Usage Example |
| [`getting_started/chat_client/azure_chat_client.py`](./getting_started/chat_client/azure_chat_client.py) | Azure Chat Client Direct Usage Example |
| [`getting_started/chat_client/azure_responses_client.py`](./getting_started/chat_client/azure_responses_client.py) | Azure OpenAI Responses Client Direct Usage Example |
| [`getting_started/chat_client/chat_response_cancellation.py`](./getting_started/chat_client/chat_response_cancellation.py) | Chat Response Cancellation Example |
| [`getting_started/chat_client/openai_assistants_client.py`](./getting_started/chat_client/openai_assistants_client.py) | OpenAI Assistants Client Direct Usage Example |
| [`getting_started/chat_client/openai_chat_client.py`](./getting_started/chat_client/openai_chat_client.py) | OpenAI Chat Client Direct Usage Example |
| [`getting_started/chat_client/openai_responses_client.py`](./getting_started/chat_client/openai_responses_client.py) | OpenAI Responses Client Direct Usage Example |
## Context Providers
### Mem0
| File | Description |
|------|-------------|
| [`getting_started/context_providers/mem0/mem0_basic.py`](./getting_started/context_providers/mem0/mem0_basic.py) | Basic Mem0 integration example |
| [`getting_started/context_providers/mem0/mem0_oss.py`](./getting_started/context_providers/mem0/mem0_oss.py) | Mem0 OSS (Open Source) integration example |
| [`getting_started/context_providers/mem0/mem0_threads.py`](./getting_started/context_providers/mem0/mem0_threads.py) | Mem0 with thread management example |
### Redis
| File | Description |
|------|-------------|
| [`getting_started/context_providers/redis/redis_basics.py`](./getting_started/context_providers/redis/redis_basics.py) | Basic Redis provider example |
| [`getting_started/context_providers/redis/redis_conversation.py`](./getting_started/context_providers/redis/redis_conversation.py) | Redis conversation context management example |
| [`getting_started/context_providers/redis/redis_threads.py`](./getting_started/context_providers/redis/redis_threads.py) | Redis with thread management example |
### Other
| File | Description |
|------|-------------|
| [`getting_started/context_providers/simple_context_provider.py`](./getting_started/context_providers/simple_context_provider.py) | Simple context provider implementation example |
| [`getting_started/context_providers/aggregate_context_provider.py`](./getting_started/context_providers/aggregate_context_provider.py) | Shows how to combine multiple context providers using an AggregateContextProvider |
## Declarative
| File | Description |
|------|-------------|
| [`getting_started/declarative/azure_openai_responses_agent.py`](./getting_started/declarative/azure_openai_responses_agent.py) | Basic agent using Azure OpenAI with structured responses |
| [`getting_started/declarative/get_weather_agent.py`](./getting_started/declarative/get_weather_agent.py) | Agent with custom function tools using declarative bindings |
| [`getting_started/declarative/inline_yaml.py`](./getting_started/declarative/inline_yaml.py) | Agent created from inline YAML string |
| [`getting_started/declarative/mcp_tool_yaml.py`](./getting_started/declarative/mcp_tool_yaml.py) | MCP tool configuration with API key and Azure Foundry connection auth |
| [`getting_started/declarative/microsoft_learn_agent.py`](./getting_started/declarative/microsoft_learn_agent.py) | Agent with MCP server integration for Microsoft Learn documentation |
| [`getting_started/declarative/openai_responses_agent.py`](./getting_started/declarative/openai_responses_agent.py) | Basic agent using OpenAI directly |
## DevUI
| File | Description |
|------|-------------|
| [`getting_started/devui/fanout_workflow/workflow.py`](./getting_started/devui/fanout_workflow/workflow.py) | Complex fan-out/fan-in workflow example |
| [`getting_started/devui/foundry_agent/agent.py`](./getting_started/devui/foundry_agent/agent.py) | Azure AI Foundry agent example |
| [`getting_started/devui/in_memory_mode.py`](./getting_started/devui/in_memory_mode.py) | In-memory mode example for DevUI |
| [`getting_started/devui/spam_workflow/workflow.py`](./getting_started/devui/spam_workflow/workflow.py) | Spam detection workflow example |
| [`getting_started/devui/weather_agent_azure/agent.py`](./getting_started/devui/weather_agent_azure/agent.py) | Weather agent using Azure OpenAI example |
| [`getting_started/devui/workflow_agents/workflow.py`](./getting_started/devui/workflow_agents/workflow.py) | Workflow with multiple agents example |
## Evaluation
| File | Description |
|------|-------------|
| [`getting_started/evaluation/red_teaming/red_team_agent_sample.py`](./getting_started/evaluation/red_teaming/red_team_agent_sample.py) | Red team agent evaluation sample for Azure AI Foundry |
| [`getting_started/evaluation/self_reflection/self_reflection.py`](./getting_started/evaluation/self_reflection/self_reflection.py) | LLM self-reflection with AI Foundry graders example |
| [`demos/workflow_evaluation/run_evaluation.py`](./demos/workflow_evaluation/run_evaluation.py) | Multi-agent workflow evaluation demo with travel planning agents evaluated using Azure AI Foundry evaluators |
## MCP (Model Context Protocol)
| File | Description |
|------|-------------|
| [`getting_started/mcp/agent_as_mcp_server.py`](./getting_started/mcp/agent_as_mcp_server.py) | Agent as MCP Server Example |
| [`getting_started/mcp/mcp_api_key_auth.py`](./getting_started/mcp/mcp_api_key_auth.py) | MCP Authentication Example |
## Middleware
| File | Description |
|------|-------------|
| [`getting_started/middleware/agent_and_run_level_middleware.py`](./getting_started/middleware/agent_and_run_level_middleware.py) | Agent and run-level middleware example |
| [`getting_started/middleware/chat_middleware.py`](./getting_started/middleware/chat_middleware.py) | Chat middleware example |
| [`getting_started/middleware/class_based_middleware.py`](./getting_started/middleware/class_based_middleware.py) | Class-based middleware implementation example |
| [`getting_started/middleware/decorator_middleware.py`](./getting_started/middleware/decorator_middleware.py) | Decorator-based middleware example |
| [`getting_started/middleware/exception_handling_with_middleware.py`](./getting_started/middleware/exception_handling_with_middleware.py) | Exception handling with middleware example |
| [`getting_started/middleware/function_based_middleware.py`](./getting_started/middleware/function_based_middleware.py) | Function-based middleware example |
| [`getting_started/middleware/middleware_termination.py`](./getting_started/middleware/middleware_termination.py) | Middleware termination example |
| [`getting_started/middleware/override_result_with_middleware.py`](./getting_started/middleware/override_result_with_middleware.py) | Override result with middleware example |
| [`getting_started/middleware/runtime_context_delegation.py`](./getting_started/middleware/runtime_context_delegation.py) | Runtime context delegation example demonstrating how to pass API tokens, session data, and other context through hierarchical agent delegation |
| [`getting_started/middleware/shared_state_middleware.py`](./getting_started/middleware/shared_state_middleware.py) | Shared state middleware example |
| [`getting_started/middleware/thread_behavior_middleware.py`](./getting_started/middleware/thread_behavior_middleware.py) | Thread behavior middleware example demonstrating how to track conversation state across multiple agent runs |
## Multimodal Input
| File | Description |
|------|-------------|
| [`getting_started/multimodal_input/azure_chat_multimodal.py`](./getting_started/multimodal_input/azure_chat_multimodal.py) | Azure OpenAI Chat with multimodal (image) input example |
| [`getting_started/multimodal_input/azure_responses_multimodal.py`](./getting_started/multimodal_input/azure_responses_multimodal.py) | Azure OpenAI Responses with multimodal (image) input example |
| [`getting_started/multimodal_input/openai_chat_multimodal.py`](./getting_started/multimodal_input/openai_chat_multimodal.py) | OpenAI Chat with multimodal (image) input example |
## Azure Functions
| Sample | Description |
|--------|-------------|
| [`getting_started/azure_functions/01_single_agent/`](./getting_started/azure_functions/01_single_agent/) | Host a single agent in Azure Functions with Durable Extension HTTP endpoints and per-session state. |
| [`getting_started/azure_functions/02_multi_agent/`](./getting_started/azure_functions/02_multi_agent/) | Register multiple agents in one function app with dedicated run routes and a health check endpoint. |
| [`getting_started/azure_functions/03_reliable_streaming/`](./getting_started/azure_functions/03_reliable_streaming/) | Implement reliable streaming for durable agents using Redis Streams with cursor-based resumption. |
| [`getting_started/azure_functions/04_single_agent_orchestration_chaining/`](./getting_started/azure_functions/04_single_agent_orchestration_chaining/) | Chain sequential agent executions inside a durable orchestration while preserving the shared thread context. |
| [`getting_started/azure_functions/05_multi_agent_orchestration_concurrency/`](./getting_started/azure_functions/05_multi_agent_orchestration_concurrency/) | Run two agents concurrently within a durable orchestration and combine their domain-specific outputs. |
| [`getting_started/azure_functions/06_multi_agent_orchestration_conditionals/`](./getting_started/azure_functions/06_multi_agent_orchestration_conditionals/) | Route orchestration logic based on structured agent responses for spam detection and reply drafting. |
| [`getting_started/azure_functions/07_single_agent_orchestration_hitl/`](./getting_started/azure_functions/07_single_agent_orchestration_hitl/) | Implement a human-in-the-loop approval loop that iterates on agent output inside a durable orchestration. |
## Observability
| File | Description |
|------|-------------|
| [`getting_started/observability/advanced_manual_setup_console_output.py`](./getting_started/observability/advanced_manual_setup_console_output.py) | Advanced manual observability setup with console output |
| [`getting_started/observability/advanced_zero_code.py`](./getting_started/observability/advanced_zero_code.py) | Zero-code observability setup example |
| [`getting_started/observability/agent_observability.py`](./getting_started/observability/agent_observability.py) | Agent observability example |
| [`getting_started/observability/agent_with_foundry_tracing.py`](./getting_started/observability/agent_with_foundry_tracing.py) | Any chat client setup with Azure Foundry Observability |
| [`getting_started/observability/azure_ai_agent_observability.py`](./getting_started/observability/azure_ai_agent_observability.py) | Azure AI agent observability example |
| [`getting_started/observability/configure_otel_providers_with_env_var.py`](./getting_started/observability/configure_otel_providers_with_env_var.py) | Setup observability using environment variables |
| [`getting_started/observability/configure_otel_providers_with_parameters.py`](./getting_started/observability/configure_otel_providers_with_parameters.py) | Setup observability using parameters |
| [`getting_started/observability/workflow_observability.py`](./getting_started/observability/workflow_observability.py) | Workflow observability example |
## Threads
| File | Description |
|------|-------------|
| [`getting_started/threads/custom_chat_message_store_thread.py`](./getting_started/threads/custom_chat_message_store_thread.py) | Implementation of custom chat message store state |
| [`getting_started/threads/redis_chat_message_store_thread.py`](./getting_started/threads/redis_chat_message_store_thread.py) | Basic example of using Redis chat message store |
| [`getting_started/threads/suspend_resume_thread.py`](./getting_started/threads/suspend_resume_thread.py) | Demonstrates how to suspend and resume a service-managed thread |
## Tools
| File | Description |
|------|-------------|
| [`getting_started/tools/ai_function_declaration_only.py`](./getting_started/tools/ai_function_declaration_only.py) | Function declarations without implementations for testing agent reasoning |
| [`getting_started/tools/ai_function_from_dict_with_dependency_injection.py`](./getting_started/tools/ai_function_from_dict_with_dependency_injection.py) | Creating AI functions from dictionary definitions using dependency injection |
| [`getting_started/tools/ai_function_recover_from_failures.py`](./getting_started/tools/ai_function_recover_from_failures.py) | Graceful error handling when tools raise exceptions |
| [`getting_started/tools/ai_function_with_approval.py`](./getting_started/tools/ai_function_with_approval.py) | User approval workflows for function calls without threads |
| [`getting_started/tools/ai_function_with_approval_and_threads.py`](./getting_started/tools/ai_function_with_approval_and_threads.py) | Tool approval workflows using threads for conversation history management |
| [`getting_started/tools/ai_function_with_max_exceptions.py`](./getting_started/tools/ai_function_with_max_exceptions.py) | Limiting tool failure exceptions using max_invocation_exceptions |
| [`getting_started/tools/ai_function_with_max_invocations.py`](./getting_started/tools/ai_function_with_max_invocations.py) | Limiting total tool invocations using max_invocations |
| [`getting_started/tools/ai_functions_in_class.py`](./getting_started/tools/ai_functions_in_class.py) | Using ai_function decorator with class methods for stateful tools |
## Workflows
View the list of Workflows samples [here](./getting_started/workflows/README.md).
## Sample Guidelines
For information on creating new samples, see [SAMPLE_GUIDELINES.md](./SAMPLE_GUIDELINES.md).
## More Information
- [Python Package Documentation](../README.md)


@@ -0,0 +1,76 @@
# Sample Guidelines
Samples are extremely important for developers to get started with Agent Framework. We strive to provide a wide range of samples that demonstrate the capabilities of Agent Framework with consistency and quality. This document outlines the guidelines for creating samples.
## General Guidelines
- **Clear and Concise**: Samples should be clear and concise. They should demonstrate a specific set of features or capabilities of Agent Framework. The fewer concepts a sample demonstrates, the better.
- **Consistent Structure**: All samples should have a consistent structure. This includes the folder structure, file naming, and the content of the sample.
- **Incremental Complexity**: Samples should start simple and gradually increase in complexity. This helps developers understand the concepts and features of Agent Framework.
- **Documentation**: Samples should be over-documented.
### **Clear and Concise**
Try not to include too many concepts in a single sample. The goal is to demonstrate a specific feature or capability of Agent Framework. If you find yourself including too many concepts, consider breaking the sample into multiple samples. A good example of this is to break non-streaming and streaming modes into separate samples.
### **Consistent Structure**
! TODO: Update folder structure to our new needs.
! TODO: Decide on single samples folder or also samples in extensions
#### Getting Started Samples
The getting started samples are the simplest samples that require minimal setup. These samples should be named in the following format: `step<number>_<name>.py`. One exception to this rule is when the sample is a notebook, in which case the sample should be named in the following format: `<number>_<name>.ipynb`.
### **Incremental Complexity**
Make a best effort to keep the samples incremental in complexity. For example, in the getting started samples, each step should build on the previous one; the concept samples should build on the getting started samples, and the demos on both.
### **Documentation**
Try to over-document the samples. This includes comments in the code, README.md files, and any other documentation that is necessary to understand the sample. We use the guidance from [PEP8](https://peps.python.org/pep-0008/#comments) for comments in the code, with a deviation for the initial summary comment in samples and the output of the samples.
For the getting started samples and the concept samples, we should have the following:
1. A README.md file is included in each set of samples that explains the purpose of the samples and the setup required to run them.
2. A summary should be included underneath the imports that explains the purpose of the sample and required components/concepts to understand the sample. For example:
```python
'''
This sample shows how to create a chatbot. This sample uses the following two main components:
- a ChatCompletionService: This component is responsible for generating responses to user messages.
- a ChatHistory: This component is responsible for keeping track of the chat history.
The chatbot in this sample is called Mosscap, who responds to user messages with long flowery prose.
'''
```
3. Mark the code with comments to explain the purpose of each section of the code. For example:
```python
# 1. Create the instance of the Kernel to register the plugin and service.
...
# 2. Create the agent with the kernel instance.
...
```
> This will also allow the sample creator to track if the sample is getting too complex.
4. At the end of the sample, include a section that explains the expected output of the sample. For example:
```python
'''
Sample output:
User:> Why is the sky blue in one sentence?
Mosscap:> The sky is blue due to the scattering of sunlight by the molecules in the Earth's atmosphere,
a phenomenon known as Rayleigh scattering, which causes shorter blue wavelengths to become more
prominent in our visual perception.
'''
```
For the demos, a README.md file must be included that explains the purpose of the demo and how to run it. The README.md file should include the following:
- A description of the demo.
- A list of dependencies required to run the demo.
- Instructions on how to run the demo.
- Expected output of the demo.


@@ -0,0 +1,304 @@
# Copyright (c) Microsoft. All rights reserved.
"""
Script to run all Python samples in the samples directory concurrently.
This script will run all samples and report results at the end.
Note: This script is AI generated. This is for internal validation purposes only.
Samples that require human interaction are known to fail.
Usage:
python run_all_samples.py # Run all samples using uv run (concurrent)
python run_all_samples.py --direct # Run all samples directly (concurrent,
# assumes environment is set up)
python run_all_samples.py --subdir <directory> # Run samples only in specific subdirectory
python run_all_samples.py --subdir getting_started/workflows # Example: run only workflow samples
"""
import argparse
import os
import subprocess
import sys
from concurrent.futures import ThreadPoolExecutor, as_completed
from pathlib import Path
def find_python_samples(samples_dir: Path, subdir: str | None = None) -> list[Path]:
"""Find all Python sample files in the samples directory or a subdirectory."""
python_files: list[Path] = []
# Determine the search directory
if subdir:
search_dir = samples_dir / subdir
if not search_dir.exists():
print(f"Warning: Subdirectory '{subdir}' does not exist in {samples_dir}")
return []
print(f"Searching in subdirectory: {search_dir}")
else:
search_dir = samples_dir
print(f"Searching in all samples: {search_dir}")
# Walk through all subdirectories and find .py files
for root, dirs, files in os.walk(search_dir):
# Skip __pycache__ directories
dirs[:] = [d for d in dirs if d != "__pycache__"]
for file in files:
if file.endswith(".py") and not file.startswith("_") and file != "_run_all_samples.py":
python_files.append(Path(root) / file)
# Sort files for consistent execution order
return sorted(python_files)
def run_sample(
sample_path: Path,
use_uv: bool = True,
python_root: Path | None = None,
) -> tuple[bool, str, str, str]:
"""
Run a single sample file using subprocess and return (success, output, error_info, error_type).
Args:
sample_path: Path to the sample file
use_uv: Whether to use uv run
python_root: Root directory for uv run
Returns:
Tuple of (success, output, error_info, error_type)
error_type can be: "timeout", "input_hang", "execution_error", "exception"
"""
if use_uv and python_root:
cmd = ["uv", "run", "python", str(sample_path)]
cwd = python_root
else:
cmd = [sys.executable, sample_path.name]
cwd = sample_path.parent
# Set environment variables to handle Unicode properly
env = os.environ.copy()
env["PYTHONIOENCODING"] = "utf-8" # Force Python to use UTF-8 for I/O
env["PYTHONUTF8"] = "1" # Enable UTF-8 mode in Python 3.7+
try:
# Use Popen for better timeout handling with stdin for samples that may wait for input
# Popen gives us more control over process lifecycle compared to subprocess.run()
process = subprocess.Popen(
cmd, # Command to execute as a list [program, arg1, arg2, ...]
cwd=cwd, # Working directory for the subprocess
stdout=subprocess.PIPE, # Capture stdout so we can read the output
stderr=subprocess.PIPE, # Capture stderr so we can read error messages
stdin=subprocess.PIPE, # Create a pipe for stdin so we can send input
text=True, # Handle input/output as text strings (not bytes)
encoding="utf-8", # Use UTF-8 encoding to handle Unicode characters like emojis
errors="replace", # Replace problematic characters instead of failing
env=env, # Pass environment variables for proper Unicode handling
)
try:
# communicate() sends input to stdin and waits for process to complete
# input="" sends an empty string to stdin, which causes input() calls to
# immediately receive EOFError (End Of File) since there's no data to read.
# This prevents the process from hanging indefinitely waiting for user input.
stdout, stderr = process.communicate(input="", timeout=60)
except subprocess.TimeoutExpired:
# If the process doesn't complete within the timeout period, we need to
# forcibly terminate it. This is especially important for processes that
# ignore EOFError and continue to hang on input() calls.
# First attempt: Send SIGKILL (immediate termination) on Unix or TerminateProcess on Windows
process.kill()
try:
# Give the process a few seconds to clean up after being killed
stdout, stderr = process.communicate(timeout=5)
except subprocess.TimeoutExpired:
                # If the process is still unresponsive after kill(), make one
                # final best-effort terminate() call and stop waiting for its
                # output before reporting the timeout.
process.terminate()
stdout, stderr = "", "Process forcibly terminated"
return False, "", f"TIMEOUT: {sample_path.name} (exceeded 60 seconds)", "timeout"
if process.returncode == 0:
output = stdout.strip() if stdout.strip() else "No output"
return True, output, "", "success"
error_info = f"Exit code: {process.returncode}"
if stderr.strip():
error_info += f"\nSTDERR: {stderr}"
# Check if this looks like an input/interaction related error
error_type = "execution_error"
stderr_safe = stderr.encode("utf-8", errors="replace").decode("utf-8") if stderr else ""
if "EOFError" in stderr_safe or "input" in stderr_safe.lower() or "stdin" in stderr_safe.lower():
error_type = "input_hang"
elif "UnicodeEncodeError" in stderr_safe and ("charmap" in stderr_safe or "codec can't encode" in stderr_safe):
error_type = "input_hang" # Unicode errors often indicate interactive samples with emojis
return False, stdout.strip() if stdout.strip() else "", error_info, error_type
except Exception as e:
return False, "", f"ERROR: {sample_path.name} - Exception: {str(e)}", "exception"
def parse_arguments() -> argparse.Namespace:
"""Parse command line arguments."""
parser = argparse.ArgumentParser(
description="Run Python samples concurrently",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog="""
Examples:
python run_all_samples.py # Run all samples
python run_all_samples.py --direct # Run all samples directly
python run_all_samples.py --subdir getting_started # Run only getting_started samples
python run_all_samples.py --subdir getting_started/workflows # Run only workflow samples
python run_all_samples.py --subdir semantic-kernel-migration # Run only SK migration samples
""",
)
parser.add_argument(
"--direct", action="store_true", help="Run samples directly with python instead of using uv run"
)
parser.add_argument(
"--subdir", type=str, help="Run samples only in the specified subdirectory (relative to samples/)"
)
parser.add_argument(
"--max-workers", type=int, default=16, help="Maximum number of concurrent workers (default: 16)"
)
return parser.parse_args()
def main() -> None:
"""Main function to run all samples concurrently."""
args = parse_arguments()
# Get the samples directory (assuming this script is in the samples directory)
samples_dir = Path(__file__).parent
python_root = samples_dir.parent # Go up to the python/ directory
print("Python samples runner")
print(f"Samples directory: {samples_dir}")
if args.direct:
print("Running samples directly (assuming environment is set up)")
else:
print(f"Using uv run from: {python_root}")
if args.subdir:
print(f"Filtering to subdirectory: {args.subdir}")
print("🚀 Running samples concurrently...")
# Find all Python sample files
sample_files = find_python_samples(samples_dir, args.subdir)
if not sample_files:
print("No Python sample files found!")
return
print(f"Found {len(sample_files)} Python sample files")
# Run samples concurrently
results: list[tuple[Path, bool, str, str, str]] = []
with ThreadPoolExecutor(max_workers=args.max_workers) as executor:
# Submit all tasks
future_to_sample = {
executor.submit(run_sample, sample_path, not args.direct, python_root): sample_path
for sample_path in sample_files
}
# Collect results as they complete
for future in as_completed(future_to_sample):
sample_path = future_to_sample[future]
try:
success, output, error_info, error_type = future.result()
results.append((sample_path, success, output, error_info, error_type))
# Print progress - show relative path from samples directory
relative_path = sample_path.relative_to(samples_dir)
if success:
                    print(f"✅ {relative_path}")
else:
# Show error type in progress display
                    error_display = error_type.upper() if error_type != "execution_error" else "ERROR"
                    print(f"❌ {relative_path} - {error_display}")
except Exception as e:
error_info = f"Future exception: {str(e)}"
results.append((sample_path, False, "", error_info, "exception"))
relative_path = sample_path.relative_to(samples_dir)
                print(f"💥 {relative_path} - EXCEPTION")
# Sort results by original file order for consistent reporting
sample_to_index = {path: i for i, path in enumerate(sample_files)}
results.sort(key=lambda x: sample_to_index[x[0]])
successful_runs = sum(1 for _, success, _, _, _ in results if success)
failed_runs = len(results) - successful_runs
# Categorize failures by type
timeout_failures = [r for r in results if not r[1] and r[4] == "timeout"]
input_hang_failures = [r for r in results if not r[1] and r[4] == "input_hang"]
execution_errors = [r for r in results if not r[1] and r[4] == "execution_error"]
exceptions = [r for r in results if not r[1] and r[4] == "exception"]
# Print detailed results
print(f"\n{'=' * 80}")
print("DETAILED RESULTS:")
print(f"{'=' * 80}")
for sample_path, success, output, error_info, error_type in results:
relative_path = sample_path.relative_to(samples_dir)
if success:
            print(f"✅ {relative_path}")
if output and output != "No output":
print(f" Output preview: {output[:100]}{'...' if len(output) > 100 else ''}")
else:
# Display error with type indicator
if error_type == "timeout":
print(f"⏱️ {relative_path} - TIMEOUT (likely waiting for input)")
elif error_type == "input_hang":
print(f"⌨️ {relative_path} - INPUT ERROR (interactive sample)")
elif error_type == "exception":
print(f"💥 {relative_path} - EXCEPTION")
else:
                print(f"❌ {relative_path} - EXECUTION ERROR")
print(f" Error: {error_info}")
# Print categorized summary
print(f"\n{'=' * 80}")
if failed_runs == 0:
print("🎉 ALL SAMPLES COMPLETED SUCCESSFULLY!")
else:
        print(f"❌ {failed_runs} SAMPLE(S) FAILED!")
print(f"Successful runs: {successful_runs}")
print(f"Failed runs: {failed_runs}")
if failed_runs > 0:
print("\nFailure breakdown:")
if len(timeout_failures) > 0:
print(f" ⏱️ Timeouts (likely interactive): {len(timeout_failures)}")
if len(input_hang_failures) > 0:
print(f" ⌨️ Input errors (interactive): {len(input_hang_failures)}")
if len(execution_errors) > 0:
print(f" ❌ Execution errors: {len(execution_errors)}")
if len(exceptions) > 0:
print(f" 💥 Exceptions: {len(exceptions)}")
if args.subdir:
print(f"Subdirectory filter: {args.subdir}")
print(f"{'=' * 80}")
# Exit with error code if any samples failed
if failed_runs > 0:
sys.exit(1)
if __name__ == "__main__":
main()


@@ -0,0 +1 @@
"""This sample has moved to python/packages/bedrock/samples/bedrock_sample.py."""


@@ -0,0 +1,2 @@
# Ignore autogen source files
autogen


@@ -0,0 +1,61 @@
# AutoGen → Microsoft Agent Framework Migration Samples
This gallery helps AutoGen developers move to the Microsoft Agent Framework (AF) with minimal guesswork. Each script pairs AutoGen code with its AF equivalent so you can compare primitives, tooling, and orchestration patterns side by side while you migrate production workloads.
## What's Included
### Single-Agent Parity
- [01_basic_assistant_agent.py](single_agent/01_basic_assistant_agent.py) — Minimal AutoGen `AssistantAgent` and AF `ChatAgent` comparison.
- [02_assistant_agent_with_tool.py](single_agent/02_assistant_agent_with_tool.py) — Function tool integration in both SDKs.
- [03_assistant_agent_thread_and_stream.py](single_agent/03_assistant_agent_thread_and_stream.py) — Thread management and streaming responses.
- [04_agent_as_tool.py](single_agent/04_agent_as_tool.py) — Using agents as tools (hierarchical agent pattern) and streaming with tools.
### Multi-Agent Orchestration
- [01_round_robin_group_chat.py](orchestrations/01_round_robin_group_chat.py) — AutoGen `RoundRobinGroupChat` → AF `GroupChatBuilder`/`SequentialBuilder`.
- [02_selector_group_chat.py](orchestrations/02_selector_group_chat.py) — AutoGen `SelectorGroupChat` → AF `GroupChatBuilder`.
- [03_swarm.py](orchestrations/03_swarm.py) — AutoGen Swarm pattern → AF `HandoffBuilder`.
- [04_magentic_one.py](orchestrations/04_magentic_one.py) — AutoGen `MagenticOneGroupChat` → AF `MagenticBuilder`.
Each script is fully async and the `main()` routine runs both implementations back to back so you can observe their outputs in a single execution.
## Prerequisites
- Python 3.10 or later.
- Access to the necessary model endpoints (Azure OpenAI, OpenAI, etc.).
- Installed SDKs: Install AutoGen and the Microsoft Agent Framework with:
```bash
pip install autogen-agentchat "autogen-ext[openai]" agent-framework
```
- Service credentials exposed through environment variables (e.g., `OPENAI_API_KEY`).
## Running Single-Agent Samples
From the repository root:
```bash
python samples/autogen-migration/single_agent/01_basic_assistant_agent.py
```
The scripts take no CLI arguments; each runs the AutoGen implementation first, followed by the AF version. Adjust the prompt or credentials inside the file as necessary before running.
## Running Orchestration Samples
Advanced comparisons are in `autogen-migration/orchestrations` (RoundRobin, Selector, Swarm, Magentic). You can run them directly:
```bash
python samples/autogen-migration/orchestrations/01_round_robin_group_chat.py
python samples/autogen-migration/orchestrations/04_magentic_one.py
```
## Tips for Migration
- **Default behavior differences**: AutoGen's `AssistantAgent` is single-turn by default (`max_tool_iterations=1`), while AF's `ChatAgent` is multi-turn and continues tool execution automatically.
- **Thread management**: AF agents are stateless by default. Use `agent.get_new_thread()` and pass it to `run()`/`run_stream()` to maintain conversation state, similar to AutoGen's conversation context.
- **Tools**: AutoGen uses `FunctionTool` wrappers; AF uses `@ai_function` decorators with automatic schema inference (see the sketch after this list).
- **Orchestration patterns**:
- `RoundRobinGroupChat` → `SequentialBuilder` or `WorkflowBuilder`
- `SelectorGroupChat` → `GroupChatBuilder` with LLM-based speaker selection
- `Swarm` → `HandoffBuilder` for agent handoff coordination
- `MagenticOneGroupChat` → `MagenticBuilder` for orchestrated multi-agent workflows
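
To make the tool and thread bullets concrete, here is a minimal sketch. It is illustrative only: the `Annotated` parameter description, the `tools=` argument to `as_agent`, and the `thread=` argument to `run()` are assumptions inferred from the bullets above and the sample scripts in this folder, not verified API.

```python
# Hedged sketch of AF tool registration and thread reuse; details marked as
# assumptions in the comments below may differ from the actual API surface.
import asyncio
from typing import Annotated

from agent_framework import ai_function
from agent_framework.openai import OpenAIChatClient


@ai_function
def get_weather(
    city: Annotated[str, "Name of the city to look up."],  # assumed metadata style
) -> str:
    """Return a canned weather report for the given city."""
    return f"It is sunny in {city}."


async def main() -> None:
    agent = OpenAIChatClient(model_id="gpt-4.1-mini").as_agent(
        name="assistant",
        instructions="Answer weather questions using the available tool.",
        tools=[get_weather],  # assumed keyword for attaching tools
    )
    # AF agents are stateless by default; a thread carries conversation state.
    thread = agent.get_new_thread()
    first = await agent.run("What's the weather in Paris?", thread=thread)
    print(first.text)
    follow_up = await agent.run("Repeat that in French.", thread=thread)
    print(follow_up.text)


if __name__ == "__main__":
    asyncio.run(main())
```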


@@ -0,0 +1,185 @@
# Copyright (c) Microsoft. All rights reserved.
"""AutoGen RoundRobinGroupChat vs Agent Framework GroupChatBuilder/SequentialBuilder.
Demonstrates sequential agent orchestration where agents take turns processing
the task in a round-robin fashion.
"""
import asyncio
async def run_autogen() -> None:
"""AutoGen's RoundRobinGroupChat for sequential agent orchestration."""
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.conditions import TextMentionTermination
from autogen_agentchat.teams import RoundRobinGroupChat
from autogen_agentchat.ui import Console
from autogen_ext.models.openai import OpenAIChatCompletionClient
client = OpenAIChatCompletionClient(model="gpt-4.1-mini")
# Create specialized agents
researcher = AssistantAgent(
name="researcher",
model_client=client,
system_message="You are a researcher. Provide facts and data about the topic.",
model_client_stream=True,
)
writer = AssistantAgent(
name="writer",
model_client=client,
system_message="You are a writer. Turn research into engaging content.",
model_client_stream=True,
)
editor = AssistantAgent(
name="editor",
model_client=client,
system_message="You are an editor. Review and finalize the content. End with APPROVED if satisfied.",
model_client_stream=True,
)
# Create round-robin team
team = RoundRobinGroupChat(
participants=[researcher, writer, editor],
termination_condition=TextMentionTermination("APPROVED"),
)
# Run the team and display the conversation.
print("[AutoGen] Round-robin conversation:")
await Console(team.run_stream(task="Create a brief summary about electric vehicles"))
async def run_agent_framework() -> None:
"""Agent Framework's SequentialBuilder for sequential agent orchestration."""
from agent_framework import AgentRunUpdateEvent, SequentialBuilder
from agent_framework.openai import OpenAIChatClient
client = OpenAIChatClient(model_id="gpt-4.1-mini")
# Create specialized agents
researcher = client.as_agent(
name="researcher",
instructions="You are a researcher. Provide facts and data about the topic.",
)
writer = client.as_agent(
name="writer",
instructions="You are a writer. Turn research into engaging content.",
)
editor = client.as_agent(
name="editor",
instructions="You are an editor. Review and finalize the content.",
)
# Create sequential workflow
workflow = SequentialBuilder().participants([researcher, writer, editor]).build()
# Run the workflow
print("[Agent Framework] Sequential conversation:")
current_executor = None
async for event in workflow.run_stream("Create a brief summary about electric vehicles"):
if isinstance(event, AgentRunUpdateEvent):
# Print executor name header when switching to a new agent
if current_executor != event.executor_id:
if current_executor is not None:
print() # Newline after previous agent's message
print(f"---------- {event.executor_id} ----------")
current_executor = event.executor_id
if event.data:
print(event.data.text, end="", flush=True)
print() # Final newline after conversation
async def run_agent_framework_with_cycle() -> None:
"""Agent Framework's WorkflowBuilder with cyclic edges and conditional exit."""
from agent_framework import (
AgentExecutorRequest,
AgentExecutorResponse,
AgentRunUpdateEvent,
WorkflowBuilder,
WorkflowContext,
WorkflowOutputEvent,
executor,
)
from agent_framework.openai import OpenAIChatClient
client = OpenAIChatClient(model_id="gpt-4.1-mini")
# Create specialized agents
researcher = client.as_agent(
name="researcher",
instructions="You are a researcher. Provide facts and data about the topic.",
)
writer = client.as_agent(
name="writer",
instructions="You are a writer. Turn research into engaging content.",
)
editor = client.as_agent(
name="editor",
instructions="You are an editor. Review and finalize the content. End with APPROVED if satisfied.",
)
# Create custom executor for checking approval
@executor
async def check_approval(
response: AgentExecutorResponse, context: WorkflowContext[AgentExecutorRequest, str]
) -> None:
assert response.full_conversation is not None
last_message = response.full_conversation[-1]
if last_message and "APPROVED" in last_message.text:
await context.yield_output("Content approved.")
else:
await context.send_message(AgentExecutorRequest(messages=response.full_conversation, should_respond=True))
workflow = (
WorkflowBuilder()
.add_edge(researcher, writer)
.add_edge(writer, editor)
        .add_edge(editor, check_approval)
.add_edge(check_approval, researcher)
.set_start_executor(researcher)
.build()
)
# Run the workflow
print("[Agent Framework with Cycle] Cyclic conversation:")
current_executor = None
async for event in workflow.run_stream("Create a brief summary about electric vehicles"):
if isinstance(event, WorkflowOutputEvent):
print("\n---------- Workflow Output ----------")
print(event.data)
elif isinstance(event, AgentRunUpdateEvent):
# Print executor name header when switching to a new agent
if current_executor != event.executor_id:
if current_executor is not None:
print() # Newline after previous agent's message
print(f"---------- {event.executor_id} ----------")
current_executor = event.executor_id
if event.data:
print(event.data.text, end="", flush=True)
print() # Final newline after conversation
async def main() -> None:
print("=" * 60)
print("Round-Robin / Sequential Orchestration Comparison")
print("=" * 60)
print("AutoGen: RoundRobinGroupChat")
print("Agent Framework: SequentialBuilder + WorkflowBuilder with cycles\n")
await run_autogen()
print()
await run_agent_framework()
print()
await run_agent_framework_with_cycle()
if __name__ == "__main__":
asyncio.run(main())


@@ -0,0 +1,128 @@
# Copyright (c) Microsoft. All rights reserved.
"""AutoGen SelectorGroupChat vs Agent Framework GroupChatBuilder.
Demonstrates LLM-based speaker selection where an orchestrator decides
which agent should speak next based on the conversation context.
"""
import asyncio
async def run_autogen() -> None:
"""AutoGen's SelectorGroupChat with LLM-based speaker selection."""
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.conditions import MaxMessageTermination
from autogen_agentchat.teams import SelectorGroupChat
from autogen_agentchat.ui import Console
from autogen_ext.models.openai import OpenAIChatCompletionClient
client = OpenAIChatCompletionClient(model="gpt-4.1-mini")
# Create specialized agents
python_expert = AssistantAgent(
name="python_expert",
model_client=client,
system_message="You are a Python programming expert. Answer Python-related questions.",
description="Expert in Python programming",
model_client_stream=True,
)
javascript_expert = AssistantAgent(
name="javascript_expert",
model_client=client,
system_message="You are a JavaScript programming expert. Answer JavaScript-related questions.",
description="Expert in JavaScript programming",
model_client_stream=True,
)
database_expert = AssistantAgent(
name="database_expert",
model_client=client,
system_message="You are a database expert. Answer SQL and database-related questions.",
description="Expert in databases and SQL",
model_client_stream=True,
)
# Create selector group chat - LLM selects appropriate expert
team = SelectorGroupChat(
participants=[python_expert, javascript_expert, database_expert],
model_client=client,
termination_condition=MaxMessageTermination(2),
        selector_prompt="Based on the conversation so far:\n{history}\n"
        "Select the most appropriate expert from {roles} to respond next.",
)
# Run with a question that requires expert selection
print("[AutoGen] Selector group chat conversation:")
await Console(team.run_stream(task="How do I connect to a PostgreSQL database using Python?"))
async def run_agent_framework() -> None:
"""Agent Framework's GroupChatBuilder with LLM-based speaker selection."""
from agent_framework import AgentRunUpdateEvent, GroupChatBuilder
from agent_framework.openai import OpenAIChatClient
client = OpenAIChatClient(model_id="gpt-4.1-mini")
# Create specialized agents
python_expert = client.as_agent(
name="python_expert",
instructions="You are a Python programming expert. Answer Python-related questions.",
description="Expert in Python programming",
)
javascript_expert = client.as_agent(
name="javascript_expert",
instructions="You are a JavaScript programming expert. Answer JavaScript-related questions.",
description="Expert in JavaScript programming",
)
database_expert = client.as_agent(
name="database_expert",
instructions="You are a database expert. Answer SQL and database-related questions.",
description="Expert in databases and SQL",
)
workflow = (
GroupChatBuilder()
.participants([python_expert, javascript_expert, database_expert])
.set_manager(
manager=client.as_agent(
name="selector_manager",
instructions="Based on the conversation, select the most appropriate expert to respond next.",
),
display_name="SelectorManager",
)
.with_max_rounds(1)
.build()
)
# Run with a question that requires expert selection
print("[Agent Framework] Group chat conversation:")
current_executor = None
async for event in workflow.run_stream("How do I connect to a PostgreSQL database using Python?"):
if isinstance(event, AgentRunUpdateEvent):
# Print executor name header when switching to a new agent
if current_executor != event.executor_id:
if current_executor is not None:
print() # Newline after previous agent's message
print(f"---------- {event.executor_id} ----------")
current_executor = event.executor_id
if event.data:
print(event.data.text, end="", flush=True)
print() # Final newline after conversation
async def main() -> None:
print("=" * 60)
print("Selector Group Chat Comparison")
print("=" * 60)
print("AutoGen: SelectorGroupChat")
    print("Agent Framework: GroupChatBuilder with set_manager\n")
await run_autogen()
print()
await run_agent_framework()
if __name__ == "__main__":
asyncio.run(main())


@@ -0,0 +1,238 @@
# Copyright (c) Microsoft. All rights reserved.
"""AutoGen Swarm pattern vs Agent Framework HandoffBuilder.
Demonstrates agent handoff coordination where agents can transfer control
to other specialized agents based on the task requirements.
"""
import asyncio
async def run_autogen() -> None:
"""AutoGen's Swarm pattern with human-in-the-loop handoffs."""
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.conditions import HandoffTermination, TextMentionTermination
from autogen_agentchat.messages import HandoffMessage
from autogen_agentchat.teams import Swarm
from autogen_agentchat.ui import Console
from autogen_ext.models.openai import OpenAIChatCompletionClient
client = OpenAIChatCompletionClient(model="gpt-4.1-mini")
# Create triage agent that routes to specialists
triage_agent = AssistantAgent(
name="triage",
model_client=client,
system_message=(
"You are a triage agent. Analyze the user's request and hand off to the appropriate specialist.\n"
"If you need information from the user, first send your message, then handoff to user.\n"
"Use TERMINATE when the issue is fully resolved."
),
handoffs=["billing_agent", "technical_support", "user"],
model_client_stream=True,
)
# Create billing specialist
billing_agent = AssistantAgent(
name="billing_agent",
model_client=client,
system_message=(
"You are a billing specialist. Help with payment and billing questions.\n"
"If you need information from the user, first send your message, then handoff to user.\n"
"When the issue is resolved, handoff to triage to finalize."
),
handoffs=["triage", "user"],
model_client_stream=True,
)
# Create technical support specialist
tech_support = AssistantAgent(
name="technical_support",
model_client=client,
system_message=(
"You are technical support. Help with technical issues.\n"
"If you need information from the user, first send your message, then handoff to user.\n"
"When the issue is resolved, handoff to triage to finalize."
),
handoffs=["triage", "user"],
model_client_stream=True,
)
# Create swarm team with human-in-the-loop termination
termination = HandoffTermination(target="user") | TextMentionTermination("TERMINATE")
team = Swarm(
participants=[triage_agent, billing_agent, tech_support],
termination_condition=termination,
)
# Scripted user responses for demonstration
scripted_responses = [
"I was charged twice for my subscription",
"Yes, the charge of $49.99 appears twice on my credit card statement.",
"Thank you for your help!",
]
response_index = 0
# Run with human-in-the-loop pattern
print("[AutoGen] Swarm handoff conversation:")
task_result = await Console(team.run_stream(task=scripted_responses[response_index]))
last_message = task_result.messages[-1]
response_index += 1
# Continue conversation when agents handoff to user
while (
isinstance(last_message, HandoffMessage)
and last_message.target == "user"
and response_index < len(scripted_responses)
):
user_message = scripted_responses[response_index]
task_result = await Console(
team.run_stream(task=HandoffMessage(source="user", target=last_message.source, content=user_message))
)
last_message = task_result.messages[-1]
response_index += 1
async def run_agent_framework() -> None:
"""Agent Framework's HandoffBuilder for agent coordination."""
from agent_framework import (
AgentRunUpdateEvent,
HandoffBuilder,
HandoffUserInputRequest,
RequestInfoEvent,
WorkflowRunState,
WorkflowStatusEvent,
)
from agent_framework.openai import OpenAIChatClient
client = OpenAIChatClient(model_id="gpt-4.1-mini")
# Create triage agent
triage_agent = client.as_agent(
name="triage",
instructions=(
"You are a triage agent. Analyze the user's request and route to the appropriate specialist:\n"
"- For billing issues: call handoff_to_billing_agent\n"
"- For technical issues: call handoff_to_technical_support"
),
description="Routes requests to appropriate specialists",
)
# Create billing specialist
billing_agent = client.as_agent(
name="billing_agent",
instructions="You are a billing specialist. Help with payment and billing questions. Provide clear assistance.",
description="Handles billing and payment questions",
)
# Create technical support specialist
tech_support = client.as_agent(
name="technical_support",
instructions="You are technical support. Help with technical issues. Provide clear assistance.",
description="Handles technical support questions",
)
# Create handoff workflow - simpler configuration
# After specialists respond, control returns to user (via triage as coordinator)
workflow = (
HandoffBuilder(
name="support_handoff",
participants=[triage_agent, billing_agent, tech_support],
)
.set_coordinator(triage_agent)
.add_handoff(triage_agent, [billing_agent, tech_support])
.with_termination_condition(lambda conv: sum(1 for msg in conv if msg.role.value == "user") > 3)
.build()
)
# Scripted user responses
scripted_responses = [
"I was charged twice for my subscription",
"Yes, the charge of $49.99 appears twice on my credit card statement.",
"Thank you for your help!",
]
# Run with initial message
print("[Agent Framework] Handoff conversation:")
print("---------- user ----------")
print(scripted_responses[0])
current_executor = None
stream_line_open = False
pending_requests: list[RequestInfoEvent] = []
async for event in workflow.run_stream(scripted_responses[0]):
if isinstance(event, AgentRunUpdateEvent):
# Print executor name header when switching to a new agent
if current_executor != event.executor_id:
if stream_line_open:
print()
stream_line_open = False
print(f"---------- {event.executor_id} ----------")
current_executor = event.executor_id
stream_line_open = True
if event.data:
print(event.data.text, end="", flush=True)
elif isinstance(event, RequestInfoEvent):
if isinstance(event.data, HandoffUserInputRequest):
pending_requests.append(event)
elif isinstance(event, WorkflowStatusEvent):
if event.state in {WorkflowRunState.IDLE_WITH_PENDING_REQUESTS} and stream_line_open:
print()
stream_line_open = False
# Process scripted responses
response_index = 1
while pending_requests and response_index < len(scripted_responses):
user_response = scripted_responses[response_index]
print("---------- user ----------")
print(user_response)
responses = {req.request_id: user_response for req in pending_requests}
pending_requests = []
current_executor = None
stream_line_open = False
async for event in workflow.send_responses_streaming(responses):
if isinstance(event, AgentRunUpdateEvent):
# Print executor name header when switching to a new agent
if current_executor != event.executor_id:
if stream_line_open:
print()
stream_line_open = False
print(f"---------- {event.executor_id} ----------")
current_executor = event.executor_id
stream_line_open = True
if event.data:
print(event.data.text, end="", flush=True)
elif isinstance(event, RequestInfoEvent):
if isinstance(event.data, HandoffUserInputRequest):
pending_requests.append(event)
elif isinstance(event, WorkflowStatusEvent):
if (
event.state in {WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, WorkflowRunState.IDLE}
and stream_line_open
):
print()
stream_line_open = False
response_index += 1
if stream_line_open:
print()
print() # Final newline after conversation
async def main() -> None:
print("=" * 60)
print("Swarm / Handoff Pattern Comparison")
print("=" * 60)
print("AutoGen: Swarm with handoffs")
print("Agent Framework: HandoffBuilder\n")
await run_autogen()
print()
await run_agent_framework()
if __name__ == "__main__":
asyncio.run(main())


@@ -0,0 +1,153 @@
# Copyright (c) Microsoft. All rights reserved.
"""AutoGen MagenticOneGroupChat vs Agent Framework MagenticBuilder.
Demonstrates orchestrated multi-agent workflows with a central coordinator
managing specialized agents for complex tasks.
"""
import asyncio
async def run_autogen() -> None:
"""AutoGen's MagenticOneGroupChat for orchestrated collaboration."""
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.teams import MagenticOneGroupChat
from autogen_agentchat.ui import Console
from autogen_ext.models.openai import OpenAIChatCompletionClient
client = OpenAIChatCompletionClient(model="gpt-4.1-mini")
# Create specialized agents
researcher = AssistantAgent(
name="researcher",
model_client=client,
system_message="You are a research analyst. Gather and analyze information.",
description="Research analyst for data gathering",
model_client_stream=True,
)
coder = AssistantAgent(
name="coder",
model_client=client,
system_message="You are a programmer. Write code based on requirements.",
description="Software developer for implementation",
model_client_stream=True,
)
reviewer = AssistantAgent(
name="reviewer",
model_client=client,
system_message="You are a code reviewer. Review code for quality and correctness.",
description="Code reviewer for quality assurance",
model_client_stream=True,
)
# Create MagenticOne team with coordinator
team = MagenticOneGroupChat(
participants=[researcher, coder, reviewer],
model_client=client, # Coordinator uses this client
max_turns=20,
max_stalls=3,
)
# Run complex task and display the conversation
print("[AutoGen] Magentic One conversation:")
await Console(team.run_stream(task="Research Python async patterns and write a simple example"))
async def run_agent_framework() -> None:
"""Agent Framework's MagenticBuilder for orchestrated collaboration."""
from agent_framework import (
MagenticAgentDeltaEvent,
MagenticAgentMessageEvent,
MagenticBuilder,
MagenticFinalResultEvent,
MagenticOrchestratorMessageEvent,
)
from agent_framework.openai import OpenAIChatClient
client = OpenAIChatClient(model_id="gpt-4.1-mini")
# Create specialized agents
researcher = client.as_agent(
name="researcher",
instructions="You are a research analyst. Gather and analyze information.",
description="Research analyst for data gathering",
)
coder = client.as_agent(
name="coder",
instructions="You are a programmer. Write code based on requirements.",
description="Software developer for implementation",
)
reviewer = client.as_agent(
name="reviewer",
instructions="You are a code reviewer. Review code for quality and correctness.",
description="Code reviewer for quality assurance",
)
# Create Magentic workflow
workflow = (
MagenticBuilder()
.participants(researcher=researcher, coder=coder, reviewer=reviewer)
.with_standard_manager(
chat_client=client,
max_round_count=20,
max_stall_count=3,
max_reset_count=1,
)
.build()
)
# Run complex task
print("[Agent Framework] Magentic conversation:")
last_stream_agent_id: str | None = None
stream_line_open: bool = False
async for event in workflow.run_stream("Research Python async patterns and write a simple example"):
if isinstance(event, MagenticOrchestratorMessageEvent):
if stream_line_open:
print()
stream_line_open = False
print(f"---------- Orchestrator:{event.kind} ----------")
print(getattr(event.message, "text", ""))
elif isinstance(event, MagenticAgentDeltaEvent):
if last_stream_agent_id != event.agent_id or not stream_line_open:
if stream_line_open:
print()
print(f"---------- {event.agent_id} ----------")
last_stream_agent_id = event.agent_id
stream_line_open = True
if event.text:
print(event.text, end="", flush=True)
elif isinstance(event, MagenticAgentMessageEvent):
if stream_line_open:
print()
stream_line_open = False
elif isinstance(event, MagenticFinalResultEvent):
if stream_line_open:
print()
stream_line_open = False
print("---------- Final Result ----------")
if event.message is not None:
print(event.message.text)
if stream_line_open:
print()
print() # Final newline after conversation
async def main() -> None:
print("=" * 60)
print("Magentic One Orchestration Comparison")
print("=" * 60)
print("AutoGen: MagenticOneGroupChat")
print("Agent Framework: MagenticBuilder\n")
await run_autogen()
print()
await run_agent_framework()
if __name__ == "__main__":
asyncio.run(main())


@@ -0,0 +1,5 @@
{
"exclude": [
"autogen"
]
}

View File

@@ -0,0 +1,56 @@
# Copyright (c) Microsoft. All rights reserved.
"""Basic AutoGen AssistantAgent vs Agent Framework ChatAgent.
Both samples expect OpenAI-compatible environment variables (OPENAI_API_KEY or
Azure OpenAI configuration). Update the prompts or client wiring to match your
model of choice before running.
"""
import asyncio
async def run_autogen() -> None:
"""Call AutoGen's AssistantAgent for a simple question."""
from autogen_agentchat.agents import AssistantAgent
from autogen_ext.models.openai import OpenAIChatCompletionClient
# AutoGen agent with OpenAI model client
client = OpenAIChatCompletionClient(model="gpt-4.1-mini")
agent = AssistantAgent(
name="assistant",
model_client=client,
system_message="You are a helpful assistant. Answer in one sentence.",
)
# Run the agent (AutoGen maintains conversation state internally)
result = await agent.run(task="What is the capital of France?")
print("[AutoGen]", result.messages[-1].to_text())
async def run_agent_framework() -> None:
"""Call Agent Framework's ChatAgent created from OpenAIChatClient."""
from agent_framework.openai import OpenAIChatClient
# AF constructs a lightweight ChatAgent backed by OpenAIChatClient
client = OpenAIChatClient(model_id="gpt-4.1-mini")
agent = client.as_agent(
name="assistant",
instructions="You are a helpful assistant. Answer in one sentence.",
)
# Run the agent (AF agents are stateless by default)
result = await agent.run("What is the capital of France?")
print("[Agent Framework]", result.text)
async def main() -> None:
print("=" * 60)
print("Basic Assistant Agent Comparison")
print("=" * 60)
await run_autogen()
print()
await run_agent_framework()
if __name__ == "__main__":
asyncio.run(main())


@@ -0,0 +1,89 @@
# Copyright (c) Microsoft. All rights reserved.
"""AutoGen AssistantAgent vs Agent Framework ChatAgent with function tools.
Demonstrates how to create and attach tools to agents in both frameworks.
"""
import asyncio
async def run_autogen() -> None:
"""AutoGen agent with a FunctionTool."""
from autogen_agentchat.agents import AssistantAgent
from autogen_core.tools import FunctionTool
from autogen_ext.models.openai import OpenAIChatCompletionClient
# Define a simple tool function
def get_weather(location: str) -> str:
"""Get the weather for a location.
Args:
location: The city name or location.
Returns:
A weather description.
"""
return f"The weather in {location} is sunny and 72°F."
# Wrap function in FunctionTool
weather_tool = FunctionTool(
func=get_weather,
description="Get weather information for a location",
)
# Create agent with tool
client = OpenAIChatCompletionClient(model="gpt-4.1-mini")
agent = AssistantAgent(
name="assistant",
model_client=client,
tools=[weather_tool],
system_message="You are a helpful assistant. Use available tools to answer questions.",
)
# Run with tool usage
result = await agent.run(task="What's the weather in Seattle?")
print("[AutoGen]", result.messages[-1].to_text())
async def run_agent_framework() -> None:
"""Agent Framework agent with @ai_function decorator."""
from agent_framework import ai_function
from agent_framework.openai import OpenAIChatClient
# Define tool with @ai_function decorator (automatic schema inference)
@ai_function
def get_weather(location: str) -> str:
"""Get the weather for a location.
Args:
location: The city name or location.
Returns:
A weather description.
"""
return f"The weather in {location} is sunny and 72°F."
# Create agent with tool
client = OpenAIChatClient(model_id="gpt-4.1-mini")
agent = client.as_agent(
name="assistant",
instructions="You are a helpful assistant. Use available tools to answer questions.",
tools=[get_weather],
)
# Run with tool usage
result = await agent.run("What's the weather in Seattle?")
print("[Agent Framework]", result.text)
async def main() -> None:
print("=" * 60)
print("Assistant Agent with Tools Comparison")
print("=" * 60)
await run_autogen()
print()
await run_agent_framework()
if __name__ == "__main__":
asyncio.run(main())


@@ -0,0 +1,79 @@
# Copyright (c) Microsoft. All rights reserved.
"""AutoGen vs Agent Framework: Thread management and streaming responses.
Demonstrates conversation state management and streaming in both frameworks.
"""
import asyncio
async def run_autogen() -> None:
"""AutoGen agent with conversation history and streaming."""
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.ui import Console
from autogen_ext.models.openai import OpenAIChatCompletionClient
client = OpenAIChatCompletionClient(model="gpt-4.1-mini")
agent = AssistantAgent(
name="assistant",
model_client=client,
system_message="You are a helpful math tutor.",
model_client_stream=True,
)
print("[AutoGen] Conversation with history:")
# First turn - AutoGen maintains conversation state internally
result = await agent.run(task="What is 15 + 27?")
print(f" Q1: {result.messages[-1].to_text()}")
# Second turn - agent remembers context
result = await agent.run(task="What about that number times 2?")
print(f" Q2: {result.messages[-1].to_text()}")
print("\n[AutoGen] Streaming response:")
# Stream response with Console for token streaming
await Console(agent.run_stream(task="Count from 1 to 5"))
async def run_agent_framework() -> None:
"""Agent Framework agent with explicit thread and streaming."""
from agent_framework.openai import OpenAIChatClient
client = OpenAIChatClient(model_id="gpt-4.1-mini")
agent = client.as_agent(
name="assistant",
instructions="You are a helpful math tutor.",
)
print("[Agent Framework] Conversation with thread:")
# Create a thread to maintain state
thread = agent.get_new_thread()
# First turn - pass thread to maintain history
result1 = await agent.run("What is 15 + 27?", thread=thread)
print(f" Q1: {result1.text}")
# Second turn - agent remembers context via thread
result2 = await agent.run("What about that number times 2?", thread=thread)
print(f" Q2: {result2.text}")
print("\n[Agent Framework] Streaming response:")
# Stream response
print(" ", end="")
async for chunk in agent.run_stream("Count from 1 to 5"):
if chunk.text:
print(chunk.text, end="", flush=True)
print()
async def main() -> None:
print("=" * 60)
print("Thread Management and Streaming Comparison")
print("=" * 60)
await run_autogen()
print()
await run_agent_framework()
if __name__ == "__main__":
asyncio.run(main())


@@ -0,0 +1,130 @@
# Copyright (c) Microsoft. All rights reserved.
"""AutoGen vs Agent Framework: Agent-as-a-Tool pattern.
Demonstrates hierarchical agent architectures where one agent delegates
work to specialized sub-agents wrapped as tools.
"""
import asyncio
async def run_autogen() -> None:
"""AutoGen's AgentTool for hierarchical agents with streaming."""
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.tools import AgentTool
from autogen_agentchat.ui import Console
from autogen_ext.models.openai import OpenAIChatCompletionClient
# Create a specialized writer agent
writer_client = OpenAIChatCompletionClient(model="gpt-4.1-mini")
writer = AssistantAgent(
name="writer",
model_client=writer_client,
system_message="You are a creative writer. Write short, engaging content.",
model_client_stream=True,
)
# Wrap writer agent as a tool (description is taken from agent.description)
writer_tool = AgentTool(agent=writer)
# Create coordinator agent with writer as a tool
# IMPORTANT: Disable parallel_tool_calls when using AgentTool
coordinator_client = OpenAIChatCompletionClient(
model="gpt-4.1-mini",
parallel_tool_calls=False,
)
coordinator = AssistantAgent(
name="coordinator",
model_client=coordinator_client,
tools=[writer_tool],
system_message="You coordinate with specialized agents. Delegate writing tasks to the writer agent.",
model_client_stream=True,
)
# Run coordinator with streaming - it will delegate to writer
print("[AutoGen]")
await Console(coordinator.run_stream(task="Create a tagline for a coffee shop"))
async def run_agent_framework() -> None:
"""Agent Framework's as_tool() for hierarchical agents with streaming."""
from agent_framework import FunctionCallContent, FunctionResultContent
from agent_framework.openai import OpenAIChatClient
client = OpenAIChatClient(model_id="gpt-4.1-mini")
# Create specialized writer agent
writer = client.as_agent(
name="writer",
instructions="You are a creative writer. Write short, engaging content.",
)
# Convert writer to a tool using as_tool()
writer_tool = writer.as_tool(
name="creative_writer",
description="Generate creative content",
arg_name="request",
arg_description="What to write",
)
# Create coordinator agent with writer tool
coordinator = client.as_agent(
name="coordinator",
instructions="You coordinate with specialized agents. Delegate writing tasks to the writer agent.",
tools=[writer_tool],
)
# Run coordinator with streaming - it will delegate to writer
print("[Agent Framework]")
# Track accumulated function calls (they stream in incrementally)
accumulated_calls: dict[str, FunctionCallContent] = {}
async for chunk in coordinator.run_stream("Create a tagline for a coffee shop"):
# Stream text tokens
if chunk.text:
print(chunk.text, end="", flush=True)
# Process streaming function calls and results
if chunk.contents:
for content in chunk.contents:
if isinstance(content, FunctionCallContent):
# Accumulate function call content as it streams in
call_id = content.call_id
if call_id in accumulated_calls:
# Add to existing call (arguments stream in gradually)
accumulated_calls[call_id] = accumulated_calls[call_id] + content
else:
# First chunk of this function call
accumulated_calls[call_id] = content
print("\n[Function Call - streaming]", flush=True)
print(f" Call ID: {call_id}", flush=True)
print(f" Name: {content.name}", flush=True)
# Show accumulated arguments so far
current_args = accumulated_calls[call_id].arguments
print(f" Arguments: {current_args}", flush=True)
elif isinstance(content, FunctionResultContent):
# Tool result - shows writer's response
result_text = content.result if isinstance(content.result, str) else str(content.result)
if result_text.strip():
print("\n[Function Result]", flush=True)
print(f" Call ID: {content.call_id}", flush=True)
print(f" Result: {result_text[:150]}{'...' if len(result_text) > 150 else ''}", flush=True)
print()
async def main() -> None:
print("=" * 60)
print("Agent-as-Tool Pattern Comparison")
print("=" * 60)
print("Note: AutoGen requires parallel_tool_calls=False for AgentTool")
print(" Agent Framework handles this automatically\n")
await run_autogen()
print()
await run_agent_framework()
if __name__ == "__main__":
asyncio.run(main())


@@ -0,0 +1,4 @@
*.db
*.db-shm
*.db-wal
uploads/


@@ -0,0 +1,318 @@
# ChatKit Integration Sample with Weather Agent and Image Analysis
This sample demonstrates how to integrate Microsoft Agent Framework with OpenAI ChatKit. It provides a complete implementation of a weather assistant with interactive widget visualization, image analysis, and file upload support.
**Features:**
- Weather information with interactive widgets
- Image analysis using vision models
- Current time queries
- File upload with attachment storage
- Chat interface with streaming responses
- City selector widget with one-click weather
## Architecture
```mermaid
graph TB
subgraph Frontend["React Frontend (ChatKit UI)"]
UI[ChatKit Components]
Upload[File Upload]
end
subgraph Backend["FastAPI Server"]
FastAPI[FastAPI Endpoints]
subgraph ChatKit["WeatherChatKitServer"]
Respond[respond method]
Action[action method]
end
subgraph Stores["Data & Storage Layer"]
SQLite[SQLiteStore<br/>Store Protocol]
AttStore[FileBasedAttachmentStore<br/>AttachmentStore Protocol]
DB[(SQLite DB<br/>chatkit_demo.db)]
Files[/uploads directory/]
end
subgraph Integration["Agent Framework Integration"]
Converter[ThreadItemConverter]
Streamer[stream_agent_response]
Agent[ChatAgent]
end
Widgets[Widget Rendering<br/>render_weather_widget<br/>render_city_selector_widget]
end
subgraph Azure["Azure AI"]
Foundry[GPT-5<br/>with Vision]
end
UI -->|HTTP POST /chatkit| FastAPI
Upload -->|HTTP POST /upload/:id| FastAPI
FastAPI --> ChatKit
ChatKit -->|save/load threads| SQLite
ChatKit -->|save/load attachments| AttStore
ChatKit -->|convert messages| Converter
SQLite -.->|persist| DB
AttStore -.->|save files| Files
AttStore -.->|save metadata| SQLite
Converter -->|ChatMessage array| Agent
Agent -->|AgentResponseUpdate| Streamer
Streamer -->|ThreadStreamEvent| ChatKit
ChatKit --> Widgets
Widgets -->|WidgetItem| ChatKit
Agent <-->|Chat Completions API| Foundry
ChatKit -->|ThreadStreamEvent| FastAPI
FastAPI -->|SSE Stream| UI
style ChatKit fill:#e1f5ff
style Stores fill:#fff4e1
style Integration fill:#f0e1ff
style Azure fill:#e1ffe1
```
### Server Implementation
The sample implements a ChatKit server using the `ChatKitServer` base class from the `chatkit` package (the wiring sketch after the component list shows how the pieces fit together):
**Core Components:**
- **`WeatherChatKitServer`**: Custom ChatKit server implementation that:
- Extends `ChatKitServer[dict[str, Any]]`
- Uses Agent Framework's `ChatAgent` with Azure OpenAI
- Converts ChatKit messages to Agent Framework format using `ThreadItemConverter`
- Streams responses back to ChatKit using `stream_agent_response`
- Creates and streams interactive widgets after agent responses
- **`SQLiteStore`**: Data persistence layer that:
- Implements the `Store[dict[str, Any]]` protocol from ChatKit
- Persists threads, messages, and attachment metadata in SQLite
- Provides thread management and item history
- Stores attachment metadata for the upload lifecycle
- **`FileBasedAttachmentStore`**: File storage implementation that:
- Implements the `AttachmentStore[dict[str, Any]]` protocol from ChatKit
- Stores uploaded files on the local filesystem (in `./uploads` directory)
- Generates upload URLs for two-phase file upload
- Saves attachment metadata to the data store for upload tracking
- Provides preview URLs for images
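These components are wired together at application startup. A condensed sketch of the wiring in `app.py` (the concrete values mirror this sample's configuration constants):
```python
# Condensed from app.py: wire the stores into the ChatKit server
data_store = SQLiteStore(db_path="chatkit_demo.db")
attachment_store = FileBasedAttachmentStore(
    uploads_dir="./uploads",
    base_url="http://localhost:8001",
    data_store=data_store,  # attachment metadata persists alongside threads
)
chatkit_server = WeatherChatKitServer(data_store, attachment_store)
```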
**Key Integration Points:**
```python
# Converting ChatKit messages to Agent Framework
converter = ThreadItemConverter(
attachment_data_fetcher=self._fetch_attachment_data
)
agent_messages = await converter.to_agent_input(user_message_item)
# Running agent and streaming back to ChatKit
async for event in stream_agent_response(
self.weather_agent.run_stream(agent_messages),
thread_id=thread.id,
):
yield event
# Streaming widgets
widget = render_weather_widget(weather_data)
async for event in stream_widget(thread_id=thread.id, widget=widget):
yield event
```
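The file upload is two-phase: `attachments.create` (via the `/chatkit` endpoint) returns an `upload_url`, and the client then POSTs the raw bytes to it. A minimal sketch of that second phase, assuming the backend is running locally and `httpx` is installed (`attachment_id` and the file name are placeholders):
```python
import httpx

attachment_id = "att_example"  # hypothetical ID from attachments.create
with open("photo.png", "rb") as f:
    response = httpx.post(
        f"http://localhost:8001/upload/{attachment_id}",
        files={"file": f},  # matches the FastAPI UploadFile field name
    )
response.raise_for_status()
print(response.json())  # attachment metadata with upload_url cleared
```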
## Installation and Setup
### Prerequisites
- Python 3.10+
- Node.js 18.18+ and npm 9+
- Azure OpenAI service configured
- Azure CLI for authentication (`az login`)
### Network Requirements
> **Important:** This sample uses the OpenAI ChatKit frontend, which requires internet connectivity to OpenAI services.
The frontend makes outbound requests to:
- `cdn.platform.openai.com` - ChatKit UI library (required)
- `chatgpt.com` - Configuration endpoint
- `api-js.mixpanel.com` - Telemetry
**This sample is not suitable for air-gapped or network-restricted environments.** The ChatKit frontend library cannot be self-hosted. See [Limitations](#limitations) for details.
### Domain Key Configuration
For **local development**, the sample uses a default domain key (`domain_pk_localhost_dev`).
For **production deployment**:
1. Register your domain at [platform.openai.com](https://platform.openai.com/settings/organization/security/domain-allowlist)
2. Create a `.env` file in the `frontend` directory:
```
VITE_CHATKIT_API_DOMAIN_KEY=your_domain_key_here
```
### Backend Setup
1. **Install Python packages:**
```bash
cd python/samples/demos/chatkit-integration
pip install agent-framework-chatkit fastapi uvicorn azure-identity
```
2. **Configure Azure OpenAI:**
```bash
export AZURE_OPENAI_ENDPOINT="https://your-resource.openai.azure.com/"
export AZURE_OPENAI_API_VERSION="2024-06-01"
export AZURE_OPENAI_CHAT_DEPLOYMENT_NAME="gpt-4o"
```
3. **Authenticate with Azure:**
```bash
az login
```
### Frontend Setup
Install the Node.js dependencies:
```bash
cd frontend
npm install
```
## How to Run
### Start the Backend Server
From the `chatkit-integration` directory:
```bash
python app.py
```
Or with auto-reload for development:
```bash
uvicorn app:app --host 127.0.0.1 --port 8001 --reload
```
The backend will start on `http://localhost:8001`.
### Start the Frontend Development Server
In a new terminal, from the `frontend` directory:
```bash
npm run dev
```
The frontend will start on `http://localhost:5171`.
### Access the Application
Open your browser and navigate to:
```
http://localhost:5171
```
You can now:
- Ask about weather in any location (weather widgets display automatically)
- Upload images for analysis using the attachment button
- Get the current time
- Ask to see available cities and click city buttons for instant weather
### Project Structure
```
chatkit-integration/
├── app.py # FastAPI backend with ChatKitServer implementation
├── store.py # SQLiteStore implementation
├── attachment_store.py # FileBasedAttachmentStore implementation
├── weather_widget.py # Widget rendering functions
├── chatkit_demo.db # SQLite database (auto-created)
├── uploads/ # Uploaded files directory (auto-created)
└── frontend/
├── package.json
├── vite.config.ts
├── index.html
└── src/
├── main.tsx
└── App.tsx # ChatKit UI integration
```
### Configuration
You can customize the application by editing constants at the top of `app.py`:
```python
# Server configuration
SERVER_HOST = "127.0.0.1" # Bind to localhost only for security (local dev)
SERVER_PORT = 8001
SERVER_BASE_URL = f"http://localhost:{SERVER_PORT}"
# Database configuration
DATABASE_PATH = "chatkit_demo.db"
# File storage configuration
UPLOADS_DIRECTORY = "./uploads"
# User context
DEFAULT_USER_ID = "demo_user"
```
### Sample Conversations
Try these example queries:
- "What's the weather like in Tokyo?"
- "Show me available cities" (displays interactive city selector)
- "What's the current time?"
- Upload an image and ask "What do you see in this image?"
## Limitations
### Air-Gapped / Regulated Environments
The ChatKit frontend (`chatkit.js`) is loaded from OpenAI's CDN and cannot be self-hosted. This means:
- **Not suitable for air-gapped environments** where `*.openai.com` is blocked
- **Not suitable for regulated environments** that prohibit external telemetry
- **Requires domain registration** with OpenAI for production use
**What you CAN self-host:**
- The Python backend (FastAPI server, `ChatKitServer`, stores)
- The `agent-framework-chatkit` integration layer
- Your LLM infrastructure (Azure OpenAI, local models, etc.)
**What you CANNOT self-host:**
- The ChatKit frontend UI library
For more details, see:
- [openai/chatkit-js#57](https://github.com/openai/chatkit-js/issues/57) - Self-hosting feature request
- [openai/chatkit-js#76](https://github.com/openai/chatkit-js/issues/76) - Domain key requirements
## Learn More
- [Agent Framework Documentation](https://aka.ms/agent-framework)
- [ChatKit Documentation](https://platform.openai.com/docs/guides/chatkit)
- [Azure OpenAI Documentation](https://learn.microsoft.com/en-us/azure/ai-foundry/)


@@ -0,0 +1 @@
# Copyright (c) Microsoft. All rights reserved.


@@ -0,0 +1,631 @@
# Copyright (c) Microsoft. All rights reserved.
"""
ChatKit Integration Sample with Weather Agent and Image Analysis
This sample demonstrates how to integrate Microsoft Agent Framework with OpenAI ChatKit
using a weather tool with widget visualization, image analysis, and Azure OpenAI. It shows
a complete ChatKit server implementation using Agent Framework agents with proper FastAPI
setup, interactive weather widgets, and vision capabilities for analyzing uploaded images.
"""
import logging
from collections.abc import AsyncIterator, Callable
from datetime import datetime, timezone
from random import randint
from typing import Annotated, Any
import uvicorn
# Agent Framework imports
from agent_framework import AgentResponseUpdate, ChatAgent, ChatMessage, FunctionResultContent, Role
from agent_framework.azure import AzureOpenAIChatClient
# Agent Framework ChatKit integration
from agent_framework_chatkit import ThreadItemConverter, stream_agent_response
# Local imports
from attachment_store import FileBasedAttachmentStore
from azure.identity import AzureCliCredential
# ChatKit imports
from chatkit.actions import Action
from chatkit.server import ChatKitServer
from chatkit.store import StoreItemType, default_generate_id
from chatkit.types import (
ThreadItem,
ThreadItemDoneEvent,
ThreadMetadata,
ThreadStreamEvent,
UserMessageItem,
WidgetItem,
)
from chatkit.widgets import WidgetRoot
from fastapi import FastAPI, File, Request, UploadFile
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import FileResponse, JSONResponse, Response, StreamingResponse
from pydantic import Field
from store import SQLiteStore
from weather_widget import (
WeatherData,
city_selector_copy_text,
render_city_selector_widget,
render_weather_widget,
weather_widget_copy_text,
)
# ============================================================================
# Configuration Constants
# ============================================================================
# Server configuration
SERVER_HOST = "127.0.0.1" # Bind to localhost only for security (local dev)
SERVER_PORT = 8001
SERVER_BASE_URL = f"http://localhost:{SERVER_PORT}"
# Database configuration
DATABASE_PATH = "chatkit_demo.db"
# File storage configuration
UPLOADS_DIRECTORY = "./uploads"
# User context
DEFAULT_USER_ID = "demo_user"
# Logging configuration
LOG_LEVEL = logging.INFO
LOG_FORMAT = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
# ============================================================================
# Logging Setup
# ============================================================================
logging.basicConfig(
level=LOG_LEVEL,
format=LOG_FORMAT,
datefmt=LOG_DATE_FORMAT,
)
logger = logging.getLogger(__name__)
class WeatherResponse(str):
"""A string response that also carries WeatherData for widget creation."""
def __new__(cls, text: str, weather_data: WeatherData):
instance = super().__new__(cls, text)
instance.weather_data = weather_data # type: ignore
return instance
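# Illustrative behavior (hypothetical REPL session): the model receives a plain
# string, while the server can still recover the structured payload:
#   resp = get_weather("Paris")
#   isinstance(resp, str)         # True -- WeatherResponse subclasses str
#   resp.weather_data.location    # "Paris"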
async def stream_widget(
thread_id: str,
widget: WidgetRoot,
copy_text: str | None = None,
generate_id: Callable[[StoreItemType], str] = default_generate_id,
) -> AsyncIterator[ThreadStreamEvent]:
"""Stream a ChatKit widget as a ThreadStreamEvent.
This helper function creates a ChatKit widget item and yields it as a
ThreadItemDoneEvent that can be consumed by the ChatKit UI.
Args:
thread_id: The ChatKit thread ID for the conversation.
widget: The ChatKit widget to display.
copy_text: Optional text representation of the widget for copy/paste.
generate_id: Optional function to generate IDs for ChatKit items.
Yields:
ThreadStreamEvent: ChatKit event containing the widget.
"""
item_id = generate_id("message")
widget_item = WidgetItem(
id=item_id,
thread_id=thread_id,
created_at=datetime.now(),
widget=widget,
copy_text=copy_text,
)
yield ThreadItemDoneEvent(type="thread.item.done", item=widget_item)
def get_weather(
location: Annotated[str, Field(description="The location to get the weather for.")],
) -> str:
"""Get the weather for a given location.
Returns a string description with embedded WeatherData for widget creation.
"""
logger.info(f"Fetching weather for location: {location}")
conditions = ["sunny", "cloudy", "rainy", "stormy", "snowy", "foggy"]
temperature = randint(-5, 35)
condition = conditions[randint(0, len(conditions) - 1)]
# Add some realistic details
humidity = randint(30, 90)
wind_speed = randint(5, 25)
weather_data = WeatherData(
location=location,
condition=condition,
temperature=temperature,
humidity=humidity,
wind_speed=wind_speed,
)
logger.debug(f"Weather data generated: {condition}, {temperature}°C, {humidity}% humidity, {wind_speed} km/h wind")
# Return a WeatherResponse that is both a string (for the LLM) and carries structured data
text = (
f"Weather in {location}:\n"
f"• Condition: {condition.title()}\n"
f"• Temperature: {temperature}°C\n"
f"• Humidity: {humidity}%\n"
f"• Wind: {wind_speed} km/h"
)
return WeatherResponse(text, weather_data)
def get_time() -> str:
"""Get the current UTC time."""
current_time = datetime.now(timezone.utc)
logger.info("Getting current UTC time")
return f"Current UTC time: {current_time.strftime('%Y-%m-%d %H:%M:%S')} UTC"
def show_city_selector() -> str:
"""Show an interactive city selector widget to the user.
This function triggers the display of a widget that allows users
to select from popular cities to get weather information.
Returns a special marker string that will be detected to show the widget.
"""
logger.info("Activating city selector widget")
return "__SHOW_CITY_SELECTOR__"
class WeatherChatKitServer(ChatKitServer[dict[str, Any]]):
"""ChatKit server implementation using Agent Framework.
This server integrates Agent Framework agents with ChatKit's server protocol,
providing weather information with interactive widgets and time queries through Azure OpenAI.
"""
def __init__(self, data_store: SQLiteStore, attachment_store: FileBasedAttachmentStore):
super().__init__(data_store, attachment_store)
logger.info("Initializing WeatherChatKitServer")
# Create Agent Framework agent with Azure OpenAI
# For authentication, run `az login` command in terminal
try:
self.weather_agent = ChatAgent(
chat_client=AzureOpenAIChatClient(credential=AzureCliCredential()),
instructions=(
"You are a helpful weather assistant with image analysis capabilities. "
"You can provide weather information for any location, tell the current time, "
"and analyze images that users upload. Be friendly and informative in your responses.\n\n"
"If a user asks to see a list of cities or wants to choose from available cities, "
"use the show_city_selector tool to display an interactive city selector.\n\n"
"When users upload images, you will automatically receive them and can analyze their content. "
"Describe what you see in detail and be helpful in answering questions about the images."
),
tools=[get_weather, get_time, show_city_selector],
)
logger.info("Weather agent initialized successfully with Azure OpenAI")
except Exception as e:
logger.error(f"Failed to initialize weather agent: {e}")
raise
# Create ThreadItemConverter with attachment data fetcher
self.converter = ThreadItemConverter(
attachment_data_fetcher=self._fetch_attachment_data,
)
logger.info("WeatherChatKitServer initialized")
async def _fetch_attachment_data(self, attachment_id: str) -> bytes:
"""Fetch attachment binary data for the converter.
Args:
attachment_id: The ID of the attachment to fetch.
Returns:
The binary data of the attachment.
"""
return await attachment_store.read_attachment_bytes(attachment_id)
async def _update_thread_title(
self, thread: ThreadMetadata, thread_items: list[ThreadItem], context: dict[str, Any]
) -> None:
"""Update thread title using LLM to generate a concise summary.
Args:
thread: The thread metadata to update.
thread_items: All items in the thread.
context: The context dictionary.
"""
logger.info(f"Attempting to update thread title for thread: {thread.id}")
if not thread_items:
logger.debug("No thread items available for title generation")
return
# Collect user messages to understand the conversation topic
user_messages: list[str] = []
for item in thread_items:
if isinstance(item, UserMessageItem) and item.content:
for content_part in item.content:
if hasattr(content_part, "text") and isinstance(content_part.text, str):
user_messages.append(content_part.text)
break
if not user_messages:
logger.debug("No user messages found for title generation")
return
logger.debug(f"Found {len(user_messages)} user message(s) for title generation")
try:
# Use the agent's chat client to generate a concise title
# Combine first few messages to capture the conversation topic
conversation_context = "\n".join(user_messages[:3])
title_prompt = [
ChatMessage(
role=Role.USER,
text=(
f"Generate a very short, concise title (max 40 characters) for a conversation "
f"that starts with:\n\n{conversation_context}\n\n"
"Respond with ONLY the title, nothing else."
),
)
]
# Use the chat client directly for a quick, lightweight call
response = await self.weather_agent.chat_client.get_response(
messages=title_prompt,
options={
"temperature": 0.3,
"max_tokens": 20,
},
)
if response.messages and response.messages[-1].text:
title = response.messages[-1].text.strip().strip('"').strip("'")
# Ensure it's not too long
if len(title) > 50:
title = title[:47] + "..."
thread.title = title
await self.store.save_thread(thread, context)
logger.info(f"Updated thread {thread.id} title to: {title}")
except Exception as e:
logger.warning(f"Failed to generate thread title, using fallback: {e}")
# Fallback to simple truncation
first_message: str = user_messages[0]
title: str = first_message[:50].strip()
if len(first_message) > 50:
title += "..."
thread.title = title
await self.store.save_thread(thread, context)
logger.info(f"Updated thread {thread.id} title to (fallback): {title}")
async def respond(
self,
thread: ThreadMetadata,
input_user_message: UserMessageItem | None,
context: dict[str, Any],
) -> AsyncIterator[ThreadStreamEvent]:
"""Handle incoming user messages and generate responses.
This method converts ChatKit messages to Agent Framework format using ThreadItemConverter,
runs the agent, converts the response back to ChatKit events using stream_agent_response,
and creates interactive weather widgets when weather data is queried.
"""
if input_user_message is None:
logger.debug("Received None user message, skipping")
return
logger.info(f"Processing message for thread: {thread.id}")
try:
# Track weather data and city selector flag for this request
weather_data: WeatherData | None = None
show_city_selector = False
# Load full thread history from the store
thread_items_page = await self.store.load_thread_items(
thread_id=thread.id,
after=None,
limit=1000,
order="asc",
context=context,
)
thread_items = thread_items_page.data
# Convert ALL thread items to Agent Framework ChatMessages using ThreadItemConverter
# This ensures the agent has the full conversation context
agent_messages = await self.converter.to_agent_input(thread_items)
if not agent_messages:
logger.warning("No messages after conversion")
return
logger.info(f"Running agent with {len(agent_messages)} message(s)")
# Run the Agent Framework agent with streaming
agent_stream = self.weather_agent.run_stream(agent_messages)
# Create an intercepting stream that extracts function results while passing through updates
async def intercept_stream() -> AsyncIterator[AgentResponseUpdate]:
nonlocal weather_data, show_city_selector
async for update in agent_stream:
# Check for function results in the update
if update.contents:
for content in update.contents:
if isinstance(content, FunctionResultContent):
result = content.result
# Check if it's a WeatherResponse (string subclass with weather_data attribute)
if isinstance(result, str) and hasattr(result, "weather_data"):
extracted_data = getattr(result, "weather_data", None)
if isinstance(extracted_data, WeatherData):
weather_data = extracted_data
logger.info(f"Weather data extracted: {weather_data.location}")
# Check if it's the city selector marker
elif isinstance(result, str) and result == "__SHOW_CITY_SELECTOR__":
show_city_selector = True
logger.info("City selector flag detected")
yield update
# Stream updates as ChatKit events with interception
async for event in stream_agent_response(
intercept_stream(),
thread_id=thread.id,
):
yield event
# If weather data was collected during the tool call, create a widget
if isinstance(weather_data, WeatherData):
logger.info(f"Creating weather widget for location: {weather_data.location}")
# Create weather widget
widget = render_weather_widget(weather_data)
copy_text = weather_widget_copy_text(weather_data)
# Stream the widget
async for widget_event in stream_widget(thread_id=thread.id, widget=widget, copy_text=copy_text):
yield widget_event
logger.debug("Weather widget streamed successfully")
# If city selector should be shown, create and stream that widget
if show_city_selector:
logger.info("Creating city selector widget")
# Create city selector widget
selector_widget = render_city_selector_widget()
selector_copy_text = city_selector_copy_text()
# Stream the widget
async for widget_event in stream_widget(
thread_id=thread.id, widget=selector_widget, copy_text=selector_copy_text
):
yield widget_event
logger.debug("City selector widget streamed successfully")
# Update thread title based on first user message if not already set
if not thread.title or thread.title == "New thread":
await self._update_thread_title(thread, thread_items, context)
logger.info(f"Completed processing message for thread: {thread.id}")
except Exception as e:
logger.error(f"Error processing message for thread {thread.id}: {e}", exc_info=True)
async def action(
self,
thread: ThreadMetadata,
action: Action[str, Any],
sender: WidgetItem | None,
context: dict[str, Any],
) -> AsyncIterator[ThreadStreamEvent]:
"""Handle widget actions from the frontend.
This method processes actions triggered by interactive widgets,
such as city selection from the city selector widget.
"""
logger.info(f"Received action: {action.type} for thread: {thread.id}")
if action.type == "city_selected":
# Extract city information from the action payload
city_label = action.payload.get("city_label", "Unknown")
logger.info(f"City selected: {city_label}")
logger.debug(f"Action payload: {action.payload}")
# Track weather data for this request
weather_data: WeatherData | None = None
# Create an agent message asking about the weather
agent_messages = [ChatMessage(role=Role.USER, text=f"What's the weather in {city_label}?")]
logger.debug(f"Processing weather query: {agent_messages[0].text}")
# Run the Agent Framework agent with streaming
agent_stream = self.weather_agent.run_stream(agent_messages)
# Create an intercepting stream that extracts function results while passing through updates
async def intercept_stream() -> AsyncIterator[AgentResponseUpdate]:
nonlocal weather_data
async for update in agent_stream:
# Check for function results in the update
if update.contents:
for content in update.contents:
if isinstance(content, FunctionResultContent):
result = content.result
# Check if it's a WeatherResponse (string subclass with weather_data attribute)
if isinstance(result, str) and hasattr(result, "weather_data"):
extracted_data = getattr(result, "weather_data", None)
if isinstance(extracted_data, WeatherData):
weather_data = extracted_data
logger.info(f"Weather data extracted: {weather_data.location}")
yield update
# Stream updates as ChatKit events with interception
async for event in stream_agent_response(
intercept_stream(),
thread_id=thread.id,
):
yield event
# If weather data was collected during the tool call, create a widget
if isinstance(weather_data, WeatherData):
logger.info(f"Creating weather widget for: {weather_data.location}")
# Create weather widget
widget = render_weather_widget(weather_data)
copy_text = weather_widget_copy_text(weather_data)
# Stream the widget
async for widget_event in stream_widget(thread_id=thread.id, widget=widget, copy_text=copy_text):
yield widget_event
logger.debug("Weather widget created successfully from action")
else:
logger.warning("No weather data available to create widget after action")
# FastAPI application setup
app = FastAPI(
title="ChatKit Weather & Vision Agent",
description="Weather and image analysis assistant powered by Agent Framework and Azure OpenAI",
version="1.0.0",
)
# Add CORS middleware to allow frontend connections
app.add_middleware(
CORSMiddleware,
allow_origins=["*"], # In production, specify exact origins
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
# Initialize data store and ChatKit server
logger.info("Initializing application components")
data_store = SQLiteStore(db_path=DATABASE_PATH)
attachment_store = FileBasedAttachmentStore(
uploads_dir=UPLOADS_DIRECTORY,
base_url=SERVER_BASE_URL,
data_store=data_store,
)
chatkit_server = WeatherChatKitServer(data_store, attachment_store)
logger.info("Application initialization complete")
@app.post("/chatkit")
async def chatkit_endpoint(request: Request):
"""Main ChatKit endpoint that handles all ChatKit requests.
This endpoint follows the ChatKit server protocol and handles both
streaming and non-streaming responses.
"""
logger.debug(f"Received ChatKit request from {request.client}")
request_body = await request.body()
# Create context following the working examples pattern
context = {"request": request}
try:
# Process the request using ChatKit server
result = await chatkit_server.process(request_body, context)
# Return appropriate response type
if hasattr(result, "__aiter__"): # StreamingResult
logger.debug("Returning streaming response")
return StreamingResponse(result, media_type="text/event-stream") # type: ignore[arg-type]
# NonStreamingResult
logger.debug("Returning non-streaming response")
return Response(content=result.json, media_type="application/json") # type: ignore[union-attr]
except Exception as e:
logger.error(f"Error processing ChatKit request: {e}", exc_info=True)
raise
@app.post("/upload/{attachment_id}")
async def upload_file(attachment_id: str, file: UploadFile = File(...)):
"""Handle file upload for two-phase upload.
The client POSTs the file bytes here after creating the attachment
via the ChatKit attachments.create endpoint.
"""
logger.info(f"Receiving file upload for attachment: {attachment_id}")
try:
# Read file contents
contents = await file.read()
# Save to disk
file_path = attachment_store.get_file_path(attachment_id)
file_path.write_bytes(contents)
logger.info(f"Saved {len(contents)} bytes to {file_path}")
# Load the attachment metadata from the data store
attachment = await data_store.load_attachment(attachment_id, {"user_id": DEFAULT_USER_ID})
# Clear the upload_url since upload is complete
attachment.upload_url = None
# Save the updated attachment back to the store
await data_store.save_attachment(attachment, {"user_id": DEFAULT_USER_ID})
# Return the attachment metadata as JSON
return JSONResponse(content=attachment.model_dump(mode="json"))
except Exception as e:
logger.error(f"Error uploading file for attachment {attachment_id}: {e}", exc_info=True)
return JSONResponse(status_code=500, content={"error": "Failed to upload file."})
@app.get("/preview/{attachment_id}")
async def preview_image(attachment_id: str):
"""Serve image preview/thumbnail.
For simplicity, this serves the full image. In production, you should
generate and cache thumbnails.
"""
logger.debug(f"Serving preview for attachment: {attachment_id}")
try:
file_path = attachment_store.get_file_path(attachment_id)
if not file_path.exists():
return JSONResponse(status_code=404, content={"error": "File not found"})
# Determine media type from file extension or attachment metadata
# For simplicity, we'll try to load from the store
try:
attachment = await data_store.load_attachment(attachment_id, {"user_id": DEFAULT_USER_ID})
media_type = attachment.mime_type
except Exception:
# Default to binary if we can't determine
media_type = "application/octet-stream"
return FileResponse(file_path, media_type=media_type)
except Exception as e:
logger.error(f"Error serving preview for attachment {attachment_id}: {e}", exc_info=True)
return JSONResponse(status_code=500, content={"error": "Error serving preview for attachment."})
if __name__ == "__main__":
# Run the server
logger.info(f"Starting ChatKit Weather Agent server on {SERVER_HOST}:{SERVER_PORT}")
uvicorn.run(app, host=SERVER_HOST, port=SERVER_PORT, log_level="info")


@@ -0,0 +1,119 @@
# Copyright (c) Microsoft. All rights reserved.
"""File-based AttachmentStore implementation for ChatKit.
This module provides a simple AttachmentStore implementation that stores
uploaded files on the local filesystem. In production, you should use
cloud storage like S3, Azure Blob Storage, or Google Cloud Storage.
"""
from pathlib import Path
from typing import TYPE_CHECKING, Any
from chatkit.store import AttachmentStore
from chatkit.types import Attachment, AttachmentCreateParams, FileAttachment, ImageAttachment
from pydantic import AnyUrl
if TYPE_CHECKING:
from store import SQLiteStore
class FileBasedAttachmentStore(AttachmentStore[dict[str, Any]]):
"""File-based AttachmentStore that stores files on local disk.
This implementation stores uploaded files in a local directory and provides
upload URLs that point to the FastAPI upload endpoint. It supports both
image and file attachments.
Features:
- Stores files in a local uploads directory
- Generates upload URLs for two-phase upload
- Generates preview URLs for images
- Proper cleanup on deletion
Note: This is for demonstration purposes. In production, use cloud storage
with signed URLs for better security and scalability.
"""
def __init__(
self,
uploads_dir: str = "./uploads",
base_url: str = "http://localhost:8001",
data_store: "SQLiteStore | None" = None,
):
"""Initialize the file-based attachment store.
Args:
uploads_dir: Directory where uploaded files will be stored
base_url: Base URL for generating upload and preview URLs
data_store: Optional data store to persist attachment metadata
"""
self.uploads_dir = Path(uploads_dir)
self.base_url = base_url.rstrip("/")
self.data_store = data_store
# Create uploads directory if it doesn't exist
self.uploads_dir.mkdir(parents=True, exist_ok=True)
def get_file_path(self, attachment_id: str) -> Path:
"""Get the filesystem path for an attachment."""
return self.uploads_dir / attachment_id
async def delete_attachment(self, attachment_id: str, context: dict[str, Any]) -> None:
"""Delete an attachment and its file from disk."""
file_path = self.get_file_path(attachment_id)
if file_path.exists():
file_path.unlink()
async def create_attachment(self, input: AttachmentCreateParams, context: dict[str, Any]) -> Attachment:
"""Create an attachment with upload URL for two-phase upload.
This creates the attachment metadata and returns upload URLs that
the client will use to POST the actual file bytes.
"""
# Generate unique ID for this attachment
attachment_id = self.generate_attachment_id(input.mime_type, context)
# Generate upload URL that points to our FastAPI upload endpoint
upload_url = f"{self.base_url}/upload/{attachment_id}"
# Create appropriate attachment type based on MIME type
if input.mime_type.startswith("image/"):
# For images, also provide a preview URL
preview_url = f"{self.base_url}/preview/{attachment_id}"
attachment = ImageAttachment(
id=attachment_id,
type="image",
mime_type=input.mime_type,
name=input.name,
upload_url=AnyUrl(upload_url),
preview_url=AnyUrl(preview_url),
)
else:
# For files, just provide upload URL
attachment = FileAttachment(
id=attachment_id,
type="file",
mime_type=input.mime_type,
name=input.name,
upload_url=AnyUrl(upload_url),
)
# Save attachment metadata to data store so it's available during upload
if self.data_store is not None:
await self.data_store.save_attachment(attachment, context)
return attachment
async def read_attachment_bytes(self, attachment_id: str) -> bytes:
"""Read the raw bytes of an uploaded attachment.
This is used by the ThreadItemConverter to create base64-encoded
content for sending to the Agent Framework.
"""
file_path = self.get_file_path(attachment_id)
if not file_path.exists():
raise FileNotFoundError(f"Attachment {attachment_id} not found on disk")
return file_path.read_bytes()


@@ -0,0 +1,57 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>ChatKit + Agent Framework Demo</title>
<!--
IMPORTANT: The ChatKit UI library is loaded from OpenAI's CDN and cannot be self-hosted.
This requires internet connectivity and is not suitable for air-gapped environments.
See: https://github.com/openai/chatkit-js/issues/57
-->
<script src="https://cdn.platform.openai.com/deployments/chatkit/chatkit.js"></script>
<style>
* {
margin: 0;
padding: 0;
box-sizing: border-box;
}
body {
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, 'Helvetica Neue', Arial, sans-serif;
height: 100vh;
display: flex;
flex-direction: column;
}
header {
padding: 1rem;
background: #f5f5f5;
border-bottom: 1px solid #ddd;
}
h1 {
font-size: 1.5rem;
margin-bottom: 0.5rem;
}
p {
color: #666;
font-size: 0.9rem;
}
#root {
flex: 1;
overflow: hidden;
}
</style>
</head>
<body>
<header>
<h1>ChatKit + Agent Framework Demo</h1>
<p>Simple weather assistant powered by Agent Framework and ChatKit</p>
</header>
<div id="root"></div>
<script type="module" src="/src/main.tsx"></script>
</body>
</html>

File diff suppressed because it is too large


@@ -0,0 +1,27 @@
{
"name": "chatkit-agent-framework-demo",
"version": "0.1.0",
"private": true,
"type": "module",
"scripts": {
"dev": "vite",
"build": "vite build",
"preview": "vite preview"
},
"engines": {
"node": ">=18.18",
"npm": ">=9"
},
"dependencies": {
"@openai/chatkit-react": "^0",
"react": "^19.2.0",
"react-dom": "^19.2.0"
},
"devDependencies": {
"@types/react": "^19.2.0",
"@types/react-dom": "^19.2.0",
"@vitejs/plugin-react-swc": "^3.5.0",
"typescript": "^5.4.0",
"vite": "^7.1.12"
}
}


@@ -0,0 +1,39 @@
import { ChatKit, useChatKit } from "@openai/chatkit-react";
const CHATKIT_API_URL = "/chatkit";
// Domain key for ChatKit integration
// - Local development: Uses default "domain_pk_localhost_dev"
// - Production: Register your domain at https://platform.openai.com/settings/organization/security/domain-allowlist
// and set VITE_CHATKIT_API_DOMAIN_KEY in your .env file
// See: https://github.com/openai/chatkit-js/issues/76
const CHATKIT_API_DOMAIN_KEY =
import.meta.env.VITE_CHATKIT_API_DOMAIN_KEY ?? "domain_pk_localhost_dev";
export default function App() {
const chatkit = useChatKit({
api: {
url: CHATKIT_API_URL,
domainKey: CHATKIT_API_DOMAIN_KEY,
uploadStrategy: { type: "two_phase" },
},
startScreen: {
greeting: "Hello! I'm your weather and image analysis assistant. Ask me about the weather in any location or upload images for me to analyze.",
prompts: [
{ label: "Weather in New York", prompt: "What's the weather in New York?" },
{ label: "Select City to Get Weather", prompt: "Show me the city selector for weather" },
{ label: "Current Time", prompt: "What time is it?" },
{ label: "Analyze an Image", prompt: "I'll upload an image for you to analyze" },
],
},
composer: {
placeholder: "Ask about weather or upload an image...",
attachments: {
enabled: true,
accept: { "image/*": [".png", ".jpg", ".jpeg", ".gif", ".webp"] },
},
},
});
return <ChatKit control={chatkit.control} style={{ height: "100%" }} />;
}


@@ -0,0 +1,15 @@
import { StrictMode } from "react";
import { createRoot } from "react-dom/client";
import App from "./App";
const container = document.getElementById("root");
if (!container) {
throw new Error("Root element with id 'root' not found");
}
createRoot(container).render(
<StrictMode>
<App />
</StrictMode>,
);


@@ -0,0 +1 @@
/// <reference types="vite/client" />


@@ -0,0 +1,21 @@
{
"compilerOptions": {
"target": "ES2020",
"useDefineForClassFields": true,
"lib": ["ES2020", "DOM", "DOM.Iterable"],
"module": "ESNext",
"skipLibCheck": true,
"moduleResolution": "bundler",
"allowImportingTsExtensions": true,
"resolveJsonModule": true,
"isolatedModules": true,
"noEmit": true,
"jsx": "react-jsx",
"strict": true,
"noUnusedLocals": true,
"noUnusedParameters": true,
"noFallthroughCasesInSwitch": true
},
"include": ["src"],
"references": [{ "path": "./tsconfig.node.json" }]
}


@@ -0,0 +1,10 @@
{
"compilerOptions": {
"composite": true,
"skipLibCheck": true,
"module": "ESNext",
"moduleResolution": "bundler",
"allowSyntheticDefaultImports": true
},
"include": ["vite.config.ts"]
}


@@ -0,0 +1,24 @@
import { defineConfig } from "vite";
import react from "@vitejs/plugin-react-swc";
const backendTarget = process.env.BACKEND_URL ?? "http://127.0.0.1:8001";
export default defineConfig({
plugins: [react()],
server: {
host: "0.0.0.0",
port: 5171,
proxy: {
"/chatkit": {
target: backendTarget,
changeOrigin: true,
},
},
// For production deployments, you need to add your public domains to this list
allowedHosts: [
// You can remove these examples added just to demonstrate how to configure the allowlist
".ngrok.io",
".trycloudflare.com",
],
},
});


@@ -0,0 +1,348 @@
# Copyright (c) Microsoft. All rights reserved.
"""SQLite-based store implementation for ChatKit data persistence.
This module provides a complete Store implementation using SQLite for data persistence.
It includes proper thread safety, user isolation, and follows the ChatKit Store protocol.
"""
import sqlite3
import uuid
from typing import Any
from chatkit.store import NotFoundError, Store
from chatkit.types import (
Attachment,
Page,
ThreadItem,
ThreadMetadata,
)
from pydantic import BaseModel
class ThreadData(BaseModel):
"""Model for serializing thread data to SQLite."""
thread: ThreadMetadata
class ItemData(BaseModel):
"""Model for serializing thread item data to SQLite."""
item: ThreadItem
class AttachmentData(BaseModel):
"""Model for serializing attachment data to SQLite."""
attachment: Attachment
class SQLiteStore(Store[dict[str, Any]]):
"""SQLite-based store implementation for ChatKit data.
This implementation follows the pattern from the ChatKit Python tests
and provides persistent storage for threads, messages, and attachments.
Features:
    - SQLite connections shareable across threads, with WAL mode for concurrent access
- User isolation for multi-tenant support
- Proper error handling and transaction management
- Complete Store protocol implementation
Note: This is for demonstration purposes. In production, you should
implement proper error handling, connection pooling, and migration strategies.
"""
def __init__(self, db_path: str | None = None):
self.db_path = db_path or "chatkit_demo.db" # Use file-based DB for demo
self._create_tables()
def _create_connection(self):
        # Allow the connection to be used across threads and enable WAL mode
        # for better concurrent reads/writes
conn = sqlite3.connect(self.db_path, check_same_thread=False)
conn.execute("PRAGMA journal_mode=WAL")
return conn
def _create_tables(self):
with self._create_connection() as conn:
# Create threads table
conn.execute(
"""CREATE TABLE IF NOT EXISTS threads (
id TEXT PRIMARY KEY,
user_id TEXT NOT NULL,
created_at TEXT NOT NULL,
data TEXT NOT NULL
)"""
)
# Create items table
conn.execute(
"""CREATE TABLE IF NOT EXISTS items (
id TEXT PRIMARY KEY,
thread_id TEXT NOT NULL,
user_id TEXT NOT NULL,
created_at TEXT NOT NULL,
data TEXT NOT NULL
)"""
)
# Create attachments table
conn.execute(
"""CREATE TABLE IF NOT EXISTS attachments (
id TEXT PRIMARY KEY,
user_id TEXT NOT NULL,
data TEXT NOT NULL
)"""
)
conn.commit()
def generate_thread_id(self, context: dict[str, Any]) -> str:
return f"thr_{uuid.uuid4().hex[:8]}"
def generate_item_id(
self,
item_type: str,
thread: ThreadMetadata,
context: dict[str, Any],
) -> str:
prefix_map = {
"message": "msg",
"tool_call": "tc",
"task": "tsk",
"workflow": "wf",
"attachment": "atc",
}
prefix = prefix_map.get(item_type, "itm")
return f"{prefix}_{uuid.uuid4().hex[:8]}"
async def load_thread(self, thread_id: str, context: dict[str, Any]) -> ThreadMetadata:
user_id = context.get("user_id", "demo_user")
with self._create_connection() as conn:
cursor = conn.execute(
"SELECT data FROM threads WHERE id = ? AND user_id = ?",
(thread_id, user_id),
).fetchone()
if cursor is None:
raise NotFoundError(f"Thread {thread_id} not found")
thread_data = ThreadData.model_validate_json(cursor[0])
return thread_data.thread
async def save_thread(self, thread: ThreadMetadata, context: dict[str, Any]) -> None:
user_id = context.get("user_id", "demo_user")
with self._create_connection() as conn:
thread_data = ThreadData(thread=thread)
# Replace existing thread data
conn.execute(
"DELETE FROM threads WHERE id = ? AND user_id = ?",
(thread.id, user_id),
)
conn.execute(
"INSERT INTO threads (id, user_id, created_at, data) VALUES (?, ?, ?, ?)",
(
thread.id,
user_id,
thread.created_at.isoformat(),
thread_data.model_dump_json(),
),
)
conn.commit()
async def load_thread_items(
self,
thread_id: str,
after: str | None,
limit: int,
order: str,
context: dict[str, Any],
) -> Page[ThreadItem]:
user_id = context.get("user_id", "demo_user")
with self._create_connection() as conn:
created_after: str | None = None
if after:
after_cursor = conn.execute(
"SELECT created_at FROM items WHERE id = ? AND user_id = ?",
(after, user_id),
).fetchone()
if after_cursor is None:
raise NotFoundError(f"Item {after} not found")
created_after = after_cursor[0]
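            # Keyset pagination: filter on the cursor item's created_at and fetch
            # limit + 1 rows so has_more can be detected without a second query.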
query = """
SELECT data FROM items
WHERE thread_id = ? AND user_id = ?
"""
params: list[Any] = [thread_id, user_id]
if created_after:
query += " AND created_at > ?" if order == "asc" else " AND created_at < ?"
params.append(created_after)
query += f" ORDER BY created_at {order} LIMIT ?"
params.append(limit + 1)
items_cursor = conn.execute(query, params).fetchall()
items = [ItemData.model_validate_json(row[0]).item for row in items_cursor]
has_more = len(items) > limit
if has_more:
items = items[:limit]
return Page[ThreadItem](data=items, has_more=has_more, after=items[-1].id if items else None)
async def save_attachment(self, attachment: Attachment, context: dict[str, Any]) -> None:
user_id = context.get("user_id", "demo_user")
with self._create_connection() as conn:
attachment_data = AttachmentData(attachment=attachment)
conn.execute(
"INSERT OR REPLACE INTO attachments (id, user_id, data) VALUES (?, ?, ?)",
(
attachment.id,
user_id,
attachment_data.model_dump_json(),
),
)
conn.commit()
async def load_attachment(self, attachment_id: str, context: dict[str, Any]) -> Attachment:
user_id = context.get("user_id", "demo_user")
with self._create_connection() as conn:
cursor = conn.execute(
"SELECT data FROM attachments WHERE id = ? AND user_id = ?",
(attachment_id, user_id),
).fetchone()
if cursor is None:
raise NotFoundError(f"Attachment {attachment_id} not found")
attachment_data = AttachmentData.model_validate_json(cursor[0])
return attachment_data.attachment
async def delete_attachment(self, attachment_id: str, context: dict[str, Any]) -> None:
user_id = context.get("user_id", "demo_user")
with self._create_connection() as conn:
conn.execute(
"DELETE FROM attachments WHERE id = ? AND user_id = ?",
(attachment_id, user_id),
)
conn.commit()
async def load_threads(
self,
limit: int,
after: str | None,
order: str,
context: dict[str, Any],
) -> Page[ThreadMetadata]:
user_id = context.get("user_id", "demo_user")
with self._create_connection() as conn:
created_after: str | None = None
if after:
after_cursor = conn.execute(
"SELECT created_at FROM threads WHERE id = ? AND user_id = ?",
(after, user_id),
).fetchone()
if after_cursor is None:
raise NotFoundError(f"Thread {after} not found")
created_after = after_cursor[0]
query = "SELECT data FROM threads WHERE user_id = ?"
params: list[Any] = [user_id]
if created_after:
query += " AND created_at > ?" if order == "asc" else " AND created_at < ?"
params.append(created_after)
query += f" ORDER BY created_at {order} LIMIT ?"
params.append(limit + 1)
threads_cursor = conn.execute(query, params).fetchall()
threads = [ThreadData.model_validate_json(row[0]).thread for row in threads_cursor]
has_more = len(threads) > limit
if has_more:
threads = threads[:limit]
return Page[ThreadMetadata](data=threads, has_more=has_more, after=threads[-1].id if threads else None)
async def add_thread_item(self, thread_id: str, item: ThreadItem, context: dict[str, Any]) -> None:
user_id = context.get("user_id", "demo_user")
with self._create_connection() as conn:
item_data = ItemData(item=item)
conn.execute(
"INSERT INTO items (id, thread_id, user_id, created_at, data) VALUES (?, ?, ?, ?, ?)",
(
item.id,
thread_id,
user_id,
item.created_at.isoformat(),
item_data.model_dump_json(),
),
)
conn.commit()
async def save_item(self, thread_id: str, item: ThreadItem, context: dict[str, Any]) -> None:
user_id = context.get("user_id", "demo_user")
with self._create_connection() as conn:
item_data = ItemData(item=item)
conn.execute(
"UPDATE items SET data = ? WHERE id = ? AND thread_id = ? AND user_id = ?",
(
item_data.model_dump_json(),
item.id,
thread_id,
user_id,
),
)
conn.commit()
async def load_item(self, thread_id: str, item_id: str, context: dict[str, Any]) -> ThreadItem:
user_id = context.get("user_id", "demo_user")
with self._create_connection() as conn:
cursor = conn.execute(
"SELECT data FROM items WHERE id = ? AND thread_id = ? AND user_id = ?",
(item_id, thread_id, user_id),
).fetchone()
if cursor is None:
raise NotFoundError(f"Item {item_id} not found in thread {thread_id}")
item_data = ItemData.model_validate_json(cursor[0])
return item_data.item
async def delete_thread(self, thread_id: str, context: dict[str, Any]) -> None:
user_id = context.get("user_id", "demo_user")
with self._create_connection() as conn:
conn.execute(
"DELETE FROM threads WHERE id = ? AND user_id = ?",
(thread_id, user_id),
)
conn.execute(
"DELETE FROM items WHERE thread_id = ? AND user_id = ?",
(thread_id, user_id),
)
conn.commit()
async def delete_thread_item(self, thread_id: str, item_id: str, context: dict[str, Any]) -> None:
user_id = context.get("user_id", "demo_user")
with self._create_connection() as conn:
conn.execute(
"DELETE FROM items WHERE id = ? AND thread_id = ? AND user_id = ?",
(item_id, thread_id, user_id),
)
conn.commit()
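# Minimal usage sketch (hypothetical IDs; assumes an async caller):
#
#     store = SQLiteStore("chatkit_demo.db")
#     context = {"user_id": "demo_user"}
#     page = await store.load_threads(limit=10, after=None, order="desc", context=context)
#     for thread in page.data:
#         items = await store.load_thread_items(thread.id, None, 50, "asc", context)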


@@ -0,0 +1,436 @@
# Copyright (c) Microsoft. All rights reserved.
"""Weather widget rendering for ChatKit integration sample."""
import base64
from dataclasses import dataclass
from chatkit.actions import ActionConfig
from chatkit.widgets import Box, Button, Card, Col, Image, Row, Text, Title, WidgetRoot
WEATHER_ICON_COLOR = "#1D4ED8"
WEATHER_ICON_ACCENT = "#DBEAFE"
# Popular cities for the selector
POPULAR_CITIES = [
{"value": "seattle", "label": "Seattle, WA", "description": "Pacific Northwest"},
{"value": "new_york", "label": "New York, NY", "description": "East Coast"},
{"value": "san_francisco", "label": "San Francisco, CA", "description": "Bay Area"},
{"value": "chicago", "label": "Chicago, IL", "description": "Midwest"},
{"value": "miami", "label": "Miami, FL", "description": "Southeast"},
{"value": "austin", "label": "Austin, TX", "description": "Southwest"},
{"value": "boston", "label": "Boston, MA", "description": "New England"},
{"value": "denver", "label": "Denver, CO", "description": "Mountain West"},
{"value": "portland", "label": "Portland, OR", "description": "Pacific Northwest"},
{"value": "atlanta", "label": "Atlanta, GA", "description": "Southeast"},
]
# Mapping from city values to display names for weather queries
CITY_VALUE_TO_NAME = {city["value"]: city["label"] for city in POPULAR_CITIES}
def _sun_svg() -> str:
"""Generate SVG for sunny weather icon."""
color = WEATHER_ICON_COLOR
accent = WEATHER_ICON_ACCENT
return (
'<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 64 64" fill="none">'
f'<circle cx="32" cy="32" r="13" fill="{accent}" stroke="{color}" stroke-width="3"/>'
f'<g stroke="{color}" stroke-width="3" stroke-linecap="round">'
'<line x1="32" y1="8" x2="32" y2="16"/>'
'<line x1="32" y1="48" x2="32" y2="56"/>'
'<line x1="8" y1="32" x2="16" y2="32"/>'
'<line x1="48" y1="32" x2="56" y2="32"/>'
'<line x1="14.93" y1="14.93" x2="20.55" y2="20.55"/>'
'<line x1="43.45" y1="43.45" x2="49.07" y2="49.07"/>'
'<line x1="14.93" y1="49.07" x2="20.55" y2="43.45"/>'
'<line x1="43.45" y1="20.55" x2="49.07" y2="14.93"/>'
"</g>"
"</svg>"
)
def _cloud_svg() -> str:
"""Generate SVG for cloudy weather icon."""
color = WEATHER_ICON_COLOR
accent = WEATHER_ICON_ACCENT
return (
'<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 64 64" fill="none">'
f'<path d="M22 46H44C50.075 46 55 41.075 55 35S50.075 24 44 24H42.7C41.2 16.2 34.7 10 26.5 10 18 10 11.6 16.1 11 24.3 6.5 25.6 3 29.8 3 35s4.925 11 11 11h8Z" '
f'fill="{accent}" stroke="{color}" stroke-width="3" stroke-linejoin="round"/>'
"</svg>"
)
def _rain_svg() -> str:
"""Generate SVG for rainy weather icon."""
color = WEATHER_ICON_COLOR
accent = WEATHER_ICON_ACCENT
return (
'<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 64 64" fill="none">'
f'<path d="M22 40H44C50.075 40 55 35.075 55 29S50.075 18 44 18H42.7C41.2 10.2 34.7 4 26.5 4 18 4 11.6 10.1 11 18.3 6.5 19.6 3 23.8 3 29s4.925 11 11 11h8Z" '
f'fill="{accent}" stroke="{color}" stroke-width="3" stroke-linejoin="round"/>'
f'<g stroke="{color}" stroke-width="3" stroke-linecap="round">'
'<line x1="20" y1="48" x2="24" y2="56"/>'
'<line x1="30" y1="50" x2="34" y2="58"/>'
'<line x1="40" y1="48" x2="44" y2="56"/>'
"</g>"
"</svg>"
)
def _storm_svg() -> str:
"""Generate SVG for stormy weather icon."""
color = WEATHER_ICON_COLOR
accent = WEATHER_ICON_ACCENT
return (
'<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 64 64" fill="none">'
f'<path d="M22 40H44C50.075 40 55 35.075 55 29S50.075 18 44 18H42.7C41.2 10.2 34.7 4 26.5 4 18 4 11.6 10.1 11 18.3 6.5 19.6 3 23.8 3 29s4.925 11 11 11h8Z" '
f'fill="{accent}" stroke="{color}" stroke-width="3" stroke-linejoin="round"/>'
f'<path d="M34 46L28 56H34L30 64L42 50H36L40 46Z" '
f'fill="{color}" stroke="{color}" stroke-width="2" stroke-linejoin="round"/>'
"</svg>"
)
def _snow_svg() -> str:
"""Generate SVG for snowy weather icon."""
color = WEATHER_ICON_COLOR
accent = WEATHER_ICON_ACCENT
return (
'<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 64 64" fill="none">'
f'<path d="M22 40H44C50.075 40 55 35.075 55 29S50.075 18 44 18H42.7C41.2 10.2 34.7 4 26.5 4 18 4 11.6 10.1 11 18.3 6.5 19.6 3 23.8 3 29s4.925 11 11 11h8Z" '
f'fill="{accent}" stroke="{color}" stroke-width="3" stroke-linejoin="round"/>'
f'<g stroke="{color}" stroke-width="2" stroke-linecap="round">'
'<line x1="20" y1="48" x2="20" y2="56"/>'
'<line x1="17" y1="51" x2="23" y2="53"/>'
'<line x1="17" y1="53" x2="23" y2="51"/>'
'<line x1="36" y1="48" x2="36" y2="56"/>'
'<line x1="33" y1="51" x2="39" y2="53"/>'
'<line x1="33" y1="53" x2="39" y2="51"/>'
"</g>"
"</svg>"
)
def _fog_svg() -> str:
"""Generate SVG for foggy weather icon."""
color = WEATHER_ICON_COLOR
accent = WEATHER_ICON_ACCENT
return (
'<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 64 64" fill="none">'
f'<path d="M22 40H44C50.075 40 55 35.075 55 29S50.075 18 44 18H42.7C41.2 10.2 34.7 4 26.5 4 18 4 11.6 10.1 11 18.3 6.5 19.6 3 23.8 3 29s4.925 11 11 11h8Z" '
f'fill="{accent}" stroke="{color}" stroke-width="3" stroke-linejoin="round"/>'
f'<g stroke="{color}" stroke-width="3" stroke-linecap="round">'
'<line x1="18" y1="50" x2="42" y2="50"/>'
'<line x1="24" y1="56" x2="48" y2="56"/>'
"</g>"
"</svg>"
)
def _encode_svg(svg: str) -> str:
"""Encode SVG as base64 data URI."""
encoded = base64.b64encode(svg.encode("utf-8")).decode("ascii")
return f"data:image/svg+xml;base64,{encoded}"
# Weather condition to icon mapping
WEATHER_ICONS = {
"sunny": _encode_svg(_sun_svg()),
"cloudy": _encode_svg(_cloud_svg()),
"rainy": _encode_svg(_rain_svg()),
"stormy": _encode_svg(_storm_svg()),
"snowy": _encode_svg(_snow_svg()),
"foggy": _encode_svg(_fog_svg()),
}
DEFAULT_WEATHER_ICON = _encode_svg(_cloud_svg())
@dataclass
class WeatherData:
"""Weather data container."""
location: str
condition: str
temperature: int
humidity: int
wind_speed: int
def render_weather_widget(data: WeatherData) -> WidgetRoot:
"""Render a weather widget from weather data.
Args:
data: WeatherData containing weather information
Returns:
A ChatKit WidgetRoot (Card) displaying the weather information
"""
# Get weather icon
weather_icon_src = WEATHER_ICONS.get(data.condition.lower(), DEFAULT_WEATHER_ICON)
# Build the widget
header = Box(
padding=5,
background="surface-tertiary",
children=[
Row(
justify="between",
align="center",
children=[
Col(
align="start",
gap=1,
children=[
Text(
value=data.location,
size="lg",
weight="semibold",
),
Text(
value="Current conditions",
color="tertiary",
size="xs",
),
],
),
Box(
padding=3,
radius="full",
background="blue-100",
children=[
Image(
src=weather_icon_src,
alt=data.condition,
size=28,
fit="contain",
)
],
),
],
),
Row(
align="start",
gap=4,
children=[
Title(
value=f"{data.temperature}°C",
size="lg",
weight="semibold",
),
Col(
align="start",
gap=1,
children=[
Text(
value=data.condition.title(),
color="secondary",
size="sm",
weight="medium",
),
],
),
],
),
],
)
# Details section
details = Box(
padding=5,
gap=4,
children=[
Text(value="Weather details", weight="semibold", size="sm"),
Row(
gap=3,
wrap="wrap",
children=[
_detail_chip("Humidity", f"{data.humidity}%"),
_detail_chip("Wind", f"{data.wind_speed} km/h"),
],
),
],
)
return Card(
key="weather",
padding=0,
children=[header, details],
)
def _detail_chip(label: str, value: str) -> Box:
"""Create a detail chip widget component."""
return Box(
padding=3,
radius="xl",
background="surface-tertiary",
width=150,
minWidth=150,
maxWidth=150,
minHeight=80,
maxHeight=80,
flex="0 0 auto",
children=[
Col(
align="stretch",
gap=2,
children=[
Text(value=label, size="xs", weight="medium", color="tertiary"),
Row(
justify="center",
margin={"top": 2},
children=[Text(value=value, weight="semibold", size="lg")],
),
],
)
],
)
def weather_widget_copy_text(data: WeatherData) -> str:
"""Generate plain text representation of weather data.
Args:
data: WeatherData containing weather information
Returns:
Plain text description for copy/paste functionality
"""
return (
f"Weather in {data.location}:\n"
f"• Condition: {data.condition.title()}\n"
f"• Temperature: {data.temperature}°C\n"
f"• Humidity: {data.humidity}%\n"
f"• Wind: {data.wind_speed} km/h"
)
def render_city_selector_widget() -> WidgetRoot:
"""Render an interactive city selector widget.
This widget displays popular cities as a visual selection interface.
Users can click or ask about any city to get weather information.
Returns:
A ChatKit WidgetRoot (Card) with city selection display
"""
# Create location icon SVG
location_icon = _encode_svg(
'<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 64 64" fill="none">'
f'<path d="M32 8c-8.837 0-16 7.163-16 16 0 12 16 32 16 32s16-20 16-32c0-8.837-7.163-16-16-16z" '
f'fill="{WEATHER_ICON_ACCENT}" stroke="{WEATHER_ICON_COLOR}" stroke-width="3" stroke-linejoin="round"/>'
f'<circle cx="32" cy="24" r="6" fill="{WEATHER_ICON_COLOR}"/>'
"</svg>"
)
# Header section
header = Box(
padding=5,
background="surface-tertiary",
children=[
Row(
gap=3,
align="center",
children=[
Box(
padding=3,
radius="full",
background="blue-100",
children=[
Image(
src=location_icon,
alt="Location",
size=28,
fit="contain",
)
],
),
Col(
align="start",
gap=1,
children=[
Title(
value="Popular Cities",
size="md",
weight="semibold",
),
Text(
value="Select a city or ask about any location",
color="tertiary",
size="xs",
),
],
),
],
),
],
)
# Create city chips in a grid layout
city_chips: list[Button] = []
for city in POPULAR_CITIES:
# Create a button that sends an action to query weather for the selected city
chip = Button(
label=city["label"],
variant="outline",
size="md",
onClickAction=ActionConfig(
type="city_selected",
payload={"city_value": city["value"], "city_label": city["label"]},
handler="server", # Handle on server-side
),
)
city_chips.append(chip)
# Arrange in rows of 3
city_rows: list[Row] = []
for i in range(0, len(city_chips), 3):
row_chips: list[Button] = city_chips[i : i + 3]
city_rows.append(
Row(
gap=3,
wrap="wrap",
justify="start",
children=list(row_chips), # Convert to generic list
)
)
# Cities display section
cities_section = Box(
padding=5,
gap=3,
children=[
*city_rows,
Box(
padding=3,
radius="md",
background="blue-50",
children=[
Text(
value="💡 Click any city to get its weather, or ask about any other location!",
size="xs",
color="secondary",
),
],
),
],
)
return Card(
key="city_selector",
padding=0,
children=[header, cities_section],
)
def city_selector_copy_text() -> str:
"""Generate plain text representation of city selector.
Returns:
Plain text description for copy/paste functionality
"""
    cities_list = "\n".join(city["label"] for city in POPULAR_CITIES)
return f"Popular cities (click to get weather):\n{cities_list}\n\nYou can also ask about weather in any other location!"


@@ -0,0 +1,16 @@
FROM python:3.12-slim
WORKDIR /app
COPY . user_agent/
WORKDIR /app/user_agent
RUN if [ -f requirements.txt ]; then \
pip install -r requirements.txt; \
else \
echo "No requirements.txt found"; \
fi
EXPOSE 8088
CMD ["python", "main.py"]


@@ -0,0 +1,30 @@
# Unique identifier/name for this agent
name: agent-with-hosted-mcp
# Brief description of what this agent does
description: >
An AI agent that uses Azure OpenAI with a Hosted Model Context Protocol (MCP) server.
The agent answers questions by searching Microsoft Learn documentation using MCP tools.
metadata:
# Categorization tags for organizing and discovering agents
authors:
- Microsoft Agent Framework Team
tags:
- Azure AI AgentServer
- Microsoft Agent Framework
- Model Context Protocol
- MCP
template:
name: agent-with-hosted-mcp
# The type of agent - "hosted" for HOBO, "container" for COBO
kind: hosted
protocols:
- protocol: responses
environment_variables:
- name: AZURE_OPENAI_ENDPOINT
value: ${AZURE_OPENAI_ENDPOINT}
- name: AZURE_OPENAI_CHAT_DEPLOYMENT_NAME
value: "{{chat}}"
resources:
- kind: model
id: gpt-4o-mini
name: chat


@@ -0,0 +1,25 @@
# Copyright (c) Microsoft. All rights reserved.
from agent_framework import HostedMCPTool
from agent_framework.azure import AzureOpenAIChatClient
from azure.ai.agentserver.agentframework import from_agent_framework # pyright: ignore[reportUnknownVariableType]
from azure.identity import DefaultAzureCredential
def main():
# Create an Agent using the Azure OpenAI Chat Client with a MCP Tool that connects to Microsoft Learn MCP
agent = AzureOpenAIChatClient(credential=DefaultAzureCredential()).as_agent(
name="DocsAgent",
instructions="You are a helpful assistant that can help with microsoft documentation questions.",
tools=HostedMCPTool(
name="Microsoft Learn MCP",
url="https://learn.microsoft.com/api/mcp",
),
)
# Run the agent as a hosted agent
from_agent_framework(agent).run()
if __name__ == "__main__":
main()


@@ -0,0 +1,2 @@
azure-ai-agentserver-agentframework==1.0.0b3
agent-framework


@@ -0,0 +1,16 @@
FROM python:3.12-slim
WORKDIR /app
COPY . user_agent/
WORKDIR /app/user_agent
RUN if [ -f requirements.txt ]; then \
pip install -r requirements.txt; \
else \
echo "No requirements.txt found"; \
fi
EXPOSE 8088
CMD ["python", "main.py"]


@@ -0,0 +1,33 @@
# Unique identifier/name for this agent
name: agent-with-text-search-rag
# Brief description of what this agent does
description: >
An AI agent that uses a ContextProvider for retrieval augmented generation (RAG) capabilities.
The agent runs searches against an external knowledge base before each model invocation and
injects the results into the model context. It can answer questions about Contoso Outdoors
policies and products, including return policies, refunds, shipping options, and product care
instructions such as tent maintenance.
metadata:
# Categorization tags for organizing and discovering agents
authors:
- Microsoft Agent Framework Team
tags:
- Azure AI AgentServer
- Microsoft Agent Framework
- Retrieval-Augmented Generation
- RAG
template:
name: agent-with-text-search-rag
# The type of agent - "hosted" for HOBO, "container" for COBO
kind: hosted
protocols:
- protocol: responses
environment_variables:
- name: AZURE_OPENAI_ENDPOINT
value: ${AZURE_OPENAI_ENDPOINT}
- name: AZURE_OPENAI_CHAT_DEPLOYMENT_NAME
value: "{{chat}}"
resources:
- kind: model
id: gpt-4o-mini
name: chat


@@ -0,0 +1,110 @@
# Copyright (c) Microsoft. All rights reserved.
import json
import sys
from collections.abc import MutableSequence
from dataclasses import dataclass
from typing import Any
from agent_framework import ChatMessage, Context, ContextProvider, Role
from agent_framework.azure import AzureOpenAIChatClient
from azure.ai.agentserver.agentframework import from_agent_framework # pyright: ignore[reportUnknownVariableType]
from azure.identity import DefaultAzureCredential
if sys.version_info >= (3, 12):
from typing import override
else:
from typing_extensions import override
@dataclass
class TextSearchResult:
source_name: str
source_link: str
text: str
class TextSearchContextProvider(ContextProvider):
"""A simple context provider that simulates text search results based on keywords in the user's message."""
def _get_most_recent_message(self, messages: ChatMessage | MutableSequence[ChatMessage]) -> ChatMessage:
"""Helper method to extract the most recent message from the input."""
if isinstance(messages, ChatMessage):
return messages
if messages:
return messages[-1]
raise ValueError("No messages provided")
@override
async def invoking(self, messages: ChatMessage | MutableSequence[ChatMessage], **kwargs: Any) -> Context:
message = self._get_most_recent_message(messages)
query = message.text.lower()
results: list[TextSearchResult] = []
if "return" in query and "refund" in query:
results.append(
TextSearchResult(
source_name="Contoso Outdoors Return Policy",
source_link="https://contoso.com/policies/returns",
text=(
"Customers may return any item within 30 days of delivery. "
"Items should be unused and include original packaging. "
"Refunds are issued to the original payment method within 5 business days of inspection."
),
)
)
if "shipping" in query:
results.append(
TextSearchResult(
source_name="Contoso Outdoors Shipping Guide",
source_link="https://contoso.com/help/shipping",
text=(
"Standard shipping is free on orders over $50 and typically arrives in 3-5 business days "
"within the continental United States. Expedited options are available at checkout."
),
)
)
if "tent" in query or "fabric" in query:
results.append(
TextSearchResult(
source_name="TrailRunner Tent Care Instructions",
source_link="https://contoso.com/manuals/trailrunner-tent",
text=(
"Clean the tent fabric with lukewarm water and a non-detergent soap. "
"Allow it to air dry completely before storage and avoid prolonged UV "
"exposure to extend the lifespan of the waterproof coating."
),
)
)
if not results:
return Context()
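        # Inject the results as an extra user message so the model sees the
        # retrieved context for this invocation.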
return Context(
messages=[
ChatMessage(
role=Role.USER, text="\n\n".join(json.dumps(result.__dict__, indent=2) for result in results)
)
]
)
def main():
# Create an Agent using the Azure OpenAI Chat Client
agent = AzureOpenAIChatClient(credential=DefaultAzureCredential()).as_agent(
name="SupportSpecialist",
instructions=(
"You are a helpful support specialist for Contoso Outdoors. "
"Answer questions using the provided context and cite the source document when available."
),
context_provider=TextSearchContextProvider(),
)
# Run the agent as a hosted agent
from_agent_framework(agent).run()
if __name__ == "__main__":
main()


@@ -0,0 +1,2 @@
azure-ai-agentserver-agentframework==1.0.0b3
agent-framework


@@ -0,0 +1,16 @@
FROM python:3.12-slim
WORKDIR /app
COPY . user_agent/
WORKDIR /app/user_agent
RUN if [ -f requirements.txt ]; then \
pip install -r requirements.txt; \
else \
echo "No requirements.txt found"; \
fi
EXPOSE 8088
CMD ["python", "main.py"]


@@ -0,0 +1,28 @@
# Unique identifier/name for this agent
name: agents-in-workflow
# Brief description of what this agent does
description: >
  A workflow agent that responds to product launch strategy inquiries by concurrently gathering insights from three specialized agents.
metadata:
# Categorization tags for organizing and discovering agents
authors:
- Microsoft Agent Framework Team
tags:
- Azure AI AgentServer
- Microsoft Agent Framework
- Workflows
template:
name: agents-in-workflow
# The type of agent - "hosted" for HOBO, "container" for COBO
kind: hosted
protocols:
- protocol: responses
environment_variables:
- name: AZURE_OPENAI_ENDPOINT
value: ${AZURE_OPENAI_ENDPOINT}
- name: AZURE_OPENAI_CHAT_DEPLOYMENT_NAME
value: "{{chat}}"
resources:
- kind: model
id: gpt-4o-mini
name: chat


@@ -0,0 +1,44 @@
# Copyright (c) Microsoft. All rights reserved.
from agent_framework import ConcurrentBuilder
from agent_framework.azure import AzureOpenAIChatClient
from azure.ai.agentserver.agentframework import from_agent_framework  # pyright: ignore[reportUnknownVariableType]
from azure.identity import DefaultAzureCredential
def main():
# Create agents
researcher = AzureOpenAIChatClient(credential=DefaultAzureCredential()).as_agent(
instructions=(
"You're an expert market and product researcher. "
"Given a prompt, provide concise, factual insights, opportunities, and risks."
),
name="researcher",
)
marketer = AzureOpenAIChatClient(credential=DefaultAzureCredential()).as_agent(
instructions=(
"You're a creative marketing strategist. "
"Craft compelling value propositions and target messaging aligned to the prompt."
),
name="marketer",
)
legal = AzureOpenAIChatClient(credential=DefaultAzureCredential()).as_agent(
instructions=(
"You're a cautious legal/compliance reviewer. "
"Highlight constraints, disclaimers, and policy concerns based on the prompt."
),
name="legal",
)
# Build a concurrent workflow
workflow = ConcurrentBuilder().participants([researcher, marketer, legal]).build()
# Convert the workflow to an agent
workflow_agent = workflow.as_agent()
# Run the agent as a hosted agent
from_agent_framework(workflow_agent).run()
if __name__ == "__main__":
main()


@@ -0,0 +1,2 @@
azure-ai-agentserver-agentframework==1.0.0b3
agent-framework


@@ -0,0 +1,17 @@
# OpenAI Configuration
OPENAI_API_KEY=
OPENAI_CHAT_MODEL_ID=
# Agent 365 Agentic Authentication Configuration
USE_ANONYMOUS_MODE=
CONNECTIONS__SERVICE_CONNECTION__SETTINGS__CLIENTID=
CONNECTIONS__SERVICE_CONNECTION__SETTINGS__CLIENTSECRET=
CONNECTIONS__SERVICE_CONNECTION__SETTINGS__TENANTID=
CONNECTIONS__SERVICE_CONNECTION__SETTINGS__SCOPES=
AGENTAPPLICATION__USERAUTHORIZATION__HANDLERS__AGENTIC__SETTINGS__TYPE=AgenticUserAuthorization
AGENTAPPLICATION__USERAUTHORIZATION__HANDLERS__AGENTIC__SETTINGS__SCOPES=https://graph.microsoft.com/.default
AGENTAPPLICATION__USERAUTHORIZATION__HANDLERS__AGENTIC__SETTINGS__ALTERNATEBLUEPRINTCONNECTIONNAME=https://graph.microsoft.com/.default
CONNECTIONSMAP_0_SERVICEURL=*
CONNECTIONSMAP_0_CONNECTION=SERVICE_CONNECTION


@@ -0,0 +1,100 @@
# Microsoft Agent Framework Python Weather Agent sample (M365 Agents SDK)
This sample demonstrates a simple Weather Forecast Agent built with the Python Microsoft Agent Framework, exposed through Microsoft 365 Agents SDK-compatible endpoints. The agent accepts natural-language requests for a weather forecast and responds with a textual answer. It supports multi-turn conversations to gather required information.
## Prerequisites
- Python 3.11+
- [uv](https://github.com/astral-sh/uv) for fast dependency management
- [devtunnel](https://learn.microsoft.com/azure/developer/dev-tunnels/get-started?tabs=windows)
- [Microsoft 365 Agents Toolkit](https://github.com/OfficeDev/microsoft-365-agents-toolkit) for playground/testing
- Access to OpenAI or Azure OpenAI with a model like `gpt-4o-mini`
## Configuration
Set the following environment variables:
```bash
# Common
export PORT=3978
export USE_ANONYMOUS_MODE=True # set to false if using auth
# OpenAI
export OPENAI_API_KEY="..."
export OPENAI_CHAT_MODEL_ID="..."
```
## Installing Dependencies
From the repository root or the sample folder:
```bash
uv sync
```
## Running the Agent Locally
```bash
# Activate environment first if not already
source .venv/bin/activate # (Windows PowerShell: .venv\Scripts\Activate.ps1)
# Run the weather agent demo
python m365_agent_demo/app.py
```
The agent starts on `http://localhost:3978`. Health check: `GET /api/health`.
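For example, a quick health probe (assuming the default port):
```bash
curl http://localhost:3978/api/health
# -> {"status": "ok"}
```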
## QuickStart using Agents Playground
1. Install (if not already):
```bash
winget install agentsplayground
```
2. Start the Python agent locally: `python m365_agent_demo/app.py`
3. Start the playground: `agentsplayground`
4. Chat with the Weather Agent.
## QuickStart using WebChat (Azure Bot)
To test via WebChat, provision an Azure Bot and point its messaging endpoint to your agent.
1. Create an Azure Bot (choose Client Secret auth for local tunneling).
2. Create a `.env` file in this sample folder with the following (replace placeholders):
```bash
# Authentication / Agentic configuration
USE_ANONYMOUS_MODE=False
CONNECTIONS__SERVICE_CONNECTION__SETTINGS__CLIENTID="<client-id>"
CONNECTIONS__SERVICE_CONNECTION__SETTINGS__CLIENTSECRET="<client-secret>"
CONNECTIONS__SERVICE_CONNECTION__SETTINGS__TENANTID="<tenant-id>"
CONNECTIONS__SERVICE_CONNECTION__SETTINGS__SCOPES=https://graph.microsoft.com/.default
AGENTAPPLICATION__USERAUTHORIZATION__HANDLERS__AGENTIC__SETTINGS__TYPE=AgenticUserAuthorization
AGENTAPPLICATION__USERAUTHORIZATION__HANDLERS__AGENTIC__SETTINGS__SCOPES=https://graph.microsoft.com/.default
AGENTAPPLICATION__USERAUTHORIZATION__HANDLERS__AGENTIC__SETTINGS__ALTERNATEBLUEPRINTCONNECTIONNAME=https://graph.microsoft.com/.default
```
3. Host dev tunnel:
```bash
devtunnel host -p 3978 --allow-anonymous
```
4. Set the bot Messaging endpoint to: `https://<tunnel-host>/api/messages`
5. Run your local agent: `python m365_agent_demo/app.py`
6. Use "Test in WebChat" in Azure Portal.
> Federated Credentials or Managed Identity auth types typically require deployment to Azure App Service instead of tunneling.
## Troubleshooting
- 404 on `/api/messages`: Ensure you are POSTing and using the correct tunnel URL.
- Empty responses: Check model key / quota and ensure environment variables are set.
- Auth errors when anonymous disabled: Validate MSAL config matches your Azure Bot registration.
## Further Reading
- [Microsoft 365 Agents SDK](https://learn.microsoft.com/microsoft-365/agents-sdk/)
- [Devtunnel docs](https://learn.microsoft.com/azure/developer/dev-tunnels/)

View File

@@ -0,0 +1,238 @@
# Copyright (c) Microsoft. All rights reserved.
# /// script
# requires-python = ">=3.11"
# dependencies = [
# "microsoft-agents-hosting-aiohttp",
# "microsoft-agents-hosting-core",
# "microsoft-agents-authentication-msal",
# "microsoft-agents-activity",
# "agent-framework-core",
# "aiohttp"
# ]
# ///
import os
from dataclasses import dataclass
from random import randint
from typing import Annotated
from agent_framework import ChatAgent
from agent_framework.openai import OpenAIChatClient
from aiohttp import web
from aiohttp.web_middlewares import middleware
from microsoft_agents.activity import load_configuration_from_env
from microsoft_agents.authentication.msal import MsalConnectionManager
from microsoft_agents.hosting.aiohttp import CloudAdapter, start_agent_process
from microsoft_agents.hosting.core import (
AgentApplication,
AuthenticationConstants,
Authorization,
ClaimsIdentity,
MemoryStorage,
TurnContext,
TurnState,
)
from pydantic import Field
"""
Demo application using Microsoft Agent 365 SDK.
This sample demonstrates how to build an AI agent using the Agent Framework,
integrating with Microsoft 365 authentication and hosting components.
The agent provides a simple weather tool and can be run in either anonymous mode
(no authentication required) or authenticated mode using MSAL and Azure AD.
Key features:
- Loads configuration from environment variables.
- Demonstrates agent creation and tool registration.
- Supports both anonymous and authenticated scenarios.
- Uses aiohttp for web hosting.
To run, set the appropriate environment variables (check .env.example file) for authentication or use
anonymous mode for local testing.
"""
@dataclass
class AppConfig:
use_anonymous_mode: bool
port: int
agents_sdk_config: dict
def load_app_config() -> AppConfig:
"""Load application configuration from environment variables.
Returns:
AppConfig: Consolidated configuration including anonymous mode flag, port, and SDK config.
"""
agents_sdk_config = load_configuration_from_env(os.environ)
use_anonymous_mode = os.environ.get("USE_ANONYMOUS_MODE", "true").lower() == "true"
port_str = os.getenv("PORT", "3978")
try:
port = int(port_str)
except ValueError:
port = 3978
return AppConfig(use_anonymous_mode=use_anonymous_mode, port=port, agents_sdk_config=agents_sdk_config)
def get_weather(
location: Annotated[str, Field(description="The location to get the weather for.")],
) -> str:
"""Generate a mock weather report for the provided location.
Args:
location: The geographic location name.
Returns:
str: Human-readable weather summary.
"""
conditions = ["sunny", "cloudy", "rainy", "stormy"]
return f"The weather in {location} is {conditions[randint(0, 3)]} with a high of {randint(10, 30)}°C."
def build_agent() -> ChatAgent:
"""Create and return the chat agent instance with weather tool registered."""
return OpenAIChatClient().as_agent(
name="WeatherAgent", instructions="You are a helpful weather agent.", tools=get_weather
)
def build_connection_manager(config: AppConfig) -> MsalConnectionManager | None:
"""Build the connection manager unless running in anonymous mode.
Args:
config: Application configuration.
Returns:
MsalConnectionManager | None: Connection manager when authenticated mode is enabled.
"""
if config.use_anonymous_mode:
return None
return MsalConnectionManager(**config.agents_sdk_config)
def build_adapter(connection_manager: MsalConnectionManager | None) -> CloudAdapter:
"""Instantiate the CloudAdapter with the optional connection manager."""
return CloudAdapter(connection_manager=connection_manager)
def build_authorization(
storage: MemoryStorage, connection_manager: MsalConnectionManager | None, config: AppConfig
) -> Authorization | None:
"""Create Authorization component if not in anonymous mode.
Args:
storage: State storage backend.
connection_manager: Optional connection manager.
config: Application configuration.
Returns:
Authorization | None: Authorization component when enabled.
"""
if config.use_anonymous_mode:
return None
return Authorization(storage, connection_manager, **config.agents_sdk_config)
def build_agent_application(
storage: MemoryStorage,
adapter: CloudAdapter,
authorization: Authorization | None,
config: AppConfig,
) -> AgentApplication[TurnState]:
"""Compose and return the AgentApplication instance.
Args:
storage: Storage implementation.
adapter: CloudAdapter handling requests.
authorization: Optional authorization component.
config: App configuration.
Returns:
AgentApplication[TurnState]: Configured agent application.
"""
return AgentApplication[TurnState](
storage=storage, adapter=adapter, authorization=authorization, **config.agents_sdk_config
)
def build_anonymous_claims_middleware(use_anonymous_mode: bool):
"""Return a middleware that injects anonymous claims when enabled.
Args:
use_anonymous_mode: Whether to apply anonymous identity for each request.
Returns:
Callable: Aiohttp middleware function.
"""
@middleware
async def anonymous_claims_middleware(request, handler):
"""Inject claims for anonymous users if anonymous mode is active."""
if use_anonymous_mode:
request["claims_identity"] = ClaimsIdentity(
{
AuthenticationConstants.AUDIENCE_CLAIM: "anonymous",
AuthenticationConstants.APP_ID_CLAIM: "anonymous-app",
},
False,
"Anonymous",
)
return await handler(request)
return anonymous_claims_middleware
def create_app(config: AppConfig) -> web.Application:
"""Create and configure the aiohttp web application.
Args:
config: Loaded application configuration.
Returns:
web.Application: Fully initialized web application.
"""
middleware_fn = build_anonymous_claims_middleware(config.use_anonymous_mode)
app = web.Application(middleware=[middleware_fn])
storage = MemoryStorage()
agent = build_agent()
connection_manager = build_connection_manager(config)
adapter = build_adapter(connection_manager)
authorization = build_authorization(storage, connection_manager, config)
agent_app = build_agent_application(storage, adapter, authorization, config)
@agent_app.activity("message")
async def on_message(context: TurnContext, _: TurnState):
user_message = context.activity.text or ""
if not user_message.strip():
return
response = await agent.run(user_message)
response_text = response.text
await context.send_activity(response_text)
async def health(request: web.Request) -> web.Response:
return web.json_response({"status": "ok"})
async def entry_point(req: web.Request) -> web.Response:
return await start_agent_process(req, req.app["agent_app"], req.app["adapter"])
app.add_routes([
web.get("/api/health", health),
web.get("/api/messages", lambda _: web.Response(status=200)),
web.post("/api/messages", entry_point),
])
app["agent_app"] = agent_app
app["adapter"] = adapter
return app
def main() -> None:
"""Entry point: load configuration, build app, and start server."""
config = load_app_config()
app = create_app(config)
web.run_app(app, host="localhost", port=config.port)
if __name__ == "__main__":
main()


@@ -0,0 +1,2 @@
AZURE_AI_PROJECT_ENDPOINT="<your-project-endpoint>"
AZURE_AI_MODEL_DEPLOYMENT_NAME="<your-model-deployment>"


@@ -0,0 +1,30 @@
# Multi-Agent Travel Planning Workflow Evaluation
This sample demonstrates evaluating a multi-agent workflow using Azure AI's built-in evaluators. The workflow processes travel planning requests through seven specialized agents in a fan-out/fan-in pattern: travel request handler, hotel/flight/activity search agents, booking aggregator, booking confirmation, and payment processing.
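The fan-out/fan-in search stage can be sketched with `ConcurrentBuilder`, borrowing the pattern from the agents-in-workflow sample; the agent names and instructions below are illustrative placeholders, and the full seven-agent workflow adds the surrounding sequential stages:

```python
from agent_framework import ConcurrentBuilder
from agent_framework.azure import AzureOpenAIChatClient
from azure.identity import DefaultAzureCredential


def build_search_stage():
    # One agent per search domain; each gets its own chat client.
    hotel = AzureOpenAIChatClient(credential=DefaultAzureCredential()).as_agent(
        name="hotel_search", instructions="Search hotels for the travel request."
    )
    flight = AzureOpenAIChatClient(credential=DefaultAzureCredential()).as_agent(
        name="flight_search", instructions="Search flights for the travel request."
    )
    activity = AzureOpenAIChatClient(credential=DefaultAzureCredential()).as_agent(
        name="activity_search", instructions="Search activities for the travel request."
    )
    # Fan the request out to all three agents and gather their answers back in.
    return ConcurrentBuilder().participants([hotel, flight, activity]).build()
```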
## Evaluation Metrics
The evaluation uses four Azure AI built-in evaluators:
- **Relevance** - How well responses address the user query
- **Groundedness** - Whether responses are grounded in available context
- **Tool Call Accuracy** - Correct tool selection and parameter usage
- **Tool Output Utilization** - Effective use of tool outputs in responses
## Setup
Create a `.env` file with configuration as in the `.env.example` file in this folder.
## Running the Evaluation
Execute the complete workflow and evaluation:
```bash
python run_evaluation.py
```
The script will:
1. Execute the multi-agent travel planning workflow
2. Display response summary for each agent
3. Create and run evaluation on hotel, flight, and activity search agents
4. Monitor progress and display the evaluation report URL


@@ -0,0 +1,754 @@
# Copyright (c) Microsoft. All rights reserved.
import json
from datetime import datetime
from typing import Annotated
from agent_framework import ai_function
from pydantic import Field
# --- Travel Planning Tools ---
# Note: These are mock tools for demonstration purposes. They return simulated data
# and do not make real API calls or bookings.
# Mock hotel search tool
@ai_function(name="search_hotels", description="Search for available hotels based on location and dates.")
def search_hotels(
location: Annotated[str, Field(description="City or region to search for hotels.")],
check_in: Annotated[str, Field(description="Check-in date (e.g., 'December 15, 2025').")],
check_out: Annotated[str, Field(description="Check-out date (e.g., 'December 18, 2025').")],
guests: Annotated[int, Field(description="Number of guests.")] = 2,
) -> str:
"""Search for available hotels based on location and dates.
Returns:
JSON string containing search results with hotel details including name, rating,
price, distance to landmarks, amenities, and availability.
"""
# Specific mock data for Paris December 15-18, 2025
if "paris" in location.lower():
mock_hotels = [
{
"name": "Hotel Eiffel Trocadéro",
"rating": 4.6,
"price_per_night": "$185",
"total_price": "$555 for 3 nights",
"distance_to_eiffel_tower": "0.3 miles",
"amenities": ["WiFi", "Breakfast", "Eiffel Tower View", "Concierge"],
"availability": "Available",
"address": "35 Rue Benjamin Franklin, 16th arr., Paris"
},
{
"name": "Mercure Paris Centre Tour Eiffel",
"rating": 4.4,
"price_per_night": "$220",
"total_price": "$660 for 3 nights",
"distance_to_eiffel_tower": "0.5 miles",
"amenities": ["WiFi", "Restaurant", "Bar", "Gym", "Air Conditioning"],
"availability": "Available",
"address": "20 Rue Jean Rey, 15th arr., Paris"
},
{
"name": "Pullman Paris Tour Eiffel",
"rating": 4.7,
"price_per_night": "$280",
"total_price": "$840 for 3 nights",
"distance_to_eiffel_tower": "0.2 miles",
"amenities": ["WiFi", "Spa", "Gym", "Restaurant", "Rooftop Bar", "Concierge"],
"availability": "Limited",
"address": "18 Avenue de Suffren, 15th arr., Paris"
}
]
else:
mock_hotels = [
{
"name": "Grand Plaza Hotel",
"rating": 4.5,
"price_per_night": "$150",
"amenities": ["WiFi", "Pool", "Gym", "Restaurant"],
"availability": "Available"
}
]
return json.dumps({
"location": location,
"check_in": check_in,
"check_out": check_out,
"guests": guests,
"hotels_found": len(mock_hotels),
"hotels": mock_hotels,
"note": "Hotel search results matching your query"
})
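# Example (hypothetical; assumes the decorated function remains directly callable):
#
#     result = json.loads(search_hotels("Paris", "December 15, 2025", "December 18, 2025"))
#     assert result["hotels_found"] == 3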
# Mock hotel details tool
@ai_function(name="get_hotel_details", description="Get detailed information about a specific hotel.")
def get_hotel_details(
hotel_name: Annotated[str, Field(description="Name of the hotel to get details for.")],
) -> str:
"""Get detailed information about a specific hotel.
Returns:
JSON string containing detailed hotel information including description,
check-in/out times, cancellation policy, reviews, and nearby attractions.
"""
hotel_details = {
"Hotel Eiffel Trocadéro": {
"description": "Charming boutique hotel with stunning Eiffel Tower views from select rooms. Perfect for couples and families.",
"check_in_time": "3:00 PM",
"check_out_time": "11:00 AM",
"cancellation_policy": "Free cancellation up to 24 hours before check-in",
"reviews": {
"total": 1247,
"recent_comments": [
"Amazing location! Walked to Eiffel Tower in 5 minutes.",
"Staff was incredibly helpful with restaurant recommendations.",
"Rooms are cozy and clean with great views."
]
},
"nearby_attractions": ["Eiffel Tower (0.3 mi)", "Trocadéro Gardens (0.2 mi)", "Seine River (0.4 mi)"]
},
"Mercure Paris Centre Tour Eiffel": {
"description": "Modern hotel with contemporary rooms and excellent dining options. Close to metro stations.",
"check_in_time": "2:00 PM",
"check_out_time": "12:00 PM",
"cancellation_policy": "Free cancellation up to 48 hours before check-in",
"reviews": {
"total": 2156,
"recent_comments": [
"Great value for money, clean and comfortable.",
"Restaurant had excellent French cuisine.",
"Easy access to public transportation."
]
},
"nearby_attractions": ["Eiffel Tower (0.5 mi)", "Champ de Mars (0.4 mi)", "Les Invalides (0.8 mi)"]
},
"Pullman Paris Tour Eiffel": {
"description": "Luxury hotel offering panoramic views, upscale amenities, and exceptional service. Ideal for a premium experience.",
"check_in_time": "3:00 PM",
"check_out_time": "12:00 PM",
"cancellation_policy": "Free cancellation up to 72 hours before check-in",
"reviews": {
"total": 3421,
"recent_comments": [
"Rooftop bar has the best Eiffel Tower views in Paris!",
"Luxurious rooms with every amenity you could want.",
"Worth the price for the location and service."
]
},
"nearby_attractions": ["Eiffel Tower (0.2 mi)", "Seine River Cruise Dock (0.3 mi)", "Trocadéro (0.5 mi)"]
}
}
details = hotel_details.get(hotel_name, {
"name": hotel_name,
"description": "Comfortable hotel with modern amenities",
"check_in_time": "3:00 PM",
"check_out_time": "11:00 AM",
"cancellation_policy": "Standard cancellation policy applies",
"reviews": {"total": 0, "recent_comments": []},
"nearby_attractions": []
})
return json.dumps({
"hotel_name": hotel_name,
"details": details
})
# Mock flight search tool
@ai_function(name="search_flights", description="Search for available flights between two locations.")
def search_flights(
origin: Annotated[str, Field(description="Departure airport or city (e.g., 'JFK' or 'New York').")],
destination: Annotated[str, Field(description="Arrival airport or city (e.g., 'CDG' or 'Paris').")],
departure_date: Annotated[str, Field(description="Departure date (e.g., 'December 15, 2025').")],
return_date: Annotated[str | None, Field(description="Return date (e.g., 'December 18, 2025').")] = None,
passengers: Annotated[int, Field(description="Number of passengers.")] = 1,
) -> str:
"""Search for available flights between two locations.
Returns:
JSON string containing flight search results with details including flight numbers,
airlines, departure/arrival times, prices, durations, and baggage allowances.
"""
# Specific mock data for JFK to Paris December 15-18, 2025
if "jfk" in origin.lower() or "new york" in origin.lower():
if "paris" in destination.lower() or "cdg" in destination.lower():
mock_flights = [
{
"outbound": {
"flight_number": "AF007",
"airline": "Air France",
"departure": "December 15, 2025 at 6:30 PM",
"arrival": "December 16, 2025 at 8:15 AM",
"duration": "7h 45m",
"aircraft": "Boeing 777-300ER",
"class": "Economy",
"price": "$520"
},
"return": {
"flight_number": "AF008",
"airline": "Air France",
"departure": "December 18, 2025 at 11:00 AM",
"arrival": "December 18, 2025 at 2:15 PM",
"duration": "8h 15m",
"aircraft": "Airbus A350-900",
"class": "Economy",
"price": "Included"
},
"total_price": "$520",
"stops": "Nonstop",
"baggage": "1 checked bag included"
},
{
"outbound": {
"flight_number": "DL264",
"airline": "Delta",
"departure": "December 15, 2025 at 10:15 PM",
"arrival": "December 16, 2025 at 12:05 PM",
"duration": "7h 50m",
"aircraft": "Airbus A330-900neo",
"class": "Economy",
"price": "$485"
},
"return": {
"flight_number": "DL265",
"airline": "Delta",
"departure": "December 18, 2025 at 1:45 PM",
"arrival": "December 18, 2025 at 5:00 PM",
"duration": "8h 15m",
"aircraft": "Airbus A330-900neo",
"class": "Economy",
"price": "Included"
},
"total_price": "$485",
"stops": "Nonstop",
"baggage": "1 checked bag included"
},
{
"outbound": {
"flight_number": "UA57",
"airline": "United Airlines",
"departure": "December 15, 2025 at 5:00 PM",
"arrival": "December 16, 2025 at 6:50 AM",
"duration": "7h 50m",
"aircraft": "Boeing 767-400ER",
"class": "Economy",
"price": "$560"
},
"return": {
"flight_number": "UA58",
"airline": "United Airlines",
"departure": "December 18, 2025 at 9:30 AM",
"arrival": "December 18, 2025 at 12:45 PM",
"duration": "8h 15m",
"aircraft": "Boeing 787-10",
"class": "Economy",
"price": "Included"
},
"total_price": "$560",
"stops": "Nonstop",
"baggage": "1 checked bag included"
}
]
else:
mock_flights = [{"flight_number": "XX123", "airline": "Generic Air", "price": "$400", "note": "Generic route"}]
else:
mock_flights = [
{
"outbound": {
"flight_number": "AA123",
"airline": "Generic Airlines",
"departure": f"{departure_date} at 9:00 AM",
"arrival": f"{departure_date} at 2:30 PM",
"duration": "5h 30m",
"class": "Economy",
"price": "$350"
},
"total_price": "$350",
"stops": "Nonstop"
}
]
return json.dumps({
"origin": origin,
"destination": destination,
"departure_date": departure_date,
"return_date": return_date,
"passengers": passengers,
"flights_found": len(mock_flights),
"flights": mock_flights,
"note": "Flight search results for JFK to Paris CDG"
})
# Mock flight details tool
@ai_function(name="get_flight_details", description="Get detailed information about a specific flight.")
def get_flight_details(
flight_number: Annotated[str, Field(description="Flight number (e.g., 'AF007' or 'DL264').")],
) -> str:
"""Get detailed information about a specific flight.
Returns:
JSON string containing detailed flight information including airline, aircraft type,
departure/arrival airports and times, gates, terminals, duration, and amenities.
"""
mock_details = {
"flight_number": flight_number,
"airline": "Sky Airways",
"aircraft": "Boeing 737-800",
"departure": {
"airport": "JFK International Airport",
"terminal": "Terminal 4",
"gate": "B23",
"time": "08:00 AM"
},
"arrival": {
"airport": "Charles de Gaulle Airport",
"terminal": "Terminal 2E",
"gate": "K15",
"time": "11:30 AM local time"
},
"duration": "3h 30m",
"baggage_allowance": {
"carry_on": "1 bag (10kg)",
"checked": "1 bag (23kg)"
},
"amenities": ["WiFi", "In-flight entertainment", "Meals included"]
}
return json.dumps({
"flight_details": mock_details
})
# Mock activity search tool
@ai_function(name="search_activities", description="Search for available activities and attractions at a destination.")
def search_activities(
location: Annotated[str, Field(description="City or region to search for activities.")],
date: Annotated[str | None, Field(description="Date for the activity (e.g., 'December 16, 2025').")] = None,
category: Annotated[str | None, Field(description="Activity category (e.g., 'Sightseeing', 'Culture', 'Culinary').")] = None,
) -> str:
"""Search for available activities and attractions at a destination.
Returns:
JSON string containing activity search results with details including name, category,
duration, price, rating, description, availability, and booking requirements.
"""
# Specific mock data for Paris activities
if "paris" in location.lower():
all_activities = [
{
"name": "Eiffel Tower Summit Access",
"category": "Sightseeing",
"duration": "2-3 hours",
"price": "$35",
"rating": 4.8,
"description": "Skip-the-line access to all three levels including the summit. Best views of Paris!",
"availability": "Daily 9:30 AM - 11:00 PM",
"best_time": "Early morning or sunset",
"booking_required": True
},
{
"name": "Louvre Museum Guided Tour",
"category": "Sightseeing",
"duration": "3 hours",
"price": "$55",
"rating": 4.7,
"description": "Expert-guided tour covering masterpieces including Mona Lisa and Venus de Milo.",
"availability": "Daily except Tuesdays, 9:00 AM entry",
"best_time": "Morning entry recommended",
"booking_required": True
},
{
"name": "Seine River Cruise",
"category": "Sightseeing",
"duration": "1 hour",
"price": "$18",
"rating": 4.6,
"description": "Scenic cruise past Notre-Dame, Eiffel Tower, and historic bridges.",
"availability": "Every 30 minutes, 10:00 AM - 10:00 PM",
"best_time": "Evening for illuminated monuments",
"booking_required": False
},
{
"name": "Musée d'Orsay Visit",
"category": "Culture",
"duration": "2-3 hours",
"price": "$16",
"rating": 4.7,
"description": "Impressionist masterpieces in a stunning Beaux-Arts railway station.",
"availability": "Tuesday-Sunday 9:30 AM - 6:00 PM",
"best_time": "Weekday mornings",
"booking_required": True
},
{
"name": "Versailles Palace Day Trip",
"category": "Culture",
"duration": "5-6 hours",
"price": "$75",
"rating": 4.9,
"description": "Explore the opulent palace and stunning gardens of Louis XIV (includes transport).",
"availability": "Daily except Mondays, 8:00 AM departure",
"best_time": "Full day trip",
"booking_required": True
},
{
"name": "Montmartre Walking Tour",
"category": "Culture",
"duration": "2.5 hours",
"price": "$25",
"rating": 4.6,
"description": "Discover the artistic heart of Paris, including Sacré-Cœur and artists' square.",
"availability": "Daily at 10:00 AM and 2:00 PM",
"best_time": "Morning or late afternoon",
"booking_required": False
},
{
"name": "French Cooking Class",
"category": "Culinary",
"duration": "3 hours",
"price": "$120",
"rating": 4.9,
"description": "Learn to make classic French dishes like coq au vin and crème brûlée, then enjoy your creations.",
"availability": "Tuesday-Saturday, 10:00 AM and 6:00 PM sessions",
"best_time": "Morning or evening sessions",
"booking_required": True
},
{
"name": "Wine & Cheese Tasting",
"category": "Culinary",
"duration": "1.5 hours",
"price": "$65",
"rating": 4.7,
"description": "Sample French wines and artisanal cheeses with expert sommelier guidance.",
"availability": "Daily at 5:00 PM and 7:30 PM",
"best_time": "Evening sessions",
"booking_required": True
},
{
"name": "Food Market Tour",
"category": "Culinary",
"duration": "2 hours",
"price": "$45",
"rating": 4.6,
"description": "Explore authentic Parisian markets and taste local specialties like cheeses, pastries, and charcuterie.",
"availability": "Tuesday, Thursday, Saturday mornings",
"best_time": "Morning (markets are freshest)",
"booking_required": False
}
]
if category:
activities = [act for act in all_activities if act["category"] == category]
else:
activities = all_activities
else:
activities = [
{
"name": "City Walking Tour",
"category": "Sightseeing",
"duration": "3 hours",
"price": "$45",
"rating": 4.7,
"description": "Explore the historic downtown area with an expert guide",
"availability": "Daily at 10:00 AM and 2:00 PM"
}
]
    return json.dumps({
        "location": location,
        "date": date,
        "category": category,
        "activities_found": len(activities),
        "activities": activities,
        "note": f"Activity search results for {location}; detailed mock data is available for Paris"
    })
# Mock activity details tool
@ai_function(name="get_activity_details", description="Get detailed information about a specific activity.")
def get_activity_details(
activity_name: Annotated[str, Field(description="Name of the activity to get details for.")],
) -> str:
"""Get detailed information about a specific activity.
Returns:
JSON string containing detailed activity information including description, duration,
price, included items, meeting point, what to bring, cancellation policy, and reviews.
"""
# Paris-specific activity details
activity_details_map = {
"Eiffel Tower Summit Access": {
"name": "Eiffel Tower Summit Access",
"description": "Skip-the-line access to all three levels of the Eiffel Tower, including the summit. Enjoy panoramic views of Paris from 276 meters high.",
"duration": "2-3 hours (self-guided)",
"price": "$35 per person",
"included": ["Skip-the-line ticket", "Access to all 3 levels", "Summit access", "Audio guide app"],
"meeting_point": "Eiffel Tower South Pillar entrance, look for priority access line",
"what_to_bring": ["Photo ID", "Comfortable shoes", "Camera", "Light jacket (summit can be windy)"],
"cancellation_policy": "Free cancellation up to 24 hours in advance",
"languages": ["English", "French", "Spanish", "German", "Italian"],
"max_group_size": "No limit",
"rating": 4.8,
"reviews_count": 15234
},
"Louvre Museum Guided Tour": {
"name": "Louvre Museum Guided Tour",
"description": "Expert-guided tour of the world's largest art museum, focusing on must-see masterpieces including Mona Lisa, Venus de Milo, and Winged Victory.",
"duration": "3 hours",
"price": "$55 per person",
"included": ["Skip-the-line entry", "Expert art historian guide", "Headsets for groups over 6", "Museum highlights map"],
"meeting_point": "Glass Pyramid main entrance, look for guide with 'Louvre Tours' sign",
"what_to_bring": ["Photo ID", "Comfortable shoes", "Camera (no flash)", "Water bottle"],
"cancellation_policy": "Free cancellation up to 48 hours in advance",
"languages": ["English", "French", "Spanish"],
"max_group_size": 20,
"rating": 4.7,
"reviews_count": 8921
},
"French Cooking Class": {
"name": "French Cooking Class",
"description": "Hands-on cooking experience where you'll learn to prepare classic French dishes like coq au vin, ratatouille, and crème brûlée under expert chef guidance.",
"duration": "3 hours",
"price": "$120 per person",
"included": ["All ingredients", "Chef instruction", "Apron and recipe booklet", "Wine pairing", "Lunch/dinner of your creations"],
"meeting_point": "Le Chef Cooking Studio, 15 Rue du Bac, 7th arrondissement",
"what_to_bring": ["Appetite", "Camera for food photos"],
"cancellation_policy": "Free cancellation up to 72 hours in advance",
"languages": ["English", "French"],
"max_group_size": 12,
"rating": 4.9,
"reviews_count": 2341
}
}
details = activity_details_map.get(activity_name, {
"name": activity_name,
"description": "An immersive experience that showcases the best of local culture and attractions.",
"duration": "3 hours",
"price": "$45 per person",
"included": ["Professional guide", "Entry fees"],
"meeting_point": "Central meeting location",
"what_to_bring": ["Comfortable shoes", "Camera"],
"cancellation_policy": "Free cancellation up to 24 hours in advance",
"languages": ["English"],
"max_group_size": 15,
"rating": 4.5,
"reviews_count": 100
})
return json.dumps({
"activity_details": details
})
# Mock booking confirmation tool
@ai_function(name="confirm_booking", description="Confirm a booking reservation.")
def confirm_booking(
booking_type: Annotated[str, Field(description="Type of booking (e.g., 'hotel', 'flight', 'activity').")],
booking_id: Annotated[str, Field(description="Unique booking identifier.")],
customer_info: Annotated[dict, Field(description="Customer information including name and email.")],
) -> str:
"""Confirm a booking reservation.
Returns:
JSON string containing confirmation details including confirmation number,
booking status, customer information, and next steps.
"""
confirmation_number = f"CONF-{booking_type.upper()}-{booking_id}"
confirmation_data = {
"confirmation_number": confirmation_number,
"booking_type": booking_type,
"status": "Confirmed",
"customer_name": customer_info.get("name", "Guest"),
"email": customer_info.get("email", "guest@example.com"),
"confirmation_sent": True,
"next_steps": [
"Check your email for booking details",
"Arrive 30 minutes before scheduled time",
"Bring confirmation number and valid ID"
]
}
return json.dumps({
"confirmation": confirmation_data
})
# Mock hotel availability check tool
@ai_function(name="check_hotel_availability", description="Check availability for hotel rooms.")
def check_hotel_availability(
hotel_name: Annotated[str, Field(description="Name of the hotel to check availability for.")],
check_in: Annotated[str, Field(description="Check-in date (e.g., 'December 15, 2025').")],
check_out: Annotated[str, Field(description="Check-out date (e.g., 'December 18, 2025').")],
rooms: Annotated[int, Field(description="Number of rooms needed.")] = 1,
) -> str:
"""Check availability for hotel rooms.
Sample Date format: "December 15, 2025"
Returns:
JSON string containing availability status, available rooms count, price per night,
and last checked timestamp.
"""
availability_status = "Available"
availability_data = {
"service_type": "hotel",
"hotel_name": hotel_name,
"check_in": check_in,
"check_out": check_out,
"rooms_requested": rooms,
"status": availability_status,
"available_rooms": 8,
"price_per_night": "$185",
"last_checked": datetime.now().isoformat()
}
return json.dumps({
"availability": availability_data
})
# Mock flight availability check tool
@ai_function(name="check_flight_availability", description="Check availability for flight seats.")
def check_flight_availability(
flight_number: Annotated[str, Field(description="Flight number to check availability for.")],
date: Annotated[str, Field(description="Flight date (e.g., 'December 15, 2025').")],
passengers: Annotated[int, Field(description="Number of passengers.")] = 1,
) -> str:
"""Check availability for flight seats.
Sample Date format: "December 15, 2025"
Returns:
JSON string containing availability status, available seats count, price per passenger,
and last checked timestamp.
"""
availability_status = "Available"
availability_data = {
"service_type": "flight",
"flight_number": flight_number,
"date": date,
"passengers_requested": passengers,
"status": availability_status,
"available_seats": 45,
"price_per_passenger": "$520",
"last_checked": datetime.now().isoformat()
}
return json.dumps({
"availability": availability_data
})
# Mock activity availability check tool
@ai_function(name="check_activity_availability", description="Check availability for activity bookings.")
def check_activity_availability(
activity_name: Annotated[str, Field(description="Name of the activity to check availability for.")],
date: Annotated[str, Field(description="Activity date (e.g., 'December 16, 2025').")],
participants: Annotated[int, Field(description="Number of participants.")] = 1,
) -> str:
"""Check availability for activity bookings.
Sample Date format: "December 16, 2025"
Returns:
JSON string containing availability status, available spots count, price per person,
and last checked timestamp.
"""
availability_status = "Available"
availability_data = {
"service_type": "activity",
"activity_name": activity_name,
"date": date,
"participants_requested": participants,
"status": availability_status,
"available_spots": 15,
"price_per_person": "$45",
"last_checked": datetime.now().isoformat()
}
return json.dumps({
"availability": availability_data
})
# Mock payment processing tool
@ai_function(name="process_payment", description="Process payment for a booking.")
def process_payment(
amount: Annotated[float, Field(description="Payment amount.")],
currency: Annotated[str, Field(description="Currency code (e.g., 'USD', 'EUR').")],
payment_method: Annotated[dict, Field(description="Payment method details (type, card info).")],
booking_reference: Annotated[str, Field(description="Booking reference number for the payment.")],
) -> str:
"""Process payment for a booking.
Returns:
JSON string containing payment result with transaction ID, status, amount, currency,
payment method details, and receipt URL.
"""
transaction_id = f"TXN-{datetime.now().strftime('%Y%m%d%H%M%S')}"
payment_result = {
"transaction_id": transaction_id,
"amount": amount,
"currency": currency,
"status": "Success",
"payment_method": payment_method.get("type", "Credit Card"),
"last_4_digits": payment_method.get("last_4", "****"),
"booking_reference": booking_reference,
"timestamp": datetime.now().isoformat(),
"receipt_url": f"https://payments.travelagency.com/receipt/{transaction_id}"
}
return json.dumps({
"payment_result": payment_result
})
# Mock payment validation tool
@ai_function(name="validate_payment_method", description="Validate a payment method before processing.")
def validate_payment_method(
payment_method: Annotated[dict, Field(description="Payment method to validate (type, number, expiry, cvv).")],
) -> str:
"""Validate payment method details.
Returns:
JSON string containing validation result with is_valid flag, payment method type,
validation messages, supported currencies, and processing fee information.
"""
method_type = payment_method.get("type", "credit_card")
# Validation logic
is_valid = True
validation_messages = []
if method_type == "credit_card":
if not payment_method.get("number"):
is_valid = False
validation_messages.append("Card number is required")
if not payment_method.get("expiry"):
is_valid = False
validation_messages.append("Expiry date is required")
if not payment_method.get("cvv"):
is_valid = False
validation_messages.append("CVV is required")
validation_result = {
"is_valid": is_valid,
"payment_method_type": method_type,
"validation_messages": validation_messages if not is_valid else ["Payment method is valid"],
"supported_currencies": ["USD", "EUR", "GBP", "JPY"],
"processing_fee": "2.5%"
}
return json.dumps({
"validation_result": validation_result
})
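# --- Standalone smoke test (not part of the original sample; a minimal sketch) ---
# Assumption: the @ai_function-decorated functions above remain directly callable,
# so the mock tools can be exercised without an agent to inspect their JSON output.
# The hotel name below is purely illustrative; check_hotel_availability simply
# echoes whatever name it is given.
if __name__ == "__main__":
    incomplete_card = {"type": "credit_card", "number": "4111 1111 1111 1111"}
    validation = json.loads(validate_payment_method(incomplete_card))
    print(validation["validation_result"]["validation_messages"])  # expiry and CVV errors
    availability = json.loads(
        check_hotel_availability("Example Hotel", "December 15, 2025", "December 18, 2025")
    )
    print(availability["availability"]["status"], availability["availability"]["price_per_night"])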


@@ -0,0 +1,440 @@
# Copyright (c) Microsoft. All rights reserved.
"""
Multi-Agent Travel Planning Workflow Evaluation with Multiple Response Tracking
This sample demonstrates a multi-agent travel planning workflow using the Azure AI Client that:
1. Processes travel queries through 7 specialized agents
2. Tracks MULTIPLE response and conversation IDs per agent for evaluation
3. Uses the new Prompt Agents API (V2)
4. Captures complete interaction sequences including multiple invocations
5. Aggregates findings through a travel planning coordinator
WORKFLOW STRUCTURE (7 agents):
- Travel Agent Executor → Hotel Search, Flight Search, Activity Search (fan-out)
- Hotel Search Executor → Booking Information Aggregation Executor
- Flight Search Executor → Booking Information Aggregation Executor
- Booking Information Aggregation Executor → Booking Confirmation Executor
- Booking Confirmation Executor → Booking Payment Executor
- Booking Information Aggregation, Booking Payment, Activity Search → Travel Planning Coordinator (ResearchLead) for final aggregation (fan-in)
Agents:
1. Travel Agent - Main coordinator (no tools to avoid thread conflicts)
2. Hotel Search - Searches hotels with tools
3. Flight Search - Searches flights with tools
4. Activity Search - Searches activities with tools
5. Booking Information Aggregation - Aggregates hotel & flight booking info
6. Booking Confirmation - Confirms bookings with tools
7. Booking Payment - Processes payments with tools
"""
import asyncio
import os
from collections import defaultdict
from _tools import (
check_flight_availability,
check_hotel_availability,
confirm_booking,
get_flight_details,
get_hotel_details,
process_payment,
search_activities,
search_flights,
# Travel planning tools
search_hotels,
validate_payment_method,
)
from agent_framework import (
AgentExecutorResponse,
AgentResponseUpdate,
AgentRunUpdateEvent,
ChatMessage,
Executor,
Role,
WorkflowBuilder,
WorkflowContext,
WorkflowOutputEvent,
executor,
handler,
)
from agent_framework.azure import AzureAIClient
from azure.ai.projects.aio import AIProjectClient
from azure.identity.aio import DefaultAzureCredential
from dotenv import load_dotenv
from typing_extensions import Never
load_dotenv()
@executor(id="start_executor")
async def start_executor(input: str, ctx: WorkflowContext[list[ChatMessage]]) -> None:
"""Initiates the workflow by sending the user query to all specialized agents."""
await ctx.send_message([ChatMessage(role="user", text=input)])
class ResearchLead(Executor):
"""Aggregates and summarizes travel planning findings from all specialized agents."""
def __init__(self, chat_client: AzureAIClient, id: str = "travel-planning-coordinator"):
# store=True to preserve conversation history for evaluation
self.agent = chat_client.as_agent(
id="travel-planning-coordinator",
instructions=(
"You are the final coordinator. You will receive responses from multiple agents: "
"booking-info-aggregation-agent (hotel/flight options), booking-payment-agent (payment confirmation), "
"and activity-search-agent (activities). "
"Review each agent's response, then create a comprehensive travel itinerary organized by: "
"1. Flights 2. Hotels 3. Activities 4. Booking confirmations 5. Payment details. "
"Clearly indicate which information came from which agent. Do not use tools."
),
name="travel-planning-coordinator",
store=True,
)
super().__init__(id=id)
@handler
async def fan_in_handle(self, responses: list[AgentExecutorResponse], ctx: WorkflowContext[Never, str]) -> None:
user_query = responses[0].full_conversation[0].text
# Extract findings from all agent responses
agent_findings = self._extract_agent_findings(responses)
summary_text = (
"\n".join(agent_findings) if agent_findings else "No specific findings were provided by the agents."
)
# Generate comprehensive travel plan summary
messages = [
ChatMessage(
role=Role.SYSTEM,
text="You are a travel planning coordinator. Summarize findings from multiple specialized travel agents and provide a clear, comprehensive travel plan based on the user's query.",
),
ChatMessage(
role=Role.USER,
text=f"Original query: {user_query}\n\nFindings from specialized travel agents:\n{summary_text}\n\nPlease provide a comprehensive travel plan based on these findings.",
),
]
try:
final_response = await self.agent.run(messages)
output_text = (
final_response.messages[-1].text
if final_response.messages and final_response.messages[-1].text
else f"Based on the available findings, here's your travel plan for '{user_query}': {summary_text}"
)
except Exception:
output_text = f"Based on the available findings, here's your travel plan for '{user_query}': {summary_text}"
await ctx.yield_output(output_text)
def _extract_agent_findings(self, responses: list[AgentExecutorResponse]) -> list[str]:
"""Extract findings from agent responses."""
agent_findings = []
for response in responses:
findings = []
if response.agent_response and response.agent_response.messages:
for msg in response.agent_response.messages:
if msg.role == Role.ASSISTANT and msg.text and msg.text.strip():
findings.append(msg.text.strip())
if findings:
combined_findings = " ".join(findings)
agent_findings.append(f"[{response.executor_id}]: {combined_findings}")
return agent_findings
async def run_workflow_with_response_tracking(query: str, chat_client: AzureAIClient | None = None) -> dict:
"""Run multi-agent workflow and track conversation IDs, response IDs, and interaction sequence.
Args:
query: The user query to process through the multi-agent workflow
chat_client: Optional AzureAIClient instance
Returns:
Dictionary containing interaction sequence, conversation/response IDs, and conversation analysis
"""
    if chat_client is None:
        try:
            async with DefaultAzureCredential() as credential:
                # Create AIProjectClient with the correct API version for V2 prompt agents
                # (the credential must exist before the project client that uses it)
                project_client = AIProjectClient(
                    endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"],
                    credential=credential,
                    api_version="2025-11-15-preview",
                )
                async with (
                    project_client,
                    AzureAIClient(project_client=project_client, credential=credential) as client,
                ):
                    return await _run_workflow_with_client(query, client)
        except Exception as e:
            print(f"Error during workflow execution: {e}")
            raise
    else:
        return await _run_workflow_with_client(query, chat_client)
async def _run_workflow_with_client(query: str, chat_client: AzureAIClient) -> dict:
"""Execute workflow with given client and track all interactions."""
    # Initialize tracking variables - use lists to track multiple responses per agent
    conversation_ids = defaultdict(list)
    response_ids = defaultdict(list)
    # Create workflow components (the agent map is returned for optional inspection)
    # Pass project_client and credential to create separate client instances per agent
    workflow, _agent_map = await _create_workflow(chat_client.project_client, chat_client.credential)
# Process workflow events
events = workflow.run_stream(query)
workflow_output = await _process_workflow_events(events, conversation_ids, response_ids)
return {
"conversation_ids": dict(conversation_ids),
"response_ids": dict(response_ids),
"output": workflow_output,
"query": query,
}
async def _create_workflow(project_client, credential):
"""Create the multi-agent travel planning workflow with specialized agents.
IMPORTANT: Each agent needs its own client instance because the V2 client stores
agent_name and agent_version as instance variables, causing all agents to share
the same agent identity if they share a client.
"""
# Create separate client for Final Coordinator
final_coordinator_client = AzureAIClient(
project_client=project_client, credential=credential, agent_name="final-coordinator"
)
final_coordinator = ResearchLead(chat_client=final_coordinator_client, id="final-coordinator")
# Agent 1: Travel Request Handler (initial coordinator)
# Create separate client with unique agent_name
travel_request_handler_client = AzureAIClient(
project_client=project_client, credential=credential, agent_name="travel-request-handler"
)
travel_request_handler = travel_request_handler_client.as_agent(
id="travel-request-handler",
instructions=(
"You receive user travel queries and relay them to specialized agents. Extract key information: destination, dates, budget, and preferences. Pass this information forward clearly to the next agents."
),
name="travel-request-handler",
store=True,
)
# Agent 2: Hotel Search Executor
hotel_search_client = AzureAIClient(
project_client=project_client, credential=credential, agent_name="hotel-search-agent"
)
hotel_search_agent = hotel_search_client.as_agent(
id="hotel-search-agent",
instructions=(
"You are a hotel search specialist. Your task is ONLY to search for and provide hotel information. Use search_hotels to find options, get_hotel_details for specifics, and check_availability to verify rooms. Output format: List hotel names, prices per night, total cost for the stay, locations, ratings, amenities, and addresses. IMPORTANT: Only provide hotel information without additional commentary."
),
name="hotel-search-agent",
tools=[search_hotels, get_hotel_details, check_hotel_availability],
store=True,
)
# Agent 3: Flight Search Executor
flight_search_client = AzureAIClient(
project_client=project_client, credential=credential, agent_name="flight-search-agent"
)
flight_search_agent = flight_search_client.as_agent(
id="flight-search-agent",
instructions=(
"You are a flight search specialist. Your task is ONLY to search for and provide flight information. Use search_flights to find options, get_flight_details for specifics, and check_availability for seats. Output format: List flight numbers, airlines, departure/arrival times, prices, durations, and cabin class. IMPORTANT: Only provide flight information without additional commentary."
),
name="flight-search-agent",
tools=[search_flights, get_flight_details, check_flight_availability],
store=True,
)
# Agent 4: Activity Search Executor
activity_search_client = AzureAIClient(
project_client=project_client, credential=credential, agent_name="activity-search-agent"
)
activity_search_agent = activity_search_client.as_agent(
id="activity-search-agent",
instructions=(
"You are an activities specialist. Your task is ONLY to search for and provide activity information. Use search_activities to find options for activities. Output format: List activity names, descriptions, prices, durations, ratings, and categories. IMPORTANT: Only provide activity information without additional commentary."
),
name="activity-search-agent",
tools=[search_activities],
store=True,
)
# Agent 5: Booking Confirmation Executor
booking_confirmation_client = AzureAIClient(
project_client=project_client, credential=credential, agent_name="booking-confirmation-agent"
)
booking_confirmation_agent = booking_confirmation_client.as_agent(
id="booking-confirmation-agent",
instructions=(
"You confirm bookings. Use check_hotel_availability and check_flight_availability to verify slots, then confirm_booking to finalize. Provide ONLY: confirmation numbers, booking references, and confirmation status."
),
name="booking-confirmation-agent",
tools=[confirm_booking, check_hotel_availability, check_flight_availability],
store=True,
)
# Agent 6: Booking Payment Executor
booking_payment_client = AzureAIClient(
project_client=project_client, credential=credential, agent_name="booking-payment-agent"
)
booking_payment_agent = booking_payment_client.as_agent(
id="booking-payment-agent",
instructions=(
"You process payments. Use validate_payment_method to verify payment, then process_payment to complete transactions. Provide ONLY: payment confirmation status, transaction IDs, and payment amounts."
),
name="booking-payment-agent",
tools=[process_payment, validate_payment_method],
store=True,
)
# Agent 7: Booking Information Aggregation Executor
booking_info_client = AzureAIClient(
project_client=project_client, credential=credential, agent_name="booking-info-aggregation-agent"
)
booking_info_aggregation_agent = booking_info_client.as_agent(
id="booking-info-aggregation-agent",
instructions=(
"You aggregate hotel and flight search results. Receive options from search agents and organize them. Provide: top 2-3 hotel options with prices and top 2-3 flight options with prices in a structured format."
),
name="booking-info-aggregation-agent",
store=True,
)
# Build workflow with logical booking flow:
# 1. start_executor → travel_request_handler
# 2. travel_request_handler → hotel_search, flight_search, activity_search (fan-out)
# 3. hotel_search → booking_info_aggregation
# 4. flight_search → booking_info_aggregation
# 5. booking_info_aggregation → booking_confirmation
# 6. booking_confirmation → booking_payment
# 7. booking_info_aggregation, booking_payment, activity_search → final_coordinator (final aggregation, fan-in)
workflow = (
WorkflowBuilder(name="Travel Planning Workflow")
.set_start_executor(start_executor)
.add_edge(start_executor, travel_request_handler)
.add_fan_out_edges(travel_request_handler, [hotel_search_agent, flight_search_agent, activity_search_agent])
.add_edge(hotel_search_agent, booking_info_aggregation_agent)
.add_edge(flight_search_agent, booking_info_aggregation_agent)
.add_edge(booking_info_aggregation_agent, booking_confirmation_agent)
.add_edge(booking_confirmation_agent, booking_payment_agent)
.add_fan_in_edges(
[booking_info_aggregation_agent, booking_payment_agent, activity_search_agent], final_coordinator
)
.build()
)
# Return workflow and agent map for thread ID extraction
agent_map = {
"travel_request_handler": travel_request_handler,
"hotel-search-agent": hotel_search_agent,
"flight-search-agent": flight_search_agent,
"activity-search-agent": activity_search_agent,
"booking-confirmation-agent": booking_confirmation_agent,
"booking-payment-agent": booking_payment_agent,
"booking-info-aggregation-agent": booking_info_aggregation_agent,
"final-coordinator": final_coordinator.agent,
}
return workflow, agent_map
async def _process_workflow_events(events, conversation_ids, response_ids):
"""Process workflow events and track interactions."""
workflow_output = None
async for event in events:
if isinstance(event, WorkflowOutputEvent):
workflow_output = event.data
# Handle Unicode characters that may not be displayable in Windows console
try:
print(f"\nWorkflow Output: {event.data}\n")
except UnicodeEncodeError:
output_str = str(event.data).encode("ascii", "replace").decode("ascii")
print(f"\nWorkflow Output: {output_str}\n")
elif isinstance(event, AgentRunUpdateEvent):
_track_agent_ids(event, event.executor_id, response_ids, conversation_ids)
return workflow_output
def _track_agent_ids(event, agent, response_ids, conversation_ids):
"""Track agent response and conversation IDs - supporting multiple responses per agent."""
if isinstance(event.data, AgentResponseUpdate):
# Check for conversation_id and response_id from raw_representation
# V2 API stores conversation_id directly on raw_representation (ChatResponseUpdate)
if hasattr(event.data, "raw_representation") and event.data.raw_representation:
raw = event.data.raw_representation
# Try conversation_id directly on raw representation
if hasattr(raw, "conversation_id") and raw.conversation_id:
# Only add if not already in the list
if raw.conversation_id not in conversation_ids[agent]:
conversation_ids[agent].append(raw.conversation_id)
# Extract response_id from the OpenAI event (available from first event)
if hasattr(raw, "raw_representation") and raw.raw_representation:
openai_event = raw.raw_representation
# Check if event has response object with id
if hasattr(openai_event, "response") and hasattr(openai_event.response, "id"):
# Only add if not already in the list
if openai_event.response.id not in response_ids[agent]:
response_ids[agent].append(openai_event.response.id)
async def create_and_run_workflow():
"""Run the workflow evaluation and display results.
Returns:
Dictionary containing agents data with conversation IDs, response IDs, and query information
"""
example_queries = [
"Plan a 3-day trip to Paris from December 15-18, 2025. Budget is $2000. Need hotel near Eiffel Tower, round-trip flights from New York JFK, and recommend 2-3 activities per day.",
"Find a budget hotel in Tokyo for January 5-10, 2026 under $150/night near Shibuya station, book activities including a sushi making class",
"Search for round-trip flights from Los Angeles to London departing March 20, 2026, returning March 27, 2026. Economy class, 2 passengers. Recommend tourist attractions and museums.",
]
query = example_queries[0]
print(f"Query: {query}\n")
result = await run_workflow_with_response_tracking(query)
# Create output data structure
output_data = {"agents": {}, "query": result["query"], "output": result.get("output", "")}
# Create agent-specific mappings - now with lists of IDs
all_agents = set(result["conversation_ids"].keys()) | set(result["response_ids"].keys())
for agent_name in all_agents:
output_data["agents"][agent_name] = {
"conversation_ids": result["conversation_ids"].get(agent_name, []),
"response_ids": result["response_ids"].get(agent_name, []),
"response_count": len(result["response_ids"].get(agent_name, [])),
}
print(f"\nTotal agents tracked: {len(output_data['agents'])}")
# Print summary of multiple responses
print("\n=== Multi-Response Summary ===")
for agent_name, agent_data in output_data["agents"].items():
response_count = agent_data["response_count"]
print(f"{agent_name}: {response_count} response(s)")
return output_data
if __name__ == "__main__":
asyncio.run(create_and_run_workflow())
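# --- Helper sketch (not part of the original sample) ---
# Assuming the output_data shape documented in create_and_run_workflow, the most
# recent response ID per agent is the typical input for downstream evaluation
# (see the companion evaluation script).
def final_response_ids(output_data: dict) -> dict[str, str]:
    """Map each tracked agent name to its most recent response ID."""
    return {
        agent_name: agent_data["response_ids"][-1]
        for agent_name, agent_data in output_data["agents"].items()
        if agent_data["response_ids"]
    }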


@@ -0,0 +1,220 @@
# Copyright (c) Microsoft. All rights reserved.
"""
Script to run multi-agent travel planning workflow and evaluate agent responses.
This script:
1. Executes the multi-agent workflow
2. Displays response data summary
3. Creates and runs evaluation with multiple evaluators
4. Monitors evaluation progress and displays results
"""
import asyncio
import os
import time
from azure.ai.projects import AIProjectClient
from azure.identity import DefaultAzureCredential
from dotenv import load_dotenv
from create_workflow import create_and_run_workflow
def print_section(title: str):
"""Print a formatted section header."""
print(f"\n{'='*80}")
print(f"{title}")
print(f"{'='*80}")
async def run_workflow():
"""Execute the multi-agent travel planning workflow.
Returns:
Dictionary containing workflow data with agent response IDs
"""
print_section("Step 1: Running Workflow")
print("Executing multi-agent travel planning workflow...")
print("This may take a few minutes...")
workflow_data = await create_and_run_workflow()
print("Workflow execution completed")
return workflow_data
def display_response_summary(workflow_data: dict):
"""Display summary of response data."""
print_section("Step 2: Response Data Summary")
print(f"Query: {workflow_data['query']}")
print(f"\nAgents tracked: {len(workflow_data['agents'])}")
for agent_name, agent_data in workflow_data['agents'].items():
response_count = agent_data['response_count']
print(f" {agent_name}: {response_count} response(s)")
def fetch_agent_responses(openai_client, workflow_data: dict, agent_names: list):
"""Fetch and display final responses from specified agents."""
print_section("Step 3: Fetching Agent Responses")
for agent_name in agent_names:
if agent_name not in workflow_data['agents']:
continue
agent_data = workflow_data['agents'][agent_name]
if not agent_data['response_ids']:
continue
final_response_id = agent_data['response_ids'][-1]
print(f"\n{agent_name}")
print(f" Response ID: {final_response_id}")
try:
response = openai_client.responses.retrieve(response_id=final_response_id)
content = response.output[-1].content[-1].text
truncated = content[:300] + "..." if len(content) > 300 else content
print(f" Content preview: {truncated}")
except Exception as e:
print(f" Error: {e}")
def create_evaluation(openai_client, model_deployment: str):
"""Create evaluation with multiple evaluators."""
print_section("Step 4: Creating Evaluation")
data_source_config = {"type": "azure_ai_source", "scenario": "responses"}
testing_criteria = [
{
"type": "azure_ai_evaluator",
"name": "relevance",
"evaluator_name": "builtin.relevance",
"initialization_parameters": {"deployment_name": model_deployment}
},
{
"type": "azure_ai_evaluator",
"name": "groundedness",
"evaluator_name": "builtin.groundedness",
"initialization_parameters": {"deployment_name": model_deployment}
},
{
"type": "azure_ai_evaluator",
"name": "tool_call_accuracy",
"evaluator_name": "builtin.tool_call_accuracy",
"initialization_parameters": {"deployment_name": model_deployment}
},
{
"type": "azure_ai_evaluator",
"name": "tool_output_utilization",
"evaluator_name": "builtin.tool_output_utilization",
"initialization_parameters": {"deployment_name": model_deployment}
},
]
eval_object = openai_client.evals.create(
name="Travel Workflow Multi-Evaluator Assessment",
data_source_config=data_source_config,
testing_criteria=testing_criteria,
)
evaluator_names = [criterion["name"] for criterion in testing_criteria]
print(f"Evaluation created: {eval_object.id}")
print(f"Evaluators ({len(evaluator_names)}): {', '.join(evaluator_names)}")
return eval_object
def run_evaluation(openai_client, eval_object, workflow_data: dict, agent_names: list):
"""Run evaluation on selected agent responses."""
print_section("Step 5: Running Evaluation")
selected_response_ids = []
for agent_name in agent_names:
if agent_name in workflow_data['agents']:
agent_data = workflow_data['agents'][agent_name]
if agent_data['response_ids']:
selected_response_ids.append(agent_data['response_ids'][-1])
print(f"Selected {len(selected_response_ids)} responses for evaluation")
data_source = {
"type": "azure_ai_responses",
"item_generation_params": {
"type": "response_retrieval",
"data_mapping": {"response_id": "{{item.resp_id}}"},
"source": {
"type": "file_content",
"content": [{"item": {"resp_id": resp_id}} for resp_id in selected_response_ids]
},
},
}
eval_run = openai_client.evals.runs.create(
eval_id=eval_object.id,
name="Multi-Agent Response Evaluation",
data_source=data_source
)
print(f"Evaluation run created: {eval_run.id}")
return eval_run
def monitor_evaluation(openai_client, eval_object, eval_run):
"""Monitor evaluation progress and display results."""
print_section("Step 6: Monitoring Evaluation")
print("Waiting for evaluation to complete...")
    while eval_run.status not in ["completed", "failed"]:
        time.sleep(5)
        eval_run = openai_client.evals.runs.retrieve(
            run_id=eval_run.id,
            eval_id=eval_object.id
        )
        print(f"Status: {eval_run.status}")
if eval_run.status == "completed":
print("\nEvaluation completed successfully")
print(f"Result counts: {eval_run.result_counts}")
print(f"\nReport URL: {eval_run.report_url}")
else:
print("\nEvaluation failed")
async def main():
"""Main execution flow."""
load_dotenv()
print("Travel Planning Workflow Evaluation")
workflow_data = await run_workflow()
display_response_summary(workflow_data)
project_client = AIProjectClient(
endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"],
credential=DefaultAzureCredential(),
api_version="2025-11-15-preview"
)
openai_client = project_client.get_openai_client()
agents_to_evaluate = ["hotel-search-agent", "flight-search-agent", "activity-search-agent"]
fetch_agent_responses(openai_client, workflow_data, agents_to_evaluate)
model_deployment = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "gpt-4o-mini")
eval_object = create_evaluation(openai_client, model_deployment)
eval_run = run_evaluation(openai_client, eval_object, workflow_data, agents_to_evaluate)
monitor_evaluation(openai_client, eval_object, eval_run)
print_section("Complete")
if __name__ == "__main__":
asyncio.run(main())
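# --- Optional drill-down (not part of the original sample; a hedged sketch) ---
# The OpenAI evals API exposes per-row results via runs.output_items.list; the
# exact attributes on each item vary by SDK version, so only identifiers and a
# status (if present) are printed here.
def print_output_items(openai_client, eval_id: str, run_id: str) -> None:
    """List per-response output items for a completed evaluation run."""
    items = openai_client.evals.runs.output_items.list(eval_id=eval_id, run_id=run_id)
    for item in items:
        print(item.id, getattr(item, "status", "unknown"))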


@@ -0,0 +1,42 @@
# Agent Examples
This folder contains examples demonstrating how to create and use agents with different chat clients from the Agent Framework. Each sub-folder focuses on a specific provider and client type, showing various capabilities like function tools, code interpreter, thread management, structured outputs, image processing, web search, Model Context Protocol (MCP) integration, and more.
## Examples by Provider
### Azure AI Foundry Examples
| Folder | Description |
|--------|-------------|
| **[`azure_ai_agent/`](azure_ai_agent/)** | Create agents using Azure AI Agent Service (based on `azure-ai-agents` V1 package) including function tools, code interpreter, MCP integration, thread management, and more. |
| **[`azure_ai/`](azure_ai/)** | Create agents using Azure AI Agent Service (based on `azure-ai-projects` [V2](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/CHANGELOG.md#200b1-2025-11-11) package) including function tools, code interpreter, MCP integration, thread management, and more. |
### Microsoft Copilot Studio Examples
| Folder | Description |
|--------|-------------|
| **[`copilotstudio/`](copilotstudio/)** | Create agents using Microsoft Copilot Studio with streaming and non-streaming responses, authentication handling, and explicit configuration options |
### Azure OpenAI Examples
| Folder | Description |
|--------|-------------|
| **[`azure_openai/`](azure_openai/)** | Create agents using Azure OpenAI APIs with multiple client types (Assistants, Chat, and Responses clients) supporting function tools, code interpreter, thread management, and more |
### OpenAI Examples
| Folder | Description |
|--------|-------------|
| **[`openai/`](openai/)** | Create agents using OpenAI APIs with comprehensive examples including Assistants, Chat, and Responses clients featuring function tools, code interpreter, file search, web search, MCP integration, image analysis/generation, structured outputs, reasoning, and thread management |
### Anthropic Examples
| Folder | Description |
|--------|-------------|
| **[`anthropic/`](anthropic/)** | Create agents using Anthropic's Claude models with the native `AnthropicClient`, demonstrating tool calling, hosted tools, extended thinking, and Skills |
### Custom Implementation Examples
| Folder | Description |
|--------|-------------|
| **[`custom/`](custom/)** | Create custom agents and chat clients by extending the base framework classes, showing complete control over agent behavior and backend integration |
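## Common Pattern
Regardless of provider, the samples share one shape: construct a chat client, wrap it as an agent with instructions (and optionally tools), then call `run` or `run_stream`. A minimal sketch using the Anthropic client from this folder's samples (any other provider client can be substituted):
```python
import asyncio
from agent_framework.anthropic import AnthropicClient
async def main() -> None:
    agent = AnthropicClient().as_agent(
        name="Assistant",
        instructions="You are a helpful assistant.",
    )
    result = await agent.run("Hello!")
    print(result)
if __name__ == "__main__":
    asyncio.run(main())
```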


@@ -0,0 +1,31 @@
# A2A Agent Examples
This folder contains examples demonstrating how to create and use agents with the A2A (Agent2Agent) protocol from the `agent_framework` package to communicate with remote A2A agents.
For more information about the A2A protocol specification, visit: https://a2a-protocol.org/latest/
## Examples
| File | Description |
|------|-------------|
| [`agent_with_a2a.py`](agent_with_a2a.py) | The simplest way to connect to and use a single A2A agent. Demonstrates agent discovery via agent cards and basic message exchange using the A2A protocol. |
## Environment Variables
Make sure to set the following environment variables before running the example:
### Required
- `A2A_AGENT_HOST`: URL of a single A2A agent (for simple sample, e.g., `http://localhost:5001/`)
## Quick Testing with .NET A2A Servers
For quick testing and demonstration, you can use the pre-built .NET A2A servers from this repository:
**Quick Testing Reference**: Use the .NET A2A Client Server sample at:
`..\agent-framework\dotnet\samples\A2AClientServer`
### Run Python A2A Sample
```powershell
# Simple A2A sample (single agent)
uv run python agent_with_a2a.py
```


@@ -0,0 +1,78 @@
# Copyright (c) Microsoft. All rights reserved.
import asyncio
import os
import httpx
from a2a.client import A2ACardResolver
from agent_framework.a2a import A2AAgent
"""
Agent2Agent (A2A) Protocol Integration Sample
This sample demonstrates how to connect to and communicate with external agents using
the A2A protocol. A2A is a standardized communication protocol that enables interoperability
between different agent systems, allowing agents built with different frameworks and
technologies to communicate seamlessly.
For more information about the A2A protocol specification, visit: https://a2a-protocol.org/latest/
Key concepts demonstrated:
- Discovering A2A-compliant agents using AgentCard resolution
- Creating A2AAgent instances to wrap external A2A endpoints
- Converting Agent Framework messages to A2A protocol format
- Handling A2A responses (Messages and Tasks) back to framework types
To run this sample:
1. Set the A2A_AGENT_HOST environment variable to point to an A2A-compliant agent endpoint
Example: export A2A_AGENT_HOST="https://your-a2a-agent.example.com"
2. Ensure the target agent exposes its AgentCard at /.well-known/agent.json
3. Run: uv run python agent_with_a2a.py
The sample will:
- Connect to the specified A2A agent endpoint
- Retrieve and parse the agent's capabilities via its AgentCard
- Send a message using the A2A protocol
- Display the agent's response
Visit the README.md for more details on setting up and running A2A agents.
"""
async def main():
"""Demonstrates connecting to and communicating with an A2A-compliant agent."""
# Get A2A agent host from environment
a2a_agent_host = os.getenv("A2A_AGENT_HOST")
if not a2a_agent_host:
raise ValueError("A2A_AGENT_HOST environment variable is not set")
print(f"Connecting to A2A agent at: {a2a_agent_host}")
# Initialize A2ACardResolver
async with httpx.AsyncClient(timeout=60.0) as http_client:
resolver = A2ACardResolver(httpx_client=http_client, base_url=a2a_agent_host)
# Get agent card
agent_card = await resolver.get_agent_card()
print(f"Found agent: {agent_card.name} - {agent_card.description}")
# Create A2A agent instance
agent = A2AAgent(
name=agent_card.name,
description=agent_card.description,
agent_card=agent_card,
url=a2a_agent_host,
)
# Invoke the agent and output the result
print("\nSending message to A2A agent...")
response = await agent.run("What are your capabilities?")
# Print the response
print("\nAgent Response:")
for message in response.messages:
print(message.text)
if __name__ == "__main__":
asyncio.run(main())


@@ -0,0 +1,24 @@
# Anthropic Examples
This folder contains examples demonstrating how to use Anthropic's Claude models with the Agent Framework.
## Examples
| File | Description |
|------|-------------|
| [`anthropic_basic.py`](anthropic_basic.py) | Demonstrates how to setup a simple agent using the AnthropicClient, with both streaming and non-streaming responses. |
| [`anthropic_advanced.py`](anthropic_advanced.py) | Shows advanced usage of the AnthropicClient, including hosted tools and `thinking`. |
| [`anthropic_skills.py`](anthropic_skills.py) | Illustrates how to use Anthropic-managed Skills with an agent, including the Code Interpreter tool and file generation and saving. |
| [`anthropic_foundry.py`](anthropic_foundry.py) | Example of using Foundry's Anthropic integration with the Agent Framework. |
## Environment Variables
Set the following environment variables before running the examples:
- `ANTHROPIC_API_KEY`: Your Anthropic API key (get one from [Anthropic Console](https://console.anthropic.com/))
- `ANTHROPIC_CHAT_MODEL_ID`: The Claude model to use (e.g., `claude-haiku-4-5`, `claude-sonnet-4-5-20250929`)
Or, for Foundry:
- `ANTHROPIC_FOUNDRY_API_KEY`: Your Foundry Anthropic API key
- `ANTHROPIC_FOUNDRY_ENDPOINT`: The endpoint URL for your Foundry Anthropic resource
- `ANTHROPIC_CHAT_MODEL_ID`: The Claude model to use in Foundry (e.g., `claude-haiku-4-5`)
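## Quickstart (Foundry)
With the Foundry variables above set, the integration reduces to this minimal sketch (mirroring `anthropic_foundry.py`):
```python
import asyncio
from agent_framework.anthropic import AnthropicClient
from anthropic import AsyncAnthropicFoundry
async def main() -> None:
    # AsyncAnthropicFoundry reads ANTHROPIC_FOUNDRY_API_KEY and
    # ANTHROPIC_FOUNDRY_ENDPOINT from the environment.
    agent = AnthropicClient(anthropic_client=AsyncAnthropicFoundry()).as_agent(
        name="Assistant",
        instructions="You are a helpful assistant.",
    )
    print(await agent.run("Hello!"))
if __name__ == "__main__":
    asyncio.run(main())
```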


@@ -0,0 +1,54 @@
# Copyright (c) Microsoft. All rights reserved.
import asyncio
from agent_framework import HostedMCPTool, HostedWebSearchTool, TextReasoningContent, UsageContent
from agent_framework.anthropic import AnthropicChatOptions, AnthropicClient
"""
Anthropic Chat Agent Example
This sample demonstrates using Anthropic with:
- Setting up an Anthropic-based agent with hosted tools.
- Using the `thinking` feature.
- Displaying both thinking and usage information during streaming responses.
"""
async def main() -> None:
"""Example of streaming response (get results as they are generated)."""
agent = AnthropicClient[AnthropicChatOptions]().as_agent(
name="DocsAgent",
instructions="You are a helpful agent for both Microsoft docs questions and general questions.",
tools=[
HostedMCPTool(
name="Microsoft Learn MCP",
url="https://learn.microsoft.com/api/mcp",
),
HostedWebSearchTool(),
],
default_options={
            # Anthropic requires a value for max_tokens; the client defaults it
            # to 1024, but you can override it like this:
"max_tokens": 20000,
"thinking": {"type": "enabled", "budget_tokens": 10000},
},
)
query = "Can you compare Python decorators with C# attributes?"
print(f"User: {query}")
print("Agent: ", end="", flush=True)
async for chunk in agent.run_stream(query):
for content in chunk.contents:
if isinstance(content, TextReasoningContent):
print(f"\033[32m{content.text}\033[0m", end="", flush=True)
if isinstance(content, UsageContent):
print(f"\n\033[34m[Usage so far: {content.usage_details}]\033[0m\n", end="", flush=True)
if chunk.text:
print(chunk.text, end="", flush=True)
print("\n")
if __name__ == "__main__":
asyncio.run(main())


@@ -0,0 +1,69 @@
# Copyright (c) Microsoft. All rights reserved.
import asyncio
from random import randint
from typing import Annotated
from agent_framework.anthropic import AnthropicClient
"""
Anthropic Chat Agent Example
This sample demonstrates using Anthropic with an agent and a single custom tool.
"""
def get_weather(
location: Annotated[str, "The location to get the weather for."],
) -> str:
"""Get the weather for a given location."""
conditions = ["sunny", "cloudy", "rainy", "stormy"]
return f"The weather in {location} is {conditions[randint(0, 3)]} with a high of {randint(10, 30)}°C."
async def non_streaming_example() -> None:
"""Example of non-streaming response (get the complete result at once)."""
print("=== Non-streaming Response Example ===")
    agent = AnthropicClient().as_agent(
name="WeatherAgent",
instructions="You are a helpful weather agent.",
tools=get_weather,
)
query = "What's the weather like in Seattle?"
print(f"User: {query}")
result = await agent.run(query)
print(f"Result: {result}\n")
async def streaming_example() -> None:
"""Example of streaming response (get results as they are generated)."""
print("=== Streaming Response Example ===")
    agent = AnthropicClient().as_agent(
name="WeatherAgent",
instructions="You are a helpful weather agent.",
tools=get_weather,
)
query = "What's the weather like in Portland and in Paris?"
print(f"User: {query}")
print("Agent: ", end="", flush=True)
async for chunk in agent.run_stream(query):
if chunk.text:
print(chunk.text, end="", flush=True)
print("\n")
async def main() -> None:
print("=== Anthropic Example ===")
await streaming_example()
await non_streaming_example()
if __name__ == "__main__":
asyncio.run(main())


@@ -0,0 +1,65 @@
# Copyright (c) Microsoft. All rights reserved.
import asyncio
from agent_framework import HostedMCPTool, HostedWebSearchTool, TextReasoningContent, UsageContent
from agent_framework.anthropic import AnthropicClient
from anthropic import AsyncAnthropicFoundry
"""
Anthropic Foundry Chat Agent Example
This sample demonstrates using Anthropic with:
- Setting up an Anthropic-based agent with hosted tools.
- Using the `thinking` feature.
- Displaying both thinking and usage information during streaming responses.
This example requires `anthropic>=0.74.0` and an endpoint in Foundry for Anthropic.
To use the Foundry integration ensure you have the following environment variables set:
- ANTHROPIC_FOUNDRY_API_KEY
    Alternatively you can pass an azure_ad_token_provider function to the AsyncAnthropicFoundry constructor.
- ANTHROPIC_FOUNDRY_ENDPOINT
Should be something like https://<your-resource-name>.services.ai.azure.com/anthropic/
- ANTHROPIC_CHAT_MODEL_ID
Should be something like claude-haiku-4-5
"""
async def main() -> None:
"""Example of streaming response (get results as they are generated)."""
agent = AnthropicClient(anthropic_client=AsyncAnthropicFoundry()).as_agent(
name="DocsAgent",
instructions="You are a helpful agent for both Microsoft docs questions and general questions.",
tools=[
HostedMCPTool(
name="Microsoft Learn MCP",
url="https://learn.microsoft.com/api/mcp",
),
HostedWebSearchTool(),
],
default_options={
            # Anthropic requires a value for max_tokens; the client defaults it
            # to 1024, but you can override it like this:
"max_tokens": 20000,
"thinking": {"type": "enabled", "budget_tokens": 10000},
},
)
query = "Can you compare Python decorators with C# attributes?"
print(f"User: {query}")
print("Agent: ", end="", flush=True)
async for chunk in agent.run_stream(query):
for content in chunk.contents:
if isinstance(content, TextReasoningContent):
print(f"\033[32m{content.text}\033[0m", end="", flush=True)
if isinstance(content, UsageContent):
print(f"\n\033[34m[Usage so far: {content.usage_details}]\033[0m\n", end="", flush=True)
if chunk.text:
print(chunk.text, end="", flush=True)
print("\n")
if __name__ == "__main__":
asyncio.run(main())


@@ -0,0 +1,88 @@
# Copyright (c) Microsoft. All rights reserved.
import asyncio
import logging
from pathlib import Path
from agent_framework import HostedCodeInterpreterTool, HostedFileContent
from agent_framework.anthropic import AnthropicChatOptions, AnthropicClient
logger = logging.getLogger(__name__)
"""
Anthropic Skills Agent Example
This sample demonstrates using Anthropic with:
- Listing and using Anthropic-managed Skills.
- One approach to add additional beta flags.
    You can also set additional_chat_options with "additional_beta_flags" per request.
- Creating an agent with the Code Interpreter tool and a Skill.
- Catching and downloading generated files from the agent.
"""
async def main() -> None:
"""Example of streaming response (get results as they are generated)."""
client = AnthropicClient[AnthropicChatOptions](additional_beta_flags=["skills-2025-10-02"])
# List Anthropic-managed Skills
skills = await client.anthropic_client.beta.skills.list(source="anthropic", betas=["skills-2025-10-02"])
for skill in skills.data:
print(f"{skill.source}: {skill.id} (version: {skill.latest_version})")
    # Create an agent with the pptx skill enabled
# Skills also need the code interpreter tool to function
agent = client.as_agent(
name="DocsAgent",
instructions="You are a helpful agent for creating powerpoint presentations.",
tools=HostedCodeInterpreterTool(),
default_options={
"max_tokens": 20000,
"thinking": {"type": "enabled", "budget_tokens": 10000},
"container": {"skills": [{"type": "anthropic", "skill_id": "pptx", "version": "latest"}]},
},
)
print(
"The agent output will use the following colors:\n"
"\033[0mUser: (default)\033[0m\n"
"\033[0mAgent: (default)\033[0m\n"
"\033[32mAgent Reasoning: (green)\033[0m\n"
"\033[34mUsage: (blue)\033[0m\n"
)
query = "Create a presentation about renewable energy with 5 slides"
print(f"User: {query}")
print("Agent: ", end="", flush=True)
files: list[HostedFileContent] = []
async for chunk in agent.run_stream(query):
for content in chunk.contents:
match content.type:
case "text":
print(content.text, end="", flush=True)
case "text_reasoning":
print(f"\033[32m{content.text}\033[0m", end="", flush=True)
case "usage":
print(f"\n\033[34m[Usage so far: {content.usage_details}]\033[0m\n", end="", flush=True)
case "hosted_file":
# Catch generated files
files.append(content)
                case _:
                    logger.debug("Unhandled content type: %s", content.type)
print("\n")
if files:
        # Save to a new file (it will be created in the folder where you run this script)
        # When running this sample multiple times, the files will be overwritten
        # Since this sample uses the pptx skill, the files will be PowerPoint presentations
print("Generated files:")
for idx, file in enumerate(files):
file_content = await client.anthropic_client.beta.files.download(
file_id=file.file_id, betas=["files-api-2025-04-14"]
)
with open(Path(__file__).parent / f"renewable_energy-{idx}.pptx", "wb") as f:
await file_content.write_to_file(f.name)
print(f"File {idx}: renewable_energy-{idx}.pptx saved to disk.")
if __name__ == "__main__":
asyncio.run(main())


@@ -0,0 +1,95 @@
# Azure AI Agent Examples
This folder contains examples demonstrating different ways to create and use agents with the Azure AI client from the `agent_framework.azure` package. These examples use the `AzureAIClient` with the `azure-ai-projects` 2.x (V2) API surface (see [changelog](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/CHANGELOG.md#200b1-2025-11-11)). For V1 (`azure-ai-agents` 1.x) samples using `AzureAIAgentClient`, see the [Azure AI V1 examples folder](../azure_ai_agent/).
## Examples
| File | Description |
|------|-------------|
| [`azure_ai_basic.py`](azure_ai_basic.py) | The simplest way to create an agent using `AzureAIProjectAgentProvider`. Demonstrates both streaming and non-streaming responses with function tools. Shows automatic agent creation and basic weather functionality. |
| [`azure_ai_provider_methods.py`](azure_ai_provider_methods.py) | Comprehensive guide to `AzureAIProjectAgentProvider` methods: `create_agent()` for creating new agents, `get_agent()` for retrieving existing agents (by name, reference, or details), and `as_agent()` for wrapping SDK objects without HTTP calls. |
| [`azure_ai_use_latest_version.py`](azure_ai_use_latest_version.py) | Demonstrates how to reuse the latest version of an existing agent instead of creating a new agent version on each instantiation by using `provider.get_agent()` to retrieve the latest version. |
| [`azure_ai_with_agent_as_tool.py`](azure_ai_with_agent_as_tool.py) | Shows how to use the agent-as-tool pattern with Azure AI agents, where one agent delegates work to specialized sub-agents wrapped as tools using `as_tool()`. Demonstrates hierarchical agent architectures. |
| [`azure_ai_with_agent_to_agent.py`](azure_ai_with_agent_to_agent.py) | Shows how to use Agent-to-Agent (A2A) capabilities with Azure AI agents to enable communication with other agents using the A2A protocol. Requires an A2A connection configured in your Azure AI project. |
| [`azure_ai_with_azure_ai_search.py`](azure_ai_with_azure_ai_search.py) | Shows how to use Azure AI Search with Azure AI agents to search through indexed data and answer user questions with proper citations. Requires an Azure AI Search connection and index configured in your Azure AI project. |
| [`azure_ai_with_bing_grounding.py`](azure_ai_with_bing_grounding.py) | Shows how to use Bing Grounding search with Azure AI agents to search the web for current information and provide grounded responses with citations. Requires a Bing connection configured in your Azure AI project. |
| [`azure_ai_with_bing_custom_search.py`](azure_ai_with_bing_custom_search.py) | Shows how to use Bing Custom Search with Azure AI agents to search custom search instances and provide responses with relevant results. Requires a Bing Custom Search connection and instance configured in your Azure AI project. |
| [`azure_ai_with_browser_automation.py`](azure_ai_with_browser_automation.py) | Shows how to use Browser Automation with Azure AI agents to perform automated web browsing tasks and provide responses based on web interactions. Requires a Browser Automation connection configured in your Azure AI project. |
| [`azure_ai_with_code_interpreter.py`](azure_ai_with_code_interpreter.py) | Shows how to use the `HostedCodeInterpreterTool` with Azure AI agents to write and execute Python code for mathematical problem solving and data analysis. |
| [`azure_ai_with_code_interpreter_file_generation.py`](azure_ai_with_code_interpreter_file_generation.py) | Shows how to retrieve file IDs from code interpreter generated files using both streaming and non-streaming approaches. |
| [`azure_ai_with_code_interpreter_file_download.py`](azure_ai_with_code_interpreter_file_download.py) | Shows how to download files generated by code interpreter using the OpenAI containers API. |
| [`azure_ai_with_content_filtering.py`](azure_ai_with_content_filtering.py) | Shows how to enable content filtering (RAI policy) on Azure AI agents using `RaiConfig`. Requires creating an RAI policy in Azure AI Foundry portal first. |
| [`azure_ai_with_existing_agent.py`](azure_ai_with_existing_agent.py) | Shows how to work with a pre-existing agent by providing the agent name and version to the Azure AI client. Demonstrates agent reuse patterns for production scenarios. |
| [`azure_ai_with_existing_conversation.py`](azure_ai_with_existing_conversation.py) | Demonstrates how to use an existing conversation created on the service side with Azure AI agents. Shows two approaches: specifying conversation ID at the client level and using AgentThread with an existing conversation ID. |
| [`azure_ai_with_application_endpoint.py`](azure_ai_with_application_endpoint.py) | Demonstrates calling the Azure AI application-scoped endpoint. |
| [`azure_ai_with_explicit_settings.py`](azure_ai_with_explicit_settings.py) | Shows how to create an agent with explicitly configured `AzureAIClient` settings, including project endpoint, model deployment, and credentials rather than relying on environment variable defaults. |
| [`azure_ai_with_file_search.py`](azure_ai_with_file_search.py) | Shows how to use the `HostedFileSearchTool` with Azure AI agents to upload files, create vector stores, and enable agents to search through uploaded documents to answer user questions. |
| [`azure_ai_with_hosted_mcp.py`](azure_ai_with_hosted_mcp.py) | Shows how to integrate hosted Model Context Protocol (MCP) tools with Azure AI Agent. |
| [`azure_ai_with_local_mcp.py`](azure_ai_with_local_mcp.py) | Shows how to integrate local Model Context Protocol (MCP) tools with Azure AI agents. |
| [`azure_ai_with_response_format.py`](azure_ai_with_response_format.py) | Shows how to use structured outputs (response format) with Azure AI agents using Pydantic models to enforce specific response schemas. |
| [`azure_ai_with_runtime_json_schema.py`](azure_ai_with_runtime_json_schema.py) | Shows how to use structured outputs (response format) with Azure AI agents using a JSON schema to enforce specific response schemas. |
| [`azure_ai_with_search_context_agentic.py`](../../context_providers/azure_ai_search/azure_ai_with_search_context_agentic.py) | Shows how to use AzureAISearchContextProvider with agentic mode. Uses Knowledge Bases for multi-hop reasoning across documents with query planning. Recommended for most scenarios - slightly slower with more token consumption for query planning, but more accurate results. |
| [`azure_ai_with_search_context_semantic.py`](../../context_providers/azure_ai_search/azure_ai_with_search_context_semantic.py) | Shows how to use AzureAISearchContextProvider with semantic mode. Fast hybrid search with vector + keyword search and semantic ranking for RAG. Best for simple queries where speed is critical. |
| [`azure_ai_with_sharepoint.py`](azure_ai_with_sharepoint.py) | Shows how to use SharePoint grounding with Azure AI agents to search through SharePoint content and answer user questions with proper citations. Requires a SharePoint connection configured in your Azure AI project. |
| [`azure_ai_with_thread.py`](azure_ai_with_thread.py) | Demonstrates thread management with Azure AI agents, including automatic thread creation for stateless conversations and explicit thread management for maintaining conversation context across multiple interactions. |
| [`azure_ai_with_image_generation.py`](azure_ai_with_image_generation.py) | Shows how to use the `ImageGenTool` with Azure AI agents to generate images based on text prompts. |
| [`azure_ai_with_memory_search.py`](azure_ai_with_memory_search.py) | Shows how to use memory search functionality with Azure AI agents for conversation persistence. Demonstrates creating memory stores and enabling agents to search through conversation history. |
| [`azure_ai_with_microsoft_fabric.py`](azure_ai_with_microsoft_fabric.py) | Shows how to use Microsoft Fabric with Azure AI agents to query Fabric data sources and provide responses based on data analysis. Requires a Microsoft Fabric connection configured in your Azure AI project. |
| [`azure_ai_with_openapi.py`](azure_ai_with_openapi.py) | Shows how to integrate OpenAPI specifications with Azure AI agents using dictionary-based tool configuration. Demonstrates using external REST APIs for dynamic data lookup. |
| [`azure_ai_with_reasoning.py`](azure_ai_with_reasoning.py) | Shows how to enable reasoning for a model that supports it. |
| [`azure_ai_with_web_search.py`](azure_ai_with_web_search.py) | Shows how to use the `HostedWebSearchTool` with Azure AI agents to perform web searches and retrieve up-to-date information from the internet. |
## Environment Variables
Before running the examples, you need to set up your environment variables. You can do this in one of two ways:
### Option 1: Using a .env file (Recommended)
1. Copy the `.env.example` file from the `python` directory to create a `.env` file:
```bash
cp ../../../../.env.example ../../../../.env
```
2. Edit the `.env` file and add your values:
```env
AZURE_AI_PROJECT_ENDPOINT="your-project-endpoint"
AZURE_AI_MODEL_DEPLOYMENT_NAME="your-model-deployment-name"
```
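Depending on how you launch the samples, the `.env` file may be picked up automatically. If it is not, you can load it yourself before the settings are read; below is a minimal sketch, assuming the `python-dotenv` package is installed (`pip install python-dotenv`):
```python
# Minimal sketch: load variables from a .env file into os.environ.
import os

from dotenv import load_dotenv

load_dotenv()  # looks for a .env file in the current working directory by default
print("Endpoint loaded:", bool(os.environ.get("AZURE_AI_PROJECT_ENDPOINT")))
```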
### Option 2: Using environment variables directly
Set the environment variables in your shell:
```bash
export AZURE_AI_PROJECT_ENDPOINT="your-project-endpoint"
export AZURE_AI_MODEL_DEPLOYMENT_NAME="your-model-deployment-name"
```
### Required Variables
- `AZURE_AI_PROJECT_ENDPOINT`: Your Azure AI project endpoint (required for all examples)
- `AZURE_AI_MODEL_DEPLOYMENT_NAME`: The name of your model deployment (required for all examples)
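As a quick sanity check before running a sample, you can verify that both required variables are set; a minimal sketch:
```python
# Minimal sketch: fail fast with a clear message if a required variable is missing.
import os

REQUIRED = ("AZURE_AI_PROJECT_ENDPOINT", "AZURE_AI_MODEL_DEPLOYMENT_NAME")
missing = [name for name in REQUIRED if not os.environ.get(name)]
if missing:
    raise SystemExit(f"Missing required environment variables: {', '.join(missing)}")
```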
## Authentication
All examples use `AzureCliCredential` for authentication by default. Before running the examples:
1. Install the Azure CLI
2. Run `az login` to authenticate with your Azure account
3. Ensure you have appropriate permissions to the Azure AI project
Alternatively, you can replace `AzureCliCredential` with other authentication options like `DefaultAzureCredential` or environment-based credentials.
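For example, swapping in `DefaultAzureCredential` is a small change to any sample; the sketch below mirrors the samples' structure (the agent name and instructions are placeholders):
```python
# Minimal sketch: use DefaultAzureCredential (azure-identity) instead of
# AzureCliCredential; it tries several credential sources, including
# environment variables and managed identity.
import asyncio

from agent_framework.azure import AzureAIProjectAgentProvider
from azure.identity.aio import DefaultAzureCredential


async def main() -> None:
    async with (
        DefaultAzureCredential() as credential,
        AzureAIProjectAgentProvider(credential=credential) as provider,
    ):
        agent = await provider.create_agent(
            name="MyAgent",  # placeholder name
            instructions="You are a helpful assistant.",
        )
        print(await agent.run("Hello!"))


if __name__ == "__main__":
    asyncio.run(main())
```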
## Running the Examples
Each example can be run independently. Navigate to this directory and run any example:
```bash
python azure_ai_basic.py
python azure_ai_with_code_interpreter.py
# ... etc
```
The examples demonstrate various patterns for working with Azure AI agents, from basic usage to advanced scenarios like thread management and structured outputs.


@@ -0,0 +1,82 @@
# Copyright (c) Microsoft. All rights reserved.
import asyncio
from random import randint
from typing import Annotated
from agent_framework.azure import AzureAIProjectAgentProvider
from azure.identity.aio import AzureCliCredential
from pydantic import Field
"""
Azure AI Agent Basic Example
This sample demonstrates basic usage of AzureAIProjectAgentProvider.
Shows both streaming and non-streaming responses with function tools.
"""
def get_weather(
location: Annotated[str, Field(description="The location to get the weather for.")],
) -> str:
"""Get the weather for a given location."""
conditions = ["sunny", "cloudy", "rainy", "stormy"]
return f"The weather in {location} is {conditions[randint(0, 3)]} with a high of {randint(10, 30)}°C."
async def non_streaming_example() -> None:
"""Example of non-streaming response (get the complete result at once)."""
print("=== Non-streaming Response Example ===")
# For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred
# authentication option.
async with (
AzureCliCredential() as credential,
AzureAIProjectAgentProvider(credential=credential) as provider,
):
agent = await provider.create_agent(
name="BasicWeatherAgent",
instructions="You are a helpful weather agent.",
tools=get_weather,
)
query = "What's the weather like in Seattle?"
print(f"User: {query}")
result = await agent.run(query)
print(f"Agent: {result}\n")
async def streaming_example() -> None:
"""Example of streaming response (get results as they are generated)."""
print("=== Streaming Response Example ===")
# For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred
# authentication option.
async with (
AzureCliCredential() as credential,
AzureAIProjectAgentProvider(credential=credential) as provider,
):
agent = await provider.create_agent(
name="BasicWeatherAgent",
instructions="You are a helpful weather agent.",
tools=get_weather,
)
query = "What's the weather like in Tokyo?"
print(f"User: {query}")
print("Agent: ", end="", flush=True)
async for chunk in agent.run_stream(query):
if chunk.text:
print(chunk.text, end="", flush=True)
print("\n")
async def main() -> None:
print("=== Basic Azure AI Chat Client Agent Example ===")
await non_streaming_example()
await streaming_example()
if __name__ == "__main__":
asyncio.run(main())


@@ -0,0 +1,249 @@
# Copyright (c) Microsoft. All rights reserved.
import asyncio
import os
from random import randint
from typing import Annotated
from agent_framework.azure import AzureAIProjectAgentProvider
from azure.ai.projects.aio import AIProjectClient
from azure.ai.projects.models import AgentReference, PromptAgentDefinition
from azure.identity.aio import AzureCliCredential
from pydantic import Field
"""
Azure AI Project Agent Provider Methods Example
This sample demonstrates the three main methods of AzureAIProjectAgentProvider:
1. create_agent() - Create a new agent on the Azure AI service
2. get_agent() - Retrieve an existing agent from the service
3. as_agent() - Wrap an SDK agent version object without making HTTP calls
It also shows how to use a single provider instance to spawn multiple agents
with different configurations, which is efficient for multi-agent scenarios.
Each method returns a ChatAgent that can be used for conversations.
"""
def get_weather(
location: Annotated[str, Field(description="The location to get the weather for.")],
) -> str:
"""Get the weather for a given location."""
conditions = ["sunny", "cloudy", "rainy", "stormy"]
    return f"The weather in {location} is {conditions[randint(0, 3)]} with a high of {randint(10, 30)}°C."
async def create_agent_example() -> None:
"""Example of using provider.create_agent() to create a new agent.
This method creates a new agent version on the Azure AI service and returns
a ChatAgent. Use this when you want to create a fresh agent with
specific configuration.
"""
print("=== provider.create_agent() Example ===")
async with (
AzureCliCredential() as credential,
AzureAIProjectAgentProvider(credential=credential) as provider,
):
# Create a new agent with custom configuration
agent = await provider.create_agent(
name="WeatherAssistant",
instructions="You are a helpful weather assistant. Always be concise.",
description="An agent that provides weather information.",
tools=get_weather,
)
print(f"Created agent: {agent.name}")
print(f"Agent ID: {agent.id}")
query = "What's the weather in Paris?"
print(f"User: {query}")
result = await agent.run(query)
print(f"Agent: {result}\n")
async def get_agent_by_name_example() -> None:
"""Example of using provider.get_agent(name=...) to retrieve an agent by name.
This method fetches the latest version of an existing agent from the service.
Use this when you know the agent name and want to use the most recent version.
"""
print("=== provider.get_agent(name=...) Example ===")
async with (
AzureCliCredential() as credential,
AIProjectClient(endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], credential=credential) as project_client,
):
# First, create an agent using the SDK directly
created_agent = await project_client.agents.create_version(
agent_name="TestAgentByName",
description="Test agent for get_agent by name example.",
definition=PromptAgentDefinition(
model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"],
instructions="You are a helpful assistant. End each response with '- Your Assistant'.",
),
)
try:
# Get the agent using the provider by name (fetches latest version)
provider = AzureAIProjectAgentProvider(project_client=project_client)
agent = await provider.get_agent(name=created_agent.name)
print(f"Retrieved agent: {agent.name}")
query = "Hello!"
print(f"User: {query}")
result = await agent.run(query)
print(f"Agent: {result}\n")
finally:
# Clean up the agent
await project_client.agents.delete_version(
agent_name=created_agent.name, agent_version=created_agent.version
)
async def get_agent_by_reference_example() -> None:
"""Example of using provider.get_agent(reference=...) to retrieve a specific agent version.
This method fetches a specific version of an agent using an AgentReference.
Use this when you need to use a particular version of an agent.
"""
print("=== provider.get_agent(reference=...) Example ===")
async with (
AzureCliCredential() as credential,
AIProjectClient(endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], credential=credential) as project_client,
):
# First, create an agent using the SDK directly
created_agent = await project_client.agents.create_version(
agent_name="TestAgentByReference",
description="Test agent for get_agent by reference example.",
definition=PromptAgentDefinition(
model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"],
instructions="You are a helpful assistant. Always respond in uppercase.",
),
)
try:
# Get the agent using an AgentReference with specific version
provider = AzureAIProjectAgentProvider(project_client=project_client)
reference = AgentReference(name=created_agent.name, version=created_agent.version)
agent = await provider.get_agent(reference=reference)
print(f"Retrieved agent: {agent.name} (version via reference)")
query = "Say hello"
print(f"User: {query}")
result = await agent.run(query)
print(f"Agent: {result}\n")
finally:
# Clean up the agent
await project_client.agents.delete_version(
agent_name=created_agent.name, agent_version=created_agent.version
)
async def multiple_agents_example() -> None:
"""Example of using a single provider to spawn multiple agents.
A single provider instance can create multiple agents with different
configurations.
"""
print("=== Multiple Agents from Single Provider Example ===")
async with (
AzureCliCredential() as credential,
AzureAIProjectAgentProvider(credential=credential) as provider,
):
# Create multiple specialized agents from the same provider
weather_agent = await provider.create_agent(
name="WeatherExpert",
instructions="You are a weather expert. Provide brief weather information.",
tools=get_weather,
)
translator_agent = await provider.create_agent(
name="Translator",
instructions="You are a translator. Translate any text to French. Only output the translation.",
)
poet_agent = await provider.create_agent(
name="Poet",
instructions="You are a poet. Respond to everything with a short haiku.",
)
print(f"Created agents: {weather_agent.name}, {translator_agent.name}, {poet_agent.name}\n")
# Use each agent for its specialty
weather_query = "What's the weather in London?"
print(f"User to WeatherExpert: {weather_query}")
weather_result = await weather_agent.run(weather_query)
print(f"WeatherExpert: {weather_result}\n")
translate_query = "Hello, how are you today?"
print(f"User to Translator: {translate_query}")
translate_result = await translator_agent.run(translate_query)
print(f"Translator: {translate_result}\n")
poet_query = "Tell me about the morning sun"
print(f"User to Poet: {poet_query}")
poet_result = await poet_agent.run(poet_query)
print(f"Poet: {poet_result}\n")
async def as_agent_example() -> None:
"""Example of using provider.as_agent() to wrap an SDK object without HTTP calls.
This method wraps an existing AgentVersionDetails into a ChatAgent without
making additional HTTP calls. Use this when you already have the full
AgentVersionDetails from a previous SDK operation.
"""
print("=== provider.as_agent() Example ===")
async with (
AzureCliCredential() as credential,
AIProjectClient(endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], credential=credential) as project_client,
):
# Create an agent using the SDK directly - this returns AgentVersionDetails
agent_version_details = await project_client.agents.create_version(
agent_name="TestAgentAsAgent",
description="Test agent for as_agent example.",
definition=PromptAgentDefinition(
model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"],
instructions="You are a helpful assistant. Keep responses under 20 words.",
),
)
try:
# Wrap the SDK object directly without any HTTP calls
provider = AzureAIProjectAgentProvider(project_client=project_client)
agent = provider.as_agent(agent_version_details)
print(f"Wrapped agent: {agent.name} (no HTTP call needed)")
print(f"Agent version: {agent_version_details.version}")
query = "What can you do?"
print(f"User: {query}")
result = await agent.run(query)
print(f"Agent: {result}\n")
finally:
# Clean up the agent
await project_client.agents.delete_version(
agent_name=agent_version_details.name, agent_version=agent_version_details.version
)
async def main() -> None:
print("=== Azure AI Project Agent Provider Methods Example ===\n")
await create_agent_example()
await get_agent_by_name_example()
await get_agent_by_reference_example()
await as_agent_example()
await multiple_agents_example()
if __name__ == "__main__":
asyncio.run(main())


@@ -0,0 +1,64 @@
# Copyright (c) Microsoft. All rights reserved.
import asyncio
from random import randint
from typing import Annotated
from agent_framework.azure import AzureAIProjectAgentProvider
from azure.identity.aio import AzureCliCredential
from pydantic import Field
"""
Azure AI Agent Latest Version Example
This sample demonstrates how to reuse the latest version of an existing agent
instead of creating a new agent version on each instantiation. The first call creates a new agent,
while subsequent calls with `get_agent()` reuse the latest agent version.
"""
def get_weather(
location: Annotated[str, Field(description="The location to get the weather for.")],
) -> str:
"""Get the weather for a given location."""
conditions = ["sunny", "cloudy", "rainy", "stormy"]
return f"The weather in {location} is {conditions[randint(0, 3)]} with a high of {randint(10, 30)}°C."
async def main() -> None:
# For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred
# authentication option.
async with (
AzureCliCredential() as credential,
AzureAIProjectAgentProvider(credential=credential) as provider,
):
# First call creates a new agent
agent = await provider.create_agent(
name="MyWeatherAgent",
instructions="You are a helpful weather agent.",
tools=get_weather,
)
query = "What's the weather like in Seattle?"
print(f"User: {query}")
result = await agent.run(query)
print(f"Agent: {result}\n")
# Second call retrieves the existing agent (latest version) instead of creating a new one
# This is useful when you want to reuse an agent that was created earlier
agent2 = await provider.get_agent(
name="MyWeatherAgent",
tools=get_weather, # Tools must be provided for function tools
)
query = "What's the weather like in Tokyo?"
print(f"User: {query}")
result = await agent2.run(query)
print(f"Agent: {result}\n")
print(f"First agent ID with version: {agent.id}")
print(f"Second agent ID with version: {agent2.id}")
if __name__ == "__main__":
asyncio.run(main())


@@ -0,0 +1,70 @@
# Copyright (c) Microsoft. All rights reserved.
import asyncio
from collections.abc import Awaitable, Callable
from agent_framework import FunctionInvocationContext
from agent_framework.azure import AzureAIProjectAgentProvider
from azure.identity.aio import AzureCliCredential
"""
Azure AI Agent-as-Tool Example
Demonstrates hierarchical agent architectures where one agent delegates
work to specialized sub-agents wrapped as tools using as_tool().
This pattern is useful when you want a coordinator agent to orchestrate
multiple specialized agents, each focusing on specific tasks.
"""
async def logging_middleware(
context: FunctionInvocationContext,
next: Callable[[FunctionInvocationContext], Awaitable[None]],
) -> None:
"""Middleware that logs tool invocations to show the delegation flow."""
print(f"[Calling tool: {context.function.name}]")
print(f"[Request: {context.arguments}]")
await next(context)
print(f"[Response: {context.result}]")
async def main() -> None:
print("=== Azure AI Agent-as-Tool Pattern ===")
async with (
AzureCliCredential() as credential,
AzureAIProjectAgentProvider(credential=credential) as provider,
):
# Create a specialized writer agent
writer = await provider.create_agent(
name="WriterAgent",
instructions="You are a creative writer. Write short, engaging content.",
)
# Convert writer agent to a tool using as_tool()
writer_tool = writer.as_tool(
name="creative_writer",
description="Generate creative content like taglines, slogans, or short copy",
arg_name="request",
arg_description="What to write",
)
# Create coordinator agent with writer as a tool
coordinator = await provider.create_agent(
name="CoordinatorAgent",
instructions="You coordinate with specialized agents. Delegate writing tasks to the creative_writer tool.",
tools=[writer_tool],
middleware=[logging_middleware],
)
query = "Create a tagline for a coffee shop"
print(f"User: {query}")
result = await coordinator.run(query)
print(f"Coordinator: {result}\n")
if __name__ == "__main__":
asyncio.run(main())


@@ -0,0 +1,53 @@
# Copyright (c) Microsoft. All rights reserved.
import asyncio
import os
from agent_framework.azure import AzureAIProjectAgentProvider
from azure.identity.aio import AzureCliCredential
"""
Azure AI Agent with Agent-to-Agent (A2A) Example
This sample demonstrates usage of AzureAIProjectAgentProvider with Agent-to-Agent (A2A) capabilities
to enable communication with other agents using the A2A protocol.
Prerequisites:
1. Set AZURE_AI_PROJECT_ENDPOINT and AZURE_AI_MODEL_DEPLOYMENT_NAME environment variables.
2. Ensure you have an A2A connection configured in your Azure AI project
and set A2A_PROJECT_CONNECTION_ID environment variable.
3. (Optional) A2A_ENDPOINT - If the connection is missing target (e.g., "Custom keys" type),
set the A2A endpoint URL directly.
"""
async def main() -> None:
# Configure A2A tool with connection ID
a2a_tool = {
"type": "a2a_preview",
"project_connection_id": os.environ["A2A_PROJECT_CONNECTION_ID"],
}
# If the connection is missing a target, we need to set the A2A endpoint URL
if os.environ.get("A2A_ENDPOINT"):
a2a_tool["base_url"] = os.environ["A2A_ENDPOINT"]
async with (
AzureCliCredential() as credential,
AzureAIProjectAgentProvider(credential=credential) as provider,
):
agent = await provider.create_agent(
name="MyA2AAgent",
instructions="""You are a helpful assistant that can communicate with other agents.
Use the A2A tool when you need to interact with other agents to complete tasks
or gather information from specialized agents.""",
tools=a2a_tool,
)
query = "What can the secondary agent do?"
print(f"User: {query}")
result = await agent.run(query)
print(f"Result: {result}\n")
if __name__ == "__main__":
asyncio.run(main())


@@ -0,0 +1,39 @@
# Copyright (c) Microsoft. All rights reserved.
import asyncio
import os
from agent_framework import ChatAgent
from agent_framework.azure import AzureAIClient
from azure.ai.projects.aio import AIProjectClient
from azure.identity.aio import AzureCliCredential
"""
Azure AI Agent with Application Endpoint Example
This sample demonstrates working with pre-existing Azure AI Agents by providing
application endpoint instead of project endpoint.
"""
async def main() -> None:
# Create the client
async with (
AzureCliCredential() as credential,
# Endpoint here should be application endpoint with format:
# /api/projects/<project-name>/applications/<application-name>/protocols
AIProjectClient(endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], credential=credential) as project_client,
ChatAgent(
chat_client=AzureAIClient(
project_client=project_client,
),
) as agent,
):
query = "How are you?"
print(f"User: {query}")
result = await agent.run(query)
print(f"Agent: {result}\n")
if __name__ == "__main__":
asyncio.run(main())


@@ -0,0 +1,52 @@
# Copyright (c) Microsoft. All rights reserved.
import asyncio
import os
from agent_framework.azure import AzureAIProjectAgentProvider
from azure.identity.aio import AzureCliCredential
"""
Azure AI Agent with Azure AI Search Example
This sample demonstrates usage of AzureAIProjectAgentProvider with Azure AI Search
to search through indexed data and answer user questions about it.
Prerequisites:
1. Set AZURE_AI_PROJECT_ENDPOINT and AZURE_AI_MODEL_DEPLOYMENT_NAME environment variables.
2. Ensure you have an Azure AI Search connection configured in your Azure AI project
   and set AI_SEARCH_PROJECT_CONNECTION_ID and AI_SEARCH_INDEX_NAME environment variables.
"""
async def main() -> None:
async with (
AzureCliCredential() as credential,
AzureAIProjectAgentProvider(credential=credential) as provider,
):
agent = await provider.create_agent(
name="MySearchAgent",
instructions="""You are a helpful assistant. You must always provide citations for
answers using the tool and render them as: `[message_idx:search_idx†source]`.""",
tools={
"type": "azure_ai_search",
"azure_ai_search": {
"indexes": [
{
"project_connection_id": os.environ["AI_SEARCH_PROJECT_CONNECTION_ID"],
"index_name": os.environ["AI_SEARCH_INDEX_NAME"],
# For query_type=vector, ensure your index has a field with vectorized data.
"query_type": "simple",
}
]
},
},
)
query = "Tell me about insurance options"
print(f"User: {query}")
result = await agent.run(query)
print(f"Result: {result}\n")
if __name__ == "__main__":
asyncio.run(main())


@@ -0,0 +1,50 @@
# Copyright (c) Microsoft. All rights reserved.
import asyncio
import os
from agent_framework.azure import AzureAIProjectAgentProvider
from azure.identity.aio import AzureCliCredential
"""
Azure AI Agent with Bing Custom Search Example
This sample demonstrates usage of AzureAIProjectAgentProvider with Bing Custom Search
to search custom search instances and provide responses with relevant results.
Prerequisites:
1. Set AZURE_AI_PROJECT_ENDPOINT and AZURE_AI_MODEL_DEPLOYMENT_NAME environment variables.
2. Ensure you have a Bing Custom Search connection configured in your Azure AI project
and set BING_CUSTOM_SEARCH_PROJECT_CONNECTION_ID and BING_CUSTOM_SEARCH_INSTANCE_NAME environment variables.
"""
async def main() -> None:
async with (
AzureCliCredential() as credential,
AzureAIProjectAgentProvider(credential=credential) as provider,
):
agent = await provider.create_agent(
name="MyCustomSearchAgent",
instructions="""You are a helpful agent that can use Bing Custom Search tools to assist users.
Use the available Bing Custom Search tools to answer questions and perform tasks.""",
tools={
"type": "bing_custom_search_preview",
"bing_custom_search_preview": {
"search_configurations": [
{
"project_connection_id": os.environ["BING_CUSTOM_SEARCH_PROJECT_CONNECTION_ID"],
"instance_name": os.environ["BING_CUSTOM_SEARCH_INSTANCE_NAME"],
}
]
},
},
)
query = "Tell me more about foundry agent service"
print(f"User: {query}")
result = await agent.run(query)
print(f"Result: {result}\n")
if __name__ == "__main__":
asyncio.run(main())


@@ -0,0 +1,56 @@
# Copyright (c) Microsoft. All rights reserved.
import asyncio
import os
from agent_framework.azure import AzureAIProjectAgentProvider
from azure.identity.aio import AzureCliCredential
"""
Azure AI Agent with Bing Grounding Example
This sample demonstrates usage of AzureAIProjectAgentProvider with Bing Grounding
to search the web for current information and provide grounded responses.
Prerequisites:
1. Set AZURE_AI_PROJECT_ENDPOINT and AZURE_AI_MODEL_DEPLOYMENT_NAME environment variables.
2. Ensure you have a Bing connection configured in your Azure AI project
and set BING_PROJECT_CONNECTION_ID environment variable.
To get your Bing connection ID:
- Go to Azure AI Foundry portal (https://ai.azure.com)
- Navigate to your project's "Connected resources" section
- Add a new connection for "Grounding with Bing Search"
- Copy the connection ID and set it as the BING_PROJECT_CONNECTION_ID environment variable
"""
async def main() -> None:
async with (
AzureCliCredential() as credential,
AzureAIProjectAgentProvider(credential=credential) as provider,
):
agent = await provider.create_agent(
name="MyBingGroundingAgent",
instructions="""You are a helpful assistant that can search the web for current information.
Use the Bing search tool to find up-to-date information and provide accurate, well-sourced answers.
Always cite your sources when possible.""",
tools={
"type": "bing_grounding",
"bing_grounding": {
"search_configurations": [
{
"project_connection_id": os.environ["BING_PROJECT_CONNECTION_ID"],
}
]
},
},
)
query = "What is today's date and weather in Seattle?"
print(f"User: {query}")
result = await agent.run(query)
print(f"Result: {result}\n")
if __name__ == "__main__":
asyncio.run(main())


@@ -0,0 +1,54 @@
# Copyright (c) Microsoft. All rights reserved.
import asyncio
import os
from agent_framework.azure import AzureAIProjectAgentProvider
from azure.identity.aio import AzureCliCredential
"""
Azure AI Agent with Browser Automation Example
This sample demonstrates usage of AzureAIProjectAgentProvider with Browser Automation
to perform automated web browsing tasks and provide responses based on web interactions.
Prerequisites:
1. Set AZURE_AI_PROJECT_ENDPOINT and AZURE_AI_MODEL_DEPLOYMENT_NAME environment variables.
2. Ensure you have a Browser Automation connection configured in your Azure AI project
and set BROWSER_AUTOMATION_PROJECT_CONNECTION_ID environment variable.
"""
async def main() -> None:
async with (
AzureCliCredential() as credential,
AzureAIProjectAgentProvider(credential=credential) as provider,
):
agent = await provider.create_agent(
name="MyBrowserAutomationAgent",
instructions="""You are an Agent helping with browser automation tasks.
You can answer questions, provide information, and assist with various tasks
related to web browsing using the Browser Automation tool available to you.""",
tools={
"type": "browser_automation_preview",
"browser_automation_preview": {
"connection": {
"project_connection_id": os.environ["BROWSER_AUTOMATION_PROJECT_CONNECTION_ID"],
}
},
},
)
query = """Your goal is to report the percent of Microsoft year-to-date stock price change.
To do that, go to the website finance.yahoo.com.
At the top of the page, you will find a search bar.
Enter the value 'MSFT', to get information about the Microsoft stock price.
At the top of the resulting page you will see a default chart of Microsoft stock price.
Click on 'YTD' at the top of that chart, and report the percent value that shows up just below it."""
print(f"User: {query}")
result = await agent.run(query)
print(f"Result: {result}\n")
if __name__ == "__main__":
asyncio.run(main())


@@ -0,0 +1,58 @@
# Copyright (c) Microsoft. All rights reserved.
import asyncio
from agent_framework import ChatResponse, HostedCodeInterpreterTool
from agent_framework.azure import AzureAIProjectAgentProvider
from azure.identity.aio import AzureCliCredential
from openai.types.responses.response import Response as OpenAIResponse
from openai.types.responses.response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall
"""
Azure AI Agent Code Interpreter Example
This sample demonstrates using HostedCodeInterpreterTool with AzureAIProjectAgentProvider
for Python code execution and mathematical problem solving.
"""
async def main() -> None:
"""Example showing how to use the HostedCodeInterpreterTool with AzureAIProjectAgentProvider."""
async with (
AzureCliCredential() as credential,
AzureAIProjectAgentProvider(credential=credential) as provider,
):
agent = await provider.create_agent(
name="MyCodeInterpreterAgent",
instructions="You are a helpful assistant that can write and execute Python code to solve problems.",
tools=HostedCodeInterpreterTool(),
)
        query = "Use code to compute the factorial of 100."
print(f"User: {query}")
result = await agent.run(query)
print(f"Result: {result}\n")
if (
isinstance(result.raw_representation, ChatResponse)
and isinstance(result.raw_representation.raw_representation, OpenAIResponse)
and len(result.raw_representation.raw_representation.output) > 0
):
# Find the first ResponseCodeInterpreterToolCall item
code_interpreter_item = next(
(
item
for item in result.raw_representation.raw_representation.output
if isinstance(item, ResponseCodeInterpreterToolCall)
),
None,
)
if code_interpreter_item is not None:
generated_code = code_interpreter_item.code
print(f"Generated code:\n{generated_code}")
if __name__ == "__main__":
asyncio.run(main())


@@ -0,0 +1,219 @@
# Copyright (c) Microsoft. All rights reserved.
import asyncio
import tempfile
from pathlib import Path
from agent_framework import (
AgentResponseUpdate,
ChatAgent,
CitationAnnotation,
HostedCodeInterpreterTool,
HostedFileContent,
TextContent,
)
from agent_framework.azure import AzureAIProjectAgentProvider
from azure.identity.aio import AzureCliCredential
"""
Azure AI V2 Code Interpreter File Download Sample
This sample demonstrates how the AzureAIProjectAgentProvider handles file annotations
when code interpreter generates text files. It shows:
1. How to extract file IDs and container IDs from annotations
2. How to download container files using the OpenAI containers API
3. How to save downloaded files locally
Note: Code interpreter generates files in containers, which require both
file_id and container_id to download via client.containers.files.content.retrieve().
"""
QUERY = (
"Write a simple Python script that creates a text file called 'sample.txt' containing "
"'Hello from the code interpreter!' and save it to disk."
)
async def download_container_files(
file_contents: list[CitationAnnotation | HostedFileContent], agent: ChatAgent
) -> list[Path]:
"""Download container files using the OpenAI containers API.
Code interpreter generates files in containers, which require both file_id
and container_id to download. The container_id is stored in additional_properties.
This function works for both streaming (HostedFileContent) and non-streaming
(CitationAnnotation) responses.
Args:
file_contents: List of CitationAnnotation or HostedFileContent objects
containing file_id and container_id.
agent: The ChatAgent instance with access to the AzureAIClient.
Returns:
List of Path objects for successfully downloaded files.
"""
if not file_contents:
return []
# Create output directory in system temp folder
temp_dir = Path(tempfile.gettempdir())
output_dir = temp_dir / "agent_framework_downloads"
output_dir.mkdir(exist_ok=True)
print(f"\nDownloading {len(file_contents)} container file(s) to {output_dir.absolute()}...")
# Access the OpenAI client from AzureAIClient
openai_client = agent.chat_client.client
downloaded_files: list[Path] = []
for content in file_contents:
file_id = content.file_id
# Extract container_id from additional_properties
if not content.additional_properties or "container_id" not in content.additional_properties:
print(f" File {file_id}: ✗ Missing container_id")
continue
container_id = content.additional_properties["container_id"]
# Extract filename based on content type
if isinstance(content, CitationAnnotation):
filename = content.url or f"{file_id}.txt"
# Extract filename from sandbox URL if present (e.g., sandbox:/mnt/data/sample.txt)
if filename.startswith("sandbox:"):
filename = filename.split("/")[-1]
else: # HostedFileContent
filename = content.additional_properties.get("filename") or f"{file_id}.txt"
output_path = output_dir / filename
try:
# Download using containers API
print(f" Downloading {filename}...", end="", flush=True)
file_content = await openai_client.containers.files.content.retrieve(
file_id=file_id,
container_id=container_id,
)
# file_content is HttpxBinaryResponseContent, read it
content_bytes = file_content.read()
# Save to disk
output_path.write_bytes(content_bytes)
file_size = output_path.stat().st_size
print(f"({file_size} bytes)")
downloaded_files.append(output_path)
except Exception as e:
print(f"Failed: {e}")
return downloaded_files
async def non_streaming_example() -> None:
"""Example of downloading files from non-streaming response using CitationAnnotation."""
print("=== Non-Streaming Response Example ===")
async with (
AzureCliCredential() as credential,
AzureAIProjectAgentProvider(credential=credential) as provider,
):
agent = await provider.create_agent(
name="V2CodeInterpreterFileAgent",
instructions="You are a helpful assistant that can write and execute Python code to create files.",
tools=HostedCodeInterpreterTool(),
)
print(f"User: {QUERY}\n")
result = await agent.run(QUERY)
print(f"Agent: {result.text}\n")
# Check for annotations in the response
annotations_found: list[CitationAnnotation] = []
# AgentResponse has messages property, which contains ChatMessage objects
for message in result.messages:
for content in message.contents:
if isinstance(content, TextContent) and content.annotations:
for annotation in content.annotations:
if isinstance(annotation, CitationAnnotation) and annotation.file_id:
annotations_found.append(annotation)
print(f"Found file annotation: file_id={annotation.file_id}")
if annotation.additional_properties and "container_id" in annotation.additional_properties:
print(f" container_id={annotation.additional_properties['container_id']}")
if annotations_found:
print(f"SUCCESS: Found {len(annotations_found)} file annotation(s)")
# Download the container files
downloaded_paths = await download_container_files(annotations_found, agent)
if downloaded_paths:
print("\nDownloaded files available at:")
for path in downloaded_paths:
print(f" - {path.absolute()}")
else:
print("WARNING: No file annotations found in non-streaming response")
async def streaming_example() -> None:
"""Example of downloading files from streaming response using HostedFileContent."""
print("\n=== Streaming Response Example ===")
async with (
AzureCliCredential() as credential,
AzureAIProjectAgentProvider(credential=credential) as provider,
):
agent = await provider.create_agent(
name="V2CodeInterpreterFileAgentStreaming",
instructions="You are a helpful assistant that can write and execute Python code to create files.",
tools=HostedCodeInterpreterTool(),
)
print(f"User: {QUERY}\n")
file_contents_found: list[HostedFileContent] = []
text_chunks: list[str] = []
async for update in agent.run_stream(QUERY):
if isinstance(update, AgentResponseUpdate):
for content in update.contents:
if isinstance(content, TextContent):
if content.text:
text_chunks.append(content.text)
if content.annotations:
for annotation in content.annotations:
if isinstance(annotation, CitationAnnotation) and annotation.file_id:
print(f"Found streaming CitationAnnotation: file_id={annotation.file_id}")
elif isinstance(content, HostedFileContent):
file_contents_found.append(content)
print(f"Found streaming HostedFileContent: file_id={content.file_id}")
if content.additional_properties and "container_id" in content.additional_properties:
print(f" container_id={content.additional_properties['container_id']}")
print(f"\nAgent response: {''.join(text_chunks)[:200]}...")
if file_contents_found:
print(f"SUCCESS: Found {len(file_contents_found)} file reference(s) in streaming")
# Download the container files
downloaded_paths = await download_container_files(file_contents_found, agent)
if downloaded_paths:
print("\n✓ Downloaded files available at:")
for path in downloaded_paths:
print(f" - {path.absolute()}")
else:
print("WARNING: No file annotations found in streaming response")
async def main() -> None:
print("AzureAIClient Code Interpreter File Download Sample\n")
await non_streaming_example()
await streaming_example()
if __name__ == "__main__":
asyncio.run(main())


@@ -0,0 +1,112 @@
# Copyright (c) Microsoft. All rights reserved.
import asyncio
from agent_framework import (
AgentResponseUpdate,
HostedCodeInterpreterTool,
)
from agent_framework.azure import AzureAIProjectAgentProvider
from azure.identity.aio import AzureCliCredential
"""
Azure AI V2 Code Interpreter File Generation Sample
This sample demonstrates how the AzureAIProjectAgentProvider handles file annotations
when code interpreter generates text files. It shows both non-streaming
and streaming approaches to verify file ID extraction.
"""
QUERY = (
"Write a simple Python script that creates a text file called 'sample.txt' containing "
"'Hello from the code interpreter!' and save it to disk."
)
async def non_streaming_example() -> None:
"""Example of extracting file annotations from non-streaming response."""
print("=== Non-Streaming Response Example ===")
async with (
AzureCliCredential() as credential,
AzureAIProjectAgentProvider(credential=credential) as provider,
):
agent = await provider.create_agent(
name="V2CodeInterpreterFileAgent",
instructions="You are a helpful assistant that can write and execute Python code to create files.",
tools=HostedCodeInterpreterTool(),
)
print(f"User: {QUERY}\n")
result = await agent.run(QUERY)
print(f"Agent: {result.text}\n")
# Check for annotations in the response
annotations_found: list[str] = []
# AgentResponse has messages property, which contains ChatMessage objects
for message in result.messages:
for content in message.contents:
if content.type == "text" and content.annotations:
for annotation in content.annotations:
if annotation.file_id:
annotations_found.append(annotation.file_id)
print(f"Found file annotation: file_id={annotation.file_id}")
if annotations_found:
print(f"SUCCESS: Found {len(annotations_found)} file annotation(s)")
else:
print("WARNING: No file annotations found in non-streaming response")
async def streaming_example() -> None:
"""Example of extracting file annotations from streaming response."""
print("\n=== Streaming Response Example ===")
async with (
AzureCliCredential() as credential,
AzureAIProjectAgentProvider(credential=credential) as provider,
):
agent = await provider.create_agent(
name="V2CodeInterpreterFileAgentStreaming",
instructions="You are a helpful assistant that can write and execute Python code to create files.",
tools=HostedCodeInterpreterTool(),
)
print(f"User: {QUERY}\n")
annotations_found: list[str] = []
text_chunks: list[str] = []
file_ids_found: list[str] = []
async for update in agent.run_stream(QUERY):
if isinstance(update, AgentResponseUpdate):
for content in update.contents:
if content.type == "text":
if content.text:
text_chunks.append(content.text)
if content.annotations:
for annotation in content.annotations:
if annotation.file_id:
annotations_found.append(annotation.file_id)
print(f"Found streaming annotation: file_id={annotation.file_id}")
elif content.type == "hosted_file":
file_ids_found.append(content.file_id)
print(f"Found streaming HostedFileContent: file_id={content.file_id}")
print(f"\nAgent response: {''.join(text_chunks)[:200]}...")
if annotations_found or file_ids_found:
total = len(annotations_found) + len(file_ids_found)
print(f"SUCCESS: Found {total} file reference(s) in streaming")
else:
print("WARNING: No file annotations found in streaming response")
async def main() -> None:
print("AzureAIClient Code Interpreter File Generation Sample\n")
await non_streaming_example()
await streaming_example()
if __name__ == "__main__":
asyncio.run(main())


@@ -0,0 +1,66 @@
# Copyright (c) Microsoft. All rights reserved.
import asyncio
from agent_framework.azure import AzureAIProjectAgentProvider
from azure.ai.projects.models import RaiConfig
from azure.identity.aio import AzureCliCredential
"""
Azure AI Agent with Content Filtering (RAI Policy) Example
This sample demonstrates how to enable content filtering on Azure AI agents using RaiConfig.
Prerequisites:
1. Create an RAI Policy in Azure AI Foundry portal:
- Go to Azure AI Foundry > Your Project > Guardrails + Controls > Content Filters
- Create a new content filter or use an existing one
- Note the policy name
2. Set environment variables:
- AZURE_AI_PROJECT_ENDPOINT: Your Azure AI Foundry project endpoint
- AZURE_AI_MODEL_DEPLOYMENT_NAME: Your model deployment name
3. Run `az login` to authenticate
"""
async def main() -> None:
print("=== Azure AI Agent with Content Filtering ===\n")
# Replace with your RAI policy from Azure AI Foundry portal
rai_policy_name = (
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/"
"Microsoft.CognitiveServices/accounts/{accountName}/raiPolicies/{policyName}"
)
async with (
AzureCliCredential() as credential,
AzureAIProjectAgentProvider(credential=credential) as provider,
):
# Create agent with content filtering enabled via default_options
agent = await provider.create_agent(
name="ContentFilteredAgent",
instructions="You are a helpful assistant.",
default_options={"rai_config": RaiConfig(rai_policy_name=rai_policy_name)},
)
# Test with a normal query
query = "What is the capital of France?"
print(f"User: {query}")
result = await agent.run(query)
print(f"Agent: {result}\n")
# Test with a query that might trigger content filtering
# (depending on your RAI policy configuration)
query2 = "Tell me something inappropriate."
print(f"User: {query2}")
try:
result2 = await agent.run(query2)
print(f"Agent: {result2}\n")
except Exception as e:
print(f"Content filter triggered: {e}\n")
if __name__ == "__main__":
asyncio.run(main())


@@ -0,0 +1,66 @@
# Copyright (c) Microsoft. All rights reserved.
import asyncio
import os
from agent_framework.azure import AzureAIProjectAgentProvider
from azure.ai.projects.aio import AIProjectClient
from azure.ai.projects.models import PromptAgentDefinition
from azure.identity.aio import AzureCliCredential
"""
Azure AI Agent with Existing Agent Example
This sample demonstrates working with pre-existing Azure AI Agents by using provider.get_agent() method,
showing agent reuse patterns for production scenarios.
"""
async def using_provider_get_agent() -> None:
print("=== Get existing Azure AI agent with provider.get_agent() ===")
# Create the client
async with (
AzureCliCredential() as credential,
AIProjectClient(endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], credential=credential) as project_client,
):
# Create remote agent using SDK directly
azure_ai_agent = await project_client.agents.create_version(
agent_name="MyNewTestAgent",
description="Agent for testing purposes.",
definition=PromptAgentDefinition(
model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"],
# Setting specific requirements to verify that this agent is used.
instructions="End each response with [END].",
),
)
try:
# Get newly created agent as ChatAgent by using provider.get_agent()
provider = AzureAIProjectAgentProvider(project_client=project_client)
agent = await provider.get_agent(name=azure_ai_agent.name)
# Verify agent properties
print(f"Agent ID: {agent.id}")
print(f"Agent name: {agent.name}")
print(f"Agent description: {agent.description}")
query = "How are you?"
print(f"User: {query}")
result = await agent.run(query)
# Response that indicates that previously created agent was used:
# "I'm here and ready to help you! How can I assist you today? [END]"
print(f"Agent: {result}\n")
finally:
# Clean up the agent manually
await project_client.agents.delete_version(
agent_name=azure_ai_agent.name, agent_version=azure_ai_agent.version
)
async def main() -> None:
await using_provider_get_agent()
if __name__ == "__main__":
asyncio.run(main())


@@ -0,0 +1,99 @@
# Copyright (c) Microsoft. All rights reserved.
import asyncio
import os
from random import randint
from typing import Annotated
from agent_framework.azure import AzureAIProjectAgentProvider
from azure.ai.projects.aio import AIProjectClient
from azure.identity.aio import AzureCliCredential
from pydantic import Field
"""
Azure AI Agent Existing Conversation Example
This sample demonstrates usage of AzureAIProjectAgentProvider with existing conversation created on service side.
"""
def get_weather(
location: Annotated[str, Field(description="The location to get the weather for.")],
) -> str:
"""Get the weather for a given location."""
conditions = ["sunny", "cloudy", "rainy", "stormy"]
return f"The weather in {location} is {conditions[randint(0, 3)]} with a high of {randint(10, 30)}°C."
async def example_with_conversation_id() -> None:
"""Example shows how to use existing conversation ID with the provider."""
print("=== Azure AI Agent With Existing Conversation ===")
async with (
AzureCliCredential() as credential,
AIProjectClient(endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], credential=credential) as project_client,
):
# Create a conversation using OpenAI client
openai_client = project_client.get_openai_client()
conversation = await openai_client.conversations.create()
conversation_id = conversation.id
print(f"Conversation ID: {conversation_id}")
provider = AzureAIProjectAgentProvider(project_client=project_client)
agent = await provider.create_agent(
name="BasicAgent",
instructions="You are a helpful agent.",
tools=get_weather,
)
# Pass conversation_id at run level
query = "What's the weather like in Seattle?"
print(f"User: {query}")
result = await agent.run(query, conversation_id=conversation_id)
print(f"Agent: {result.text}\n")
query = "What was my last question?"
print(f"User: {query}")
result = await agent.run(query, conversation_id=conversation_id)
print(f"Agent: {result.text}\n")
async def example_with_thread() -> None:
"""This example shows how to specify existing conversation ID with AgentThread."""
print("=== Azure AI Agent With Existing Conversation and Thread ===")
async with (
AzureCliCredential() as credential,
AIProjectClient(endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], credential=credential) as project_client,
):
provider = AzureAIProjectAgentProvider(project_client=project_client)
agent = await provider.create_agent(
name="BasicAgent",
instructions="You are a helpful agent.",
tools=get_weather,
)
# Create a conversation using OpenAI client
openai_client = project_client.get_openai_client()
conversation = await openai_client.conversations.create()
conversation_id = conversation.id
print(f"Conversation ID: {conversation_id}")
# Create a thread with the existing ID
thread = agent.get_new_thread(service_thread_id=conversation_id)
query = "What's the weather like in Seattle?"
print(f"User: {query}")
result = await agent.run(query, thread=thread)
print(f"Agent: {result.text}\n")
query = "What was my last question?"
print(f"User: {query}")
result = await agent.run(query, thread=thread)
print(f"Agent: {result.text}\n")
async def main() -> None:
await example_with_conversation_id()
await example_with_thread()
if __name__ == "__main__":
asyncio.run(main())


@@ -0,0 +1,52 @@
# Copyright (c) Microsoft. All rights reserved.
import asyncio
import os
from random import randint
from typing import Annotated
from agent_framework.azure import AzureAIProjectAgentProvider
from azure.identity.aio import AzureCliCredential
from pydantic import Field
"""
Azure AI Agent with Explicit Settings Example
This sample demonstrates creating Azure AI Agents with explicit configuration
settings rather than relying on environment variable defaults.
"""
def get_weather(
location: Annotated[str, Field(description="The location to get the weather for.")],
) -> str:
"""Get the weather for a given location."""
conditions = ["sunny", "cloudy", "rainy", "stormy"]
return f"The weather in {location} is {conditions[randint(0, 3)]} with a high of {randint(10, 30)}°C."
async def main() -> None:
# For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred
# authentication option.
async with (
AzureCliCredential() as credential,
AzureAIProjectAgentProvider(
project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"],
model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"],
credential=credential,
) as provider,
):
agent = await provider.create_agent(
name="WeatherAgent",
instructions="You are a helpful weather agent.",
tools=get_weather,
)
query = "What's the weather like in New York?"
print(f"User: {query}")
result = await agent.run(query)
print(f"Agent: {result}\n")
if __name__ == "__main__":
asyncio.run(main())


@@ -0,0 +1,75 @@
# Copyright (c) Microsoft. All rights reserved.
import asyncio
import os
from pathlib import Path
from agent_framework import HostedFileSearchTool, HostedVectorStoreContent
from agent_framework.azure import AzureAIProjectAgentProvider
from azure.ai.agents.aio import AgentsClient
from azure.ai.agents.models import FileInfo, VectorStore
from azure.identity.aio import AzureCliCredential
"""
The following sample demonstrates how to create a simple, Azure AI agent that
uses a file search tool to answer user questions.
"""
# Simulate a conversation with the agent
USER_INPUTS = [
"Who is the youngest employee?",
"Who works in sales?",
"I have a customer request, who can help me?",
]
async def main() -> None:
"""Main function demonstrating Azure AI agent with file search capabilities."""
file: FileInfo | None = None
vector_store: VectorStore | None = None
async with (
AzureCliCredential() as credential,
AgentsClient(endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], credential=credential) as agents_client,
AzureAIProjectAgentProvider(credential=credential) as provider,
):
try:
# 1. Upload file and create vector store
pdf_file_path = Path(__file__).parent.parent / "resources" / "employees.pdf"
print(f"Uploading file from: {pdf_file_path}")
file = await agents_client.files.upload_and_poll(file_path=str(pdf_file_path), purpose="assistants")
print(f"Uploaded file, file ID: {file.id}")
vector_store = await agents_client.vector_stores.create_and_poll(file_ids=[file.id], name="my_vectorstore")
print(f"Created vector store, vector store ID: {vector_store.id}")
# 2. Create file search tool with uploaded resources
file_search_tool = HostedFileSearchTool(inputs=[HostedVectorStoreContent(vector_store_id=vector_store.id)])
# 3. Create an agent with file search capabilities using the provider
agent = await provider.create_agent(
name="EmployeeSearchAgent",
instructions=(
"You are a helpful assistant that can search through uploaded employee files "
"to answer questions about employees."
),
tools=file_search_tool,
)
# 4. Simulate conversation with the agent
for user_input in USER_INPUTS:
print(f"# User: '{user_input}'")
response = await agent.run(user_input)
print(f"# Agent: {response.text}")
finally:
# 5. Cleanup: Delete the vector store and file in case of earlier failure to prevent orphaned resources.
if vector_store:
await agents_client.vector_stores.delete(vector_store.id)
if file:
await agents_client.files.delete(file.id)
if __name__ == "__main__":
asyncio.run(main())


@@ -0,0 +1,119 @@
# Copyright (c) Microsoft. All rights reserved.
import asyncio
from typing import Any
from agent_framework import AgentProtocol, AgentResponse, AgentThread, ChatMessage, HostedMCPTool
from agent_framework.azure import AzureAIProjectAgentProvider
from azure.identity.aio import AzureCliCredential
"""
Azure AI Agent with Hosted MCP Example
This sample demonstrates integrating hosted Model Context Protocol (MCP) tools with Azure AI Agent.
"""
async def handle_approvals_without_thread(query: str, agent: "AgentProtocol") -> AgentResponse:
    """Without a thread, each run must resend the original input along with the approval request and the approval."""
result = await agent.run(query, store=False)
while len(result.user_input_requests) > 0:
new_inputs: list[Any] = [query]
for user_input_needed in result.user_input_requests:
print(
f"User Input Request for function from {agent.name}: {user_input_needed.function_call.name}"
f" with arguments: {user_input_needed.function_call.arguments}"
)
new_inputs.append(ChatMessage(role="assistant", contents=[user_input_needed]))
user_approval = input("Approve function call? (y/n): ")
new_inputs.append(
ChatMessage(role="user", contents=[user_input_needed.create_response(user_approval.lower() == "y")])
)
result = await agent.run(new_inputs, store=False)
return result
async def handle_approvals_with_thread(query: str, agent: "AgentProtocol", thread: "AgentThread") -> AgentResponse:
"""Here we let the thread deal with the previous responses, and we just rerun with the approval."""
result = await agent.run(query, thread=thread)
while len(result.user_input_requests) > 0:
new_input: list[Any] = []
for user_input_needed in result.user_input_requests:
print(
f"User Input Request for function from {agent.name}: {user_input_needed.function_call.name}"
f" with arguments: {user_input_needed.function_call.arguments}"
)
user_approval = input("Approve function call? (y/n): ")
new_input.append(
ChatMessage(
role="user",
contents=[user_input_needed.create_response(user_approval.lower() == "y")],
)
)
result = await agent.run(new_input, thread=thread)
return result
async def run_hosted_mcp_without_approval() -> None:
"""Example showing MCP Tools without approval."""
# For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred
# authentication option.
async with (
AzureCliCredential() as credential,
AzureAIProjectAgentProvider(credential=credential) as provider,
):
agent = await provider.create_agent(
name="MyLearnDocsAgent",
instructions="You are a helpful assistant that can help with Microsoft documentation questions.",
tools=HostedMCPTool(
name="Microsoft Learn MCP",
url="https://learn.microsoft.com/api/mcp",
approval_mode="never_require",
),
)
query = "How to create an Azure storage account using az cli?"
print(f"User: {query}")
result = await handle_approvals_without_thread(query, agent)
print(f"{agent.name}: {result}\n")
async def run_hosted_mcp_with_approval_and_thread() -> None:
"""Example showing MCP Tools with approvals using a thread."""
print("=== MCP with approvals and with thread ===")
# For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred
# authentication option.
async with (
AzureCliCredential() as credential,
AzureAIProjectAgentProvider(credential=credential) as provider,
):
agent = await provider.create_agent(
name="MyApiSpecsAgent",
instructions="You are a helpful agent that can use MCP tools to assist users.",
tools=HostedMCPTool(
name="api-specs",
url="https://gitmcp.io/Azure/azure-rest-api-specs",
approval_mode="always_require",
),
)
thread = agent.get_new_thread()
query = "Please summarize the Azure REST API specifications Readme"
print(f"User: {query}")
result = await handle_approvals_with_thread(query, agent, thread)
print(f"{agent.name}: {result}\n")
async def main() -> None:
print("=== Azure AI Agent with Hosted MCP Tools Example ===\n")
await run_hosted_mcp_without_approval()
await run_hosted_mcp_with_approval_and_thread()
if __name__ == "__main__":
asyncio.run(main())
