diff --git a/.claude/settings.json b/.claude/settings.json
deleted file mode 100644
index 378d0ebe..00000000
--- a/.claude/settings.json
+++ /dev/null
@@ -1,15 +0,0 @@
-{
-  "permissions": {
-    "allow": [
-      "WebSearch",
-      "WebFetch(domain:docs.temporal.io)",
-      "Bash(rye run pytest:*)",
-      "Bash(rye run lint:*)",
-      "Bash(rye run typecheck:*)",
-      "Bash(rye run sync:*)",
-      "Bash(rye run build:*)"
-    ],
-    "deny": [],
-    "ask": []
-  }
-}
\ No newline at end of file
diff --git a/.cursor/rules/00_repo_tooling.mdc b/.cursor/rules/00_repo_tooling.mdc
deleted file mode 100644
index ca3a44cb..00000000
--- a/.cursor/rules/00_repo_tooling.mdc
+++ /dev/null
@@ -1,24 +0,0 @@
----
-description: Project-wide tooling, env, and command conventions
-globs: "**/*"
-alwaysApply: true
----
-
-Use Rye for Python dependency management and workflows. Prefer these commands:
-
-- Setup env: `./scripts/bootstrap` or `rye sync --all-features` [[Use Rye in this repo]]
-- Run tests: `rye run pytest` or `./scripts/test`
-- Run a specific test: `rye run pytest path/to/test_file.py::TestClass::test_method -v`
-- Format: `rye run format` or `./scripts/format`
-- Lint: `rye run lint` or `./scripts/lint`
-- Type check: `rye run typecheck` (runs pyright and mypy)
-- Build: `rye build`
-
-Environment requirements:
-
-- Python 3.12+ is required
-- A mock server auto-starts for tests on port 4010
-
-Notes:
-
-- Only use `uv` inside of tutorial folders, which have their own virtualenv (managed by a tutorial-specific pyproject.toml inside the relevant tutorial folder). Otherwise use Rye at the top level.
diff --git a/.cursor/rules/05_permissions_and_tools.mdc b/.cursor/rules/05_permissions_and_tools.mdc
deleted file mode 100644
index 72ff6597..00000000
--- a/.cursor/rules/05_permissions_and_tools.mdc
+++ /dev/null
@@ -1,18 +0,0 @@
----
-description: Cursor agent permissions and allowed tools aligned with Claude settings
-globs: "**/*"
-alwaysApply: true
----
-
-When invoking external tools or the terminal, follow these constraints:
-
-- Web search is allowed when needed for docs and references
-- Prefer fetching docs from `docs.temporal.io` when researching Temporal topics
-- Allowed bash commands should go through Rye workflows:
-  - `rye run pytest:*`
-  - `rye run lint:*`
-  - `rye run typecheck:*`
-  - `rye run sync:*`
-  - `rye run build:*`
-
-Default to Rye; only use other tools when explicitly required by the codebase.
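The `path::TestClass::test_method` selector that the rules above pass to pytest is a standard pytest node ID. As a hypothetical illustration (file and names invented, not taken from this repo's test suite), a test addressed by `rye run pytest tests/test_client.py::TestPing::test_ok -v` would be laid out like this:

```python
# tests/test_client.py - hypothetical module; the class and method names only
# show what a pytest node ID such as TestPing::test_ok points at.


class TestPing:
    def test_ok(self) -> None:
        # Trivial stand-in assertion; a real test would hit the mock server
        # that the repo starts on port 4010.
        assert "pong" == "pong"
```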
diff --git a/.cursor/rules/10_architecture.mdc b/.cursor/rules/10_architecture.mdc
deleted file mode 100644
index 230f1456..00000000
--- a/.cursor/rules/10_architecture.mdc
+++ /dev/null
@@ -1,30 +0,0 @@
----
-description: Repository architecture overview and code navigation hints
-globs: "src/agentex/**, examples/**, tests/**, README.md"
-alwaysApply: false
----
-
-Code structure expectations:
-
-- `src/agentex/` contains the core SDK and generated API client code
-- `src/agentex/lib/` contains manually maintained code that should not be overwritten by the code generator
-  - `cli/` Typer-based CLI implementation
-  - `core/` Core services, adapters, and Temporal workflows
-  - `sdk/` SDK utilities and FastACP implementation
-  - `types/` Custom type definitions
-  - `utils/` Utility functions
-- `examples/` provides example implementations and tutorials
-- `tests/` contains the test suites
-
-Key components quick reference:
-
-- Client Layer: HTTP client for AgentEx API in `_client.py` and `resources/`
-- CLI Layer: Typer-based commands under `lib/cli/`
-- Core Services: Temporal workflows and services under `lib/core/`
-- FastACP: Protocol implementation in `lib/sdk/fastacp/`
-- State Machine: Workflow state management in `lib/sdk/state_machine/`
-
-Generated vs manual code:
-
-- Treat `src/agentex/lib/**` as manual code; avoid edits in generated areas unless regenerating consistently
-- Expect merge conflicts between generator outputs and manual patches; keep custom logic in `lib/`
diff --git a/.cursor/rules/20_codegen_boundaries.mdc b/.cursor/rules/20_codegen_boundaries.mdc
deleted file mode 100644
index 0bd03880..00000000
--- a/.cursor/rules/20_codegen_boundaries.mdc
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Keep manual code separate from generated SDK code
-globs: "src/agentex/**"
-alwaysApply: true
----
-
-Guideline:
-
-- Avoid modifying auto-generated files in `src/agentex/` except where explicitly intended. Place custom logic, extensions, and higher-level abstractions in `src/agentex/lib/`.
-- When adding features, prefer adding new modules under `src/agentex/lib/**` rather than changing generated files directly.
-- If a change to generated code is required, document the reason and ensure the generator configuration or upstream schema is updated to make the change reproducible.
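To make the codegen boundary above concrete, here is a hedged sketch of keeping custom logic under `src/agentex/lib/`; the module path and helper are invented for illustration and are not part of the SDK:

```python
# src/agentex/lib/utils/retry.py - hypothetical module; nothing here touches
# generated code, so SDK regeneration cannot clobber it.
from __future__ import annotations

import time
from typing import Callable, TypeVar

T = TypeVar("T")


def call_with_retries(call: Callable[[], T], attempts: int = 3, base_delay_s: float = 0.5) -> T:
    """Invoke a generated-client call, retrying with exponential backoff."""
    last_error: Exception | None = None
    for attempt in range(attempts):
        try:
            return call()
        except Exception as exc:  # in real code, narrow this to the SDK's error types
            last_error = exc
            if attempt < attempts - 1:
                time.sleep(base_delay_s * (2**attempt))
    assert last_error is not None
    raise last_error
```

A caller would wrap a generated method rather than edit it, e.g. `call_with_retries(lambda: client.tasks.list())`, so policy like retries lives entirely in the manual layer.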
diff --git a/.cursor/rules/30_cli_and_commands.mdc b/.cursor/rules/30_cli_and_commands.mdc
deleted file mode 100644
index f3944234..00000000
--- a/.cursor/rules/30_cli_and_commands.mdc
+++ /dev/null
@@ -1,18 +0,0 @@
----
-description: Guidance for working with the agentex CLI and commands
-globs: "src/agentex/lib/cli/**, src/agentex/lib/core/**"
-alwaysApply: false
----
-
-The `agentex` CLI exposes:
-
-- `agentex agents` for get/list/run/build/deploy agents
-- `agentex tasks` for get/list/delete tasks
-- `agentex secrets` for sync/get/list/delete secrets
-- `agentex uv` as a UV wrapper with AgentEx-specific enhancements
-- `agentex init` to initialize new agent projects
-
-Development tips:
-
-- For agent development, use `agentex agents run --manifest manifest.yaml`
-- For debugging, append `--debug-worker` and optionally `--debug-port 5679`
diff --git a/.cursor/rules/40_temporal_and_agents.mdc b/.cursor/rules/40_temporal_and_agents.mdc
deleted file mode 100644
index 7f105391..00000000
--- a/.cursor/rules/40_temporal_and_agents.mdc
+++ /dev/null
@@ -1,17 +0,0 @@
----
-description: Temporal workflows, activities, and agent development guidance
-globs: "src/agentex/lib/core/temporal/**, examples/**/10_temporal/**"
-alwaysApply: false
----
-
-Temporal integration:
-
-- Workflow definitions live in `lib/core/temporal/`
-- Include activity definitions for different providers and worker implementations
-- Keep workflow logic deterministic and side-effect free; move I/O into activities
-
-Agent framework:
-
-- Agents are manifest-driven and support multiple agent types (sync and Temporal-based)
-- Use the examples under `examples/10_async/` and `examples/10_temporal/` for patterns
-- For debugging agents, use the CLI flags `--debug-worker` and `--debug-port`
diff --git a/.cursor/rules/50_tests_and_mocking.mdc b/.cursor/rules/50_tests_and_mocking.mdc
deleted file mode 100644
index 420de4ac..00000000
--- a/.cursor/rules/50_tests_and_mocking.mdc
+++ /dev/null
@@ -1,16 +0,0 @@
----
-description: Testing workflow and mock server details
-globs: "tests/**, scripts/test, scripts/mock"
-alwaysApply: true
----
-
-Testing:
-
-- Run tests with `rye run pytest` or `./scripts/test`
-- To run a specific test: `rye run pytest path/to/test_file.py::TestClass::test_method -v`
-- A mock server is automatically started for tests on port 4010
-
-When writing tests:
-
-- Prefer deterministic unit tests that do not depend on external services
-- Use the mock server and fixtures provided in the repository
diff --git a/.cursor/rules/60_style_lint_typecheck.mdc b/.cursor/rules/60_style_lint_typecheck.mdc
deleted file mode 100644
index f36f02d7..00000000
--- a/.cursor/rules/60_style_lint_typecheck.mdc
+++ /dev/null
@@ -1,16 +0,0 @@
----
-description: Formatting, linting, and type checking standards
-globs: "src/**, tests/**"
-alwaysApply: true
----
-
-Standards:
-
-- Format code via `rye run format` or `./scripts/format`
-- Lint via `rye run lint` or `./scripts/lint`
-- Type check via `rye run typecheck` (pyright + mypy)
-
-Guidance:
-
-- Keep code readable and consistent; prefer small, focused functions
-- Avoid introducing style or type violations; fix before committing
diff --git a/.cursor/rules/70_examples_and_docs.mdc b/.cursor/rules/70_examples_and_docs.mdc
deleted file mode 100644
index 7d16e9d0..00000000
--- a/.cursor/rules/70_examples_and_docs.mdc
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: How to use examples and documentation for development
-globs: "examples/**, README.md"
-alwaysApply: false
----
-
-Use the `examples/` directory as reference implementations and tutorials. When creating new features: - -- Mirror patterns from the closest matching example -- Keep examples runnable with the documented commands -- Prefer adding or updating examples alongside significant feature changes diff --git a/.github/scripts/sync_agents.py b/.github/scripts/sync_agents.py deleted file mode 100644 index e69de29b..00000000 diff --git a/.github/workflows/agentex-tutorials-test.yml b/.github/workflows/agentex-tutorials-test.yml deleted file mode 100644 index eb6da476..00000000 --- a/.github/workflows/agentex-tutorials-test.yml +++ /dev/null @@ -1,315 +0,0 @@ -name: Test Tutorial Agents - -on: - pull_request: - branches: [ main ] - push: - branches: [ main ] - workflow_dispatch: - -jobs: - find-tutorials: - runs-on: ubuntu-latest - outputs: - tutorials: ${{ steps.get-tutorials.outputs.tutorials }} - steps: - - name: Checkout agentex-python repo - uses: actions/checkout@v4 - - - name: Find all tutorials - id: get-tutorials - run: | - cd examples/tutorials - # Find all tutorials and exclude specific temporal ones - all_tutorials=$(find . -name "manifest.yaml" -exec dirname {} \; | sort | sed 's|^\./||') - - # Filter out the specified temporal tutorials that are being updated - filtered_tutorials=$(echo "$all_tutorials" | grep -v -E "(temporal)") - - # Convert to JSON array - tutorials=$(echo "$filtered_tutorials" | jq -R -s -c 'split("\n") | map(select(length > 0))') - - echo "tutorials=$tutorials" >> $GITHUB_OUTPUT - echo "All tutorials found: $(echo "$all_tutorials" | wc -l)" - echo "Filtered tutorials: $(echo "$filtered_tutorials" | wc -l)" - echo "Excluded tutorials:" - echo "$all_tutorials" | grep -E "(10_temporal/050_|10_temporal/070_|10_temporal/080_)" || echo " (none matched exclusion pattern)" - echo "Final tutorial list: $tutorials" - - test-tutorial: - needs: find-tutorials - runs-on: ubuntu-latest - timeout-minutes: 15 - strategy: - matrix: - tutorial: ${{ fromJson(needs.find-tutorials.outputs.tutorials) }} - fail-fast: false - name: test-${{ matrix.tutorial }} - - steps: - - name: Checkout agentex-python repo - uses: actions/checkout@v4 - - - name: Install UV - run: | - curl -LsSf https://astral.sh/uv/install.sh | sh - echo "$HOME/.local/bin" >> $GITHUB_PATH - - - name: Pull latest AgentEx image - run: | - echo "๐Ÿณ Pulling latest Scale AgentEx Docker image..." - docker pull ghcr.io/scaleapi/scale-agentex/agentex:latest - echo "โœ… Successfully pulled AgentEx Docker image" - - - name: Checkout scale-agentex repo - uses: actions/checkout@v4 - with: - repository: scaleapi/scale-agentex - path: scale-agentex - - - name: Configure Docker Compose for pulled image and host networking - run: | - cd scale-agentex/agentex - echo "๐Ÿ”ง Configuring AgentEx container to use pulled image and host networking..." 
- - # Install yq for YAML manipulation - sudo wget -qO /usr/local/bin/yq https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64 - sudo chmod +x /usr/local/bin/yq - - # Override to use pulled image instead of building - yq eval '.services.agentex.image = "ghcr.io/scaleapi/scale-agentex/agentex:latest"' -i docker-compose.yml - yq eval 'del(.services.agentex.build)' -i docker-compose.yml - - # Add extra_hosts to agentex service to make host.docker.internal work - yq eval '.services.agentex.extra_hosts = ["host.docker.internal:host-gateway"]' -i docker-compose.yml - - echo "โœ… Configured docker-compose to use pulled image with host access" - - - name: Start AgentEx Server - run: | - cd scale-agentex/agentex - echo "๐Ÿš€ Starting AgentEx server and dependencies..." - - # Start all services - docker compose up -d - - echo "โณ Waiting for dependencies to be healthy..." - - # Wait for services to be healthy - for i in {1..30}; do - if docker compose ps | grep -q "healthy"; then - echo "โœ… Dependencies are healthy" - break - fi - echo " Attempt $i/30: Waiting for services..." - sleep 5 - done - - # Wait specifically for AgentEx server to be ready - echo "โณ Waiting for AgentEx server to be ready..." - for i in {1..30}; do - if curl -s --max-time 5 http://localhost:5003/health >/dev/null 2>&1; then - echo "โœ… AgentEx server is ready" - break - fi - echo " Attempt $i/30: Waiting for AgentEx server..." - sleep 5 - done - - - name: Build AgentEx SDK - run: | - echo "๐Ÿ”จ Building AgentEx SDK wheel..." - uv build - echo "โœ… SDK built successfully" - ls -la dist/ - - - name: Test Tutorial - id: run-test - working-directory: ./examples/tutorials - env: - OPENAI_API_KEY: ${{ secrets.TUTORIAL_OPENAI_API_KEY }} - HEALTH_CHECK_PORT: 8080 # Use non-privileged port for temporal worker health checks - run: | - echo "Testing tutorial: ${{ matrix.tutorial }}" - AGENTEX_API_BASE_URL="http://localhost:5003" \ - ./run_agent_test.sh --build-cli "${{ matrix.tutorial }}" - - - name: Print agent logs on failure - if: failure() - working-directory: ./examples/tutorials - run: | - echo "๐Ÿšจ Test failed for tutorial: ${{ matrix.tutorial }}" - echo "๐Ÿ“‹ Printing agent logs..." - - # Look for agent log files in the tutorial directory - if find "${{ matrix.tutorial }}" -name "*.log" -type f 2>/dev/null | grep -q .; then - echo "Found agent log files:" - find "${{ matrix.tutorial }}" -name "*.log" -type f -exec echo "=== {} ===" \; -exec cat {} \; - else - echo "No .log files found, checking for other common log locations..." - fi - - # Check for any output files or dumps - if find "${{ matrix.tutorial }}" -name "agent_output*" -o -name "debug*" -o -name "*.out" 2>/dev/null | grep -q .; then - echo "Found other output files:" - find "${{ matrix.tutorial }}" -name "agent_output*" -o -name "debug*" -o -name "*.out" -exec echo "=== {} ===" \; -exec cat {} \; - fi - - # Print the last 50 lines of any python processes that might still be running - echo "๐Ÿ” Checking for running python processes..." 
- ps aux | grep python || echo "No python processes found" - - - name: Record test result - id: test-result - if: always() - run: | - # Create results directory - mkdir -p test-results - - # Determine result - if [ "${{ steps.run-test.outcome }}" == "success" ]; then - result="passed" - echo "result=passed" >> $GITHUB_OUTPUT - echo "tutorial=${{ matrix.tutorial }}" >> $GITHUB_OUTPUT - else - result="failed" - echo "result=failed" >> $GITHUB_OUTPUT - echo "tutorial=${{ matrix.tutorial }}" >> $GITHUB_OUTPUT - fi - - # Save result to file for artifact upload - # Create a safe filename from tutorial path - safe_name=$(echo "${{ matrix.tutorial }}" | tr '/' '_' | tr -d ' ') - echo "$result" > "test-results/result-${safe_name}.txt" - echo "${{ matrix.tutorial }}" > "test-results/tutorial-${safe_name}.txt" - echo "safe_name=${safe_name}" >> $GITHUB_OUTPUT - - - name: Upload test result - if: always() - uses: actions/upload-artifact@v4 - with: - name: test-result-${{ steps.test-result.outputs.safe_name }} - path: test-results/ - retention-days: 1 - - test-summary: - if: always() - needs: [find-tutorials, test-tutorial] - runs-on: ubuntu-latest - name: Test Summary - steps: - - name: Download all test results - uses: actions/download-artifact@v4 - with: - pattern: test-result-* - path: all-results/ - merge-multiple: true - continue-on-error: true - - - name: Generate Test Summary - run: | - echo "# ๐Ÿงช Tutorial Tests Summary" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - - # Initialize counters - passed_count=0 - failed_count=0 - skipped_count=0 - total_count=0 - - # Get all tutorials that were supposed to run - tutorials='${{ needs.find-tutorials.outputs.tutorials }}' - - if [ -d "all-results" ] && [ "$(ls -A all-results 2>/dev/null)" ]; then - echo "๐Ÿ“Š Processing individual test results from artifacts..." - - echo "## Test Results" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo "| Tutorial | Status | Result |" >> $GITHUB_STEP_SUMMARY - echo "|----------|--------|--------|" >> $GITHUB_STEP_SUMMARY - - # Process each result file - for result_file in all-results/result-*.txt; do - if [ -f "$result_file" ]; then - # Extract the safe name from filename - safe_name=$(basename "$result_file" .txt | sed 's/result-//') - - # Get corresponding tutorial name file - tutorial_file="all-results/tutorial-${safe_name}.txt" - - if [ -f "$tutorial_file" ]; then - tutorial_name=$(cat "$tutorial_file") - result=$(cat "$result_file") - - total_count=$((total_count + 1)) - - if [ "$result" = "passed" ]; then - echo "| \`$tutorial_name\` | โœ… | Passed |" >> $GITHUB_STEP_SUMMARY - passed_count=$((passed_count + 1)) - else - echo "| \`$tutorial_name\` | โŒ | Failed |" >> $GITHUB_STEP_SUMMARY - failed_count=$((failed_count + 1)) - fi - fi - fi - done - - # Check for any tutorials that didn't have results (skipped/cancelled) - echo "$tutorials" | jq -r '.[]' | while read expected_tutorial; do - safe_expected=$(echo "$expected_tutorial" | tr '/' '_' | tr -d ' ') - if [ ! -f "all-results/result-${safe_expected}.txt" ]; then - echo "| \`$expected_tutorial\` | โญ๏ธ | Skipped/Cancelled |" >> $GITHUB_STEP_SUMMARY - skipped_count=$((skipped_count + 1)) - total_count=$((total_count + 1)) - fi - done - - else - echo "โš ๏ธ No individual test results found. 
This could mean:" - echo "- Test jobs were cancelled before completion" - echo "- Artifacts failed to upload" - echo "- No tutorials were found to test" - echo "" - - overall_result="${{ needs.test-tutorial.result }}" - echo "Overall job status: **$overall_result**" - - if [[ "$overall_result" == "success" ]]; then - echo "โœ… All tests appear to have passed based on job status." - elif [[ "$overall_result" == "failure" ]]; then - echo "โŒ Some tests appear to have failed based on job status." - echo "" - echo "๐Ÿ’ก **Tip:** Check individual job logs for specific failure details." - elif [[ "$overall_result" == "cancelled" ]]; then - echo "โญ๏ธ Tests were cancelled." - else - echo "โ“ Test status is unclear: $overall_result" - fi - - # Don't show detailed breakdown when we don't have individual results - tutorial_count=$(echo "$tutorials" | jq -r '. | length') - echo "" - echo "Expected tutorial count: $tutorial_count" - fi - - # Only show detailed statistics if we have individual results - if [ -d "all-results" ] && [ "$(ls -A all-results 2>/dev/null)" ]; then - echo "" >> $GITHUB_STEP_SUMMARY - echo "## Summary Statistics" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo "- **Total Tests:** $total_count" >> $GITHUB_STEP_SUMMARY - echo "- **Passed:** $passed_count โœ…" >> $GITHUB_STEP_SUMMARY - echo "- **Failed:** $failed_count โŒ" >> $GITHUB_STEP_SUMMARY - echo "- **Skipped:** $skipped_count โญ๏ธ" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - - if [ $failed_count -eq 0 ] && [ $passed_count -gt 0 ]; then - echo "๐ŸŽ‰ **All tests passed!**" >> $GITHUB_STEP_SUMMARY - elif [ $failed_count -gt 0 ]; then - echo "โš ๏ธ **Some tests failed.** Check individual job logs for details." >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo "๐Ÿ’ก **Tip:** Look for the 'Print agent logs on failure' step in failed jobs for debugging information." >> $GITHUB_STEP_SUMMARY - else - echo "โ„น๏ธ **Tests were cancelled or skipped.**" >> $GITHUB_STEP_SUMMARY - fi - fi diff --git a/.github/workflows/build-and-push-tutorial-agent.yml b/.github/workflows/build-and-push-tutorial-agent.yml deleted file mode 100644 index 0ed78058..00000000 --- a/.github/workflows/build-and-push-tutorial-agent.yml +++ /dev/null @@ -1,201 +0,0 @@ -name: Build and Push Tutorial Agent - -on: - workflow_dispatch: - inputs: - rebuild_all: - description: "Rebuild all tutorial agents regardless of changes, this is reserved for maintainers only." - required: false - type: boolean - default: false - - pull_request: - paths: - - "examples/tutorials/**" - - push: - branches: - - main - paths: - - "examples/tutorials/**" - -permissions: - contents: read - packages: write - -jobs: - check-permissions: - runs-on: ubuntu-latest - steps: - - name: Check event type and permissions - run: | - if [ "${{ github.event_name }}" != "workflow_dispatch" ]; then - echo "Skipping permission check - not a workflow_dispatch event" - exit 0 - fi - echo "Checking maintainer permissions for workflow_dispatch" - - - name: Check if user is maintainer - if: ${{ github.event_name == 'workflow_dispatch' }} - uses: actions/github-script@v7 - with: - script: | - const { data: permission } = await github.rest.repos.getCollaboratorPermissionLevel({ - owner: context.repo.owner, - repo: context.repo.repo, - username: context.actor - }); - - const allowedRoles = ['admin', 'maintain']; - if (!allowedRoles.includes(permission.permission)) { - throw new Error(`โŒ User ${context.actor} does not have sufficient permissions. 
Required: ${allowedRoles.join(', ')}. Current: ${permission.permission}`); - } - - find-agents: - runs-on: ubuntu-latest - needs: [check-permissions] - outputs: - agents: ${{ steps.get-agents.outputs.agents }} - has_agents: ${{ steps.get-agents.outputs.has_agents }} - steps: - - name: Checkout repository - uses: actions/checkout@v4 - with: - fetch-depth: 0 # Fetch full history for git diff - - - name: Find tutorial agents to build - id: get-agents - env: - REBUILD_ALL: ${{ inputs.rebuild_all }} - run: | - # Find all tutorial directories with manifest.yaml - all_agents=$(find examples/tutorials -name "manifest.yaml" -exec dirname {} \; | sort) - agents_to_build=() - - if [ "$REBUILD_ALL" = "true" ]; then - echo "Rebuild all agents requested" - agents_to_build=($(echo "$all_agents")) - - echo "### ๐Ÿ”„ Rebuilding All Tutorial Agents" >> $GITHUB_STEP_SUMMARY - else - # Determine the base branch for comparison - if [ "${{ github.event_name }}" = "pull_request" ]; then - BASE_BRANCH="origin/${{ github.base_ref }}" - echo "Comparing against PR base branch: $BASE_BRANCH" - else - # For pushes to main, compare against the first parent (pre-merge state) - BASE_BRANCH="HEAD^1" - echo "Comparing against previous commit: $BASE_BRANCH" - fi - # Check each agent directory for changes - for agent_dir in $all_agents; do - echo "Checking $agent_dir for changes..." - - # Check if any files in this agent directory have changed - if git diff --name-only $BASE_BRANCH HEAD | grep -q "^$agent_dir/"; then - echo " โœ… Changes detected in $agent_dir" - agents_to_build+=("$agent_dir") - else - echo " โญ๏ธ No changes in $agent_dir - skipping build" - fi - done - - echo "### ๐Ÿ”„ Changed Tutorial Agents" >> $GITHUB_STEP_SUMMARY - fi - - # Convert array to JSON format and output summary - if [ ${#agents_to_build[@]} -eq 0 ]; then - echo "No agents to build" - echo "agents=[]" >> $GITHUB_OUTPUT - echo "has_agents=false" >> $GITHUB_OUTPUT - else - echo "Agents to build: ${#agents_to_build[@]}" - agents_json=$(printf '%s\n' "${agents_to_build[@]}" | jq -R -s -c 'split("\n") | map(select(length > 0))') - echo "agents=$agents_json" >> $GITHUB_OUTPUT - echo "has_agents=true" >> $GITHUB_OUTPUT - - echo "" >> $GITHUB_STEP_SUMMARY - for agent in "${agents_to_build[@]}"; do - echo "- \`$agent\`" >> $GITHUB_STEP_SUMMARY - done - echo "" >> $GITHUB_STEP_SUMMARY - fi - - build-agents: - needs: [find-agents] - if: ${{ needs.find-agents.outputs.has_agents == 'true' }} - runs-on: ubuntu-latest - timeout-minutes: 15 - strategy: - matrix: - agent_path: ${{ fromJson(needs.find-agents.outputs.agents) }} - fail-fast: false - - name: build-${{ matrix.agent_path }} - steps: - - name: Checkout repository - uses: actions/checkout@v4 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - - name: Set up Python - uses: actions/setup-python@v4 - with: - python-version: "3.12" - - - name: Get latest agentex-sdk version from PyPI - id: get-version - run: | - LATEST_VERSION=$(curl -s https://pypi.org/pypi/agentex-sdk/json | jq -r '.info.version') - echo "Latest agentex-sdk version: $LATEST_VERSION" - echo "AGENTEX_SDK_VERSION=$LATEST_VERSION" >> $GITHUB_ENV - pip install agentex-sdk==$LATEST_VERSION - echo "Installed agentex-sdk version $LATEST_VERSION" - - - name: Generate Image name - id: image-name - run: | - # Remove examples/tutorials/ prefix and replace / with - - AGENT_NAME=$(echo "${{ matrix.agent_path }}" | sed 's|^examples/tutorials/||' | sed 's|/|-|g') - echo "AGENT_NAME=$AGENT_NAME" >> $GITHUB_ENV - echo 
"agent_name=$AGENT_NAME" >> $GITHUB_OUTPUT - echo "Agent name set to $AGENT_NAME" - - - name: Login to GitHub Container Registry - # Only login if we're going to push (main branch or rebuild_all) - if: ${{ github.event_name == 'push' || inputs.rebuild_all }} - uses: docker/login-action@v3 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Build and Conditionally Push Agent Image - env: - REGISTRY: ghcr.io - run: | - AGENT_NAME="${{ steps.image-name.outputs.agent_name }}" - REPOSITORY_NAME="${{ github.repository }}/tutorial-agents/${AGENT_NAME}" - - # Determine if we should push based on event type - if [ "${{ github.event_name }}" = "push" ] || [ "${{ inputs.rebuild_all }}" = "true" ]; then - SHOULD_PUSH=true - VERSION_TAG="latest" - echo "๐Ÿš€ Building and pushing agent: ${{ matrix.agent_path }}" - else - SHOULD_PUSH=false - VERSION_TAG="${{ github.sha }}" - echo "๐Ÿ” Validating build for agent: ${{ matrix.agent_path }}" - fi - - # Build command - add --push only if we should push - BUILD_ARGS="--manifest ${{ matrix.agent_path }}/manifest.yaml --registry ${REGISTRY} --tag ${VERSION_TAG} --platforms linux/amd64,linux/arm64 --repository-name ${REPOSITORY_NAME}" - - if [ "$SHOULD_PUSH" = "true" ]; then - agentex agents build $BUILD_ARGS --push - echo "โœ… Successfully built and pushed: ${REGISTRY}/${REPOSITORY_NAME}:${VERSION_TAG}" - else - agentex agents build $BUILD_ARGS - echo "โœ… Build validation successful for: ${{ matrix.agent_path }}" - fi diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml deleted file mode 100644 index 0cc9a4f7..00000000 --- a/.github/workflows/main.yml +++ /dev/null @@ -1,6 +0,0 @@ -name: Test AgentEx Tutorials - -on: - workflow_dispatch: - - workflow_call: diff --git a/.github/workflows/publish-pypi.yml b/.github/workflows/publish-pypi.yml index a2fcae5d..2f6d7779 100644 --- a/.github/workflows/publish-pypi.yml +++ b/.github/workflows/publish-pypi.yml @@ -21,8 +21,8 @@ jobs: curl -sSf https://rye.astral.sh/get | bash echo "$HOME/.rye/shims" >> $GITHUB_PATH env: - RYE_VERSION: "0.44.0" - RYE_INSTALL_OPTION: "--yes" + RYE_VERSION: '0.44.0' + RYE_INSTALL_OPTION: '--yes' - name: Publish to PyPI run: | diff --git a/.gitignore b/.gitignore index 3666be24..95ceb189 100644 --- a/.gitignore +++ b/.gitignore @@ -13,10 +13,3 @@ dist .envrc codegen.log Brewfile.lock.json - -.DS_Store - -examples/**/uv.lock - -# Claude workspace directories -.claude-workspace/ \ No newline at end of file diff --git a/.python-version b/.python-version index e4fba218..43077b24 100644 --- a/.python-version +++ b/.python-version @@ -1 +1 @@ -3.12 +3.9.18 diff --git a/.stats.yml b/.stats.yml index 3b293f90..fbcaba61 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 35 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/sgp%2Fagentex-sdk-6feb0601dafb255298a2f1da01d64541d40da90aeb527e2f444c49c993e8c162.yml -openapi_spec_hash: 973cd2ed3c945818d15b7deee0b25d71 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/sgp%2Fagentex-sdk-8f9296ac9fa68bb264c4739463e55ce27cdafb31b705b27600d6656db7b0dac5.yml +openapi_spec_hash: 47f4675ac3c7198869240b5c6f33f8fd config_hash: 32eb65911c08ac84d117cecdf2759869 diff --git a/.vscode/launch.json b/.vscode/launch.json deleted file mode 100644 index 2d735caf..00000000 --- a/.vscode/launch.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "version": "0.2.0", - "configurations": [ - { - "name": "Attach to AgentEx Worker", - 
"type": "debugpy", - "request": "attach", - "connect": { - "host": "localhost", - "port": 5678 - }, - "pathMappings": [ - { - "localRoot": "${workspaceFolder}", - "remoteRoot": "." - } - ], - "justMyCode": false, - "console": "integratedTerminal" - }, - { - "name": "Attach to AgentEx Worker (Port 5679)", - "type": "debugpy", - "request": "attach", - "connect": { - "host": "localhost", - "port": 5679 - }, - "pathMappings": [ - { - "localRoot": "${workspaceFolder}", - "remoteRoot": "." - } - ], - "justMyCode": false, - "console": "integratedTerminal" - } - ] -} \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md deleted file mode 100644 index f1e2ab4e..00000000 --- a/CHANGELOG.md +++ /dev/null @@ -1,547 +0,0 @@ -# Changelog - -## 0.7.3 (2025-12-10) - -Full Changelog: [v0.7.2...v0.7.3](https://github.com/scaleapi/scale-agentex-python/compare/v0.7.2...v0.7.3) - -## 0.7.2 (2025-12-10) - -Full Changelog: [v0.7.1...v0.7.2](https://github.com/scaleapi/scale-agentex-python/compare/v0.7.1...v0.7.2) - -## 0.7.1 (2025-12-09) - -Full Changelog: [v0.7.0...v0.7.1](https://github.com/scaleapi/scale-agentex-python/compare/v0.7.0...v0.7.1) - -### Features - -* **api:** api update ([92b2710](https://github.com/scaleapi/scale-agentex-python/commit/92b2710e0f060a8d59f8d8237c3ca7b8e923867a)) - -## 0.7.0 (2025-12-09) - -Full Changelog: [v0.6.7...v0.7.0](https://github.com/scaleapi/scale-agentex-python/compare/v0.6.7...v0.7.0) - -### Features - -* **api:** add messages/paginated endpoint ([3e03aff](https://github.com/scaleapi/scale-agentex-python/commit/3e03aff8490e0556cb05052d385156eda8f28107)) -* **api:** add messages/paginated to stainless config ([2473ded](https://github.com/scaleapi/scale-agentex-python/commit/2473ded39274bcd0a16d7314667fcf7f55e829c2)) -* **api:** api update ([f6eccdf](https://github.com/scaleapi/scale-agentex-python/commit/f6eccdf975eaef9b257ef3f20f087f2f2f9b3665)) -* **api:** api update ([41067fb](https://github.com/scaleapi/scale-agentex-python/commit/41067fb79725787e0ceb20dcf16029998bcbca24)) -* **api:** api update ([cdc9c63](https://github.com/scaleapi/scale-agentex-python/commit/cdc9c636be6f26e84772d1d1ef9d47cddcd9dabc)) -* **api:** api update ([413d9c8](https://github.com/scaleapi/scale-agentex-python/commit/413d9c806d918d7c5da3d0249c0f11d4b9f0894e)) -* **api:** api update ([1b4bf7d](https://github.com/scaleapi/scale-agentex-python/commit/1b4bf7d3a11306a50ec0eb9c20764c585d0e98e4)) -* **api:** manual updates ([131e836](https://github.com/scaleapi/scale-agentex-python/commit/131e836b5bda8248f847b00308b6711a1ee84ee0)) - - -### Bug Fixes - -* ensure streams are always closed ([7bb9db8](https://github.com/scaleapi/scale-agentex-python/commit/7bb9db851a213d261e585cd2f156046f05cf85db)) -* **types:** allow pyright to infer TypedDict types within SequenceNotStr ([9cfc9d6](https://github.com/scaleapi/scale-agentex-python/commit/9cfc9d66579a11f3eaf248bafbfddb422e878a58)) - - -### Chores - -* add missing docstrings ([81f1fa9](https://github.com/scaleapi/scale-agentex-python/commit/81f1fa9b3c440d893b8ea8f773ab2592eb333d65)) -* **deps:** mypy 1.18.1 has a regression, pin to 1.17 ([e20aaa4](https://github.com/scaleapi/scale-agentex-python/commit/e20aaa495384f547dd18c8d31496f70b4a37e0dd)) -* **docs:** use environment variables for authentication in code snippets ([a30f6ae](https://github.com/scaleapi/scale-agentex-python/commit/a30f6aebca8de5be72eb7bcf7a3b3ccea28479bc)) -* update lockfile 
([a3a2e4f](https://github.com/scaleapi/scale-agentex-python/commit/a3a2e4fbcf6e6e4bcbadab50c6b9236e4514dae2)) - -## 0.6.7 (2025-11-19) - -Full Changelog: [v0.6.6...v0.6.7](https://github.com/scaleapi/scale-agentex-python/compare/v0.6.6...v0.6.7) - -## 0.6.6 (2025-11-12) - -Full Changelog: [v0.6.5...v0.6.6](https://github.com/scaleapi/scale-agentex-python/compare/v0.6.5...v0.6.6) - -### Bug Fixes - -* compat with Python 3.14 ([9a62f23](https://github.com/scaleapi/scale-agentex-python/commit/9a62f23376ef797bafe67f61552eb7635286caa3)) -* **compat:** update signatures of `model_dump` and `model_dump_json` for Pydantic v1 ([cf857f9](https://github.com/scaleapi/scale-agentex-python/commit/cf857f9191f10a971e9cba2a8c764229ed4a7dfe)) - - -### Chores - -* **internal:** restore stats ([5ec0383](https://github.com/scaleapi/scale-agentex-python/commit/5ec0383d9d6a85b342263ba49b8e3893924c59fc)) -* **package:** drop Python 3.8 support ([3d4dc37](https://github.com/scaleapi/scale-agentex-python/commit/3d4dc37f87b8d8f1debbe6505746342e461772ba)) - -## 0.6.5 (2025-11-06) - -Full Changelog: [v0.6.4...v0.6.5](https://github.com/scaleapi/scale-agentex-python/compare/v0.6.4...v0.6.5) - -## 0.6.4 (2025-11-06) - -Full Changelog: [v0.6.3...v0.6.4](https://github.com/scaleapi/scale-agentex-python/compare/v0.6.3...v0.6.4) - -## 0.6.3 (2025-11-06) - -Full Changelog: [v0.6.2...v0.6.3](https://github.com/scaleapi/scale-agentex-python/compare/v0.6.2...v0.6.3) - -## 0.6.2 (2025-11-05) - -Full Changelog: [v0.6.1...v0.6.2](https://github.com/scaleapi/scale-agentex-python/compare/v0.6.1...v0.6.2) - -### Features - -* **api:** update via SDK Studio ([b732dfa](https://github.com/scaleapi/scale-agentex-python/commit/b732dfac50cacc90c84a751fd6c75d18fa5b43ed)) - -## 0.6.1 (2025-11-05) - -Full Changelog: [v0.6.0...v0.6.1](https://github.com/scaleapi/scale-agentex-python/compare/v0.6.0...v0.6.1) - -### Features - -* **api:** api update ([f6189a4](https://github.com/scaleapi/scale-agentex-python/commit/f6189a43e1430fdd16c8d10e6ad835d9dfa5871c)) -* **api:** api update ([714c719](https://github.com/scaleapi/scale-agentex-python/commit/714c7194e488e6070c99e200b91189f50dcdb831)) - -## 0.6.0 (2025-11-04) - -Full Changelog: [v0.5.3...v0.6.0](https://github.com/scaleapi/scale-agentex-python/compare/v0.5.3...v0.6.0) - -### Features - -* **api:** api update ([ec61dd3](https://github.com/scaleapi/scale-agentex-python/commit/ec61dd3124fbf169dcdcced262a30bfbed080b5f)) - - -### Chores - -* **internal:** grammar fix (it's -> its) ([36e27da](https://github.com/scaleapi/scale-agentex-python/commit/36e27daed52435b300f090ac4643cd502a817a1e)) - -## 0.5.3 (2025-10-31) - -Full Changelog: [v0.5.2...v0.5.3](https://github.com/scaleapi/scale-agentex-python/compare/v0.5.2...v0.5.3) - -### Chores - -* re apply example updates ([043973b](https://github.com/scaleapi/scale-agentex-python/commit/043973bec649ab2304eff7a313938e1e3e5377e5)) - -## 0.5.2 (2025-10-31) - -Full Changelog: [v0.5.0...v0.5.2](https://github.com/scaleapi/scale-agentex-python/compare/v0.5.0...v0.5.2) - -### Features - -* **api:** manual updates ([dc66b57](https://github.com/scaleapi/scale-agentex-python/commit/dc66b57618525669b3aa15676343ef542675a5f9)) -* bump the helm chart version ([1ffafb0](https://github.com/scaleapi/scale-agentex-python/commit/1ffafb0406138d6abd84254fa394b88c4a28ce70)) - - -### Chores - -* sync repo ([0e05416](https://github.com/scaleapi/scale-agentex-python/commit/0e05416219ca93ae347e6175804bc0f2259a6b44)) - -## 0.5.0 (2025-10-28) - -Full Changelog: 
[v0.4.28...v0.5.0](https://github.com/scaleapi/agentex-python/compare/v0.4.28...v0.5.0) - -### Features - -* **api:** api update ([129fae6](https://github.com/scaleapi/agentex-python/commit/129fae69844e655b5dd02b6f67c44d15f5dbfa93)) - -## 0.4.28 (2025-10-28) - -Full Changelog: [v0.4.27...v0.4.28](https://github.com/scaleapi/agentex-python/compare/v0.4.27...v0.4.28) - -## 0.4.27 (2025-10-27) - -Full Changelog: [v0.4.26...v0.4.27](https://github.com/scaleapi/agentex-python/compare/v0.4.26...v0.4.27) - -### Features - -* **api:** api update ([f5e4fd2](https://github.com/scaleapi/agentex-python/commit/f5e4fd2f2fbb2c7e67e51795fba1f0b2e13048de)) - -## 0.4.26 (2025-10-21) - -Full Changelog: [v0.4.25...v0.4.26](https://github.com/scaleapi/agentex-python/compare/v0.4.25...v0.4.26) - -### Features - -* **api:** api update ([0c1dedd](https://github.com/scaleapi/agentex-python/commit/0c1dedd0fecb05e3684f110cc589f2abe55acb97)) -* **api:** api update ([719dc74](https://github.com/scaleapi/agentex-python/commit/719dc74f7844e2a3c14e46996e353d9c632b8e0a)) - - -### Chores - -* bump `httpx-aiohttp` version to 0.1.9 ([21c7921](https://github.com/scaleapi/agentex-python/commit/21c79210a0d65944fec5010fcc581a2d85fb94ab)) - -## 0.4.25 (2025-10-10) - -Full Changelog: [v0.4.24...v0.4.25](https://github.com/scaleapi/agentex-python/compare/v0.4.24...v0.4.25) - -## 0.4.24 (2025-10-10) - -Full Changelog: [v0.4.23...v0.4.24](https://github.com/scaleapi/agentex-python/compare/v0.4.23...v0.4.24) - -### Features - -* **api:** manual updates ([09996ea](https://github.com/scaleapi/agentex-python/commit/09996ea688a7225670bdd9d944b64801fac7acce)) - - -### Bug Fixes - -* health check port handling ([#138](https://github.com/scaleapi/agentex-python/issues/138)) ([fe22301](https://github.com/scaleapi/agentex-python/commit/fe223012db49768f38c4de56b5d5744031b631d1)) - - -### Chores - -* do not install brew dependencies in ./scripts/bootstrap by default ([2675e14](https://github.com/scaleapi/agentex-python/commit/2675e14bf9f3a0113a849caf2283376c448f9d03)) -* improve example values ([6997fe5](https://github.com/scaleapi/agentex-python/commit/6997fe57910ea54d6d71b25fdea4497925c8ec63)) -* **internal:** detect missing future annotations with ruff ([f1aa71f](https://github.com/scaleapi/agentex-python/commit/f1aa71f89bb0e8369e6d895b5111dc15fd1e2c12)) -* **internal:** update pydantic dependency ([156ea64](https://github.com/scaleapi/agentex-python/commit/156ea64a4fa317d3ab483e7b9b6ba63471b618ef)) -* **internal:** version bump ([8567752](https://github.com/scaleapi/agentex-python/commit/85677527f5c8d393f0eea0a2a629da48fb56f4a9)) -* **internal:** version bump ([45206dd](https://github.com/scaleapi/agentex-python/commit/45206dd28643403800c386b75e1c9a442c8978ae)) -* **internal:** version bump ([98354ba](https://github.com/scaleapi/agentex-python/commit/98354ba2e7630798e25a8e278cba44c1aacc1e08)) -* **internal:** version bump ([aa2a8db](https://github.com/scaleapi/agentex-python/commit/aa2a8db5907f78b4b39849a1900dae27412359bb)) -* **internal:** version bump ([73bba2a](https://github.com/scaleapi/agentex-python/commit/73bba2a59e77fa31caab5b668781b71bc7c5ec2d)) -* **types:** change optional parameter type from NotGiven to Omit ([2117d77](https://github.com/scaleapi/agentex-python/commit/2117d77219da097e784d5d2deab1632a2855dae9)) - -## 0.4.23 (2025-10-02) - -Full Changelog: [v0.4.22...v0.4.23](https://github.com/scaleapi/agentex-python/compare/v0.4.22...v0.4.23) - -### Features - -* Adding Agent info to SGP tracing metadata 
([#85](https://github.com/scaleapi/agentex-python/issues/85)) ([900f66b](https://github.com/scaleapi/agentex-python/commit/900f66b60bc61ac515a7e43172d573a31c623fa9)) - -## 0.4.22 (2025-10-01) - -Full Changelog: [v0.4.21...v0.4.22](https://github.com/scaleapi/agentex-python/compare/v0.4.21...v0.4.22) - -## 0.4.21 (2025-10-01) - -Full Changelog: [v0.4.20...v0.4.21](https://github.com/scaleapi/agentex-python/compare/v0.4.20...v0.4.21) - -## 0.4.20 (2025-10-01) - -Full Changelog: [v0.4.19...v0.4.20](https://github.com/scaleapi/agentex-python/compare/v0.4.19...v0.4.20) - -## 0.4.19 (2025-10-01) - -Full Changelog: [v0.4.18...v0.4.19](https://github.com/scaleapi/agentex-python/compare/v0.4.18...v0.4.19) - -### Features - -* Adds helm config to Agent Environment ([#125](https://github.com/scaleapi/agentex-python/issues/125)) ([e4b39b5](https://github.com/scaleapi/agentex-python/commit/e4b39b5f319452bbc6650a7ef41b3a3179bb3b93)) - -## 0.4.18 (2025-09-29) - -Full Changelog: [v0.4.17...v0.4.18](https://github.com/scaleapi/agentex-python/compare/v0.4.17...v0.4.18) - -### Chores - -* **internal:** version bump ([eded756](https://github.com/scaleapi/agentex-python/commit/eded756bde2f3b4cfcf02c7a9cf72e70a82dd9aa)) - -## 0.4.17 (2025-09-29) - -Full Changelog: [v0.4.16...v0.4.17](https://github.com/scaleapi/agentex-python/compare/v0.4.16...v0.4.17) - -## 0.4.16 (2025-09-16) - -Full Changelog: [v0.4.15...v0.4.16](https://github.com/scaleapi/agentex-python/compare/v0.4.15...v0.4.16) - -## 0.4.15 (2025-09-16) - -Full Changelog: [v0.4.14...v0.4.15](https://github.com/scaleapi/agentex-python/compare/v0.4.14...v0.4.15) - -## 0.4.14 (2025-09-16) - -Full Changelog: [v0.4.13...v0.4.14](https://github.com/scaleapi/agentex-python/compare/v0.4.13...v0.4.14) - -### Features - -* add previous_response_id parameter to OpenAI module ([7a78844](https://github.com/scaleapi/agentex-python/commit/7a78844f9efbfac606c7e52d1f469db0728c9e56)) - -## 0.4.13 (2025-09-12) - -Full Changelog: [v0.4.12...v0.4.13](https://github.com/scaleapi/agentex-python/compare/v0.4.12...v0.4.13) - -### Features - -* **api:** api update ([0102183](https://github.com/scaleapi/agentex-python/commit/0102183a8f5a23dbdaf905ffbe7ffbcf59bf7b21)) -* **api:** api update ([8a6edb1](https://github.com/scaleapi/agentex-python/commit/8a6edb13046ca24bf6c45fc018e32de498d48869)) - -## 0.4.12 (2025-09-08) - -Full Changelog: [v0.4.11...v0.4.12](https://github.com/scaleapi/agentex-python/compare/v0.4.11...v0.4.12) - -### โš  BREAKING CHANGES - -* task_cancel now requires explicit agent_name/agent_id parameter to identify which agent owns the task being cancelled - -### Bug Fixes - -* task cancellation architectural bug ([f9a72a9](https://github.com/scaleapi/agentex-python/commit/f9a72a94f96afe86d3cc80f4f85ea368279d4517)) - -## 0.4.11 (2025-09-04) - -Full Changelog: [v0.4.10...v0.4.11](https://github.com/scaleapi/agentex-python/compare/v0.4.10...v0.4.11) - -### Features - -* Guardrail support ([e3e9bf9](https://github.com/scaleapi/agentex-python/commit/e3e9bf9dd6cf16b9a783638690d4a31914be8139)) -* improve future compat with pydantic v3 ([f0d8624](https://github.com/scaleapi/agentex-python/commit/f0d86244065c88bb2777db8fabeb1921e7e01116)) -* multiple guardrails ([ea8f98a](https://github.com/scaleapi/agentex-python/commit/ea8f98a973ba486e854cf14528a88eb73a203cf8)) -* **templates:** add custom activity timeout guidance for temporal agents ([7658256](https://github.com/scaleapi/agentex-python/commit/765825680132677ea0351f2a9410f472ee754906)) -* **types:** replace List[str] 
with SequenceNotStr in params ([f319781](https://github.com/scaleapi/agentex-python/commit/f3197818637574cd92b2c1f710679155eddf5af7)) - - -### Bug Fixes - -* Adding new example for guardrails instead of using 10_async ([15dc44b](https://github.com/scaleapi/agentex-python/commit/15dc44b333a977564c9974cc089d5ef578840714)) -* avoid newer type syntax ([6b5c82a](https://github.com/scaleapi/agentex-python/commit/6b5c82aab9ebcf755575b641aced2b77a13a71c3)) - - -### Chores - -* **internal:** add Sequence related utils ([496034d](https://github.com/scaleapi/agentex-python/commit/496034db4d6cba361c1f392a4bb86f6ab057e878)) -* **internal:** change ci workflow machines ([7445d94](https://github.com/scaleapi/agentex-python/commit/7445d94cb860f92911ec97ecd951149557956c6a)) -* **internal:** move mypy configurations to `pyproject.toml` file ([e96cd34](https://github.com/scaleapi/agentex-python/commit/e96cd34629d5ea173446c3184fbfe28bd2b370a0)) -* **internal:** update pyright exclude list ([d952430](https://github.com/scaleapi/agentex-python/commit/d952430ab4cbc41bca06010bbcfea3eeb022073e)) - -## 0.4.10 (2025-08-24) - -Full Changelog: [v0.4.9...v0.4.10](https://github.com/scaleapi/agentex-python/compare/v0.4.9...v0.4.10) - -## 0.4.9 (2025-08-22) - -Full Changelog: [v0.4.8...v0.4.9](https://github.com/scaleapi/agentex-python/compare/v0.4.8...v0.4.9) - -## 0.4.8 (2025-08-22) - -Full Changelog: [v0.4.7...v0.4.8](https://github.com/scaleapi/agentex-python/compare/v0.4.7...v0.4.8) - -## 0.4.7 (2025-08-22) - -Full Changelog: [v0.4.6...v0.4.7](https://github.com/scaleapi/agentex-python/compare/v0.4.6...v0.4.7) - -### Chores - -* update github action ([677e95d](https://github.com/scaleapi/agentex-python/commit/677e95de075b7031cfc4971d7d09769daaa5b2af)) - -## 0.4.6 (2025-08-20) - -Full Changelog: [v0.4.5...v0.4.6](https://github.com/scaleapi/agentex-python/compare/v0.4.5...v0.4.6) - -### Features - -* **api:** api update ([7b4c80a](https://github.com/scaleapi/agentex-python/commit/7b4c80acb502c29df63a3d66a1b29b653d2e3cf5)) - - -### Chores - -* generate release ([0836e4a](https://github.com/scaleapi/agentex-python/commit/0836e4a632e8f3aa0cd05fc6b61581f8c8be9bcd)) - -## 0.4.5 (2025-08-20) - -Full Changelog: [v0.4.4...v0.4.5](https://github.com/scaleapi/agentex-python/compare/v0.4.4...v0.4.5) - -### Features - -* **api:** manual updates ([34a53aa](https://github.com/scaleapi/agentex-python/commit/34a53aa28b8f862d74dd1603d92b7dd5dd28ddb1)) - - -### Bug Fixes - -* enable FunctionTool serialization for Temporal worker nodes ([c9eb040](https://github.com/scaleapi/agentex-python/commit/c9eb04002825195187cd58f34c9185349a63566e)) -* **tools:** handle callable objects in model serialization to facilitate tool calling ([4e9bb87](https://github.com/scaleapi/agentex-python/commit/4e9bb87d7faa2c2e1643893a168f7c6affd2809d)) - - -### Chores - -* demonstrate FunctionTool use in a (temporal) tutorial ([3a72043](https://github.com/scaleapi/agentex-python/commit/3a7204333c328fab8ba0f1d31fd26994ea176ecf)) - -## 0.4.4 (2025-08-17) - -Full Changelog: [v0.4.3...v0.4.4](https://github.com/scaleapi/agentex-python/compare/v0.4.3...v0.4.4) - -## 0.4.3 (2025-08-17) - -Full Changelog: [v0.4.2...v0.4.3](https://github.com/scaleapi/agentex-python/compare/v0.4.2...v0.4.3) - -## 0.4.2 (2025-08-17) - -Full Changelog: [v0.4.1...v0.4.2](https://github.com/scaleapi/agentex-python/compare/v0.4.1...v0.4.2) - -## 0.4.1 (2025-08-16) - -Full Changelog: [v0.4.0...v0.4.1](https://github.com/scaleapi/agentex-python/compare/v0.4.0...v0.4.1) - -## 0.4.0 (2025-08-15) 
- -Full Changelog: [v0.3.0...v0.4.0](https://github.com/scaleapi/agentex-python/compare/v0.3.0...v0.4.0) - -### Features - -* **api:** manual updates ([ce2a201](https://github.com/scaleapi/agentex-python/commit/ce2a201227ff6659874672fc7c6a890f25dfaa08)) -* **api:** manual updates ([7afbafd](https://github.com/scaleapi/agentex-python/commit/7afbafd03fdcbd464305fe6f0592141117d3527c)) - -## 0.3.0 (2025-08-14) - -Full Changelog: [v0.2.10...v0.3.0](https://github.com/scaleapi/agentex-python/compare/v0.2.10...v0.3.0) - -### Features - -* **api:** api update ([ad779b4](https://github.com/scaleapi/agentex-python/commit/ad779b4ce6a9f21b4f69c88770269b404ac25818)) -* **api:** manual updates ([9dc2f75](https://github.com/scaleapi/agentex-python/commit/9dc2f7511750884ec6754d91e6d27592f85b72e5)) - -## 0.2.10 (2025-08-13) - -Full Changelog: [v0.2.9...v0.2.10](https://github.com/scaleapi/agentex-python/compare/v0.2.9...v0.2.10) - -## 0.2.9 (2025-08-12) - -Full Changelog: [v0.2.8...v0.2.9](https://github.com/scaleapi/agentex-python/compare/v0.2.8...v0.2.9) - -### Chores - -* **internal:** update test skipping reason ([4affc92](https://github.com/scaleapi/agentex-python/commit/4affc925c69ed626d429732b470d4d1535b1be8d)) - -## 0.2.8 (2025-08-09) - -Full Changelog: [v0.2.7...v0.2.8](https://github.com/scaleapi/agentex-python/compare/v0.2.7...v0.2.8) - -### Chores - -* **internal:** update comment in script ([401f1d7](https://github.com/scaleapi/agentex-python/commit/401f1d79034ecb0b556a26debde79681bc21e8ae)) -* update @stainless-api/prism-cli to v5.15.0 ([4d332d0](https://github.com/scaleapi/agentex-python/commit/4d332d0f77a5a11ca6781a5fc7690ae82653cadb)) - -## 0.2.7 (2025-08-08) - -Full Changelog: [v0.2.6...v0.2.7](https://github.com/scaleapi/agentex-python/compare/v0.2.6...v0.2.7) - -### Features - -* **api:** api update ([e3d08ba](https://github.com/scaleapi/agentex-python/commit/e3d08baad59346db48e04a394a929d6347dafa07)) -* debug features ([40d8db2](https://github.com/scaleapi/agentex-python/commit/40d8db22dcc8f00a6c78e9bc3e1d036ebd1423b6)) - - -### Chores - -* **internal:** fix ruff target version ([1b880e1](https://github.com/scaleapi/agentex-python/commit/1b880e1dd81d47bb9df12507f13351611ff6367f)) - -## 0.2.6 (2025-08-01) - -Full Changelog: [v0.2.5...v0.2.6](https://github.com/scaleapi/agentex-python/compare/v0.2.5...v0.2.6) - -### Features - -* **api:** add query params to tasks.list ([d4902d5](https://github.com/scaleapi/agentex-python/commit/d4902d52caf82e2f57d1bbf19527cdc1448ed397)) -* **client:** support file upload requests ([e004b30](https://github.com/scaleapi/agentex-python/commit/e004b304c22286151330c2200bcb85046a7ac111)) - -## 0.2.5 (2025-07-30) - -Full Changelog: [v0.2.4...v0.2.5](https://github.com/scaleapi/agentex-python/compare/v0.2.4...v0.2.5) - -### Features - -* **api:** api update ([f90002c](https://github.com/scaleapi/agentex-python/commit/f90002c247a94cddc17307fb4eded12359cc9ad8)) -* **api:** api update ([aee4ad1](https://github.com/scaleapi/agentex-python/commit/aee4ad10e588386e9af1b4828d16ddba1805dca0)) -* **api:** manual updates ([55efcdd](https://github.com/scaleapi/agentex-python/commit/55efcdd55f2a20d1172da95cd551751d8be0d0df)) - -## 0.2.4 (2025-07-29) - -Full Changelog: [v0.2.3...v0.2.4](https://github.com/scaleapi/agentex-python/compare/v0.2.3...v0.2.4) - -## 0.2.3 (2025-07-29) - -Full Changelog: [v0.2.2...v0.2.3](https://github.com/scaleapi/agentex-python/compare/v0.2.2...v0.2.3) - -## 0.2.2 (2025-07-28) - -Full Changelog: 
[v0.2.1...v0.2.2](https://github.com/scaleapi/agentex-python/compare/v0.2.1...v0.2.2) - -### Features - -* **api:** api update ([eb79533](https://github.com/scaleapi/agentex-python/commit/eb79533dd041b7fccccc6a75abedd0c87e9c55e5)) - -## 0.2.1 (2025-07-27) - -Full Changelog: [v0.2.0...v0.2.1](https://github.com/scaleapi/agentex-python/compare/v0.2.0...v0.2.1) - -## 0.2.0 (2025-07-25) - -Full Changelog: [v0.1.1...v0.2.0](https://github.com/scaleapi/agentex-python/compare/v0.1.1...v0.2.0) - -### Features - -* **api:** update typescript sdk with big changes ([2c75d64](https://github.com/scaleapi/agentex-python/commit/2c75d642348df727505778c347efa568930ea4f0)) - - -### Chores - -* **project:** add settings file for vscode ([0f926cc](https://github.com/scaleapi/agentex-python/commit/0f926cce7df375de33627f8212caacf64f89b1ed)) - -## 0.1.1 (2025-07-24) - -Full Changelog: [v0.1.0...v0.1.1](https://github.com/scaleapi/agentex-python/compare/v0.1.0...v0.1.1) - -### Features - -* **api:** manual updates ([714e97e](https://github.com/scaleapi/agentex-python/commit/714e97ed1813a4a91b421fb77fadaf2afac2450d)) -* **api:** manual updates ([8dccfbd](https://github.com/scaleapi/agentex-python/commit/8dccfbdd9b8b887bfb99c79a9a28163215560ae4)) -* **api:** manual updates ([03af884](https://github.com/scaleapi/agentex-python/commit/03af884e31a3df4d42a863c06c5ab4dfc2374374)) - -## 0.1.0 (2025-07-23) - -Full Changelog: [v0.1.0-alpha.6...v0.1.0](https://github.com/scaleapi/agentex-python/compare/v0.1.0-alpha.6...v0.1.0) - -### Features - -* **api:** manual updates ([84010e4](https://github.com/scaleapi/agentex-python/commit/84010e4adecf7c779abd9a828000a3b50d9d3ac3)) - -## 0.1.0-alpha.6 (2025-07-23) - -Full Changelog: [v0.1.0-alpha.5...v0.1.0-alpha.6](https://github.com/scaleapi/agentex-python/compare/v0.1.0-alpha.5...v0.1.0-alpha.6) - -### Features - -* **api:** api update ([af18034](https://github.com/scaleapi/agentex-python/commit/af18034e4173794ebf42eff688f26d64caca4e64)) -* **api:** api update ([be9b603](https://github.com/scaleapi/agentex-python/commit/be9b60326817566d5c5edcbd7b7babb6db07e539)) -* **api:** manual updates ([bbe3be3](https://github.com/scaleapi/agentex-python/commit/bbe3be30aa9fb8d7a677f0e9f0be4dd565563d6e)) - -## 0.1.0-alpha.5 (2025-07-23) - -Full Changelog: [v0.1.0-alpha.4...v0.1.0-alpha.5](https://github.com/scaleapi/agentex-python/compare/v0.1.0-alpha.4...v0.1.0-alpha.5) - -### Features - -* **api:** deprecate name subresource ([14881c0](https://github.com/scaleapi/agentex-python/commit/14881c0ff2922e0a622975a0f5b314de99d7aabb)) -* **api:** manual updates ([d999a43](https://github.com/scaleapi/agentex-python/commit/d999a438c409f04b7e36b5df2d9b080d1d1b0e4a)) -* **api:** manual updates ([a885d8d](https://github.com/scaleapi/agentex-python/commit/a885d8dbabfe2cc2a556ef02e75e5502fd799c46)) - - -### Bug Fixes - -* **api:** build errors ([7bde6b7](https://github.com/scaleapi/agentex-python/commit/7bde6b727d6d16ebd6805ef843596fc3224445a6)) -* **parsing:** parse extra field types ([d40e6e0](https://github.com/scaleapi/agentex-python/commit/d40e6e0d6911be0bc9bfc419e02bd7c1d5ad5be4)) - -## 0.1.0-alpha.4 (2025-07-22) - -Full Changelog: [v0.1.0-alpha.3...v0.1.0-alpha.4](https://github.com/scaleapi/agentex-python/compare/v0.1.0-alpha.3...v0.1.0-alpha.4) - -## 0.1.0-alpha.3 (2025-07-22) - -Full Changelog: [v0.1.0-alpha.2...v0.1.0-alpha.3](https://github.com/scaleapi/agentex-python/compare/v0.1.0-alpha.2...v0.1.0-alpha.3) - -### Features - -* **api:** api update 
([afedf45](https://github.com/scaleapi/agentex-python/commit/afedf4541ba6219cd04ef7af39a1d451abde75a4))
-
-## 0.1.0-alpha.2 (2025-07-22)
-
-Full Changelog: [v0.1.0-alpha.1...v0.1.0-alpha.2](https://github.com/scaleapi/agentex-python/compare/v0.1.0-alpha.1...v0.1.0-alpha.2)
-
-## 0.1.0-alpha.1 (2025-07-22)
-
-Full Changelog: [v0.0.1-alpha.1...v0.1.0-alpha.1](https://github.com/scaleapi/agentex-python/compare/v0.0.1-alpha.1...v0.1.0-alpha.1)
-
-### Features
-
-* **api:** manual updates ([06f5fe1](https://github.com/scaleapi/agentex-python/commit/06f5fe115ace5ec4ca8149cd0afa6207b193a04c))
-
-## 0.0.1-alpha.1 (2025-07-22)
-
-Full Changelog: [v0.0.1-alpha.0...v0.0.1-alpha.1](https://github.com/scaleapi/agentex-python/compare/v0.0.1-alpha.0...v0.0.1-alpha.1)
-
-### Chores
-
-* sync repo ([bc305f4](https://github.com/scaleapi/agentex-python/commit/bc305f43efedb5b7d7b28eaa059bce1d280c9dbb))
-* update SDK settings ([e5a06b4](https://github.com/scaleapi/agentex-python/commit/e5a06b4e3d8f8ad15d55b92393d7ddd833415f86))
diff --git a/CLAUDE.md b/CLAUDE.md
deleted file mode 100644
index ffb1e50f..00000000
--- a/CLAUDE.md
+++ /dev/null
@@ -1,93 +0,0 @@
-# CLAUDE.md
-
-This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
-
-## Development Commands
-
-### Package Management in the top-level repo
-- Use `rye` for dependency management (preferred)
-- Run `./scripts/bootstrap` to set up the environment
-- Or use `rye sync --all-features` directly
-
-Special note: the individual tutorials maintain their own tutorial-specific virtualenv using `uv`. So when testing or running tutorials, use `uv run` instead of `rye run`. Everything else works the same.
-
-#### Testing
-- Run tests: `rye run pytest` or `./scripts/test`
-- Run specific test: `rye run pytest path/to/test_file.py::TestClass::test_method -v`
-- Mock server is automatically started for tests, runs on port 4010
-
-#### Linting and Formatting
-- Format code: `rye run format` or `./scripts/format`
-  * The repository is still in flux, so running format might accidentally change files that aren't part of your scope of changes. So always run `rye run format` with additional arguments to constrain the formatting to the files that you are modifying.
-- Lint code: `rye run lint` or `./scripts/lint`
-- Type check: `rye run typecheck` (runs both pyright and mypy)
-
-### Building and Running
-- Build package: `rye build`
-
-
-
-### CLI Commands
-The package provides the `agentex` CLI with these main commands:
-- `agentex agents` - Get, list, run, build, and deploy agents
-- `agentex tasks` - Get, list, and delete tasks
-- `agentex secrets` - Sync, get, list, and delete secrets
-- `agentex uv` - UV wrapper with AgentEx-specific enhancements
-- `agentex init` - Initialize new agent projects
-
-### Agent Development
-- Run agents: `agentex agents run --manifest manifest.yaml`
-- Debug agents: `agentex agents run --manifest manifest.yaml --debug-worker`
-- Debug with custom port: `agentex agents run --manifest manifest.yaml --debug-worker --debug-port 5679`
-
-## Architecture Overview
-
-### Code Structure
-- `/src/agentex/` - Core SDK and generated API client code
-- `/src/agentex/lib/` - Custom library code (not modified by code generator)
-  - `/cli/` - Command-line interface implementation
-  - `/core/` - Core services, adapters, and temporal workflows
-  - `/sdk/` - SDK utilities and FastACP implementation
-  - `/types/` - Custom type definitions
-  - `/utils/` - Utility functions
-- `/examples/` - Example implementations and tutorials
-- `/tests/` - Test suites
-
-### Key Components
-
-**SDK Architecture:**
-- **Client Layer**: HTTP client for AgentEx API (`_client.py`, `resources/`)
-- **CLI Layer**: Typer-based command interface (`lib/cli/`)
-- **Core Services**: Temporal workflows, adapters, and services (`lib/core/`)
-- **FastACP**: Fast Agent Communication Protocol implementation (`lib/sdk/fastacp/`)
-- **State Machine**: Workflow state management (`lib/sdk/state_machine/`)
-
-**Temporal Integration:**
-- Workflow definitions in `lib/core/temporal/`
-- Activity definitions for different providers
-- Worker implementations for running temporal workflows
-
-**Agent Framework:**
-- Manifest-driven agent configuration
-- Support for multiple agent types (sync, temporal-based)
-- Debugging support with VS Code integration
-
-### Code Generation
-Most SDK code is auto-generated. Manual changes are preserved in:
-- `src/agentex/lib/` directory
-- `examples/` directory
-- Merge conflicts may occur between manual patches and generator changes
-
-### Key Dependencies
-- `temporalio` - Temporal workflow engine
-- `typer` - CLI framework
-- `pydantic` - Data validation
-- `httpx` - HTTP client
-- `fastapi` - Web framework
-- `ruff` - Linting and formatting
-- `pytest` - Testing framework
-
-### Environment Requirements
-- Python 3.12+ required
-- Uses Rye for dependency management
-- Supports both sync and async client patterns
\ No newline at end of file
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index d0efe047..67ab9a5d 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -126,8 +126,3 @@ You can release to package managers by using [the `Publish PyPI` GitHub action](
 
 If you need to manually release a package, you can run the `bin/publish-pypi` script with a `PYPI_TOKEN` set on the environment.
-
-## 🤖 **Vibe Coding Setup**
-
-This repository is set up with some pre-canned prompts for [Claude Code](https://docs.anthropic.com/en/docs/claude-code) as well as [Cursor](https://cursor.com/).
- diff --git a/README.md b/README.md index 5941deff..44d302aa 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,3 @@ - # Agentex Python API library @@ -68,37 +67,6 @@ asyncio.run(main()) Functionality between the synchronous and asynchronous clients is otherwise identical. -## Debugging - -AgentEx provides built-in debugging support for **temporal projects** during local development. - -```bash -# Basic debugging -uv run agentex agents run --manifest manifest.yaml --debug-worker - -# Wait for debugger to attach before starting -uv run agentex agents run --manifest manifest.yaml --debug-worker --wait-for-debugger - -# Custom debug port -uv run agentex agents run --manifest manifest.yaml --debug-worker --debug-port 5679 -``` - -For **VS Code**, add this configuration to `.vscode/launch.json`: - -```json -{ - "name": "Attach to AgentEx Worker", - "type": "debugpy", - "request": "attach", - "connect": { "host": "localhost", "port": 5678 }, - "pathMappings": [{ "localRoot": "${workspaceFolder}", "remoteRoot": "." }], - "justMyCode": false, - "console": "integratedTerminal" -} -``` - -The debug server automatically finds an available port starting from 5678 and prints connection details when starting. - ### With aiohttp By default, the async client uses `httpx` for HTTP requests. However, for improved concurrency performance you may also use `aiohttp` as the HTTP backend. diff --git a/api.md b/api.md index f70f9b19..7e8e6800 100644 --- a/api.md +++ b/api.md @@ -154,16 +154,3 @@ Methods: - client.tracker.retrieve(tracker_id) -> AgentTaskTracker - client.tracker.update(tracker_id, \*\*params) -> AgentTaskTracker - client.tracker.list(\*\*params) -> TrackerListResponse - -# DeploymentHistory - -Types: - -```python -from agentex.types import DeploymentHistory, DeploymentHistoryListResponse -``` - -Methods: - -- client.deployment_history.retrieve(deployment_id) -> DeploymentHistory -- client.deployment_history.list(\*\*params) -> DeploymentHistoryListResponse diff --git a/examples/demos/procurement_agent/.dockerignore b/examples/demos/procurement_agent/.dockerignore deleted file mode 100644 index c4f7a8b4..00000000 --- a/examples/demos/procurement_agent/.dockerignore +++ /dev/null @@ -1,43 +0,0 @@ -# Python -__pycache__/ -*.py[cod] -*$py.class -*.so -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -*.egg-info/ -.installed.cfg -*.egg - -# Environments -.env** -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# IDE -.idea/ -.vscode/ -*.swp -*.swo - -# Git -.git -.gitignore - -# Misc -.DS_Store \ No newline at end of file diff --git a/examples/demos/procurement_agent/.gitignore b/examples/demos/procurement_agent/.gitignore deleted file mode 100644 index 127b65a2..00000000 --- a/examples/demos/procurement_agent/.gitignore +++ /dev/null @@ -1,63 +0,0 @@ -# Python -__pycache__/ -*.py[cod] -*$py.class -*.so -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -*.egg-info/ -.installed.cfg -*.egg - -# Virtual environments -venv/ -env/ -ENV/ -.venv - -# IDE -.vscode/ -.idea/ -*.swp -*.swo -*~ - -# Database files -*.db -*.sqlite -*.sqlite3 - -# Environment variables -.env -.env.local - -# Logs -*.log - -# OS -.DS_Store -Thumbs.db - -# Jupyter -.ipynb_checkpoints/ -*.ipynb_checkpoints - -# Testing -.pytest_cache/ -.coverage -htmlcov/ - -# UV -.venv/ -uv.lock diff --git a/examples/demos/procurement_agent/Dockerfile b/examples/demos/procurement_agent/Dockerfile deleted file mode 100644 index 
17dd0e68..00000000 --- a/examples/demos/procurement_agent/Dockerfile +++ /dev/null @@ -1,48 +0,0 @@ -# syntax=docker/dockerfile:1.3 -FROM python:3.12-slim -COPY --from=ghcr.io/astral-sh/uv:0.6.4 /uv /uvx /bin/ - -# Install system dependencies -RUN apt-get update && apt-get install -y \ - htop \ - vim \ - curl \ - tar \ - python3-dev \ - postgresql-client \ - build-essential \ - libpq-dev \ - gcc \ - cmake \ - netcat-openbsd \ - nodejs \ - npm \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/** - -# Install tctl (Temporal CLI) -RUN curl -L https://github.com/temporalio/tctl/releases/download/v1.18.1/tctl_1.18.1_linux_arm64.tar.gz -o /tmp/tctl.tar.gz && \ - tar -xzf /tmp/tctl.tar.gz -C /usr/local/bin && \ - chmod +x /usr/local/bin/tctl && \ - rm /tmp/tctl.tar.gz - -RUN uv pip install --system --upgrade pip setuptools wheel - -ENV UV_HTTP_TIMEOUT=1000 - -# Copy just the pyproject.toml file to optimize caching -COPY procurement_agent/pyproject.toml /app/procurement_agent/pyproject.toml - -WORKDIR /app/procurement_agent - -# Install the required Python packages using uv -RUN uv pip install --system . - -# Copy the project code -COPY procurement_agent/project /app/procurement_agent/project - -# Run the ACP server using uvicorn -CMD ["uvicorn", "project.acp:acp", "--host", "0.0.0.0", "--port", "8000"] - -# When we deploy the worker, we will replace the CMD with the following -# CMD ["python", "-m", "run_worker"] \ No newline at end of file diff --git a/examples/demos/procurement_agent/README.md b/examples/demos/procurement_agent/README.md deleted file mode 100644 index 878c2ec3..00000000 --- a/examples/demos/procurement_agent/README.md +++ /dev/null @@ -1,412 +0,0 @@ -# Procurement Agent Demo - -A demonstration of long-running, autonomous AI agents using **Temporal** and **AgentEx**. This agent manages construction procurement workflows that can run for months, respond to external events, and escalate to humans when needed. - -## What This Demo Shows - -This demo illustrates a **procurement manager for building construction** that: - -- **Runs for months or years** - Temporal workflows enable truly persistent agents -- **Responds to external events** - Not just human input, but signals from the real world (shipments, inspections, etc.) -- **Escalates to humans when needed** - Waits (up to 24 hours) for human decisions on critical issues -- **Learns from experience** - Remembers past human decisions and applies them to similar situations -- **Manages complex state** - Uses a database to track construction schedules and procurement items - -### Key Concepts - -**Long-Running Workflows**: Thanks to Temporal, the agent can live for months, surviving restarts and failures while maintaining full context. - -**External Event Integration**: The agent receives real-world signals (not just user messages) via Temporal signals and takes autonomous actions. - -**Human-in-the-Loop**: The agent can pause execution for up to 24 hours while waiting for human approval on critical decisions. - -**Learning System**: When a human makes a decision, the agent extracts learnings and applies them to future similar situations. - -**State Management**: Uses SQLite to persist construction schedules and procurement item status, providing queryable visibility into current operations without parsing conversation history.
- -**Automatic Summarization**: When conversation history exceeds token limits (~40k tokens), the agent automatically summarizes older messages while preserving recent context, enabling indefinite conversation length. - -## Example Workflow - -Here's what happens when items move through the procurement pipeline: - -1. **Submittal Approved** → Agent issues purchase order and creates tracking record -2. **Shipment Departed Factory** → Agent ingests ETA and checks for schedule conflicts -3. **Shipment Arrived Site** → Agent notifies team and schedules quality inspection -4. **Inspection Failed** → Agent escalates to human with recommended action -5. **Human Decision** → Agent learns from the decision for next time - -## Running the Demo - -### Prerequisites - -You'll need three terminals running: - -1. **AgentEx Backend** (database, Temporal server, etc.) -2. **AgentEx UI** (web interface at localhost:3000) -3. **Procurement Agent** (this demo) - -### Step 1: Start AgentEx Backend - -From the `scale-agentex` repository: - -```bash -make dev -``` - -This starts all required services (Postgres, Temporal, Redis, etc.) via Docker Compose. Verify everything is healthy: - -```bash -# Optional: Use lazydocker for a better view -lzd -``` - -You should see Temporal UI at: http://localhost:8080 - -### Step 2: Start AgentEx Web UI - -From the `scale-agentex-web` repository: - -```bash -make dev -``` - -The UI will be available at: http://localhost:3000 - -### Step 3: Run the Procurement Agent - -From this directory (`examples/demos/procurement_agent`): - -```bash -# Install dependencies -uv sync - -# Run the agent -export ENVIRONMENT=development && uv run agentex agents run --manifest manifest.yaml -``` - -The agent will start and register with the AgentEx backend on port 8000. - -### Step 4: Create a Task - -Go to http://localhost:3000 and: - -1. Create a new task for the `procurement-agent` -2. Send a message like "Hello" to initialize the workflow -3. Note the **Workflow ID** from the Temporal UI at http://localhost:8080 - -### Step 5: Send Test Events - -Now simulate real-world procurement events: - -```bash -# Navigate to the scripts directory -cd project/scripts - -# Send events (you'll be prompted for the workflow ID) -uv run send_test_events.py - -# Or provide the workflow ID directly -uv run send_test_events.py <workflow-id> -``` - -The script sends a series of events simulating the procurement lifecycle for multiple items: -- Steel Beams (passes inspection) -- HVAC Units (fails inspection - agent escalates) -- Windows (passes inspection) -- Flooring Materials (passes inspection) -- Electrical Panels (fails inspection - agent applies learnings) - -### Step 6: Observe the Agent - -Watch the agent in action: - -1. **AgentEx UI** (http://localhost:3000) - See agent responses and decisions -2. **Temporal UI** (http://localhost:8080) - View workflow execution, signals, and state -3. 
**Terminal** - Watch agent logs for detailed operation info - -When an inspection fails, the agent will: -- Analyze the situation -- Recommend an action -- Wait for your response in the AgentEx UI -- Learn from your decision for future similar situations - -## Project Structure - -``` -procurement_agent/ -├── project/ -│ ├── acp.py # ACP server & event handlers -│ ├── workflow.py # Main Temporal workflow logic -│ ├── run_worker.py # Temporal worker setup -│ ├── agents/ -│ │ ├── procurement_agent.py # Main AI agent with procurement tools -│ │ ├── extract_learnings_agent.py # Extracts learnings from human decisions -│ │ └── summarization_agent.py # Summarizes conversation history -│ ├── activities/ -│ │ └── activities.py # Temporal activities (POs, inspections, schedules) -│ ├── data/ -│ │ ├── database.py # SQLite operations -│ │ └── procurement.db # Persistent storage (auto-created) -│ ├── models/ -│ │ └── events.py # Event type definitions (Pydantic models) -│ ├── scripts/ -│ │ └── send_test_events.py # Event simulation script -│ └── utils/ -│ ├── learning_extraction.py # Utilities for extracting context from conversations -│ └── summarization.py # Token counting and summarization logic -├── manifest.yaml # Agent configuration -├── Dockerfile # Container definition -└── pyproject.toml # Dependencies (uv) -``` - -## How It Works - -### 1. Event-Driven Architecture - -The agent receives events via Temporal signals in `workflow.py`: - -```python -@workflow.signal -async def send_event(self, event: str) -> None: - # Validate and queue the event - await self.event_queue.put(event) -``` - -Events are validated against Pydantic models and processed by the AI agent. - -### 2. Human-in-the-Loop Pattern - -Critical decisions require human approval via the `wait_for_human` tool in `procurement_agent.py`: - -```python -@function_tool -async def wait_for_human(recommended_action: str) -> str: - """ - Pause execution until human provides input. - Waits up to 24 hours for response. - """ - await workflow.wait_condition( - lambda: not workflow_instance.human_queue.empty(), - timeout=timedelta(hours=24), - ) - # ... return human response -``` - -The workflow continues only after receiving human input through the AgentEx UI. - -### 3. State Management - -Instead of cramming everything into the LLM context window, the agent uses SQLite to manage: - -- **Master construction schedule** (delivery dates, buffer days, requirements) -- **Procurement items** (status, ETAs, purchase orders, inspection results) - -The database is accessed through Temporal activities with proper error handling and retry policies. - -### 4. Learning System - -When humans make decisions, the agent extracts learnings in `extract_learnings_agent.py`: - -```python -# After human input, extract the learning -extraction_result = await Runner.run(extract_agent, new_context, hooks=hooks) -learning = extraction_result.final_output - -# Store in workflow state for future reference -self.human_input_learnings.append(learning) -``` - -These learnings are passed into the agent's system prompt on subsequent runs. - -### 5. Automatic Summarization - -For long-running workflows, conversation history can grow unbounded.
The agent automatically manages context using intelligent summarization: - -```python -# After each turn, check if summarization is needed -if should_summarize(self._state.input_list): - # Find messages to summarize (preserves last 10 turns, starts after previous summary) - messages_to_summarize, start_index, end_index = get_messages_to_summarize( - self._state.input_list, - last_summary_index - ) - - # Generate summary with dedicated agent - summary_agent = new_summarization_agent() - summary_result = await Runner.run(summary_agent, messages_to_summarize, hooks=hooks) - - # Replace summarized portion with compact summary - self._state.input_list = apply_summary_to_input_list(...) -``` - -Key features: -- **Token threshold**: Triggers at ~40k tokens to stay within model limits -- **Preserves recent context**: Always keeps last 10 user turns in full detail -- **Never re-summarizes**: Starts after the most recent summary to avoid information loss -- **Dedicated summarization agent**: GPT-4o agent focused on extracting key procurement events, decisions, and current state - -This enables workflows to run indefinitely without hitting context limits. - -### 6. Error Handling & Retries - -The workflow uses Temporal's retry policies for resilient execution: - -```python -retry_policy = RetryPolicy( - initial_interval=timedelta(seconds=1), - backoff_coefficient=2.0, # Exponential backoff - maximum_interval=timedelta(seconds=120), - maximum_attempts=5, - non_retryable_error_types=[ - "DataCorruptionError", - "ScheduleNotFoundError", - ] -) -``` - -Activities automatically retry on transient failures but fail fast on data corruption. - -## Key Features - -### Durability -- Workflows survive process restarts, crashes, and deployments -- All state is persisted in Temporal and SQLite -- No context is lost even after months of runtime - -### External Event Processing -- Responds to events from external systems (ERP, logistics, QA) -- Validates and processes events asynchronously -- Multiple event types supported (approvals, shipments, inspections) - -### Human Escalation -- Automatically escalates critical issues (schedule delays, inspection failures) -- Provides recommended actions to humans -- Waits up to 24 hours for a human response -- Continues workflow after receiving guidance - -### Learning & Adaptation -- Extracts patterns from human decisions -- Applies learned rules to similar future situations -- Becomes more autonomous over time -- Human maintains oversight and final authority - -### Observability -- Full workflow history in Temporal UI -- Real-time agent responses in AgentEx UI -- Detailed logging for debugging -- Database audit trail for all changes - -## Customizing the Demo - -### Modify the Construction Schedule - -Edit the default schedule in `project/data/database.py`: - -```python -DEFAULT_SCHEDULE = { - "project": { - "name": "Small Office Renovation", - "start_date": "2026-02-01", - "end_date": "2026-05-31" - }, - "deliveries": [ - { - "item": "Steel Beams", - "required_by": "2026-02-15", - "buffer_days": 5 - }, - # ... add more items - ] -} -``` - -### Add New Event Types - -1. Define the event in `project/models/events.py` -2. Update event validation in `workflow.py` -3. Teach the agent how to handle it in `procurement_agent.py` -4. 
Add test events in `project/scripts/send_test_events.py` - -### Change Agent Behavior - -Modify the agent's instructions in `project/agents/procurement_agent.py`: - -```python -def new_procurement_agent(master_construction_schedule: str, human_input_learnings: list) -> Agent: - instructions = f""" - You are a procurement agent for a commercial building construction project. - - [Your custom instructions here...] - """ - # ... -``` - -### Add New Tools - -Create new activities in `project/activities/activities.py` and register them as tools: - -```python -@activity.defn(name="my_custom_activity") -async def my_custom_activity(param: str) -> str: - # ... your logic - return result - -# Register in the agent -tools=[ - openai_agents.workflow.activity_as_tool( - my_custom_activity, - start_to_close_timeout=timedelta(minutes=10) - ), - # ... other tools -] -``` - -## Troubleshooting - -**Agent not appearing in UI** -- Verify agent is running on port 8000: `lsof -i :8000` -- Check `ENVIRONMENT=development` is set -- Review agent logs for errors - -**Events not being received** -- Confirm workflow ID is correct (check Temporal UI) -- Verify Temporal server is running: `docker ps | grep temporal` -- Check that send_test_events.py is using the right workflow ID - -**Human escalation timeout** -- The agent waits 24 hours for human input before timing out -- Respond in the AgentEx UI task thread -- Check that your message is being sent to the correct task - -**Database errors** -- The database is automatically created at `project/data/procurement.db` -- Delete the file to reset: `rm project/data/procurement.db` -- The agent will recreate it on next run - -**Import errors** -- Make sure dependencies are installed: `uv sync` -- Verify you're running from the correct directory -- Check Python version is 3.12+ - -## What's Next? - -This demo shows the foundation for autonomous, long-running agents. Potential applications include: - -- **Supply chain management** - Track orders, shipments, and inventory across months -- **Compliance workflows** - Monitor regulatory requirements and schedule audits -- **Customer success** - Proactive outreach based on usage patterns and lifecycle stage -- **Infrastructure management** - React to alerts, coordinate maintenance, escalate outages -- **Financial processes** - Invoice approval workflows, budget tracking, expense management - -The key insight: **AI agents don't just answer questions; they can run real-world processes autonomously over time.** - -## Learn More - -- [AgentEx Documentation](https://agentex.sgp.scale.com/docs/) -- [Temporal Documentation](https://docs.temporal.io/) -- [OpenAI Agents SDK](https://github.com/openai/agents-sdk) - ---- - -**Questions or issues?** Open an issue on the [scale-agentex GitHub repository](https://github.com/scaleapi/scale-agentex).
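For readers who want to see what `send_test_events.py` does under the hood, the README's workflow snippet shows a `send_event` signal that accepts a JSON string and validates it against the Pydantic models in `project/models/events.py`. A minimal sketch of signaling the workflow directly with the Temporal Python client (the payload field names are assumptions based on the event fields referenced in `activities.py`; the real script may differ):

```python
import json
import asyncio

from temporalio.client import Client


async def main() -> None:
    # Connect to the local Temporal server started by `make dev`
    client = await Client.connect("localhost:7233")

    # The workflow ID comes from the Temporal UI at http://localhost:8080
    handle = client.get_workflow_handle("your-workflow-id")

    # The workflow's `send_event` signal takes a JSON string, which it
    # validates against the Pydantic event models before queueing it
    await handle.signal(
        "send_event",
        json.dumps(
            {
                "event_type": "Shipment_Departed_Factory",  # hypothetical field names
                "item": "Steel Beams",
                "eta": "2026-02-10",
            }
        ),
    )


asyncio.run(main())
```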
diff --git a/examples/demos/procurement_agent/dev.ipynb b/examples/demos/procurement_agent/dev.ipynb deleted file mode 100644 index 53b70d15..00000000 --- a/examples/demos/procurement_agent/dev.ipynb +++ /dev/null @@ -1,126 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "36834357", - "metadata": {}, - "outputs": [], - "source": [ - "from agentex import Agentex\n", - "\n", - "client = Agentex(base_url=\"http://localhost:5003\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d1c309d6", - "metadata": {}, - "outputs": [], - "source": [ - "AGENT_NAME = \"procurement-agent\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9f6e6ef0", - "metadata": {}, - "outputs": [], - "source": [ - "# (REQUIRED) Create a new task. For Async agents, you must create a task for messages to be associated with.\n", - "import uuid\n", - "\n", - "rpc_response = client.agents.create_task(\n", - " agent_name=AGENT_NAME,\n", - " params={\n", - " \"name\": f\"{str(uuid.uuid4())[:8]}-task\",\n", - " \"params\": {}\n", - " }\n", - ")\n", - "\n", - "task = rpc_response.result\n", - "print(task)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b03b0d37", - "metadata": {}, - "outputs": [], - "source": [ - "# Send an event to the agent\n", - "\n", - "# The response is expected to be a list of TaskMessage objects, which is a union of the following types:\n", - "# - TextContent: A message with just text content \n", - "# - DataContent: A message with JSON-serializable data content\n", - "# - ToolRequestContent: A message with a tool request, which contains a JSON-serializable request to call a tool\n", - "# - ToolResponseContent: A message with a tool response, which contains response object from a tool call in its content\n", - "\n", - "# When processing the message/send response, if you are expecting more than TextContent, such as DataContent, ToolRequestContent, or ToolResponseContent, you can process them as well\n", - "\n", - "rpc_response = client.agents.send_event(\n", - " agent_name=AGENT_NAME,\n", - " params={\n", - " \"content\": {\"type\": \"text\", \"author\": \"user\", \"content\": \"Hello what can you do?\"},\n", - " \"task_id\": task.id,\n", - " }\n", - ")\n", - "\n", - "event = rpc_response.result\n", - "print(event)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a6927cc0", - "metadata": {}, - "outputs": [], - "source": [ - "# Subscribe to the async task messages produced by the agent\n", - "from agentex.lib.utils.dev_tools import subscribe_to_async_task_messages\n", - "\n", - "task_messages = subscribe_to_async_task_messages(\n", - " client=client,\n", - " task=task, \n", - " only_after_timestamp=event.created_at, \n", - " print_messages=True,\n", - " rich_print=True,\n", - " timeout=5,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4864e354", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.9" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} \ No newline at end of file diff --git a/examples/demos/procurement_agent/environments.yaml 
b/examples/demos/procurement_agent/environments.yaml deleted file mode 100644 index 90f44ae6..00000000 --- a/examples/demos/procurement_agent/environments.yaml +++ /dev/null @@ -1,64 +0,0 @@ -# Agent Environment Configuration -# ------------------------------ -# This file defines environment-specific settings for your agent. -# This DIFFERS from the manifest.yaml file in that it is used to configure settings that apply ONLY per environment. - -# ********** EXAMPLE ********** -# schema_version: "v1" # This is used to validate the file structure and is not used by the agentex CLI -# environments: -# dev: -# auth: -# principal: -# user_id: "1234567890" -# user_name: "John Doe" -# user_email: "john.doe@example.com" -# user_role: "admin" -# user_permissions: "read, write, delete" -# helm_overrides: # This is used to override the global helm values.yaml file in the agentex-agent helm charts -# replicas: 3 -# resources: -# requests: -# cpu: "1000m" -# memory: "2Gi" -# limits: -# cpu: "2000m" -# memory: "4Gi" -# env: -# - name: LOG_LEVEL -# value: "DEBUG" -# - name: ENVIRONMENT -# value: "staging" -# -# kubernetes: -# # OPTIONAL - Otherwise it will be derived separately. However, this can be used to override the derived -# # namespace and deploy it within the same namespace that already exists for a separate agent. -# namespace: "team-procurement-agent" -# ********** END EXAMPLE ********** - -schema_version: "v1" # This is used to validate the file structure and is not used by the agentex CLI -environments: - dev: - auth: - principal: - user_id: # TODO: Fill in - account_id: # TODO: Fill in - helm_overrides: - # This is used to override the global helm values.yaml file in the agentex-agent helm charts - replicaCount: 2 - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "1000m" - memory: "2Gi" - temporal-worker: - enabled: true - replicaCount: 2 - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "1000m" - memory: "2Gi" \ No newline at end of file diff --git a/examples/demos/procurement_agent/manifest.yaml b/examples/demos/procurement_agent/manifest.yaml deleted file mode 100644 index ad815e20..00000000 --- a/examples/demos/procurement_agent/manifest.yaml +++ /dev/null @@ -1,144 +0,0 @@ -# Agent Manifest Configuration -# --------------------------- -# This file defines how your agent should be built and deployed. - -# Build Configuration -# ------------------ -# The build config defines what gets packaged into your agent's Docker image. -# This same configuration is used whether building locally or remotely. -# -# When building: -# 1. All files from include_paths are collected into a build context -# 2. The context is filtered by dockerignore rules -# 3. The Dockerfile uses this context to build your agent's image -# 4. 
The image is pushed to a registry and used to run your agent -build: - context: - # Root directory for the build context - root: ../ # Keep this as the default root - - # Paths to include in the Docker build context - # Must include: - # - Your agent's directory (your custom agent code) - # These paths are collected and sent to the Docker daemon for building - include_paths: - - procurement_agent - - # Path to your agent's Dockerfile - # This defines how your agent's image is built from the context - # Relative to the root directory - dockerfile: procurement_agent/Dockerfile - - # Path to your agent's .dockerignore - # Filters unnecessary files from the build context - # Helps keep build context small and builds fast - dockerignore: procurement_agent/.dockerignore - - -# Local Development Configuration -# ----------------------------- -# Only used when running the agent locally -local_development: - agent: - port: 8000 # Port where your local ACP server is running - host_address: host.docker.internal # Host address for Docker networking (host.docker.internal for Docker, localhost for direct) - - # File paths for local development (relative to this manifest.yaml) - paths: - # Path to ACP server file - # Examples: - # project/acp.py (standard) - # src/server.py (custom structure) - # ../shared/acp.py (shared across projects) - # /absolute/path/acp.py (absolute path) - acp: project/acp.py - - # Path to temporal worker file - # Examples: - # project/run_worker.py (standard) - # workers/temporal.py (custom structure) - # ../shared/worker.py (shared across projects) - worker: project/run_worker.py - - -# Agent Configuration -# ----------------- -agent: - # Type of agent - either sync or async - acp_type: async - - # Unique name for your agent - # Used for task routing and monitoring - name: procurement-agent - - # Description of what your agent does - # Helps with documentation and discovery - description: An Agentex agent that manages procurement for building constructions - - # Temporal workflow configuration - # This enables your agent to run as a Temporal workflow for long-running tasks - temporal: - enabled: true - workflows: - # Name of the workflow class - # Must match the @workflow.defn name in your workflow.py - - name: procurement-agent - - # Queue name for task distribution - # Used by Temporal to route tasks to your agent - # Convention: _task_queue - queue_name: procurement_agent_queue - - # Optional: Health check port for temporal worker - # Defaults to 80 if not specified - # health_check_port: 80 - - # Optional: Credentials mapping - # Maps Kubernetes secrets to environment variables - # Common credentials include: - credentials: - - env_var_name: REDIS_URL - secret_name: redis-url-secret - secret_key: url - # - env_var_name: OPENAI_API_KEY - # secret_name: openai-api-key - # secret_key: api-key - - # Optional: Set Environment variables for running your agent locally as well - # as for deployment later on - env: - OPENAI_API_KEY: "" - # OPENAI_BASE_URL: "" - OPENAI_ORG_ID: "" - - -# Deployment Configuration -# ----------------------- -# Configuration for deploying your agent to Kubernetes clusters -deployment: - # Container image configuration - image: - repository: "" # Update with your container registry - tag: "latest" # Default tag, should be versioned in production - - imagePullSecrets: - - name: my-registry-secret # Update with your image pull secret name - - # Global deployment settings that apply to all clusters - # These can be overridden using --override-file with 
custom configuration files - global: - agent: - name: "procurement-agent" - description: "An Agentex agent that manages procurement for building constructions" - - # Default replica count - replicaCount: 1 - - # Default resource requirements - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "1000m" - memory: "2Gi" \ No newline at end of file diff --git a/examples/demos/procurement_agent/project/__init__.py b/examples/demos/procurement_agent/project/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/examples/demos/procurement_agent/project/acp.py b/examples/demos/procurement_agent/project/acp.py deleted file mode 100644 index 54cac94a..00000000 --- a/examples/demos/procurement_agent/project/acp.py +++ /dev/null @@ -1,59 +0,0 @@ -import os -import sys - -from temporalio.contrib.openai_agents import OpenAIAgentsPlugin - -from agentex.lib.core.temporal.plugins.openai_agents.models.temporal_streaming_model import ( - TemporalStreamingModelProvider, -) -from agentex.lib.core.temporal.plugins.openai_agents.interceptors.context_interceptor import ContextInterceptor - -# === DEBUG SETUP (AgentEx CLI Debug Support) === -if os.getenv("AGENTEX_DEBUG_ENABLED") == "true": - try: - import debugpy - - from agentex.lib.utils.logging import make_logger - - logger = make_logger(__name__) - debug_port = int(os.getenv("AGENTEX_DEBUG_PORT", "5679")) - debug_type = os.getenv("AGENTEX_DEBUG_TYPE", "acp") - wait_for_attach = os.getenv("AGENTEX_DEBUG_WAIT_FOR_ATTACH", "false").lower() == "true" - - # Configure debugpy - debugpy.configure(subProcess=False) - debugpy.listen(debug_port) - - logger.info(f"🐛 [{debug_type.upper()}] Debug server listening on port {debug_port}") - - if wait_for_attach: - logger.info(f"⏳ [{debug_type.upper()}] Waiting for debugger to attach...") - debugpy.wait_for_client() - logger.info(f"✅ [{debug_type.upper()}] Debugger attached!") - else: - logger.info(f"📡 [{debug_type.upper()}] Ready for debugger attachment") - - except ImportError: - print("❌ debugpy not available. 
Install with: pip install debugpy") - sys.exit(1) - except Exception as e: - print(f"❌ Debug setup failed: {e}") - sys.exit(1) -# === END DEBUG SETUP === - -from agentex.lib.types.fastacp import TemporalACPConfig -from agentex.lib.sdk.fastacp.fastacp import FastACP - -context_interceptor = ContextInterceptor() -streaming_model_provider = TemporalStreamingModelProvider() - -# Create the ACP server -acp = FastACP.create( - acp_type="async", - config=TemporalACPConfig( - type="temporal", - temporal_address=os.getenv("TEMPORAL_ADDRESS", "localhost:7233"), - plugins=[OpenAIAgentsPlugin(model_provider=streaming_model_provider)], - interceptors=[context_interceptor] - ) -) \ No newline at end of file diff --git a/examples/demos/procurement_agent/project/activities/__init__.py b/examples/demos/procurement_agent/project/activities/__init__.py deleted file mode 100644 index 8c8e7bd5..00000000 --- a/examples/demos/procurement_agent/project/activities/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""Procurement agent activities module.""" diff --git a/examples/demos/procurement_agent/project/activities/activities.py b/examples/demos/procurement_agent/project/activities/activities.py deleted file mode 100644 index f2581e14..00000000 --- a/examples/demos/procurement_agent/project/activities/activities.py +++ /dev/null @@ -1,570 +0,0 @@ -from __future__ import annotations - -import json -import uuid -import asyncio -from datetime import datetime, timedelta - -from temporalio import activity -from temporalio.exceptions import ApplicationError - -from project.data.database import ( - DatabaseError, - DataCorruptionError, - create_procurement_item, - delete_procurement_item, - update_procurement_item, - get_all_procurement_items, - get_schedule_for_workflow, - create_schedule_for_workflow, - get_procurement_item_by_name, - remove_delivery_item_for_workflow, - update_project_end_date_for_workflow, - update_delivery_date_for_item_for_workflow, -) -from project.models.events import ( - SubmitalApprovalEvent, - ShipmentDepartedFactoryEvent, -) -from agentex.lib.utils.logging import make_logger - -logger = make_logger(__name__) - -@activity.defn -async def issue_purchase_order(event: SubmitalApprovalEvent) -> str: - """ - Issues a purchase order for construction materials. - - Call this when: - - A submittal is approved (Submittal_Approved event) - - Human feedback requests reissuing a purchase order - """ - uuid_purchase_order = str(uuid.uuid4()) - # wait for 5 seconds as if we were calling an API to issue a purchase order - await asyncio.sleep(5) - logger.info(f"Issuing purchase order: {event}") - logger.info(f"Purchase order ID: {uuid_purchase_order}") - - return f"Successfully issued purchase order with ID: {uuid_purchase_order}" - -@activity.defn -async def flag_potential_issue(event: ShipmentDepartedFactoryEvent) -> str: - """ - Flags a potential issue with a delivery date. 
- - Call this when: - - A shipment departure creates timeline concerns (Shipment_Departed_Factory event) - - When ETA = required date and there is zero buffer - - Human feedback identifies a potential delivery issue - """ - logger.info(f"Flagging potential issue: {event}") - logger.info(f"Potential issue flagged with delivery date: {event.eta}") - # imagine this is a call to an API to flag a potential issue, perhaps a notification to a team member - await asyncio.sleep(1) - return f"Potential issue flagged with delivery date: {event.eta}" - -@activity.defn -async def notify_team_shipment_arrived(event: ShipmentDepartedFactoryEvent) -> str: - """ - Notifies the team that a shipment has arrived. - - Call this when: - - A shipment arrives at the site (Shipment_Arrived_Site event) - - Human feedback requests team notification - """ - logger.info(f"Notifying team that shipment has arrived: {event.item}") - logger.info(f"Team notification sent for arrival of: {event.item}") - # imagine this is a call to an API to notify the team that a shipment has arrived, perhaps a notification to a team member - await asyncio.sleep(1) - - return f"Notifying team that shipment has arrived: {event.item}" - -@activity.defn -async def schedule_inspection(event: ShipmentDepartedFactoryEvent) -> str: - """ - Schedules an inspection for delivered materials. - - Call this when: - - A shipment arrives at the site (Shipment_Arrived_Site event) - - Human feedback requests scheduling an inspection - """ - inspection_date = datetime.now() + timedelta(days=1) - logger.info(f"Scheduling inspection for: {event.item} on {inspection_date}") - # imagine this is a call to an API to schedule an inspection - await asyncio.sleep(1) - return f"Scheduling inspection for {event.item} on {inspection_date}" - - - -@activity.defn -async def create_master_construction_schedule(workflow_id: str) -> str: - """ - Creates the master construction schedule for the workflow. - - Call this when: - - The workflow is created - - Args: - workflow_id: The Temporal workflow ID - - Raises: - ApplicationError: Non-retryable if data is invalid - DatabaseError: Retryable if database connection fails - """ - logger.info(f"Creating master construction schedule for workflow: {workflow_id}") - - try: - await create_schedule_for_workflow(workflow_id) - return "Master construction schedule created for workflow" - - except DataCorruptionError as e: - # Application error - invalid data, don't retry - logger.error(f"Data corruption error creating schedule: {e}") - raise ApplicationError( - f"Invalid data creating schedule: {e}", - type="DataCorruptionError", - non_retryable=True - ) from e - - except DatabaseError as e: - # Platform error - database connection issue, let Temporal retry - logger.warning(f"Database error creating schedule (will retry): {e}") - raise # Let Temporal retry with activity retry policy - - except Exception as e: - # Unexpected error - log and let Temporal retry - logger.error(f"Unexpected error creating schedule: {e}") - raise - -@activity.defn -async def get_master_construction_schedule(workflow_id: str) -> str: - """ - Gets the master construction schedule for the workflow. 
- - Call this when: - - You want to get the master construction schedule for the workflow - - Human feedback requests the master construction schedule - - Returns: - The master construction schedule for the workflow as JSON string - - Raises: - ApplicationError: Non-retryable if schedule not found or data corrupted - DatabaseError: Retryable if database connection fails - """ - try: - schedule = await get_schedule_for_workflow(workflow_id) - - if schedule is None: - # Schedule not found - this is an application error - logger.error(f"No schedule found for workflow {workflow_id}") - raise ApplicationError( - f"No master construction schedule found for workflow {workflow_id}", - type="ScheduleNotFoundError", - non_retryable=True - ) - - logger.info(f"Master construction schedule found for workflow: {workflow_id}") - return json.dumps(schedule) - - except ApplicationError: - # Re-raise application errors - raise - - except DataCorruptionError as e: - # Application error - corrupted data, don't retry - logger.error(f"Data corruption error retrieving schedule: {e}") - raise ApplicationError( - f"Schedule data corrupted: {e}", - type="DataCorruptionError", - non_retryable=True - ) from e - - except DatabaseError as e: - # Platform error - database connection issue, let Temporal retry - logger.warning(f"Database error retrieving schedule (will retry): {e}") - raise # Let Temporal retry with activity retry policy - - except Exception as e: - # Unexpected error - log and let Temporal retry - logger.error(f"Unexpected error retrieving schedule: {e}") - raise - -@activity.defn -async def update_delivery_date_for_item(workflow_id: str, item: str, new_delivery_date: str) -> str: - """ - Updates the delivery date for a specific item in the construction schedule. - - Call this when: - - You want to update the delivery date for a specific item in the construction schedule - - Human feedback requests updating the delivery date for a specific item - - Args: - workflow_id: The Temporal workflow ID - item: The item to update - new_delivery_date: The new delivery date - - Raises: - ApplicationError: Non-retryable if schedule/item not found - DatabaseError: Retryable if database connection fails - """ - logger.info(f"Updating delivery date for item: {item} to {new_delivery_date}") - - try: - await update_delivery_date_for_item_for_workflow(workflow_id, item, new_delivery_date) - return f"Delivery date updated for item: {item} to {new_delivery_date}" - - except DataCorruptionError as e: - # Application error - schedule or item not found, don't retry - logger.error(f"Data corruption error updating delivery date: {e}") - raise ApplicationError( - f"Failed to update delivery date: {e}", - type="DataCorruptionError", - non_retryable=True - ) from e - - except DatabaseError as e: - # Platform error - database connection issue, let Temporal retry - logger.warning(f"Database error updating delivery date (will retry): {e}") - raise # Let Temporal retry with activity retry policy - - except Exception as e: - # Unexpected error - log and let Temporal retry - logger.error(f"Unexpected error updating delivery date: {e}") - raise - -@activity.defn -async def remove_delivery_item(workflow_id: str, item: str) -> str: - """ - Removes a delivery item from the construction schedule. 
- - Call this when: - - You want to remove a delivery item from the construction schedule - - Human feedback requests removing a delivery item - - Args: - workflow_id: The Temporal workflow ID - item: The item to remove - - Raises: - ApplicationError: Non-retryable if schedule/item not found - DatabaseError: Retryable if database connection fails - """ - logger.info(f"Removing delivery item: {item}") - - try: - await remove_delivery_item_for_workflow(workflow_id, item) - return f"Delivery item removed from construction schedule: {item}" - - except DataCorruptionError as e: - # Application error - schedule or item not found, don't retry - logger.error(f"Data corruption error removing delivery item: {e}") - raise ApplicationError( - f"Failed to remove delivery item: {e}", - type="DataCorruptionError", - non_retryable=True - ) from e - - except DatabaseError as e: - # Platform error - database connection issue, let Temporal retry - logger.warning(f"Database error removing delivery item (will retry): {e}") - raise # Let Temporal retry with activity retry policy - - except Exception as e: - # Unexpected error - log and let Temporal retry - logger.error(f"Unexpected error removing delivery item: {e}") - raise - -@activity.defn -async def update_project_end_date(workflow_id: str, new_end_date: str) -> str: - """ - Updates the end date for the project in the construction schedule. - - Call this when: - - You want to update the end date for the project in the construction schedule - - Human feedback requests updating the end date for the project - - Args: - workflow_id: The Temporal workflow ID - new_end_date: The new end date for the project - - Raises: - ApplicationError: Non-retryable if schedule not found - DatabaseError: Retryable if database connection fails - """ - logger.info(f"Updating end date for project to: {new_end_date}") - - try: - await update_project_end_date_for_workflow(workflow_id, new_end_date) - return f"End date updated for project: {new_end_date}" - - except DataCorruptionError as e: - # Application error - schedule not found, don't retry - logger.error(f"Data corruption error updating project end date: {e}") - raise ApplicationError( - f"Failed to update project end date: {e}", - type="DataCorruptionError", - non_retryable=True - ) from e - - except DatabaseError as e: - # Platform error - database connection issue, let Temporal retry - logger.warning(f"Database error updating project end date (will retry): {e}") - raise # Let Temporal retry with activity retry policy - - except Exception as e: - # Unexpected error - log and let Temporal retry - logger.error(f"Unexpected error updating project end date: {e}") - raise - - -@activity.defn -async def create_procurement_item_activity( - workflow_id: str, - item: str, - status: str, - eta: str | None = None, - date_arrived: str | None = None, - purchase_order_id: str | None = None -) -> str: - """ - Creates a new procurement item for tracking through the workflow. 
- - Call this when: - - A submittal is approved (Submittal_Approved event) - automatically after submittal approval - - Human feedback requests creating a new procurement item - - Args: - workflow_id: The Temporal workflow ID - item: The item name (e.g., "Steel Beams") - status: Current status of the item (e.g., "submittal_approved") - eta: Optional estimated time of arrival - date_arrived: Optional date the item arrived - purchase_order_id: Optional purchase order ID - - Raises: - ApplicationError: Non-retryable if data is invalid - DatabaseError: Retryable if database connection fails - """ - logger.info(f"Creating procurement item for workflow {workflow_id}: {item} with status {status}") - - try: - await create_procurement_item( - workflow_id=workflow_id, - item=item, - status=status, - eta=eta, - date_arrived=date_arrived, - purchase_order_id=purchase_order_id - ) - return f"Procurement item created: {item} with status {status}" - - except DataCorruptionError as e: - # Application error - invalid data, don't retry - logger.error(f"Data corruption error creating procurement item: {e}") - raise ApplicationError( - f"Invalid data creating procurement item: {e}", - type="DataCorruptionError", - non_retryable=True - ) from e - - except DatabaseError as e: - # Platform error - database connection issue, let Temporal retry - logger.warning(f"Database error creating procurement item (will retry): {e}") - raise # Let Temporal retry with activity retry policy - - except Exception as e: - # Unexpected error - log and let Temporal retry - logger.error(f"Unexpected error creating procurement item: {e}") - raise - - -@activity.defn -async def update_procurement_item_activity( - workflow_id: str, - item: str, - status: str | None = None, - eta: str | None = None, - date_arrived: str | None = None, - purchase_order_id: str | None = None -) -> str: - """ - Updates a procurement item's fields. 
- - Call this when: - - Any event occurs that changes the item's status (e.g., shipment departed, arrived, inspection scheduled/failed/passed) - - Human feedback requests updating the procurement item - - Purchase order is issued - - ETA is updated - - Item arrives at site - - Args: - workflow_id: The Temporal workflow ID - item: The item name (e.g., "Steel Beams") - status: Optional new status - eta: Optional new estimated time of arrival - date_arrived: Optional new arrival date - purchase_order_id: Optional new purchase order ID - - Raises: - ApplicationError: Non-retryable if workflow_id invalid or item not found - DatabaseError: Retryable if database connection fails - """ - logger.info(f"Updating procurement item for workflow {workflow_id}: {item}") - - try: - await update_procurement_item( - workflow_id=workflow_id, - item=item, - status=status, - eta=eta, - date_arrived=date_arrived, - purchase_order_id=purchase_order_id - ) - return f"Procurement item updated for workflow {workflow_id}: {item}" - - except DataCorruptionError as e: - # Application error - item not found or invalid data, don't retry - logger.error(f"Data corruption error updating procurement item: {e}") - raise ApplicationError( - f"Failed to update procurement item: {e}", - type="DataCorruptionError", - non_retryable=True - ) from e - - except DatabaseError as e: - # Platform error - database connection issue, let Temporal retry - logger.warning(f"Database error updating procurement item (will retry): {e}") - raise # Let Temporal retry with activity retry policy - - except Exception as e: - # Unexpected error - log and let Temporal retry - logger.error(f"Unexpected error updating procurement item: {e}") - raise - - -@activity.defn -async def delete_procurement_item_activity(workflow_id: str, item: str) -> str: - """ - Deletes a procurement item from the database. - - Call this when: - - Human feedback explicitly requests removing/deleting an item (e.g., "remove the steel beams") - - Item is no longer needed in the project - - Args: - workflow_id: The Temporal workflow ID - item: The item name (e.g., "Steel Beams") - - Raises: - ApplicationError: Non-retryable if workflow_id invalid or item not found - DatabaseError: Retryable if database connection fails - """ - logger.info(f"Deleting procurement item for workflow {workflow_id}: {item}") - - try: - await delete_procurement_item(workflow_id, item) - return f"Procurement item deleted for workflow {workflow_id}: {item}" - - except DataCorruptionError as e: - # Application error - item not found, don't retry - logger.error(f"Data corruption error deleting procurement item: {e}") - raise ApplicationError( - f"Failed to delete procurement item: {e}", - type="DataCorruptionError", - non_retryable=True - ) from e - - except DatabaseError as e: - # Platform error - database connection issue, let Temporal retry - logger.warning(f"Database error deleting procurement item (will retry): {e}") - raise # Let Temporal retry with activity retry policy - - except Exception as e: - # Unexpected error - log and let Temporal retry - logger.error(f"Unexpected error deleting procurement item: {e}") - raise - - -@activity.defn -async def get_procurement_item_by_name_activity(workflow_id: str, item: str) -> str: - """ - Retrieves a procurement item by workflow ID and item name. 
- - Call this when: - - You need to check the status of a specific item - - You need context about an item before making decisions - - Human feedback requests information about a specific item - - Args: - workflow_id: The Temporal workflow ID - item: The item name (e.g., "Steel Beams") - - Returns: - JSON string of the procurement item or message if not found - - Raises: - ApplicationError: Non-retryable if input data is invalid - DatabaseError: Retryable if database connection fails - """ - logger.info(f"Getting procurement item for workflow {workflow_id}: {item}") - - try: - result = await get_procurement_item_by_name(workflow_id, item) - - if result is None: - return f"No procurement item found for workflow {workflow_id} with item name: {item}" - - return json.dumps(result) - - except DataCorruptionError as e: - # Application error - invalid input, don't retry - logger.error(f"Data corruption error getting procurement item: {e}") - raise ApplicationError( - f"Invalid input getting procurement item: {e}", - type="DataCorruptionError", - non_retryable=True - ) from e - - except DatabaseError as e: - # Platform error - database connection issue, let Temporal retry - logger.warning(f"Database error getting procurement item (will retry): {e}") - raise # Let Temporal retry with activity retry policy - - except Exception as e: - # Unexpected error - log and let Temporal retry - logger.error(f"Unexpected error getting procurement item: {e}") - raise - - -@activity.defn -async def get_all_procurement_items_activity() -> str: - """ - Retrieves all procurement items from the database. - - Call this when: - - You need an overview of all procurement items - - You need to check the status of multiple items - - Human feedback requests a summary of all items - - Returns: - JSON string of all procurement items - - Raises: - DatabaseError: Retryable if database connection fails - """ - logger.info("Getting all procurement items") - - try: - results = await get_all_procurement_items() - return json.dumps(results) - - except DatabaseError as e: - # Platform error - database connection issue, let Temporal retry - logger.warning(f"Database error getting all procurement items (will retry): {e}") - raise # Let Temporal retry with activity retry policy - - except Exception as e: - # Unexpected error - log and let Temporal retry - logger.error(f"Unexpected error getting all procurement items: {e}") - raise \ No newline at end of file diff --git a/examples/demos/procurement_agent/project/agents/__init__.py b/examples/demos/procurement_agent/project/agents/__init__.py deleted file mode 100644 index 08d7078b..00000000 --- a/examples/demos/procurement_agent/project/agents/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""Procurement agent agents module.""" diff --git a/examples/demos/procurement_agent/project/agents/extract_learnings_agent.py b/examples/demos/procurement_agent/project/agents/extract_learnings_agent.py deleted file mode 100644 index ca6ea680..00000000 --- a/examples/demos/procurement_agent/project/agents/extract_learnings_agent.py +++ /dev/null @@ -1,53 +0,0 @@ -"""Agent for extracting learnings from human interactions.""" - - -from agents import Agent - - -def new_extract_learnings_agent() -> Agent: - """ - Create an agent that extracts 1-2 sentence learnings from human interactions. - - This agent analyzes the full conversation context to understand how we got to - the human interaction and what key insight or decision was made. 
- - Returns: - Agent configured to extract a concise learning - """ - instructions = """ -You are a learning extraction agent for a procurement system. - -Your job is to analyze only the wait_for_human tool call OUTPUT and extract a concise 1-2 sentence learning that can be applied to future decisions. -We care about the output as that is what the human actually said. The input is AI-generated; we are trying to extract what decision the human made. - -For example: - - Example usage from the conversation: - { - "arguments": "{\"recommended_action\":\"The inspection failed. I recommend we re-order the item.\"}", - "call_id": "call_FqWa25mlCKwo8gA3zr4TwHca", - "name": "wait_for_human", - "type": "function_call", - "id": "fc_08a992817d632789006914d90bbb948194bd20eb784f33c2a5", - "status": "completed" - } - - Human response received: - { - "call_id": "call_FqWa25mlCKwo8gA3zr4TwHca", - "output": "No, we should not re-order the item. Please remove the item from the master schedule.", - "type": "function_call_output" - } -Learning: When we fail inspection, the recommended action is to remove the item from the master schedule. - -The rest of the information is just context, but the focus should be on understanding what the human wanted to do and why. - -Please extract a 1-2 sentence learning from the wait_for_human tool call. -""" - - return Agent( - name="Extract Learnings Agent", - instructions=instructions, - model="gpt-4o", - tools=[], # No tools needed - just analysis - ) diff --git a/examples/demos/procurement_agent/project/agents/procurement_agent.py b/examples/demos/procurement_agent/project/agents/procurement_agent.py deleted file mode 100644 index 475cd83e..00000000 --- a/examples/demos/procurement_agent/project/agents/procurement_agent.py +++ /dev/null @@ -1,502 +0,0 @@ -"""Event agent for processing procurement events and taking actions.""" -from __future__ import annotations - -from datetime import datetime, timedelta - -from agents import Agent, function_tool -from temporalio import workflow -from temporalio.common import RetryPolicy -from temporalio.contrib import openai_agents -from temporalio.exceptions import TimeoutError, ApplicationError - -from project.activities.activities import ( - schedule_inspection, - flag_potential_issue, - issue_purchase_order, - remove_delivery_item, - update_project_end_date, - notify_team_shipment_arrived, - update_delivery_date_for_item, - create_procurement_item_activity, - delete_procurement_item_activity, - update_procurement_item_activity, - get_all_procurement_items_activity, - get_procurement_item_by_name_activity, -) - - -@function_tool -async def wait_for_human(recommended_action: str) -> str: - """ - When we are stuck and need to ask a human for help, call this tool. Please provide a recommended action to the human. - Until the human approves the recommended action, you will keep calling this tool (call it as many times as needed). - If the human says anything other than yes, please use this tool again and come up with a new recommended action. - If the human wants to add additional information, please use this tool again and come up with a new recommended action. - You are almost always calling this tool again unless the human approves the exact recommended action. - - For example: - - Assistant recommendation: The inspection failed. I recommend we re-order the item. - Human response: No, we should not re-order the item. Please remove the item from the master schedule. 
- Assistant recommendation: Ok I will go ahead and remove the item from the master schedule. Do you approve? - Human response: Yes - - Assistant recommendation: The inspection failed. I recommend we re-order the item. - Human response: Yes and also please update the master schedule to reflect the new delivery date. - Assistant recommendation: Ok I will go ahead and update the master schedule to reflect the new delivery date and re-order the item. Does that sound right? - Human response: Yes - """ - workflow_instance = workflow.instance() - workflow.logger.info(f"Recommended action: {recommended_action}") - - try: - # Wait for human response with 24-hour timeout (don't wait forever!) - await workflow.wait_condition( - lambda: not workflow_instance.human_queue.empty(), - timeout=timedelta(hours=24), - ) - - while not workflow_instance.human_queue.empty(): - human_input = await workflow_instance.human_queue.get() - print("[WORKFLOW] Processing human message from queue") - return human_input - - # If queue became empty after wait_condition succeeded, this shouldn't normally happen - workflow.logger.warning("Queue empty after wait condition succeeded") - return "No human response available" - - except TimeoutError: - # Human didn't respond within 24 hours - workflow.logger.warning("Human escalation timed out after 24 hours") - return "TIMEOUT: No human response received within 24 hours. Proceeding with best judgment." - - -@function_tool -async def update_delivery_date_tool(item: str, new_delivery_date: str) -> str: - """ - Updates the delivery date for a specific item in the construction schedule. - - Call this when: - - You want to update the delivery date for a specific item in the construction schedule - - Human feedback requests updating the delivery date for a specific item - - Args: - item: The item to update - new_delivery_date: The new delivery date - - Returns: - Confirmation message or error description - """ - workflow_id = workflow.info().workflow_id - - retry_policy = RetryPolicy( - initial_interval=timedelta(seconds=1), - backoff_coefficient=2.0, - maximum_interval=timedelta(seconds=120), - maximum_attempts=5, - non_retryable_error_types=["DataCorruptionError"], - ) - - try: - return await workflow.execute_activity( - update_delivery_date_for_item, - args=[workflow_id, item, new_delivery_date], - start_to_close_timeout=timedelta(minutes=5), - schedule_to_close_timeout=timedelta(minutes=10), - retry_policy=retry_policy, - ) - except ApplicationError as e: - # Non-retryable error (item not found, schedule missing) - workflow.logger.error(f"Failed to update delivery date for {item}: {e}") - return f"Error: Unable to update delivery date for {item}. {e.message}" - except Exception as e: - # Unexpected error - workflow.logger.error(f"Unexpected error updating delivery date: {e}") - return f"Error: System issue updating delivery date for {item}. Please try again." - - -@function_tool -async def remove_delivery_item_tool(item: str) -> str: - """ - Removes a delivery item from the construction schedule. 
- - Call this when: - - You want to remove a delivery item from the construction schedule - - Human feedback requests removing a delivery item - - Args: - item: The item to remove - - Returns: - Confirmation message or error description - """ - workflow_id = workflow.info().workflow_id - - retry_policy = RetryPolicy( - initial_interval=timedelta(seconds=1), - backoff_coefficient=2.0, - maximum_interval=timedelta(seconds=120), - maximum_attempts=5, - non_retryable_error_types=["DataCorruptionError"], - ) - - try: - return await workflow.execute_activity( - remove_delivery_item, - args=[workflow_id, item], - start_to_close_timeout=timedelta(minutes=5), - schedule_to_close_timeout=timedelta(minutes=10), - retry_policy=retry_policy, - ) - except ApplicationError as e: - # Non-retryable error (item not found, schedule missing) - workflow.logger.error(f"Failed to remove delivery item {item}: {e}") - return f"Error: Unable to remove item {item}. {e.message}" - except Exception as e: - # Unexpected error - workflow.logger.error(f"Unexpected error removing delivery item: {e}") - return f"Error: System issue removing item {item}. Please try again." - - -@function_tool -async def update_project_end_date_tool(new_end_date: str) -> str: - """ - Updates the end date for the project in the construction schedule. - - Call this when: - - You want to update the end date for the project in the construction schedule - - Human feedback requests updating the end date for the project - - Args: - new_end_date: The new end date for the project - - Returns: - Confirmation message or error description - """ - workflow_id = workflow.info().workflow_id - - retry_policy = RetryPolicy( - initial_interval=timedelta(seconds=1), - backoff_coefficient=2.0, - maximum_interval=timedelta(seconds=120), - maximum_attempts=5, - non_retryable_error_types=["DataCorruptionError"], - ) - - try: - return await workflow.execute_activity( - update_project_end_date, - args=[workflow_id, new_end_date], - start_to_close_timeout=timedelta(minutes=5), - schedule_to_close_timeout=timedelta(minutes=10), - retry_policy=retry_policy, - ) - except ApplicationError as e: - # Non-retryable error (schedule not found) - workflow.logger.error(f"Failed to update project end date: {e}") - return f"Error: Unable to update project end date. {e.message}" - except Exception as e: - # Unexpected error - workflow.logger.error(f"Unexpected error updating project end date: {e}") - return f"Error: System issue updating project end date. Please try again." - - -@function_tool -async def create_procurement_item_tool( - item: str, - status: str, - eta: str | None = None, - date_arrived: str | None = None, - purchase_order_id: str | None = None -) -> str: - """ - Creates a new procurement item for tracking through the workflow. 
- - Call this when: - - A submittal is approved (after calling issue_purchase_order) - - You need to track a new item in the procurement system - - Args: - item: The item name (e.g., "Steel Beams") - status: Current status (e.g., "submittal_approved", "purchase_order_issued") - eta: Optional estimated time of arrival (ISO format) - date_arrived: Optional date the item arrived (ISO format) - purchase_order_id: Optional purchase order ID - - Returns: - Confirmation message or error description - """ - workflow_id = workflow.info().workflow_id - - retry_policy = RetryPolicy( - initial_interval=timedelta(seconds=1), - backoff_coefficient=2.0, - maximum_interval=timedelta(seconds=120), - maximum_attempts=5, - non_retryable_error_types=["DataCorruptionError"], - ) - - try: - return await workflow.execute_activity( - create_procurement_item_activity, - args=[workflow_id, item, status, eta, date_arrived, purchase_order_id], - start_to_close_timeout=timedelta(minutes=5), - schedule_to_close_timeout=timedelta(minutes=10), - retry_policy=retry_policy, - ) - except ApplicationError as e: - # Non-retryable error (invalid data) - workflow.logger.error(f"Failed to create procurement item for {item}: {e}") - return f"Error: Unable to create procurement item for {item}. {e.message}" - except Exception as e: - # Unexpected error - workflow.logger.error(f"Unexpected error creating procurement item: {e}") - return f"Error: System issue creating procurement item for {item}. Please try again." - - -@function_tool -async def update_procurement_item_tool( - item: str, - status: str | None = None, - eta: str | None = None, - date_arrived: str | None = None, - purchase_order_id: str | None = None -) -> str: - """ - Updates a procurement item's fields in the tracking system. - - Call this when: - - An event changes the item's status (e.g., shipment departed, arrived, inspection scheduled/failed/passed) - - A purchase order is issued for the item - - The ETA is updated - - The item arrives at the site - - A potential issue is flagged - - Args: - item: The item name (e.g., "Steel Beams", "HVAC Units") - REQUIRED to identify which item to update - status: Optional new status (e.g., "purchase_order_issued", "shipment_departed", "shipment_arrived", - "potential_issue_flagged", "inspection_scheduled", "inspection_failed", "inspection_passed") - eta: Optional new estimated time of arrival (ISO format) - date_arrived: Optional new arrival date (ISO format) - purchase_order_id: Optional new purchase order ID - - Returns: - Confirmation message or error description - """ - workflow_id = workflow.info().workflow_id - - retry_policy = RetryPolicy( - initial_interval=timedelta(seconds=1), - backoff_coefficient=2.0, - maximum_interval=timedelta(seconds=120), - maximum_attempts=5, - non_retryable_error_types=["DataCorruptionError"], - ) - - try: - return await workflow.execute_activity( - update_procurement_item_activity, - args=[workflow_id, item, status, eta, date_arrived, purchase_order_id], - start_to_close_timeout=timedelta(minutes=5), - schedule_to_close_timeout=timedelta(minutes=10), - retry_policy=retry_policy, - ) - except ApplicationError as e: - # Non-retryable error (item not found) - workflow.logger.error(f"Failed to update procurement item: {e}") - return f"Error: Unable to update procurement item. {e.message}" - except Exception as e: - # Unexpected error - workflow.logger.error(f"Unexpected error updating procurement item: {e}") - return f"Error: System issue updating procurement item. Please try again." 
- - -@function_tool -async def delete_procurement_item_tool(item: str) -> str: - """ - Deletes a procurement item from the tracking system. - - Call this when: - - Human explicitly requests removing/deleting an item - - An item is no longer needed in the project - - Args: - item: The item name to delete (e.g., "Steel Beams", "HVAC Units") - - Returns: - Confirmation message or error description - """ - workflow_id = workflow.info().workflow_id - - retry_policy = RetryPolicy( - initial_interval=timedelta(seconds=1), - backoff_coefficient=2.0, - maximum_interval=timedelta(seconds=120), - maximum_attempts=5, - non_retryable_error_types=["DataCorruptionError"], - ) - - try: - return await workflow.execute_activity( - delete_procurement_item_activity, - args=[workflow_id, item], - start_to_close_timeout=timedelta(minutes=5), - schedule_to_close_timeout=timedelta(minutes=10), - retry_policy=retry_policy, - ) - except ApplicationError as e: - # Non-retryable error (item not found) - workflow.logger.error(f"Failed to delete procurement item: {e}") - return f"Error: Unable to delete procurement item. {e.message}" - except Exception as e: - # Unexpected error - workflow.logger.error(f"Unexpected error deleting procurement item: {e}") - return f"Error: System issue deleting procurement item. Please try again." - - -@function_tool -async def get_procurement_item_by_name_tool(item: str) -> str: - """ - Retrieves a procurement item by item name for context. - - Call this when: - - You need to check the status of a specific item before making decisions - - Human asks about the status of an item - - You need additional context about an item - - Args: - item: The item name (e.g., "Steel Beams") - - Returns: - JSON string of the procurement item or message if not found - """ - workflow_id = workflow.info().workflow_id - - retry_policy = RetryPolicy( - initial_interval=timedelta(seconds=1), - backoff_coefficient=2.0, - maximum_interval=timedelta(seconds=120), - maximum_attempts=5, - non_retryable_error_types=["DataCorruptionError"], - ) - - try: - return await workflow.execute_activity( - get_procurement_item_by_name_activity, - args=[workflow_id, item], - start_to_close_timeout=timedelta(minutes=5), - schedule_to_close_timeout=timedelta(minutes=10), - retry_policy=retry_policy, - ) - except ApplicationError as e: - # Non-retryable error (invalid input) - workflow.logger.error(f"Failed to get procurement item {item}: {e}") - return f"Error: Unable to get procurement item {item}. {e.message}" - except Exception as e: - # Unexpected error - workflow.logger.error(f"Unexpected error getting procurement item: {e}") - return f"Error: System issue getting procurement item {item}. Please try again." - - -@function_tool -async def get_all_procurement_items_tool() -> str: - """ - Retrieves all procurement items for context. 
- - Call this when: - - You need an overview of all procurement items - - Human asks for a summary of all items - - You need to check multiple items' statuses - - Returns: - JSON string of all procurement items - """ - retry_policy = RetryPolicy( - initial_interval=timedelta(seconds=1), - backoff_coefficient=2.0, - maximum_interval=timedelta(seconds=120), - maximum_attempts=5, - non_retryable_error_types=["DataCorruptionError"], - ) - - try: - return await workflow.execute_activity( - get_all_procurement_items_activity, - start_to_close_timeout=timedelta(minutes=5), - schedule_to_close_timeout=timedelta(minutes=10), - retry_policy=retry_policy, - ) - except ApplicationError as e: - # Non-retryable error - workflow.logger.error(f"Failed to get all procurement items: {e}") - return f"Error: Unable to get all procurement items. {e.message}" - except Exception as e: - # Unexpected error - workflow.logger.error(f"Unexpected error getting all procurement items: {e}") - return f"Error: System issue getting all procurement items. Please try again." - -def new_procurement_agent(master_construction_schedule: str, human_input_learnings: list) -> Agent: - """ - Create an agent that processes procurement events and takes actions. - - Args: - event_log: History of events that have occurred - master_construction_schedule: Current construction schedule - human_input_learnings: Past escalations and human decisions - - Returns: - Agent configured to process events and call tools - """ - instructions = f""" -You are a procurement agent for a commercial building construction project. - -Your role is to monitor procurement events, take appropriate actions, and escalate critical issues to a human with a recommended action. - -Please escalate to a human if you feel like we are facing a critical schedule delay and provide a recommended action. - -If the user says no or has feedback, please come up with another solution and call the wait_for_human tool again (you can call it as many times as needed). 
- -## Context - -Master Construction Schedule: -{master_construction_schedule} - -Past Learnings from Escalations: -{human_input_learnings} - -Current Date: {workflow.now().isoformat()} - - - """ - - start_to_close_timeout = timedelta(days=1) - - return Agent( - name="Procurement Event Agent", - instructions=instructions, - model="gpt-4o", - tools=[ - openai_agents.workflow.activity_as_tool( - issue_purchase_order, start_to_close_timeout=start_to_close_timeout - ), - openai_agents.workflow.activity_as_tool( - flag_potential_issue, start_to_close_timeout=start_to_close_timeout - ), - openai_agents.workflow.activity_as_tool( - notify_team_shipment_arrived, - start_to_close_timeout=start_to_close_timeout, - ), - openai_agents.workflow.activity_as_tool( - schedule_inspection, start_to_close_timeout=start_to_close_timeout - ), - update_delivery_date_tool, # function_tool wrapper that injects workflow_id - remove_delivery_item_tool, # function_tool wrapper that injects workflow_id - update_project_end_date_tool, # function_tool wrapper that injects workflow_id - create_procurement_item_tool, # function_tool wrapper for creating procurement items - update_procurement_item_tool, # function_tool wrapper for updating procurement items - delete_procurement_item_tool, # function_tool wrapper for deleting procurement items - get_procurement_item_by_name_tool, # function_tool wrapper for getting a specific procurement item - get_all_procurement_items_tool, # function_tool wrapper for getting all procurement items - wait_for_human, # function_tool runs in workflow context - ], - ) \ No newline at end of file diff --git a/examples/demos/procurement_agent/project/agents/summarization_agent.py b/examples/demos/procurement_agent/project/agents/summarization_agent.py deleted file mode 100644 index e74f2d46..00000000 --- a/examples/demos/procurement_agent/project/agents/summarization_agent.py +++ /dev/null @@ -1,53 +0,0 @@ -"""Agent for summarizing conversation history.""" - -from agents import Agent - - -def new_summarization_agent() -> Agent: - """ - Create an agent that summarizes conversation history for context compression. - - This agent analyzes the conversation and creates a detailed but concise summary - that captures key events, decisions, and current state for continuing the workflow. - - Returns: - Agent configured to generate conversation summaries - """ - instructions = """ -You are a summarization agent for a procurement workflow system. - -Your job is to create a detailed but concise summary of the conversation history. -Focus on information that would be helpful for continuing the conversation, including: - -- What procurement events have occurred (submittals, shipments, inspections, etc.) -- What items are being tracked and their current status -- What actions have been taken (purchase orders issued, inspections scheduled, etc.) -- Any critical issues or delays that were identified -- Any human decisions or escalations that occurred -- What is currently being worked on -- What needs to be done next - -Your summary should be comprehensive enough to provide full context but concise enough -to be quickly understood. Aim for 3-5 paragraphs organized by topic. - -Focus on the OUTCOMES and CURRENT STATE rather than listing every single tool call. - -Example format: - -**Items Tracked:** -Steel Beams have been approved, purchase order issued (ID: 6c9e401a...), shipment arrived -on 2026-02-10, inspection passed. Currently marked as complete.
- -**Current Status:** -All items are on schedule with no delays. The workflow is progressing smoothly. - -**Next Steps:** -Continue monitoring upcoming deliveries for HVAC Units and Windows. -""" - - return Agent( - name="Summarization Agent", - instructions=instructions, - model="gpt-4o", - tools=[], # No tools needed - just summarization - ) diff --git a/examples/demos/procurement_agent/project/data/__init__.py b/examples/demos/procurement_agent/project/data/__init__.py deleted file mode 100644 index ec504c84..00000000 --- a/examples/demos/procurement_agent/project/data/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""Procurement agent data module.""" diff --git a/examples/demos/procurement_agent/project/data/database.py b/examples/demos/procurement_agent/project/data/database.py deleted file mode 100644 index a756f7ef..00000000 --- a/examples/demos/procurement_agent/project/data/database.py +++ /dev/null @@ -1,686 +0,0 @@ -""" -Database initialization and management for procurement agent. -Stores master construction schedules indexed by workflow ID. -""" -from __future__ import annotations - -import json -from typing import Optional -from pathlib import Path - -import aiosqlite # type: ignore[import-untyped] - -from agentex.lib.utils.logging import make_logger - -logger = make_logger(__name__) - - -# Custom exceptions for database operations -class DatabaseError(Exception): - """Platform-level database errors (retryable by Temporal)""" - pass - - -class DataCorruptionError(Exception): - """Application-level data errors (non-retryable)""" - pass - -# Database file location (in the data directory) -DB_PATH = Path(__file__).parent / "procurement.db" - -DEFAULT_SCHEDULE = { - "project": { - "name": "Small Office Renovation", - "start_date": "2026-02-01", - "end_date": "2026-05-31" - }, - "deliveries": [ - { - "item": "Steel Beams", - "required_by": "2026-02-15", - "buffer_days": 5 - }, - { - "item": "HVAC Units", - "required_by": "2026-03-01", - "buffer_days": 7 - }, - { - "item": "Windows", - "required_by": "2026-03-15", - "buffer_days": 10 - }, - { - "item": "Flooring Materials", - "required_by": "2026-04-01", - "buffer_days": 3 - }, - { - "item": "Electrical Panels", - "required_by": "2026-04-15", - "buffer_days": 5 - } - ] -} - - -async def init_database() -> None: - """ - Initialize the SQLite database and create tables if they don't exist. - Creates the master_construction_schedule and procurement_items tables. - Safe to call multiple times - uses CREATE TABLE IF NOT EXISTS. 
- - Raises: - DatabaseError: If database initialization fails - """ - logger.info(f"Initializing database at {DB_PATH}") - - try: - async with aiosqlite.connect(DB_PATH) as db: - await db.execute(""" - CREATE TABLE IF NOT EXISTS master_construction_schedule ( - workflow_id TEXT PRIMARY KEY, - project_name TEXT NOT NULL, - project_start_date TEXT NOT NULL, - project_end_date TEXT NOT NULL, - schedule_json TEXT NOT NULL, - created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP - ) - """) - - # Create index on workflow_id for faster lookups - await db.execute(""" - CREATE INDEX IF NOT EXISTS idx_workflow_id - ON master_construction_schedule(workflow_id) - """) - - # Create procurement_items table for tracking item status through workflow - await db.execute(""" - CREATE TABLE IF NOT EXISTS procurement_items ( - workflow_id TEXT NOT NULL, - item TEXT NOT NULL, - status TEXT NOT NULL, - eta TEXT, - date_arrived TEXT, - purchase_order_id TEXT, - created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - PRIMARY KEY (workflow_id, item) - ) - """) - - # Create index on workflow_id for faster lookups - await db.execute(""" - CREATE INDEX IF NOT EXISTS idx_procurement_workflow_id - ON procurement_items(workflow_id) - """) - - await db.commit() - logger.info("Database initialized successfully") - - except aiosqlite.Error as e: - # Fatal error - can't initialize database - logger.error(f"Failed to initialize database: {e}") - raise DatabaseError(f"Failed to initialize database: {e}") from e - except Exception as e: - logger.error(f"Unexpected error during database initialization: {e}") - raise DatabaseError(f"Unexpected database initialization error: {e}") from e - - -async def create_schedule_for_workflow( - workflow_id: str, - schedule: Optional[dict] = None -) -> None: - """ - Create a new construction schedule for a specific workflow. - Uses default schedule if none provided. - - Args: - workflow_id: The Temporal workflow ID - schedule: Optional custom schedule dict. 
If None, uses DEFAULT_SCHEDULE - - Raises: - DatabaseError: If database operation fails (retryable by Temporal) - DataCorruptionError: If schedule data is invalid (non-retryable) - """ - # Input validation - non-retryable errors - if not workflow_id or not isinstance(workflow_id, str): - raise DataCorruptionError("Invalid workflow_id: must be a non-empty string") - - if schedule is None: - schedule = DEFAULT_SCHEDULE - - # Validate schedule structure - non-retryable errors - try: - if "project" not in schedule: - raise DataCorruptionError("Schedule missing 'project' key") - required_keys = ["name", "start_date", "end_date"] - for key in required_keys: - if key not in schedule["project"]: - raise DataCorruptionError(f"Schedule project missing required key: {key}") - except (TypeError, AttributeError) as e: - raise DataCorruptionError(f"Invalid schedule structure: {e}") from e - - try: - # Validate JSON serialization before inserting - schedule_json = json.dumps(schedule) - - async with aiosqlite.connect(DB_PATH) as db: - await db.execute(""" - INSERT OR REPLACE INTO master_construction_schedule - (workflow_id, project_name, project_start_date, project_end_date, schedule_json, updated_at) - VALUES (?, ?, ?, ?, ?, CURRENT_TIMESTAMP) - """, ( - workflow_id, - schedule["project"]["name"], - schedule["project"]["start_date"], - schedule["project"]["end_date"], - schedule_json - )) - await db.commit() - logger.info(f"Created schedule for workflow {workflow_id}") - - except (TypeError, ValueError) as e: - # Data error - can't serialize to JSON, don't retry - logger.error(f"Failed to serialize schedule to JSON: {e}") - raise DataCorruptionError(f"Schedule data cannot be serialized: {e}") from e - - except aiosqlite.IntegrityError as e: - # Data constraint violation - don't retry - logger.error(f"Data integrity error: {e}") - raise DataCorruptionError(f"Data integrity error: {e}") from e - - except aiosqlite.Error as e: - # Database connection/lock errors - retryable - logger.warning(f"Database error creating schedule (retryable): {e}") - raise DatabaseError(f"Failed to create schedule: {e}") from e - - except Exception as e: - # Unexpected error - treat as retryable - logger.error(f"Unexpected error creating schedule: {e}") - raise DatabaseError(f"Unexpected error creating schedule: {e}") from e - - -async def get_schedule_for_workflow(workflow_id: str) -> Optional[dict]: - """ - Retrieve the construction schedule for a specific workflow. - - Args: - workflow_id: The Temporal workflow ID - - Returns: - The schedule dict or None if not found - - Raises: - DatabaseError: If database operation fails (retryable by Temporal) - DataCorruptionError: If stored JSON is corrupted (non-retryable) - """ - try: - async with aiosqlite.connect(DB_PATH) as db: - db.row_factory = aiosqlite.Row - async with db.execute(""" - SELECT schedule_json FROM master_construction_schedule - WHERE workflow_id = ? 
- """, (workflow_id,)) as cursor: - row = await cursor.fetchone() - if row: - # Validate JSON before returning - try: - return json.loads(row["schedule_json"]) - except json.JSONDecodeError as e: - logger.error(f"Corrupted JSON in database for workflow {workflow_id}: {e}") - raise DataCorruptionError( - f"Schedule JSON corrupted for workflow {workflow_id}: {e}" - ) from e - return None - - except DataCorruptionError: - # Re-raise data corruption errors - raise - - except aiosqlite.Error as e: - # Database connection errors - retryable - logger.warning(f"Database error retrieving schedule (retryable): {e}") - raise DatabaseError(f"Failed to retrieve schedule: {e}") from e - - except Exception as e: - # Unexpected error - treat as retryable - logger.error(f"Unexpected error retrieving schedule: {e}") - raise DatabaseError(f"Unexpected error retrieving schedule: {e}") from e - -async def update_delivery_date_for_item_for_workflow(workflow_id: str, item: str, new_delivery_date: str) -> None: - """ - Update the delivery date for a specific item in the construction schedule for a specific workflow. - - Raises: - DatabaseError: If database operation fails (retryable by Temporal) - DataCorruptionError: If schedule not found or item not found (non-retryable) - """ - # Get the current schedule (may raise DatabaseError or DataCorruptionError) - schedule = await get_schedule_for_workflow(workflow_id) - if schedule is None: - logger.error(f"No schedule found for workflow {workflow_id}") - raise DataCorruptionError(f"No schedule found for workflow {workflow_id}") - - # Update the delivery item's required_by date - updated = False - for delivery in schedule.get("deliveries", []): - if delivery.get("item") == item: - delivery["required_by"] = new_delivery_date - updated = True - break - - if not updated: - logger.error(f"Item {item} not found in schedule for workflow {workflow_id}") - raise DataCorruptionError(f"Item {item} not found in schedule for workflow {workflow_id}") - - # Save the updated schedule back to the database - try: - async with aiosqlite.connect(DB_PATH) as db: - await db.execute(""" - UPDATE master_construction_schedule - SET schedule_json = ?, updated_at = CURRENT_TIMESTAMP - WHERE workflow_id = ? - """, (json.dumps(schedule), workflow_id)) - await db.commit() - logger.info(f"Updated delivery date for item {item} in workflow {workflow_id}") - - except aiosqlite.Error as e: - # Database connection errors - retryable - logger.warning(f"Database error updating delivery date (retryable): {e}") - raise DatabaseError(f"Failed to update delivery date: {e}") from e - - except Exception as e: - # Unexpected error - treat as retryable - logger.error(f"Unexpected error updating delivery date: {e}") - raise DatabaseError(f"Unexpected error updating delivery date: {e}") from e - -async def remove_delivery_item_for_workflow(workflow_id: str, item: str) -> None: - """ - Remove a delivery item from the construction schedule for a specific workflow. 
- - Raises: - DatabaseError: If database operation fails (retryable by Temporal) - DataCorruptionError: If schedule not found or item not found (non-retryable) - """ - # Get the current schedule (may raise DatabaseError or DataCorruptionError) - schedule = await get_schedule_for_workflow(workflow_id) - if schedule is None: - logger.error(f"No schedule found for workflow {workflow_id}") - raise DataCorruptionError(f"No schedule found for workflow {workflow_id}") - - # Remove the delivery item from the list - original_count = len(schedule.get("deliveries", [])) - schedule["deliveries"] = [ - delivery for delivery in schedule.get("deliveries", []) - if delivery.get("item") != item - ] - - if len(schedule["deliveries"]) == original_count: - logger.error(f"Item {item} not found in schedule for workflow {workflow_id}") - raise DataCorruptionError(f"Item {item} not found in schedule for workflow {workflow_id}") - - # Save the updated schedule back to the database - try: - async with aiosqlite.connect(DB_PATH) as db: - await db.execute(""" - UPDATE master_construction_schedule - SET schedule_json = ?, updated_at = CURRENT_TIMESTAMP - WHERE workflow_id = ? - """, (json.dumps(schedule), workflow_id)) - await db.commit() - logger.info(f"Removed delivery item {item} from workflow {workflow_id}") - - except aiosqlite.Error as e: - # Database connection errors - retryable - logger.warning(f"Database error removing delivery item (retryable): {e}") - raise DatabaseError(f"Failed to remove delivery item: {e}") from e - - except Exception as e: - # Unexpected error - treat as retryable - logger.error(f"Unexpected error removing delivery item: {e}") - raise DatabaseError(f"Unexpected error removing delivery item: {e}") from e - -async def update_project_end_date_for_workflow(workflow_id: str, new_end_date: str) -> None: - """ - Update the end date for the project in the construction schedule for a specific workflow. - - Raises: - DatabaseError: If database operation fails (retryable by Temporal) - DataCorruptionError: If schedule not found (non-retryable) - """ - # Get the current schedule (may raise DatabaseError or DataCorruptionError) - schedule = await get_schedule_for_workflow(workflow_id) - if schedule is None: - logger.error(f"No schedule found for workflow {workflow_id}") - raise DataCorruptionError(f"No schedule found for workflow {workflow_id}") - - # Update the project end date in both the JSON and the dedicated column - schedule["project"]["end_date"] = new_end_date - - try: - async with aiosqlite.connect(DB_PATH) as db: - await db.execute(""" - UPDATE master_construction_schedule - SET project_end_date = ?, schedule_json = ?, updated_at = CURRENT_TIMESTAMP - WHERE workflow_id = ? 
- """, (new_end_date, json.dumps(schedule), workflow_id)) - await db.commit() - logger.info(f"Updated end date for project in workflow {workflow_id}") - - except aiosqlite.Error as e: - # Database connection errors - retryable - logger.warning(f"Database error updating project end date (retryable): {e}") - raise DatabaseError(f"Failed to update project end date: {e}") from e - - except Exception as e: - # Unexpected error - treat as retryable - logger.error(f"Unexpected error updating project end date: {e}") - raise DatabaseError(f"Unexpected error updating project end date: {e}") from e - - -async def create_procurement_item( - workflow_id: str, - item: str, - status: str, - eta: Optional[str] = None, - date_arrived: Optional[str] = None, - purchase_order_id: Optional[str] = None -) -> None: - """ - Create a new procurement item for tracking through the workflow. - - Args: - workflow_id: The Temporal workflow ID - item: The item name (e.g., "Steel Beams") - status: Current status of the item - eta: Optional estimated time of arrival - date_arrived: Optional date the item arrived - purchase_order_id: Optional purchase order ID - - Raises: - DatabaseError: If database operation fails (retryable by Temporal) - DataCorruptionError: If input data is invalid (non-retryable) - """ - # Input validation - non-retryable errors - if not workflow_id or not isinstance(workflow_id, str): - raise DataCorruptionError("Invalid workflow_id: must be a non-empty string") - - if not item or not isinstance(item, str): - raise DataCorruptionError("Invalid item: must be a non-empty string") - - if not status or not isinstance(status, str): - raise DataCorruptionError("Invalid status: must be a non-empty string") - - try: - async with aiosqlite.connect(DB_PATH) as db: - await db.execute(""" - INSERT OR REPLACE INTO procurement_items - (workflow_id, item, status, eta, date_arrived, purchase_order_id, updated_at) - VALUES (?, ?, ?, ?, ?, ?, CURRENT_TIMESTAMP) - """, ( - workflow_id, - item, - status, - eta, - date_arrived, - purchase_order_id - )) - await db.commit() - logger.info(f"Created procurement item for workflow {workflow_id}: {item} with status {status}") - - except aiosqlite.IntegrityError as e: - # Data constraint violation - don't retry - logger.error(f"Data integrity error: {e}") - raise DataCorruptionError(f"Data integrity error: {e}") from e - - except aiosqlite.Error as e: - # Database connection/lock errors - retryable - logger.warning(f"Database error creating procurement item (retryable): {e}") - raise DatabaseError(f"Failed to create procurement item: {e}") from e - - except Exception as e: - # Unexpected error - treat as retryable - logger.error(f"Unexpected error creating procurement item: {e}") - raise DatabaseError(f"Unexpected error creating procurement item: {e}") from e - - -async def update_procurement_item( - workflow_id: str, - item: str, - status: Optional[str] = None, - eta: Optional[str] = None, - date_arrived: Optional[str] = None, - purchase_order_id: Optional[str] = None -) -> None: - """ - Update a procurement item's fields. Only updates fields that are provided. 
- - Args: - workflow_id: The Temporal workflow ID - item: The item name (e.g., "Steel Beams") - status: Optional new status - eta: Optional new estimated time of arrival - date_arrived: Optional new arrival date - purchase_order_id: Optional new purchase order ID - - Raises: - DatabaseError: If database operation fails (retryable by Temporal) - DataCorruptionError: If workflow_id is invalid or item not found (non-retryable) - """ - # Input validation - non-retryable errors - if not workflow_id or not isinstance(workflow_id, str): - raise DataCorruptionError("Invalid workflow_id: must be a non-empty string") - - if not item or not isinstance(item, str): - raise DataCorruptionError("Invalid item: must be a non-empty string") - - # Build dynamic update query based on provided fields - update_fields = [] - params = [] - - if status is not None: - update_fields.append("status = ?") - params.append(status) - - if eta is not None: - update_fields.append("eta = ?") - params.append(eta) - - if date_arrived is not None: - update_fields.append("date_arrived = ?") - params.append(date_arrived) - - if purchase_order_id is not None: - update_fields.append("purchase_order_id = ?") - params.append(purchase_order_id) - - if not update_fields: - logger.warning(f"No fields to update for workflow {workflow_id}") - return - - # Always update the updated_at timestamp - update_fields.append("updated_at = CURRENT_TIMESTAMP") - params.extend([workflow_id, item]) - - try: - async with aiosqlite.connect(DB_PATH) as db: - query = f""" - UPDATE procurement_items - SET {', '.join(update_fields)} - WHERE workflow_id = ? AND item = ? - """ - cursor = await db.execute(query, params) - - if cursor.rowcount == 0: - logger.error(f"No procurement item found for workflow {workflow_id} with item {item}") - raise DataCorruptionError(f"No procurement item found for workflow {workflow_id} with item {item}") - - await db.commit() - logger.info(f"Updated procurement item for workflow {workflow_id}") - - except DataCorruptionError: - # Re-raise data corruption errors - raise - - except aiosqlite.Error as e: - # Database connection errors - retryable - logger.warning(f"Database error updating procurement item (retryable): {e}") - raise DatabaseError(f"Failed to update procurement item: {e}") from e - - except Exception as e: - # Unexpected error - treat as retryable - logger.error(f"Unexpected error updating procurement item: {e}") - raise DatabaseError(f"Unexpected error updating procurement item: {e}") from e - - -async def delete_procurement_item(workflow_id: str, item: str) -> None: - """ - Delete a procurement item from the database. - - Args: - workflow_id: The Temporal workflow ID - item: The item name (e.g., "Steel Beams") - - Raises: - DatabaseError: If database operation fails (retryable by Temporal) - DataCorruptionError: If workflow_id is invalid or item not found (non-retryable) - """ - # Input validation - non-retryable errors - if not workflow_id or not isinstance(workflow_id, str): - raise DataCorruptionError("Invalid workflow_id: must be a non-empty string") - - if not item or not isinstance(item, str): - raise DataCorruptionError("Invalid item: must be a non-empty string") - - try: - async with aiosqlite.connect(DB_PATH) as db: - cursor = await db.execute(""" - DELETE FROM procurement_items - WHERE workflow_id = ? AND item = ? 
- """, (workflow_id, item)) - - if cursor.rowcount == 0: - logger.error(f"No procurement item found for workflow {workflow_id} with item {item}") - raise DataCorruptionError(f"No procurement item found for workflow {workflow_id} with item {item}") - - await db.commit() - logger.info(f"Deleted procurement item for workflow {workflow_id}") - - except DataCorruptionError: - # Re-raise data corruption errors - raise - - except aiosqlite.Error as e: - # Database connection errors - retryable - logger.warning(f"Database error deleting procurement item (retryable): {e}") - raise DatabaseError(f"Failed to delete procurement item: {e}") from e - - except Exception as e: - # Unexpected error - treat as retryable - logger.error(f"Unexpected error deleting procurement item: {e}") - raise DatabaseError(f"Unexpected error deleting procurement item: {e}") from e - - -async def get_procurement_item_by_name(workflow_id: str, item: str) -> Optional[dict]: - """ - Retrieve a procurement item for a specific workflow and item name. - - Args: - workflow_id: The Temporal workflow ID - item: The item name (e.g., "Steel Beams") - - Returns: - The procurement item dict or None if not found - - Raises: - DatabaseError: If database operation fails (retryable by Temporal) - DataCorruptionError: If input validation fails (non-retryable) - """ - # Input validation - non-retryable errors - if not workflow_id or not isinstance(workflow_id, str): - raise DataCorruptionError("Invalid workflow_id: must be a non-empty string") - - if not item or not isinstance(item, str): - raise DataCorruptionError("Invalid item: must be a non-empty string") - - try: - async with aiosqlite.connect(DB_PATH) as db: - db.row_factory = aiosqlite.Row - async with db.execute(""" - SELECT workflow_id, item, status, eta, date_arrived, purchase_order_id, created_at, updated_at - FROM procurement_items - WHERE workflow_id = ? AND item = ? - """, (workflow_id, item)) as cursor: - row = await cursor.fetchone() - if row: - return { - "workflow_id": row["workflow_id"], - "item": row["item"], - "status": row["status"], - "eta": row["eta"], - "date_arrived": row["date_arrived"], - "purchase_order_id": row["purchase_order_id"], - "created_at": row["created_at"], - "updated_at": row["updated_at"], - } - return None - - except DataCorruptionError: - # Re-raise data corruption errors - raise - - except aiosqlite.Error as e: - # Database connection errors - retryable - logger.warning(f"Database error retrieving procurement item (retryable): {e}") - raise DatabaseError(f"Failed to retrieve procurement item: {e}") from e - - except Exception as e: - # Unexpected error - treat as retryable - logger.error(f"Unexpected error retrieving procurement item: {e}") - raise DatabaseError(f"Unexpected error retrieving procurement item: {e}") from e - - -async def get_all_procurement_items() -> list[dict]: - """ - Retrieve all procurement items from the database. 
- - Returns: - List of procurement item dicts - - Raises: - DatabaseError: If database operation fails (retryable by Temporal) - """ - try: - async with aiosqlite.connect(DB_PATH) as db: - db.row_factory = aiosqlite.Row - async with db.execute(""" - SELECT workflow_id, item, status, eta, date_arrived, purchase_order_id, created_at, updated_at - FROM procurement_items - ORDER BY created_at DESC - """) as cursor: - rows = await cursor.fetchall() - return [ - { - "workflow_id": row["workflow_id"], - "item": row["item"], - "status": row["status"], - "eta": row["eta"], - "date_arrived": row["date_arrived"], - "purchase_order_id": row["purchase_order_id"], - "created_at": row["created_at"], - "updated_at": row["updated_at"], - } - for row in rows - ] - - except aiosqlite.Error as e: - # Database connection errors - retryable - logger.warning(f"Database error retrieving all procurement items (retryable): {e}") - raise DatabaseError(f"Failed to retrieve all procurement items: {e}") from e - - except Exception as e: - # Unexpected error - treat as retryable - logger.error(f"Unexpected error retrieving all procurement items: {e}") - raise DatabaseError(f"Unexpected error retrieving all procurement items: {e}") from e \ No newline at end of file diff --git a/examples/demos/procurement_agent/project/models/__init__.py b/examples/demos/procurement_agent/project/models/__init__.py deleted file mode 100644 index 1b2da8d1..00000000 --- a/examples/demos/procurement_agent/project/models/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""Procurement agent models module.""" diff --git a/examples/demos/procurement_agent/project/models/events.py b/examples/demos/procurement_agent/project/models/events.py deleted file mode 100644 index 634626ec..00000000 --- a/examples/demos/procurement_agent/project/models/events.py +++ /dev/null @@ -1,46 +0,0 @@ -from enum import Enum -from datetime import datetime - -from pydantic import Field, BaseModel - - -class EventType(Enum): - SUBMITTAL_APPROVED = "Submittal_Approved" - SHIPMENT_DEPARTED_FACTORY = "Shipment_Departed_Factory" - SHIPMENT_ARRIVED_SITE = "Shipment_Arrived_Site" - INSPECTION_FAILED = "Inspection_Failed" - INSPECTION_PASSED = "Inspection_Passed" - HUMAN_INPUT = "Human_Input" - -class SubmitalApprovalEvent(BaseModel): - event_type: EventType = Field(default=EventType.SUBMITTAL_APPROVED) - item: str - document_url: str - document_name: str - -class ShipmentDepartedFactoryEvent(BaseModel): - event_type: EventType = Field(default=EventType.SHIPMENT_DEPARTED_FACTORY) - item: str - eta: datetime - date_departed: datetime - location_address: str - -class ShipmentArrivedSiteEvent(BaseModel): - event_type: EventType = Field(default=EventType.SHIPMENT_ARRIVED_SITE) - item: str - date_arrived: datetime - location_address: str - -class InspectionFailedEvent(BaseModel): - event_type: EventType = Field(default=EventType.INSPECTION_FAILED) - item: str - inspection_date: datetime - document_url: str - document_name: str - -class InspectionPassedEvent(BaseModel): - event_type: EventType = Field(default=EventType.INSPECTION_PASSED) - item: str - inspection_date: datetime - document_url: str - document_name: str \ No newline at end of file diff --git a/examples/demos/procurement_agent/project/run_worker.py b/examples/demos/procurement_agent/project/run_worker.py deleted file mode 100644 index 127a810f..00000000 --- a/examples/demos/procurement_agent/project/run_worker.py +++ /dev/null @@ -1,96 +0,0 @@ -import asyncio - -from temporalio.contrib.openai_agents import OpenAIAgentsPlugin 
- -from project.workflow import ProcurementAgentWorkflow -from project.data.database import init_database -from agentex.lib.utils.debug import setup_debug_if_enabled -from agentex.lib.utils.logging import make_logger -from project.activities.activities import ( - schedule_inspection, - flag_potential_issue, - issue_purchase_order, - remove_delivery_item, - update_project_end_date, - notify_team_shipment_arrived, - update_delivery_date_for_item, - create_procurement_item_activity, - delete_procurement_item_activity, - get_master_construction_schedule, - update_procurement_item_activity, - get_all_procurement_items_activity, - create_master_construction_schedule, - get_procurement_item_by_name_activity, -) -from agentex.lib.environment_variables import EnvironmentVariables -from agentex.lib.core.temporal.activities import get_all_activities -from agentex.lib.core.temporal.workers.worker import AgentexWorker -from agentex.lib.core.temporal.plugins.openai_agents.hooks.activities import stream_lifecycle_content -from agentex.lib.core.temporal.plugins.openai_agents.models.temporal_streaming_model import ( - TemporalStreamingModelProvider, -) -from agentex.lib.core.temporal.plugins.openai_agents.interceptors.context_interceptor import ContextInterceptor - -environment_variables = EnvironmentVariables.refresh() - -logger = make_logger(__name__) - - -async def main(): - """ - Main worker initialization and execution. - Handles database initialization and worker startup with error handling. - """ - try: - # Setup debug mode if enabled - setup_debug_if_enabled() - - task_queue_name = environment_variables.WORKFLOW_TASK_QUEUE - if task_queue_name is None: - raise ValueError("WORKFLOW_TASK_QUEUE is not set") - - # Initialize the database with error handling - try: - await init_database() - logger.info("Database initialized successfully") - except Exception as e: - logger.error(f"Failed to initialize database: {e}") - raise RuntimeError(f"Database initialization failed: {e}") from e - - all_activities = get_all_activities() + [stream_lifecycle_content, issue_purchase_order, flag_potential_issue, notify_team_shipment_arrived, schedule_inspection, - create_master_construction_schedule, get_master_construction_schedule, update_delivery_date_for_item, remove_delivery_item, update_project_end_date, - create_procurement_item_activity, update_procurement_item_activity, delete_procurement_item_activity, - get_procurement_item_by_name_activity, get_all_procurement_items_activity] - - context_interceptor = ContextInterceptor() - streaming_model_provider = TemporalStreamingModelProvider() - - # Create a worker with automatic tracing - worker = AgentexWorker( - task_queue=task_queue_name, - plugins=[OpenAIAgentsPlugin(model_provider=streaming_model_provider)], - interceptors=[context_interceptor], - ) - - logger.info(f"Starting worker on task queue: {task_queue_name}") - - await worker.run( - activities=all_activities, - workflow=ProcurementAgentWorkflow, - ) - - except ValueError as e: - # Configuration error - logger.error(f"Configuration error: {e}") - raise - except RuntimeError as e: - # Database or initialization error - logger.error(f"Initialization error: {e}") - raise - except Exception as e: - # Unexpected error - logger.error(f"Unexpected error in worker: {e}", exc_info=True) - raise - -if __name__ == "__main__": - asyncio.run(main()) \ No newline at end of file diff --git a/examples/demos/procurement_agent/project/scripts/__init__.py b/examples/demos/procurement_agent/project/scripts/__init__.py deleted 
file mode 100644 index 6f84b9de..00000000 --- a/examples/demos/procurement_agent/project/scripts/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""Procurement agent scripts module.""" diff --git a/examples/demos/procurement_agent/project/scripts/send_test_events.py b/examples/demos/procurement_agent/project/scripts/send_test_events.py deleted file mode 100644 index e85b75c4..00000000 --- a/examples/demos/procurement_agent/project/scripts/send_test_events.py +++ /dev/null @@ -1,295 +0,0 @@ -#!/usr/bin/env python -""" -Simple script to automatically send fake events to the workflow. -Just run this script and it will send a few test events to demonstrate the event handling. -""" - -import os -import sys -import asyncio -from datetime import datetime - -from temporalio.client import Client - -from project.models.events import ( - EventType, - InspectionFailedEvent, - InspectionPassedEvent, - SubmitalApprovalEvent, - ShipmentArrivedSiteEvent, - ShipmentDepartedFactoryEvent, -) -from agentex.lib.utils.logging import make_logger -from agentex.lib.environment_variables import EnvironmentVariables - -# Set defaults for local development -os.environ.setdefault("AGENT_NAME", "procurement-agent") -os.environ.setdefault("ACP_URL", "http://localhost:8000") -os.environ.setdefault("WORKFLOW_NAME", "procurement-agent") -os.environ.setdefault("WORKFLOW_TASK_QUEUE", "procurement_agent_queue") -os.environ.setdefault("TEMPORAL_ADDRESS", "localhost:7233") - -logger = make_logger(__name__) -environment_variables = EnvironmentVariables.refresh() - - -async def send_fake_events(workflow_id: str): - """Send a series of fake events to the workflow.""" - - # Connect to Temporal - temporal_url = environment_variables.TEMPORAL_ADDRESS or "localhost:7233" - client = await Client.connect(temporal_url) - - # Get handle to the workflow - handle = client.get_workflow_handle(workflow_id) - - # Define the procurement event flow for Steel Beams (passes inspection) - # Required by: 2026-02-15, Buffer: 5 days - # Arriving on 2026-02-10 (5 days early - within buffer) - steel_beams_events = [ - SubmitalApprovalEvent( - event_type=EventType.SUBMITTAL_APPROVED, - item="Steel Beams", - document_name="Steel Beams Submittal.pdf", - document_url="/submittal_approval.pdf" - ), - ShipmentDepartedFactoryEvent( - event_type=EventType.SHIPMENT_DEPARTED_FACTORY, - item="Steel Beams", - eta=datetime(2026, 2, 10, 14, 30), - date_departed=datetime(2026, 2, 3, 9, 15), - location_address="218 W 18th St, New York, NY 10011" - ), - ShipmentArrivedSiteEvent( - event_type=EventType.SHIPMENT_ARRIVED_SITE, - item="Steel Beams", - date_arrived=datetime(2026, 2, 10, 15, 45), - location_address="650 Townsend St, San Francisco, CA 94103" - ), - InspectionPassedEvent( - event_type=EventType.INSPECTION_PASSED, - item="Steel Beams", - inspection_date=datetime(2026, 2, 11, 10, 20), - document_name="Steel Beams Inspection Report.pdf", - document_url="/inspection_passed.pdf" - ) - ] - - # Define the procurement event flow for HVAC Units (fails inspection) - # Required by: 2026-03-01, Buffer: 7 days - # Arriving on 2026-02-22 (7 days early - within buffer) - hvac_events = [ - SubmitalApprovalEvent( - event_type=EventType.SUBMITTAL_APPROVED, - item="HVAC Units", - document_name="HVAC Units Submittal.pdf", - document_url="/submittal_approval.pdf" - ), - ShipmentDepartedFactoryEvent( - event_type=EventType.SHIPMENT_DEPARTED_FACTORY, - item="HVAC Units", - eta=datetime(2026, 2, 22, 11, 0), - date_departed=datetime(2026, 2, 15, 13, 45), - location_address="218 W 18th St, New 
York, NY 10011" - ), - ShipmentArrivedSiteEvent( - event_type=EventType.SHIPMENT_ARRIVED_SITE, - item="HVAC Units", - date_arrived=datetime(2026, 2, 22, 10, 30), - location_address="650 Townsend St, San Francisco, CA 94103" - ), - InspectionFailedEvent( - event_type=EventType.INSPECTION_FAILED, - item="HVAC Units", - inspection_date=datetime(2026, 2, 23, 14, 15), - document_name="HVAC Units Inspection Report.pdf", - document_url="/inspection_failed.pdf" - ) - ] - - # Define the procurement event flow for Windows (passes inspection - everything smooth) - # Required by: 2026-03-15, Buffer: 10 days - # Arriving on 2026-03-05 (10 days early - within buffer) - windows_events = [ - SubmitalApprovalEvent( - event_type=EventType.SUBMITTAL_APPROVED, - item="Windows", - document_name="Windows Submittal.pdf", - document_url="/submittal_approval.pdf" - ), - ShipmentDepartedFactoryEvent( - event_type=EventType.SHIPMENT_DEPARTED_FACTORY, - item="Windows", - eta=datetime(2026, 3, 5, 16, 0), - date_departed=datetime(2026, 2, 20, 8, 30), - location_address="218 W 18th St, New York, NY 10011" - ), - ShipmentArrivedSiteEvent( - event_type=EventType.SHIPMENT_ARRIVED_SITE, - item="Windows", - date_arrived=datetime(2026, 3, 5, 16, 20), - location_address="650 Townsend St, San Francisco, CA 94103" - ), - InspectionPassedEvent( - event_type=EventType.INSPECTION_PASSED, - item="Windows", - inspection_date=datetime(2026, 3, 6, 9, 45), - document_name="Windows Inspection Report.pdf", - document_url="/inspection_passed.pdf" - ), - # Duplicate arrival event to test agent doesn't double-process - ShipmentArrivedSiteEvent( - event_type=EventType.SHIPMENT_ARRIVED_SITE, - item="Windows", - date_arrived=datetime(2026, 3, 5, 16, 20), - location_address="650 Townsend St, San Francisco, CA 94103" - ) - ] - - # Define the procurement event flow for Flooring Materials (passes inspection - everything smooth) - # Required by: 2026-04-01, Buffer: 3 days - # Arriving on 2026-03-29 (3 days early - within buffer) - flooring_events = [ - SubmitalApprovalEvent( - event_type=EventType.SUBMITTAL_APPROVED, - item="Flooring Materials", - document_name="Flooring Materials Submittal.pdf", - document_url="/submittal_approval.pdf" - ), - ShipmentDepartedFactoryEvent( - event_type=EventType.SHIPMENT_DEPARTED_FACTORY, - item="Flooring Materials", - eta=datetime(2026, 3, 29, 13, 15), - date_departed=datetime(2026, 3, 22, 11, 30), - location_address="218 W 18th St, New York, NY 10011" - ), - ShipmentArrivedSiteEvent( - event_type=EventType.SHIPMENT_ARRIVED_SITE, - item="Flooring Materials", - date_arrived=datetime(2026, 3, 29, 12, 45), - location_address="650 Townsend St, San Francisco, CA 94103" - ), - InspectionPassedEvent( - event_type=EventType.INSPECTION_PASSED, - item="Flooring Materials", - inspection_date=datetime(2026, 3, 30, 15, 30), - document_name="Flooring Materials Inspection Report.pdf", - document_url="/inspection_passed.pdf" - ) - ] - - # Define the procurement event flow for Electrical Panels (fails inspection) - # Required by: 2026-04-15, Buffer: 5 days - # Arriving on 2026-04-10 (5 days early - within buffer) - # Agent should apply learnings from HVAC Units failure - electrical_events = [ - SubmitalApprovalEvent( - event_type=EventType.SUBMITTAL_APPROVED, - item="Electrical Panels", - document_name="Electrical Panels Submittal.pdf", - document_url="/submittal_approval.pdf" - ), - ShipmentDepartedFactoryEvent( - event_type=EventType.SHIPMENT_DEPARTED_FACTORY, - item="Electrical Panels", - eta=datetime(2026, 4, 10, 10, 45), - 
date_departed=datetime(2026, 4, 1, 14, 0), - location_address="218 W 18th St, New York, NY 10011" - ), - ShipmentArrivedSiteEvent( - event_type=EventType.SHIPMENT_ARRIVED_SITE, - item="Electrical Panels", - date_arrived=datetime(2026, 4, 10, 11, 15), - location_address="650 Townsend St, San Francisco, CA 94103" - ), - InspectionFailedEvent( - event_type=EventType.INSPECTION_FAILED, - item="Electrical Panels", - inspection_date=datetime(2026, 4, 11, 13, 0), - document_name="Electrical Panels Inspection Report.pdf", - document_url="/inspection_failed.pdf" - ) - ] - - # Combine all events - all_events = [ - ("Steel Beams", steel_beams_events), - ("HVAC Units", hvac_events), - ("Windows", windows_events), - ("Flooring Materials", flooring_events), - ("Electrical Panels", electrical_events) - ] - - print(f"Connected to workflow: {workflow_id}") - print("=" * 60) - print("Sending procurement events...") - print("=" * 60) - - for item_name, events in all_events: - print(f"\n{'=' * 60}") - print(f"Processing: {item_name}") - print("=" * 60) - - for i, event in enumerate(events, 1): - print(f"\n[Event {i}] Sending: {event.event_type.value}") - print(f" Item: {event.item}") - - # Show additional details based on event type - if hasattr(event, 'eta'): - print(f" ETA: {event.eta}") - if hasattr(event, 'date_arrived'): - print(f" Date Arrived: {event.date_arrived}") - if hasattr(event, 'inspection_date'): - print(f" Inspection Date: {event.inspection_date}") - - try: - # Send the event using the send_event signal - # Convert event to JSON string - event_data = event.model_dump_json() - await handle.signal("send_event", event_data) - print("✓ Event sent successfully!") - - # Wait a bit between events so you can see them being processed - await asyncio.sleep(10) - - except Exception as e: - print(f"✗ Error sending event: {e}") - logger.error(f"Failed to send event: {e}") - - print("\n" + "=" * 60) - print("All events have been sent!") - print("Check your workflow in the UI to see the processed events.") - print("=" * 60) - - -async def main(): - """Main entry point.""" - - # Get workflow ID from command line or prompt user - if len(sys.argv) > 1: - workflow_id = sys.argv[1] - else: - print("Enter the Workflow ID to send events to:") - print("(You can find this in the AgentEx UI or Temporal dashboard)") - workflow_id = input("Workflow ID: ").strip() - - if not workflow_id: - print("Error: Workflow ID is required!") - print("\nUsage: python send_test_events.py [workflow_id]") - return - - try: - await send_fake_events(workflow_id) - except KeyboardInterrupt: - print("\n\nInterrupted. Goodbye!") - except Exception as e: - logger.error(f"Unexpected error: {e}") - print(f"Error: {e}") - print("\nMake sure:") - print("1. The workflow is running") - print("2. The workflow ID is correct") - print("3.
Temporal is accessible at", environment_variables.TEMPORAL_ADDRESS) - - -if __name__ == "__main__": - asyncio.run(main()) \ No newline at end of file diff --git a/examples/demos/procurement_agent/project/utils/__init__.py b/examples/demos/procurement_agent/project/utils/__init__.py deleted file mode 100644 index be8d6ac8..00000000 --- a/examples/demos/procurement_agent/project/utils/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -"""Utility functions for the procurement agent.""" - -from project.utils.learning_extraction import get_new_wait_for_human_context - -__all__ = ["get_new_wait_for_human_context"] diff --git a/examples/demos/procurement_agent/project/utils/learning_extraction.py b/examples/demos/procurement_agent/project/utils/learning_extraction.py deleted file mode 100644 index e6cb61b3..00000000 --- a/examples/demos/procurement_agent/project/utils/learning_extraction.py +++ /dev/null @@ -1,69 +0,0 @@ -"""Utility for extracting new context from human interactions using a "going backwards" approach. - -This module prevents re-processing old wait_for_human calls by: -1. Iterating backwards through the conversation -2. Stopping when we hit a previously-processed wait_for_human call -3. Returning only the NEW portion of the conversation -""" - -from typing import Any, Set, Dict, List, Tuple, Optional - -from agentex.lib.utils.logging import make_logger - -logger = make_logger(__name__) - - -def get_new_wait_for_human_context( - full_conversation: List[Dict[str, Any]], - extracted_learning_call_ids: Set[str], -) -> Optional[Tuple[List[Dict[str, Any]], str]]: - """ - Extract NEW context since the last processed wait_for_human call. - - Similar to OpenCode's filterCompacted() pattern, this function: - - Iterates backwards through the full conversation history - - Stops when it finds a wait_for_human call we've already processed - - Returns only the NEW context - - Args: - full_conversation: The complete conversation history (self._state.input_list) - extracted_learning_call_ids: Set of call_ids we've already extracted learnings from - - Returns: - Tuple of (new_context_messages, call_id) if a new wait_for_human was found, None otherwise - """ - # Go backwards through the conversation to find new wait_for_human calls - new_context = [] - found_new_wait_for_human = False - new_wait_for_human_call_id = None - - for item in reversed(full_conversation): - # Always collect items as we go backwards - new_context.append(item) - - # Check if this is a wait_for_human function call - if isinstance(item, dict) and item.get("type") == "function_call": - if item.get("name") == "wait_for_human": - call_id = item.get("call_id") - - # If we've already extracted learning for this call_id, STOP - if call_id in extracted_learning_call_ids: - logger.info(f"Found already-processed wait_for_human call_id: {call_id}, stopping") - break - - # This is a NEW wait_for_human call - if not found_new_wait_for_human: - found_new_wait_for_human = True - new_wait_for_human_call_id = call_id - logger.info(f"Found NEW wait_for_human call_id: {call_id}") - - # If we found a new wait_for_human call, return the new context - if found_new_wait_for_human: - # Reverse back to chronological order - new_context.reverse() - logger.info(f"Extracted {len(new_context)} messages of new context") - assert new_wait_for_human_call_id is not None, "call_id should be set when found_new_wait_for_human is True" - return (new_context, new_wait_for_human_call_id) - else: - logger.info("No new wait_for_human calls found") - return None diff --git 
a/examples/demos/procurement_agent/project/utils/summarization.py b/examples/demos/procurement_agent/project/utils/summarization.py deleted file mode 100644 index b74ad1e3..00000000 --- a/examples/demos/procurement_agent/project/utils/summarization.py +++ /dev/null @@ -1,205 +0,0 @@ -""" -Summarization utility for managing conversation context. - -This module provides functionality to detect when conversation history exceeds -token limits and should be summarized. Follows OpenCode's approach of stopping -at previous summaries to avoid re-summarizing already condensed content. -""" -from typing import Any, Dict, List, Tuple, Optional - -import tiktoken - -from agentex.lib.utils.logging import make_logger - -logger = make_logger(__name__) - -# Configuration constants -SUMMARIZATION_TOKEN_THRESHOLD = 40000 # Trigger summarization at 40k tokens -PRESERVE_LAST_N_TURNS = 10 # Always keep last 10 user turns in full - - -def estimate_tokens(text: str) -> int: - """ - Estimate the number of tokens in a text string using tiktoken. - - Args: - text: The text to estimate tokens for - - Returns: - Estimated token count - """ - try: - encoding = tiktoken.encoding_for_model("gpt-4o") - return len(encoding.encode(text)) - except Exception as e: - # Fallback to rough estimation if tiktoken fails - logger.warning(f"Token estimation failed, using fallback: {e}") - return len(text) // 4 # Rough approximation - - -def should_summarize(input_list: List[Dict[str, Any]]) -> bool: - """ - Check if the conversation history exceeds the token threshold and needs summarization. - - Args: - input_list: The conversation history - - Returns: - True if summarization should be triggered - """ - total_tokens = 0 - - for item in input_list: - if isinstance(item, dict): - # Estimate tokens for the entire item (JSON serialized) - item_str = str(item) - total_tokens += estimate_tokens(item_str) - - logger.info(f"Total conversation tokens: {total_tokens}") - - if total_tokens > SUMMARIZATION_TOKEN_THRESHOLD: - logger.info(f"Token threshold exceeded ({total_tokens} > {SUMMARIZATION_TOKEN_THRESHOLD}), summarization needed") - return True - - return False - - -def get_messages_to_summarize( - input_list: List[Dict[str, Any]], - last_summary_index: Optional[int] -) -> Tuple[List[Dict[str, Any]], int, int]: - """ - Get the portion of conversation that should be summarized, following OpenCode's approach. 
- - Strategy: - - If there's a previous summary, start from AFTER it (never re-summarize summaries) - - Find last N user turns and preserve them - - Return everything in between for summarization - - Args: - input_list: The full conversation history - last_summary_index: Index of the last summary message (None if no prior summary) - - Returns: - Tuple of (messages_to_summarize, start_index, end_index) - - messages_to_summarize: The slice of conversation to summarize - - start_index: Where the summarization range starts - - end_index: Where the summarization range ends (exclusive) - """ - # Find all user turn indices - user_turn_indices = [] - for i, item in enumerate(input_list): - if isinstance(item, dict) and item.get("role") == "user": - user_turn_indices.append(i) - - # Determine the start index (after last summary, or from beginning) - if last_summary_index is not None: - start_index = last_summary_index + 1 # Start AFTER the summary - logger.info(f"Starting summarization after previous summary at index {last_summary_index}") - else: - start_index = 0 - logger.info("No previous summary found, starting from beginning") - - # Determine the end index (preserve last N turns) - if len(user_turn_indices) >= PRESERVE_LAST_N_TURNS: - # Find the Nth-from-last user turn - preserve_from_index = user_turn_indices[-PRESERVE_LAST_N_TURNS] - end_index = preserve_from_index - logger.info(f"Preserving last {PRESERVE_LAST_N_TURNS} turns from index {preserve_from_index}") - else: - # Not enough turns to preserve, summarize nothing - end_index = start_index - logger.warning(f"Only {len(user_turn_indices)} user turns, not enough to summarize (need more than {PRESERVE_LAST_N_TURNS})") - - # Extract the messages to summarize - if end_index <= start_index: - logger.info("No messages to summarize (end_index <= start_index)") - return [], start_index, end_index - - messages_to_summarize = input_list[start_index:end_index] - logger.info(f"Summarizing {len(messages_to_summarize)} messages from index {start_index} to {end_index}") - - return messages_to_summarize, start_index, end_index - - -def create_summary_message(summary_text: str) -> Dict[str, Any]: - """ - Create a summary message in the input_list format. - - Args: - summary_text: The AI-generated summary text - - Returns: - A dictionary representing the summary message - """ - return { - "role": "assistant", - "content": summary_text, - "_summary": True, # Mark this as a summary message - } - - -def create_resume_message() -> Dict[str, Any]: - """ - Create a resume message that instructs the AI to continue from the summary. - - Returns: - A dictionary representing the resume instruction - """ - return { - "role": "user", - "content": "Use the above summary to continue from where we left off.", - "_synthetic": True, # Mark as system-generated - } - - -def apply_summary_to_input_list( - input_list: List[Dict[str, Any]], - summary_text: str, - start_index: int, - end_index: int -) -> List[Dict[str, Any]]: - """ - Replace the summarized portion of input_list with the summary message. 
- - Args: - input_list: The original conversation history - summary_text: The AI-generated summary - start_index: Start of summarized range - end_index: End of summarized range - - Returns: - New input_list with summary applied - """ - # Build new input list: [before summary] + [summary] + [resume] + [after summary] - before_summary = input_list[:start_index] if start_index > 0 else [] - after_summary = input_list[end_index:] - - summary_msg = create_summary_message(summary_text) - resume_msg = create_resume_message() - - new_input_list = before_summary + [summary_msg, resume_msg] + after_summary - - logger.info(f"Applied summary: reduced from {len(input_list)} to {len(new_input_list)} messages") - - return new_input_list - - -def find_last_summary_index(input_list: List[Dict[str, Any]]) -> Optional[int]: - """ - Find the index of the last summary message in the conversation. - - Args: - input_list: The conversation history - - Returns: - Index of the last summary message, or None if no summary exists - """ - for i in range(len(input_list) - 1, -1, -1): - item = input_list[i] - if isinstance(item, dict) and item.get("_summary") is True: - logger.info(f"Found last summary at index {i}") - return i - - logger.info("No previous summary found") - return None diff --git a/examples/demos/procurement_agent/project/workflow.py b/examples/demos/procurement_agent/project/workflow.py deleted file mode 100644 index 30b94b4b..00000000 --- a/examples/demos/procurement_agent/project/workflow.py +++ /dev/null @@ -1,403 +0,0 @@ -import json -import asyncio -from typing import Any, Dict, List, override -from datetime import timedelta - -from agents import Runner -from pydantic import BaseModel -from temporalio import workflow -from temporalio.common import RetryPolicy -from temporalio.exceptions import ApplicationError - -from agentex.lib import adk -from agentex.lib.types.acp import SendEventParams, CreateTaskParams -from project.models.events import ( - EventType, - InspectionFailedEvent, - InspectionPassedEvent, - SubmitalApprovalEvent, - ShipmentArrivedSiteEvent, - ShipmentDepartedFactoryEvent, -) -from agentex.lib.utils.logging import make_logger -from agentex.types.data_content import DataContent -from agentex.types.text_content import TextContent -from project.utils.summarization import ( - should_summarize, - find_last_summary_index, - get_messages_to_summarize, - apply_summary_to_input_list, -) -from project.activities.activities import get_master_construction_schedule, create_master_construction_schedule -from project.agents.procurement_agent import new_procurement_agent -from agentex.lib.environment_variables import EnvironmentVariables -from project.utils.learning_extraction import get_new_wait_for_human_context -from project.agents.summarization_agent import new_summarization_agent -from project.agents.extract_learnings_agent import new_extract_learnings_agent -from agentex.lib.core.temporal.types.workflow import SignalName -from agentex.lib.core.temporal.workflows.workflow import BaseWorkflow -from agentex.lib.core.temporal.plugins.openai_agents.hooks.hooks import TemporalStreamingHooks - -environment_variables = EnvironmentVariables.refresh() - -if environment_variables.WORKFLOW_NAME is None: - raise ValueError("Environment variable WORKFLOW_NAME is not set") - -if environment_variables.AGENT_NAME is None: - raise ValueError("Environment variable AGENT_NAME is not set") - -logger = make_logger(__name__) - -class StateModel(BaseModel): - """ - State model for preserving conversation history. 
- - This allows the agent to maintain context throughout the conversation, - making it possible to reference previous messages and build on the discussion. - """ - input_list: List[Dict[str, Any]] - - -@workflow.defn(name=environment_variables.WORKFLOW_NAME) -class ProcurementAgentWorkflow(BaseWorkflow): - """ - Minimal async workflow template for AgentEx Temporal agents. - """ - def __init__(self): - super().__init__(display_name=environment_variables.AGENT_NAME) - self._complete_task = False - self._task_id = None - self._trace_id = None - self._parent_span_id = None - self._state = None - self._workflow_started = False # Track if agent workflow loop has started - self.event_queue: asyncio.Queue = asyncio.Queue() # Events - self.human_queue: asyncio.Queue = asyncio.Queue() # Human input - self.human_input_learnings: list = [] - self.extracted_learning_call_ids: set = set() # Track which wait_for_human calls we've extracted learnings from - - # Define activity retry policy with exponential backoff - # Based on Temporal best practices from blog post - self.activity_retry_policy = RetryPolicy( - initial_interval=timedelta(seconds=1), - backoff_coefficient=2.0, # Exponential backoff - maximum_interval=timedelta(seconds=120), # Cap at 2 minutes - maximum_attempts=5, - non_retryable_error_types=[ - "DataCorruptionError", - "ScheduleNotFoundError", - ] - ) - - @workflow.signal(name=SignalName.RECEIVE_EVENT) - @override - async def on_task_event_send(self, params: SendEventParams) -> None: - """ - Handle incoming events from the frontend. - - First event: Triggers the initial agent workflow execution. - Subsequent events: Feed the wait_for_human tool's human_queue. - """ - if self._state is None: - raise ValueError("State is not initialized") - - if params.event.content is None: - workflow.logger.warning("Received event with no content") - return - - # Display the user's message in the UI - await adk.messages.create(task_id=params.task.id, content=params.event.content) - - # After the first event, all subsequent events are human responses to wait_for_human - if self._workflow_started: - # Extract text content and put it in the human_queue for wait_for_human tool - if isinstance(params.event.content, TextContent): - await self.human_queue.put(params.event.content.content) - - @workflow.run - @override - async def on_task_create(self, params: CreateTaskParams) -> str: - logger.info(f"Received task create params: {params}") - - self._state = StateModel(input_list=[]) - - self._task_id = params.task.id - self._trace_id = params.task.id - self._parent_span_id = params.task.id - - workflow_id = workflow.info().workflow_id - - # Create the master construction schedule with error handling - try: - await workflow.execute_activity( - create_master_construction_schedule, - workflow_id, - start_to_close_timeout=timedelta(minutes=5), # Changed from 10s to 5min - schedule_to_close_timeout=timedelta(minutes=10), - retry_policy=self.activity_retry_policy, - ) - logger.info("Master construction schedule created successfully") - - except ApplicationError as e: - # Non-retryable application error (invalid data) - logger.error(f"Failed to create schedule: {e}") - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content="Failed to initialize project schedule. 
Please contact support.", - ), - ) - raise # Fail the workflow - - except Exception as e: - # Unexpected error - logger.error(f"Unexpected error creating schedule: {e}") - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content="System error during initialization. Please try creating a new task.", - ), - ) - raise - - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content="Welcome to the Procurement Agent! I'll help you manage construction deliveries and schedules. Send events to get started.", - ), - ) - - # Mark workflow as started - subsequent events will feed the human_queue - self._workflow_started = True - - while True: - await workflow.wait_condition( - lambda: not self.event_queue.empty(), - timeout=None, - ) - - if not self.event_queue.empty(): - event = await self.event_queue.get() - - await adk.messages.create(task_id=params.task.id, content=DataContent( - author="user", - data=json.loads(event), - )) - - self._state.input_list.append({ - "role": "user", - "content": event, - }) - - # Get master construction schedule with error handling - try: - master_construction_schedule = await workflow.execute_activity( - get_master_construction_schedule, - workflow_id, - start_to_close_timeout=timedelta(minutes=2), # Changed from 10s to 2min - schedule_to_close_timeout=timedelta(minutes=5), - retry_policy=self.activity_retry_policy, - ) - except ApplicationError as e: - # Non-retryable error (schedule not found or corrupted) - logger.error(f"Failed to retrieve schedule for event processing: {e}") - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content="Unable to access project schedule. Please reinitialize the workflow.", - ), - ) - continue # Skip this event, wait for next one - - except Exception as e: - # Unexpected error retrieving schedule - logger.error(f"Unexpected error retrieving schedule: {e}") - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content="Temporary system issue. 
Retrying event processing...", - ), - ) - continue # Skip this event, wait for next one - - # Create agent and execute with error handling - try: - procurement_agent = new_procurement_agent( - master_construction_schedule=master_construction_schedule, - human_input_learnings=self.human_input_learnings - ) - - hooks = TemporalStreamingHooks(task_id=params.task.id) - - # Execute agent with graceful degradation pattern (from temporal-community demos) - result = await Runner.run(procurement_agent, self._state.input_list, hooks=hooks) # type: ignore[arg-type] - - # Update state with result - self._state.input_list = result.to_input_list() # type: ignore[assignment] - logger.info("Successfully processed event") - # Extract learnings from NEW wait_for_human calls only (using going backwards approach) - try: - result_context = get_new_wait_for_human_context( - full_conversation=self._state.input_list, - extracted_learning_call_ids=self.extracted_learning_call_ids, - ) - - if result_context is not None: - new_context, call_id = result_context - logger.info("Found new wait_for_human call, extracting learning...") - - # Create extraction agent and run with only the NEW context - extract_agent = new_extract_learnings_agent() - extraction_result = await Runner.run(extract_agent, new_context, hooks=hooks) # type: ignore[arg-type] - - logger.info(f"About to extract learning: {extraction_result.final_output}") - # Append the learning and track the call_id - learning = extraction_result.final_output - if learning: - self.human_input_learnings.append(learning) - self.extracted_learning_call_ids.add(call_id) - logger.info(f"Extracted learning: {learning}") - - except Exception as e: - logger.error(f"Failed to extract learning: {e}") - - # Check if summarization is needed (after learning extraction) - try: - if should_summarize(self._state.input_list): - logger.info("Token threshold exceeded, starting summarization...") - - # Find the last summary index - last_summary_index = find_last_summary_index(self._state.input_list) - - # Get messages to summarize (excludes last 10 turns, starts after previous summary) - messages_to_summarize, start_index, end_index = get_messages_to_summarize( - self._state.input_list, - last_summary_index - ) - - if messages_to_summarize: - logger.info(f"Summarizing {len(messages_to_summarize)} messages...") - - # Create summarization agent and run - summary_agent = new_summarization_agent() - summary_result = await Runner.run(summary_agent, messages_to_summarize, hooks=hooks) # type: ignore[arg-type] - - summary_text = summary_result.final_output - if summary_text: - # Apply summary to input_list - self._state.input_list = apply_summary_to_input_list( - self._state.input_list, - summary_text, - start_index, - end_index - ) - logger.info(f"Summarization complete, new input_list length: {len(self._state.input_list)}") - else: - logger.warning("Summarization produced no output") - else: - logger.info("No messages to summarize (not enough turns yet)") - - except Exception as e: - logger.error(f"Failed to summarize conversation: {e}") - - except Exception as e: - # Agent execution failed - graceful degradation - logger.error(f"Agent execution failed processing event: {e}") - - # Notify that event couldn't be processed - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content="Unable to process this event. The issue has been logged. 
Please try sending another event.", - ), - ) - - # Don't crash workflow - continue and wait for next event - continue - - if self._complete_task: - return "Task completed" - - @workflow.signal - async def complete_task_signal(self) -> None: - logger.info("Received signal to complete the agent conversation") - self._complete_task = True - - @workflow.signal - async def send_event(self, event: str) -> None: - """ - Receives event strings from external systems with validation. - Events should be JSON strings with event_type and required fields. - Example: {"event_type":"Submittal_Approved","item":"Steel Beams"} - """ - # Validate event is not None or empty - if not event: - logger.error("Received empty or None event") - raise ValueError("Event cannot be empty or None") - - # Validate event is a string - if not isinstance(event, str): - logger.error(f"Event must be string, got {type(event)}") - raise ValueError(f"Event must be a string, received {type(event).__name__}") - - # Validate event length (prevent DoS) - if len(event) > 50000: # 50KB limit - logger.error(f"Event too large: {len(event)} characters") - raise ValueError(f"Event exceeds maximum size (50KB)") - - # Validate event is valid JSON - try: - event_data = json.loads(event) - except json.JSONDecodeError as e: - logger.error(f"Event is not valid JSON: {e}") - raise ValueError(f"Event must be valid JSON: {e}") from e - - # Validate event has required structure - if not isinstance(event_data, dict): - logger.error(f"Event JSON must be an object, got {type(event_data)}") - raise ValueError("Event must be a JSON object") - - # Validate event_type field exists - if "event_type" not in event_data: - logger.error("Event missing 'event_type' field") - raise ValueError("Event must contain 'event_type' field") - - # Validate event_type is one of the allowed types - event_type_str = event_data["event_type"] - valid_event_types = [e.value for e in EventType] - - if event_type_str not in valid_event_types: - logger.error(f"Invalid event_type: {event_type_str}. Valid types: {valid_event_types}") - raise ValueError( - f"Invalid event_type '{event_type_str}'. 
" - f"Must be one of: {', '.join(valid_event_types)}" - ) - - # Validate event structure based on type using Pydantic models - try: - if event_type_str == EventType.SUBMITTAL_APPROVED.value: - SubmitalApprovalEvent(**event_data) - elif event_type_str == EventType.SHIPMENT_DEPARTED_FACTORY.value: - ShipmentDepartedFactoryEvent(**event_data) - elif event_type_str == EventType.SHIPMENT_ARRIVED_SITE.value: - ShipmentArrivedSiteEvent(**event_data) - elif event_type_str == EventType.INSPECTION_FAILED.value: - InspectionFailedEvent(**event_data) - elif event_type_str == EventType.INSPECTION_PASSED.value: - InspectionPassedEvent(**event_data) - elif event_type_str == EventType.HUMAN_INPUT.value: - # HUMAN_INPUT doesn't have a specific model, just needs event_type - pass - - except Exception as e: - logger.error(f"Event validation failed for {event_type_str}: {e}") - raise ValueError(f"Invalid event structure for {event_type_str}: {e}") from e - - logger.info(f"Validated event type: {event_type_str}") - await self.event_queue.put(event) \ No newline at end of file diff --git a/examples/demos/procurement_agent/pyproject.toml b/examples/demos/procurement_agent/pyproject.toml deleted file mode 100644 index 7ccbf80e..00000000 --- a/examples/demos/procurement_agent/pyproject.toml +++ /dev/null @@ -1,36 +0,0 @@ -[build-system] -requires = ["hatchling"] -build-backend = "hatchling.build" - -[project] -name = "procurement_agent" -version = "0.1.0" -description = "An Agentex agent that manages procurement for building constructions" -requires-python = ">=3.12" -dependencies = [ - "agentex-sdk>=0.6.5", - "openai-agents>=0.4.2", - "temporalio>=1.18.2", - "scale-gp", - "aiosqlite", -] - -[project.optional-dependencies] -dev = [ - "pytest", - "black", - "isort", - "flake8", - "debugpy>=1.8.15", -] - -[tool.hatch.build.targets.wheel] -packages = ["project"] - -[tool.black] -line-length = 88 -target-version = ['py312'] - -[tool.isort] -profile = "black" -line_length = 88 \ No newline at end of file diff --git a/examples/launch-tutorials.sh b/examples/launch-tutorials.sh deleted file mode 100755 index 024d9ac1..00000000 --- a/examples/launch-tutorials.sh +++ /dev/null @@ -1,341 +0,0 @@ -#!/bin/bash - -# AgentEx Tutorial Launcher -# This script helps you easily launch and test all tutorials in the repository -# -# Usage: -# ./launch-tutorials.sh # Show interactive menu -# ./launch-tutorials.sh 1 # Launch tutorial #1 directly -# ./launch-tutorials.sh a # Launch all tutorials with confirmations -# ./launch-tutorials.sh c # Clean up orphaned tutorial processes -# -# Note: Excludes 90_multi_agent_non_temporal (use its own start-agents.sh) - -# Simple cleanup function for orphaned processes -cleanup() { - # Kill any remaining agentex or uvicorn processes from tutorials - local agentex_pids=$(pgrep -f "agentex agents run.*tutorials" 2>/dev/null || true) - if [[ -n "$agentex_pids" ]]; then - echo "$agentex_pids" | xargs kill -TERM 2>/dev/null || true - sleep 1 - echo "$agentex_pids" | xargs kill -KILL 2>/dev/null || true - fi - - local uvicorn_pids=$(pgrep -f "uvicorn.*project\." 
2>/dev/null || true) - if [[ -n "$uvicorn_pids" ]]; then - echo "$uvicorn_pids" | xargs kill -TERM 2>/dev/null || true - sleep 1 - echo "$uvicorn_pids" | xargs kill -KILL 2>/dev/null || true - fi -} - -# Color codes for output -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -BLUE='\033[0;34m' -NC='\033[0m' # No Color - -# Tutorial definitions -declare -a TUTORIALS=( - "tutorials/00_sync/000_hello_acp|Basic Hello ACP (Sync)" - "tutorials/00_sync/010_multiturn|Multi-turn Chat (Sync)" - "tutorials/00_sync/020_streaming|Streaming Response (Sync)" - "tutorials/10_async/00_base/000_hello_acp|Basic Hello ACP (Async)" - "tutorials/10_async/00_base/010_multiturn|Multi-turn Chat (Async)" - "tutorials/10_async/00_base/020_streaming|Streaming Response (Async)" - "tutorials/10_async/00_base/030_tracing|Tracing Example (Async)" - "tutorials/10_async/00_base/040_other_sdks|Other SDKs Integration (Async)" - "tutorials/10_async/00_base/080_batch_events|Batch Events (Async)" - "tutorials/10_async/10_temporal/000_hello_acp|Basic Hello ACP (Temporal)" - "tutorials/10_async/10_temporal/010_agent_chat|Agent Chat (Temporal)" - "tutorials/10_async/10_temporal/020_state_machine|State Machine (Temporal)" -) - -# Function to print colored output -print_colored() { - local color=$1 - local message=$2 - # Check if terminal supports colors - if [[ -t 1 ]] && command -v tput >/dev/null 2>&1; then - printf "${color}%s${NC}\n" "$message" - else - printf "%s\n" "$message" - fi -} - -# Function to display the menu -show_menu() { - print_colored $BLUE "โ•”โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•—" - print_colored $BLUE "โ•‘ AgentEx Tutorial Launcher โ•‘" - print_colored $BLUE "โ•šโ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•" - echo "" - print_colored $YELLOW "Available tutorials:" - echo "" - - local index=1 - for tutorial in "${TUTORIALS[@]}"; do - IFS='|' read -r path description <<< "$tutorial" - if [[ -t 1 ]] && command -v tput >/dev/null 2>&1; then - printf "${GREEN}%2d.${NC} %s\n" $index "$description" - else - printf "%2d. %s\n" $index "$description" - fi - index=$((index + 1)) - done - - echo "" - print_colored $BLUE "Other options:" - print_colored $GREEN " a. Run all tutorials sequentially (with confirmations)" - print_colored $GREEN " c. Clean up any orphaned tutorial processes" - print_colored $GREEN " q. Quit" - echo "" - print_colored $YELLOW "๐Ÿ“Œ Note: The multi-agent system tutorial (tutorials/10_async/90_multi_agent_non_temporal) is excluded" - print_colored $YELLOW " as it has a special launch process. Use its own start-agents.sh script." 
- echo "" -} - -# Function to run a specific tutorial -run_tutorial() { - local tutorial_index=$1 - local tutorial_info="${TUTORIALS[$((tutorial_index - 1))]}" - IFS='|' read -r path description <<< "$tutorial_info" - - local manifest_path="${path}/manifest.yaml" - - print_colored $BLUE "โ•”โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•—" - printf "โ•‘ Running: %-54s โ•‘\n" "$description" - print_colored $BLUE "โ•šโ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•" - - if [[ ! -f "$manifest_path" ]]; then - print_colored $RED "โŒ Error: Manifest file not found at $manifest_path" - return 1 - fi - - print_colored $YELLOW "๐Ÿ“‚ Tutorial path: $path" - print_colored $YELLOW "๐Ÿ“„ Manifest: $manifest_path" - echo "" - print_colored $GREEN "๐Ÿš€ Executing: cd .. && uv run agentex agents run --manifest examples/$manifest_path" - print_colored $YELLOW "๐Ÿ’ก Press Ctrl+C to stop the tutorial" - echo "" - - # Run the tutorial directly (need to go to parent dir where uv project is) - # Load .env file if it exists and pass variables to the subshell - if [[ -f "../.env" ]]; then - (cd .. && set -a && source .env && set +a && uv run agentex agents run --manifest "examples/$manifest_path") - else - (cd .. && uv run agentex agents run --manifest "examples/$manifest_path") - fi - - local exit_code=$? - if [[ $exit_code -eq 0 ]]; then - print_colored $GREEN "โœ… Tutorial completed successfully!" - elif [[ $exit_code -eq 130 ]]; then - print_colored $YELLOW "๐Ÿ›‘ Tutorial was interrupted by user" - else - print_colored $RED "โŒ Tutorial failed with exit code: $exit_code" - fi - - return $exit_code -} - -# Function to run all tutorials -run_all_tutorials() { - print_colored $BLUE "๐ŸŽฏ Running all tutorials sequentially..." - echo "" - - local success_count=0 - local total_count=${#TUTORIALS[@]} - - for i in $(seq 1 $total_count); do - local tutorial_info="${TUTORIALS[$((i - 1))]}" - IFS='|' read -r path description <<< "$tutorial_info" - - print_colored $YELLOW "โ”Œโ”€ Tutorial $i/$total_count: $description" - echo "" - - # Ask for confirmation - while true; do - print_colored $BLUE "Run this tutorial? (y/n/q to quit): " - read -r response - case $response in - [Yy]* ) - if run_tutorial $i; then - success_count=$((success_count + 1)) - fi - break - ;; - [Nn]* ) - print_colored $YELLOW "โญ๏ธ Skipping tutorial $i" - break - ;; - [Qq]* ) - print_colored $YELLOW "๐Ÿ›‘ Stopping tutorial run" - echo "" - print_colored $BLUE "๐Ÿ“Š Summary: $success_count/$((i-1)) tutorials completed successfully" - return 0 - ;; - * ) - print_colored $RED "Please answer y, n, or q." - ;; - esac - done - - if [[ $i -lt $total_count ]]; then - echo "" - print_colored $BLUE "โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€" - echo "" - fi - done - - echo "" - print_colored $BLUE "๐ŸŽ‰ All tutorials completed!" - print_colored $BLUE "๐Ÿ“Š Summary: $success_count/$total_count tutorials completed successfully" -} - -# Function to manually clean up tutorial processes -manual_cleanup() { - print_colored $BLUE "๐Ÿงน Manual cleanup of tutorial processes..." 
- echo "" - - # Check for running tutorial processes - local found_processes=false - - # Check for agentex processes - local agentex_pids=$(pgrep -f "agentex agents run.*tutorials" 2>/dev/null || true) - if [[ -n "$agentex_pids" ]]; then - found_processes=true - print_colored $YELLOW "๐Ÿ” Found agentex tutorial processes:" - ps -p $agentex_pids -o pid,command 2>/dev/null || true - echo "" - fi - - # Check for uvicorn processes - local uvicorn_pids=$(pgrep -f "uvicorn.*project\." 2>/dev/null || true) - if [[ -n "$uvicorn_pids" ]]; then - found_processes=true - print_colored $YELLOW "๐Ÿ” Found uvicorn tutorial processes:" - ps -p $uvicorn_pids -o pid,command 2>/dev/null || true - echo "" - fi - - # Check for occupied ports - print_colored $YELLOW "๐Ÿ” Checking common tutorial ports (8000-8003)..." - local port_check=$(lsof -i :8000 -i :8001 -i :8002 -i :8003 2>/dev/null || true) - if [[ -n "$port_check" ]]; then - found_processes=true - echo "$port_check" - echo "" - fi - - if [[ "$found_processes" == "false" ]]; then - print_colored $GREEN "โœ… No tutorial processes found - system is clean!" - return 0 - fi - - # Ask for confirmation before cleaning - while true; do - print_colored $BLUE "Kill these processes? (y/n): " - read -r response - case $response in - [Yy]* ) - print_colored $YELLOW "๐Ÿงน Cleaning up..." - cleanup - print_colored $GREEN "โœ… Manual cleanup completed!" - break - ;; - [Nn]* ) - print_colored $YELLOW "โญ๏ธ Cleanup cancelled" - break - ;; - * ) - print_colored $RED "Please answer y or n." - ;; - esac - done -} - -# Function to validate tutorial number -validate_tutorial_number() { - local num=$1 - if [[ ! "$num" =~ ^[0-9]+$ ]] || [[ $num -lt 1 ]] || [[ $num -gt ${#TUTORIALS[@]} ]]; then - return 1 - fi - return 0 -} - -# Main script logic -main() { - # Check if we're in the right directory - if [[ ! -f "../pyproject.toml" ]] || [[ ! -d "tutorials" ]]; then - print_colored $RED "โŒ Error: This script must be run from the examples directory" - print_colored $YELLOW "๐Ÿ’ก Current directory: $(pwd)" - print_colored $YELLOW "๐Ÿ’ก Expected files: ../pyproject.toml, tutorials/" - exit 1 - fi - - # If a tutorial number is provided as argument - if [[ $# -eq 1 ]]; then - local tutorial_num=$1 - - if [[ "$tutorial_num" == "a" ]] || [[ "$tutorial_num" == "all" ]]; then - run_all_tutorials - exit 0 - elif [[ "$tutorial_num" == "c" ]] || [[ "$tutorial_num" == "cleanup" ]]; then - manual_cleanup - exit 0 - fi - - if validate_tutorial_number "$tutorial_num"; then - run_tutorial "$tutorial_num" - exit $? - else - print_colored $RED "โŒ Error: Invalid tutorial number '$tutorial_num'" - print_colored $YELLOW "๐Ÿ’ก Valid range: 1-${#TUTORIALS[@]}" - exit 1 - fi - fi - - # Interactive mode - while true; do - show_menu - print_colored $BLUE "Enter your choice (1-${#TUTORIALS[@]}, a, c, or q): " - read -r choice - - case $choice in - [Qq]* ) - print_colored $YELLOW "๐Ÿ‘‹ Goodbye!" - exit 0 - ;; - [Aa]* ) - echo "" - run_all_tutorials - echo "" - ;; - [Cc]* ) - echo "" - manual_cleanup - echo "" - print_colored $BLUE "Press Enter to continue..." - read -r - ;; - * ) - if validate_tutorial_number "$choice"; then - echo "" - run_tutorial "$choice" - echo "" - print_colored $BLUE "Press Enter to continue..." 
- read -r - else - print_colored $RED "โŒ Invalid choice: '$choice'" - print_colored $YELLOW "๐Ÿ’ก Please enter a number between 1 and ${#TUTORIALS[@]}, 'a' for all, 'c' for cleanup, or 'q' to quit" - fi - ;; - esac - - echo "" - done -} - -# Run the main function -main "$@" \ No newline at end of file diff --git a/examples/tutorials/00_sync/000_hello_acp/.dockerignore b/examples/tutorials/00_sync/000_hello_acp/.dockerignore deleted file mode 100644 index c2d7fca4..00000000 --- a/examples/tutorials/00_sync/000_hello_acp/.dockerignore +++ /dev/null @@ -1,43 +0,0 @@ -# Python -__pycache__/ -*.py[cod] -*$py.class -*.so -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -*.egg-info/ -.installed.cfg -*.egg - -# Environments -.env** -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# IDE -.idea/ -.vscode/ -*.swp -*.swo - -# Git -.git -.gitignore - -# Misc -.DS_Store diff --git a/examples/tutorials/00_sync/000_hello_acp/Dockerfile b/examples/tutorials/00_sync/000_hello_acp/Dockerfile deleted file mode 100644 index b91d1339..00000000 --- a/examples/tutorials/00_sync/000_hello_acp/Dockerfile +++ /dev/null @@ -1,51 +0,0 @@ -# syntax=docker/dockerfile:1.3 -FROM python:3.12-slim -COPY --from=ghcr.io/astral-sh/uv:0.6.4 /uv /uvx /bin/ - -# Install system dependencies -RUN apt-get update && apt-get install -y \ - htop \ - vim \ - curl \ - tar \ - python3-dev \ - postgresql-client \ - build-essential \ - libpq-dev \ - gcc \ - cmake \ - netcat-openbsd \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - -RUN uv pip install --system --upgrade pip setuptools wheel - -ENV UV_HTTP_TIMEOUT=1000 - - -# Copy pyproject.toml and README.md to install dependencies -COPY 00_sync/000_hello_acp/pyproject.toml /app/000_hello_acp/pyproject.toml -COPY 00_sync/000_hello_acp/README.md /app/000_hello_acp/README.md - -WORKDIR /app/000_hello_acp - -# Copy the project code -COPY 00_sync/000_hello_acp/project /app/000_hello_acp/project - -# Copy the test files -COPY 00_sync/000_hello_acp/tests /app/000_hello_acp/tests - -# Copy shared test utilities -COPY test_utils /app/test_utils - -# Install the required Python packages with dev dependencies -RUN uv pip install --system .[dev] - -# Set environment variables -ENV PYTHONPATH=/app - -# Set test environment variables -ENV AGENT_NAME=000-hello-acp - -# Run the agent using uvicorn -CMD ["uvicorn", "project.acp:acp", "--host", "0.0.0.0", "--port", "8000"] diff --git a/examples/tutorials/00_sync/000_hello_acp/README.md b/examples/tutorials/00_sync/000_hello_acp/README.md deleted file mode 100644 index b007cc56..00000000 --- a/examples/tutorials/00_sync/000_hello_acp/README.md +++ /dev/null @@ -1,44 +0,0 @@ -# [Sync] Hello ACP - -This is a simple AgentEx agent that just says hello and acknowledges the user's message to show which ACP methods need to be implemented for the sync ACP type. -The simplest agent type: synchronous request/response pattern with a single `@acp.on_message_send` handler. Best for stateless operations that complete immediately. 
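Once the agent is running (see Quick Start below), you can exercise it from a Python client. The following is a minimal sketch based on the calls used in this tutorial's `dev.ipynb`; it assumes the default local gateway at `http://localhost:5003` and the agent name `s000-hello-acp` from the manifest:

```python
from agentex import Agentex
from agentex.types import TextContent

# Default local gateway used in this repo's notebooks; adjust if your setup differs.
client = Agentex(base_url="http://localhost:5003")

rpc_response = client.agents.send_message(
    agent_name="s000-hello-acp",
    params={
        "content": {"type": "text", "author": "user", "content": "Hello!"},
        "stream": False,
    },
)

# The result is a list of TaskMessage objects; print any text content.
for task_message in rpc_response.result or []:
    if isinstance(task_message.content, TextContent):
        print(task_message.content.content)
```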
- -## What You'll Learn -- Building a basic synchronous agent -- The `@acp.on_message_send` handler pattern -- When to use sync vs async agents - -## Prerequisites -- Development environment set up (see [main repo README](https://github.com/scaleapi/scale-agentex)) -- Backend services running: `make dev` from repository (agentex) root - -## Quick Start - -```bash -cd examples/tutorials/00_sync/000_hello_acp -uv run agentex agents run --manifest manifest.yaml -``` - -## Key Code - -```python -@acp.on_message_send -async def handle_message_send(params: SendMessageParams): - return TextContent( - author="agent", - content=f"Echo: {params.content.content}" - ) -``` - -That's it - one handler, immediate response. No task creation, no state management. - -## When to Use -- Simple chatbots with no memory requirements -- Quick Q&A or information lookup agents -- Prototyping and testing agent responses -- Operations that complete in under a second - -## Why This Matters -Sync agents are the simplest way to get started with AgentEx. They're perfect for learning the basics and building stateless agents. Once you need conversation memory or task tracking, you'll graduate to async agents. - -**Next:** [010_multiturn](../010_multiturn/) - Add conversation memory to your agent diff --git a/examples/tutorials/00_sync/000_hello_acp/dev.ipynb b/examples/tutorials/00_sync/000_hello_acp/dev.ipynb deleted file mode 100644 index a50a29f3..00000000 --- a/examples/tutorials/00_sync/000_hello_acp/dev.ipynb +++ /dev/null @@ -1,158 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "from agentex import Agentex\n", - "\n", - "client = Agentex(base_url=\"http://localhost:5003\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1", - "metadata": {}, - "outputs": [], - "source": [ - "AGENT_NAME = \"s000-hello-acp\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2", - "metadata": {}, - "outputs": [], - "source": [ - "# # (Optional) Create a new task. If you don't create a new task, each message will be sent to a new task. 
The server will create the task for you.\n", - "\n", - "# import uuid\n", - "\n", - "# TASK_ID = str(uuid.uuid4())[:8]\n", - "\n", - "# rpc_response = client.agents.rpc_by_name(\n", - "# agent_name=AGENT_NAME,\n", - "# method=\"task/create\",\n", - "# params={\n", - "# \"name\": f\"{TASK_ID}-task\",\n", - "# \"params\": {}\n", - "# }\n", - "# )\n", - "\n", - "# task = rpc_response.result\n", - "# print(task)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": {}, - "outputs": [], - "source": [ - "# Test non streaming response\n", - "from agentex.types import TextContent\n", - "\n", - "# The response is expected to be a list of TaskMessage objects, which is a union of the following types:\n", - "# - TextContent: A message with just text content \n", - "# - DataContent: A message with JSON-serializable data content\n", - "# - ToolRequestContent: A message with a tool request, which contains a JSON-serializable request to call a tool\n", - "# - ToolResponseContent: A message with a tool response, which contains response object from a tool call in its content\n", - "\n", - "# When processing the message/send response, if you are expecting more than TextContent, such as DataContent, ToolRequestContent, or ToolResponseContent, you can process them as well\n", - "\n", - "rpc_response = client.agents.send_message(\n", - " agent_name=AGENT_NAME,\n", - " params={\n", - " \"content\": {\"type\": \"text\", \"author\": \"user\", \"content\": \"Hello what can you do?\"},\n", - " \"stream\": False\n", - " }\n", - ")\n", - "\n", - "if not rpc_response or not rpc_response.result:\n", - " raise ValueError(\"No result in response\")\n", - "\n", - "# Extract and print just the text content from the response\n", - "for task_message in rpc_response.result:\n", - " content = task_message.content\n", - " if isinstance(content, TextContent):\n", - " text = content.content\n", - " print(text)\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4", - "metadata": {}, - "outputs": [], - "source": [ - "# Test streaming response\n", - "from agentex.types.text_delta import TextDelta\n", - "from agentex.types.task_message_update import StreamTaskMessageFull, StreamTaskMessageDelta\n", - "\n", - "# The result object of message/send will be a TaskMessageUpdate which is a union of the following types:\n", - "# - StreamTaskMessageStart: \n", - "# - An indicator that a streaming message was started, doesn't contain any useful content\n", - "# - StreamTaskMessageDelta: \n", - "# - A delta of a streaming message, contains the text delta to aggregate\n", - "# - StreamTaskMessageDone: \n", - "# - An indicator that a streaming message was done, doesn't contain any useful content\n", - "# - StreamTaskMessageFull: \n", - "# - A non-streaming message, there is nothing to aggregate, since this contains the full message, not deltas\n", - "\n", - "# When processing StreamTaskMessageDelta, if you are expecting more than TextDeltas, such as DataDelta, ToolRequestDelta, or ToolResponseDelta, you can process them as well\n", - "# When processing StreamTaskMessageFull, if you are expecting more than TextContent, such as DataContent, ToolRequestContent, or ToolResponseContent, you can process them as well\n", - "\n", - "for agent_rpc_response_chunk in client.agents.send_message_stream(\n", - " agent_name=AGENT_NAME,\n", - " params={\n", - " \"content\": {\"type\": \"text\", \"author\": \"user\", \"content\": \"Hello what can you do?\"},\n", - " \"stream\": True\n", - " }\n", - 
"):\n", - " # We know that the result of the message/send when stream is set to True will be a TaskMessageUpdate\n", - " task_message_update = agent_rpc_response_chunk.result\n", - " # Print oly the text deltas as they arrive or any full messages\n", - " if isinstance(task_message_update, StreamTaskMessageDelta):\n", - " delta = task_message_update.delta\n", - " if isinstance(delta, TextDelta):\n", - " print(delta.text_delta, end=\"\", flush=True)\n", - " else:\n", - " print(f\"Found non-text {type(task_message)} object in streaming message.\")\n", - " elif isinstance(task_message_update, StreamTaskMessageFull):\n", - " content = task_message_update.content\n", - " if isinstance(content, TextContent):\n", - " print(content.content)\n", - " else:\n", - " print(f\"Found non-text {type(task_message)} object in full message.\")\n" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.9" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/examples/tutorials/00_sync/000_hello_acp/manifest.yaml b/examples/tutorials/00_sync/000_hello_acp/manifest.yaml deleted file mode 100644 index 37214b06..00000000 --- a/examples/tutorials/00_sync/000_hello_acp/manifest.yaml +++ /dev/null @@ -1,120 +0,0 @@ -# Agent Manifest Configuration -# --------------------------- -# This file defines how your agent should be built and deployed. - -# Build Configuration -# ------------------ -# The build config defines what gets packaged into your agent's Docker image. -# This same configuration is used whether building locally or remotely. -# -# When building: -# 1. All files from include_paths are collected into a build context -# 2. The context is filtered by dockerignore rules -# 3. The Dockerfile uses this context to build your agent's image -# 4. 
The image is pushed to a registry and used to run your agent -build: - context: - # Root directory for the build context - root: ../../ # Up to tutorials level to include test_utils - - # Paths to include in the Docker build context - # Must include: - # - Your agent's directory (your custom agent code) - # These paths are collected and sent to the Docker daemon for building - include_paths: - - 00_sync/000_hello_acp - - test_utils - - # Path to your agent's Dockerfile - # This defines how your agent's image is built from the context - # Relative to the root directory - dockerfile: 00_sync/000_hello_acp/Dockerfile - - # Path to your agent's .dockerignore - # Filters unnecessary files from the build context - # Helps keep build context small and builds fast - dockerignore: 00_sync/000_hello_acp/.dockerignore - -# Local Development Configuration -# ----------------------------- -# Only used when running the agent locally -local_development: - agent: - port: 8000 # Port where your local ACP server is running - host_address: host.docker.internal # Host address for Docker networking (host.docker.internal for Docker, localhost for direct) - - # File paths for local development (relative to this manifest.yaml) - paths: - # Path to ACP server file - # Examples: - # project/acp.py (standard) - # src/server.py (custom structure) - # ../shared/acp.py (shared across projects) - # /absolute/path/acp.py (absolute path) - acp: project/acp.py - -# Agent Configuration -# ----------------- -agent: - # Unique name for your agent - # Used for task routing and monitoring - name: s000-hello-acp - - # Type of ACP to use - # sync: Simple synchronous ACP implementation - # async: Asynchronous, non-blocking ACP implementation - acp_type: sync - - # Description of what your agent does - # Helps with documentation and discovery - description: An AgentEx agent that just says hello and acknowledges the user's message - - # Temporal workflow configuration - # Set enabled: true to use Temporal workflows for long-running tasks - temporal: - enabled: false - - # Optional: Credentials mapping - # Maps Kubernetes secrets to environment variables - # Common credentials include: - # credentials: - # - env_var_name: OPENAI_API_KEY - # secret_name: openai-api-key - # secret_key: api-key - - # Optional: Set Environment variables for running your agent locally as well - # as for deployment later on - # env: - # - name: OPENAI_BASE_URL - # value: "https://api.openai.com/v1" - # - name: ACCOUNT_ID - # value: "your_account_id_here" - -# Deployment Configuration -# ----------------------- -# Configuration for deploying your agent to Kubernetes clusters -deployment: - # Container image configuration - image: - repository: "" # Update with your container registry - tag: "latest" # Default tag, should be versioned in production - - # Global deployment settings that apply to all clusters - # These can be overridden in cluster-specific files (deploy/*.yaml) - global: - agent: - name: "s000-hello-acp" - description: "An AgentEx agent that just says hello and acknowledges the user's message" - - # Default replica count - replicaCount: 1 - - # Default resource requirements - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "1000m" - memory: "2Gi" - diff --git a/examples/tutorials/00_sync/000_hello_acp/project/__init__.py b/examples/tutorials/00_sync/000_hello_acp/project/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/examples/tutorials/00_sync/000_hello_acp/project/acp.py 
b/examples/tutorials/00_sync/000_hello_acp/project/acp.py deleted file mode 100644 index 63346574..00000000 --- a/examples/tutorials/00_sync/000_hello_acp/project/acp.py +++ /dev/null @@ -1,34 +0,0 @@ -from typing import Union, AsyncGenerator - -from agentex.lib.types.acp import SendMessageParams -from agentex.lib.utils.logging import make_logger -from agentex.types.task_message import TaskMessageContent -from agentex.lib.sdk.fastacp.fastacp import FastACP -from agentex.types.task_message_update import TaskMessageUpdate -from agentex.types.task_message_content import TextContent - -logger = make_logger(__name__) - -# Create an ACP server -acp = FastACP.create( - acp_type="sync", -) - - -@acp.on_message_send -async def handle_message_send( - params: SendMessageParams, -) -> Union[TaskMessageContent, AsyncGenerator[TaskMessageUpdate, None]]: - """Default message handler with streaming support""" - # Extract content safely from the message - - message_text = "" - if hasattr(params.content, "content"): - content_val = getattr(params.content, "content", "") - if isinstance(content_val, str): - message_text = content_val - - return TextContent( - author="agent", - content=f"Hello! I've received your message. Here's a generic response, but in future tutorials we'll see how you can get me to intelligently respond to your message. This is what I heard you say: {message_text}", - ) diff --git a/examples/tutorials/00_sync/000_hello_acp/pyproject.toml b/examples/tutorials/00_sync/000_hello_acp/pyproject.toml deleted file mode 100644 index 71110739..00000000 --- a/examples/tutorials/00_sync/000_hello_acp/pyproject.toml +++ /dev/null @@ -1,36 +0,0 @@ -[build-system] -requires = ["hatchling"] -build-backend = "hatchling.build" - -[project] -name = "000-hello-acp" -version = "0.1.0" -description = "An AgentEx agent that just says hello and acknowledges the user's message" -readme = "README.md" -requires-python = ">=3.12" -dependencies = [ - "agentex-sdk", - "scale-gp", -] - -[project.optional-dependencies] -dev = [ - "pytest", - "pytest-asyncio", - "pytest-xdist", - "httpx", - "black", - "isort", - "flake8", -] - -[tool.hatch.build.targets.wheel] -packages = ["project"] - -[tool.black] -line-length = 88 -target-version = ['py312'] - -[tool.isort] -profile = "black" -line_length = 88 diff --git a/examples/tutorials/00_sync/000_hello_acp/tests/test_agent.py b/examples/tutorials/00_sync/000_hello_acp/tests/test_agent.py deleted file mode 100644 index ad82771f..00000000 --- a/examples/tutorials/00_sync/000_hello_acp/tests/test_agent.py +++ /dev/null @@ -1,129 +0,0 @@ -""" -Sample tests for AgentEx ACP agent. - -This test suite demonstrates how to test the main AgentEx API functions: -- Non-streaming message sending -- Streaming message sending -- Task creation via RPC - -To run these tests: -1. Make sure the agent is running (via docker-compose or `agentex agents run`) -2. Set the AGENTEX_API_BASE_URL environment variable if not using default -3. 
Run: pytest test_agent.py -v - -Configuration: -- AGENTEX_API_BASE_URL: Base URL for the AgentEx server (default: http://localhost:5003) -- AGENT_NAME: Name of the agent to test (default: hello-acp) -""" - -import os - -import pytest - -from agentex import Agentex -from agentex.types import TextDelta, TextContent, TextContentParam -from agentex.types.agent_rpc_params import ParamsSendMessageRequest -from agentex.types.task_message_update import StreamTaskMessageFull, StreamTaskMessageDelta - -# Configuration from environment variables -AGENTEX_API_BASE_URL = os.environ.get("AGENTEX_API_BASE_URL", "http://localhost:5003") -AGENT_NAME = os.environ.get("AGENT_NAME", "s000-hello-acp") - - -@pytest.fixture -def client(): - """Create an AgentEx client instance for testing.""" - client = Agentex(base_url=AGENTEX_API_BASE_URL) - yield client - # Clean up: close the client connection - client.close() - - -@pytest.fixture -def agent_name(): - """Return the agent name for testing.""" - return AGENT_NAME - - -class TestNonStreamingMessages: - """Test non-streaming message sending.""" - - def test_send_simple_message(self, client: Agentex, agent_name: str): - """Test sending a simple message and receiving a response.""" - - message_content = "Hello, Agent! How are you?" - response = client.agents.send_message( - agent_name=agent_name, - params=ParamsSendMessageRequest( - content=TextContentParam( - author="user", - content=message_content, - type="text", - ) - ), - ) - result = response.result - assert result is not None - assert len(result) == 1 - message = result[0] - assert isinstance(message.content, TextContent) - assert ( - message.content.content - == f"Hello! I've received your message. Here's a generic response, but in future tutorials we'll see how you can get me to intelligently respond to your message. This is what I heard you say: {message_content}" - ) - - -class TestStreamingMessages: - """Test streaming message sending.""" - - def test_stream_simple_message(self, client: Agentex, agent_name: str): - """Test streaming a simple message and aggregating deltas.""" - - message_content = "Hello, Agent! Can you stream your response?" - aggregated_content = "" - full_content = "" - received_chunks = False - - for chunk in client.agents.send_message_stream( - agent_name=agent_name, - params=ParamsSendMessageRequest( - content=TextContentParam( - author="user", - content=message_content, - type="text", - ) - ), - ): - received_chunks = True - task_message_update = chunk.result - # Collect text deltas as they arrive or check full messages - if isinstance(task_message_update, StreamTaskMessageDelta) and task_message_update.delta is not None: - delta = task_message_update.delta - if isinstance(delta, TextDelta) and delta.text_delta is not None: - aggregated_content += delta.text_delta - - elif isinstance(task_message_update, StreamTaskMessageFull): - content = task_message_update.content - if isinstance(content, TextContent): - full_content = content.content - - if not full_content and not aggregated_content: - raise AssertionError("No content was received in the streaming response.") - if not received_chunks: - raise AssertionError("No streaming chunks were received, when at least 1 was expected.") - - if full_content: - assert ( - full_content - == f"Hello! I've received your message. Here's a generic response, but in future tutorials we'll see how you can get me to intelligently respond to your message. 
This is what I heard you say: {message_content}" - ) - - if aggregated_content: - assert ( - aggregated_content - == f"Hello! I've received your message. Here's a generic response, but in future tutorials we'll see how you can get me to intelligently respond to your message. This is what I heard you say: {message_content}" - ) - - -if __name__ == "__main__": - pytest.main([__file__, "-v"]) diff --git a/examples/tutorials/00_sync/010_multiturn/.dockerignore b/examples/tutorials/00_sync/010_multiturn/.dockerignore deleted file mode 100644 index c2d7fca4..00000000 --- a/examples/tutorials/00_sync/010_multiturn/.dockerignore +++ /dev/null @@ -1,43 +0,0 @@ -# Python -__pycache__/ -*.py[cod] -*$py.class -*.so -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -*.egg-info/ -.installed.cfg -*.egg - -# Environments -.env** -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# IDE -.idea/ -.vscode/ -*.swp -*.swo - -# Git -.git -.gitignore - -# Misc -.DS_Store diff --git a/examples/tutorials/00_sync/010_multiturn/Dockerfile b/examples/tutorials/00_sync/010_multiturn/Dockerfile deleted file mode 100644 index 71ccbaf5..00000000 --- a/examples/tutorials/00_sync/010_multiturn/Dockerfile +++ /dev/null @@ -1,51 +0,0 @@ -# syntax=docker/dockerfile:1.3 -FROM python:3.12-slim -COPY --from=ghcr.io/astral-sh/uv:0.6.4 /uv /uvx /bin/ - -# Install system dependencies -RUN apt-get update && apt-get install -y \ - htop \ - vim \ - curl \ - tar \ - python3-dev \ - postgresql-client \ - build-essential \ - libpq-dev \ - gcc \ - cmake \ - netcat-openbsd \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - -RUN uv pip install --system --upgrade pip setuptools wheel - -ENV UV_HTTP_TIMEOUT=1000 - -# Copy pyproject.toml and README.md to install dependencies -COPY 00_sync/010_multiturn/pyproject.toml /app/010_multiturn/pyproject.toml -COPY 00_sync/010_multiturn/README.md /app/010_multiturn/README.md - -WORKDIR /app/010_multiturn - -# Copy the project code -COPY 00_sync/010_multiturn/project /app/010_multiturn/project - -# Copy the test files -COPY 00_sync/010_multiturn/tests /app/010_multiturn/tests - -# Copy shared test utilities -COPY test_utils /app/test_utils - -# Install the required Python packages with dev dependencies -RUN uv pip install --system .[dev] - -WORKDIR /app/010_multiturn -# Set environment variables -ENV PYTHONPATH=/app - -# Set test environment variables -ENV AGENT_NAME=010-multiturn - -# Run the agent using uvicorn -CMD ["uvicorn", "project.acp:acp", "--host", "0.0.0.0", "--port", "8000"] diff --git a/examples/tutorials/00_sync/010_multiturn/README.md b/examples/tutorials/00_sync/010_multiturn/README.md deleted file mode 100644 index 6f585cbb..00000000 --- a/examples/tutorials/00_sync/010_multiturn/README.md +++ /dev/null @@ -1,54 +0,0 @@ -# [Sync] Multiturn - -Handle multi-turn conversations in synchronous agents by manually maintaining conversation history and context between messages. 
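Because the agent itself is stateless, the client has to carry the conversation. Before the handler-side pattern below, here is one naive client-side sketch that folds prior turns into each outgoing message. It assumes the same local client setup as this tutorial's `dev.ipynb`; the transcript format is purely illustrative, not a fixed API:

```python
from agentex import Agentex
from agentex.types import TextContent

client = Agentex(base_url="http://localhost:5003")  # local gateway, as in dev.ipynb
history: list[str] = []  # client-held transcript; the sync agent keeps no state


def send_turn(user_text: str) -> str:
    # Fold prior turns into the outgoing message so the agent sees full context.
    transcript = "\n".join([*history, f"user: {user_text}"])
    response = client.agents.send_message(
        agent_name="s010-multiturn",
        params={
            "content": {"type": "text", "author": "user", "content": transcript},
            "stream": False,
        },
    )
    reply = ""
    for task_message in response.result or []:
        if isinstance(task_message.content, TextContent):
            reply = task_message.content.content
    history.extend([f"user: {user_text}", f"agent: {reply}"])
    return reply


print(send_turn("My name is Ada."))
print(send_turn("What's my name?"))  # context comes only from the client-held history
```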
- -## What You'll Learn -- How to handle conversation history in sync agents -- Building context from previous messages -- The limitations of stateless multiturn patterns - -## Prerequisites -- Development environment set up (see [main repo README](https://github.com/scaleapi/scale-agentex)) -- Backend services running: `make dev` from repository root -- Understanding of basic sync agents (see [000_hello_acp](../000_hello_acp/)) - -## Quick Start - -```bash -cd examples/tutorials/00_sync/010_multiturn -uv run agentex agents run --manifest manifest.yaml -``` - -## Key Pattern - -Sync agents are stateless by default. To handle multi-turn conversations, you need to: -1. Accept conversation history in the request -2. Maintain context across messages -3. Return responses that build on previous exchanges - -```python -@acp.on_message_send -async def handle_message_send(params: SendMessageParams): - # Accept conversation history from client - history = params.conversation_history - - # Build context from history - context = build_context(history) - - # Generate response considering full context - response = generate_response(params.content, context) - - return TextContent(author="agent", content=response) -``` - -The handler accepts history, builds context, and returns responses that reference previous exchanges. - -## When to Use -- Simple chatbots that need conversation memory -- When client can maintain and send conversation history -- Quick prototypes before building full async agents - -## Why This Matters -While sync agents can handle conversations, you're responsible for managing state on the client side. This becomes complex quickly. For production conversational agents, consider async agents ([10_async/00_base/010_multiturn](../../10_async/00_base/010_multiturn/)) where the platform manages state automatically. - -**Next:** [020_streaming](../020_streaming/) - Stream responses in real-time diff --git a/examples/tutorials/00_sync/010_multiturn/dev.ipynb b/examples/tutorials/00_sync/010_multiturn/dev.ipynb deleted file mode 100644 index d82cf577..00000000 --- a/examples/tutorials/00_sync/010_multiturn/dev.ipynb +++ /dev/null @@ -1,166 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "from agentex import Agentex\n", - "\n", - "client = Agentex(base_url=\"http://localhost:5003\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1", - "metadata": {}, - "outputs": [], - "source": [ - "AGENT_NAME = \"s010-multiturn\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2", - "metadata": {}, - "outputs": [], - "source": [ - "# # (Optional) Create a new task. If you don't create a new task, each message will be sent to a new task. 
The server will create the task for you.\n", - "\n", - "# import uuid\n", - "\n", - "# TASK_ID = str(uuid.uuid4())[:8]\n", - "\n", - "# rpc_response = client.agents.rpc_by_name(\n", - "# agent_name=AGENT_NAME,\n", - "# method=\"task/create\",\n", - "# params={\n", - "# \"name\": f\"{TASK_ID}-task\",\n", - "# \"params\": {}\n", - "# }\n", - "# )\n", - "\n", - "# task = rpc_response.result\n", - "# print(task)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": {}, - "outputs": [], - "source": [ - "# Test non-streaming response\n", - "from agentex.types import TextContent\n", - "\n", - "# The response is expected to be a list of TaskMessage objects, which is a union of the following types:\n", - "# - TextContent: A message with just text content \n", - "# - DataContent: A message with JSON-serializable data content\n", - "# - ToolRequestContent: A message with a tool request, which contains a JSON-serializable request to call a tool\n", - "# - ToolResponseContent: A message with a tool response, which contains the response object from a tool call in its content\n", - "\n", - "# When processing the message/send response, if you are expecting more than TextContent, such as DataContent, ToolRequestContent, or ToolResponseContent, you can process them as well\n", - "\n", - "rpc_response = client.agents.send_message(\n", - " agent_name=AGENT_NAME,\n", - " params={\n", - " \"content\": {\"type\": \"text\", \"author\": \"user\", \"content\": \"Hello what can you do?\"},\n", - " \"stream\": False\n", - " }\n", - ")\n", - "\n", - "if not rpc_response or not rpc_response.result:\n", - " raise ValueError(\"No result in response\")\n", - "\n", - "# Extract and print just the text content from the response\n", - "for task_message in rpc_response.result:\n", - " content = task_message.content\n", - " if isinstance(content, TextContent):\n", - " text = content.content\n", - " print(text)\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4", - "metadata": {}, - "outputs": [], - "source": [ - "# Test streaming response\n", - "from agentex.types.text_delta import TextDelta\n", - "from agentex.types.task_message_update import StreamTaskMessageFull, StreamTaskMessageDelta\n", - "\n", - "# The result object of message/send will be a TaskMessageUpdate which is a union of the following types:\n", - "# - StreamTaskMessageStart: \n", - "# - An indicator that a streaming message was started, doesn't contain any useful content\n", - "# - StreamTaskMessageDelta: \n", - "# - A delta of a streaming message, contains the text delta to aggregate\n", - "# - StreamTaskMessageDone: \n", - "# - An indicator that a streaming message was done, doesn't contain any useful content\n", - "# - StreamTaskMessageFull: \n", - "# - A non-streaming message, there is nothing to aggregate, since this contains the full message, not deltas\n", - "\n", - "# When processing StreamTaskMessageDelta, if you are expecting more than TextDeltas, such as DataDelta, ToolRequestDelta, or ToolResponseDelta, you can process them as well\n", - "# When processing StreamTaskMessageFull, if you are expecting more than TextContent, such as DataContent, ToolRequestContent, or ToolResponseContent, you can process them as well\n", - "\n", - "for agent_rpc_response_chunk in client.agents.send_message_stream(\n", - " agent_name=AGENT_NAME,\n", - " params={\n", - " \"content\": {\"type\": \"text\", \"author\": \"user\", \"content\": \"Hello what can you do?\"},\n", - " \"stream\": True\n", - " }\n", -
"):\n", - " # We know that the result of the message/send when stream is set to True will be a TaskMessageUpdate\n", - " task_message_update = agent_rpc_response_chunk.result\n", - " # Print oly the text deltas as they arrive or any full messages\n", - " if isinstance(task_message_update, StreamTaskMessageDelta):\n", - " delta = task_message_update.delta\n", - " if isinstance(delta, TextDelta):\n", - " print(delta.text_delta, end=\"\", flush=True)\n", - " else:\n", - " print(f\"Found non-text {type(task_message)} object in streaming message.\")\n", - " elif isinstance(task_message_update, StreamTaskMessageFull):\n", - " content = task_message_update.content\n", - " if isinstance(content, TextContent):\n", - " print(content.content)\n", - " else:\n", - " print(f\"Found non-text {type(task_message)} object in full message.\")\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.9" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/examples/tutorials/00_sync/010_multiturn/manifest.yaml b/examples/tutorials/00_sync/010_multiturn/manifest.yaml deleted file mode 100644 index c7e094aa..00000000 --- a/examples/tutorials/00_sync/010_multiturn/manifest.yaml +++ /dev/null @@ -1,118 +0,0 @@ -# Agent Manifest Configuration -# --------------------------- -# This file defines how your agent should be built and deployed. - -# Build Configuration -# ------------------ -# The build config defines what gets packaged into your agent's Docker image. -# This same configuration is used whether building locally or remotely. -# -# When building: -# 1. All files from include_paths are collected into a build context -# 2. The context is filtered by dockerignore rules -# 3. The Dockerfile uses this context to build your agent's image -# 4. 
The image is pushed to a registry and used to run your agent -build: - context: - # Root directory for the build context - root: ../../ # Up to tutorials level to include test_utils - - # Paths to include in the Docker build context - # Must include: - # - Your agent's directory (your custom agent code) - # These paths are collected and sent to the Docker daemon for building - include_paths: - - 00_sync/010_multiturn - - test_utils - - # Path to your agent's Dockerfile - # This defines how your agent's image is built from the context - # Relative to the root directory - dockerfile: 00_sync/010_multiturn/Dockerfile - - # Path to your agent's .dockerignore - # Filters unnecessary files from the build context - # Helps keep build context small and builds fast - dockerignore: 00_sync/010_multiturn/.dockerignore - - -# Local Development Configuration -# ----------------------------- -# Only used when running the agent locally -local_development: - agent: - port: 8000 # Port where your local ACP server is running - host_address: host.docker.internal # Host address for Docker networking (host.docker.internal for Docker, localhost for direct) - - # File paths for local development (relative to this manifest.yaml) - paths: - # Path to ACP server file - # Examples: - # project/acp.py (standard) - # src/server.py (custom structure) - # ../shared/acp.py (shared across projects) - # /absolute/path/acp.py (absolute path) - acp: project/acp.py - - -# Agent Configuration -# ----------------- -agent: - acp_type: sync - # Unique name for your agent - # Used for task routing and monitoring - name: s010-multiturn - - # Description of what your agent does - # Helps with documentation and discovery - description: An AgentEx agent - - # Temporal workflow configuration - # Set enabled: true to use Temporal workflows for long-running tasks - temporal: - enabled: false - - # Optional: Credentials mapping - # Maps Kubernetes secrets to environment variables - # Common credentials include: - # credentials: - # - env_var_name: OPENAI_API_KEY - # secret_name: openai-api-key - # secret_key: api-key - - # Optional: Set Environment variables for running your agent locally as well - # as for deployment later on - # env: - # - name: OPENAI_BASE_URL - # value: "https://api.openai.com/v1" - # - name: ACCOUNT_ID - # value: "your_account_id_here" - - -# Deployment Configuration -# ----------------------- -# Configuration for deploying your agent to Kubernetes clusters -deployment: - # Container image configuration - image: - repository: "" # Update with your container registry - tag: "latest" # Default tag, should be versioned in production - - # Global deployment settings that apply to all clusters - # These can be overridden in cluster-specific files (deploy/*.yaml) - global: - agent: - name: "s010-multiturn" - description: "An AgentEx agent" - - # Default replica count - replicaCount: 1 - - # Default resource requirements - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "1000m" - memory: "2Gi" \ No newline at end of file diff --git a/examples/tutorials/00_sync/010_multiturn/project/__init__.py b/examples/tutorials/00_sync/010_multiturn/project/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/examples/tutorials/00_sync/010_multiturn/project/acp.py b/examples/tutorials/00_sync/010_multiturn/project/acp.py deleted file mode 100644 index 8fa07f7c..00000000 --- a/examples/tutorials/00_sync/010_multiturn/project/acp.py +++ /dev/null @@ -1,109 +0,0 @@ -import os -from typing import 
Union, AsyncGenerator - -from agents import Agent, Runner, RunConfig - -from agentex.lib import adk -from agentex.types import TextContent -from agentex.lib.types.acp import SendMessageParams -from agentex.lib.types.converters import convert_task_messages_to_oai_agents_inputs -from agentex.lib.utils.model_utils import BaseModel -from agentex.lib.sdk.fastacp.fastacp import FastACP -from agentex.types.task_message_update import TaskMessageUpdate -from agentex.types.task_message_content import TaskMessageContent -from agentex.lib.adk.providers._modules.sync_provider import SyncStreamingProvider - -# Create an ACP server -acp = FastACP.create( - acp_type="sync", -) - - -class StateModel(BaseModel): - system_prompt: str - model: str - - -# Note: The return value of this handler is persisted by the Agentex Server -@acp.on_message_send -async def handle_message_send( - params: SendMessageParams, -) -> Union[TaskMessageContent, AsyncGenerator[TaskMessageUpdate, None]]: - """ - In this tutorial, we'll see how to handle a basic multi-turn conversation without streaming. - """ - ######################################################### - # 0. Validate the message. - ######################################################### - - if not hasattr(params.content, "type") or params.content.type != "text": - raise ValueError(f"Expected text message, got {getattr(params.content, 'type', 'unknown')}") - - if not hasattr(params.content, "author") or params.content.author != "user": - raise ValueError(f"Expected user message, got {getattr(params.content, 'author', 'unknown')}") - - if not os.environ.get("OPENAI_API_KEY"): - return TextContent( - author="agent", - content="Hey, sorry I'm unable to respond to your message because you're running this example without an OpenAI API key. Please set the OPENAI_API_KEY environment variable to run this example. Do this either by adding a .env file to the project/ directory or by setting the environment variable in your terminal.", - ) - - ######################################################### - # 1. Initialize the state. Using state is optional, but it's a good way to store information between turns. - ######################################################### - - # Try to retrieve the state. If it doesn't exist, create it. - task_state = await adk.state.get_by_task_and_agent(task_id=params.task.id, agent_id=params.agent.id) - - if not task_state: - # If the state doesn't exist, create it. - state = StateModel(system_prompt="You are a helpful assistant that can answer questions.", model="gpt-4o-mini") - task_state = await adk.state.create(task_id=params.task.id, agent_id=params.agent.id, state=state) - else: - state = StateModel.model_validate(task_state.state) - - ######################################################### - # 2. Fetch our message history. - ######################################################### - - task_messages = await adk.messages.list(task_id=params.task.id) - - ######################################################### - # 3.
Run the agent with the OpenAI Agents SDK - ######################################################### - - # Initialize the provider and run config to allow for tracing - provider = SyncStreamingProvider( - trace_id=params.task.id, - ) - - run_config = RunConfig( - model_provider=provider, - ) - - # Initialize the agent - test_agent = Agent(name="assistant", instructions=state.system_prompt, model=state.model) - - # Convert task messages to OpenAI Agents SDK format - input_list = convert_task_messages_to_oai_agents_inputs(task_messages) - - # Run the agent - result = await Runner.run(test_agent, input_list, run_config=run_config) - - - # TaskMessages are messages that are sent between an Agent and a Client. They are fundamentally decoupled from messages sent to the LLM. This is because you may want to send additional metadata to allow the client to render the message on the UI differently. - - # LLMMessages are OpenAI-compatible messages that are sent to the LLM, and are used to track the state of a conversation with a model. - - # In simple scenarios your conversion logic will just look like this. However, in complex scenarios where you are leveraging the flexibility of the TaskMessage type to send non-LLM-specific metadata, you should write custom conversion logic. - - # Some complex scenarios include: - # - Taking a markdown document output by an LLM, postprocessing it into a JSON object to clearly denote title, content, and footers. This can be sent as a DataContent TaskMessage to the client and converted back to markdown here to send back to the LLM. - # - If using multiple LLMs (like in an actor-critic framework), you may want to send DataContent that denotes which LLM generated which part of the output and write conversion logic to split the TaskMessage history into multiple LLM conversations. - # - If using multiple LLMs, but one LLM's output should not be sent to the user (i.e. a critic model), you can leverage the State as an internal storage mechanism to store the critic model's conversation history. This is a powerful and flexible way to handle complex scenarios. - - ######################################################### - # 4. Return the agent response to the client. - ######################################################### - - return TextContent(author="agent", content=result.final_output) diff --git a/examples/tutorials/00_sync/010_multiturn/pyproject.toml b/examples/tutorials/00_sync/010_multiturn/pyproject.toml deleted file mode 100644 index d6ec48d2..00000000 --- a/examples/tutorials/00_sync/010_multiturn/pyproject.toml +++ /dev/null @@ -1,35 +0,0 @@ -[build-system] -requires = ["hatchling"] -build-backend = "hatchling.build" - -[project] -name = "010-multiturn" -version = "0.1.0" -description = "An AgentEx agent" -readme = "README.md" -requires-python = ">=3.12" -dependencies = [ - "agentex-sdk", - "scale-gp", -] - -[project.optional-dependencies] -dev = [ - "pytest", - "pytest-asyncio", - "httpx", - "black", - "isort", - "flake8", -] - -[tool.hatch.build.targets.wheel] -packages = ["project"] - -[tool.black] -line-length = 88 -target-version = ['py312'] - -[tool.isort] -profile = "black" -line_length = 88 \ No newline at end of file diff --git a/examples/tutorials/00_sync/010_multiturn/tests/test_agent.py b/examples/tutorials/00_sync/010_multiturn/tests/test_agent.py deleted file mode 100644 index 96eaf233..00000000 --- a/examples/tutorials/00_sync/010_multiturn/tests/test_agent.py +++ /dev/null @@ -1,154 +0,0 @@ -""" -Sample tests for AgentEx ACP agent.
- -This test suite demonstrates how to test the main AgentEx API functions: -- Non-streaming message sending -- Streaming message sending -- Task creation via RPC - -To run these tests: -1. Make sure the agent is running (via docker-compose or `agentex agents run`) -2. Set the AGENTEX_API_BASE_URL environment variable if not using default -3. Run: pytest test_agent.py -v - -Configuration: -- AGENTEX_API_BASE_URL: Base URL for the AgentEx server (default: http://localhost:5003) -- AGENT_NAME: Name of the agent to test (default: s010-multiturn) -""" - -import os - -import pytest -from test_utils.sync import validate_text_in_string, collect_streaming_response - -from agentex import Agentex -from agentex.types import TextContent, TextContentParam -from agentex.types.agent_rpc_params import ParamsCreateTaskRequest, ParamsSendMessageRequest -from agentex.lib.sdk.fastacp.base.base_acp_server import uuid - -# Configuration from environment variables -AGENTEX_API_BASE_URL = os.environ.get("AGENTEX_API_BASE_URL", "http://localhost:5003") -AGENT_NAME = os.environ.get("AGENT_NAME", "s010-multiturn") - - -@pytest.fixture -def client(): - """Create an AgentEx client instance for testing.""" - return Agentex(base_url=AGENTEX_API_BASE_URL) - - -@pytest.fixture -def agent_name(): - """Return the agent name for testing.""" - return AGENT_NAME - - -@pytest.fixture -def agent_id(client, agent_name): - """Retrieve the agent ID based on the agent name.""" - agents = client.agents.list() - for agent in agents: - if agent.name == agent_name: - return agent.id - raise ValueError(f"Agent with name {agent_name} not found.") - - -class TestNonStreamingMessages: - """Test non-streaming message sending.""" - - def test_send_message(self, client: Agentex, agent_name: str, agent_id: str): - task_response = client.agents.create_task(agent_id, params=ParamsCreateTaskRequest(name=uuid.uuid1().hex)) - task = task_response.result - - assert task is not None - - messages = [ - "Hello, can you tell me a little bit about tennis? I want you to make sure you use the word 'tennis' in each response.", - "Pick one of the things you just mentioned, and dive deeper into it.", - "Can you now output a summary of this conversation", - ] - - for i, msg in enumerate(messages): - response = client.agents.send_message( - agent_name=agent_name, - params=ParamsSendMessageRequest( - content=TextContentParam( - author="user", - content=msg, - type="text", - ), - task_id=task.id, - ), - ) - assert response is not None and response.result is not None - result = response.result - - for message in result: - content = message.content - assert content is not None - assert isinstance(content, TextContent) and isinstance(content.content, str) - validate_text_in_string("tennis", content.content) - - states = client.states.list(agent_id=agent_id, task_id=task.id) - assert len(states) == 1 - - state = states[0] - assert state.state is not None - assert state.state.get("system_prompt", None) == "You are a helpful assistant that can answer questions."
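        # Optional deeper check (a sketch, not part of the original test; assumes
        # the tutorial's StateModel from project/acp.py is importable in the test
        # environment): revalidate the raw state dict to confirm it round-trips
        # through the agent's state model.
        #
        #   from project.acp import StateModel
        #   restored = StateModel.model_validate(state.state)
        #   assert restored.model == "gpt-4o-mini"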
- - message_history = client.messages.list( - task_id=task.id, - ) - assert len(message_history) == (i + 1) * 2 # user + agent messages - - -class TestStreamingMessages: - """Test streaming message sending.""" - - def test_stream_message(self, client: Agentex, agent_name: str, agent_id: str): - """Test streaming messages in a multi-turn conversation.""" - - # create a task for this specific conversation - task_response = client.agents.create_task(agent_id, params=ParamsCreateTaskRequest(name=uuid.uuid1().hex)) - task = task_response.result - - assert task is not None - messages = [ - "Hello, can you tell me a little bit about tennis? I want you to make sure you use the word 'tennis' in each response.", - "Pick one of the things you just mentioned, and dive deeper into it.", - "Can you now output a summary of this conversation", - ] - - for i, msg in enumerate(messages): - stream = client.agents.send_message_stream( - agent_name=agent_name, - params=ParamsSendMessageRequest( - content=TextContentParam( - author="user", - content=msg, - type="text", - ), - task_id=task.id, - ), - ) - - # Collect the streaming response - aggregated_content, chunks = collect_streaming_response(stream) - - assert len(chunks) == 1 - # Get the actual content (prefer full_content if available, otherwise use aggregated) - - # Validate that "tennis" appears in the response because that is what our model does - validate_text_in_string("tennis", aggregated_content) - - states = client.states.list(task_id=task.id) - assert len(states) == 1 - - message_history = client.messages.list( - task_id=task.id, - ) - assert len(message_history) == (i + 1) * 2 # user + agent messages - - -if __name__ == "__main__": - pytest.main([__file__, "-v"]) diff --git a/examples/tutorials/00_sync/020_streaming/.dockerignore b/examples/tutorials/00_sync/020_streaming/.dockerignore deleted file mode 100644 index c2d7fca4..00000000 --- a/examples/tutorials/00_sync/020_streaming/.dockerignore +++ /dev/null @@ -1,43 +0,0 @@ -# Python -__pycache__/ -*.py[cod] -*$py.class -*.so -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -*.egg-info/ -.installed.cfg -*.egg - -# Environments -.env** -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# IDE -.idea/ -.vscode/ -*.swp -*.swo - -# Git -.git -.gitignore - -# Misc -.DS_Store diff --git a/examples/tutorials/00_sync/020_streaming/Dockerfile b/examples/tutorials/00_sync/020_streaming/Dockerfile deleted file mode 100644 index 00137d7f..00000000 --- a/examples/tutorials/00_sync/020_streaming/Dockerfile +++ /dev/null @@ -1,50 +0,0 @@ -# syntax=docker/dockerfile:1.3 -FROM python:3.12-slim -COPY --from=ghcr.io/astral-sh/uv:0.6.4 /uv /uvx /bin/ - -# Install system dependencies -RUN apt-get update && apt-get install -y \ - htop \ - vim \ - curl \ - tar \ - python3-dev \ - postgresql-client \ - build-essential \ - libpq-dev \ - gcc \ - cmake \ - netcat-openbsd \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - -RUN uv pip install --system --upgrade pip setuptools wheel - -ENV UV_HTTP_TIMEOUT=1000 - -# Copy pyproject.toml and README.md to install dependencies -COPY 00_sync/020_streaming/pyproject.toml /app/020_streaming/pyproject.toml -COPY 00_sync/020_streaming/README.md /app/020_streaming/README.md - -WORKDIR /app/020_streaming - -# Copy the project code -COPY 00_sync/020_streaming/project /app/020_streaming/project - -# Copy the test files -COPY 00_sync/020_streaming/tests /app/020_streaming/tests - -# Copy shared test utilities -COPY 
test_utils /app/test_utils - -# Install the required Python packages with dev dependencies -RUN uv pip install --system .[dev] - -# Set environment variables -ENV PYTHONPATH=/app - -# Set test environment variables -ENV AGENT_NAME=020-streaming - -# Run the agent using uvicorn -CMD ["uvicorn", "project.acp:acp", "--host", "0.0.0.0", "--port", "8000"] \ No newline at end of file diff --git a/examples/tutorials/00_sync/020_streaming/README.md b/examples/tutorials/00_sync/020_streaming/README.md deleted file mode 100644 index a4f6f476..00000000 --- a/examples/tutorials/00_sync/020_streaming/README.md +++ /dev/null @@ -1,45 +0,0 @@ -# [Sync] Streaming - -Stream responses progressively using async generators instead of returning a single message. Enables showing partial results as they're generated. - -## What You'll Learn -- How to stream responses using async generators -- The `yield` pattern for progressive updates -- When streaming improves user experience - -## Prerequisites -- Development environment set up (see [main repo README](https://github.com/scaleapi/scale-agentex)) -- Backend services running: `make dev` from repository root -- Understanding of basic sync agents (see [000_hello_acp](../000_hello_acp/)) - -## Quick Start - -```bash -cd examples/tutorials/00_sync/020_streaming -uv run agentex agents run --manifest manifest.yaml -``` - -## Key Code - -```python -@acp.on_message_send -async def handle_message_send(params: SendMessageParams): - async def stream_response(): - for chunk in response_chunks: - yield TaskMessageUpdate(content=TextContent(...)) - - return stream_response() -``` - -Return an async generator instead of a single response - each `yield` sends an update to the client. - -## When to Use -- Streaming LLM responses (OpenAI, Anthropic, etc.) -- Large data processing with progress updates -- Any operation that takes >1 second to complete -- Improving perceived responsiveness - -## Why This Matters -Streaming dramatically improves user experience for longer operations. Instead of waiting 10 seconds for a complete response, users see results immediately as they're generated. This is essential for modern AI agents. - -**Next:** Ready for task management? โ†’ [10_async/00_base/000_hello_acp](../../10_async/00_base/000_hello_acp/) diff --git a/examples/tutorials/00_sync/020_streaming/dev.ipynb b/examples/tutorials/00_sync/020_streaming/dev.ipynb deleted file mode 100644 index b4e517c3..00000000 --- a/examples/tutorials/00_sync/020_streaming/dev.ipynb +++ /dev/null @@ -1,158 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "from agentex import Agentex\n", - "\n", - "client = Agentex(base_url=\"http://localhost:5003\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1", - "metadata": {}, - "outputs": [], - "source": [ - "AGENT_NAME = \"s020-streaming\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2", - "metadata": {}, - "outputs": [], - "source": [ - "# # (Optional) Create a new task. If you don't create a new task, each message will be sent to a new task. 
The server will create the task for you.\n", - "\n", - "# import uuid\n", - "\n", - "# TASK_ID = str(uuid.uuid4())[:8]\n", - "\n", - "# rpc_response = client.agents.rpc_by_name(\n", - "# agent_name=AGENT_NAME,\n", - "# method=\"task/create\",\n", - "# params={\n", - "# \"name\": f\"{TASK_ID}-task\",\n", - "# \"params\": {}\n", - "# }\n", - "# )\n", - "\n", - "# task = rpc_response.result\n", - "# print(task)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": {}, - "outputs": [], - "source": [ - "# Test non-streaming response\n", - "from agentex.types import TextContent\n", - "\n", - "# The response is expected to be a list of TaskMessage objects, which is a union of the following types:\n", - "# - TextContent: A message with just text content \n", - "# - DataContent: A message with JSON-serializable data content\n", - "# - ToolRequestContent: A message with a tool request, which contains a JSON-serializable request to call a tool\n", - "# - ToolResponseContent: A message with a tool response, which contains the response object from a tool call in its content\n", - "\n", - "# When processing the message/send response, if you are expecting more than TextContent, such as DataContent, ToolRequestContent, or ToolResponseContent, you can process them as well\n", - "\n", - "rpc_response = client.agents.send_message(\n", - " agent_name=AGENT_NAME,\n", - " params={\n", - " \"content\": {\"type\": \"text\", \"author\": \"user\", \"content\": \"Hello what can you do?\"},\n", - " \"stream\": False\n", - " }\n", - ")\n", - "\n", - "if not rpc_response or not rpc_response.result:\n", - " raise ValueError(\"No result in response\")\n", - "\n", - "# Extract and print just the text content from the response\n", - "for task_message in rpc_response.result:\n", - " content = task_message.content\n", - " if isinstance(content, TextContent):\n", - " text = content.content\n", - " print(text)\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4", - "metadata": {}, - "outputs": [], - "source": [ - "# Test streaming response\n", - "from agentex.types.text_delta import TextDelta\n", - "from agentex.types.task_message_update import StreamTaskMessageFull, StreamTaskMessageDelta\n", - "\n", - "# The result object of message/send will be a TaskMessageUpdate which is a union of the following types:\n", - "# - StreamTaskMessageStart: \n", - "# - An indicator that a streaming message was started, doesn't contain any useful content\n", - "# - StreamTaskMessageDelta: \n", - "# - A delta of a streaming message, contains the text delta to aggregate\n", - "# - StreamTaskMessageDone: \n", - "# - An indicator that a streaming message was done, doesn't contain any useful content\n", - "# - StreamTaskMessageFull: \n", - "# - A non-streaming message, there is nothing to aggregate, since this contains the full message, not deltas\n", - "\n", - "# When processing StreamTaskMessageDelta, if you are expecting more than TextDeltas, such as DataDelta, ToolRequestDelta, or ToolResponseDelta, you can process them as well\n", - "# When processing StreamTaskMessageFull, if you are expecting more than TextContent, such as DataContent, ToolRequestContent, or ToolResponseContent, you can process them as well\n", - "\n", - "for agent_rpc_response_chunk in client.agents.send_message_stream(\n", - " agent_name=AGENT_NAME,\n", - " params={\n", - " \"content\": {\"type\": \"text\", \"author\": \"user\", \"content\": \"Hello what can you do?\"},\n", - " \"stream\": True\n", - " }\n", -
"):\n", - " # We know that the result of the message/send when stream is set to True will be a TaskMessageUpdate\n", - " task_message_update = agent_rpc_response_chunk.result\n", - " # Print oly the text deltas as they arrive or any full messages\n", - " if isinstance(task_message_update, StreamTaskMessageDelta):\n", - " delta = task_message_update.delta\n", - " if isinstance(delta, TextDelta):\n", - " print(delta.text_delta, end=\"\", flush=True)\n", - " else:\n", - " print(f\"Found non-text {type(task_message)} object in streaming message.\")\n", - " elif isinstance(task_message_update, StreamTaskMessageFull):\n", - " content = task_message_update.content\n", - " if isinstance(content, TextContent):\n", - " print(content.content)\n", - " else:\n", - " print(f\"Found non-text {type(task_message)} object in full message.\")\n" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.9" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/examples/tutorials/00_sync/020_streaming/manifest.yaml b/examples/tutorials/00_sync/020_streaming/manifest.yaml deleted file mode 100644 index 39a04d0f..00000000 --- a/examples/tutorials/00_sync/020_streaming/manifest.yaml +++ /dev/null @@ -1,119 +0,0 @@ -# Agent Manifest Configuration -# --------------------------- -# This file defines how your agent should be built and deployed. - -# Build Configuration -# ------------------ -# The build config defines what gets packaged into your agent's Docker image. -# This same configuration is used whether building locally or remotely. -# -# When building: -# 1. All files from include_paths are collected into a build context -# 2. The context is filtered by dockerignore rules -# 3. The Dockerfile uses this context to build your agent's image -# 4. 
The image is pushed to a registry and used to run your agent -build: - context: - # Root directory for the build context - root: ../../ # Up to tutorials level to include test_utils - - # Paths to include in the Docker build context - # Must include: - # - Your agent's directory (your custom agent code) - # These paths are collected and sent to the Docker daemon for building - - include_paths: - - 00_sync/020_streaming - - test_utils - - # Path to your agent's Dockerfile - # This defines how your agent's image is built from the context - # Relative to the root directory - dockerfile: 00_sync/020_streaming/Dockerfile - - # Path to your agent's .dockerignore - # Filters unnecessary files from the build context - # Helps keep build context small and builds fast - dockerignore: 00_sync/020_streaming/.dockerignore - - -# Local Development Configuration -# ----------------------------- -# Only used when running the agent locally -local_development: - agent: - port: 8000 # Port where your local ACP server is running - host_address: host.docker.internal # Host address for Docker networking (host.docker.internal for Docker, localhost for direct) - - # File paths for local development (relative to this manifest.yaml) - paths: - # Path to ACP server file - # Examples: - # project/acp.py (standard) - # src/server.py (custom structure) - # ../shared/acp.py (shared across projects) - # /absolute/path/acp.py (absolute path) - acp: project/acp.py - - -# Agent Configuration -# ----------------- -agent: - acp_type: sync - # Unique name for your agent - # Used for task routing and monitoring - name: s020-streaming - - # Description of what your agent does - # Helps with documentation and discovery - description: An AgentEx agent that does multiturn streaming chat - - # Temporal workflow configuration - # Set enabled: true to use Temporal workflows for long-running tasks - temporal: - enabled: false - - # Optional: Credentials mapping - # Maps Kubernetes secrets to environment variables - # Common credentials include: - # credentials: - # - env_var_name: OPENAI_API_KEY - # secret_name: openai-api-key - # secret_key: api-key - - # Optional: Set Environment variables for running your agent locally as well - # as for deployment later on - # env: - # - name: OPENAI_BASE_URL - # value: "https://api.openai.com/v1" - # - name: ACCOUNT_ID - # value: "your_account_id_here" - - -# Deployment Configuration -# ----------------------- -# Configuration for deploying your agent to Kubernetes clusters -deployment: - # Container image configuration - image: - repository: "" # Update with your container registry - tag: "latest" # Default tag, should be versioned in production - - # Global deployment settings that apply to all clusters - # These can be overridden in cluster-specific files (deploy/*.yaml) - global: - agent: - name: "s020-streaming" - description: "An AgentEx agent that does multiturn streaming chat" - - # Default replica count - replicaCount: 1 - - # Default resource requirements - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "1000m" - memory: "2Gi" \ No newline at end of file diff --git a/examples/tutorials/00_sync/020_streaming/project/__init__.py b/examples/tutorials/00_sync/020_streaming/project/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/examples/tutorials/00_sync/020_streaming/project/acp.py b/examples/tutorials/00_sync/020_streaming/project/acp.py deleted file mode 100644 index aff8ea67..00000000 --- 
a/examples/tutorials/00_sync/020_streaming/project/acp.py +++ /dev/null @@ -1,103 +0,0 @@ -import os -from typing import Union, AsyncGenerator - -from agents import Agent, Runner, RunConfig - -from agentex.lib import adk -from agentex.lib.types.acp import SendMessageParams -from agentex.lib.types.converters import convert_task_messages_to_oai_agents_inputs -from agentex.lib.utils.model_utils import BaseModel -from agentex.lib.sdk.fastacp.fastacp import FastACP -from agentex.types.task_message_update import TaskMessageUpdate, StreamTaskMessageFull -from agentex.types.task_message_content import TextContent, TaskMessageContent -from agentex.lib.adk.providers._modules.sync_provider import ( - SyncStreamingProvider, - convert_openai_to_agentex_events, -) - -# Create an ACP server -acp = FastACP.create( - acp_type="sync", -) - - -class StateModel(BaseModel): - system_prompt: str - model: str - - -# Note: The return value of this handler is persisted by the Agentex Server -@acp.on_message_send -async def handle_message_send( - params: SendMessageParams, -) -> Union[TaskMessageContent, AsyncGenerator[TaskMessageUpdate, None]]: - """ - In this tutorial, we'll see how to handle a basic multi-turn conversation with streaming. - """ - ######################################################### - # 1-3. These steps are all the same as the multiturn tutorial. - ######################################################### - - if not params.content: - return - - if not hasattr(params.content, "type") or params.content.type != "text": - raise ValueError(f"Expected text message, got {getattr(params.content, 'type', 'unknown')}") - - if not hasattr(params.content, "author") or params.content.author != "user": - raise ValueError(f"Expected user message, got {getattr(params.content, 'author', 'unknown')}") - - if not os.environ.get("OPENAI_API_KEY"): - yield StreamTaskMessageFull( - index=0, - type="full", - content=TextContent( - author="agent", - content="Hey, sorry I'm unable to respond to your message because you're running this example without an OpenAI API key. Please set the OPENAI_API_KEY environment variable to run this example. Do this either by adding a .env file to the project/ directory or by setting the environment variable in your terminal.", - ), - ) - # Without an API key there is nothing more to stream, so stop here. - return - - # Try to retrieve the state. If it doesn't exist, create it. - task_state = await adk.state.get_by_task_and_agent(task_id=params.task.id, agent_id=params.agent.id) - - if not task_state: - # If the state doesn't exist, create it. - state = StateModel(system_prompt="You are a helpful assistant that can answer questions.", model="gpt-4o-mini") - task_state = await adk.state.create(task_id=params.task.id, agent_id=params.agent.id, state=state) - else: - state = StateModel.model_validate(task_state.state) - - task_messages = await adk.messages.list(task_id=params.task.id) - - - # Initialize the provider and run config to allow for tracing - provider = SyncStreamingProvider( - trace_id=params.task.id, - ) - - # Initialize the run config to allow for tracing and streaming - run_config = RunConfig( - model_provider=provider, - ) - - - test_agent = Agent(name="assistant", instructions=state.system_prompt, model=state.model) - - # Convert task messages to OpenAI Agents SDK format - input_list = convert_task_messages_to_oai_agents_inputs(task_messages) - - # Run the agent and stream the events - result = Runner.run_streamed(test_agent, input_list, run_config=run_config) - - - ######################################################### - # 4.
Stream the events to the client. - ######################################################### - # Convert the OpenAI streaming events into Agentex events and forward each one to the client as it arrives - stream = result.stream_events() - - # Yield the Agentex events to the client - async for agentex_event in convert_openai_to_agentex_events(stream): - yield agentex_event - diff --git a/examples/tutorials/00_sync/020_streaming/pyproject.toml b/examples/tutorials/00_sync/020_streaming/pyproject.toml deleted file mode 100644 index b215db07..00000000 --- a/examples/tutorials/00_sync/020_streaming/pyproject.toml +++ /dev/null @@ -1,35 +0,0 @@ -[build-system] -requires = ["hatchling"] -build-backend = "hatchling.build" - -[project] -name = "020-streaming" -version = "0.1.0" -description = "An AgentEx agent that does multiturn streaming chat" -readme = "README.md" -requires-python = ">=3.12" -dependencies = [ - "agentex-sdk", - "scale-gp", -] - -[project.optional-dependencies] -dev = [ - "pytest", - "pytest-asyncio", - "httpx", - "black", - "isort", - "flake8", -] - -[tool.hatch.build.targets.wheel] -packages = ["project"] - -[tool.black] -line-length = 88 -target-version = ['py312'] - -[tool.isort] -profile = "black" -line_length = 88 \ No newline at end of file diff --git a/examples/tutorials/00_sync/020_streaming/tests/test_agent.py b/examples/tutorials/00_sync/020_streaming/tests/test_agent.py deleted file mode 100644 index 7a649f2d..00000000 --- a/examples/tutorials/00_sync/020_streaming/tests/test_agent.py +++ /dev/null @@ -1,154 +0,0 @@ -""" -Sample tests for AgentEx ACP agent. - -This test suite demonstrates how to test the main AgentEx API functions: -- Non-streaming message sending -- Streaming message sending -- Task creation via RPC - -To run these tests: -1. Make sure the agent is running (via docker-compose or `agentex agents run`) -2. Set the AGENTEX_API_BASE_URL environment variable if not using default -3.
Run: pytest test_agent.py -v - -Configuration: -- AGENTEX_API_BASE_URL: Base URL for the AgentEx server (default: http://localhost:5003) -- AGENT_NAME: Name of the agent to test (default: s020-streaming) -""" - -import os - -import pytest -from test_utils.sync import collect_streaming_response - -from agentex import Agentex -from agentex.types import TextContent, TextContentParam -from agentex.types.agent_rpc_params import ParamsCreateTaskRequest, ParamsSendMessageRequest -from agentex.lib.sdk.fastacp.base.base_acp_server import uuid - -# Configuration from environment variables -AGENTEX_API_BASE_URL = os.environ.get("AGENTEX_API_BASE_URL", "http://localhost:5003") -AGENT_NAME = os.environ.get("AGENT_NAME", "s020-streaming") - - -@pytest.fixture -def client(): - """Create an AgentEx client instance for testing.""" - return Agentex(base_url=AGENTEX_API_BASE_URL) - - -@pytest.fixture -def agent_name(): - """Return the agent name for testing.""" - return AGENT_NAME - - -@pytest.fixture -def agent_id(client, agent_name): - """Retrieve the agent ID based on the agent name.""" - agents = client.agents.list() - for agent in agents: - if agent.name == agent_name: - return agent.id - raise ValueError(f"Agent with name {agent_name} not found.") - - -class TestNonStreamingMessages: - """Test non-streaming message sending.""" - - def test_send_message(self, client: Agentex, agent_name: str, agent_id: str): - """Test sending a message and receiving a response.""" - task_response = client.agents.create_task(agent_id, params=ParamsCreateTaskRequest(name=uuid.uuid1().hex)) - task = task_response.result - - assert task is not None - - messages = [ - "Hello, can you tell me a little bit about tennis? I want you to make sure you use the word 'tennis' in each response.", - "Pick one of the things you just mentioned, and dive deeper into it.", - "Can you now output a summary of this conversation", - ] - - for i, msg in enumerate(messages): - response = client.agents.send_message( - agent_name=agent_name, - params=ParamsSendMessageRequest( - content=TextContentParam( - author="user", - content=msg, - type="text", - ), - task_id=task.id, - ), - ) - assert response is not None and response.result is not None - result = response.result - - for message in result: - content = message.content - assert content is not None - assert isinstance(content, TextContent) and isinstance(content.content, str) - - states = client.states.list(agent_id=agent_id, task_id=task.id) - assert len(states) == 1 - - state = states[0] - assert state.state is not None - assert state.state.get("system_prompt", None) == "You are a helpful assistant that can answer questions." - message_history = client.messages.list( - task_id=task.id, - ) - assert len(message_history) == (i + 1) * 2 # user + agent messages - - -class TestStreamingMessages: - """Test streaming message sending.""" - - def test_send_stream_message(self, client: Agentex, agent_name: str, agent_id: str): - """Test streaming messages in a multi-turn conversation.""" - # create a task for this specific conversation - task_response = client.agents.create_task(agent_id, params=ParamsCreateTaskRequest(name=uuid.uuid1().hex)) - task = task_response.result - - assert task is not None - messages = [ - "Hello, can you tell me a little bit about tennis? 
I want you to make sure you use the word 'tennis' in each response.", - "Pick one of the things you just mentioned, and dive deeper into it.", - "Can you now output a summary of this conversation", - ] - - for i, msg in enumerate(messages): - stream = client.agents.send_message_stream( - agent_name=agent_name, - params=ParamsSendMessageRequest( - content=TextContentParam( - author="user", - content=msg, - type="text", - ), - task_id=task.id, - ), - ) - - # Collect the streaming response - aggregated_content, chunks = collect_streaming_response(stream) - - assert aggregated_content is not None - # this is using the chat_completion_stream, so we will be getting chunks of data - assert len(chunks) > 1, "No chunks received in streaming response." - - states = client.states.list(agent_id=agent_id, task_id=task.id) - assert len(states) == 1 - - state = states[0] - assert state.state is not None - assert state.state.get("system_prompt", None) == "You are a helpful assistant that can answer questions." - message_history = client.messages.list( - task_id=task.id, - ) - assert len(message_history) == (i + 1) * 2 # user + agent messages - - -if __name__ == "__main__": - pytest.main([__file__, "-v"]) - diff --git a/examples/tutorials/10_async/00_base/000_hello_acp/.dockerignore b/examples/tutorials/10_async/00_base/000_hello_acp/.dockerignore deleted file mode 100644 index c2d7fca4..00000000 --- a/examples/tutorials/10_async/00_base/000_hello_acp/.dockerignore +++ /dev/null @@ -1,43 +0,0 @@ -# Python -__pycache__/ -*.py[cod] -*$py.class -*.so -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -*.egg-info/ -.installed.cfg -*.egg - -# Environments -.env** -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# IDE -.idea/ -.vscode/ -*.swp -*.swo - -# Git -.git -.gitignore - -# Misc -.DS_Store diff --git a/examples/tutorials/10_async/00_base/000_hello_acp/Dockerfile b/examples/tutorials/10_async/00_base/000_hello_acp/Dockerfile deleted file mode 100644 index 8b0d20f8..00000000 --- a/examples/tutorials/10_async/00_base/000_hello_acp/Dockerfile +++ /dev/null @@ -1,51 +0,0 @@ -# syntax=docker/dockerfile:1.3 -FROM python:3.12-slim -COPY --from=ghcr.io/astral-sh/uv:0.6.4 /uv /uvx /bin/ - -# Install system dependencies -RUN apt-get update && apt-get install -y \ - htop \ - vim \ - curl \ - tar \ - python3-dev \ - postgresql-client \ - build-essential \ - libpq-dev \ - gcc \ - cmake \ - netcat-openbsd \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - -RUN uv pip install --system --upgrade pip setuptools wheel - -ENV UV_HTTP_TIMEOUT=1000 - -# Copy pyproject.toml and README.md to install dependencies -COPY 10_async/00_base/000_hello_acp/pyproject.toml /app/000_hello_acp/pyproject.toml -COPY 10_async/00_base/000_hello_acp/README.md /app/000_hello_acp/README.md - -WORKDIR /app/000_hello_acp - -# Copy the project code -COPY 10_async/00_base/000_hello_acp/project /app/000_hello_acp/project - -# Copy the test files -COPY 10_async/00_base/000_hello_acp/tests /app/000_hello_acp/tests - -# Copy shared test utilities -COPY test_utils /app/test_utils - -# Install the required Python packages with dev dependencies (includes pytest) -RUN uv pip install --system .[dev] pytest-asyncio httpx - -WORKDIR /app/000_hello_acp -# Set environment variables -ENV PYTHONPATH=/app - -# Set test environment variables -ENV AGENT_NAME=ab000-hello-acp - -# Run the agent using uvicorn -CMD ["uvicorn", "project.acp:acp", "--host", "0.0.0.0", "--port", "8000"] diff --git 
a/examples/tutorials/10_async/00_base/000_hello_acp/README.md b/examples/tutorials/10_async/00_base/000_hello_acp/README.md deleted file mode 100644 index ba8aece1..00000000 --- a/examples/tutorials/10_async/00_base/000_hello_acp/README.md +++ /dev/null @@ -1,49 +0,0 @@ -# [Async] Hello ACP - -Async agents use three handlers for task lifecycle management: `on_task_create`, `on_task_event_send`, and `on_task_cancel`. Unlike sync agents, tasks persist and can receive multiple events over time. - -## What You'll Learn -- The three-handler pattern for async agents -- How tasks differ from sync messages -- When to use async vs sync agents - -## Prerequisites -- Development environment set up (see [main repo README](https://github.com/scaleapi/scale-agentex)) -- Backend services running: `make dev` from repository root -- Understanding of sync agents (see [00_sync/000_hello_acp](../../../00_sync/000_hello_acp/)) - -## Quick Start - -```bash -cd examples/tutorials/10_async/00_base/000_hello_acp -uv run agentex agents run --manifest manifest.yaml -``` - -## Key Pattern - -```python -@acp.on_task_create -async def handle_task_create(params: CreateTaskParams): - # Initialize task state, send welcome message - ... - -@acp.on_task_event_send -async def handle_event_send(params: SendEventParams): - # Handle each message/event in the task - ... - -@acp.on_task_cancel -async def handle_task_cancel(params: CancelTaskParams): - # Cleanup when task is cancelled - ... -``` - -Three handlers instead of one, giving you full control over the task lifecycle. Tasks can receive multiple events and maintain state across them. - -## When to Use -- Conversational agents that need memory -- Operations that require task tracking -- Agents that need lifecycle management (initialization, cleanup) -- Building towards production systems - -## Why This Matters -The task-based model is the foundation of production agents. Unlike sync agents where each message is independent, async agents maintain persistent tasks that can receive multiple events, store state, and have full lifecycle management. This is the stepping stone to Temporal-based agents. - -**Next:** [010_multiturn](../010_multiturn/) - Add conversation memory diff --git a/examples/tutorials/10_async/00_base/000_hello_acp/dev.ipynb b/examples/tutorials/10_async/00_base/000_hello_acp/dev.ipynb deleted file mode 100644 index 2d5b8800..00000000 --- a/examples/tutorials/10_async/00_base/000_hello_acp/dev.ipynb +++ /dev/null @@ -1,126 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "from agentex import Agentex\n", - "\n", - "client = Agentex(base_url=\"http://localhost:5003\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1", - "metadata": {}, - "outputs": [], - "source": [ - "AGENT_NAME = \"ab000-hello-acp\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2", - "metadata": {}, - "outputs": [], - "source": [ - "# (REQUIRED) Create a new task.
For Async agents, you must create a task for messages to be associated with.\n", - "import uuid\n", - "\n", - "rpc_response = client.agents.create_task(\n", - " agent_name=AGENT_NAME,\n", - " params={\n", - " \"name\": f\"{str(uuid.uuid4())[:8]}-task\",\n", - " \"params\": {}\n", - " }\n", - ")\n", - "\n", - "task = rpc_response.result\n", - "print(task)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": {}, - "outputs": [], - "source": [ - "# Send an event to the agent\n", - "\n", - "# The response is expected to be a list of TaskMessage objects, which is a union of the following types:\n", - "# - TextContent: A message with just text content \n", - "# - DataContent: A message with JSON-serializable data content\n", - "# - ToolRequestContent: A message with a tool request, which contains a JSON-serializable request to call a tool\n", - "# - ToolResponseContent: A message with a tool response, which contains response object from a tool call in its content\n", - "\n", - "# When processing the message/send response, if you are expecting more than TextContent, such as DataContent, ToolRequestContent, or ToolResponseContent, you can process them as well\n", - "\n", - "rpc_response = client.agents.send_event(\n", - " agent_name=AGENT_NAME,\n", - " params={\n", - " \"content\": {\"type\": \"text\", \"author\": \"user\", \"content\": \"Hello what can you do?\"},\n", - " \"task_id\": task.id,\n", - " }\n", - ")\n", - "\n", - "event = rpc_response.result\n", - "print(event)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4", - "metadata": {}, - "outputs": [], - "source": [ - "# Subscribe to the async task messages produced by the agent\n", - "from agentex.lib.utils.dev_tools import subscribe_to_async_task_messages\n", - "\n", - "task_messages = subscribe_to_async_task_messages(\n", - " client=client,\n", - " task=task, \n", - " only_after_timestamp=event.created_at, \n", - " print_messages=True,\n", - " rich_print=True,\n", - " timeout=5,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.9" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/examples/tutorials/10_async/00_base/000_hello_acp/manifest.yaml b/examples/tutorials/10_async/00_base/000_hello_acp/manifest.yaml deleted file mode 100644 index ba0c6836..00000000 --- a/examples/tutorials/10_async/00_base/000_hello_acp/manifest.yaml +++ /dev/null @@ -1,122 +0,0 @@ -# Agent Manifest Configuration -# --------------------------- -# This file defines how your agent should be built and deployed. - -# Build Configuration -# ------------------ -# The build config defines what gets packaged into your agent's Docker image. -# This same configuration is used whether building locally or remotely. -# -# When building: -# 1. All files from include_paths are collected into a build context -# 2. The context is filtered by dockerignore rules -# 3. The Dockerfile uses this context to build your agent's image -# 4. 
The image is pushed to a registry and used to run your agent -build: - context: - # Root directory for the build context - root: ../../../ # Up to tutorials level to include test_utils - - # Paths to include in the Docker build context - # Must include: - # - Your agent's directory (your custom agent code) - # These paths are collected and sent to the Docker daemon for building - include_paths: - - 10_async/00_base/000_hello_acp - - test_utils - - # Path to your agent's Dockerfile - # This defines how your agent's image is built from the context - # Relative to the root directory - dockerfile: 10_async/00_base/000_hello_acp/Dockerfile - - # Path to your agent's .dockerignore - # Filters unnecessary files from the build context - # Helps keep build context small and builds fast - dockerignore: 10_async/00_base/000_hello_acp/.dockerignore - - -# Local Development Configuration -# ----------------------------- -# Only used when running the agent locally -local_development: - agent: - port: 8000 # Port where your local ACP server is running - host_address: host.docker.internal # Host address for Docker networking (host.docker.internal for Docker, localhost for direct) - - # File paths for local development (relative to this manifest.yaml) - paths: - # Path to ACP server file - # Examples: - # project/acp.py (standard) - # src/server.py (custom structure) - # ../shared/acp.py (shared across projects) - # /absolute/path/acp.py (absolute path) - acp: project/acp.py - - -# Agent Configuration -# ----------------- -agent: - # Unique name for your agent - # Used for task routing and monitoring - name: ab000-hello-acp - - # Type of ACP to use - # sync: Simple synchronous ACP implementation - # async: Advanced ACP with sub-types "base" or "temporal" (requires config) - acp_type: async - - # Description of what your agent does - # Helps with documentation and discovery - description: An AgentEx agent that is not intelligent. It just shows how to implement the base async ACP type. - - # Temporal workflow configuration - # Set enabled: true to use Temporal workflows for long-running tasks - temporal: - enabled: false - - # Optional: Credentials mapping - # Maps Kubernetes secrets to environment variables - # Common credentials include: - # credentials: - # - env_var_name: OPENAI_API_KEY - # secret_name: openai-api-key - # secret_key: api-key - - # Optional: Set Environment variables for running your agent locally as well - # as for deployment later on - # env: - # - name: OPENAI_BASE_URL - # value: "https://api.openai.com/v1" - # - name: ACCOUNT_ID - # value: "your_account_id_here" - - -# Deployment Configuration -# ----------------------- -# Configuration for deploying your agent to Kubernetes clusters -deployment: - # Container image configuration - image: - repository: "" # Update with your container registry - tag: "latest" # Default tag, should be versioned in production - - # Global deployment settings that apply to all clusters - # These can be overridden in cluster-specific files (deploy/*.yaml) - global: - agent: - name: "ab000-hello-acp" - description: "An AgentEx agent that is not intelligent. It just shows how to implement the base async ACP type." 
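      # A (hypothetical) cluster-specific override file, e.g. deploy/prod.yaml,
      # might shadow the defaults below like this:
      # global:
      #   replicaCount: 3
      #   resources:
      #     requests:
      #       cpu: "1000m"
      #       memory: "2Gi"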
- - # Default replica count - replicaCount: 1 - - # Default resource requirements - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "1000m" - memory: "2Gi" \ No newline at end of file diff --git a/examples/tutorials/10_async/00_base/000_hello_acp/project/__init__.py b/examples/tutorials/10_async/00_base/000_hello_acp/project/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/examples/tutorials/10_async/00_base/000_hello_acp/project/acp.py b/examples/tutorials/10_async/00_base/000_hello_acp/project/acp.py deleted file mode 100644 index 341a2271..00000000 --- a/examples/tutorials/10_async/00_base/000_hello_acp/project/acp.py +++ /dev/null @@ -1,75 +0,0 @@ -import json - -from agentex.lib import adk -from agentex.lib.types.acp import SendEventParams, CancelTaskParams, CreateTaskParams -from agentex.lib.types.fastacp import AsyncACPConfig -from agentex.lib.utils.logging import make_logger -from agentex.types.text_content import TextContent -from agentex.lib.sdk.fastacp.fastacp import FastACP - -logger = make_logger(__name__) - - -# Create an ACP server with base configuration -# This sets up the core server that will handle task creation, events, and cancellation -acp = FastACP.create( - acp_type="async", - config=AsyncACPConfig( - type="base", - ), -) - -@acp.on_task_create -async def handle_task_create(params: CreateTaskParams): - # This handler is called first whenever a new task is created. - # It's a good place to initialize any state or resources needed for the task. - - ######################################################### - # 1. (๐Ÿ‘‹) Do task initialization here. - ######################################################### - - # Acknowledge that the task has been created. - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content=f"Hello! I've received your task. Normally you can do some state initialization here, or just pass and do nothing until you get your first event. For now I'm just acknowledging that I've received a task with the following params:\n\n{json.dumps(params.params, indent=2)}.\n\nYou should only see this message once, when the task is created. All subsequent events will be handled by the `on_task_event_send` handler.", - ), - ) - -@acp.on_task_event_send -async def handle_event_send(params: SendEventParams): - # This handler is called whenever a new event (like a message) is sent to the task - - ######################################################### - # 2. (๐Ÿ‘‹) Echo back the client's message to show it in the UI. - ######################################################### - - # This is not done by default so the agent developer has full control over what is shown to the user. - if params.event.content: - await adk.messages.create(task_id=params.task.id, content=params.event.content) - - ######################################################### - # 3. (๐Ÿ‘‹) Send a simple response message. - ######################################################### - - # In future tutorials, this is where we'll add more sophisticated response logic. - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content=f"Hello! I've received your message. I can't respond right now, but in future tutorials we'll see how you can get me to intelligently respond to your message.", - ), - ) - -@acp.on_task_cancel -async def handle_task_cancel(params: CancelTaskParams): - # This handler is called when a task is cancelled. 
- # It's useful for cleaning up any resources or state associated with the task. - - ######################################################### - # 4. (๐Ÿ‘‹) Do task cleanup here. - ######################################################### - - # This is mostly for durable workflows that are cancellable like Temporal, but we will leave it here for demonstration purposes. - logger.info(f"Hello! I've received task cancel for task {params.task.id}: {params.task}. This isn't necessary for this example, but it's good to know that it's available.") diff --git a/examples/tutorials/10_async/00_base/000_hello_acp/pyproject.toml b/examples/tutorials/10_async/00_base/000_hello_acp/pyproject.toml deleted file mode 100644 index b65795e8..00000000 --- a/examples/tutorials/10_async/00_base/000_hello_acp/pyproject.toml +++ /dev/null @@ -1,33 +0,0 @@ -[build-system] -requires = ["hatchling"] -build-backend = "hatchling.build" - -[project] -name = "ab000-hello-acp" -version = "0.1.0" -description = "An AgentEx agent that is not intelligent. It just shows how to implement the base async ACP type." -readme = "README.md" -requires-python = ">=3.12" -dependencies = [ - "agentex-sdk", - "scale-gp", -] - -[project.optional-dependencies] -dev = [ - "pytest", - "black", - "isort", - "flake8", -] - -[tool.hatch.build.targets.wheel] -packages = ["project"] - -[tool.black] -line-length = 88 -target-version = ['py312'] - -[tool.isort] -profile = "black" -line_length = 88 \ No newline at end of file diff --git a/examples/tutorials/10_async/00_base/000_hello_acp/tests/test_agent.py b/examples/tutorials/10_async/00_base/000_hello_acp/tests/test_agent.py deleted file mode 100644 index ba344410..00000000 --- a/examples/tutorials/10_async/00_base/000_hello_acp/tests/test_agent.py +++ /dev/null @@ -1,188 +0,0 @@ -""" -Sample tests for AgentEx ACP agent. - -This test suite demonstrates how to test the main AgentEx API functions: -- Non-streaming event sending and polling -- Streaming event sending - -To run these tests: -1. Make sure the agent is running (via docker-compose or `agentex agents run`) -2. Set the AGENTEX_API_BASE_URL environment variable if not using default -3. 
Run: pytest test_agent.py -v - -Configuration: -- AGENTEX_API_BASE_URL: Base URL for the AgentEx server (default: http://localhost:5003) -- AGENT_NAME: Name of the agent to test (default: ab000-hello-acp) -""" - -import os -import uuid -import asyncio - -import pytest -import pytest_asyncio -from test_utils.async_utils import ( - poll_messages, - stream_agent_response, - send_event_and_poll_yielding, -) - -from agentex import AsyncAgentex -from agentex.types import TaskMessage -from agentex.types.agent_rpc_params import ParamsCreateTaskRequest -from agentex.types.text_content_param import TextContentParam - -# Configuration from environment variables -AGENTEX_API_BASE_URL = os.environ.get("AGENTEX_API_BASE_URL", "http://localhost:5003") -AGENT_NAME = os.environ.get("AGENT_NAME", "ab000-hello-acp") - - -@pytest_asyncio.fixture -async def client(): - """Create an AgentEx client instance for testing.""" - client = AsyncAgentex(base_url=AGENTEX_API_BASE_URL) - yield client - await client.close() - - -@pytest.fixture -def agent_name(): - """Return the agent name for testing.""" - return AGENT_NAME - - -@pytest_asyncio.fixture -async def agent_id(client: AsyncAgentex, agent_name): - """Retrieve the agent ID based on the agent name.""" - agents = await client.agents.list() - for agent in agents: - if agent.name == agent_name: - return agent.id - raise ValueError(f"Agent with name {agent_name} not found.") - - -class TestNonStreamingEvents: - """Test non-streaming event sending and polling.""" - - @pytest.mark.asyncio - async def test_send_event_and_poll(self, client: AsyncAgentex, agent_id: str): - """Test sending an event and polling for the response.""" - # Create a task for this conversation - task_response = await client.agents.create_task(agent_id, params=ParamsCreateTaskRequest(name=uuid.uuid1().hex)) - task = task_response.result - assert task is not None - - # Poll for the initial task creation message - async for message in poll_messages( - client=client, - task_id=task.id, - timeout=30, - sleep_interval=1.0, - ): - assert isinstance(message, TaskMessage) - if message.content and message.content.type == "text" and message.content.author == "agent": - assert "Hello! I've received your task" in message.content.content - break - - # Send an event and poll for response - user_message = "Hello, this is a test message!" - async for message in send_event_and_poll_yielding( - client=client, - agent_id=agent_id, - task_id=task.id, - user_message=user_message, - timeout=30, - sleep_interval=1.0, - ): - assert isinstance(message, TaskMessage) - if message.content and message.content.type == "text" and message.content.author == "agent": - assert "Hello! I've received your task" in message.content.content - break - - -class TestStreamingEvents: - """Test streaming event sending.""" - - @pytest.mark.asyncio - async def test_send_event_and_stream(self, client: AsyncAgentex, agent_id: str): - """Test sending an event and streaming the response.""" - task_response = await client.agents.create_task(agent_id, params=ParamsCreateTaskRequest(name=uuid.uuid1().hex)) - task = task_response.result - - assert task is not None - task_creation_found = False - # Poll for the initial task creation message - async for message in poll_messages( - client=client, - task_id=task.id, - timeout=30, - sleep_interval=1.0, - ): - assert isinstance(message, TaskMessage) - if message.content and message.content.type == "text" and message.content.author == "agent": - assert "Hello! 
I've received your task" in message.content.content - task_creation_found = True - break - - assert task_creation_found, "Task creation message not found in poll" - - user_message = "Hello, this is a test message!" - stream_timeout = 10 - - # Collect events from stream - all_events = [] - - # Flags to track what we've received - user_echo_found = False - agent_response_found = False - - async def collect_stream_events() -> None: - nonlocal user_echo_found, agent_response_found - - async for event in stream_agent_response( - client=client, - task_id=task.id, - timeout=stream_timeout, - ): - all_events.append(event) - # Check events as they arrive - event_type = event.get("type") - if event_type == "full": - content = event.get("content", {}) - if content.get("content") is None: - continue # Skip empty content - if content.get("type") == "text" and content.get("author") == "agent": - # Check for agent response to user message - if "Hello! I've received your message" in content.get("content", ""): - # Agent response should come after user echo - assert user_echo_found, "Agent response arrived before user message echo (incorrect order)" - agent_response_found = True - elif content.get("type") == "text" and content.get("author") == "user": - # Check for user message echo - if content.get("content") == user_message: - user_echo_found = True - - # Exit early if we've found all expected messages - if user_echo_found and agent_response_found: - break - - # Start streaming task - stream_task = asyncio.create_task(collect_stream_events()) - - # Send the event - event_content = TextContentParam(type="text", author="user", content=user_message) - await client.agents.send_event(agent_id=agent_id, params={"task_id": task.id, "content": event_content}) - - # Wait for the stream to complete (with timeout) - try: - await asyncio.wait_for(stream_task, timeout=stream_timeout) - except asyncio.TimeoutError: - pytest.fail(f"Stream timed out after {stream_timeout}s waiting for expected messages") - - # Verify all expected messages were received (fail if stream ended without finding them) - assert user_echo_found, "User message echo not found in stream" - assert agent_response_found, "Agent response not found in stream" - - -if __name__ == "__main__": - pytest.main([__file__, "-v"]) diff --git a/examples/tutorials/10_async/00_base/010_multiturn/.dockerignore b/examples/tutorials/10_async/00_base/010_multiturn/.dockerignore deleted file mode 100644 index c2d7fca4..00000000 --- a/examples/tutorials/10_async/00_base/010_multiturn/.dockerignore +++ /dev/null @@ -1,43 +0,0 @@ -# Python -__pycache__/ -*.py[cod] -*$py.class -*.so -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -*.egg-info/ -.installed.cfg -*.egg - -# Environments -.env** -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# IDE -.idea/ -.vscode/ -*.swp -*.swo - -# Git -.git -.gitignore - -# Misc -.DS_Store diff --git a/examples/tutorials/10_async/00_base/010_multiturn/Dockerfile b/examples/tutorials/10_async/00_base/010_multiturn/Dockerfile deleted file mode 100644 index 48969ad9..00000000 --- a/examples/tutorials/10_async/00_base/010_multiturn/Dockerfile +++ /dev/null @@ -1,51 +0,0 @@ -# syntax=docker/dockerfile:1.3 -FROM python:3.12-slim -COPY --from=ghcr.io/astral-sh/uv:0.6.4 /uv /uvx /bin/ - -# Install system dependencies -RUN apt-get update && apt-get install -y \ - htop \ - vim \ - curl \ - tar \ - python3-dev \ - postgresql-client \ - build-essential \ - libpq-dev \ - gcc \ - cmake \ - 
netcat-openbsd \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - -RUN uv pip install --system --upgrade pip setuptools wheel - -ENV UV_HTTP_TIMEOUT=1000 - -# Copy pyproject.toml and README.md to install dependencies -COPY 10_async/00_base/010_multiturn/pyproject.toml /app/010_multiturn/pyproject.toml -COPY 10_async/00_base/010_multiturn/README.md /app/010_multiturn/README.md - -WORKDIR /app/010_multiturn - -COPY 10_async/00_base/010_multiturn/project /app/010_multiturn/project - -# Copy the test files -COPY 10_async/00_base/010_multiturn/tests /app/010_multiturn/tests - -# Copy shared test utilities -COPY test_utils /app/test_utils - -# Install the required Python packages with dev dependencies (includes pytest) -RUN uv pip install --system .[dev] pytest-asyncio httpx - -WORKDIR /app/010_multiturn - -# Set environment variables -ENV PYTHONPATH=/app - -# Set test environment variables -ENV AGENT_NAME=ab010-multiturn - -# Run the agent using uvicorn -CMD ["uvicorn", "project.acp:acp", "--host", "0.0.0.0", "--port", "8000"] diff --git a/examples/tutorials/10_async/00_base/010_multiturn/README.md b/examples/tutorials/10_async/00_base/010_multiturn/README.md deleted file mode 100644 index e16b96c7..00000000 --- a/examples/tutorials/10_async/00_base/010_multiturn/README.md +++ /dev/null @@ -1,61 +0,0 @@ -# [Async] Multiturn - -Handle multi-turn conversations in async agents with task-based state management. Each task maintains its own conversation history automatically. - -## What You'll Learn -- How tasks maintain conversation state across multiple exchanges -- Difference between sync and async multiturn patterns -- Building stateful conversational agents with minimal code - -## Prerequisites -- Development environment set up (see [main repo README](https://github.com/scaleapi/scale-agentex)) -- Backend services running: `make dev` from repository root -- Understanding of basic async agents (see [000_hello_acp](../000_hello_acp/)) - -## Quick Start - -```bash -cd examples/tutorials/10_async/00_base/010_multiturn -uv run agentex agents run --manifest manifest.yaml -``` - -## Key Pattern - -Unlike sync agents where you manually track conversation history, async agents automatically maintain state within each task: - -```python -@app.on_task_event_send() -async def on_task_event_send(event_send: TaskEventSendInput): - # The task's messages list automatically includes all previous exchanges - messages = event_send.task.messages - - # No need to manually pass history - it's already there! - response = await openai_client.chat.completions.create( - model="gpt-4o-mini", - messages=messages - ) - - return {"content": response.choices[0].message.content} -``` - -## Try It - -1. Start the agent with the command above -2. Open the web UI or use the notebook to create a task -3. Send multiple messages in the same task: - - "What's 25 + 17?" - - "What was that number again?" - - "Multiply it by 2" -4. Notice the agent remembers context from previous exchanges - -## When to Use -- Conversational agents that need memory across exchanges -- Chat interfaces where users ask follow-up questions -- Agents that build context over time within a session - -## Why This Matters -Task-based state management eliminates the complexity of manually tracking conversation history. The AgentEx platform handles state persistence automatically, making it easier to build stateful agents without custom session management code. 
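Note that the `Key Pattern` snippet above is schematic. In this tutorial's actual `project/acp.py` (shown further down in this diff), the history is persisted explicitly in task state via `adk.state` rather than read off the task object. A condensed sketch of that flow, using the tutorial's own imports (the `respond_in_task` wrapper is illustrative, not part of the SDK):

```python
from typing import List

from agentex.lib import adk
from agentex.lib.types.llm_messages import (
    AssistantMessage,
    LLMConfig,
    Message,
    UserMessage,
)
from agentex.lib.utils.model_utils import BaseModel


class StateModel(BaseModel):
    messages: List[Message]


async def respond_in_task(task_id: str, agent_id: str, user_text: str) -> None:
    # Load this task's state (seeded with a SystemMessage on task create)
    task_state = await adk.state.get_by_task_and_agent(task_id=task_id, agent_id=agent_id)
    if task_state is None:
        raise ValueError("Task state not found - ensure task was properly initialized")
    state = StateModel.model_validate(task_state.state)

    # Append the new user turn and call the LLM with the full history
    state.messages.append(UserMessage(content=user_text))
    completion = await adk.providers.litellm.chat_completion(
        llm_config=LLMConfig(model="gpt-4o-mini", messages=state.messages),
    )
    state.messages.append(
        AssistantMessage(content=completion.choices[0].message.content or "")
    )

    # Persist the updated history so the next event sees this exchange
    await adk.state.update(
        state_id=task_state.id, task_id=task_id, agent_id=agent_id, state=state
    )
```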
- -**Comparison:** In the sync version ([00_sync/010_multiturn](../../../00_sync/010_multiturn/)), you manually manage conversation history. Here, the task object does it for you. - -**Next:** [020_streaming](../020_streaming/) - Add real-time streaming responses diff --git a/examples/tutorials/10_async/00_base/010_multiturn/dev.ipynb b/examples/tutorials/10_async/00_base/010_multiturn/dev.ipynb deleted file mode 100644 index e174e470..00000000 --- a/examples/tutorials/10_async/00_base/010_multiturn/dev.ipynb +++ /dev/null @@ -1,126 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "from agentex import Agentex\n", - "\n", - "client = Agentex(base_url=\"http://localhost:5003\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1", - "metadata": {}, - "outputs": [], - "source": [ - "AGENT_NAME = \"ab010-multiturn\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2", - "metadata": {}, - "outputs": [], - "source": [ - "# (REQUIRED) Create a new task. For Async agents, you must create a task for messages to be associated with.\n", - "import uuid\n", - "\n", - "rpc_response = client.agents.create_task(\n", - " agent_name=AGENT_NAME,\n", - " params={\n", - " \"name\": f\"{str(uuid.uuid4())[:8]}-task\",\n", - " \"params\": {}\n", - " }\n", - ")\n", - "\n", - "task = rpc_response.result\n", - "print(task)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": {}, - "outputs": [], - "source": [ - "# Send an event to the agent\n", - "\n", - "# The response is expected to be a list of TaskMessage objects, which is a union of the following types:\n", - "# - TextContent: A message with just text content \n", - "# - DataContent: A message with JSON-serializable data content\n", - "# - ToolRequestContent: A message with a tool request, which contains a JSON-serializable request to call a tool\n", - "# - ToolResponseContent: A message with a tool response, which contains response object from a tool call in its content\n", - "\n", - "# When processing the message/send response, if you are expecting more than TextContent, such as DataContent, ToolRequestContent, or ToolResponseContent, you can process them as well\n", - "\n", - "rpc_response = client.agents.send_event(\n", - " agent_name=AGENT_NAME,\n", - " params={\n", - " \"content\": {\"type\": \"text\", \"author\": \"user\", \"content\": \"Hello what can you do?\"},\n", - " \"task_id\": task.id,\n", - " }\n", - ")\n", - "\n", - "event = rpc_response.result\n", - "print(event)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4", - "metadata": {}, - "outputs": [], - "source": [ - "# Subscribe to the async task messages produced by the agent\n", - "from agentex.lib.utils.dev_tools import subscribe_to_async_task_messages\n", - "\n", - "task_messages = subscribe_to_async_task_messages(\n", - " client=client,\n", - " task=task, \n", - " only_after_timestamp=event.created_at, \n", - " print_messages=True,\n", - " rich_print=True,\n", - " timeout=5,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - 
"nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.9" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/examples/tutorials/10_async/00_base/010_multiturn/manifest.yaml b/examples/tutorials/10_async/00_base/010_multiturn/manifest.yaml deleted file mode 100644 index 5d21e78d..00000000 --- a/examples/tutorials/10_async/00_base/010_multiturn/manifest.yaml +++ /dev/null @@ -1,122 +0,0 @@ -# Agent Manifest Configuration -# --------------------------- -# This file defines how your agent should be built and deployed. - -# Build Configuration -# ------------------ -# The build config defines what gets packaged into your agent's Docker image. -# This same configuration is used whether building locally or remotely. -# -# When building: -# 1. All files from include_paths are collected into a build context -# 2. The context is filtered by dockerignore rules -# 3. The Dockerfile uses this context to build your agent's image -# 4. The image is pushed to a registry and used to run your agent -build: - context: - # Root directory for the build context - root: ../../../ # Up to tutorials level to include test_utils - - # Paths to include in the Docker build context - # Must include: - # - Your agent's directory (your custom agent code) - # These paths are collected and sent to the Docker daemon for building - include_paths: - - 10_async/00_base/010_multiturn - - test_utils - - # Path to your agent's Dockerfile - # This defines how your agent's image is built from the context - # Relative to the root directory - dockerfile: 10_async/00_base/010_multiturn/Dockerfile - - # Path to your agent's .dockerignore - # Filters unnecessary files from the build context - # Helps keep build context small and builds fast - dockerignore: 10_async/00_base/010_multiturn/.dockerignore - - -# Local Development Configuration -# ----------------------------- -# Only used when running the agent locally -local_development: - agent: - port: 8000 # Port where your local ACP server is running - host_address: host.docker.internal # Host address for Docker networking (host.docker.internal for Docker, localhost for direct) - - # File paths for local development (relative to this manifest.yaml) - paths: - # Path to ACP server file - # Examples: - # project/acp.py (standard) - # src/server.py (custom structure) - # ../shared/acp.py (shared across projects) - # /absolute/path/acp.py (absolute path) - acp: project/acp.py - - -# Agent Configuration -# ----------------- -agent: - # Unique name for your agent - # Used for task routing and monitoring - name: ab010-multiturn - - # Type of ACP to use - # sync: Simple synchronous ACP implementation - # async: Advanced ACP with sub-types "base" or "temporal" (requires config) - acp_type: async - - # Description of what your agent does - # Helps with documentation and discovery - description: An AgentEx agent that echoes back the user's message - - # Temporal workflow configuration - # Set enabled: true to use Temporal workflows for long-running tasks - temporal: - enabled: false - - # Optional: Credentials mapping - # Maps Kubernetes secrets to environment variables - # Common credentials include: - # credentials: - # - env_var_name: OPENAI_API_KEY - # secret_name: openai-api-key - # secret_key: api-key - - # Optional: Set Environment variables for running your agent locally as well - # as for deployment later on - # env: - # - name: OPENAI_BASE_URL - # value: "https://api.openai.com/v1" - # - name: ACCOUNT_ID - # value: "your_account_id_here" - - -# 
Deployment Configuration -# ----------------------- -# Configuration for deploying your agent to Kubernetes clusters -deployment: - # Container image configuration - image: - repository: "" # Update with your container registry - tag: "latest" # Default tag, should be versioned in production - - # Global deployment settings that apply to all clusters - # These can be overridden in cluster-specific files (deploy/*.yaml) - global: - agent: - name: "ab010-multiturn" - description: "An AgentEx agent that echoes back the user's message" - - # Default replica count - replicaCount: 1 - - # Default resource requirements - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "1000m" - memory: "2Gi" \ No newline at end of file diff --git a/examples/tutorials/10_async/00_base/010_multiturn/project/__init__.py b/examples/tutorials/10_async/00_base/010_multiturn/project/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/examples/tutorials/10_async/00_base/010_multiturn/project/acp.py b/examples/tutorials/10_async/00_base/010_multiturn/project/acp.py deleted file mode 100644 index a32eed68..00000000 --- a/examples/tutorials/10_async/00_base/010_multiturn/project/acp.py +++ /dev/null @@ -1,167 +0,0 @@ -import os -from typing import List - -from agentex.lib import adk -from agentex.lib.types.acp import SendEventParams, CancelTaskParams, CreateTaskParams -from agentex.lib.types.fastacp import AsyncACPConfig -from agentex.lib.types.tracing import SGPTracingProcessorConfig -from agentex.lib.utils.logging import make_logger -from agentex.types.text_content import TextContent -from agentex.lib.utils.model_utils import BaseModel -from agentex.lib.types.llm_messages import ( - Message, - LLMConfig, - UserMessage, - SystemMessage, - AssistantMessage, -) -from agentex.lib.sdk.fastacp.fastacp import FastACP -from agentex.lib.core.tracing.tracing_processor_manager import ( - add_tracing_processor_config, -) - -logger = make_logger(__name__) - -# Add a tracing processor -add_tracing_processor_config( - SGPTracingProcessorConfig( - sgp_api_key=os.environ.get("SCALE_GP_API_KEY", ""), sgp_account_id=os.environ.get("SCALE_GP_ACCOUNT_ID", "") - ) -) - -# Create an ACP server - -# !!! Warning: Because "Async" ACPs are designed to be fully asynchronous, race conditions can occur if parallel events are sent. It is highly recommended to use the "temporal" type in the AsyncACPConfig instead to handle complex use cases. The "base" ACP is only designed to be used for simple use cases and for learning purposes. -acp = FastACP.create( - acp_type="async", - config=AsyncACPConfig(type="base"), -) - - -class StateModel(BaseModel): - messages: List[Message] - - -@acp.on_task_create -async def handle_task_create(params: CreateTaskParams): - # Upon task creation, we initialize the task state with a system message. - # This will be fetched by the `on_task_event_send` handler when each event is sent. - - ######################################################### - # 1. Initialize the task state. - ######################################################### - - state = StateModel(messages=[SystemMessage(content="You are a helpful assistant that can answer questions.")]) - await adk.state.create(task_id=params.task.id, agent_id=params.agent.id, state=state) - - -@acp.on_task_event_send -async def handle_event_send(params: SendEventParams): - # !!! Warning: Because "Async" ACPs are designed to be fully asynchronous, race conditions can occur if parallel events are sent. 
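    # (Concretely: two events handled in parallel can both read the same state
    # snapshot in step 5 below, then each write back its own copy in step 9,
    # so one turn's messages can silently overwrite the other's.)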
It is highly recommended to use the "temporal" type in the AsyncACPConfig instead to handle complex use cases. The "base" ACP is only designed to be used for simple use cases and for learning purposes. - - ######################################################### - # 2. Validate the event content. - ######################################################### - if not params.event.content: - return - - if params.event.content.type != "text": - raise ValueError(f"Expected text message, got {params.event.content.type}") - - if params.event.content.author != "user": - raise ValueError(f"Expected user message, got {params.event.content.author}") - - ######################################################### - # 3. Echo back the user's message so it shows up in the UI. - ######################################################### - - await adk.messages.create( - task_id=params.task.id, - trace_id=params.task.id, - content=params.event.content, - ) - - ######################################################### - # 4. (๐Ÿ‘‹) If the OpenAI API key is not set, send a message to the user to let them know. - ######################################################### - - if not os.environ.get("OPENAI_API_KEY"): - await adk.messages.create( - task_id=params.task.id, - trace_id=params.task.id, - content=TextContent( - author="agent", - content="Hey, sorry I'm unable to respond to your message because you're running this example without an OpenAI API key. Please set the OPENAI_API_KEY environment variable to run this example. Do this either by adding a .env file to the project/ directory or by setting the environment variable in your terminal.", - ), - ) - - ######################################################### - # 5. (๐Ÿ‘‹) Retrieve the task state. - ######################################################### - - task_state = await adk.state.get_by_task_and_agent(task_id=params.task.id, agent_id=params.agent.id) - if not task_state: - raise ValueError("Task state not found - ensure task was properly initialized") - state = StateModel.model_validate(task_state.state) - - ######################################################### - # 6. (๐Ÿ‘‹) Add the new user message to the message history - ######################################################### - - # Safely extract content from the event - content_text = "" - if hasattr(params.event.content, "content"): - content_val = getattr(params.event.content, "content", "") - if isinstance(content_val, str): - content_text = content_val - state.messages.append(UserMessage(content=content_text)) - - ######################################################### - # 7. (๐Ÿ‘‹) Call an LLM to respond to the user's message - ######################################################### - - # Call an LLM to respond to the user's message - chat_completion = await adk.providers.litellm.chat_completion( - llm_config=LLMConfig(model="gpt-4o-mini", messages=state.messages), - trace_id=params.task.id, - ) - response_content = "" - if chat_completion.choices[0].message: - response_content = chat_completion.choices[0].message.content or "" - state.messages.append(AssistantMessage(content=response_content)) - - ######################################################### - # 8.
(๐Ÿ‘‹) Send agent response to client - ######################################################### - - if chat_completion.choices[0].message: - content_str = chat_completion.choices[0].message.content or "" - else: - content_str = "" - - await adk.messages.create( - task_id=params.task.id, - trace_id=params.task.id, - content=TextContent( - author="agent", - content=content_str, - ), - ) - - ######################################################### - # 9. (๐Ÿ‘‹) Store the messages in the task state for the next turn - ######################################################### - - await adk.state.update( - state_id=task_state.id, - task_id=params.task.id, - agent_id=params.agent.id, - state=state, - trace_id=params.task.id, - ) - - -@acp.on_task_cancel -async def handle_task_cancel(params: CancelTaskParams): - """Default task cancel handler""" - logger.info(f"Task canceled: {params.task}") diff --git a/examples/tutorials/10_async/00_base/010_multiturn/pyproject.toml b/examples/tutorials/10_async/00_base/010_multiturn/pyproject.toml deleted file mode 100644 index 8b0bb1c1..00000000 --- a/examples/tutorials/10_async/00_base/010_multiturn/pyproject.toml +++ /dev/null @@ -1,33 +0,0 @@ -[build-system] -requires = ["hatchling"] -build-backend = "hatchling.build" - -[project] -name = "ab010-multiturn" -version = "0.1.0" -description = "An AgentEx agent that echoes back the user's message" -readme = "README.md" -requires-python = ">=3.12" -dependencies = [ - "agentex-sdk", - "scale-gp", -] - -[project.optional-dependencies] -dev = [ - "pytest", - "black", - "isort", - "flake8", -] - -[tool.hatch.build.targets.wheel] -packages = ["project"] - -[tool.black] -line-length = 88 -target-version = ['py312'] - -[tool.isort] -profile = "black" -line_length = 88 \ No newline at end of file diff --git a/examples/tutorials/10_async/00_base/010_multiturn/tests/test_agent.py b/examples/tutorials/10_async/00_base/010_multiturn/tests/test_agent.py deleted file mode 100644 index 4da1745c..00000000 --- a/examples/tutorials/10_async/00_base/010_multiturn/tests/test_agent.py +++ /dev/null @@ -1,211 +0,0 @@ -""" -Sample tests for AgentEx ACP agent. - -This test suite demonstrates how to test the main AgentEx API functions: -- Non-streaming event sending and polling -- Streaming event sending - -To run these tests: -1. Make sure the agent is running (via docker-compose or `agentex agents run`) -2. Set the AGENTEX_API_BASE_URL environment variable if not using default -3. 
Run: pytest test_agent.py -v - -Configuration: -- AGENTEX_API_BASE_URL: Base URL for the AgentEx server (default: http://localhost:5003) -- AGENT_NAME: Name of the agent to test (default: ab010-multiturn) -""" - -import os -import uuid -import asyncio -from typing import List - -import pytest -import pytest_asyncio -from test_utils.async_utils import ( - stream_agent_response, - send_event_and_poll_yielding, -) - -from agentex import AsyncAgentex -from agentex.types import TextContent -from agentex.types.agent_rpc_params import ParamsCreateTaskRequest -from agentex.types.text_content_param import TextContentParam - -# Configuration from environment variables -AGENTEX_API_BASE_URL = os.environ.get("AGENTEX_API_BASE_URL", "http://localhost:5003") -AGENT_NAME = os.environ.get("AGENT_NAME", "ab010-multiturn") - - -@pytest_asyncio.fixture -async def client(): - """Create an AsyncAgentex client instance for testing.""" - client = AsyncAgentex(base_url=AGENTEX_API_BASE_URL) - yield client - await client.close() - - -@pytest.fixture -def agent_name(): - """Return the agent name for testing.""" - return AGENT_NAME - - -@pytest_asyncio.fixture -async def agent_id(client, agent_name): - """Retrieve the agent ID based on the agent name.""" - agents = await client.agents.list() - for agent in agents: - if agent.name == agent_name: - return agent.id - raise ValueError(f"Agent with name {agent_name} not found.") - - -class TestNonStreamingEvents: - """Test non-streaming event sending and polling.""" - - @pytest.mark.asyncio - async def test_send_event_and_poll(self, client: AsyncAgentex, agent_id: str): - """Test sending an event and polling for the response.""" - # Create a task for this conversation - task_response = await client.agents.create_task(agent_id, params=ParamsCreateTaskRequest(name=uuid.uuid1().hex)) - task = task_response.result - assert task is not None - - await asyncio.sleep(1) # wait for state to be initialized - states = await client.states.list(agent_id=agent_id, task_id=task.id) - assert len(states) == 1 - - state = states[0].state - assert state is not None - messages = state.get("messages", []) - assert isinstance(messages, List) - assert len(messages) == 1 # initial message - message = messages[0] - assert message == { - "role": "system", - "content": "You are a helpful assistant that can answer questions.", - } - - user_message = "Hello!
Here is my test message" - messages = [] - async for message in send_event_and_poll_yielding( - client=client, - agent_id=agent_id, - task_id=task.id, - user_message=user_message, - timeout=30, - sleep_interval=1.0, - ): - messages.append(message) - if len(messages) == 1: - assert message.content == TextContent( - author="user", - content=user_message, - type="text", - ) - else: - assert message.content is not None - assert message.content.author == "agent" - break - - await asyncio.sleep(1) # wait for state to be updated - states = await client.states.list(agent_id=agent_id, task_id=task.id) - assert len(states) == 1 - state = states[0].state - messages = state.get("messages", []) - - assert isinstance(messages, list) - assert len(messages) == 3 - - -class TestStreamingEvents: - """Test streaming event sending.""" - - @pytest.mark.asyncio - async def test_send_event_and_stream(self, client: AsyncAgentex, agent_id: str): - """Test sending an event and streaming the response.""" - # Create a task for this conversation - task_response = await client.agents.create_task(agent_id, params=ParamsCreateTaskRequest(name=uuid.uuid1().hex)) - task = task_response.result - assert task is not None - - # Check initial state - states = await client.states.list(agent_id=agent_id, task_id=task.id) - assert len(states) == 1 - - state = states[0].state - assert state is not None - messages = state.get("messages", []) - assert isinstance(messages, List) - assert len(messages) == 1 # initial message - message = messages[0] - assert message == { - "role": "system", - "content": "You are a helpful assistant that can answer questions.", - } - user_message = "Hello! Here is my streaming test message" - - # Collect events from stream - all_events = [] - - # Flags to track what we've received - user_message_found = False - agent_response_found = False - - async def stream_messages(): - nonlocal user_message_found, agent_response_found - - async for event in stream_agent_response( - client=client, - task_id=task.id, - timeout=15, - ): - all_events.append(event) - - # Check events as they arrive - event_type = event.get("type") - if event_type == "full": - content = event.get("content", {}) - if content.get("content") == user_message and content.get("author") == "user": - # User message should come before agent response - assert not agent_response_found, "User message arrived after agent response (incorrect order)" - user_message_found = True - elif content.get("author") == "agent": - # Agent response should come after user message - assert user_message_found, "Agent response arrived before user message (incorrect order)" - agent_response_found = True - - # Exit early if we've found both messages - if user_message_found and agent_response_found: - break - - stream_task = asyncio.create_task(stream_messages()) - - event_content = TextContentParam(type="text", author="user", content=user_message) - await client.agents.send_event(agent_id=agent_id, params={"task_id": task.id, "content": event_content}) - - # Wait for streaming to complete (with timeout) - try: - await asyncio.wait_for(stream_task, timeout=15) - except asyncio.TimeoutError: - pytest.fail("Stream timed out after 15s waiting for expected messages") - - # Validate we received events - assert len(all_events) > 0, "No events received in streaming response" - assert user_message_found, "User message not found in stream" - assert agent_response_found, "Agent response not found in stream" - - # Verify the state has been updated - await asyncio.sleep(1) # wait for 
state to be updated - states = await client.states.list(agent_id=agent_id, task_id=task.id) - assert len(states) == 1 - state = states[0].state - messages = state.get("messages", []) - - assert isinstance(messages, list) - assert len(messages) == 3 - - -if __name__ == "__main__": - pytest.main([__file__, "-v"]) diff --git a/examples/tutorials/10_async/00_base/020_streaming/.dockerignore b/examples/tutorials/10_async/00_base/020_streaming/.dockerignore deleted file mode 100644 index c2d7fca4..00000000 --- a/examples/tutorials/10_async/00_base/020_streaming/.dockerignore +++ /dev/null @@ -1,43 +0,0 @@ -# Python -__pycache__/ -*.py[cod] -*$py.class -*.so -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -*.egg-info/ -.installed.cfg -*.egg - -# Environments -.env** -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# IDE -.idea/ -.vscode/ -*.swp -*.swo - -# Git -.git -.gitignore - -# Misc -.DS_Store diff --git a/examples/tutorials/10_async/00_base/020_streaming/Dockerfile b/examples/tutorials/10_async/00_base/020_streaming/Dockerfile deleted file mode 100644 index 447ca292..00000000 --- a/examples/tutorials/10_async/00_base/020_streaming/Dockerfile +++ /dev/null @@ -1,50 +0,0 @@ -# syntax=docker/dockerfile:1.3 -FROM python:3.12-slim -COPY --from=ghcr.io/astral-sh/uv:0.6.4 /uv /uvx /bin/ - -# Install system dependencies -RUN apt-get update && apt-get install -y \ - htop \ - vim \ - curl \ - tar \ - python3-dev \ - postgresql-client \ - build-essential \ - libpq-dev \ - gcc \ - cmake \ - netcat-openbsd \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - -RUN uv pip install --system --upgrade pip setuptools wheel - -ENV UV_HTTP_TIMEOUT=1000 - -# Copy pyproject.toml and README.md to install dependencies -COPY 10_async/00_base/020_streaming/pyproject.toml /app/020_streaming/pyproject.toml -COPY 10_async/00_base/020_streaming/README.md /app/020_streaming/README.md - -WORKDIR /app/020_streaming - -# Copy the project code -COPY 10_async/00_base/020_streaming/project /app/020_streaming/project - -# Copy the test files -COPY 10_async/00_base/020_streaming/tests /app/020_streaming/tests - -# Copy shared test utilities -COPY test_utils /app/test_utils - -# Install the required Python packages with dev dependencies (includes pytest) -RUN uv pip install --system .[dev] pytest-asyncio httpx - -# Set environment variables -ENV PYTHONPATH=/app - -# Set test environment variables -ENV AGENT_NAME=ab020-streaming - -# Run the agent using uvicorn -CMD ["uvicorn", "project.acp:acp", "--host", "0.0.0.0", "--port", "8000"] diff --git a/examples/tutorials/10_async/00_base/020_streaming/README.md b/examples/tutorials/10_async/00_base/020_streaming/README.md deleted file mode 100644 index 17c19b57..00000000 --- a/examples/tutorials/10_async/00_base/020_streaming/README.md +++ /dev/null @@ -1,47 +0,0 @@ -# [Agentic] Streaming - -Stream responses in async agents using `adk.messages.create()` to send progressive updates. More flexible than sync streaming since you can send multiple messages at any time. 
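As a minimal sketch of what that looks like with a streamed LLM call (this uses `adk.providers.litellm.chat_completion_stream_auto_send` from later in this tutorial's `project/acp.py`; the `stream_reply` wrapper and its arguments are illustrative):

```python
from agentex.lib import adk
from agentex.lib.types.acp import SendEventParams
from agentex.lib.types.llm_messages import LLMConfig, Message


async def stream_reply(params: SendEventParams, messages: list[Message]) -> None:
    # Managed streaming: chunks are pushed to the client as they are generated,
    # and the completed agent TaskMessage is returned at the end.
    await adk.providers.litellm.chat_completion_stream_auto_send(
        task_id=params.task.id,
        llm_config=LLMConfig(model="gpt-4o-mini", messages=messages, stream=True),
        trace_id=params.task.id,
    )
```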
- -## What You'll Learn -- How to stream with explicit message creation -- Difference between sync and async streaming patterns -- When to send multiple messages vs single streamed response - -## Prerequisites -- Development environment set up (see [main repo README](https://github.com/scaleapi/scale-agentex)) -- Backend services running: `make dev` from repository root -- Understanding of async basics (see [000_hello_acp](../000_hello_acp/)) - -## Quick Start - -```bash -cd examples/tutorials/10_async/00_base/020_streaming -uv run agentex agents run --manifest manifest.yaml -``` - -## Key Pattern - -```python -@acp.on_task_event_send -async def handle_event_send(params: SendEventParams): - # Send first message - await adk.messages.create(task_id=task_id, content=...) - - # Do work... - - # Send second message - await adk.messages.create(task_id=task_id, content=...) -``` - -Unlike sync streaming (which uses async generators), async streaming uses explicit message creation calls, giving you more control over when and what to send. - -## When to Use -- Multi-step processes with intermediate results -- Long-running operations with progress updates -- Agents that need to send messages at arbitrary times -- More complex streaming patterns than simple LLM responses - -## Why This Matters -Agentic streaming is more powerful than sync streaming. You can send messages at any time, from anywhere in your code, and even from background tasks. This flexibility is essential for complex agents with multiple concurrent operations. - -**Next:** [030_tracing](../030_tracing/) - Add observability to your agents diff --git a/examples/tutorials/10_async/00_base/020_streaming/dev.ipynb b/examples/tutorials/10_async/00_base/020_streaming/dev.ipynb deleted file mode 100644 index f66be24d..00000000 --- a/examples/tutorials/10_async/00_base/020_streaming/dev.ipynb +++ /dev/null @@ -1,126 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "from agentex import Agentex\n", - "\n", - "client = Agentex(base_url=\"http://localhost:5003\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1", - "metadata": {}, - "outputs": [], - "source": [ - "AGENT_NAME = \"ab020-streaming\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2", - "metadata": {}, - "outputs": [], - "source": [ - "# (REQUIRED) Create a new task. 
For Agentic agents, you must create a task for messages to be associated with.\n", - "import uuid\n", - "\n", - "rpc_response = client.agents.create_task(\n", - " agent_name=AGENT_NAME,\n", - " params={\n", - " \"name\": f\"{str(uuid.uuid4())[:8]}-task\",\n", - " \"params\": {}\n", - " }\n", - ")\n", - "\n", - "task = rpc_response.result\n", - "print(task)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": {}, - "outputs": [], - "source": [ - "# Send an event to the agent\n", - "\n", - "# The response is expected to be a list of TaskMessage objects, which is a union of the following types:\n", - "# - TextContent: A message with just text content \n", - "# - DataContent: A message with JSON-serializable data content\n", - "# - ToolRequestContent: A message with a tool request, which contains a JSON-serializable request to call a tool\n", - "# - ToolResponseContent: A message with a tool response, which contains response object from a tool call in its content\n", - "\n", - "# When processing the message/send response, if you are expecting more than TextContent, such as DataContent, ToolRequestContent, or ToolResponseContent, you can process them as well\n", - "\n", - "rpc_response = client.agents.send_event(\n", - " agent_name=AGENT_NAME,\n", - " params={\n", - " \"content\": {\"type\": \"text\", \"author\": \"user\", \"content\": \"Hello what can you do?\"},\n", - " \"task_id\": task.id,\n", - " }\n", - ")\n", - "\n", - "event = rpc_response.result\n", - "print(event)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4", - "metadata": {}, - "outputs": [], - "source": [ - "# Subscribe to the async task messages produced by the agent\n", - "from agentex.lib.utils.dev_tools import subscribe_to_async_task_messages\n", - "\n", - "task_messages = subscribe_to_async_task_messages(\n", - " client=client,\n", - " task=task, \n", - " only_after_timestamp=event.created_at, \n", - " print_messages=True,\n", - " rich_print=True,\n", - " timeout=5,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.9" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/examples/tutorials/10_async/00_base/020_streaming/manifest.yaml b/examples/tutorials/10_async/00_base/020_streaming/manifest.yaml deleted file mode 100644 index bd5673a6..00000000 --- a/examples/tutorials/10_async/00_base/020_streaming/manifest.yaml +++ /dev/null @@ -1,119 +0,0 @@ -# Agent Manifest Configuration -# --------------------------- -# This file defines how your agent should be built and deployed. - -# Build Configuration -# ------------------ -# The build config defines what gets packaged into your agent's Docker image. -# This same configuration is used whether building locally or remotely. -# -# When building: -# 1. All files from include_paths are collected into a build context -# 2. The context is filtered by dockerignore rules -# 3. The Dockerfile uses this context to build your agent's image -# 4. 
The image is pushed to a registry and used to run your agent -build: - context: - # Root directory for the build context - root: ../../../ # Up to tutorials level to include test_utils - - # Paths to include in the Docker build context - # Must include: - # - Your agent's directory (your custom agent code) - # These paths are collected and sent to the Docker daemon for building - include_paths: - - 10_async/00_base/020_streaming - - test_utils - - # Path to your agent's Dockerfile - # This defines how your agent's image is built from the context - # Relative to the root directory - dockerfile: 10_async/00_base/020_streaming/Dockerfile - - # Path to your agent's .dockerignore - # Filters unnecessary files from the build context - # Helps keep build context small and builds fast - dockerignore: 10_async/00_base/020_streaming/.dockerignore - - -# Local Development Configuration -# ----------------------------- -# Only used when running the agent locally -local_development: - agent: - port: 8000 # Port where your local ACP server is running - host_address: host.docker.internal # Host address for Docker networking (host.docker.internal for Docker, localhost for direct) - - # File paths for local development (relative to this manifest.yaml) - paths: - # Path to ACP server file - # Examples: - # project/acp.py (standard) - # src/server.py (custom structure) - # ../shared/acp.py (shared across projects) - # /absolute/path/acp.py (absolute path) - acp: project/acp.py - - -# Agent Configuration -# ----------------- -agent: - acp_type: async - - # Unique name for your agent - # Used for task routing and monitoring - name: ab020-streaming - - # Description of what your agent does - # Helps with documentation and discovery - description: A multiturn AgentEx agent that streams outputs - - # Temporal workflow configuration - # Set enabled: true to use Temporal workflows for long-running tasks - temporal: - enabled: false - - # Optional: Credentials mapping - # Maps Kubernetes secrets to environment variables - # Common credentials include: - # credentials: - # - env_var_name: OPENAI_API_KEY - # secret_name: openai-api-key - # secret_key: api-key - - # Optional: Set Environment variables for running your agent locally as well - # as for deployment later on - # env: - # - name: OPENAI_BASE_URL - # value: "https://api.openai.com/v1" - # - name: ACCOUNT_ID - # value: "your_account_id_here" - - -# Deployment Configuration -# ----------------------- -# Configuration for deploying your agent to Kubernetes clusters -deployment: - # Container image configuration - image: - repository: "" # Update with your container registry - tag: "latest" # Default tag, should be versioned in production - - # Global deployment settings that apply to all clusters - # These can be overridden in cluster-specific files (deploy/*.yaml) - global: - agent: - name: "ab020-streaming" - description: "A multiturn AgentEx agent that streams outputs" - - # Default replica count - replicaCount: 1 - - # Default resource requirements - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "1000m" - memory: "2Gi" diff --git a/examples/tutorials/10_async/00_base/020_streaming/project/__init__.py b/examples/tutorials/10_async/00_base/020_streaming/project/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/examples/tutorials/10_async/00_base/020_streaming/project/acp.py b/examples/tutorials/10_async/00_base/020_streaming/project/acp.py deleted file mode 100644 index 41e44912..00000000 --- 
a/examples/tutorials/10_async/00_base/020_streaming/project/acp.py +++ /dev/null @@ -1,144 +0,0 @@ -import os -from typing import List - -from agentex.lib import adk -from agentex.lib.types.acp import SendEventParams, CancelTaskParams, CreateTaskParams -from agentex.lib.types.fastacp import AsyncACPConfig -from agentex.lib.utils.logging import make_logger -from agentex.types.text_content import TextContent -from agentex.lib.utils.model_utils import BaseModel -from agentex.lib.types.llm_messages import Message, LLMConfig, UserMessage, SystemMessage, AssistantMessage -from agentex.lib.sdk.fastacp.fastacp import FastACP - -logger = make_logger(__name__) - - -# Create an ACP server - -# !!! Warning: Because "Async" ACPs are designed to be fully asynchronous, race conditions can occur if parallel events are sent. It is highly recommended to use the "temporal" type in the AsyncACPConfig instead to handle complex use cases. The "base" ACP is only designed to be used for simple use cases and for learning purposes. -acp = FastACP.create( - acp_type="async", - config=AsyncACPConfig(type="base"), -) - -class StateModel(BaseModel): - messages: List[Message] - - -@acp.on_task_create -async def handle_task_create(params: CreateTaskParams): - # Upon task creation, we initialize the task state with a system message. - # This will be fetched by the `on_task_event_send` handler when each event is sent. - - ######################################################### - # 1. Initialize the task state. - ######################################################### - - state = StateModel(messages=[SystemMessage(content="You are a helpful assistant that can answer questions.")]) - await adk.state.create(task_id=params.task.id, agent_id=params.agent.id, state=state) - -@acp.on_task_event_send -async def handle_event_send(params: SendEventParams): - # !!! Warning: Because "Async" ACPs are designed to be fully asynchronous, race conditions can occur if parallel events are sent. It is highly recommended to use the "temporal" type in the AsyncACPConfig instead to handle complex use cases. The "base" ACP is only designed to be used for simple use cases and for learning purposes. - - ######################################################### - # 2. Validate the event content. - ######################################################### - if not params.event.content: - return - - if params.event.content.type != "text": - raise ValueError(f"Expected text message, got {params.event.content.type}") - - if params.event.content.author != "user": - raise ValueError(f"Expected user message, got {params.event.content.author}") - - ######################################################### - # 3. Echo back the user's message. - ######################################################### - - await adk.messages.create( - task_id=params.task.id, - trace_id=params.task.id, - content=params.event.content, - ) - - ######################################################### - # 4. If the OpenAI API key is not set, send a message to the user to let them know. - ######################################################### - - if not os.environ.get("OPENAI_API_KEY"): - await adk.messages.create( - task_id=params.task.id, - trace_id=params.task.id, - content=TextContent( - author="agent", - content="Hey, sorry I'm unable to respond to your message because you're running this example without an OpenAI API key. Please set the OPENAI_API_KEY environment variable to run this example.
Do this either by adding a .env file to the project/ directory or by setting the environment variable in your terminal.", - ), - ) - - ######################################################### - # 5. Retrieve the task state. - ######################################################### - - task_state = await adk.state.get_by_task_and_agent(task_id=params.task.id, agent_id=params.agent.id) - if not task_state: - raise ValueError("Task state not found - ensure task was properly initialized") - state = StateModel.model_validate(task_state.state) - - ######################################################### - # 6. Add the new user message to the message history - ######################################################### - - # Safely extract content from the event - content_text = "" - if hasattr(params.event.content, 'content'): - content_val = getattr(params.event.content, 'content', '') - if isinstance(content_val, str): - content_text = content_val - state.messages.append(UserMessage(content=content_text)) - - ######################################################### - # 7. (๐Ÿ‘‹) Call an LLM to respond to the user's message - ######################################################### - - # When we use the streaming version of chat completion, we can either use the `chat_completion_stream_auto_send` method, or we can use the `chat_completion_stream` method. Here is the difference: - - # `chat_completion_stream_auto_send` - This is the "managed version" of the streaming method. It will automatically send the response to the client as an agent TaskMessage. - - # `chat_completion_stream` - This is the "unmanaged version" of the streaming method. It will return a generator of chat completion chunks. You can then process the chunks however you like, such as sending them to the client as an agent message or storing them in the task state. - - # Here we use the `chat_completion_stream_auto_send` method. - ######################################################### - - task_message = await adk.providers.litellm.chat_completion_stream_auto_send( - task_id=params.task.id, - llm_config=LLMConfig(model="gpt-4o-mini", messages=state.messages, stream=True), - trace_id=params.task.id, - ) - - # Safely extract content from the task message - response_text = "" - if task_message.content and hasattr(task_message.content, 'content'): # type: ignore[union-attr] - content_val = getattr(task_message.content, 'content', '') # type: ignore[union-attr] - if isinstance(content_val, str): - response_text = content_val - state.messages.append(AssistantMessage(content=response_text)) - - ######################################################### - # 8.
Store the messages in the task state for the next turn - ######################################################### - - await adk.state.update( - state_id=task_state.id, - task_id=params.task.id, - agent_id=params.agent.id, - state=state, - trace_id=params.task.id, - ) - -@acp.on_task_cancel -async def handle_task_cancel(params: CancelTaskParams): - """Default task cancel handler""" - logger.info(f"Task canceled: {params.task}") - diff --git a/examples/tutorials/10_async/00_base/020_streaming/pyproject.toml b/examples/tutorials/10_async/00_base/020_streaming/pyproject.toml deleted file mode 100644 index 271bcaac..00000000 --- a/examples/tutorials/10_async/00_base/020_streaming/pyproject.toml +++ /dev/null @@ -1,33 +0,0 @@ -[build-system] -requires = ["hatchling"] -build-backend = "hatchling.build" - -[project] -name = "ab020-streaming" -version = "0.1.0" -description = "A multiturn AgentEx agent that streams outputs" -readme = "README.md" -requires-python = ">=3.12" -dependencies = [ - "agentex-sdk", - "scale-gp", -] - -[project.optional-dependencies] -dev = [ - "pytest", - "black", - "isort", - "flake8", -] - -[tool.hatch.build.targets.wheel] -packages = ["project"] - -[tool.black] -line-length = 88 -target-version = ['py312'] - -[tool.isort] -profile = "black" -line_length = 88 \ No newline at end of file diff --git a/examples/tutorials/10_async/00_base/020_streaming/tests/test_agent.py b/examples/tutorials/10_async/00_base/020_streaming/tests/test_agent.py deleted file mode 100644 index d863199c..00000000 --- a/examples/tutorials/10_async/00_base/020_streaming/tests/test_agent.py +++ /dev/null @@ -1,212 +0,0 @@ -""" -Sample tests for AgentEx ACP agent. - -This test suite demonstrates how to test the main AgentEx API functions: -- Non-streaming event sending and polling -- Streaming event sending - -To run these tests: -1. Make sure the agent is running (via docker-compose or `agentex agents run`) -2. Set the AGENTEX_API_BASE_URL environment variable if not using default -3. 
Run: pytest test_agent.py -v - -Configuration: -- AGENTEX_API_BASE_URL: Base URL for the AgentEx server (default: http://localhost:5003) -- AGENT_NAME: Name of the agent to test (default: ab020-streaming) -""" - -import os -import uuid -import asyncio -from typing import List - -import pytest -import pytest_asyncio -from test_utils.async_utils import ( - stream_agent_response, - send_event_and_poll_yielding, -) - -from agentex import AsyncAgentex -from agentex.types import TaskMessage, TextContent -from agentex.types.agent_rpc_params import ParamsCreateTaskRequest -from agentex.types.text_content_param import TextContentParam - -# Configuration from environment variables -AGENTEX_API_BASE_URL = os.environ.get("AGENTEX_API_BASE_URL", "http://localhost:5003") -AGENT_NAME = os.environ.get("AGENT_NAME", "ab020-streaming") - - -@pytest_asyncio.fixture -async def client(): - """Create an AsyncAgentex client instance for testing.""" - client = AsyncAgentex(base_url=AGENTEX_API_BASE_URL) - yield client - await client.close() - - -@pytest.fixture -def agent_name(): - """Return the agent name for testing.""" - return AGENT_NAME - - -@pytest_asyncio.fixture -async def agent_id(client, agent_name): - """Retrieve the agent ID based on the agent name.""" - agents = await client.agents.list() - for agent in agents: - if agent.name == agent_name: - return agent.id - raise ValueError(f"Agent with name {agent_name} not found.") - - -class TestNonStreamingEvents: - """Test non-streaming event sending and polling.""" - - @pytest.mark.asyncio - async def test_send_event_and_poll(self, client: AsyncAgentex, agent_id: str): - """Test sending an event and polling for the response.""" - # Create a task for this conversation - task_response = await client.agents.create_task(agent_id, params=ParamsCreateTaskRequest(name=uuid.uuid1().hex)) - task = task_response.result - assert task is not None - - await asyncio.sleep(1) # wait for state to be initialized - states = await client.states.list(agent_id=agent_id, task_id=task.id) - assert len(states) == 1 - - state = states[0].state - assert state is not None - messages = state.get("messages", []) - assert isinstance(messages, List) - assert len(messages) == 1 # initial message - message = messages[0] - assert message == { - "role": "system", - "content": "You are a helpful assistant that can answer questions.", - } - - user_message = "Hello! 
Here is my test message" - messages = [] - async for message in send_event_and_poll_yielding( - client=client, - agent_id=agent_id, - task_id=task.id, - user_message=user_message, - timeout=30, - sleep_interval=1.0, - yield_updates=False, - ): - - messages.append(message) - - assert len(messages) > 0 - # the first message should be the agent re-iterating what the user sent - assert isinstance(messages, List) - assert len(messages) == 2 - first_message: TaskMessage = messages[0] - assert first_message.content == TextContent( - author="user", - content=user_message, - type="text", - ) - - second_message: TaskMessage = messages[1] - assert second_message.content is not None - assert second_message.content.author == "agent" - - # assert the state has been updated - await asyncio.sleep(1) # wait for state to be updated - states = await client.states.list(agent_id=agent_id, task_id=task.id) - assert len(states) == 1 - state = states[0].state - messages = state.get("messages", []) - - assert isinstance(messages, list) - assert len(messages) == 3 - - -class TestStreamingEvents: - """Test streaming event sending.""" - - @pytest.mark.asyncio - async def test_send_event_and_stream(self, client: AsyncAgentex, agent_id: str): - """Test sending an event and streaming the response.""" - # Create a task for this conversation - task_response = await client.agents.create_task(agent_id, params=ParamsCreateTaskRequest(name=uuid.uuid1().hex)) - task = task_response.result - assert task is not None - - # Check initial state - await asyncio.sleep(1) # wait for state to be initialized - states = await client.states.list(agent_id=agent_id, task_id=task.id) - assert len(states) == 1 - - state = states[0].state - assert state is not None - messages = state.get("messages", []) - assert isinstance(messages, List) - assert len(messages) == 1 # initial message - message = messages[0] - assert message == { - "role": "system", - "content": "You are a helpful assistant that can answer questions.", - } - user_message = "Hello! This is my first message. Can you please tell me something interesting about yourself?" 
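# Note on the event shapes consumed below (inferred from these assertions rather
# than from a published schema): the stream helper yields dicts whose "type" is
# either "full" (a complete message payload under "content") or "delta" (an
# incremental text chunk), mirroring the StreamTaskMessageFull and
# StreamTaskMessageDelta types used elsewhere in this diff.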
- - # Collect events from stream - all_events = [] - - async def stream_messages() -> None: - async for event in stream_agent_response( - client=client, - task_id=task.id, - timeout=15, - ): - all_events.append(event) - - stream_task = asyncio.create_task(stream_messages()) - - event_content = TextContentParam(type="text", author="user", content=user_message) - await client.agents.send_event(agent_id=agent_id, params={"task_id": task.id, "content": event_content}) - - # Wait for streaming to complete - await stream_task - - # Validate we received events - assert len(all_events) > 0, "No events received in streaming response" - - # Check for user message, full agent response, and delta messages - user_message_found = False - full_agent_message_found = False - delta_messages_found = False - - for event in all_events: - event_type = event.get("type") - if event_type == "full": - content = event.get("content", {}) - if content.get("content") == user_message and content.get("author") == "user": - user_message_found = True - elif content.get("author") == "agent": - full_agent_message_found = True - elif event_type == "delta": - delta_messages_found = True - - assert user_message_found, "User message not found in stream" - assert full_agent_message_found, "Full agent message not found in stream" - assert delta_messages_found, "Delta messages not found in stream (streaming response expected)" - - # Verify the state has been updated - await asyncio.sleep(1) # wait for state to be updated - states = await client.states.list(agent_id=agent_id, task_id=task.id) - assert len(states) == 1 - state: dict[str, object] = states[0].state - messages = state.get("messages", []) - - assert isinstance(messages, list) - assert len(messages) == 3 - - -if __name__ == "__main__": - pytest.main([__file__, "-v"]) diff --git a/examples/tutorials/10_async/00_base/030_tracing/.dockerignore b/examples/tutorials/10_async/00_base/030_tracing/.dockerignore deleted file mode 100644 index c2d7fca4..00000000 --- a/examples/tutorials/10_async/00_base/030_tracing/.dockerignore +++ /dev/null @@ -1,43 +0,0 @@ -# Python -__pycache__/ -*.py[cod] -*$py.class -*.so -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -*.egg-info/ -.installed.cfg -*.egg - -# Environments -.env** -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# IDE -.idea/ -.vscode/ -*.swp -*.swo - -# Git -.git -.gitignore - -# Misc -.DS_Store diff --git a/examples/tutorials/10_async/00_base/030_tracing/Dockerfile b/examples/tutorials/10_async/00_base/030_tracing/Dockerfile deleted file mode 100644 index 2aee7e1d..00000000 --- a/examples/tutorials/10_async/00_base/030_tracing/Dockerfile +++ /dev/null @@ -1,50 +0,0 @@ -# syntax=docker/dockerfile:1.3 -FROM python:3.12-slim -COPY --from=ghcr.io/astral-sh/uv:0.6.4 /uv /uvx /bin/ - -# Install system dependencies -RUN apt-get update && apt-get install -y \ - htop \ - vim \ - curl \ - tar \ - python3-dev \ - postgresql-client \ - build-essential \ - libpq-dev \ - gcc \ - cmake \ - netcat-openbsd \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - -RUN uv pip install --system --upgrade pip setuptools wheel - -ENV UV_HTTP_TIMEOUT=1000 - -# Copy pyproject.toml and README.md to install dependencies -COPY 10_async/00_base/030_tracing/pyproject.toml /app/030_tracing/pyproject.toml -COPY 10_async/00_base/030_tracing/README.md /app/030_tracing/README.md - -WORKDIR /app/030_tracing - -# Copy the project code -COPY 10_async/00_base/030_tracing/project 
/app/030_tracing/project - -# Copy the test files -COPY 10_async/00_base/030_tracing/tests /app/030_tracing/tests - -# Copy shared test utilities -COPY test_utils /app/test_utils - -# Install the required Python packages with dev dependencies (includes pytest) -RUN uv pip install --system .[dev] pytest-asyncio httpx - -# Set environment variables -ENV PYTHONPATH=/app - -# Set test environment variables -ENV AGENT_NAME=ab030-tracing - -# Run the agent using uvicorn -CMD ["uvicorn", "project.acp:acp", "--host", "0.0.0.0", "--port", "8000"] diff --git a/examples/tutorials/10_async/00_base/030_tracing/README.md b/examples/tutorials/10_async/00_base/030_tracing/README.md deleted file mode 100644 index 1d91f565..00000000 --- a/examples/tutorials/10_async/00_base/030_tracing/README.md +++ /dev/null @@ -1,53 +0,0 @@ -# [Agentic] Tracing - -Add observability to your agents with spans and traces using `adk.tracing.start_span()`. Track execution flow, measure performance, and debug complex agent behaviors. - -## What You'll Learn -- How to instrument agents with tracing -- Creating hierarchical spans to track operations -- Viewing traces in Scale Groundplane -- Performance debugging with observability - -## Prerequisites -- Development environment set up (see [main repo README](https://github.com/scaleapi/scale-agentex)) -- Backend services running: `make dev` from repository root -- Understanding of async agents (see [000_hello_acp](../000_hello_acp/)) - -## Quick Start - -```bash -cd examples/tutorials/10_async/00_base/030_tracing -uv run agentex agents run --manifest manifest.yaml -``` - -## Key Pattern - -```python -# Start a span to track an operation -span = await adk.tracing.start_span( - trace_id=task.id, - name="LLM Call", - input={"prompt": prompt} -) - -# Do work... - -# End span with output -await adk.tracing.end_span( - span_id=span.id, - output={"response": response} -) -``` - -Spans create a hierarchical view of agent execution, making it easy to see which operations take time and where errors occur. - -## When to Use -- Debugging complex agent behaviors -- Performance optimization and bottleneck identification -- Production monitoring and observability -- Understanding execution flow in multi-step agents - -## Why This Matters -Without tracing, debugging agents is like flying blind. Tracing gives you visibility into what your agent is doing, how long operations take, and where failures occur. It's essential for production agents and invaluable during development. - -**Next:** [040_other_sdks](../040_other_sdks/) - Integrate any SDK or framework diff --git a/examples/tutorials/10_async/00_base/030_tracing/dev.ipynb b/examples/tutorials/10_async/00_base/030_tracing/dev.ipynb deleted file mode 100644 index f667737b..00000000 --- a/examples/tutorials/10_async/00_base/030_tracing/dev.ipynb +++ /dev/null @@ -1,126 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "from agentex import Agentex\n", - "\n", - "client = Agentex(base_url=\"http://localhost:5003\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1", - "metadata": {}, - "outputs": [], - "source": [ - "AGENT_NAME = \"ab030-tracing\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2", - "metadata": {}, - "outputs": [], - "source": [ - "# (REQUIRED) Create a new task. 
For Agentic agents, you must create a task for messages to be associated with.\n", - "import uuid\n", - "\n", - "rpc_response = client.agents.create_task(\n", - " agent_name=AGENT_NAME,\n", - " params={\n", - " \"name\": f\"{str(uuid.uuid4())[:8]}-task\",\n", - " \"params\": {}\n", - " }\n", - ")\n", - "\n", - "task = rpc_response.result\n", - "print(task)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": {}, - "outputs": [], - "source": [ - "# Send an event to the agent\n", - "\n", - "# The response is expected to be a list of TaskMessage objects, which is a union of the following types:\n", - "# - TextContent: A message with just text content \n", - "# - DataContent: A message with JSON-serializable data content\n", - "# - ToolRequestContent: A message with a tool request, which contains a JSON-serializable request to call a tool\n", - "# - ToolResponseContent: A message with a tool response, which contains response object from a tool call in its content\n", - "\n", - "# When processing the message/send response, if you are expecting more than TextContent, such as DataContent, ToolRequestContent, or ToolResponseContent, you can process them as well\n", - "\n", - "rpc_response = client.agents.send_event(\n", - " agent_name=AGENT_NAME,\n", - " params={\n", - " \"content\": {\"type\": \"text\", \"author\": \"user\", \"content\": \"Hello what can you do?\"},\n", - " \"task_id\": task.id,\n", - " }\n", - ")\n", - "\n", - "event = rpc_response.result\n", - "print(event)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4", - "metadata": {}, - "outputs": [], - "source": [ - "# Subscribe to the async task messages produced by the agent\n", - "from agentex.lib.utils.dev_tools import subscribe_to_async_task_messages\n", - "\n", - "task_messages = subscribe_to_async_task_messages(\n", - " client=client,\n", - " task=task, \n", - " only_after_timestamp=event.created_at, \n", - " print_messages=True,\n", - " rich_print=True,\n", - " timeout=5,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.9" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/examples/tutorials/10_async/00_base/030_tracing/manifest.yaml b/examples/tutorials/10_async/00_base/030_tracing/manifest.yaml deleted file mode 100644 index 3c9b2c14..00000000 --- a/examples/tutorials/10_async/00_base/030_tracing/manifest.yaml +++ /dev/null @@ -1,119 +0,0 @@ -# Agent Manifest Configuration -# --------------------------- -# This file defines how your agent should be built and deployed. - -# Build Configuration -# ------------------ -# The build config defines what gets packaged into your agent's Docker image. -# This same configuration is used whether building locally or remotely. -# -# When building: -# 1. All files from include_paths are collected into a build context -# 2. The context is filtered by dockerignore rules -# 3. The Dockerfile uses this context to build your agent's image -# 4. 
The image is pushed to a registry and used to run your agent -build: - context: - # Root directory for the build context - root: ../../../ # Up to tutorials level to include test_utils - - # Paths to include in the Docker build context - # Must include: - # - Your agent's directory (your custom agent code) - # These paths are collected and sent to the Docker daemon for building - include_paths: - - 10_async/00_base/030_tracing - - test_utils - - # Path to your agent's Dockerfile - # This defines how your agent's image is built from the context - # Relative to the root directory - dockerfile: 10_async/00_base/030_tracing/Dockerfile - - # Path to your agent's .dockerignore - # Filters unnecessary files from the build context - # Helps keep build context small and builds fast - dockerignore: 10_async/00_base/030_tracing/.dockerignore - - -# Local Development Configuration -# ----------------------------- -# Only used when running the agent locally -local_development: - agent: - port: 8000 # Port where your local ACP server is running - host_address: host.docker.internal # Host address for Docker networking (host.docker.internal for Docker, localhost for direct) - - # File paths for local development (relative to this manifest.yaml) - paths: - # Path to ACP server file - # Examples: - # project/acp.py (standard) - # src/server.py (custom structure) - # ../shared/acp.py (shared across projects) - # /absolute/path/acp.py (absolute path) - acp: project/acp.py - - -# Agent Configuration -# ----------------- -agent: - acp_type: async - - # Unique name for your agent - # Used for task routing and monitoring - name: ab030-tracing - - # Description of what your agent does - # Helps with documentation and discovery - description: An AgentEx agent that demonstrates how to do hierarchical and custom tracing - - # Temporal workflow configuration - # Set enabled: true to use Temporal workflows for long-running tasks - temporal: - enabled: false - - # Optional: Credentials mapping - # Maps Kubernetes secrets to environment variables - # Common credentials include: - # credentials: - # - env_var_name: OPENAI_API_KEY - # secret_name: openai-api-key - # secret_key: api-key - - # Optional: Set Environment variables for running your agent locally as well - # as for deployment later on - # env: - # - name: OPENAI_BASE_URL - # value: "https://api.openai.com/v1" - # - name: ACCOUNT_ID - # value: "your_account_id_here" - - -# Deployment Configuration -# ----------------------- -# Configuration for deploying your agent to Kubernetes clusters -deployment: - # Container image configuration - image: - repository: "" # Update with your container registry - tag: "latest" # Default tag, should be versioned in production - - # Global deployment settings that apply to all clusters - # These can be overridden in cluster-specific files (deploy/*.yaml) - global: - agent: - name: "ab030-tracing" - description: "An AgentEx agent that demonstrates how to do hierarchical and custom tracing" - - # Default replica count - replicaCount: 1 - - # Default resource requirements - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "1000m" - memory: "2Gi" \ No newline at end of file diff --git a/examples/tutorials/10_async/00_base/030_tracing/project/__init__.py b/examples/tutorials/10_async/00_base/030_tracing/project/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/examples/tutorials/10_async/00_base/030_tracing/project/acp.py b/examples/tutorials/10_async/00_base/030_tracing/project/acp.py 
deleted file mode 100644 index a46e7769..00000000 --- a/examples/tutorials/10_async/00_base/030_tracing/project/acp.py +++ /dev/null @@ -1,167 +0,0 @@ -import os -from typing import List - -from agentex.lib import adk -from agentex.lib.types.acp import SendEventParams, CancelTaskParams, CreateTaskParams -from agentex.lib.types.fastacp import AsyncACPConfig -from agentex.lib.utils.logging import make_logger -from agentex.types.text_content import TextContent -from agentex.lib.utils.model_utils import BaseModel -from agentex.lib.types.llm_messages import Message, LLMConfig, UserMessage, SystemMessage, AssistantMessage -from agentex.lib.sdk.fastacp.fastacp import FastACP - -logger = make_logger(__name__) - - -# Create an ACP server - -# !!! Warning: Because "Async" ACPs are designed to be fully asynchronous, race conditions can occur if parallel events are sent. It is highly recommended to use the "temporal" type in the AsyncACPConfig instead to handle complex use cases. The "base" ACP is only designed to be used for simple use cases and for learning purposes. -acp = FastACP.create( - acp_type="async", - config=AsyncACPConfig(type="base"), -) - -class StateModel(BaseModel): - messages: List[Message] - turn_number: int - - -@acp.on_task_create -async def handle_task_create(params: CreateTaskParams): - # Upon task creation, we initialize the task state with a system message. - # This will be fetched by the `on_task_event_send` handler when each event is sent. - - ######################################################### - # 1. Initialize the task state. - ######################################################### - - state = StateModel( - messages=[SystemMessage(content="You are a helpful assistant that can answer questions.")], - turn_number=0, - ) - await adk.state.create(task_id=params.task.id, agent_id=params.agent.id, state=state) - -@acp.on_task_event_send -async def handle_event_send(params: SendEventParams): - # !!! Warning: Because "Async" ACPs are designed to be fully asynchronous, race conditions can occur if parallel events are sent. It is highly recommended to use the "temporal" type in the AsyncACPConfig instead to handle complex use cases. The "base" ACP is only designed to be used for simple use cases and for learning purposes. - - ######################################################### - # 2. Validate the event content. - ######################################################### - if not params.event.content: - return - - if params.event.content.type != "text": - raise ValueError(f"Expected text message, got {params.event.content.type}") - - if params.event.content.author != "user": - raise ValueError(f"Expected user message, got {params.event.content.author}") - - ######################################################### - # 3. Retrieve the task state.
- ######################################################### - - task_state = await adk.state.get_by_task_and_agent(task_id=params.task.id, agent_id=params.agent.id) - if not task_state: - raise ValueError("Task state not found - ensure task was properly initialized") - state = StateModel.model_validate(task_state.state) - state.turn_number += 1 - - # Add the new user message to the message history - # Safely extract content from the event - content_text = "" - if hasattr(params.event.content, 'content'): - content_val = getattr(params.event.content, 'content', '') - if isinstance(content_val, str): - content_text = content_val - state.messages.append(UserMessage(content=content_text)) - - ######################################################### - # 4. (👋) Create a tracing span. - ######################################################### - - # Create a tracing span. All of the Agentex ADK methods are "auto-traced", but by default show up as a flat list associated with a single trace id (which is usually just set to the task id by default). - # If you want to create a hierarchical trace, you can do so by creating spans in your business logic and passing the span id to the ADK methods. Traces will be grouped under parent spans for better readability. - # If you're not trying to create a hierarchical trace, but just trying to create a custom span to trace something, you can use this too to create a custom span that is associated with your trace by trace ID. - - async with adk.tracing.span( - trace_id=params.task.id, - name=f"Turn {state.turn_number}", - input=state - ) as span: - - ######################################################### - # 5. Echo back the user's message so it shows up in the UI. - ######################################################### - - # (👋) Notice that we pass the parent_span_id to the ADK methods to create a hierarchical trace. - await adk.messages.create( - task_id=params.task.id, - trace_id=params.task.id, - content=params.event.content, - parent_span_id=span.id if span else None, - ) - - ######################################################### - # 6. If the OpenAI API key is not set, send a message to the user to let them know. - ######################################################### - - # (👋) Notice that we pass the parent_span_id to the ADK methods to create a hierarchical trace. - if not os.environ.get("OPENAI_API_KEY"): - await adk.messages.create( - task_id=params.task.id, - trace_id=params.task.id, - content=TextContent( - author="agent", - content="Hey, sorry I'm unable to respond to your message because you're running this example without an OpenAI API key. Please set the OPENAI_API_KEY environment variable to run this example. Do this either by adding a .env file to the project/ directory or by setting the environment variable in your terminal.", - ), - parent_span_id=span.id if span else None, - ) - - ######################################################### - # 7. Call an LLM to respond to the user's message - ######################################################### - - # (👋) Notice that we pass the parent_span_id to the ADK methods to create a hierarchical trace.
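# A deeper hierarchy is also possible: adk.tracing.span accepts a parent_id
# (the 040_other_sdks tutorial later in this diff passes one), so a child span
# could wrap just the LLM call below. A sketch, not part of this tutorial:
#
#   async with adk.tracing.span(
#       trace_id=params.task.id,
#       name="LLM call",
#       parent_id=span.id if span else None,
#   ) as child_span:
#       ...  # call the model, then set child_span.output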
- task_message = await adk.providers.litellm.chat_completion_stream_auto_send( - task_id=params.task.id, - llm_config=LLMConfig(model="gpt-4o-mini", messages=state.messages, stream=True), - trace_id=params.task.id, - parent_span_id=span.id if span else None, - ) - - # Safely extract content from the task message - response_text = "" - if task_message.content and hasattr(task_message.content, 'content'): # type: ignore[union-attr] - content_val = getattr(task_message.content, 'content', '') # type: ignore[union-attr] - if isinstance(content_val, str): - response_text = content_val - state.messages.append(AssistantMessage(content=response_text)) - - ######################################################### - # 8. Store the messages in the task state for the next turn - ######################################################### - - # (👋) Notice that we pass the parent_span_id to the ADK methods to create a hierarchical trace. - await adk.state.update( - state_id=task_state.id, - task_id=params.task.id, - agent_id=params.agent.id, - state=state, - trace_id=params.task.id, - parent_span_id=span.id if span else None, - ) - - ######################################################### - # 9. (👋) Set the span output to the state for the next turn - ######################################################### - - # (👋) You can store an arbitrary pydantic model or dictionary in the span output. The idea of a span is that it easily allows you to compare the input and output of a span to see what the wrapped function did. - # In this case, the state is comprehensive and expressive, so we just store the change in state that occurred. - if span: - span.output = state # type: ignore[misc] - -@acp.on_task_cancel -async def handle_task_cancel(params: CancelTaskParams): - """Default task cancel handler""" - logger.info(f"Task canceled: {params.task}") diff --git a/examples/tutorials/10_async/00_base/030_tracing/pyproject.toml b/examples/tutorials/10_async/00_base/030_tracing/pyproject.toml deleted file mode 100644 index fe1468a8..00000000 --- a/examples/tutorials/10_async/00_base/030_tracing/pyproject.toml +++ /dev/null @@ -1,33 +0,0 @@ -[build-system] -requires = ["hatchling"] -build-backend = "hatchling.build" - -[project] -name = "ab030-tracing" -version = "0.1.0" -description = "An AgentEx agent that demonstrates how to do hierarchical and custom tracing" -readme = "README.md" -requires-python = ">=3.12" -dependencies = [ - "agentex-sdk", - "scale-gp", -] - -[project.optional-dependencies] -dev = [ - "pytest", - "black", - "isort", - "flake8", -] - -[tool.hatch.build.targets.wheel] -packages = ["project"] - -[tool.black] -line-length = 88 -target-version = ['py312'] - -[tool.isort] -profile = "black" -line_length = 88 \ No newline at end of file diff --git a/examples/tutorials/10_async/00_base/030_tracing/tests/test_agent.py b/examples/tutorials/10_async/00_base/030_tracing/tests/test_agent.py deleted file mode 100644 index 0cc65c56..00000000 --- a/examples/tutorials/10_async/00_base/030_tracing/tests/test_agent.py +++ /dev/null @@ -1,124 +0,0 @@ -""" -Sample tests for AgentEx ACP agent. - -This test suite demonstrates how to test the main AgentEx API functions: -- Non-streaming event sending and polling -- Streaming event sending - -To run these tests: -1. Make sure the agent is running (via docker-compose or `agentex agents run`) -2. Set the AGENTEX_API_BASE_URL environment variable if not using default -3.
Run: pytest test_agent.py -v - -Configuration: -- AGENTEX_API_BASE_URL: Base URL for the AgentEx server (default: http://localhost:5003) -- AGENT_NAME: Name of the agent to test (default: ab030-tracing) -""" - -import os - -import pytest -import pytest_asyncio - -from agentex import AsyncAgentex - -# Configuration from environment variables -AGENTEX_API_BASE_URL = os.environ.get("AGENTEX_API_BASE_URL", "http://localhost:5003") -AGENT_NAME = os.environ.get("AGENT_NAME", "ab030-tracing") - - -@pytest_asyncio.fixture -async def client(): - """Create an AsyncAgentex client instance for testing.""" - client = AsyncAgentex(base_url=AGENTEX_API_BASE_URL) - yield client - await client.close() - - -@pytest.fixture -def agent_name(): - """Return the agent name for testing.""" - return AGENT_NAME - - -@pytest_asyncio.fixture -async def agent_id(client, agent_name): - """Retrieve the agent ID based on the agent name.""" - agents = await client.agents.list() - for agent in agents: - if agent.name == agent_name: - return agent.id - raise ValueError(f"Agent with name {agent_name} not found.") - - -class TestNonStreamingEvents: - """Test non-streaming event sending and polling.""" - - @pytest.mark.asyncio - async def test_send_event_and_poll(self, client: AsyncAgentex, agent_id: str): - """Test sending an event and polling for the response.""" - # TODO: Create a task for this conversation - # task_response = await client.agents.create_task(agent_id, params=ParamsCreateTaskRequest(name=uuid.uuid1().hex)) - # task = task_response.result - # assert task is not None - - # TODO: Send an event and poll for response using the helper function - # messages = [] - # async for message in send_event_and_poll_yielding( - # client=client, - # agent_id=agent_id, - # task_id=task.id, - # user_message="Your test message here", - # timeout=30, - # sleep_interval=1.0, - # ): - # messages.append(message) - - # TODO: Validate the response - # assert len(messages) > 0, "No response received from agent" - # assert validate_text_in_response("expected text", messages) - - -class TestStreamingEvents: - """Test streaming event sending.""" - - @pytest.mark.asyncio - async def test_send_event_and_stream(self, client: AsyncAgentex, agent_id: str): - """Test sending an event and streaming the response.""" - # TODO: Create a task for this conversation - # task_response = await client.agents.create_task(agent_id, params=ParamsCreateTaskRequest(name=uuid.uuid1().hex)) - # task = task_response.result - # assert task is not None - - # TODO: Send an event and stream the response using the helper function - # all_events = [] - # - # async def collect_stream_events(): - # async for event in stream_agent_response( - # client=client, - # task_id=task.id, - # timeout=30, - # ): - # all_events.append(event) - # - # stream_task = asyncio.create_task(collect_stream_events()) - # - # event_content = TextContentParam(type="text", author="user", content="Your test message here") - # await client.agents.send_event(agent_id=agent_id, params={"task_id": task.id, "content": event_content}) - # - # await stream_task - - # TODO: Validate the streaming response - # assert len(all_events) > 0, "No events received in streaming response" - # - # text_found = False - # for event in all_events: - # content = event.get("content", {}) - # if "expected text" in str(content).lower(): - # text_found = True - # break - # assert text_found, "Expected text not found in streaming response" - - -if __name__ == "__main__": - pytest.main([__file__, "-v"]) diff --git 
a/examples/tutorials/10_async/00_base/040_other_sdks/.dockerignore b/examples/tutorials/10_async/00_base/040_other_sdks/.dockerignore deleted file mode 100644 index c2d7fca4..00000000 --- a/examples/tutorials/10_async/00_base/040_other_sdks/.dockerignore +++ /dev/null @@ -1,43 +0,0 @@ -# Python -__pycache__/ -*.py[cod] -*$py.class -*.so -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -*.egg-info/ -.installed.cfg -*.egg - -# Environments -.env** -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# IDE -.idea/ -.vscode/ -*.swp -*.swo - -# Git -.git -.gitignore - -# Misc -.DS_Store diff --git a/examples/tutorials/10_async/00_base/040_other_sdks/Dockerfile b/examples/tutorials/10_async/00_base/040_other_sdks/Dockerfile deleted file mode 100644 index 2e0ee6ef..00000000 --- a/examples/tutorials/10_async/00_base/040_other_sdks/Dockerfile +++ /dev/null @@ -1,50 +0,0 @@ -# syntax=docker/dockerfile:1.3 -FROM python:3.12-slim -COPY --from=ghcr.io/astral-sh/uv:0.6.4 /uv /uvx /bin/ - -# Install system dependencies -RUN apt-get update && apt-get install -y \ - htop \ - vim \ - curl \ - tar \ - python3-dev \ - postgresql-client \ - build-essential \ - libpq-dev \ - gcc \ - cmake \ - netcat-openbsd \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - -RUN uv pip install --system --upgrade pip setuptools wheel - -ENV UV_HTTP_TIMEOUT=1000 - -# Copy pyproject.toml and README.md to install dependencies -COPY 10_async/00_base/040_other_sdks/pyproject.toml /app/040_other_sdks/pyproject.toml -COPY 10_async/00_base/040_other_sdks/README.md /app/040_other_sdks/README.md - -WORKDIR /app/040_other_sdks - -# Copy the project code -COPY 10_async/00_base/040_other_sdks/project /app/040_other_sdks/project - -# Copy the test files -COPY 10_async/00_base/040_other_sdks/tests /app/040_other_sdks/tests - -# Copy shared test utilities -COPY test_utils /app/test_utils - -# Install the required Python packages with dev dependencies (includes pytest) -RUN uv pip install --system .[dev] pytest-asyncio httpx - -# Set environment variables -ENV PYTHONPATH=/app - -# Set test environment variables -ENV AGENT_NAME=ab040-other-sdks - -# Run the agent using uvicorn -CMD ["uvicorn", "project.acp:acp", "--host", "0.0.0.0", "--port", "8000"] diff --git a/examples/tutorials/10_async/00_base/040_other_sdks/README.md b/examples/tutorials/10_async/00_base/040_other_sdks/README.md deleted file mode 100644 index 5c086233..00000000 --- a/examples/tutorials/10_async/00_base/040_other_sdks/README.md +++ /dev/null @@ -1,45 +0,0 @@ -# [Agentic] Other SDKs - -Agents are just Python code - integrate any SDK you want (OpenAI, Anthropic, LangChain, LlamaIndex, custom libraries, etc.). AgentEx doesn't lock you into a specific framework. 
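As a quick illustration, a handler can call the OpenAI SDK directly instead of going through `adk.providers`. This is a minimal sketch, not this tutorial's code (the tutorial itself uses the OpenAI Agents SDK with MCP servers); the client setup and model name here are illustrative assumptions:

```python
from openai import AsyncOpenAI

from agentex.lib import adk
from agentex.types.text_content import TextContent

openai_client = AsyncOpenAI()  # assumes OPENAI_API_KEY is set


@acp.on_task_event_send  # `acp` is the FastACP instance from project/acp.py
async def handle_event_send(params):
    # Call any third-party SDK directly...
    response = await openai_client.chat.completions.create(
        model="gpt-4o-mini",  # illustrative model choice
        messages=[{"role": "user", "content": params.event.content.content}],
    )
    # ...then persist its output as an AgentEx task message.
    await adk.messages.create(
        task_id=params.task.id,
        trace_id=params.task.id,
        content=TextContent(author="agent", content=response.choices[0].message.content or ""),
    )
```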
- -## What You'll Learn -- How to integrate OpenAI, Anthropic, or any SDK -- What AgentEx provides vs what you bring -- Framework-agnostic agent development -- Building agents with your preferred tools - -## Prerequisites -- Development environment set up (see [main repo README](https://github.com/scaleapi/scale-agentex)) -- Backend services running: `make dev` from repository root -- Understanding of async agents (see [000_hello_acp](../000_hello_acp/)) - -## Quick Start - -```bash -cd examples/tutorials/10_async/00_base/040_other_sdks -uv run agentex agents run --manifest manifest.yaml -``` - -## Key Insight - -AgentEx provides: -- ACP protocol implementation (task management, message handling) -- Deployment infrastructure -- Monitoring and observability - -You provide: -- Agent logic using whatever SDK/library you want -- Tools and capabilities specific to your use case - -Mix and match OpenAI, Anthropic, LangChain, or roll your own - it's all just Python. - -## When to Use -- You have an existing agent codebase to migrate -- Your team prefers specific SDKs or frameworks -- You need features from multiple providers -- You want full control over your agent logic - -## Why This Matters -AgentEx is infrastructure, not a framework. We handle deployment, task management, and protocol implementation - you handle the agent logic with whatever tools you prefer. This keeps you flexible and avoids vendor lock-in. - -**Next:** [080_batch_events](../080_batch_events/) - See when you need Temporal diff --git a/examples/tutorials/10_async/00_base/040_other_sdks/dev.ipynb b/examples/tutorials/10_async/00_base/040_other_sdks/dev.ipynb deleted file mode 100644 index abb1b9e7..00000000 --- a/examples/tutorials/10_async/00_base/040_other_sdks/dev.ipynb +++ /dev/null @@ -1,126 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "from agentex import Agentex\n", - "\n", - "client = Agentex(base_url=\"http://localhost:5003\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1", - "metadata": {}, - "outputs": [], - "source": [ - "AGENT_NAME = \"ab040-other-sdks\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2", - "metadata": {}, - "outputs": [], - "source": [ - "# (REQUIRED) Create a new task. 
For Agentic agents, you must create a task for messages to be associated with.\n", - "import uuid\n", - "\n", - "rpc_response = client.agents.create_task(\n", - " agent_name=AGENT_NAME,\n", - " params={\n", - " \"name\": f\"{str(uuid.uuid4())[:8]}-task\",\n", - " \"params\": {}\n", - " }\n", - ")\n", - "\n", - "task = rpc_response.result\n", - "print(task)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": {}, - "outputs": [], - "source": [ - "# Send an event to the agent\n", - "\n", - "# The response is expected to be a list of TaskMessage objects, which is a union of the following types:\n", - "# - TextContent: A message with just text content \n", - "# - DataContent: A message with JSON-serializable data content\n", - "# - ToolRequestContent: A message with a tool request, which contains a JSON-serializable request to call a tool\n", - "# - ToolResponseContent: A message with a tool response, which contains response object from a tool call in its content\n", - "\n", - "# When processing the message/send response, if you are expecting more than TextContent, such as DataContent, ToolRequestContent, or ToolResponseContent, you can process them as well\n", - "\n", - "rpc_response = client.agents.send_event(\n", - " agent_name=AGENT_NAME,\n", - " params={\n", - " \"content\": {\"type\": \"text\", \"author\": \"user\", \"content\": \"Hello tell me the latest news about AI and AI startups\"},\n", - " \"task_id\": task.id,\n", - " }\n", - ")\n", - "\n", - "event = rpc_response.result\n", - "print(event)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4", - "metadata": {}, - "outputs": [], - "source": [ - "# Subscribe to the async task messages produced by the agent\n", - "from agentex.lib.utils.dev_tools import subscribe_to_async_task_messages\n", - "\n", - "task_messages = subscribe_to_async_task_messages(\n", - " client=client,\n", - " task=task, \n", - " only_after_timestamp=event.created_at, \n", - " print_messages=True,\n", - " rich_print=True,\n", - " timeout=20,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.9" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/examples/tutorials/10_async/00_base/040_other_sdks/manifest.yaml b/examples/tutorials/10_async/00_base/040_other_sdks/manifest.yaml deleted file mode 100644 index 8fd324c1..00000000 --- a/examples/tutorials/10_async/00_base/040_other_sdks/manifest.yaml +++ /dev/null @@ -1,119 +0,0 @@ -# Agent Manifest Configuration -# --------------------------- -# This file defines how your agent should be built and deployed. - -# Build Configuration -# ------------------ -# The build config defines what gets packaged into your agent's Docker image. -# This same configuration is used whether building locally or remotely. -# -# When building: -# 1. All files from include_paths are collected into a build context -# 2. The context is filtered by dockerignore rules -# 3. The Dockerfile uses this context to build your agent's image -# 4. 
The image is pushed to a registry and used to run your agent -build: - context: - # Root directory for the build context - root: ../../../ # Up to tutorials level to include test_utils - - # Paths to include in the Docker build context - # Must include: - # - Your agent's directory (your custom agent code) - # These paths are collected and sent to the Docker daemon for building - include_paths: - - 10_async/00_base/040_other_sdks - - test_utils - - # Path to your agent's Dockerfile - # This defines how your agent's image is built from the context - # Relative to the root directory - dockerfile: 10_async/00_base/040_other_sdks/Dockerfile - - # Path to your agent's .dockerignore - # Filters unnecessary files from the build context - # Helps keep build context small and builds fast - dockerignore: 10_async/00_base/040_other_sdks/.dockerignore - - -# Local Development Configuration -# ----------------------------- -# Only used when running the agent locally -local_development: - agent: - port: 8000 # Port where your local ACP server is running - host_address: host.docker.internal # Host address for Docker networking (host.docker.internal for Docker, localhost for direct) - - # File paths for local development (relative to this manifest.yaml) - paths: - # Path to ACP server file - # Examples: - # project/acp.py (standard) - # src/server.py (custom structure) - # ../shared/acp.py (shared across projects) - # /absolute/path/acp.py (absolute path) - acp: project/acp.py - - -# Agent Configuration -# ----------------- -agent: - acp_type: async - - # Unique name for your agent - # Used for task routing and monitoring - name: ab040-other-sdks - - # Description of what your agent does - # Helps with documentation and discovery - description: An AgentEx agent that uses other SDKs to show that agents are just code - - # Temporal workflow configuration - # Set enabled: true to use Temporal workflows for long-running tasks - temporal: - enabled: false - - # Optional: Credentials mapping - # Maps Kubernetes secrets to environment variables - # Common credentials include: - # credentials: - # - env_var_name: OPENAI_API_KEY - # secret_name: openai-api-key - # secret_key: api-key - - # Optional: Set Environment variables for running your agent locally as well - # as for deployment later on - # env: - # - name: OPENAI_BASE_URL - # value: "https://api.openai.com/v1" - # - name: ACCOUNT_ID - # value: "your_account_id_here" - - -# Deployment Configuration -# ----------------------- -# Configuration for deploying your agent to Kubernetes clusters -deployment: - # Container image configuration - image: - repository: "" # Update with your container registry - tag: "latest" # Default tag, should be versioned in production - - # Global deployment settings that apply to all clusters - # These can be overridden in cluster-specific files (deploy/*.yaml) - global: - agent: - name: "ab040-other-sdks" - description: "An AgentEx agent that uses other SDKs to show that agents are just code" - - # Default replica count - replicaCount: 1 - - # Default resource requirements - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "1000m" - memory: "2Gi" \ No newline at end of file diff --git a/examples/tutorials/10_async/00_base/040_other_sdks/project/__init__.py b/examples/tutorials/10_async/00_base/040_other_sdks/project/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/examples/tutorials/10_async/00_base/040_other_sdks/project/acp.py
b/examples/tutorials/10_async/00_base/040_other_sdks/project/acp.py deleted file mode 100644 index d2ec84fc..00000000 --- a/examples/tutorials/10_async/00_base/040_other_sdks/project/acp.py +++ /dev/null @@ -1,375 +0,0 @@ -from __future__ import annotations - -import os -import json -from typing import Dict, List, Optional -from contextlib import AsyncExitStack, asynccontextmanager - -from mcp import StdioServerParameters -from agents import Agent, Runner -from pydantic import BaseModel as PydanticBaseModel -from agents.mcp import MCPServerStdio -from openai.types.responses import ( - ResponseCompletedEvent, - ResponseTextDeltaEvent, - ResponseFunctionToolCall, - ResponseOutputItemDoneEvent, -) - -from agentex.lib import adk -from agentex.lib.types.acp import SendEventParams, CancelTaskParams, CreateTaskParams -from agentex.lib.types.fastacp import AsyncACPConfig -from agentex.lib.utils.logging import make_logger -from agentex.types.text_content import TextContent -from agentex.lib.utils.model_utils import BaseModel -from agentex.lib.sdk.fastacp.fastacp import FastACP -from agentex.types.task_message_delta import TextDelta -from agentex.types.task_message_update import ( - StreamTaskMessageFull, - StreamTaskMessageDelta, -) -from agentex.types.task_message_content import ToolRequestContent, ToolResponseContent -from agentex.lib.core.services.adk.streaming import StreamingTaskMessageContext - -logger = make_logger(__name__) - - -# Create an ACP server - -# !!! Warning: Because "Async" ACPs are designed to be fully asynchronous, race conditions can occur if parallel events are sent. It is highly recommended to use the "temporal" type in the AsyncACPConfig instead to handle complex use cases. The "base" ACP is only designed to be used for simple use cases and for learning purposes. -acp = FastACP.create( - acp_type="async", - config=AsyncACPConfig(type="base"), -) - - -class StateModel(BaseModel): - input_list: List[dict] - turn_number: int - - -MCP_SERVERS = [ - StdioServerParameters( - command="npx", - args=["-y", "@modelcontextprotocol/server-sequential-thinking"], - ), - StdioServerParameters( - command="uvx", args=["openai-websearch-mcp"], env={"OPENAI_API_KEY": os.environ.get("OPENAI_API_KEY", "")} - ), -] - - -@acp.on_task_create -async def handle_task_create(params: CreateTaskParams): - # Upon task creation, we initialize the task state with an empty input list and turn counter. - # This will be fetched by the `on_task_event_send` handler when each event is sent. - state = StateModel( - input_list=[], - turn_number=0, - ) - await adk.state.create(task_id=params.task.id, agent_id=params.agent.id, state=state) - - -@acp.on_task_event_send -async def handle_event_send(params: SendEventParams): - # !!! Warning: Because "Async" ACPs are designed to be fully asynchronous, race conditions can occur if parallel events are sent. It is highly recommended to use the "temporal" type in the AsyncACPConfig instead to handle complex use cases. The "base" ACP is only designed to be used for simple use cases and for learning purposes. - - if not params.event.content: - return - - if params.event.content.type != "text": - raise ValueError(f"Expected text message, got {params.event.content.type}") - - if params.event.content.author != "user": - raise ValueError(f"Expected user message, got {params.event.content.author}") - - # Retrieve the task state. Each event is handled as a new turn, so we need to get the state for the current turn.
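# State shape reminder: StateModel.input_list holds the OpenAI Agents SDK
# conversation items (produced further down by run_result.to_input_list()),
# so each turn replays the full history into Runner.run_streamed.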
- task_state = await adk.state.get_by_task_and_agent(task_id=params.task.id, agent_id=params.agent.id) - if not task_state: - raise ValueError("Task state not found - ensure task was properly initialized") - state = StateModel.model_validate(task_state.state) - state.turn_number += 1 - # Add the new user message to the message history - state.input_list.append({"role": "user", "content": params.event.content.content}) - - async with adk.tracing.span(trace_id=params.task.id, name=f"Turn {state.turn_number}", input=state) as span: - # Echo back the user's message so it shows up in the UI. This is not done by default so the agent developer has full control over what is shown to the user. - await adk.messages.create( - task_id=params.task.id, - trace_id=params.task.id, - content=params.event.content, - parent_span_id=span.id if span else None, - ) - - if not os.environ.get("OPENAI_API_KEY"): - await adk.messages.create( - task_id=params.task.id, - trace_id=params.task.id, - content=TextContent( - author="agent", - content="Hey, sorry I'm unable to respond to your message because you're running this example without an OpenAI API key. Please set the OPENAI_API_KEY environment variable to run this example. Do this either by adding a .env file to the project/ directory or by setting the environment variable in your terminal.", - ), - parent_span_id=span.id if span else None, - ) - - ######################################################### - # (👋) Call an LLM to respond to the user's message using custom streaming - ######################################################### - - # This demonstrates advanced streaming patterns using adk.streaming. - # We'll show two different streaming approaches: - # 1. Simple streaming with context managers for complete messages (tool calls) - # 2. Delta-based streaming for incremental text responses - run_result = await run_openai_agent_with_custom_streaming( - task_id=params.task.id, - trace_id=params.task.id, - input_list=state.input_list, - mcp_server_params=MCP_SERVERS, - agent_name="Tool-Enabled Assistant", - agent_instructions="""You are a helpful assistant that can answer questions using various tools. - You have access to sequential thinking and web search capabilities through MCP servers.
- Use these tools when appropriate to provide accurate and well-reasoned responses.""", - parent_span_id=span.id if span else None, - ) - - state.input_list = run_result.to_input_list() - logger.info(f"state.input_list: {state.input_list}") - logger.info(f"state: {state}") - # Store the messages in the task state for the next turn - await adk.state.update( - state_id=task_state.id, - task_id=params.task.id, - agent_id=params.agent.id, - state=state, - trace_id=params.task.id, - parent_span_id=span.id if span else None, - ) - logger.info("successfully updated the state") - # Set the span output to the state for the next turn - if span: - span.output = state - - -@acp.on_task_cancel -async def handle_task_cancel(params: CancelTaskParams): - """Default task cancel handler""" - logger.info(f"Task canceled: {params.task}") - - -######################################################## -# Helper functions that integrate Agentex primitives with other SDKs like OpenAI Agents -######################################################## - - -@asynccontextmanager -async def mcp_server_context(mcp_server_params: list[StdioServerParameters]): - """Context manager for MCP servers.""" - servers = [] - for params in mcp_server_params: - server = MCPServerStdio( - name=f"Server: {params.command}", - params=params.model_dump(), - cache_tools_list=True, - client_session_timeout_seconds=60, - ) - servers.append(server) - - async with AsyncExitStack() as stack: - for server in servers: - await stack.enter_async_context(server) - yield servers - - -def redact_mcp_server_params( - mcp_server_params: list[StdioServerParameters], -) -> list[StdioServerParameters]: - """Redact MCP server params.""" - return [ - StdioServerParameters( - **{k: v for k, v in server_param.model_dump().items() if k != "env"}, - env={k: "********" for k in server_param.env} if server_param.env else None, - ) - for server_param in mcp_server_params - ] - - -async def run_openai_agent_with_custom_streaming( - task_id: str, - trace_id: str, - input_list: list[Dict], - mcp_server_params: list[StdioServerParameters], - agent_name: str, - agent_instructions: str, - parent_span_id: Optional[str] = None, -): - """ - Run an OpenAI agent with custom streaming using adk.streaming. - - This demonstrates advanced streaming patterns using adk.streaming. - We'll show two different streaming approaches: - 1. Simple streaming with context managers for complete messages (tool calls) - 2. Delta-based streaming for incremental text responses - """ - - tool_call_map: Dict[str, ResponseFunctionToolCall] = {} - - redacted_mcp_server_params = redact_mcp_server_params(mcp_server_params) - - result = None - async with adk.tracing.span( - trace_id=trace_id, - name="run_agent_with_custom_streaming", - input={ - "input_list": input_list, - "mcp_server_params": redacted_mcp_server_params, - "agent_name": agent_name, - "agent_instructions": agent_instructions, - }, - parent_id=parent_span_id, - ) as span: - async with mcp_server_context(mcp_server_params) as servers: - agent = Agent( - name=agent_name, - instructions=agent_instructions, - mcp_servers=servers, - ) - - # Run with streaming enabled - result = Runner.run_streamed(starting_agent=agent, input=input_list) - - ######################################################### - # (๐Ÿ‘‹) For complete messages like tool calls we will use a with block to create a streaming context, but for text deltas we will use a streaming context that is created and closed manually. 
To make sure we close all streaming contexts we will track the item_id and close them all at the end. - ######################################################### - - item_id_to_streaming_context: Dict[str, StreamingTaskMessageContext] = {} - unclosed_item_ids: set[str] = set() - - try: - # Process streaming events with TaskMessage creation - async for event in result.stream_events(): - if event.type == "run_item_stream_event": - if event.item.type == "tool_call_item": - tool_call_item = event.item.raw_item - tool_call_map[tool_call_item.call_id] = tool_call_item - - logger.info(f"Tool call item: {tool_call_item}") - - tool_request_content = ToolRequestContent( - author="agent", - tool_call_id=tool_call_item.call_id, - name=tool_call_item.name, - arguments=json.loads(tool_call_item.arguments), - ) - - # (👋) Create a streaming context for the tool call - # Since a tool call is a complete message, we can use a with block to create a streaming context. This will take care of creating a TaskMessage, sending a START event, and sending a DONE event when the context is closed. Of course you will also want to stream the content of the tool call so clients that are subscribed to streaming updates to the task will see the tool call. - async with adk.streaming.streaming_task_message_context( - task_id=task_id, - initial_content=tool_request_content, - ) as streaming_context: - # The message has already been persisted, but we still need to send an update - await streaming_context.stream_update( - update=StreamTaskMessageFull( - parent_task_message=streaming_context.task_message, - content=tool_request_content, - content_type=tool_request_content.type, - type="full", - ), - ) - - elif event.item.type == "tool_call_output_item": - tool_output_item = event.item.raw_item - - tool_response_content = ToolResponseContent( - author="agent", - tool_call_id=tool_output_item["call_id"], - name=tool_call_map[tool_output_item["call_id"]].name, - content=tool_output_item["output"], - ) - - # (👋) Create a streaming context for the tool call output - # Since a tool call output is a complete message, we can use a with block to create a streaming context. This will take care of creating a TaskMessage, sending a START event, and sending a DONE event when the context is closed. Of course you will also want to stream the content of the tool call output so clients that are subscribed to streaming updates to the task will see the tool call output. - async with adk.streaming.streaming_task_message_context( - task_id=task_id, - initial_content=tool_response_content, - ) as streaming_context: - # The message has already been persisted, but we still need to send an update - await streaming_context.stream_update( - update=StreamTaskMessageFull( - parent_task_message=streaming_context.task_message, - content=tool_response_content, - content_type=tool_response_content.type, - type="full", - ), - ) - - elif event.type == "raw_response_event": - if isinstance(event.data, ResponseTextDeltaEvent): - # Handle text delta - item_id = event.data.item_id - - # (👋) Create a streaming context for the text delta - # Since a text delta is a partial message, we will create a streaming context manually without a with block because we need to persist the context across the for loop.
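# Lifecycle recap, based on how these contexts are used in this file: open()
# persists a TaskMessage and emits a START event, stream_update() emits DELTA
# events while the context accumulates the text, and close() emits a DONE
# event and writes the accumulated content back to the persisted message.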
- if item_id not in item_id_to_streaming_context: - streaming_context = adk.streaming.streaming_task_message_context( - task_id=task_id, - initial_content=TextContent( - author="agent", - content="", - ), - ) - # (👋) Open the streaming context manually - # This will create a TaskMessage and send a START event for you. - item_id_to_streaming_context[item_id] = await streaming_context.open() - - # (👋) Add the item_id to the set of unclosed item_ids - # This will allow us to close any lingering streaming context when the agent is done. - unclosed_item_ids.add(item_id) - else: - streaming_context = item_id_to_streaming_context[item_id] - - # (👋) Stream the delta through the streaming service - # This will send a DELTA event. The context manager will accumulate the content for you into a final message when you close the context. - await streaming_context.stream_update( - update=StreamTaskMessageDelta( - parent_task_message=streaming_context.task_message, - delta=TextDelta(text_delta=event.data.delta, type="text"), - type="delta", - ), - ) - - elif isinstance(event.data, ResponseOutputItemDoneEvent): - # Handle item completion - item_id = event.data.item.id - - # (👋) Close the streaming context - # This will send a DONE event and update the persisted message. - if item_id in item_id_to_streaming_context: - streaming_context = item_id_to_streaming_context[item_id] - await streaming_context.close() - unclosed_item_ids.remove(item_id) - - elif isinstance(event.data, ResponseCompletedEvent): - # (👋) Close all remaining streaming contexts - # This will send a DONE event and update the persisted messages for all remaining streaming contexts. Normally this won't be needed if all messages are closed by the time the agent is done. - for item_id in list(unclosed_item_ids): - streaming_context = item_id_to_streaming_context[item_id] - await streaming_context.close() - unclosed_item_ids.remove(item_id) - - finally: - # (👋) Close all remaining streaming contexts - # This will send a DONE event and update the persisted messages for all remaining streaming contexts. Normally this won't be needed, but we do it in case any errors occur.
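# Iterate over a snapshot via list(...) here: the loop body removes ids from
# unclosed_item_ids, and mutating a set while iterating it directly raises
# RuntimeError.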
- for item_id in list(unclosed_item_ids): - streaming_context = item_id_to_streaming_context[item_id] - await streaming_context.close() - unclosed_item_ids.remove(item_id) - if span: - span.output = { - "new_items": [ - item.raw_item.model_dump() if isinstance(item.raw_item, PydanticBaseModel) else item.raw_item - for item in result.new_items - ], - "final_output": result.final_output, - } - return result diff --git a/examples/tutorials/10_async/00_base/040_other_sdks/pyproject.toml b/examples/tutorials/10_async/00_base/040_other_sdks/pyproject.toml deleted file mode 100644 index 2d669512..00000000 --- a/examples/tutorials/10_async/00_base/040_other_sdks/pyproject.toml +++ /dev/null @@ -1,33 +0,0 @@ -[build-system] -requires = ["hatchling"] -build-backend = "hatchling.build" - -[project] -name = "ab040-other-sdks" -version = "0.1.0" -description = "An AgentEx agent that uses other SDKs to show that agents are just code" -readme = "README.md" -requires-python = ">=3.12" -dependencies = [ - "agentex-sdk", - "scale-gp", -] - -[project.optional-dependencies] -dev = [ - "pytest", - "black", - "isort", - "flake8", -] - -[tool.hatch.build.targets.wheel] -packages = ["project"] - -[tool.black] -line-length = 88 -target-version = ['py312'] - -[tool.isort] -profile = "black" -line_length = 88 \ No newline at end of file diff --git a/examples/tutorials/10_async/00_base/040_other_sdks/tests/test_agent.py b/examples/tutorials/10_async/00_base/040_other_sdks/tests/test_agent.py deleted file mode 100644 index 429d8d87..00000000 --- a/examples/tutorials/10_async/00_base/040_other_sdks/tests/test_agent.py +++ /dev/null @@ -1,413 +0,0 @@ -""" -Sample tests for AgentEx ACP agent with MCP servers and custom streaming. - -This test suite demonstrates how to test agents that integrate: -- OpenAI Agents SDK with streaming -- MCP (Model Context Protocol) servers for tool access -- Custom streaming patterns (delta-based and full messages) -- Complex multi-turn conversations with tool usage - -Key differences from regular streaming (020_streaming): -1. MCP Integration: Agent has access to external tools via MCP servers (sequential-thinking, web-search) -2. Tool Call Streaming: Tests both tool request and tool response streaming patterns -3. Mixed Streaming: Combines full message streaming (tools) with delta streaming (text) -4. Advanced State: Tracks turn_number and input_list instead of simple message history -5. Custom Streaming Context: Manual lifecycle management for different message types - -To run these tests: -1. Make sure the agent is running (via docker-compose or `agentex agents run`) -2. Set the AGENTEX_API_BASE_URL environment variable if not using default -3. Ensure OPENAI_API_KEY is set in the environment -4.
Run: pytest test_agent.py -v - -Configuration: -- AGENTEX_API_BASE_URL: Base URL for the AgentEx server (default: http://localhost:5003) -- AGENT_NAME: Name of the agent to test (default: ab040-other-sdks) -""" - -import os -import uuid -import asyncio - -import pytest -import pytest_asyncio -from test_utils.async_utils import ( - stream_agent_response, - send_event_and_poll_yielding, -) - -from agentex import AsyncAgentex -from agentex.types import TaskMessage, TextContent -from agentex.types.agent_rpc_params import ParamsCreateTaskRequest -from agentex.types.text_content_param import TextContentParam - -# Configuration from environment variables -AGENTEX_API_BASE_URL = os.environ.get("AGENTEX_API_BASE_URL", "http://localhost:5003") -AGENT_NAME = os.environ.get("AGENT_NAME", "ab040-other-sdks") - - -@pytest_asyncio.fixture -async def client(): - """Create an AsyncAgentex client instance for testing.""" - client = AsyncAgentex(base_url=AGENTEX_API_BASE_URL) - yield client - await client.close() - - -@pytest.fixture -def agent_name(): - """Return the agent name for testing.""" - return AGENT_NAME - - -@pytest_asyncio.fixture -async def agent_id(client, agent_name): - """Retrieve the agent ID based on the agent name.""" - agents = await client.agents.list() - for agent in agents: - if agent.name == agent_name: - return agent.id - raise ValueError(f"Agent with name {agent_name} not found.") - - -class TestNonStreamingEvents: - """Test non-streaming event sending and polling with MCP tools.""" - - @pytest.mark.asyncio - async def test_send_event_and_poll_simple_query(self, client: AsyncAgentex, agent_id: str): - """Test sending a simple event and polling for the response (no tool use).""" - # Create a task for this conversation - task_response = await client.agents.create_task(agent_id, params=ParamsCreateTaskRequest(name=uuid.uuid1().hex)) - task = task_response.result - assert task is not None - - # Check initial state - should have empty input_list and turn_number 0 - await asyncio.sleep(1) # wait for state to be initialized - states = await client.states.list(agent_id=agent_id, task_id=task.id) - assert len(states) == 1 - - state = states[0].state - assert state is not None - assert state.get("input_list", []) == [] - assert state.get("turn_number", 0) == 0 - - # Send a simple message that shouldn't require tool use - user_message = "Hello! Please introduce yourself briefly." 
-        messages = []
-        async for message in send_event_and_poll_yielding(
-            client=client,
-            agent_id=agent_id,
-            task_id=task.id,
-            user_message=user_message,
-            timeout=30,
-            sleep_interval=1.0,
-        ):
-            assert isinstance(message, TaskMessage)
-            messages.append(message)
-
-            if len(messages) == 1:
-                assert message.content == TextContent(
-                    author="user",
-                    content=user_message,
-                    type="text",
-                )
-                break
-
-        # Verify state has been updated by polling the states for up to 10 seconds
-        for i in range(10):
-            if i == 9:
-                raise Exception("Timeout waiting for state updates")
-            states = await client.states.list(agent_id=agent_id, task_id=task.id)
-            state = states[0].state
-            if len(state.get("input_list", [])) > 0 and state.get("turn_number") == 1:
-                break
-            await asyncio.sleep(1)
-
-        states = await client.states.list(agent_id=agent_id, task_id=task.id)
-        state = states[0].state
-        assert state.get("turn_number") == 1
-
-    @pytest.mark.asyncio
-    async def test_send_event_and_poll_with_tool_use(self, client: AsyncAgentex, agent_id: str):
-        """Test sending an event that triggers tool usage and polling for the response."""
-        # Create a task for this conversation
-        task_response = await client.agents.create_task(agent_id, params=ParamsCreateTaskRequest(name=uuid.uuid1().hex))
-        task = task_response.result
-        assert task is not None
-
-        # Send a message that should trigger the sequential-thinking tool
-        user_message = "What is 15 multiplied by 37? Please think through this step by step."
-        tool_request_found = False
-        tool_response_found = False
-        has_final_agent_response = False
-
-        async for message in send_event_and_poll_yielding(
-            client=client,
-            agent_id=agent_id,
-            task_id=task.id,
-            user_message=user_message,
-            timeout=60,  # Longer timeout for tool use
-            sleep_interval=1.0,
-        ):
-            assert isinstance(message, TaskMessage)
-            if message.content and message.content.type == "tool_request":
-                tool_request_found = True
-                assert message.content.author == "agent"
-                assert hasattr(message.content, "name")
-                assert hasattr(message.content, "tool_call_id")
-            elif message.content and message.content.type == "tool_response":
-                tool_response_found = True
-                assert message.content.author == "agent"
-            elif message.content and message.content.type == "text" and message.content.author == "agent":
-                has_final_agent_response = True
-                break
-
-        assert has_final_agent_response, "Did not receive final agent text response"
-        assert tool_request_found, "Did not see tool request message"
-        assert tool_response_found, "Did not see tool response message"
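The poll-until-the-state-updates loop recurs throughout this suite. Factored into a helper it would look roughly like this (a sketch only; `client.states.list` and the `input_list`/`turn_number` keys are exactly what the tests above use, but the helper itself is hypothetical):

```python
import asyncio

from agentex import AsyncAgentex

async def wait_for_turn(
    client: AsyncAgentex, agent_id: str, task_id: str, turn: int, timeout: int = 10
) -> dict:
    """Poll task state once per second until turn_number reaches `turn`."""
    for _ in range(timeout):
        states = await client.states.list(agent_id=agent_id, task_id=task_id)
        state = states[0].state
        if state.get("input_list") and state.get("turn_number") == turn:
            return state
        await asyncio.sleep(1)
    raise TimeoutError("Timeout waiting for state updates")
```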
-    @pytest.mark.asyncio
-    async def test_multi_turn_conversation_with_state(self, client: AsyncAgentex, agent_id: str):
-        """Test multiple turns of conversation with state preservation."""
-        # Create a task for this conversation
-        task_response = await client.agents.create_task(agent_id, params=ParamsCreateTaskRequest(name=uuid.uuid1().hex))
-        task = task_response.result
-        assert task is not None
-
-        # ensure the task is created before we send the first event
-        await asyncio.sleep(1)
-        # First turn
-        user_message_1 = "My favorite color is blue."
-        async for message in send_event_and_poll_yielding(
-            client=client,
-            agent_id=agent_id,
-            task_id=task.id,
-            user_message=user_message_1,
-            timeout=20,
-            sleep_interval=1.0,
-        ):
-            assert isinstance(message, TaskMessage)
-            if (
-                message.content
-                and message.content.type == "text"
-                and message.content.author == "agent"
-                and message.content.content
-            ):
-                break
-
-        ## keep polling the states for up to 30 seconds for the input_list and turn_number to be updated
-        for i in range(30):
-            if i == 29:
-                raise Exception("Timeout waiting for state updates")
-            states = await client.states.list(agent_id=agent_id, task_id=task.id)
-            state = states[0].state
-            if len(state.get("input_list", [])) > 0 and state.get("turn_number") == 1:
-                break
-            await asyncio.sleep(1)
-
-        states = await client.states.list(agent_id=agent_id, task_id=task.id)
-        state = states[0].state
-        assert state.get("turn_number") == 1
-
-        await asyncio.sleep(1)
-        found_response = False
-        # Second turn - reference previous context
-        user_message_2 = "What did I just tell you my favorite color was?"
-        async for message in send_event_and_poll_yielding(
-            client=client,
-            agent_id=agent_id,
-            task_id=task.id,
-            user_message=user_message_2,
-            timeout=30,
-            sleep_interval=1.0,
-        ):
-            if (
-                message.content
-                and message.content.type == "text"
-                and message.content.author == "agent"
-                and message.content.content
-            ):
-                response_text = message.content.content.lower()
-                assert "blue" in response_text
-                found_response = True
-                break
-
-        assert found_response, "Did not receive final agent text response"
-        ## keep polling the states for up to 10 seconds for the turn_number to reach 2
-        for i in range(10):
-            if i == 9:
-                raise Exception("Timeout waiting for state updates")
-            states = await client.states.list(agent_id=agent_id, task_id=task.id)
-            state = states[0].state
-            if len(state.get("input_list", [])) > 0 and state.get("turn_number") == 2:
-                break
-            await asyncio.sleep(1)
-
-        states = await client.states.list(agent_id=agent_id, task_id=task.id)
-        state = states[0].state
-        assert state.get("turn_number") == 2
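`send_event_and_poll_yielding` lives in the shared `test_utils` package, which is not part of this diff. The pattern it implements is: send one user event, then poll the task and yield each message not yet seen. A rough sketch under that assumption (the message-listing call is a guess, not a confirmed SDK method; `client.agents.send_event` is the same call the tests make directly):

```python
import asyncio
from typing import AsyncIterator

from agentex import AsyncAgentex
from agentex.types import TaskMessage

async def send_event_and_poll(
    client: AsyncAgentex,
    agent_id: str,
    task_id: str,
    user_message: str,
    timeout: float = 30,
    sleep_interval: float = 1.0,
) -> AsyncIterator[TaskMessage]:
    content = {"type": "text", "author": "user", "content": user_message}
    await client.agents.send_event(agent_id=agent_id, params={"task_id": task_id, "content": content})

    seen: set[str] = set()
    loop = asyncio.get_event_loop()
    deadline = loop.time() + timeout
    while loop.time() < deadline:
        # Assumed: some endpoint that lists a task's messages.
        for message in await client.messages.list(task_id=task_id):
            if message.id not in seen:
                seen.add(message.id)
                yield message
        await asyncio.sleep(sleep_interval)
```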
-
-class TestStreamingEvents:
-    """Test streaming event sending with MCP tools and custom streaming patterns."""
-
-    @pytest.mark.asyncio
-    async def test_send_event_and_stream_simple(self, client: AsyncAgentex, agent_id: str):
-        """Test streaming a simple response without tool usage."""
-        # Create a task for this conversation
-        task_response = await client.agents.create_task(agent_id, params=ParamsCreateTaskRequest(name=uuid.uuid1().hex))
-        task = task_response.result
-        assert task is not None
-
-        # Check initial state
-        await asyncio.sleep(1)  # wait for state to be initialized
-        states = await client.states.list(agent_id=agent_id, task_id=task.id)
-        assert len(states) == 1
-        state = states[0].state
-        assert state.get("input_list", []) == []
-        assert state.get("turn_number", 0) == 0
-
-        user_message = "Tell me a very short joke about programming."
-
-        # Collect events from stream
-        # Check for user message and delta messages
-        user_message_found = False
-
-        async def stream_messages() -> None:
-            nonlocal user_message_found
-            async for event in stream_agent_response(
-                client=client,
-                task_id=task.id,
-                timeout=20,
-            ):
-                msg_type = event.get("type")
-                # For full messages, content is at the top level
-                # For delta messages, we need to check parent_task_message
-                if msg_type == "full":
-                    if (
-                        event.get("content", {}).get("type") == "text"
-                        and event.get("content", {}).get("author") == "user"
-                    ):
-                        user_message_found = True
-                elif msg_type == "done":
-                    break
-
-        stream_task = asyncio.create_task(stream_messages())
-
-        event_content = TextContentParam(type="text", author="user", content=user_message)
-        await client.agents.send_event(agent_id=agent_id, params={"task_id": task.id, "content": event_content})
-
-        # Wait for streaming to complete
-        await stream_task
-        assert user_message_found, "User message not found in stream"
-        ## keep polling the states for 10 seconds for the input_list and turn_number to be updated
-        for i in range(10):
-            if i == 9:
-                raise Exception("Timeout waiting for state updates")
-            states = await client.states.list(agent_id=agent_id, task_id=task.id)
-            state = states[0].state
-            if len(state.get("input_list", [])) > 0 and state.get("turn_number") == 1:
-                break
-            await asyncio.sleep(1)
-
-        # Verify state has been updated
-        states = await client.states.list(agent_id=agent_id, task_id=task.id)
-        assert len(states) == 1
-        state = states[0].state
-        input_list = state.get("input_list", [])
-
-        assert isinstance(input_list, list)
-        assert len(input_list) >= 2
-        assert state.get("turn_number") == 1
-
-    @pytest.mark.asyncio
-    async def test_send_event_and_stream_with_tools(self, client: AsyncAgentex, agent_id: str):
-        """Test streaming with tool calls - demonstrates mixed streaming patterns."""
-        # Create a task for this conversation
-        task_response = await client.agents.create_task(agent_id, params=ParamsCreateTaskRequest(name=uuid.uuid1().hex))
-        task = task_response.result
-        assert task is not None
-
-        # This query should trigger tool usage
-        user_message = "Use sequential thinking to calculate what 123 times 456 equals."
- - tool_requests_seen = [] - tool_responses_seen = [] - text_deltas_seen = [] - - async def stream_messages() -> None: - nonlocal tool_requests_seen, tool_responses_seen, text_deltas_seen - - async for event in stream_agent_response( - client=client, - task_id=task.id, - timeout=45, - ): - msg_type = event.get("type") - - # For full messages, content is at the top level - # For delta messages, we need to check parent_task_message - if msg_type == "delta": - parent_msg = event.get("parent_task_message", {}) - content = parent_msg.get("content", {}) - delta = event.get("delta", {}) - content_type = content.get("type") - - if content_type == "text": - text_deltas_seen.append(delta.get("text_delta", "")) - elif msg_type == "full": - # For full messages - content = event.get("content", {}) - content_type = content.get("type") - - if content_type == "tool_request": - tool_requests_seen.append( - { - "name": content.get("name"), - "tool_call_id": content.get("tool_call_id"), - "streaming_type": msg_type, - } - ) - elif content_type == "tool_response": - tool_responses_seen.append( - { - "tool_call_id": content.get("tool_call_id"), - "streaming_type": msg_type, - } - ) - elif msg_type == "done": - break - - stream_task = asyncio.create_task(stream_messages()) - - event_content = TextContentParam(type="text", author="user", content=user_message) - await client.agents.send_event(agent_id=agent_id, params={"task_id": task.id, "content": event_content}) - - # Wait for streaming to complete - await stream_task - - # Verify we saw tool usage (if the agent decided to use tools) - # Note: The agent may or may not use tools depending on its reasoning - # Verify the state has a response written to it - # assert len(text_deltas_seen) > 0, "Should have received text delta streaming" - for i in range(10): - if i == 9: - raise Exception("Timeout waiting for state updates") - states = await client.states.list(agent_id=agent_id, task_id=task.id) - state = states[0].state - if len(state.get("input_list", [])) > 0 and state.get("turn_number") == 1: - break - await asyncio.sleep(1) - - # Verify state has been updated - states = await client.states.list(agent_id=agent_id, task_id=task.id) - assert len(states) == 1 - state = states[0].state - input_list = state.get("input_list", []) - - assert isinstance(input_list, list) - assert len(input_list) >= 2 - print(input_list) - - -if __name__ == "__main__": - pytest.main([__file__, "-v"]) diff --git a/examples/tutorials/10_async/00_base/080_batch_events/.dockerignore b/examples/tutorials/10_async/00_base/080_batch_events/.dockerignore deleted file mode 100644 index c4f7a8b4..00000000 --- a/examples/tutorials/10_async/00_base/080_batch_events/.dockerignore +++ /dev/null @@ -1,43 +0,0 @@ -# Python -__pycache__/ -*.py[cod] -*$py.class -*.so -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -*.egg-info/ -.installed.cfg -*.egg - -# Environments -.env** -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# IDE -.idea/ -.vscode/ -*.swp -*.swo - -# Git -.git -.gitignore - -# Misc -.DS_Store \ No newline at end of file diff --git a/examples/tutorials/10_async/00_base/080_batch_events/Dockerfile b/examples/tutorials/10_async/00_base/080_batch_events/Dockerfile deleted file mode 100644 index dbeccdfb..00000000 --- a/examples/tutorials/10_async/00_base/080_batch_events/Dockerfile +++ /dev/null @@ -1,51 +0,0 @@ -# syntax=docker/dockerfile:1.3 -FROM python:3.12-slim -COPY --from=ghcr.io/astral-sh/uv:0.6.4 /uv /uvx /bin/ - -# 
Install system dependencies
-RUN apt-get update && apt-get install -y \
-    htop \
-    vim \
-    curl \
-    tar \
-    python3-dev \
-    postgresql-client \
-    build-essential \
-    libpq-dev \
-    gcc \
-    cmake \
-    netcat-openbsd \
-    && apt-get clean \
-    && rm -rf /var/lib/apt/lists/*
-
-RUN uv pip install --system --upgrade pip setuptools wheel
-
-ENV UV_HTTP_TIMEOUT=1000
-
-# Copy pyproject.toml and README.md to install dependencies
-COPY 10_async/00_base/080_batch_events/pyproject.toml /app/080_batch_events/pyproject.toml
-COPY 10_async/00_base/080_batch_events/README.md /app/080_batch_events/README.md
-
-WORKDIR /app/080_batch_events
-
-# Copy the project code
-COPY 10_async/00_base/080_batch_events/project /app/080_batch_events/project
-
-# Copy the test files
-COPY 10_async/00_base/080_batch_events/tests /app/080_batch_events/tests
-
-# Copy shared test utilities
-COPY test_utils /app/test_utils
-
-# Install the required Python packages with dev dependencies (includes pytest)
-RUN uv pip install --system .[dev] pytest-asyncio httpx
-
-WORKDIR /app/080_batch_events
-# Set environment variables
-ENV PYTHONPATH=/app
-
-# Set test environment variables
-ENV AGENT_NAME=ab080-batch-events
-
-# Run the agent using uvicorn
-CMD ["uvicorn", "project.acp:acp", "--host", "0.0.0.0", "--port", "8000"]
diff --git a/examples/tutorials/10_async/00_base/080_batch_events/README.md b/examples/tutorials/10_async/00_base/080_batch_events/README.md
deleted file mode 100644
index b49e0187..00000000
--- a/examples/tutorials/10_async/00_base/080_batch_events/README.md
+++ /dev/null
@@ -1,46 +0,0 @@
-# [Agentic] Batch Events
-
-Demonstrates limitations of the base async protocol with concurrent event processing. When multiple events arrive rapidly, base async agents handle them sequentially, which can cause issues.
-
-## What You'll Learn
-- Limitations of non-Temporal async agents
-- Race conditions and ordering issues in concurrent scenarios
-- When you need workflow orchestration
-- Why this motivates Temporal adoption
-
-## Prerequisites
-- Development environment set up (see [main repo README](https://github.com/scaleapi/scale-agentex))
-- Backend services running: `make dev` from repository root
-- Understanding of async patterns (see previous tutorials)
-
-## Quick Start
-
-```bash
-cd examples/tutorials/10_async/00_base/080_batch_events
-uv run agentex agents run --manifest manifest.yaml
-```
-
-## Why This Matters
-
-This tutorial shows **when you need Temporal**. If your agent needs to:
-- Handle events that might arrive out of order
-- Process multiple events in parallel safely
-- Maintain consistent state under concurrent load
-
-Then you should use Temporal workflows (see tutorials 10_async/10_temporal/) which provide:
-- Deterministic event ordering
-- Safe concurrent processing
-- Guaranteed state consistency
-
-This is the "breaking point" tutorial that motivates moving to Temporal for production agents.
-
-## When to Use This Pattern
-This tutorial shows what NOT to use for production. Use base async agents only when:
-- Events are infrequent (< 1 per second)
-- Order doesn't matter
-- State consistency isn't critical
-
-## Key Takeaway
-Every production agent eventually hits concurrency issues. This tutorial shows you those limits early, so you know when to graduate to Temporal. Better to learn this lesson in a tutorial than in production!
-
-**Next:** Ready for production? 
โ†’ [../10_temporal/000_hello_acp](../../10_temporal/000_hello_acp/) or explore [090_multi_agent_non_temporal](../090_multi_agent_non_temporal/) for complex non-Temporal coordination diff --git a/examples/tutorials/10_async/00_base/080_batch_events/dev.ipynb b/examples/tutorials/10_async/00_base/080_batch_events/dev.ipynb deleted file mode 100644 index 5bb98625..00000000 --- a/examples/tutorials/10_async/00_base/080_batch_events/dev.ipynb +++ /dev/null @@ -1,155 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "from __future__ import annotations\n", - "\n", - "from agentex import Agentex\n", - "\n", - "client = Agentex(base_url=\"http://localhost:5003\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1", - "metadata": {}, - "outputs": [], - "source": [ - "AGENT_NAME = \"ab080-batch-events\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2", - "metadata": {}, - "outputs": [], - "source": [ - "# (REQUIRED) Create a new task. For Agentic agents, you must create a task for messages to be associated with.\n", - "import uuid\n", - "\n", - "rpc_response = client.agents.create_task(\n", - " agent_name=AGENT_NAME,\n", - " params={\n", - " \"name\": f\"{str(uuid.uuid4())[:8]}-task\",\n", - " \"params\": {}\n", - " }\n", - ")\n", - "\n", - "task = rpc_response.result\n", - "print(task)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": {}, - "outputs": [], - "source": [ - "from agentex.types import Event\n", - "from agentex.lib.utils.dev_tools import subscribe_to_async_task_messages\n", - "from agentex.types.agent_rpc_params import ParamsSendEventRequest\n", - "\n", - "# The response is expected to be a list of TaskMessage objects, which is a union of the following types:\n", - "# - TextContent: A message with just text content \n", - "# - DataContent: A message with JSON-serializable data content\n", - "# - ToolRequestContent: A message with a tool request, which contains a JSON-serializable request to call a tool\n", - "# - ToolResponseContent: A message with a tool response, which contains response object from a tool call in its content\n", - "\n", - "# When processing the message/send response, if you are expecting more than TextContent, such as DataContent, ToolRequestContent, or ToolResponseContent, you can process them as well\n", - "\n", - "concurrent_event_messages: list[ParamsSendEventRequest] = [\n", - " {\n", - " \"content\": {\"type\": \"text\", \"author\": \"user\", \"content\": \"Hello, what can you do?\"},\n", - " \"task_id\": task.id,\n", - " },\n", - " {\n", - " \"content\": {\"type\": \"text\", \"author\": \"user\", \"content\": \"Can you tell me a joke?\"},\n", - " \"task_id\": task.id,\n", - " },\n", - " {\n", - " \"content\": {\"type\": \"text\", \"author\": \"user\", \"content\": \"What is the capital of France?\"},\n", - " \"task_id\": task.id,\n", - " },\n", - " {\n", - " \"content\": {\"type\": \"text\", \"author\": \"user\", \"content\": \"Write a short story about a cat\"},\n", - " \"task_id\": task.id,\n", - " },\n", - " {\n", - " \"content\": {\"type\": \"text\", \"author\": \"user\", \"content\": \"Tell me how an LLM works\"},\n", - " \"task_id\": task.id,\n", - " },\n", - "]\n", - "\n", - "events: list[Event] = []\n", - "\n", - "for event_message in concurrent_event_messages:\n", - " rpc_response = client.agents.send_event(\n", - " agent_name=AGENT_NAME,\n", - " params=event_message\n", 
- " )\n", - "\n", - " event = rpc_response.result\n", - " events.append(event)\n", - "\n", - "for event in events:\n", - " print(event)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4", - "metadata": {}, - "outputs": [], - "source": [ - "from agentex.lib.utils.dev_tools import subscribe_to_async_task_messages\n", - "\n", - "task_messages = subscribe_to_async_task_messages(\n", - " client=client,\n", - " task=task, \n", - " only_after_timestamp=event.created_at, \n", - " print_messages=True,\n", - " rich_print=True,\n", - " timeout=20,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.9" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} \ No newline at end of file diff --git a/examples/tutorials/10_async/00_base/080_batch_events/manifest.yaml b/examples/tutorials/10_async/00_base/080_batch_events/manifest.yaml deleted file mode 100644 index dd5f8cbd..00000000 --- a/examples/tutorials/10_async/00_base/080_batch_events/manifest.yaml +++ /dev/null @@ -1,117 +0,0 @@ -# Agent Manifest Configuration -# --------------------------- -# This file defines how your agent should be built and deployed. - -# Build Configuration -# ------------------ -# The build config defines what gets packaged into your agent's Docker image. -# This same configuration is used whether building locally or remotely. -# -# When building: -# 1. All files from include_paths are collected into a build context -# 2. The context is filtered by dockerignore rules -# 3. The Dockerfile uses this context to build your agent's image -# 4. 
The image is pushed to a registry and used to run your agent -build: - context: - # Root directory for the build context - root: ../../../ # Up to tutorials level to include test_utils - - # Paths to include in the Docker build context - # Must include: - # - Your agent's directory (your custom agent code) - # These paths are collected and sent to the Docker daemon for building - include_paths: - - 10_async/00_base/080_batch_events - - test_utils - - # Path to your agent's Dockerfile - # This defines how your agent's image is built from the context - # Relative to the root directory - dockerfile: 10_async/00_base/080_batch_events/Dockerfile - - # Path to your agent's .dockerignore - # Filters unnecessary files from the build context - # Helps keep build context small and builds fast - dockerignore: 10_async/00_base/080_batch_events/.dockerignore - - -# Local Development Configuration -# ----------------------------- -# Only used when running the agent locally -local_development: - agent: - port: 8000 # Port where your local ACP server is running - host_address: host.docker.internal # Host address for Docker networking (host.docker.internal for Docker, localhost for direct) - - # File paths for local development (relative to this manifest.yaml) - paths: - # Path to ACP server file - # Examples: - # project/acp.py (standard) - # src/server.py (custom structure) - # ../shared/acp.py (shared across projects) - # /absolute/path/acp.py (absolute path) - acp: project/acp.py - - -# Agent Configuration -# ----------------- -agent: - acp_type: async - - # Unique name for your agent - # Used for task routing and monitoring - name: ab080-batch-events - - # Description of what your agent does - # Helps with documentation and discovery - description: An AgentEx agent - - # Temporal workflow configuration - # Set enabled: true to use Temporal workflows for long-running tasks - temporal: - enabled: false - - # Optional: Credentials mapping - # Maps Kubernetes secrets to environment variables - # Common credentials include: - # credentials: - # - env_var_name: OPENAI_API_KEY - # secret_name: openai-api-key - # secret_key: api-key - - # Optional: Set Environment variables for running your agent locally as well - # as for deployment later on - # env: - # OPENAI_API_KEY: "" - # OPENAI_BASE_URL: "" - # OPENAI_ORG_ID: "" - -# Deployment Configuration -# ----------------------- -# Configuration for deploying your agent to Kubernetes clusters -deployment: - # Container image configuration - image: - repository: "" # Update with your container registry - tag: "latest" # Default tag, should be versioned in production - - # Global deployment settings that apply to all clusters - # These can be overridden in cluster-specific files (deploy/*.yaml) - global: - agent: - name: "ab080-batch-events" - description: "An AgentEx agent" - - # Default replica count - replicaCount: 1 - - # Default resource requirements - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "1000m" - memory: "2Gi" \ No newline at end of file diff --git a/examples/tutorials/10_async/00_base/080_batch_events/project/__init__.py b/examples/tutorials/10_async/00_base/080_batch_events/project/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/examples/tutorials/10_async/00_base/080_batch_events/project/acp.py b/examples/tutorials/10_async/00_base/080_batch_events/project/acp.py deleted file mode 100644 index 94e79068..00000000 --- a/examples/tutorials/10_async/00_base/080_batch_events/project/acp.py +++ 
/dev/null
@@ -1,235 +0,0 @@
-"""
-WARNING: This tutorial is NOT production ready. It is meant as a demonstration of how to handle a burst of events in an async ACP.
-
-There are many limitations to this approach. Please see the README.md for more details.
-"""
-import asyncio
-from enum import Enum
-
-from agentex.lib import adk
-from agentex.lib.types.acp import SendEventParams, CancelTaskParams, CreateTaskParams
-from agentex.lib.types.fastacp import AsyncACPConfig
-from agentex.lib.utils.logging import make_logger
-from agentex.types.text_content import TextContent
-from agentex.lib.sdk.fastacp.fastacp import FastACP
-
-logger = make_logger(__name__)
-
-
-class TaskCancelledError(Exception):
-    pass
-
-
-class Status(Enum):
-    PROCESSING = "processing"
-    READY = "ready"
-    CANCELLED = "cancelled"
-
-
-# Create an ACP server
-acp = FastACP.create(
-    acp_type="async",
-    config=AsyncACPConfig(type="base")
-)
-
-async def process_events_batch(events, task_id: str) -> str | None:
-    """
-    Process a batch of events with a 3s sleep per event to simulate work.
-    Returns the ID of the last processed event.
-    """
-    if not events:
-        return None
-
-    logger.info(f"🔄 Processing {len(events)} events: {[e.id for e in events]}")
-
-    # Sleep for 3s per event to simulate processing work
-    for event in events:
-        await asyncio.sleep(3)
-        logger.info(f"  INSIDE PROCESSING LOOP - FINISHED PROCESSING EVENT {event.id}")
-
-    # Create message showing what was processed
-    event_ids = [event.id for event in events]
-    message_content = TextContent(
-        author="agent",
-        content=f"Processed event IDs: {event_ids}"
-    )
-
-    await adk.messages.create(
-        task_id=task_id,
-        content=message_content
-    )
-
-    final_cursor = events[-1].id
-    logger.info(f"📝 Message created for {len(events)} events (cursor: {final_cursor})")
-    return final_cursor
-
-
-@acp.on_task_create
-async def handle_task_create(params: CreateTaskParams) -> None:
-    # For this tutorial, we print the parameters sent to the handler
-    # so you can see where and how task creation is handled
-
-    logger.info(f"Task created: {params.task.id} for agent: {params.agent.id}")
-
-    # The AgentTaskTracker is automatically created by the server when a task is created
-    # Let's verify it exists and log its initial state
-    try:
-        tracker = await adk.agent_task_tracker.get_by_task_and_agent(
-            task_id=params.task.id,
-            agent_id=params.agent.id
-        )
-        logger.info(f"AgentTaskTracker found: {tracker.id}, status: {tracker.status}, last_processed_event_id: {tracker.last_processed_event_id}")
-    except Exception as e:
-        logger.error(f"Error getting AgentTaskTracker: {e}")
-
-    logger.info("Task creation complete")
-    return
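Stripped of logging and error handling, the event handler that follows boils down to a claim → drain → release cycle. This skeleton reuses the names defined in the file above (`adk`, `Status`, `SendEventParams`, `process_events_batch`) and is for orientation only; the full handler below is the actual tutorial code:

```python
async def claim_drain_release(params: SendEventParams) -> None:
    tracker = await adk.agent_task_tracker.get_by_task_and_agent(
        task_id=params.task.id, agent_id=params.agent.id
    )
    if tracker.status in (Status.CANCELLED.value, Status.PROCESSING.value):
        return  # another pod owns this cycle (non-atomic check -- see below)

    # Claim: mark the task as being processed.
    tracker = await adk.agent_task_tracker.update(
        tracker_id=tracker.id, status=Status.PROCESSING.value
    )
    cursor = tracker.last_processed_event_id
    try:
        # Drain: keep pulling batches until no new events have arrived.
        while True:
            events = await adk.events.list_events(
                task_id=params.task.id,
                agent_id=params.agent.id,
                last_processed_event_id=cursor,
                limit=100,
            )
            if not events:
                break
            cursor = await process_events_batch(events, params.task.id)
            await adk.agent_task_tracker.update(
                tracker_id=tracker.id,
                last_processed_event_id=cursor,
                status=Status.PROCESSING.value,
            )
    finally:
        # Release: hand the task back so future events trigger a new cycle.
        await adk.agent_task_tracker.update(
            tracker_id=tracker.id, status=Status.READY.value
        )
```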
-
-@acp.on_task_event_send
-async def handle_task_event_send(params: SendEventParams) -> None:
-    """
-    NOTE: See the README.md for a set of limitations as to why this is not the best way to handle events.
-
-    Handle incoming events with batching behavior.
-
-    Demonstrates how events arriving during PROCESSING get queued and batched:
-    1. Check status - skip if CANCELLED or already PROCESSING
-    2. Set status to PROCESSING
-    3. Process events in batches until no more arrive
-    4. Set status back to READY
-
-    The key insight: while this agent is sleeping 3s per event, new events
-    can arrive and will be batched together in the next processing cycle.
-    """
-    logger.info(f"📥 Received event: {params.event.id}")
-
-    # Get the current AgentTaskTracker state
-    try:
-        tracker = await adk.agent_task_tracker.get_by_task_and_agent(
-            task_id=params.task.id,
-            agent_id=params.agent.id
-        )
-        logger.info(f"Current tracker status: {tracker.status}, cursor: {tracker.last_processed_event_id}")
-    except Exception as e:
-        logger.error(f"Error getting AgentTaskTracker: {e}")
-        return
-
-    # Skip if task is cancelled
-    if tracker.status == Status.CANCELLED.value:
-        logger.error("❌ Task is cancelled. Skipping.")
-        return
-
-    # Skip if already processing (another pod is handling it)
-    if tracker.status == Status.PROCESSING.value:
-        logger.info("⏭️ Task is already being processed by another pod. Skipping.")
-        return
-
-    # LIMITATION - because the read above and the update below are not atomic, two
-    # different processes can both read a READY status and then both try to set it to
-    # PROCESSING. The only way to prevent this is locking, which is not supported
-    # by the agentex server.
-    #
-    # Options:
-    # 1. Implement your own database locking mechanism and provide the agent with the credentials to the database
-    # 2. Use Temporal, which ensures there is only one workflow execution processing at a time (so no lock is needed)
-    # Update status to PROCESSING to claim this processing cycle
-    try:
-        tracker = await adk.agent_task_tracker.update(
-            tracker_id=tracker.id,
-            status=Status.PROCESSING.value,
-            status_reason="Processing events in batches"
-        )
-        logger.info("🔒 Set status to PROCESSING")
-    except Exception as e:
-        logger.error(f"❌ Failed to set status to PROCESSING (another pod may have claimed it): {e}")
-        return
-
-    reset_to_ready = True
-    try:
-        current_cursor = tracker.last_processed_event_id
-        # Main processing loop - keep going until no more new events
-        while True:
-            print(f"\n🔍 Checking for new events since cursor: {current_cursor}")
-
-            tracker = await adk.agent_task_tracker.get(tracker_id=tracker.id)
-            if tracker.status == Status.CANCELLED.value:
-                logger.error("❌ Task is cancelled. Skipping.")
-                raise TaskCancelledError("Task is cancelled")
-
-            # Get all new events since current cursor
-            try:
-                print("Listing events since cursor: ", current_cursor)
-                new_events = await adk.events.list_events(
-                    task_id=params.task.id,
-                    agent_id=params.agent.id,
-                    last_processed_event_id=current_cursor,
-                    limit=100
-                )
-
-                if not new_events:
-                    print("✅ No more new events found - processing cycle complete")
-                    break
-
-                logger.info(f"🎯 BATCH: Found {len(new_events)} events to process")
-
-            except Exception as e:
-                logger.error(f"❌ Error collecting events: {e}")
-                break
-
-            # Process this batch of events (with 3s sleeps)
-            try:
-                final_cursor = await process_events_batch(new_events, params.task.id)
-
-                # Update cursor to mark these events as processed
-                await adk.agent_task_tracker.update(
-                    tracker_id=tracker.id,
-                    last_processed_event_id=final_cursor,
-                    status=Status.PROCESSING.value,  # Still processing, might be more
-                    status_reason=f"Processed batch of {len(new_events)} events"
-                )
-
-                current_cursor = final_cursor
-                logger.info(f"📊 Updated cursor to: {current_cursor}")
-
-            except Exception as e:
-                logger.error(f"❌ Error processing events batch: {e}")
-                break
-    except TaskCancelledError as e:
-        logger.error(f"❌ Task cancelled: {e}")
-        reset_to_ready = False
-    finally:
-        if reset_to_ready:
-            # Always set status back to READY when done processing
-            try:
-                await adk.agent_task_tracker.update(
-                    tracker_id=tracker.id,
-                    status=Status.READY.value,
-                    status_reason="Completed event processing - ready for new events"
-                )
-                logger.info("🟢 Set status back to READY - agent available for new events")
-            except Exception as e:
-                logger.error(f"❌ Error setting status back to READY: {e}")
-
-
-@acp.on_task_cancel
-async def handle_task_canceled(params: CancelTaskParams):
-    # For this tutorial, we print the parameters sent to the handler
-    # so you can see where and how task cancellation is handled
-    logger.info(f"Task canceled: {params.task.id}")
-
-    # Update the AgentTaskTracker to reflect cancellation
-    try:
-        tracker = await adk.agent_task_tracker.get_by_task_and_agent(
-            task_id=params.task.id,
-            agent_id=params.agent.id
-        )
-        await adk.agent_task_tracker.update(
-            tracker_id=tracker.id,
-            status=Status.CANCELLED.value,
-            status_reason="Task was cancelled by user"
-        )
-        logger.info("Updated tracker status to cancelled")
-    except Exception as e:
-        logger.error(f"Error updating tracker on cancellation: {e}")
-
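The non-atomic read-then-claim called out in the LIMITATION comment is the core weakness: two pods can both read READY and then both set PROCESSING. Option 1 from those comments (bring your own database lock) could look roughly like this with a Postgres advisory lock; the asyncpg calls are standard, but wiring the DSN and key scheme into the agent is left entirely as an assumption:

```python
import zlib

import asyncpg

async def try_claim_task(dsn: str, task_id: str) -> asyncpg.Connection | None:
    """Return a connection holding the task's advisory lock, or None if another pod has it."""
    conn = await asyncpg.connect(dsn)
    key = zlib.crc32(task_id.encode())  # pg advisory locks take an integer key
    if await conn.fetchval("SELECT pg_try_advisory_lock($1)", key):
        return conn  # keep the connection open for as long as we process
    await conn.close()
    return None

async def release_task(conn: asyncpg.Connection, task_id: str) -> None:
    key = zlib.crc32(task_id.encode())
    await conn.fetchval("SELECT pg_advisory_unlock($1)", key)
    await conn.close()
```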
diff --git a/examples/tutorials/10_async/00_base/080_batch_events/pyproject.toml b/examples/tutorials/10_async/00_base/080_batch_events/pyproject.toml
deleted file mode 100644
index a38bfbb6..00000000
--- a/examples/tutorials/10_async/00_base/080_batch_events/pyproject.toml
+++ /dev/null
@@ -1,33 +0,0 @@
-[build-system]
-requires = ["hatchling"]
-build-backend = "hatchling.build"
-
-[project]
-name = "ab080-batch-events"
-version = "0.1.0"
-description = "An AgentEx agent"
-readme = "README.md"
-requires-python = ">=3.12"
-dependencies = [
-    "agentex-sdk",
-    "scale-gp",
-]
-
-[project.optional-dependencies]
-dev = [
-    "pytest",
-    "black",
-    "isort",
-    "flake8",
-]
-
-[tool.hatch.build.targets.wheel]
-packages = ["project"]
-
-[tool.black]
-line-length = 88
-target-version = ['py312']
-
-[tool.isort]
-profile = "black"
-line_length = 88
\ No newline at end of file
diff --git a/examples/tutorials/10_async/00_base/080_batch_events/test_batch_events.py b/examples/tutorials/10_async/00_base/080_batch_events/test_batch_events.py
deleted file mode 100644
index b7a5397d..00000000
--- a/examples/tutorials/10_async/00_base/080_batch_events/test_batch_events.py
+++ /dev/null
@@ -1,112 +0,0 @@
-#!/usr/bin/env python3
-"""
-Simple script to test agent RPC endpoints using the actual schemas.
-"""
-
-import json
-import uuid
-import asyncio
-
-import httpx
-
-# Configuration
-BASE_URL = "http://localhost:5003"
-# AGENT_ID = "b4f32d71-ff69-4ac9-84d1-eb2937fea0c7"
-AGENT_ID = "58e78cd0-c898-4009-b5d9-eada8ebcad83"
-RPC_ENDPOINT = f"{BASE_URL}/agents/{AGENT_ID}/rpc"
-
-async def send_rpc_request(method: str, params: dict):
-    """Send an RPC request to the agent."""
-    request_data = {
-        "jsonrpc": "2.0",
-        "id": str(uuid.uuid4()),
-        "method": method,
-        "params": params
-    }
-
-    print(f"→ Sending: {method}")
-    print(f"  Request: {json.dumps(request_data, indent=2)}")
-
-    async with httpx.AsyncClient() as client:
-        try:
-            response = await client.post(
-                RPC_ENDPOINT,
-                json=request_data,
-                headers={"Content-Type": "application/json"},
-                timeout=30.0
-            )
-
-            print(f"  Status: {response.status_code}")
-
-            if response.status_code == 200:
-                response_data = response.json()
-                print(f"  Response: {json.dumps(response_data, indent=2)}")
-                return response_data
-            else:
-                print(f"  Error: {response.text}")
-                return None
-
-        except Exception as e:
-            print(f"  Failed: {e}")
-            return None
-
-async def main():
-    """Main function to test the agent RPC endpoints."""
-    print(f"🚀 Testing Agent RPC: {AGENT_ID}")
-    print(f"🔗 Endpoint: {RPC_ENDPOINT}")
-    print("=" * 50)
-
-    # Step 1: Create a task
-    print("\n📝 Step 1: Creating a task...")
-    task_response = await send_rpc_request("task/create", {
-        "params": {
-            "description": "Test task from simple script"
-        }
-    })
-
-    if not task_response or task_response.get("error"):
-        print("❌ Task creation failed, continuing anyway...")
-        task_id = str(uuid.uuid4())  # Generate a task ID to continue
-    else:
-        # Extract task_id from response (adjust based on actual response structure)
-        task_id = task_response.get("result", {}).get("id", str(uuid.uuid4()))
-
-    print(f"📋 Using task_id: {task_id}")
-
-    # Step 2: Send messages
-    print("\n📤 Step 2: Sending messages...")
-
-    messages = [f"This is message {i}" for i in range(20)]
-
-    for i, message in enumerate(messages, 1):
-        print(f"\n📨 Sending message {i}/{len(messages)}")
-
-        # Create message content using TextContent structure
-        message_content = {
-            "type": "text",
-            "author": "user",
-            "style": "static",
-            "format": "plain",
-            "content": message
-        }
-
-        # Send each message using the event/send method
-        response = await send_rpc_request("event/send", {
-            "task_id": task_id,
-            "event": message_content,
-        })
-
-        if response and not response.get("error"):
-            print(f"✅ Message {i} sent successfully")
-        else:
-            print(f"❌ Message {i} failed")
-
-        # Small delay between messages
-        await asyncio.sleep(0.1)
-
-    print("\n" + "=" * 50)
-    print("✨ Script completed!")
-    print(f"📋 Task ID: {task_id}")
-
-if __name__ == "__main__":
-    asyncio.run(main())
diff --git a/examples/tutorials/10_async/00_base/080_batch_events/tests/test_agent.py b/examples/tutorials/10_async/00_base/080_batch_events/tests/test_agent.py
deleted file mode 100644
index 6ccad7d2..00000000
--- a/examples/tutorials/10_async/00_base/080_batch_events/tests/test_agent.py
+++ /dev/null
@@ -1,223 +0,0 @@
-"""
-Sample tests for AgentEx ACP agent.
-
-This test suite demonstrates how to test the main AgentEx API functions:
-- Non-streaming event sending and polling
-- Streaming event sending
-
-To run these tests:
-1. Make sure the agent is running (via docker-compose or `agentex agents run`)
-2. Set the AGENTEX_API_BASE_URL environment variable if not using default
-3. Run: pytest test_agent.py -v
-
-Configuration:
-- AGENTEX_API_BASE_URL: Base URL for the AgentEx server (default: http://localhost:5003)
-- AGENT_NAME: Name of the agent to test (default: ab080-batch-events)
-"""
-
-import os
-import re
-import uuid
-import asyncio
-
-import pytest
-import pytest_asyncio
-from test_utils.async_utils import (
-    stream_agent_response,
-    send_event_and_poll_yielding,
-)
-
-from agentex import AsyncAgentex
-from agentex.types import TaskMessage
-from agentex.types.agent_rpc_params import ParamsCreateTaskRequest
-from agentex.types.text_content_param import TextContentParam
-from agentex.types.task_message_content import TextContent
-
-# Configuration from environment variables
-AGENTEX_API_BASE_URL = os.environ.get("AGENTEX_API_BASE_URL", "http://localhost:5003")
-AGENT_NAME = os.environ.get("AGENT_NAME", "ab080-batch-events")
-
-
-@pytest_asyncio.fixture
-async def client():
-    """Create an AsyncAgentex client instance for testing."""
-    client = AsyncAgentex(base_url=AGENTEX_API_BASE_URL)
-    yield client
-    await client.close()
-
-
-@pytest.fixture
-def agent_name():
-    """Return the agent name for testing."""
-    return AGENT_NAME
-
-
-@pytest_asyncio.fixture
-async def agent_id(client, agent_name):
-    """Retrieve the agent ID based on the agent name."""
-    agents = await client.agents.list()
-    for agent in agents:
-        if agent.name == agent_name:
-            return agent.id
-    raise ValueError(f"Agent with name {agent_name} not found.")
-
-
-class TestNonStreamingEvents:
-    """Test non-streaming event sending and polling."""
-
-    @pytest.mark.asyncio
-    async def test_send_event_and_poll(self, client: AsyncAgentex, agent_id: str):
-        """Test sending a single event and polling for the response."""
-        # Create a task for this conversation
-        task_response = await client.agents.create_task(agent_id, params=ParamsCreateTaskRequest(name=uuid.uuid1().hex))
-        task = task_response.result
-        assert task is not None
-
-        # Send an event and poll for the response using the helper function;
-        # there should only be one message returned, describing the batch
-        async for message in send_event_and_poll_yielding(
-            client=client,
-            agent_id=agent_id,
-            task_id=task.id,
-            user_message="Process this single event",
-            timeout=30,
-            sleep_interval=1.0,
-        ):
-            assert isinstance(message, TaskMessage)
-            assert isinstance(message.content, TextContent)
-            assert "Processed event IDs" in message.content.content
-            assert message.content.author == "agent"
-            break
-
-    @pytest.mark.asyncio
-    async def test_send_multiple_events_batched(self, client: AsyncAgentex, agent_id: str):
-        """Test sending multiple events that should be batched together."""
-        # Create a task
-        task_response = await client.agents.create_task(agent_id, params=ParamsCreateTaskRequest(name=uuid.uuid1().hex))
-        task = task_response.result
-        assert task is not None
-
-        # Send multiple events in quick succession (should be batched)
-        num_events = 7
-        for i in range(num_events):
-            event_content = TextContentParam(type="text", author="user", content=f"Batch event {i + 1}")
-            await client.agents.send_event(agent_id=agent_id, params={"task_id": task.id, "content": event_content})
-            await asyncio.sleep(0.1)  # Small delay to ensure ordering
-
-        # Wait for processing to complete (7 events * 3 seconds each = 21s + buffer)
-
-        ## there should be at least 2 agent responses, proving that not all of the
-        ## events were processed in the same message
-        agent_messages = []
-        async for message in send_event_and_poll_yielding(
-            client=client,
-            agent_id=agent_id,
-            task_id=task.id,
-            user_message="Process this single event",
-            timeout=30,
-            sleep_interval=1.0,
-        ):
-            if message.content and message.content.author == "agent":
-                agent_messages.append(message)
-
-            if len(agent_messages) == 2:
-                break
-
-        assert len(agent_messages) > 0, "Should have received at least one agent response"
-
-        # PROOF OF BATCHING: Should have fewer responses than events sent
-        assert len(agent_messages) < num_events, (
-            f"Expected batching to result in fewer responses than {num_events} events, got {len(agent_messages)}"
-        )
-
-        # Analyze each batch response to count how many events were in each batch
-        found_batch_with_multiple_events = False
-        for msg in agent_messages:
-            assert isinstance(msg.content, TextContent)
-            response = msg.content.content
-
-            # Count event IDs in this response (they're in a list like ['id1', 'id2', ...])
-            # Use regex to find all quoted strings in the list
-            event_ids = re.findall(r"'([^']+)'", response)
-            batch_size = len(event_ids)
-            if batch_size > 1:
-                # this means we have found a batch with multiple events
-                found_batch_with_multiple_events = True
-                break
-
-        assert found_batch_with_multiple_events, "Should have found a batch with multiple events"
-
-
-class TestStreamingEvents:
-    """Test streaming event sending."""
-
-    @pytest.mark.asyncio
-    async def test_send_ten_events_batched_streaming(self, client: AsyncAgentex, agent_id: str):
-        """Test sending 10 events and verifying batch processing via streaming."""
-        # Create a task
-        task_response = await client.agents.create_task(agent_id, params=ParamsCreateTaskRequest(name=uuid.uuid1().hex))
-        task = task_response.result
-        assert task is not None
-
-        # Send 10 events in quick succession (should be batched)
-        num_events = 10
-        print(f"\nSending {num_events} events in quick succession...")
-        for i in range(num_events):
-            event_content = TextContentParam(type="text", author="user", content=f"Batch event {i + 1}")
-            await client.agents.send_event(agent_id=agent_id, params={"task_id": task.id, "content": event_content})
-            await asyncio.sleep(0.1)  # Small delay to ensure ordering
-
-        # Stream the responses and collect agent messages
-        print("\nStreaming batch responses...")
-
-        # We'll collect all agent messages from the stream
-        agent_messages = []
-        stream_timeout = 90  # Longer timeout for 10 events
-
-        async for event in stream_agent_response(
-            client=client,
-            task_id=task.id,
-            timeout=stream_timeout,
-        ):
-            # Collect agent text messages
-            if event.get("type") == "full":
-                content = event.get("content", {})
-                if content.get("type") == "text" and content.get("author") == "agent":
-                    msg_content = content.get("content", "")
-                    if msg_content and msg_content.strip():
-                        agent_messages.append(msg_content)
-
-            if len(agent_messages) >= 2:
-                break
-
-        print(f"\nSent {num_events} events")
-        print(f"Received {len(agent_messages)} agent response(s)")
-
-        assert len(agent_messages) > 0, "Should have received at least one agent response"
-
-        # PROOF OF BATCHING: Should have fewer responses than events sent
-        assert len(agent_messages) < num_events, (
-            f"Expected batching to result in fewer responses than {num_events} events, got {len(agent_messages)}"
-        )
-
-        # Analyze each batch response to count how many events were in each batch
-        total_events_processed = 0
-        found_batch_with_multiple_events = False
-        for response in agent_messages:
-            # Count event IDs in this response (they're in a list like ['id1', 'id2', ...])
-            # Use regex to find all quoted strings in the list
-            event_ids = 
re.findall(r"'([^']+)'", response) - batch_size = len(event_ids) - - total_events_processed += batch_size - - # At least one response should have multiple events (proof of batching) - if batch_size > 1: - found_batch_with_multiple_events = True - break - - assert found_batch_with_multiple_events, "Should have found a batch with multiple events" - - -if __name__ == "__main__": - pytest.main([__file__, "-v"]) diff --git a/examples/tutorials/10_async/00_base/090_multi_agent_non_temporal/Dockerfile b/examples/tutorials/10_async/00_base/090_multi_agent_non_temporal/Dockerfile deleted file mode 100644 index 24ecf448..00000000 --- a/examples/tutorials/10_async/00_base/090_multi_agent_non_temporal/Dockerfile +++ /dev/null @@ -1,57 +0,0 @@ -# syntax=docker/dockerfile:1.3 -FROM python:3.12-slim - -COPY --from=ghcr.io/astral-sh/uv:0.6.4 /uv /uvx /bin/ - -# Install system dependencies -RUN apt-get update && apt-get install -y \ - htop \ - vim \ - curl \ - tar \ - python3-dev \ - postgresql-client \ - build-essential \ - libpq-dev \ - gcc \ - cmake \ - netcat-openbsd \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - -RUN uv pip install --system --upgrade pip setuptools wheel - -ENV UV_HTTP_TIMEOUT=1000 - -# Copy pyproject.toml and README.md to install dependencies -COPY 10_async/00_base/090_multi_agent_non_temporal/pyproject.toml /app/090_multi_agent_non_temporal/pyproject.toml -COPY 10_async/00_base/090_multi_agent_non_temporal/README.md /app/090_multi_agent_non_temporal/README.md - -WORKDIR /app/090_multi_agent_non_temporal - -# Copy the project code -COPY 10_async/00_base/090_multi_agent_non_temporal/project /app/090_multi_agent_non_temporal/project - -# Copy the test files -COPY 10_async/00_base/090_multi_agent_non_temporal/tests /app/090_multi_agent_non_temporal/tests - -# Copy shared test utilities -COPY test_utils /app/test_utils - -# Install the required Python packages with dev dependencies -RUN uv pip install --system .[dev] - -# Set environment variables -ENV PYTHONPATH=/app - -ARG AGENT_FILE -ARG PORT - -# Set test environment variables -ENV AGENT_NAME=ab090-multi-agent-non-temporal - -# Note: AGENT_NAME can be overridden at runtime based on which agent is running -# (ab090-creator-agent, ab090-critic-agent, ab090-formatter-agent, or ab090-orchestrator-agent) - -# Run the agent using uvicorn -CMD uvicorn project.${AGENT_FILE%.*}:acp --host 0.0.0.0 --port ${PORT:-8000} diff --git a/examples/tutorials/10_async/00_base/090_multi_agent_non_temporal/README.md b/examples/tutorials/10_async/00_base/090_multi_agent_non_temporal/README.md deleted file mode 100644 index d9f860e3..00000000 --- a/examples/tutorials/10_async/00_base/090_multi_agent_non_temporal/README.md +++ /dev/null @@ -1,210 +0,0 @@ -# Multi-Agent Content Assembly Line - -A multi-agent system that creates content through a collaborative workflow. Four agents work together: a creator generates content, a critic reviews it against rules, and a formatter outputs the final result, all coordinated by an orchestrator. 
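Before the file-by-file tour below, it helps to see the control flow in miniature: create → critique → revise until approved (capped at 10 iterations, per the workflow description), then format. The function names here are illustrative stand-ins for the event round-trips to the real agents, not code from this tutorial:

```python
from dataclasses import dataclass

@dataclass
class Verdict:
    approved: bool
    feedback: list[str]

# Hypothetical stand-ins for the event round-trips to the three worker agents.
async def creator(request: str, draft: str | None, feedback: list[str]) -> str: ...
async def critic(draft: str, rules: list[str]) -> Verdict: ...
async def formatter(draft: str, target_format: str) -> str: ...

async def run_assembly_line(request: str, rules: list[str], target_format: str) -> str:
    draft = await creator(request, None, [])
    for _ in range(10):  # revision cap
        verdict = await critic(draft, rules)
        if verdict.approved:
            break
        draft = await creator(request, draft, verdict.feedback)
    return await formatter(draft, target_format)
```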
- -## ๐Ÿ—๏ธ Architecture Overview - -``` -090_multi_agent_non_temporal/ -โ”œโ”€โ”€ project/ # All agent code -โ”‚ โ”œโ”€โ”€ creator.py # Content generation agent -โ”‚ โ”œโ”€โ”€ critic.py # Content review agent -โ”‚ โ”œโ”€โ”€ formatter.py # Content formatting agent -โ”‚ โ”œโ”€โ”€ orchestrator.py # Workflow coordination agent -โ”‚ โ”œโ”€โ”€ models.py # Pydantic models for type safety -โ”‚ โ””โ”€โ”€ state_machines/ -โ”‚ โ””โ”€โ”€ content_workflow.py # State machine definitions -โ”œโ”€โ”€ creator.yaml # Creator agent manifest -โ”œโ”€โ”€ critic.yaml # Critic agent manifest -โ”œโ”€โ”€ formatter.yaml # Formatter agent manifest -โ”œโ”€โ”€ orchestrator.yaml # Orchestrator agent manifest -โ”œโ”€โ”€ Dockerfile # Single shared Dockerfile -โ”œโ”€โ”€ pyproject.toml # Dependencies and project configuration -โ”œโ”€โ”€ start-agents.sh # Agent management script -โ””โ”€โ”€ README.md # This file -``` - -## ๐Ÿ“ File Structure - -The system uses a shared build configuration with type-safe interfaces: -- **Single `Dockerfile`** with build arguments for different agents -- **Single `pyproject.toml`** for all dependencies -- **Agent code** in `project/` directory with clear separation of concerns -- **Individual manifest files** at root level for each agent deployment -- **Shared state machine definitions** for workflow coordination -- **Pydantic models** (`models.py`) for type safety and validation across all agents - -### Key Files: -- `project/models.py` - Defines request/response models for type safety -- `project/orchestrator.py` - Workflow coordination and inter-agent communication -- `project/creator.py` - Content generation with revision capabilities -- `project/critic.py` - Content validation against rules -- `project/formatter.py` - Multi-format content transformation -- `project/state_machines/content_workflow.py` - State management for the workflow - -## ๐Ÿš€ Quick Start - -### Prerequisites -- Development environment set up (see [main repo README](https://github.com/scaleapi/scale-agentex)) -- Backend services running: `make dev` from repository root -- Python 3.12+ and uv package manager -- OpenAI API key (set `OPENAI_API_KEY` or create `.env` file) -- Understanding of async patterns (see previous tutorials) - -### Running the System - -1. **Start all agents**: - ```bash - cd examples/tutorials/10_async/00_base/090_multi_agent_non_temporal - ./start-agents.sh start - ``` - -2. **Check agent status**: - ```bash - ./start-agents.sh status - ``` - -3. **Send a test request**: - ```bash - ./start-agents.sh test - ``` - -4. **Monitor logs**: - ```bash - ./start-agents.sh logs - ``` - -5. **Stop all agents**: - ```bash - ./start-agents.sh stop - ``` - -## ๐Ÿค– Agent Responsibilities - -### **Creator Agent** (Port 8001) -- Generates original content based on user requests -- Revises content based on critic feedback -- Maintains conversation history and iteration tracking - -### **Critic Agent** (Port 8002) -- Reviews content against specified rules -- Provides specific, actionable feedback -- Approves content when all rules are met - -### **Formatter Agent** (Port 8003) -- Converts approved content to target formats (HTML, Markdown, JSON, etc.) 
-- Preserves meaning while applying format-specific conventions -- Supports multiple output formats - -### **Orchestrator Agent** (Port 8000) -- Coordinates the entire workflow using state machines -- Manages inter-agent communication -- Tracks progress and handles errors/retries - -## ๐Ÿ“‹ Example Request - -Send a JSON request to the orchestrator: - -```json -{ - "request": "Write a welcome message for our AI assistant", - "rules": ["Under 50 words", "Friendly tone", "Include emoji"], - "target_format": "HTML" -} -``` - -The system will: -1. **Create** content using the Creator agent -2. **Review** against rules using the Critic agent -3. **Revise** if needed (up to 10 iterations) -4. **Format** final approved content using the Formatter agent - -## ๐Ÿ”ง Development - -### Type Safety with Pydantic -The tutorial demonstrates proper type safety using Pydantic models: - -```python -# Define request structure -class CreatorRequest(BaseModel): - request: str = Field(..., description="The content creation request") - current_draft: Optional[str] = Field(default=None, description="Current draft for revision") - feedback: Optional[List[str]] = Field(default=None, description="Feedback from critic") - -# Validate incoming requests -creator_request = CreatorRequest.model_validate(request_data) -``` - -Benefits: -- **Explicit failures** when required fields are missing -- **Self-documenting** APIs with field descriptions -- **IDE support** with auto-completion and type checking -- **Runtime validation** with clear error messages - -### Adding New Agents -1. **Add models** to `project/models.py` for request/response types -2. **Create agent** in `project/new_agent.py` using the FastACP pattern -3. **Add manifest** as `new_agent.yaml` at root level with deployment configuration -4. 
**Update startup script** in `start-agents.sh` to include the new agent
-
-### Modifying Agents
-- **Agent code** is in `project/` directory
-- **Shared models** are in `project/models.py` for consistency
-- **Dependencies** go in `pyproject.toml`
-- **Docker configuration** is shared across all agents
-
-### Deployment
-Each agent can be deployed independently using its manifest:
-```bash
-uv run agentex agents deploy --cluster your-cluster --manifest creator.yaml
-```
-
-## 🏗️ Technical Implementation
-
-### Shared Dockerfile
-The Dockerfile uses build arguments to run different agents:
-```dockerfile
-CMD uvicorn project.${AGENT_FILE%.*}:acp --host 0.0.0.0 --port ${PORT:-8000}
-```
-
-Manifest files specify which agent to run:
-```yaml
-build_args:
-  AGENT_FILE: creator.py
-  PORT: 8001
-```
-
-### State Machine Flow
-The orchestrator coordinates the workflow through these states:
-- `CREATING` → `WAITING_FOR_CREATOR` → `REVIEWING` → `WAITING_FOR_CRITIC` → `FORMATTING` → `COMPLETED`
-
-### Inter-Agent Communication
-Agents communicate using AgentEx events:
-```python
-await adk.acp.send_event(
-    agent_name="ab090-creator-agent",
-    task_id=task_id,
-    content=TextContent(author="agent", content=json.dumps(request_data))
-)
-```
-
-## 📚 What You'll Learn
-
-This tutorial demonstrates:
-- **Multi-agent coordination** using state machines for complex workflows
-- **Type-safe communication** with Pydantic models for all request/response data
-- **Shared build configuration** for multiple agents in a single deployment
-- **AgentEx CLI usage** for development and deployment
-- **Inter-agent communication patterns** with proper error handling
-- **Scalable agent architecture** with clear separation of concerns
-
-## When to Use
-- Complex workflows requiring multiple specialized agents
-- Content pipelines with review/approval steps
-- Systems where each stage needs different capabilities
-- When you want agent separation without Temporal (though Temporal is recommended for production)
-
-## Why This Matters
-This shows how far you can go with non-Temporal multi-agent systems. However, note the limitations: manual state management, potential race conditions, and no built-in durability. For production multi-agent systems, consider Temporal ([../10_temporal/](../../10_temporal/)) which provides workflow orchestration, durability, and state management out of the box.
-
-**Next:** Ready for production workflows? → [../../10_temporal/000_hello_acp](../../10_temporal/000_hello_acp/)
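For reference, the states listed under the README's State Machine Flow section reduce to a small enum plus a legal-transition map. The real definitions live in `project/state_machines/content_workflow.py` (referenced in the file layout above but not shown in this diff), so treat this as an approximate sketch rather than that file's contents:

```python
from enum import Enum

class ContentWorkflowState(str, Enum):
    CREATING = "creating"
    WAITING_FOR_CREATOR = "waiting_for_creator"
    REVIEWING = "reviewing"
    WAITING_FOR_CRITIC = "waiting_for_critic"
    FORMATTING = "formatting"
    COMPLETED = "completed"

# Legal transitions per the flow above; the critic's verdict decides whether
# WAITING_FOR_CRITIC loops back to CREATING (revise) or moves to FORMATTING.
TRANSITIONS = {
    ContentWorkflowState.CREATING: {ContentWorkflowState.WAITING_FOR_CREATOR},
    ContentWorkflowState.WAITING_FOR_CREATOR: {ContentWorkflowState.REVIEWING},
    ContentWorkflowState.REVIEWING: {ContentWorkflowState.WAITING_FOR_CRITIC},
    ContentWorkflowState.WAITING_FOR_CRITIC: {
        ContentWorkflowState.CREATING,
        ContentWorkflowState.FORMATTING,
    },
    ContentWorkflowState.FORMATTING: {ContentWorkflowState.COMPLETED},
    ContentWorkflowState.COMPLETED: set(),
}

def can_transition(src: ContentWorkflowState, dst: ContentWorkflowState) -> bool:
    return dst in TRANSITIONS[src]
```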
- -build: - context: - root: ../ - dockerfile: Dockerfile - build_args: - AGENT_FILE: creator.py - PORT: 8001 - -local_development: - agent: - port: 8001 - host_address: host.docker.internal - paths: - acp: project/creator.py - -agent: - name: ab090-creator-agent - acp_type: async - description: Creator agent that generates and revises content based on requests and feedback - temporal: - enabled: false - -deployment: - image: - repository: "" - tag: "latest" - global: - agent: - name: "ab090-creator-agent" - description: "Creator agent that generates and revises content based on requests and feedback" - replicaCount: 1 - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "1000m" - memory: "2Gi" diff --git a/examples/tutorials/10_async/00_base/090_multi_agent_non_temporal/critic.yaml b/examples/tutorials/10_async/00_base/090_multi_agent_non_temporal/critic.yaml deleted file mode 100644 index 0a18fc12..00000000 --- a/examples/tutorials/10_async/00_base/090_multi_agent_non_temporal/critic.yaml +++ /dev/null @@ -1,42 +0,0 @@ -# Critic Agent Manifest Configuration -# --------------------------------- -# This file defines how the critic agent should be built and deployed. - -build: - context: - root: ../ - dockerfile: Dockerfile - build_args: - AGENT_FILE: critic.py - PORT: 8002 - -local_development: - agent: - port: 8002 - host_address: host.docker.internal - paths: - acp: project/critic.py - -agent: - name: ab090-critic-agent - acp_type: async - description: Critic agent that reviews content drafts against specified rules and provides feedback - temporal: - enabled: false - -deployment: - image: - repository: "" - tag: "latest" - global: - agent: - name: "ab090-critic-agent" - description: "Critic agent that reviews content drafts against specified rules and provides feedback" - replicaCount: 1 - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "1000m" - memory: "2Gi" diff --git a/examples/tutorials/10_async/00_base/090_multi_agent_non_temporal/formatter.yaml b/examples/tutorials/10_async/00_base/090_multi_agent_non_temporal/formatter.yaml deleted file mode 100644 index 9c69b74c..00000000 --- a/examples/tutorials/10_async/00_base/090_multi_agent_non_temporal/formatter.yaml +++ /dev/null @@ -1,42 +0,0 @@ -# Formatter Agent Manifest Configuration -# ------------------------------------- -# This file defines how the formatter agent should be built and deployed. - -build: - context: - root: ../ - dockerfile: Dockerfile - build_args: - AGENT_FILE: formatter.py - PORT: 8003 - -local_development: - agent: - port: 8003 - host_address: host.docker.internal - paths: - acp: project/formatter.py - -agent: - name: ab090-formatter-agent - acp_type: async - description: Formatter agent that converts approved content to various target formats (HTML, Markdown, etc.) 
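-  # Temporal is deliberately disabled below (here and in the other manifests):
-  # this tutorial coordinates agents purely through ACP events.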
- temporal: - enabled: false - -deployment: - image: - repository: "" - tag: "latest" - global: - agent: - name: "ab090-formatter-agent" - description: "Formatter agent that converts approved content to various target formats (HTML, Markdown, etc.)" - replicaCount: 1 - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "1000m" - memory: "2Gi" diff --git a/examples/tutorials/10_async/00_base/090_multi_agent_non_temporal/orchestrator.yaml b/examples/tutorials/10_async/00_base/090_multi_agent_non_temporal/orchestrator.yaml deleted file mode 100644 index 079329fd..00000000 --- a/examples/tutorials/10_async/00_base/090_multi_agent_non_temporal/orchestrator.yaml +++ /dev/null @@ -1,42 +0,0 @@ -# Orchestrator Agent Manifest Configuration -# ---------------------------------------- -# This file defines how the orchestrator agent should be built and deployed. - -build: - context: - root: ../ - dockerfile: Dockerfile - build_args: - AGENT_FILE: orchestrator.py - PORT: 8000 - -local_development: - agent: - port: 8000 - host_address: host.docker.internal - paths: - acp: project/orchestrator.py - -agent: - name: ab090-orchestrator-agent - acp_type: async - description: Orchestrator agent that coordinates a multi-agent content creation workflow using state machines and inter-agent communication - temporal: - enabled: false - -deployment: - image: - repository: "" - tag: "latest" - global: - agent: - name: "ab090-orchestrator-agent" - description: "Orchestrator agent that coordinates a multi-agent content creation workflow using state machines and inter-agent communication" - replicaCount: 1 - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "1000m" - memory: "2Gi" diff --git a/examples/tutorials/10_async/00_base/090_multi_agent_non_temporal/project/__init__.py b/examples/tutorials/10_async/00_base/090_multi_agent_non_temporal/project/__init__.py deleted file mode 100644 index 4d299677..00000000 --- a/examples/tutorials/10_async/00_base/090_multi_agent_non_temporal/project/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# Multi-agent package diff --git a/examples/tutorials/10_async/00_base/090_multi_agent_non_temporal/project/creator.py b/examples/tutorials/10_async/00_base/090_multi_agent_non_temporal/project/creator.py deleted file mode 100644 index 31697548..00000000 --- a/examples/tutorials/10_async/00_base/090_multi_agent_non_temporal/project/creator.py +++ /dev/null @@ -1,294 +0,0 @@ -# Creator Agent - Generates and revises content based on requests and feedback - -import os -import sys -import json -from typing import List -from pathlib import Path - -from agentex.lib import adk -from agentex.lib.types.acp import SendEventParams, CancelTaskParams, CreateTaskParams -from agentex.lib.types.fastacp import AsyncACPConfig -from agentex.lib.utils.logging import make_logger -from agentex.types.text_content import TextContent -from agentex.lib.types.llm_messages import ( - Message, - LLMConfig, - UserMessage, - SystemMessage, - AssistantMessage, -) -from agentex.lib.sdk.fastacp.fastacp import FastACP - -# Add the current directory to the Python path to enable imports -current_dir = Path(__file__).parent -if str(current_dir) not in sys.path: - sys.path.append(str(current_dir)) - -from models import CreatorRequest, CreatorResponse - -from agentex.lib.utils.model_utils import BaseModel - -logger = make_logger(__name__) - -# Create an ACP server with base configuration -acp = FastACP.create( - acp_type="async", - config=AsyncACPConfig( - type="base", - ), -) - - -class 
CreatorState(BaseModel): - messages: List[Message] - creation_history: List[dict] = [] - -@acp.on_task_create -async def handle_task_create(params: CreateTaskParams): - """Initialize the creator agent state.""" - logger.info(f"Creator task created: {params.task.id}") - - # Initialize state with system message - system_message = SystemMessage( - content="""You are a skilled content creator and writer. Your job is to generate and revise high-quality content based on requests and feedback. - -Your responsibilities: -1. Create engaging, original content based on user requests -2. Follow all specified rules and requirements precisely -3. Revise content based on feedback while maintaining quality -4. Ensure content meets all specified criteria - -When creating content: -- Be creative and engaging while staying on topic -- Follow all rules strictly -- Maintain appropriate tone and style -- Focus on quality and clarity - -When revising content: -- Address all feedback points thoroughly -- Maintain the core message while making improvements -- Ensure all rules are still followed after revision - -Return ONLY the content itself, no explanations or metadata.""" - ) - - state = CreatorState(messages=[system_message]) - await adk.state.create(task_id=params.task.id, agent_id=params.agent.id, state=state) - - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content="โœจ **Creator Agent** - Content Generation & Revision\n\nI specialize in creating and revising high-quality content based on your requests.\n\nFor content creation, send:\n```json\n{\n \"request\": \"Your content request\",\n \"rules\": [\"Rule 1\", \"Rule 2\"]\n}\n```\n\nFor content revision, send:\n```json\n{\n \"content\": \"Original content\",\n \"feedback\": \"Feedback to address\",\n \"rules\": [\"Rule 1\", \"Rule 2\"]\n}\n```\n\nReady to create amazing content! ๐Ÿš€", - ), - ) - - -@acp.on_task_event_send -async def handle_event_send(params: SendEventParams): - """Handle content creation and revision requests.""" - - if not params.event.content: - return - - if params.event.content.type != "text": - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content="โŒ I can only process text messages.", - ), - ) - return - - # Echo back the message (if from user) - if params.event.content.author == "user": - await adk.messages.create(task_id=params.task.id, content=params.event.content) - - # Check if OpenAI API key is available - if not os.environ.get("OPENAI_API_KEY"): - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content="โŒ OpenAI API key not found. 
Please set the OPENAI_API_KEY environment variable.", - ), - ) - return - - content = params.event.content.content - - try: - # Parse the JSON request - try: - request_data = json.loads(content) - except json.JSONDecodeError: - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content="โŒ Please provide a valid JSON request with 'request', 'current_draft', and 'feedback' fields.", - ), - ) - return - - # Validate required fields - if "request" not in request_data: - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content="โŒ Missing required field: 'request'", - ), - ) - return - - # Parse and validate request using Pydantic - try: - creator_request = CreatorRequest.model_validate(request_data) - except ValueError as e: - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content=f"โŒ Invalid request format: {e}", - ), - ) - return - - user_request = creator_request.request - current_draft = creator_request.current_draft - feedback = creator_request.feedback - orchestrator_task_id = creator_request.orchestrator_task_id - - # Get current state - task_state = await adk.state.get_by_task_and_agent(task_id=params.task.id, agent_id=params.agent.id) - state = CreatorState.model_validate(task_state.state) - - # Add this request to history - state.creation_history.append({ - "request": user_request, - "current_draft": current_draft, - "feedback": feedback, - "is_revision": bool(current_draft) - }) - - # Create content generation prompt - if current_draft and feedback: - # This is a revision request - user_message_content = f"""Please revise the following content based on the feedback provided: - -ORIGINAL REQUEST: {user_request} - -CURRENT DRAFT: -{current_draft} - -FEEDBACK TO ADDRESS: -{chr(10).join(f'- {item}' for item in feedback)} - -Please provide a revised version that addresses all the feedback while maintaining the quality and intent of the original request.""" - - status_message = f"๐Ÿ”„ **Revising Content** (Iteration {len(state.creation_history)})\n\nRevising based on {len(feedback)} feedback point(s)..." - - else: - # This is an initial creation request - user_message_content = f"""Please create content for the following request: - -{user_request} - -Provide high-quality, engaging content that fulfills this request.""" - - status_message = f"โœจ **Creating New Content**\n\nGenerating content for: {user_request}" - - # Send status update - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content=status_message, - ), - ) - - # Add user message to conversation - state.messages.append(UserMessage(content=user_message_content)) - - # Generate content using LLM - chat_completion = await adk.providers.litellm.chat_completion( - llm_config=LLMConfig(model="gpt-4o-mini", messages=state.messages), - trace_id=params.task.id, - ) - - if not chat_completion.choices or not chat_completion.choices[0].message: - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content="โŒ Failed to generate content. 
Please try again.", - ), - ) - return - - generated_content = chat_completion.choices[0].message.content or "" - - # Add assistant response to conversation - state.messages.append(AssistantMessage(content=generated_content)) - - # Send the generated content back to this task - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content=generated_content, - ), - ) - - # Also send the result back to the orchestrator agent if this request came from another agent - if params.event.content.author == "agent" and orchestrator_task_id: - try: - # Send result back to orchestrator using Pydantic model - result_data = CreatorResponse( - content=generated_content, - task_id=params.task.id - ).model_dump() - - await adk.acp.send_event( - agent_name="ab090-orchestrator-agent", - task_id=orchestrator_task_id, # Use the orchestrator's original task ID - content=TextContent( - author="agent", - content=json.dumps(result_data) - ) - ) - logger.info(f"Sent result back to orchestrator for task {orchestrator_task_id}") - - except Exception as e: - logger.error(f"Failed to send result to orchestrator: {e}") - - # Update state - await adk.state.update( - state_id=task_state.id, - task_id=params.task.id, - agent_id=params.agent.id, - state=state, - trace_id=params.task.id, - ) - - logger.info(f"Generated content for task {params.task.id}: {len(generated_content)} characters") - - except Exception as e: - logger.error(f"Error in content creation: {e}") - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content=f"โŒ Error creating content: {e}", - ), - ) - - -@acp.on_task_cancel -async def handle_task_cancel(params: CancelTaskParams): - """Handle task cancellation.""" - logger.info(f"Creator task cancelled: {params.task.id}") - diff --git a/examples/tutorials/10_async/00_base/090_multi_agent_non_temporal/project/critic.py b/examples/tutorials/10_async/00_base/090_multi_agent_non_temporal/project/critic.py deleted file mode 100644 index e58ea44a..00000000 --- a/examples/tutorials/10_async/00_base/090_multi_agent_non_temporal/project/critic.py +++ /dev/null @@ -1,312 +0,0 @@ -# Critic Agent - Reviews content drafts against specified rules and provides feedback - -import os -import sys -import json -from typing import List -from pathlib import Path - -from agentex.lib import adk -from agentex.lib.types.acp import SendEventParams, CancelTaskParams, CreateTaskParams -from agentex.lib.types.fastacp import AsyncACPConfig -from agentex.lib.utils.logging import make_logger -from agentex.types.text_content import TextContent -from agentex.lib.types.llm_messages import ( - Message, - LLMConfig, - UserMessage, - SystemMessage, - AssistantMessage, -) -from agentex.lib.sdk.fastacp.fastacp import FastACP - -# Add the current directory to the Python path to enable imports -current_dir = Path(__file__).parent -if str(current_dir) not in sys.path: - sys.path.append(str(current_dir)) - -from models import CriticRequest, CriticResponse - -from agentex.lib.utils.model_utils import BaseModel - -logger = make_logger(__name__) - -# Create an ACP server with base configuration -acp = FastACP.create( - acp_type="async", - config=AsyncACPConfig( - type="base", - ), -) - - -class CriticState(BaseModel): - messages: List[Message] - review_history: List[dict] = [] - - -@acp.on_task_create -async def handle_task_create(params: CreateTaskParams): - """Initialize the critic agent state.""" - logger.info(f"Critic task created: {params.task.id}") - - # 
Initialize state with system message - system_message = SystemMessage( - content="""You are a professional content critic and quality assurance specialist. Your job is to review content against specific rules and provide constructive feedback. - -Your responsibilities: -1. Review content against a set of rules -2. Provide specific, actionable feedback for each rule violation -3. Approve content only when all rules are met -4. Be objective and consistent in your reviews - -When reviewing content: -- Systematically check the content against each rule -- For each violation, explain clearly why it fails and suggest how to fix it -- If a rule is subjective (e.g., "friendly tone"), provide a brief justification for your assessment -- If all rules are met, provide an empty feedback list - -Return ONLY a JSON object in the specified format. Do not include any other text or explanations.""" - ) - - state = CriticState(messages=[system_message]) - await adk.state.create(task_id=params.task.id, agent_id=params.agent.id, state=state) - - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content="๐Ÿ” **Critic Agent** - Content Quality Assurance\n\nI specialize in reviewing content against specific rules and providing constructive feedback.\n\nSend me a JSON request with:\n```json\n{\n \"draft\": \"Content to review\",\n \"rules\": [\"Rule 1\", \"Rule 2\", \"Rule 3\"]\n}\n```\n\nI'll respond with feedback JSON:\n```json\n{\n \"feedback\": [\"issue1\", \"issue2\"] // or [] if approved\n}\n```\n\nReady to ensure quality! ๐ŸŽฏ", - ), - ) - - -@acp.on_task_event_send -async def handle_event_send(params: SendEventParams): - """Handle content review requests.""" - - if not params.event.content: - return - - if params.event.content.type != "text": - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content="โŒ I can only process text messages.", - ), - ) - return - - # Echo back the message (if from user) - if params.event.content.author == "user": - await adk.messages.create(task_id=params.task.id, content=params.event.content) - - # Check if OpenAI API key is available - if not os.environ.get("OPENAI_API_KEY"): - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content="โŒ OpenAI API key not found. 
Please set the OPENAI_API_KEY environment variable.", - ), - ) - return - - content = params.event.content.content - - try: - # Parse the JSON request - try: - request_data = json.loads(content) - except json.JSONDecodeError: - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content="โŒ Please provide a valid JSON request with 'draft' and 'rules' fields.", - ), - ) - return - - # Validate required fields - if "draft" not in request_data or "rules" not in request_data: - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content="โŒ Missing required fields: 'draft' and 'rules'", - ), - ) - return - - # Parse and validate request using Pydantic - try: - critic_request = CriticRequest.model_validate(request_data) - except ValueError as e: - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content=f"โŒ Invalid request format: {e}", - ), - ) - return - - draft = critic_request.draft - rules = critic_request.rules - orchestrator_task_id = critic_request.orchestrator_task_id - - if not isinstance(rules, list): - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content="โŒ 'rules' must be a list of strings", - ), - ) - return - - # Get current state - task_state = await adk.state.get_by_task_and_agent(task_id=params.task.id, agent_id=params.agent.id) - state = CriticState.model_validate(task_state.state) - - # Add this review to history - state.review_history.append({ - "draft": draft, - "rules": rules, - "timestamp": "now" # In real implementation, use proper timestamp - }) - - # Send status update - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content=f"๐Ÿ” **Reviewing Content** (Review #{len(state.review_history)})\n\nChecking content against {len(rules)} rules...", - ), - ) - - # Create review prompt - rules_text = "\n".join([f"{i+1}. {rule}" for i, rule in enumerate(rules)]) - - user_message_content = f"""Please review the following content against the specified rules and provide feedback: - -CONTENT TO REVIEW: -{draft} - -RULES TO CHECK: -{rules_text} - -Review the content systematically against each rule. For each rule violation: -1. Identify which rule is violated -2. Explain why it violates the rule -3. Suggest how to fix it - -If the content meets all rules, return an empty feedback list. - -You MUST respond with a JSON object in this exact format: -{{ - "feedback": ["specific issue 1", "specific issue 2", ...] // or [] if all rules are met -}} - -Do not include any other text or explanations outside the JSON response.""" - - # Add user message to conversation - state.messages.append(UserMessage(content=user_message_content)) - - # Generate review using LLM - chat_completion = await adk.providers.litellm.chat_completion( - llm_config=LLMConfig(model="gpt-4o-mini", messages=state.messages), - trace_id=params.task.id, - ) - - if not chat_completion.choices or not chat_completion.choices[0].message: - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content="โŒ Failed to generate review. 
Please try again.", - ), - ) - return - - review_response = chat_completion.choices[0].message.content or "" - - # Add assistant response to conversation - state.messages.append(AssistantMessage(content=review_response)) - - # Parse the review response - try: - review_data = json.loads(review_response.strip()) - feedback = review_data.get("feedback", []) - except json.JSONDecodeError: - # Fallback if LLM doesn't return valid JSON - feedback = ["Unable to parse review response"] - - # Create result message - if feedback: - result_message = f"โŒ **Content Needs Revision**\n\nIssues found:\n" + "\n".join([f"โ€ข {item}" for item in feedback]) - approval_status = "needs_revision" - else: - result_message = "โœ… **Content Approved**\n\nAll rules have been met!" - approval_status = "approved" - - # Send the review result back to this task - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content=result_message, - ), - ) - - # Also send the result back to the orchestrator agent if this request came from another agent - if params.event.content.author == "agent" and orchestrator_task_id: - try: - # Send result back to orchestrator using Pydantic model - result_data = CriticResponse( - feedback=feedback, - approval_status=approval_status, - task_id=params.task.id - ).model_dump() - - await adk.acp.send_event( - agent_name="ab090-orchestrator-agent", - task_id=orchestrator_task_id, # Use the orchestrator's original task ID - content=TextContent( - author="agent", - content=json.dumps(result_data) - ) - ) - logger.info(f"Sent review result back to orchestrator for task {orchestrator_task_id}") - - except Exception as e: - logger.error(f"Failed to send result to orchestrator: {e}") - - # Update state - await adk.state.update( - state_id=task_state.id, - task_id=params.task.id, - agent_id=params.agent.id, - state=state, - trace_id=params.task.id, - ) - - logger.info(f"Completed review for task {params.task.id}: {len(feedback)} issues found") - - except Exception as e: - logger.error(f"Error in content review: {e}") - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content=f"โŒ Error reviewing content: {e}", - ), - ) - - -@acp.on_task_cancel -async def handle_task_cancel(params: CancelTaskParams): - """Handle task cancellation.""" - logger.info(f"Critic task cancelled: {params.task.id}") diff --git a/examples/tutorials/10_async/00_base/090_multi_agent_non_temporal/project/formatter.py b/examples/tutorials/10_async/00_base/090_multi_agent_non_temporal/project/formatter.py deleted file mode 100644 index 3301d066..00000000 --- a/examples/tutorials/10_async/00_base/090_multi_agent_non_temporal/project/formatter.py +++ /dev/null @@ -1,327 +0,0 @@ -# Formatter Agent - Converts approved content to various target formats - -import os -import sys -import json -from typing import List -from pathlib import Path - -from agentex.lib import adk -from agentex.lib.types.acp import SendEventParams, CancelTaskParams, CreateTaskParams -from agentex.lib.types.fastacp import AsyncACPConfig -from agentex.lib.utils.logging import make_logger -from agentex.types.text_content import TextContent -from agentex.lib.types.llm_messages import ( - Message, - LLMConfig, - UserMessage, - SystemMessage, - AssistantMessage, -) -from agentex.lib.sdk.fastacp.fastacp import FastACP - -# Add the current directory to the Python path to enable imports -current_dir = Path(__file__).parent -if str(current_dir) not in sys.path: - 
sys.path.append(str(current_dir)) - -from models import FormatterRequest, FormatterResponse - -from agentex.lib.utils.model_utils import BaseModel - -logger = make_logger(__name__) - -# Create an ACP server with base configuration -acp = FastACP.create( - acp_type="async", - config=AsyncACPConfig( - type="base", - ), -) - - -class FormatterState(BaseModel): - messages: List[Message] - format_history: List[dict] = [] - - -@acp.on_task_create -async def handle_task_create(params: CreateTaskParams): - """Initialize the formatter agent state.""" - logger.info(f"Formatter task created: {params.task.id}") - - # Initialize state with system message - system_message = SystemMessage( - content="""You are a professional content formatter specialist. Your job is to convert approved content into various target formats while preserving the original message and quality. - -Your responsibilities: -1. Convert content to the specified target format (HTML, Markdown, JSON, etc.) -2. Apply proper formatting conventions for the target format -3. Preserve all content and meaning during conversion -4. Ensure the formatted output is valid and well-structured - -Supported formats: -- HTML: Convert to clean, semantic HTML with appropriate tags -- Markdown: Convert to properly formatted Markdown syntax -- JSON: Structure content in a meaningful JSON format -- Text: Clean plain text formatting -- Email: Format as professional email with proper structure - -When formatting: -1. Maintain the original content's meaning and tone -2. Apply format-specific best practices -3. Ensure proper structure and readability -4. Use semantic elements appropriate to the format - -You must respond with a JSON object in this exact format: -{ - "formatted_content": "the fully formatted content here" -} - -Do not include any other text, explanations, or formatting outside the JSON response.""" - ) - - state = FormatterState(messages=[system_message]) - await adk.state.create(task_id=params.task.id, agent_id=params.agent.id, state=state) - - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content="๐ŸŽจ **Formatter Agent** - Content Format Conversion\n\nI specialize in converting approved content to various target formats while preserving meaning and quality.\n\nSend me a JSON request with:\n```json\n{\n \"content\": \"Content to format\",\n \"target_format\": \"HTML|Markdown|JSON|Text|Email\"\n}\n```\n\nI'll respond with formatted content JSON:\n```json\n{\n \"formatted_content\": \"Your beautifully formatted content\"\n}\n```\n\nSupported formats: HTML, Markdown, JSON, Text, Email\nReady to make your content shine! โœจ", - ), - ) - - -@acp.on_task_event_send -async def handle_event_send(params: SendEventParams): - """Handle content formatting requests.""" - - if not params.event.content: - return - - if params.event.content.type != "text": - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content="โŒ I can only process text messages.", - ), - ) - return - - # Echo back the message (if from user) - if params.event.content.author == "user": - await adk.messages.create(task_id=params.task.id, content=params.event.content) - - # Check if OpenAI API key is available - if not os.environ.get("OPENAI_API_KEY"): - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content="โŒ OpenAI API key not found. 
Please set the OPENAI_API_KEY environment variable.", - ), - ) - return - - content = params.event.content.content - - try: - # Parse the JSON request - try: - request_data = json.loads(content) - except json.JSONDecodeError: - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content="โŒ Please provide a valid JSON request with 'content' and 'target_format' fields.", - ), - ) - return - - # Validate required fields - if "content" not in request_data or "target_format" not in request_data: - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content="โŒ Missing required fields: 'content' and 'target_format'", - ), - ) - return - - # Parse and validate request using Pydantic - try: - formatter_request = FormatterRequest.model_validate(request_data) - except ValueError as e: - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content=f"โŒ Invalid request format: {e}", - ), - ) - return - - content_to_format = formatter_request.content - target_format = formatter_request.target_format.upper() - orchestrator_task_id = formatter_request.orchestrator_task_id - - # Validate target format - supported_formats = ["HTML", "MARKDOWN", "JSON", "TEXT", "EMAIL"] - if target_format not in supported_formats: - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content=f"โŒ Unsupported format: {target_format}. Supported formats: {', '.join(supported_formats)}", - ), - ) - return - - # Get current state - task_state = await adk.state.get_by_task_and_agent(task_id=params.task.id, agent_id=params.agent.id) - state = FormatterState.model_validate(task_state.state) - - # Add this format request to history - state.format_history.append({ - "content": content_to_format, - "target_format": target_format, - "timestamp": "now" # In real implementation, use proper timestamp - }) - - # Send status update - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content=f"๐ŸŽจ **Formatting Content** (Request #{len(state.format_history)})\n\nConverting to {target_format} format...", - ), - ) - - # Create formatting prompt based on target format - format_instructions = { - "HTML": "Convert to clean, semantic HTML with appropriate tags (headings, paragraphs, lists, etc.). Use proper HTML structure.", - "MARKDOWN": "Convert to properly formatted Markdown syntax with appropriate headers, emphasis, lists, and other Markdown elements.", - "JSON": "Structure the content in a meaningful JSON format with appropriate keys and values that represent the content structure.", - "TEXT": "Format as clean, well-structured plain text with proper line breaks and spacing.", - "EMAIL": "Format as a professional email with proper subject, greeting, body, and closing." - } - - user_message_content = f"""Please format the following content into {target_format} format: - -CONTENT TO FORMAT: -{content_to_format} - -FORMATTING INSTRUCTIONS: -{format_instructions[target_format]} - -Requirements: -1. Preserve all original meaning and content -2. Apply best practices for {target_format} formatting -3. Ensure the output is valid and well-structured -4. 
Maintain readability and professional appearance - -You MUST respond with a JSON object in this exact format: -{{ - "formatted_content": "the fully formatted content here" -}} - -Do not include any other text, explanations, or formatting outside the JSON response.""" - - # Add user message to conversation - state.messages.append(UserMessage(content=user_message_content)) - - # Generate formatted content using LLM - chat_completion = await adk.providers.litellm.chat_completion( - llm_config=LLMConfig(model="gpt-4o-mini", messages=state.messages), - trace_id=params.task.id, - ) - - if not chat_completion.choices or not chat_completion.choices[0].message: - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content="โŒ Failed to format content. Please try again.", - ), - ) - return - - format_response = chat_completion.choices[0].message.content or "" - - # Add assistant response to conversation - state.messages.append(AssistantMessage(content=format_response)) - - # Parse the format response - try: - format_data = json.loads(format_response.strip()) - formatted_content = format_data.get("formatted_content", "") - except json.JSONDecodeError: - # Fallback if LLM doesn't return valid JSON - formatted_content = format_response.strip() - - # Create result message - result_message = f"โœ… **Content Formatted Successfully**\n\nFormat: {target_format}\n\n**Formatted Content:**\n```{target_format.lower()}\n{formatted_content}\n```" - - # Send the formatted content back to this task - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content=result_message, - ), - ) - - # Also send the result back to the orchestrator agent if this request came from another agent - if params.event.content.author == "agent" and orchestrator_task_id: - try: - # Send result back to orchestrator - # Send result back to orchestrator using Pydantic model - result_data = FormatterResponse( - formatted_content=formatted_content, - target_format=target_format, - task_id=params.task.id - ).model_dump() - - await adk.acp.send_event( - agent_name="ab090-orchestrator-agent", - task_id=orchestrator_task_id, # Use the orchestrator's original task ID - content=TextContent( - author="agent", - content=json.dumps(result_data) - ) - ) - logger.info(f"Sent formatted content back to orchestrator for task {orchestrator_task_id}") - - except Exception as e: - logger.error(f"Failed to send result to orchestrator: {e}") - - # Update state - await adk.state.update( - state_id=task_state.id, - task_id=params.task.id, - agent_id=params.agent.id, - state=state, - trace_id=params.task.id, - ) - - logger.info(f"Completed formatting for task {params.task.id}: {target_format}") - - except Exception as e: - logger.error(f"Error in content formatting: {e}") - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content=f"โŒ Error formatting content: {e}", - ), - ) - - -@acp.on_task_cancel -async def handle_task_cancel(params: CancelTaskParams): - """Handle task cancellation.""" - logger.info(f"Formatter task cancelled: {params.task.id}") diff --git a/examples/tutorials/10_async/00_base/090_multi_agent_non_temporal/project/models.py b/examples/tutorials/10_async/00_base/090_multi_agent_non_temporal/project/models.py deleted file mode 100644 index e9aef6d7..00000000 --- a/examples/tutorials/10_async/00_base/090_multi_agent_non_temporal/project/models.py +++ /dev/null @@ -1,80 +0,0 @@ -""" -Pydantic models for 
request/response data structures across all agents.
-This provides type safety and clear documentation of expected data formats.
-"""
-
-from enum import Enum
-from typing import List, Literal, Optional
-
-from pydantic import Field, BaseModel
-
-# Request Models
-
-class OrchestratorRequest(BaseModel):
-    """Request to the orchestrator agent to start a content creation workflow."""
-    request: str = Field(..., description="The content creation request")
-    rules: Optional[List[str]] = Field(default=None, description="Rules for content validation")
-    target_format: Optional[str] = Field(default=None, description="Desired output format (HTML, MARKDOWN, JSON, TEXT, EMAIL)")
-
-
-class CreatorRequest(BaseModel):
-    """Request to the creator agent for content generation or revision."""
-    request: str = Field(..., description="The content creation request")
-    current_draft: Optional[str] = Field(default=None, description="Current draft for revision (if any)")
-    feedback: Optional[List[str]] = Field(default=None, description="Feedback from critic for revision")
-    orchestrator_task_id: Optional[str] = Field(default=None, description="Original orchestrator task ID for callback")
-
-
-class CriticRequest(BaseModel):
-    """Request to the critic agent for content review."""
-    draft: str = Field(..., description="Content draft to review")
-    rules: List[str] = Field(..., description="Rules to validate against")
-    orchestrator_task_id: Optional[str] = Field(default=None, description="Original orchestrator task ID for callback")
-
-
-class FormatterRequest(BaseModel):
-    """Request to the formatter agent for content formatting."""
-    content: str = Field(..., description="Content to format")
-    target_format: str = Field(..., description="Target format (HTML, MARKDOWN, JSON, TEXT, EMAIL)")
-    orchestrator_task_id: Optional[str] = Field(default=None, description="Original orchestrator task ID for callback")
-
-
-# Response Models
-
-class CreatorResponse(BaseModel):
-    """Response from the creator agent."""
-    agent: Literal["creator"] = Field(default="creator", description="Agent identifier")
-    content: str = Field(..., description="Generated or revised content")
-    task_id: str = Field(..., description="Task ID for this creation request")
-
-
-class CriticResponse(BaseModel):
-    """Response from the critic agent."""
-    agent: Literal["critic"] = Field(default="critic", description="Agent identifier")
-    feedback: List[str] = Field(..., description="List of feedback items (empty if approved)")
-    approval_status: str = Field(..., description="Approval status (approved/needs_revision)")
-    task_id: str = Field(..., description="Task ID for this review request")
-
-
-class FormatterResponse(BaseModel):
-    """Response from the formatter agent."""
-    agent: Literal["formatter"] = Field(default="formatter", description="Agent identifier")
-    formatted_content: str = Field(..., description="Content formatted in the target format")
-    target_format: str = Field(..., description="The format used for formatting")
-    task_id: str = Field(..., description="Task ID for this formatting request")
-
-
-# Enums for validation
-
-class SupportedFormat(str, Enum):
-    """Supported output formats for the formatter."""
-    HTML = "HTML"
-    MARKDOWN = "MARKDOWN"
-    JSON = "JSON"
-    TEXT = "TEXT"
-    EMAIL = "EMAIL"
-
-
-class ApprovalStatus(str, Enum):
-    """Content approval status from critic."""
-    APPROVED = "approved"
-    NEEDS_REVISION = "needs_revision"
\ No newline at end of file
diff --git a/examples/tutorials/10_async/00_base/090_multi_agent_non_temporal/project/orchestrator.py
b/examples/tutorials/10_async/00_base/090_multi_agent_non_temporal/project/orchestrator.py deleted file mode 100644 index f9aea8be..00000000 --- a/examples/tutorials/10_async/00_base/090_multi_agent_non_temporal/project/orchestrator.py +++ /dev/null @@ -1,419 +0,0 @@ -# Orchestrator Agent - Coordinates the multi-agent content creation workflow -from __future__ import annotations - -import sys -import json -from pathlib import Path - -from agentex.lib import adk -from agentex.lib.types.acp import SendEventParams, CancelTaskParams, CreateTaskParams -from agentex.lib.types.fastacp import AsyncACPConfig -from agentex.lib.utils.logging import make_logger -from agentex.types.text_content import TextContent -from agentex.lib.sdk.fastacp.fastacp import FastACP - -# Add the current directory to the Python path to enable imports -current_dir = Path(__file__).parent -if str(current_dir) not in sys.path: - sys.path.append(str(current_dir)) - -from models import CriticResponse, CreatorResponse, FormatterResponse, OrchestratorRequest -from state_machines.content_workflow import WorkflowData, ContentWorkflowState, ContentWorkflowStateMachine - -logger = make_logger(__name__) - -# Create an ACP server with base configuration -acp = FastACP.create( - acp_type="async", - config=AsyncACPConfig( - type="base", - ), -) - -# Store active state machines by task_id -active_workflows: dict[str, ContentWorkflowStateMachine] = {} - - -@acp.on_task_create -async def handle_task_create(params: CreateTaskParams): - """Initialize the content workflow state machine when a task is created.""" - logger.info(f"Task created: {params.task.id}") - - # Acknowledge task creation - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content="๐ŸŽญ **Orchestrator Agent** - Content Assembly Line\n\nI coordinate a multi-agent workflow for content creation:\nโ€ข **Creator Agent** - Generates content\nโ€ข **Critic Agent** - Reviews against rules\nโ€ข **Formatter Agent** - Formats final output\n\nSend me a JSON request with:\n```json\n{\n \"request\": \"Your content request\",\n \"rules\": [\"Rule 1\", \"Rule 2\"],\n \"target_format\": \"HTML\"\n}\n```\n\nReady to orchestrate your content creation! ๐Ÿš€", - ), - ) - - -@acp.on_task_event_send -async def handle_event_send(params: SendEventParams): - """Handle incoming events and coordinate the multi-agent workflow.""" - - if not params.event.content: - return - - if params.event.content.type != "text": - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content="โŒ I can only process text messages.", - ), - ) - return - - # Echo back the user's message - if params.event.content.author == "user": - await adk.messages.create(task_id=params.task.id, content=params.event.content) - - content = params.event.content.content - - # Check if this is a response from another agent - if await handle_agent_response(params.task.id, content): - return - - # Otherwise, this is a user request to start a new workflow - if params.event.content.author == "user": - await start_content_workflow(params.task.id, content) - - -async def handle_agent_response(task_id: str, content: str) -> bool: - """Handle responses from other agents in the workflow. 
Returns True if this was an agent response.""" - try: - # Try to parse as JSON (agent responses should be JSON) - response_data = json.loads(content) - - # Check if this is a response from one of our agents - if "agent" in response_data and "task_id" in response_data: - agent_name = response_data["agent"] - - # Find the corresponding workflow - workflow = active_workflows.get(task_id) - if not workflow: - logger.warning(f"No active workflow found for task {task_id}") - return True - - logger.info(f"Received response from {agent_name} for task {task_id}") - - # Handle based on agent type - if agent_name == "creator": - try: - creator_response = CreatorResponse.model_validate(response_data) - await workflow.handle_creator_response(creator_response.content) - - # Send status update - await adk.messages.create( - task_id=task_id, - content=TextContent( - author="agent", - content=f"๐Ÿ“ **Creator Output:**\n{creator_response.content}\n\n๐Ÿ” Calling critic agent...", - ), - ) - except ValueError as e: - logger.error(f"Invalid creator response format: {e}") - return True - - # Advance the workflow to the next state - await advance_workflow(task_id, workflow) - - elif agent_name == "critic": - try: - critic_response = CriticResponse.model_validate(response_data) - feedback = critic_response.feedback - approval_status = critic_response.approval_status - except ValueError as e: - logger.error(f"Invalid critic response format: {e}") - return True - - # Create the response in the format expected by the state machine - critic_response = {"feedback": feedback} - await workflow.handle_critic_response(json.dumps(critic_response)) - - # Send status update - if feedback: - feedback_text = '\nโ€ข '.join(feedback) - await adk.messages.create( - task_id=task_id, - content=TextContent( - author="agent", - content=f"๐ŸŽฏ **Critic Feedback:**\nโ€ข {feedback_text}\n\n๐Ÿ“ Calling creator agent for revision...", - ), - ) - else: - await adk.messages.create( - task_id=task_id, - content=TextContent( - author="agent", - content=f"โœ… **Content Approved by Critic!**\n\n๐ŸŽจ Calling formatter agent...", - ), - ) - - # Advance the workflow to the next state - await advance_workflow(task_id, workflow) - - elif agent_name == "formatter": - try: - formatter_response = FormatterResponse.model_validate(response_data) - formatted_content = formatter_response.formatted_content - target_format = formatter_response.target_format - except ValueError as e: - logger.error(f"Invalid formatter response format: {e}") - return True - - # Create the response in the format expected by the state machine - formatter_response = {"formatted_content": formatted_content} - await workflow.handle_formatter_response(json.dumps(formatter_response)) - - # Workflow completion is handled in handle_formatter_response - await complete_workflow(task_id, workflow) - - # Send final result - await adk.messages.create( - task_id=task_id, - content=TextContent( - author="agent", - content=f"๐ŸŽ‰ **Workflow Complete!**\n\nYour content has been successfully created, reviewed, and formatted.\n\n**Final Result ({target_format}):**\n```{target_format.lower()}\n{formatted_content}\n```", - ), - ) - - # Clean up completed workflow - if task_id in active_workflows: - del active_workflows[task_id] - logger.info(f"Cleaned up completed workflow for task {task_id}") - - # Continue workflow execution - if workflow and not await workflow.terminal_condition(): - await advance_workflow(task_id, workflow) - - return True - - except json.JSONDecodeError: - # Not a JSON 
response, might be a user message - return False - except Exception as e: - logger.error(f"Error handling agent response: {e}") - return True - - return False - - -async def start_content_workflow(task_id: str, content: str): - """Start a new content creation workflow.""" - try: - # Parse the user request - try: - request_data = json.loads(content) - except json.JSONDecodeError: - await adk.messages.create( - task_id=task_id, - content=TextContent( - author="agent", - content="โŒ Please provide a valid JSON request with 'request', 'rules', and 'target_format' fields.\n\nExample:\n```json\n{\n \"request\": \"Write a welcome message\",\n \"rules\": [\"Under 50 words\", \"Friendly tone\"],\n \"target_format\": \"HTML\"\n}\n```", - ), - ) - return - - # Parse and validate request using Pydantic - try: - orchestrator_request = OrchestratorRequest.model_validate(request_data) - except ValueError as e: - await adk.messages.create( - task_id=task_id, - content=TextContent( - author="agent", - content=f"โŒ Invalid request format: {e}", - ), - ) - return - - user_request = orchestrator_request.request - rules = orchestrator_request.rules - target_format = orchestrator_request.target_format - - if not isinstance(rules, list): - await adk.messages.create( - task_id=task_id, - content=TextContent( - author="agent", - content="โŒ 'rules' must be a list of strings", - ), - ) - return - - # Create workflow data - workflow_data = WorkflowData( - user_request=user_request, - rules=rules, - target_format=target_format - ) - - # Create and start the state machine - workflow = ContentWorkflowStateMachine(task_id=task_id, initial_data=workflow_data) - active_workflows[task_id] = workflow - - # Send acknowledgment - await adk.messages.create( - task_id=task_id, - content=TextContent( - author="agent", - content=f"๐Ÿš€ **Starting Content Workflow**\n\n**Request:** {user_request}\n**Rules:** {len(rules)} rule(s)\n**Target Format:** {target_format}\n\nInitializing multi-agent workflow...", - ), - ) - - # Start the workflow - await advance_workflow(task_id, workflow) - logger.info(f"Started content workflow for task {task_id}") - - except Exception as e: - logger.error(f"Error starting workflow: {e}") - await adk.messages.create( - task_id=task_id, - content=TextContent( - author="agent", - content=f"โŒ Error starting workflow: {e}", - ), - ) - - -async def advance_workflow(task_id: str, workflow: ContentWorkflowStateMachine): - """Advance the workflow to the next state.""" - - try: - # Keep advancing until we reach a waiting state or complete - max_steps = 10 # Prevent infinite loops - step_count = 0 - - while step_count < max_steps and not await workflow.terminal_condition(): - current_state = workflow.get_current_state() - data = workflow.get_state_machine_data() - logger.info(f"Advancing workflow from state: {current_state} (step {step_count + 1})") - - # Execute the current state's workflow - logger.info(f"About to execute workflow step") - await workflow.step() - logger.info(f"Workflow step completed") - - new_state = workflow.get_current_state() - logger.info(f"New state after step: {new_state}") - - # Skip redundant status updates since we handle them in response handlers - # if current_state != new_state: - # await send_status_update(task_id, new_state, data) - - # Stop advancing if we're in a waiting state (waiting for external response) - if new_state in [ContentWorkflowState.WAITING_FOR_CREATOR, - ContentWorkflowState.WAITING_FOR_CRITIC, - ContentWorkflowState.WAITING_FOR_FORMATTER]: - 
logger.info(f"Workflow paused in waiting state: {new_state}") - break - - step_count += 1 - - # Check if workflow is complete - if await workflow.terminal_condition(): - final_state = workflow.get_current_state() - if final_state == ContentWorkflowState.COMPLETED: - await complete_workflow(task_id, workflow) - else: - await fail_workflow(task_id, workflow) - elif step_count >= max_steps: - logger.error(f"Workflow exceeded max steps ({max_steps}), stopping") - data = workflow.get_state_machine_data() - data.last_error = f"Workflow exceeded maximum steps ({max_steps})" - await workflow.transition(ContentWorkflowState.FAILED) - await fail_workflow(task_id, workflow) - - except Exception as e: - logger.error(f"Error advancing workflow: {e}") - await adk.messages.create( - task_id=task_id, - content=TextContent( - author="agent", - content=f"โŒ Workflow error: {e}", - ), - ) - - -async def send_status_update(task_id: str, state: str, data: WorkflowData): - """Send status updates to the user based on the current state.""" - - message = "" - # Special handling for CREATING state to show feedback - if state == ContentWorkflowState.CREATING: - if data.iteration_count > 0 and data.feedback: - feedback_text = '\n- '.join(data.feedback) - message = f"๐Ÿ”„ **Revising Content** (Iteration {data.iteration_count + 1})\n\nCritic provided feedback:\n- {feedback_text}\n\nSending back to Creator Agent for revision..." - else: - message = f"๐Ÿ“ **Step 1/3: Creating Content** (Iteration {data.iteration_count + 1})\n\nSending request to Creator Agent..." - else: - status_messages = { - ContentWorkflowState.WAITING_FOR_CREATOR: "โณ Waiting for Creator Agent to generate content...", - ContentWorkflowState.REVIEWING: f"๐Ÿ” **Step 2/3: Reviewing Content** (Iteration {data.iteration_count})\n\nSending draft to Critic Agent for review against {len(data.rules)} rule(s)...", - ContentWorkflowState.WAITING_FOR_CRITIC: f"โณ Waiting for Critic Agent to review...\n\n**Draft:**\n{data.current_draft}\n\n**Rules:**\n- {', '.join(data.rules)}", - ContentWorkflowState.FORMATTING: f"๐ŸŽจ **Step 3/3: Formatting Content**\n\nSending approved content to Formatter Agent for {data.target_format} formatting...", - ContentWorkflowState.WAITING_FOR_FORMATTER: "โณ Waiting for Formatter Agent to format content...", - ContentWorkflowState.FAILED: f"โŒ **Workflow Failed**\n\nError: {data.last_error}", - } - message = status_messages.get(state, f"๐Ÿ“Š Current state: {state}") - - if not message: - return - - await adk.messages.create( - task_id=task_id, - content=TextContent( - author="agent", - content=message, - ), - ) - - -async def complete_workflow(task_id: str, workflow: ContentWorkflowStateMachine): - """Handle successful workflow completion.""" - - data = workflow.get_state_machine_data() - - await adk.messages.create( - task_id=task_id, - content=TextContent( - author="agent", - content=f"โœ… **Content Creation Complete!**\n\n๐ŸŽฏ **Original Request:** {data.user_request}\n๐Ÿ”„ **Iterations:** {data.iteration_count}\n๐Ÿ“‹ **Rules Applied:** {len(data.rules)}\n๐ŸŽจ **Format:** {data.target_format}\n\n๐Ÿ“ **Final Content:**\n\n{data.final_content}", - ), - ) - - # Clean up - if task_id in active_workflows: - del active_workflows[task_id] - - -async def fail_workflow(task_id: str, workflow: ContentWorkflowStateMachine): - """Handle workflow failure.""" - - data = workflow.get_state_machine_data() - - await adk.messages.create( - task_id=task_id, - content=TextContent( - author="agent", - content=f"โŒ **Workflow 
Failed**\n\nAfter {data.iteration_count} iteration(s), the content creation workflow has failed.\n\n**Error:** {data.last_error}\n\nPlease try again with a simpler request or fewer rules.", - ), - ) - - # Clean up - if task_id in active_workflows: - del active_workflows[task_id] - - -@acp.on_task_cancel -async def handle_task_cancel(params: CancelTaskParams): - """Handle task cancellation.""" - logger.info(f"Orchestrator task cancelled: {params.task.id}") - - # Clean up any active workflow - if params.task.id in active_workflows: - del active_workflows[params.task.id] - logger.info(f"Cleaned up cancelled workflow for task {params.task.id}") diff --git a/examples/tutorials/10_async/00_base/090_multi_agent_non_temporal/project/state_machines/__init__.py b/examples/tutorials/10_async/00_base/090_multi_agent_non_temporal/project/state_machines/__init__.py deleted file mode 100644 index 1b5b70b5..00000000 --- a/examples/tutorials/10_async/00_base/090_multi_agent_non_temporal/project/state_machines/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# State machines package for multi-agent orchestration diff --git a/examples/tutorials/10_async/00_base/090_multi_agent_non_temporal/project/state_machines/content_workflow.py b/examples/tutorials/10_async/00_base/090_multi_agent_non_temporal/project/state_machines/content_workflow.py deleted file mode 100644 index 389b0575..00000000 --- a/examples/tutorials/10_async/00_base/090_multi_agent_non_temporal/project/state_machines/content_workflow.py +++ /dev/null @@ -1,307 +0,0 @@ -# ruff: noqa: ARG002 -from __future__ import annotations - -import json -import asyncio -from enum import Enum -from typing import Optional - -from agentex.lib import adk -from agentex.lib.utils.logging import make_logger -from agentex.types.text_content import TextContent -from agentex.lib.utils.model_utils import BaseModel -from agentex.lib.sdk.state_machine.state import State -from agentex.lib.sdk.state_machine.state_machine import StateMachine -from agentex.lib.sdk.state_machine.state_workflow import StateWorkflow - -logger = make_logger(__name__) - -# Use adk module for inter-agent communication - - -class ContentWorkflowState(str, Enum): - INITIALIZING = "initializing" - CREATING = "creating" - WAITING_FOR_CREATOR = "waiting_for_creator" - REVIEWING = "reviewing" - WAITING_FOR_CRITIC = "waiting_for_critic" - FORMATTING = "formatting" - WAITING_FOR_FORMATTER = "waiting_for_formatter" - COMPLETED = "completed" - FAILED = "failed" - - -class WorkflowData(BaseModel): - user_request: str = "" - rules: list[str] = [] - target_format: str = "text" - current_draft: str = "" - feedback: list[str] = [] - final_content: str = "" - iteration_count: int = 0 - max_iterations: int = 10 - - # Task tracking for async coordination - creator_task_id: Optional[str] = None - critic_task_id: Optional[str] = None - formatter_task_id: Optional[str] = None - - # Response tracking - pending_response_from: Optional[str] = None - last_error: Optional[str] = None - - -class InitializingWorkflow(StateWorkflow): - async def execute(self, state_machine: "ContentWorkflowStateMachine", state_machine_data: WorkflowData) -> str: - logger.info("Initializing content workflow") - return ContentWorkflowState.CREATING - - -class CreatingWorkflow(StateWorkflow): - async def execute(self, state_machine: "ContentWorkflowStateMachine", state_machine_data: WorkflowData) -> str: - logger.info("Starting content creation") - try: - # Create task for creator agent - creator_task = await 
adk.acp.create_task(agent_name="ab090-creator-agent") - task_id = creator_task.id - logger.info(f"Created task ID: {task_id}") - - state_machine_data.creator_task_id = task_id - state_machine_data.pending_response_from = "creator" - - # Send request to creator - request_data = { - "request": state_machine_data.user_request, - "current_draft": state_machine_data.current_draft, - "feedback": state_machine_data.feedback, - "orchestrator_task_id": state_machine._task_id # Tell creator which task to respond to - } - - # Send event to creator agent - await adk.acp.send_event( - task_id=task_id, - agent_name="ab090-creator-agent", - content=TextContent(author="agent", content=json.dumps(request_data)) - ) - - logger.info(f"Sent creation request to creator agent, task_id: {task_id}") - return ContentWorkflowState.WAITING_FOR_CREATOR - - except Exception as e: - logger.error(f"Error in creating workflow: {e}") - state_machine_data.last_error = str(e) - return ContentWorkflowState.FAILED - - -class WaitingForCreatorWorkflow(StateWorkflow): - async def execute(self, state_machine: "ContentWorkflowStateMachine", state_machine_data: WorkflowData) -> str: - # This state waits for creator response - transition happens in ACP event handler - logger.info("Waiting for creator response...") - - # Check if workflow should terminate - if await state_machine.terminal_condition(): - logger.info("Workflow terminated, stopping waiting loop") - return state_machine.get_current_state() - - await asyncio.sleep(1) # Prevent tight loop, allow other tasks to run - return ContentWorkflowState.WAITING_FOR_CREATOR - - -class ReviewingWorkflow(StateWorkflow): - async def execute(self, state_machine: "ContentWorkflowStateMachine", state_machine_data: WorkflowData) -> str: - logger.info("Starting content review") - try: - # Create task for critic agent - critic_task = await adk.acp.create_task(agent_name="ab090-critic-agent") - task_id = critic_task.id - logger.info(f"Created critic task ID: {task_id}") - - state_machine_data.critic_task_id = task_id - state_machine_data.pending_response_from = "critic" - - # Send request to critic - request_data = { - "draft": state_machine_data.current_draft, - "rules": state_machine_data.rules, - "orchestrator_task_id": state_machine._task_id # Tell critic which task to respond to - } - - # Send event to critic agent - await adk.acp.send_event( - task_id=task_id, - agent_name="ab090-critic-agent", - content=TextContent(author="agent", content=json.dumps(request_data)) - ) - - logger.info(f"Sent review request to critic agent, task_id: {task_id}") - return ContentWorkflowState.WAITING_FOR_CRITIC - - except Exception as e: - logger.error(f"Error in reviewing workflow: {e}") - state_machine_data.last_error = str(e) - return ContentWorkflowState.FAILED - - -class WaitingForCriticWorkflow(StateWorkflow): - async def execute(self, state_machine: "ContentWorkflowStateMachine", state_machine_data: WorkflowData) -> str: - # This state waits for critic response - transition happens in ACP event handler - logger.info("Waiting for critic response...") - - # Check if workflow should terminate - if await state_machine.terminal_condition(): - logger.info("Workflow terminated, stopping waiting loop") - return state_machine.get_current_state() - - await asyncio.sleep(1) # Prevent tight loop, allow other tasks to run - return ContentWorkflowState.WAITING_FOR_CRITIC - - -class FormattingWorkflow(StateWorkflow): - async def execute(self, state_machine: "ContentWorkflowStateMachine", state_machine_data: 
WorkflowData) -> str: - logger.info("Starting content formatting") - try: - # Create task for formatter agent - formatter_task = await adk.acp.create_task(agent_name="ab090-formatter-agent") - task_id = formatter_task.id - logger.info(f"Created formatter task ID: {task_id}") - - state_machine_data.formatter_task_id = task_id - state_machine_data.pending_response_from = "formatter" - - # Send request to formatter - request_data = { - "content": state_machine_data.current_draft, # Fixed field name - "target_format": state_machine_data.target_format, - "orchestrator_task_id": state_machine._task_id # Tell formatter which task to respond to - } - - # Send event to formatter agent - await adk.acp.send_event( - task_id=task_id, - agent_name="ab090-formatter-agent", - content=TextContent(author="agent", content=json.dumps(request_data)) - ) - - logger.info(f"Sent format request to formatter agent, task_id: {task_id}") - return ContentWorkflowState.WAITING_FOR_FORMATTER - - except Exception as e: - logger.error(f"Error in formatting workflow: {e}") - state_machine_data.last_error = str(e) - return ContentWorkflowState.FAILED - - -class WaitingForFormatterWorkflow(StateWorkflow): - async def execute(self, state_machine: "ContentWorkflowStateMachine", state_machine_data: WorkflowData) -> str: - # This state waits for formatter response - transition happens in ACP event handler - logger.info("Waiting for formatter response...") - - # Check if workflow should terminate - if await state_machine.terminal_condition(): - logger.info("Workflow terminated, stopping waiting loop") - return state_machine.get_current_state() - - await asyncio.sleep(1) # Prevent tight loop, allow other tasks to run - return ContentWorkflowState.WAITING_FOR_FORMATTER - - -class CompletedWorkflow(StateWorkflow): - async def execute(self, state_machine: "ContentWorkflowStateMachine", state_machine_data: WorkflowData) -> str: - logger.info("Content workflow completed successfully") - return ContentWorkflowState.COMPLETED - - -class FailedWorkflow(StateWorkflow): - async def execute(self, state_machine: "ContentWorkflowStateMachine", state_machine_data: WorkflowData) -> str: - logger.error(f"Content workflow failed: {state_machine_data.last_error}") - return ContentWorkflowState.FAILED - - -class ContentWorkflowStateMachine(StateMachine[WorkflowData]): - def __init__(self, task_id: str | None = None, initial_data: WorkflowData | None = None): - states = [ - State(name=ContentWorkflowState.INITIALIZING, workflow=InitializingWorkflow()), - State(name=ContentWorkflowState.CREATING, workflow=CreatingWorkflow()), - State(name=ContentWorkflowState.WAITING_FOR_CREATOR, workflow=WaitingForCreatorWorkflow()), - State(name=ContentWorkflowState.REVIEWING, workflow=ReviewingWorkflow()), - State(name=ContentWorkflowState.WAITING_FOR_CRITIC, workflow=WaitingForCriticWorkflow()), - State(name=ContentWorkflowState.FORMATTING, workflow=FormattingWorkflow()), - State(name=ContentWorkflowState.WAITING_FOR_FORMATTER, workflow=WaitingForFormatterWorkflow()), - State(name=ContentWorkflowState.COMPLETED, workflow=CompletedWorkflow()), - State(name=ContentWorkflowState.FAILED, workflow=FailedWorkflow()), - ] - - super().__init__( - initial_state=ContentWorkflowState.INITIALIZING, - states=states, - task_id=task_id, - state_machine_data=initial_data or WorkflowData(), - trace_transitions=True - ) - - async def terminal_condition(self) -> bool: - current_state = self.get_current_state() - return current_state in [ContentWorkflowState.COMPLETED, 
ContentWorkflowState.FAILED] - - async def handle_creator_response(self, response_content: str): - """Handle response from creator agent""" - try: - data = self.get_state_machine_data() - data.current_draft = response_content - data.pending_response_from = None - - # Move to reviewing state - await self.transition(ContentWorkflowState.REVIEWING) - logger.info("Received creator response, transitioning to reviewing") - - except Exception as e: - logger.error(f"Error handling creator response: {e}") - data = self.get_state_machine_data() - data.last_error = str(e) - await self.transition(ContentWorkflowState.FAILED) - - async def handle_critic_response(self, response_content: str): - """Handle response from critic agent""" - try: - response_data = json.loads(response_content) - data = self.get_state_machine_data() - data.feedback = response_data.get("feedback") - data.pending_response_from = None - - if data.feedback: - # Has feedback, need to revise - data.iteration_count += 1 - if data.iteration_count >= data.max_iterations: - data.last_error = f"Max iterations ({data.max_iterations}) reached" - await self.transition(ContentWorkflowState.FAILED) - else: - await self.transition(ContentWorkflowState.CREATING) - logger.info(f"Received critic feedback, iteration {data.iteration_count}, transitioning to creating") - else: - # No feedback, content approved - await self.transition(ContentWorkflowState.FORMATTING) - logger.info("Content approved by critic, transitioning to formatting") - - except Exception as e: - logger.error(f"Error handling critic response: {e}") - data = self.get_state_machine_data() - data.last_error = str(e) - await self.transition(ContentWorkflowState.FAILED) - - async def handle_formatter_response(self, response_content: str): - """Handle response from formatter agent""" - try: - response_data = json.loads(response_content) - data = self.get_state_machine_data() - data.final_content = response_data.get("formatted_content") - data.pending_response_from = None - - # Move to completed state - await self.transition(ContentWorkflowState.COMPLETED) - logger.info("Received formatter response, workflow completed") - - except Exception as e: - logger.error(f"Error handling formatter response: {e}") - data = self.get_state_machine_data() - data.last_error = str(e) - await self.transition(ContentWorkflowState.FAILED) diff --git a/examples/tutorials/10_async/00_base/090_multi_agent_non_temporal/pyproject.toml b/examples/tutorials/10_async/00_base/090_multi_agent_non_temporal/pyproject.toml deleted file mode 100644 index a2234c45..00000000 --- a/examples/tutorials/10_async/00_base/090_multi_agent_non_temporal/pyproject.toml +++ /dev/null @@ -1,33 +0,0 @@ -[build-system] -requires = ["hatchling"] -build-backend = "hatchling.build" - -[project] -name = "ab090-multi-agent-content-assembly" -version = "0.1.0" -description = "A multi-agent system that creates content through a collaborative workflow with creator, critic, formatter, and orchestrator agents." 
-readme = "README.md" -requires-python = ">=3.12" -dependencies = [ - "agentex-sdk", - "scale-gp", -] - -[project.optional-dependencies] -dev = [ - "pytest", - "black", - "isort", - "flake8", -] - -[tool.hatch.build.targets.wheel] -packages = ["manifests"] - -[tool.black] -line-length = 88 -target-version = ['py312'] - -[tool.isort] -profile = "black" -line_length = 88 diff --git a/examples/tutorials/10_async/00_base/090_multi_agent_non_temporal/start-agents.sh b/examples/tutorials/10_async/00_base/090_multi_agent_non_temporal/start-agents.sh deleted file mode 100755 index 78346363..00000000 --- a/examples/tutorials/10_async/00_base/090_multi_agent_non_temporal/start-agents.sh +++ /dev/null @@ -1,327 +0,0 @@ -#!/bin/bash -# Multi-Agent Content Assembly Line - Start All Agents (Flattened Structure) -# This script starts all 4 agents in the simplified flattened structure - -set -e - -# Colors for output -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -BLUE='\033[0;34m' -NC='\033[0m' # No Color - -# Configuration -ORCHESTRATOR_PORT=8000 -CREATOR_PORT=8001 -CRITIC_PORT=8002 -FORMATTER_PORT=8003 - -# Base directory -BASE_DIR="examples/tutorials/10_async/00_base/090_multi_agent_non_temporal" - -echo -e "${BLUE}๐ŸŽญ Multi-Agent Content Assembly Line (Flattened)${NC}" -echo -e "${BLUE}===============================================${NC}" -echo "" - -# Function to check if port is available -check_port() { - local port=$1 - if lsof -Pi :$port -sTCP:LISTEN -t >/dev/null 2>&1; then - echo -e "${RED}โŒ Port $port is already in use${NC}" - echo "Please stop the process using port $port or change the port in the manifest files" - return 1 - fi - return 0 -} - -# Function to check prerequisites -check_prerequisites() { - echo -e "${YELLOW}๐Ÿ” Checking prerequisites...${NC}" - - # Check if we're in the right directory - if [[ ! -f "pyproject.toml" ]] || [[ ! -d "src/agentex" ]]; then - echo -e "${RED}โŒ Please run this script from the agentex-sdk-python repository root${NC}" - exit 1 - fi - - # Check if flattened directory exists - if [[ ! -d "$BASE_DIR" ]]; then - echo -e "${RED}โŒ Flattened multi-agent directory not found: $BASE_DIR${NC}" - exit 1 - fi - - # Check if project directory exists - if [[ ! -d "$BASE_DIR/project" ]]; then - echo -e "${RED}โŒ Project directory not found: $BASE_DIR/project${NC}" - exit 1 - fi - - # Check if manifest files exist - if [[ ! -f "$BASE_DIR/orchestrator.yaml" ]]; then - echo -e "${RED}โŒ Orchestrator manifest not found: $BASE_DIR/orchestrator.yaml${NC}" - exit 1 - fi - - # Check if uv is available - if ! 
command -v uv &> /dev/null; then - echo -e "${RED}โŒ uv is required but not installed${NC}" - echo "Please install uv: curl -LsSf https://astral.sh/uv/install.sh | sh" - exit 1 - fi - - # Check if OPENAI_API_KEY is set - if [[ -z "${OPENAI_API_KEY}" ]]; then - echo -e "${YELLOW}โš ๏ธ OPENAI_API_KEY not found in environment${NC}" - if [[ -f ".env" ]]; then - echo -e "${GREEN}โœ… Found .env file - agents will load it automatically${NC}" - else - echo -e "${RED}โŒ No .env file found and OPENAI_API_KEY not set${NC}" - echo "Please create a .env file with OPENAI_API_KEY=your_key_here" - exit 1 - fi - else - echo -e "${GREEN}โœ… OPENAI_API_KEY found in environment${NC}" - fi - - # Check ports - echo -e "${YELLOW}๐Ÿ” Checking ports...${NC}" - check_port $ORCHESTRATOR_PORT || exit 1 - check_port $CREATOR_PORT || exit 1 - check_port $CRITIC_PORT || exit 1 - check_port $FORMATTER_PORT || exit 1 - - echo -e "${GREEN}โœ… All prerequisites met${NC}" - echo "" -} - -# Function to start agent in background -start_agent() { - local name=$1 - local manifest=$2 - local port=$3 - local logfile="/tmp/agentex-${name}.log" - - echo -e "${YELLOW}๐Ÿš€ Starting ${name} agent on port ${port}...${NC}" - - # Start the agent in background and capture PID - uv run agentex agents run --manifest "$manifest" > "$logfile" 2>&1 & - local pid=$! - - echo "$pid" > "/tmp/agentex-${name}.pid" - echo -e "${GREEN}โœ… ${name} agent started (PID: $pid, logs: $logfile)${NC}" - - # Give it a moment to start - sleep 2 - - # Check if process is still running - if ! kill -0 $pid 2>/dev/null; then - echo -e "${RED}โŒ ${name} agent failed to start${NC}" - echo "Check logs: tail -f $logfile" - return 1 - fi - - return 0 -} - -# Function to stop all agents -stop_agents() { - echo -e "${YELLOW}๐Ÿ›‘ Stopping all agents...${NC}" - - for agent in orchestrator creator critic formatter; do - pidfile="/tmp/agentex-${agent}.pid" - if [[ -f "$pidfile" ]]; then - pid=$(cat "$pidfile") - if kill -0 "$pid" 2>/dev/null; then - echo -e "${YELLOW}๐Ÿ›‘ Stopping ${agent} agent (PID: $pid)${NC}" - kill "$pid" - rm -f "$pidfile" - else - echo -e "${YELLOW}โš ๏ธ ${agent} agent was not running${NC}" - rm -f "$pidfile" - fi - fi - done - - echo -e "${GREEN}โœ… All agents stopped${NC}" -} - -# Function to show agent status -show_status() { - echo -e "${BLUE}๐Ÿ“Š Agent Status${NC}" - echo -e "${BLUE}==============${NC}" - - for agent in orchestrator creator critic formatter; do - pidfile="/tmp/agentex-${agent}.pid" - if [[ -f "$pidfile" ]]; then - pid=$(cat "$pidfile") - if kill -0 "$pid" 2>/dev/null; then - case $agent in - orchestrator) port=$ORCHESTRATOR_PORT ;; - creator) port=$CREATOR_PORT ;; - critic) port=$CRITIC_PORT ;; - formatter) port=$FORMATTER_PORT ;; - esac - echo -e "${GREEN}โœ… ${agent} agent running (PID: $pid, Port: $port)${NC}" - else - echo -e "${RED}โŒ ${agent} agent not running (stale PID file)${NC}" - rm -f "$pidfile" - fi - else - echo -e "${RED}โŒ ${agent} agent not running${NC}" - fi - done -} - -# Function to show logs -show_logs() { - local agent=${1:-"all"} - - if [[ "$agent" == "all" ]]; then - echo -e "${BLUE}๐Ÿ“ Showing logs for all agents (press Ctrl+C to stop)${NC}" - tail -f /tmp/agentex-*.log 2>/dev/null || echo "No log files found" - else - local logfile="/tmp/agentex-${agent}.log" - if [[ -f "$logfile" ]]; then - echo -e "${BLUE}๐Ÿ“ Showing logs for ${agent} agent (press Ctrl+C to stop)${NC}" - tail -f "$logfile" - else - echo -e "${RED}โŒ Log file not found: $logfile${NC}" - fi - fi -} - -# Function to test agent 
connectivity -test_system() { - echo -e "${BLUE}๐Ÿงช Testing agent connectivity${NC}" - echo -e "${BLUE}=============================${NC}" - - # Check if agents are responding on their ports - echo -e "${YELLOW}๐Ÿ” Testing agent connectivity...${NC}" - - ports=(8000 8001 8002 8003) - agents=("orchestrator" "creator" "critic" "formatter") - all_responding=true - - for i in "${!ports[@]}"; do - port=${ports[$i]} - agent=${agents[$i]} - if nc -z localhost $port 2>/dev/null; then - echo -e "${GREEN}โœ… ${agent} agent responding on port $port${NC}" - else - echo -e "${RED}โŒ ${agent} agent not responding on port $port${NC}" - all_responding=false - fi - done - - echo "" - if $all_responding; then - echo -e "${GREEN}๐ŸŽ‰ All agents are ready and responding!${NC}" - echo -e "${BLUE}๐Ÿ’ก You can now:${NC}" - echo " 1. Monitor logs: $0 logs" - echo " 2. Send requests through the AgentEx platform UI" - echo " 3. Use direct HTTP calls to test individual agents" - echo "" - echo -e "${BLUE}๐Ÿ”— Agent Endpoints:${NC}" - echo " โ€ข Orchestrator: http://localhost:8000" - echo " โ€ข Creator: http://localhost:8001" - echo " โ€ข Critic: http://localhost:8002" - echo " โ€ข Formatter: http://localhost:8003" - echo "" - echo -e "${BLUE}๐Ÿ“ Sample Request (send via AgentEx UI):${NC}" - echo '{"request": "Write a brief welcome message for our new AI assistant", "rules": ["Under 100 words", "Friendly tone", "Include emoji"], "target_format": "HTML"}' - else - echo -e "${RED}โŒ Some agents are not responding${NC}" - echo "Check status: $0 status" - echo "Check logs: $0 logs" - fi -} - -# Main script logic -case "${1:-start}" in - "start") - check_prerequisites - - echo -e "${YELLOW}๐Ÿš€ Starting all agents in flattened structure...${NC}" - echo "" - - # Start all agents using the flattened manifests - start_agent "orchestrator" "$BASE_DIR/orchestrator.yaml" $ORCHESTRATOR_PORT || exit 1 - start_agent "creator" "$BASE_DIR/creator.yaml" $CREATOR_PORT || exit 1 - start_agent "critic" "$BASE_DIR/critic.yaml" $CRITIC_PORT || exit 1 - start_agent "formatter" "$BASE_DIR/formatter.yaml" $FORMATTER_PORT || exit 1 - - echo "" - echo -e "${GREEN}๐ŸŽ‰ All agents started successfully!${NC}" - echo "" - echo -e "${BLUE}๐Ÿ“ Available commands:${NC}" - echo " $0 status - Show agent status" - echo " $0 logs - Show all agent logs" - echo " $0 logs <agent> - Show specific agent logs (orchestrator|creator|critic|formatter)" - echo " $0 test - Test agent connectivity" - echo " $0 stop - Stop all agents" - echo "" - echo -e "${BLUE}๐Ÿ“ค Agent Endpoints:${NC}" - echo " โ€ข Orchestrator: http://localhost:8000" - echo " โ€ข Creator: http://localhost:8001" - echo " โ€ข Critic: http://localhost:8002" - echo " โ€ข Formatter: http://localhost:8003" - echo "" - echo -e "${BLUE}๐Ÿ’ก To interact with agents:${NC}" - echo " 1. Use the AgentEx platform to send tasks" - echo " 2. Send HTTP requests directly to agent endpoints" - echo " 3.
Monitor workflow progress with: $0 logs" - echo "" - ;; - - "stop") - stop_agents - ;; - - "status") - show_status - ;; - - "logs") - show_logs "$2" - ;; - - "test") - test_system - ;; - - "help"|"-h"|"--help") - echo -e "${BLUE}๐ŸŽญ Multi-Agent Content Assembly Line (Flattened Structure)${NC}" - echo "" - echo "Usage: $0 [command]" - echo "" - echo "Commands:" - echo " start Start all agents (default)" - echo " stop Stop all agents" - echo " status Show agent status" - echo " logs Show all agent logs" - echo " logs <agent> Show specific agent logs" - echo " test Test agent connectivity" - echo " help Show this help" - echo "" - echo "Examples:" - echo " $0 start # Start all agents" - echo " $0 status # Check if agents are running" - echo " $0 logs # Monitor all logs" - echo " $0 logs orchestrator # Monitor orchestrator logs only" - echo " $0 test # Check agent connectivity" - echo " $0 stop # Stop all agents" - echo "" - echo "Architecture Benefits:" - echo " โ€ข 90% less boilerplate (12 files vs ~40 files)" - echo " โ€ข Single shared Dockerfile and pyproject.toml" - echo " โ€ข All agent code in one directory" - echo " โ€ข Maintains AgentEx CLI compatibility" - ;; - - *) - echo -e "${RED}โŒ Unknown command: $1${NC}" - echo "Use '$0 help' for usage information" - exit 1 - ;; -esac diff --git a/examples/tutorials/10_async/00_base/090_multi_agent_non_temporal/tests/test_agent.py b/examples/tutorials/10_async/00_base/090_multi_agent_non_temporal/tests/test_agent.py deleted file mode 100644 index d4c1dd7d..00000000 --- a/examples/tutorials/10_async/00_base/090_multi_agent_non_temporal/tests/test_agent.py +++ /dev/null @@ -1,241 +0,0 @@ -""" -Sample tests for AgentEx ACP agent. - -This test suite demonstrates how to test the main AgentEx API functions: -- Non-streaming event sending and polling -- Streaming event sending - -To run these tests: -1. Make sure the agent is running (via docker-compose or `agentex agents run`) -2. Set the AGENTEX_API_BASE_URL environment variable if not using default -3.
Run: pytest test_agent.py -v - -Configuration: -- AGENTEX_API_BASE_URL: Base URL for the AgentEx server (default: http://localhost:5003) -- AGENT_NAME: Name of the agent to test (default: ab090-orchestrator-agent) -""" - -import os -import json -import uuid - -import pytest -import pytest_asyncio -from test_utils.async_utils import ( - stream_agent_response, - send_event_and_poll_yielding, -) - -from agentex import AsyncAgentex -from agentex.types.agent_rpc_params import ParamsCreateTaskRequest -from agentex.types.text_content_param import TextContentParam - -# Configuration from environment variables -AGENTEX_API_BASE_URL = os.environ.get("AGENTEX_API_BASE_URL", "http://localhost:5003") -AGENT_NAME = os.environ.get("AGENT_NAME", "ab090-orchestrator-agent") - - -@pytest_asyncio.fixture -async def client(): - """Create an AsyncAgentex client instance for testing.""" - client = AsyncAgentex(base_url=AGENTEX_API_BASE_URL) - yield client - await client.close() - - -@pytest.fixture -def agent_name(): - """Return the agent name for testing.""" - return AGENT_NAME - - -@pytest_asyncio.fixture -async def agent_id(client, agent_name): - """Retrieve the agent ID based on the agent name.""" - agents = await client.agents.list() - for agent in agents: - if agent.name == agent_name: - return agent.id - raise ValueError(f"Agent with name {agent_name} not found.") - - -class TestNonStreamingEvents: - """Test non-streaming event sending and polling.""" - - @pytest.mark.asyncio - async def test_multi_agent_workflow_complete(self, client: AsyncAgentex, agent_id: str): - """Test the complete multi-agent workflow with all agents using polling that yields messages.""" - # Create a task for the orchestrator - task_response = await client.agents.create_task(agent_id, params=ParamsCreateTaskRequest(name=uuid.uuid1().hex)) - task = task_response.result - assert task is not None - - # Send a content creation request as JSON - request_json = { - "request": "Write a welcome message for our AI assistant", - "rules": ["Under 50 words", "Friendly tone", "Include emoji"], - "target_format": "HTML", - } - - # Collect messages as they arrive from polling - messages = [] - print("\n๐Ÿ”„ Polling for multi-agent workflow responses...") - - # Track which agents have completed their work - workflow_markers = { - "orchestrator_started": False, - "creator_called": False, - "critic_called": False, - "formatter_called": False, - "workflow_completed": False, - } - - all_agents_done = False - async for message in send_event_and_poll_yielding( - client=client, - agent_id=agent_id, - task_id=task.id, - user_message=json.dumps(request_json), - timeout=120, # Longer timeout for multi-agent workflow - sleep_interval=2.0, - ): - messages.append(message) - # Track agent participation as messages arrive to show real-time progress - if message.content and message.content.content: - content = message.content.content.lower() - - if "starting content workflow" in content: - workflow_markers["orchestrator_started"] = True - - if "creator output" in content: - workflow_markers["creator_called"] = True - - if "critic feedback" in content or "content approved by critic" in content: - workflow_markers["critic_called"] = True - - if "calling formatter agent" in content: - workflow_markers["formatter_called"] = True - - if "workflow complete" in content or "content creation complete" in content: - workflow_markers["workflow_completed"] = True - - # Check if all agents have participated - all_agents_done =
all(workflow_markers.values()) - if all_agents_done: - break - - # Assert all agents participated - assert workflow_markers["orchestrator_started"], "Orchestrator did not start workflow" - assert workflow_markers["creator_called"], "Creator agent was not called" - assert workflow_markers["critic_called"], "Critic agent was not called" - assert workflow_markers["formatter_called"], "Formatter agent was not called" - assert workflow_markers["workflow_completed"], "Workflow did not complete successfully" - - assert all_agents_done, "Not all agents completed their work before timeout" - - # Verify the final output contains HTML (since we requested HTML format) - all_messages_text = " ".join([msg.content.content for msg in messages if msg.content]) - assert "<html" in all_messages_text.lower() or "<p" in all_messages_text.lower(), "Final output does not contain HTML formatting" - - -class TestStreamingEvents: - """Test streaming event sending.""" - - @pytest.mark.asyncio - async def test_multi_agent_workflow_streaming(self, client: AsyncAgentex, agent_id: str): - """Test the multi-agent workflow while streaming responses.""" - # Create a task for the orchestrator - task_response = await client.agents.create_task(agent_id, params=ParamsCreateTaskRequest(name=uuid.uuid1().hex)) - task = task_response.result - assert task is not None - - # Send a content creation request as JSON, this time requesting Markdown output - request_json = { - "request": "Write a welcome message for our AI assistant", - "rules": ["Under 50 words", "Friendly tone"], - "target_format": "Markdown", - } - - # Track which agents have completed their work - workflow_markers = { - "orchestrator_started": False, - "creator_called": False, - "critic_called": False, - "formatter_called": False, - "workflow_completed": False, - } - - all_messages: list[str] = [] - - # Send the event, then collect streamed responses as they arrive - event_content = TextContentParam(type="text", author="user", content=json.dumps(request_json)) - await client.agents.send_event(agent_id=agent_id, params={"task_id": task.id, "content": event_content}) - - async for event in stream_agent_response(client=client, task_id=task.id, timeout=120): - content = event.get("content", {}) - text = content.get("content") or "" - if not text: - continue - all_messages.append(text) - lowered = text.lower() - if "starting content workflow" in lowered: - workflow_markers["orchestrator_started"] = True - if "creator output" in lowered: - workflow_markers["creator_called"] = True - if "critic feedback" in lowered or "content approved by critic" in lowered: - workflow_markers["critic_called"] = True - if "calling formatter agent" in lowered: - workflow_markers["formatter_called"] = True - if "workflow complete" in lowered or "content creation complete" in lowered: - workflow_markers["workflow_completed"] = True - if all(workflow_markers.values()): - break - - assert len(all_messages) > 0, "No messages received from streaming" - - # Assert all agents participated - assert workflow_markers["orchestrator_started"], "Orchestrator did not start workflow" - assert workflow_markers["creator_called"], "Creator agent was not called" - assert workflow_markers["critic_called"], "Critic agent was not called" - assert workflow_markers["formatter_called"], "Formatter agent was not called" - assert workflow_markers["workflow_completed"], "Workflow did not complete successfully" - - # Verify the final output contains Markdown (since we requested Markdown format) - combined_response = " ".join(all_messages) - assert "markdown" in combined_response.lower() or "#" in combined_response, ( - "Final output does not contain Markdown formatting" - ) - - -if __name__ == "__main__": - pytest.main([__file__, "-v"]) diff --git a/examples/tutorials/10_async/10_temporal/000_hello_acp/.dockerignore b/examples/tutorials/10_async/10_temporal/000_hello_acp/.dockerignore deleted file mode 100644 index c2d7fca4..00000000 --- a/examples/tutorials/10_async/10_temporal/000_hello_acp/.dockerignore +++ /dev/null @@ -1,43 +0,0 @@ -# Python -__pycache__/ -*.py[cod] -*$py.class -*.so -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -*.egg-info/ -.installed.cfg -*.egg - -# Environments -.env** -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# IDE -.idea/ -.vscode/ -*.swp -*.swo - -# Git -.git -.gitignore - -# Misc -.DS_Store diff --git a/examples/tutorials/10_async/10_temporal/000_hello_acp/Dockerfile b/examples/tutorials/10_async/10_temporal/000_hello_acp/Dockerfile deleted file mode 100644 index e739eb4a..00000000 --- a/examples/tutorials/10_async/10_temporal/000_hello_acp/Dockerfile +++ /dev/null @@ -1,59 +0,0 @@ -# syntax=docker/dockerfile:1.3 -FROM python:3.12-slim -COPY --from=ghcr.io/astral-sh/uv:0.6.4 /uv /uvx /bin/ - -# Install system dependencies -RUN apt-get update && apt-get install -y \ - htop \ - vim \ - curl \ - tar \ - python3-dev \ - postgresql-client \ - build-essential \ - libpq-dev \ - gcc \ - cmake \ - netcat-openbsd \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - -# Install tctl (Temporal CLI) -RUN curl -L https://github.com/temporalio/tctl/releases/download/v1.18.1/tctl_1.18.1_linux_arm64.tar.gz -o /tmp/tctl.tar.gz && \ - tar -xzf /tmp/tctl.tar.gz -C /usr/local/bin && \ - chmod +x /usr/local/bin/tctl && \ - rm /tmp/tctl.tar.gz - -RUN uv pip install --system --upgrade pip setuptools wheel - -ENV UV_HTTP_TIMEOUT=1000 - -# Copy pyproject.toml and README.md to install dependencies -COPY 10_async/10_temporal/000_hello_acp/pyproject.toml /app/000_hello_acp/pyproject.toml -COPY 10_async/10_temporal/000_hello_acp/README.md
/app/000_hello_acp/README.md - -WORKDIR /app/000_hello_acp - -# Copy the project code -COPY 10_async/10_temporal/000_hello_acp/project /app/000_hello_acp/project - -# Copy the test files -COPY 10_async/10_temporal/000_hello_acp/tests /app/000_hello_acp/tests - -# Copy shared test utilities -COPY test_utils /app/test_utils - -# Install the required Python packages with dev dependencies (includes pytest) -RUN uv pip install --system .[dev] pytest-asyncio httpx - -# Set environment variables -ENV PYTHONPATH=/app - -# Set test environment variables -ENV AGENT_NAME=at000-hello-acp - -# Run the ACP server using uvicorn -CMD ["uvicorn", "project.acp:acp", "--host", "0.0.0.0", "--port", "8000"] - -# When we deploy the worker, we will replace the CMD with the following -# CMD ["python", "-m", "run_worker"] \ No newline at end of file diff --git a/examples/tutorials/10_async/10_temporal/000_hello_acp/README.md b/examples/tutorials/10_async/10_temporal/000_hello_acp/README.md deleted file mode 100644 index 95d8f852..00000000 --- a/examples/tutorials/10_async/10_temporal/000_hello_acp/README.md +++ /dev/null @@ -1,55 +0,0 @@ -# [Temporal] Hello ACP - -Temporal workflows make agents durable - they survive restarts and can run indefinitely without consuming resources while idle. Instead of handlers, you define a workflow class with `@workflow.run` and `@workflow.signal` methods. - -## What You'll Learn -- Building durable agents with Temporal workflows -- The workflow and signal pattern -- How workflows survive failures and resume automatically -- When to use Temporal vs base async agents - -## Prerequisites -- Development environment set up (see [main repo README](https://github.com/scaleapi/scale-agentex)) -- Backend services running: `make dev` from repository root (includes Temporal) -- Temporal UI available at http://localhost:8233 -- Understanding of base async agents (see [../../00_base/080_batch_events](../../00_base/080_batch_events/) to understand why Temporal) - -## Quick Start - -```bash -cd examples/tutorials/10_async/10_temporal/000_hello_acp -uv run agentex agents run --manifest manifest.yaml -``` - -**Monitor:** Check Temporal UI at http://localhost:8233 to see your durable workflow running. - -## Key Pattern - -```python -@workflow.defn(name="my-workflow") -class MyWorkflow(BaseWorkflow): - @workflow.run - async def on_task_create(self, params: CreateTaskParams): - # Wait indefinitely for events - workflow stays alive - await workflow.wait_condition(lambda: self._complete) - - @workflow.signal(name=SignalName.RECEIVE_EVENT) - async def on_task_event_send(self, params: SendEventParams): - # Handle events as signals to the workflow -``` - -## When to Use -- Production agents that need guaranteed execution -- Long-running tasks (hours, days, weeks, or longer) -- Operations that must survive system failures -- Agents with concurrent event handling requirements -- When you need durability and observability - -## Why This Matters -**Without Temporal:** If your worker crashes, the agent loses all state and has to start over. - -**With Temporal:** The workflow resumes exactly where it left off. If it crashes mid-conversation, Temporal brings it back up with full context intact. Can run for years if needed, only consuming resources when actively processing. - -This is the foundation for production-ready agents that handle real-world reliability requirements. 
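To see the same durability from the client's side, here is a minimal interaction sketch (it mirrors this tutorial's `dev.ipynb`; the base URL and agent name are the local-development defaults used throughout these tutorials):

```python
from agentex import Agentex

client = Agentex(base_url="http://localhost:5003")

# Create a task; the durable Temporal workflow behind it starts and stays alive.
task = client.agents.create_task(
    agent_name="at000-hello-acp",
    params={"name": "demo-task", "params": {}},
).result

# Events are delivered to the running workflow as signals.
client.agents.send_event(
    agent_name="at000-hello-acp",
    params={
        "task_id": task.id,
        "content": {"type": "text", "author": "user", "content": "Hello what can you do?"},
    },
)
# Restart the worker between these two calls and the workflow resumes with its state intact.
```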
- -**Next:** [010_agent_chat](../010_agent_chat/) - Build a complete conversational agent with tools diff --git a/examples/tutorials/10_async/10_temporal/000_hello_acp/dev.ipynb b/examples/tutorials/10_async/10_temporal/000_hello_acp/dev.ipynb deleted file mode 100644 index f8a66a0f..00000000 --- a/examples/tutorials/10_async/10_temporal/000_hello_acp/dev.ipynb +++ /dev/null @@ -1,126 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "from agentex import Agentex\n", - "\n", - "client = Agentex(base_url=\"http://localhost:5003\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1", - "metadata": {}, - "outputs": [], - "source": [ - "AGENT_NAME = \"at000-hello-acp\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2", - "metadata": {}, - "outputs": [], - "source": [ - "# (REQUIRED) Create a new task. For Agentic agents, you must create a task for messages to be associated with.\n", - "import uuid\n", - "\n", - "rpc_response = client.agents.create_task(\n", - " agent_name=AGENT_NAME,\n", - " params={\n", - " \"name\": f\"{str(uuid.uuid4())[:8]}-task\",\n", - " \"params\": {}\n", - " }\n", - ")\n", - "\n", - "task = rpc_response.result\n", - "print(task)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": {}, - "outputs": [], - "source": [ - "# Send an event to the agent\n", - "\n", - "# The response is expected to be a list of TaskMessage objects, which is a union of the following types:\n", - "# - TextContent: A message with just text content \n", - "# - DataContent: A message with JSON-serializable data content\n", - "# - ToolRequestContent: A message with a tool request, which contains a JSON-serializable request to call a tool\n", - "# - ToolResponseContent: A message with a tool response, which contains response object from a tool call in its content\n", - "\n", - "# When processing the message/send response, if you are expecting more than TextContent, such as DataContent, ToolRequestContent, or ToolResponseContent, you can process them as well\n", - "\n", - "rpc_response = client.agents.send_event(\n", - " agent_name=AGENT_NAME,\n", - " params={\n", - " \"content\": {\"type\": \"text\", \"author\": \"user\", \"content\": \"Hello what can you do?\"},\n", - " \"task_id\": task.id,\n", - " }\n", - ")\n", - "\n", - "event = rpc_response.result\n", - "print(event)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4", - "metadata": {}, - "outputs": [], - "source": [ - "# Subscribe to the async task messages produced by the agent\n", - "from agentex.lib.utils.dev_tools import subscribe_to_async_task_messages\n", - "\n", - "task_messages = subscribe_to_async_task_messages(\n", - " client=client,\n", - " task=task, \n", - " only_after_timestamp=event.created_at, \n", - " print_messages=True,\n", - " rich_print=True,\n", - " timeout=5,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.9" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git 
a/examples/tutorials/10_async/10_temporal/000_hello_acp/manifest.yaml b/examples/tutorials/10_async/10_temporal/000_hello_acp/manifest.yaml deleted file mode 100644 index e93fe8ec..00000000 --- a/examples/tutorials/10_async/10_temporal/000_hello_acp/manifest.yaml +++ /dev/null @@ -1,139 +0,0 @@ -# Agent Manifest Configuration -# --------------------------- -# This file defines how your agent should be built and deployed. - -# Build Configuration -# ------------------ -# The build config defines what gets packaged into your agent's Docker image. -# This same configuration is used whether building locally or remotely. -# -# When building: -# 1. All files from include_paths are collected into a build context -# 2. The context is filtered by dockerignore rules -# 3. The Dockerfile uses this context to build your agent's image -# 4. The image is pushed to a registry and used to run your agent -build: - context: - # Root directory for the build context - root: ../../../ # Up to tutorials level to include test_utils - - # Paths to include in the Docker build context - # Must include: - # - Your agent's directory (your custom agent code) - # These paths are collected and sent to the Docker daemon for building - include_paths: - - 10_async/10_temporal/000_hello_acp - - test_utils - - # Path to your agent's Dockerfile - # This defines how your agent's image is built from the context - # Relative to the root directory - dockerfile: 10_async/10_temporal/000_hello_acp/Dockerfile - - # Path to your agent's .dockerignore - # Filters unnecessary files from the build context - # Helps keep build context small and builds fast - dockerignore: 10_async/10_temporal/000_hello_acp/.dockerignore - - -# Local Development Configuration -# ----------------------------- -# Only used when running the agent locally -local_development: - agent: - port: 8000 # Port where your local ACP server is running - host_address: host.docker.internal # Host address for Docker networking (host.docker.internal for Docker, localhost for direct) - - # File paths for local development (relative to this manifest.yaml) - paths: - # Path to ACP server file - # Examples: - # project/acp.py (standard) - # src/server.py (custom structure) - # ../shared/acp.py (shared across projects) - # /absolute/path/acp.py (absolute path) - acp: project/acp.py - - # Path to temporal worker file - # Examples: - # project/run_worker.py (standard) - # workers/temporal.py (custom structure) - # ../shared/worker.py (shared across projects) - worker: project/run_worker.py - - -# Agent Configuration -# ----------------- -agent: - # Type of agent - either sync or async - acp_type: async - - # Unique name for your agent - # Used for task routing and monitoring - name: at000-hello-acp - - # Description of what your agent does - # Helps with documentation and discovery - description: An AgentEx agent that shows how ACP works with Temporal - - # Temporal workflow configuration - # This enables your agent to run as a Temporal workflow for long-running tasks - temporal: - enabled: true - workflows: - # Name of the workflow class - # Must match the @workflow.defn name in your workflow.py - - name: at000-hello-acp - - # Queue name for task distribution - # Used by Temporal to route tasks to your agent - # Convention: _task_queue - queue_name: 000_hello_acp_queue - - # Optional: Credentials mapping - # Maps Kubernetes secrets to environment variables - # Common credentials include: - # credentials: - # - env_var_name: OPENAI_API_KEY - # secret_name: openai-api-key - # 
secret_key: api-key - - # Optional: Set environment variables for running your agent locally as well - # as for deployment later on - # env: - # - name: OPENAI_BASE_URL - # value: "https://api.openai.com/v1" - # - name: ACCOUNT_ID - # value: "your_account_id_here" - - -# Deployment Configuration -# ----------------------- -# Configuration for deploying your agent to Kubernetes clusters -deployment: - # Container image configuration - image: - repository: "" # Update with your container registry - tag: "latest" # Default tag, should be versioned in production - - imagePullSecrets: - - name: my-registry-secret # Update with your image pull secret name - - # Global deployment settings that apply to all clusters - # These can be overridden using --override-file with custom configuration files - global: - agent: - name: "at000-hello-acp" - description: "An AgentEx agent that shows how ACP works with Temporal" - - # Default replica count - replicaCount: 1 - - # Default resource requirements - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "1000m" - memory: "2Gi" \ No newline at end of file diff --git a/examples/tutorials/10_async/10_temporal/000_hello_acp/project/__init__.py b/examples/tutorials/10_async/10_temporal/000_hello_acp/project/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/examples/tutorials/10_async/10_temporal/000_hello_acp/project/acp.py b/examples/tutorials/10_async/10_temporal/000_hello_acp/project/acp.py deleted file mode 100644 index 744068d7..00000000 --- a/examples/tutorials/10_async/10_temporal/000_hello_acp/project/acp.py +++ /dev/null @@ -1,30 +0,0 @@ -import os - -from agentex.lib.types.fastacp import TemporalACPConfig -from agentex.lib.sdk.fastacp.fastacp import FastACP - -# Create the ACP server -acp = FastACP.create( - acp_type="async", - config=TemporalACPConfig( - # When deployed to the cluster, the Temporal address will automatically be set to the cluster address - # For local development, we set the address manually to talk to the local Temporal service set up via docker compose - type="temporal", - temporal_address=os.getenv("TEMPORAL_ADDRESS", "localhost:7233") - ) -) - - -# Notice that we don't need to register any handlers when we use type="temporal" -# If you look at the code in agentex.sdk.fastacp.impl.temporal_acp -# You can see that these handlers are automatically registered when the ACP is created - -# @acp.on_task_create -# This will be handled by the method in your workflow that is decorated with @workflow.run - -# @acp.on_task_event_send -# This will be handled by the method in your workflow that is decorated with @workflow.signal(name=SignalName.RECEIVE_EVENT) - -# @acp.on_task_cancel -# This does not need to be handled by your workflow.
-# It is automatically handled by the temporal client which cancels the workflow directly \ No newline at end of file diff --git a/examples/tutorials/10_async/10_temporal/000_hello_acp/project/run_worker.py b/examples/tutorials/10_async/10_temporal/000_hello_acp/project/run_worker.py deleted file mode 100644 index 7db2fcdc..00000000 --- a/examples/tutorials/10_async/10_temporal/000_hello_acp/project/run_worker.py +++ /dev/null @@ -1,34 +0,0 @@ -import asyncio - -from project.workflow import At000HelloAcpWorkflow -from agentex.lib.utils.debug import setup_debug_if_enabled -from agentex.lib.utils.logging import make_logger -from agentex.lib.environment_variables import EnvironmentVariables -from agentex.lib.core.temporal.activities import get_all_activities -from agentex.lib.core.temporal.workers.worker import AgentexWorker - -environment_variables = EnvironmentVariables.refresh() - -logger = make_logger(__name__) - - -async def main(): - # Setup debug mode if enabled - setup_debug_if_enabled() - - task_queue_name = environment_variables.WORKFLOW_TASK_QUEUE - if task_queue_name is None: - raise ValueError("WORKFLOW_TASK_QUEUE is not set") - - # Create a worker with automatic tracing - worker = AgentexWorker( - task_queue=task_queue_name, - ) - - await worker.run( - activities=get_all_activities(), - workflow=At000HelloAcpWorkflow, - ) - -if __name__ == "__main__": - asyncio.run(main()) \ No newline at end of file diff --git a/examples/tutorials/10_async/10_temporal/000_hello_acp/project/workflow.py b/examples/tutorials/10_async/10_temporal/000_hello_acp/project/workflow.py deleted file mode 100644 index 2ca0858b..00000000 --- a/examples/tutorials/10_async/10_temporal/000_hello_acp/project/workflow.py +++ /dev/null @@ -1,71 +0,0 @@ -import json -from typing import override - -from temporalio import workflow - -from agentex.lib import adk -from agentex.lib.types.acp import SendEventParams, CreateTaskParams -from agentex.lib.utils.logging import make_logger -from agentex.types.text_content import TextContent -from agentex.lib.environment_variables import EnvironmentVariables -from agentex.lib.core.temporal.types.workflow import SignalName -from agentex.lib.core.temporal.workflows.workflow import BaseWorkflow - -environment_variables = EnvironmentVariables.refresh() - -if environment_variables.WORKFLOW_NAME is None: - raise ValueError("Environment variable WORKFLOW_NAME is not set") - -if environment_variables.AGENT_NAME is None: - raise ValueError("Environment variable AGENT_NAME is not set") - -logger = make_logger(__name__) - -@workflow.defn(name=environment_variables.WORKFLOW_NAME) -class At000HelloAcpWorkflow(BaseWorkflow): - """ - Minimal async workflow template for AgentEx Temporal agents. - """ - def __init__(self): - super().__init__(display_name=environment_variables.AGENT_NAME) - self._complete_task = False - - @workflow.signal(name=SignalName.RECEIVE_EVENT) - @override - async def on_task_event_send(self, params: SendEventParams) -> None: - logger.info(f"Received task message instruction: {params}") - - # 2. Echo back the client's message to show it in the UI. This is not done by default so the agent developer has full control over what is shown to the user. - await adk.messages.create(task_id=params.task.id, content=params.event.content) - - # 3. Send a simple response message. - # In future tutorials, this is where we'll add more sophisticated response logic. - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content=f"Hello! 
I've received your message. I can't respond right now, but in future tutorials we'll see how you can get me to intelligently respond to your message.", - ), - ) - - @workflow.run - @override - async def on_task_create(self, params: CreateTaskParams) -> None: - logger.info(f"Received task create params: {params}") - - # 1. Acknowledge that the task has been created. - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content=f"Hello! I've received your task. Normally you can do some state initialization here, or just pass and do nothing until you get your first event. For now I'm just acknowledging that I've received a task with the following params:\n\n{json.dumps(params.params, indent=2)}.\n\nYou should only see this message once, when the task is created. All subsequent events will be handled by the `on_task_event_send` handler.", - ), - ) - - # 2. Wait indefinitely for the task to be completed. If we don't do this, the workflow will close as soon as this function returns. Temporal can run hundreds of millions of workflows in parallel, so you don't need to worry about too many workflows running at once. - - # Thus, if you want this agent to field events indefinitely (or for a long time), you need to wait for a condition to be met. - await workflow.wait_condition( - lambda: self._complete_task, - timeout=None, # Set a timeout only if you have a specific reason to prevent the task from running indefinitely; generally it is not needed. - ) diff --git a/examples/tutorials/10_async/10_temporal/000_hello_acp/pyproject.toml b/examples/tutorials/10_async/10_temporal/000_hello_acp/pyproject.toml deleted file mode 100644 index ace35866..00000000 --- a/examples/tutorials/10_async/10_temporal/000_hello_acp/pyproject.toml +++ /dev/null @@ -1,34 +0,0 @@ -[build-system] -requires = ["hatchling"] -build-backend = "hatchling.build" - -[project] -name = "at000-hello-acp" -version = "0.1.0" -description = "An AgentEx agent that shows how ACP works with Temporal" -readme = "README.md" -requires-python = ">=3.12" -dependencies = [ - "agentex-sdk", - "scale-gp", -] - -[project.optional-dependencies] -dev = [ - "pytest", - "black", - "isort", - "flake8", - "debugpy>=1.8.15", -] - -[tool.hatch.build.targets.wheel] -packages = ["project"] - -[tool.black] -line-length = 88 -target-version = ['py312'] - -[tool.isort] -profile = "black" -line_length = 88 \ No newline at end of file diff --git a/examples/tutorials/10_async/10_temporal/000_hello_acp/tests/test_agent.py b/examples/tutorials/10_async/10_temporal/000_hello_acp/tests/test_agent.py deleted file mode 100644 index 9150afaa..00000000 --- a/examples/tutorials/10_async/10_temporal/000_hello_acp/tests/test_agent.py +++ /dev/null @@ -1,189 +0,0 @@ -""" -Sample tests for AgentEx ACP agent (Temporal version). - -This test suite demonstrates how to test the main AgentEx API functions: -- Non-streaming event sending and polling -- Streaming event sending - -To run these tests: -1. Make sure the agent is running (via docker-compose or `agentex agents run`) -2. Set the AGENTEX_API_BASE_URL environment variable if not using default -3.
Run: pytest test_agent.py -v - -Configuration: -- AGENTEX_API_BASE_URL: Base URL for the AgentEx server (default: http://localhost:5003) -- AGENT_NAME: Name of the agent to test (default: at000-hello-acp) -""" - -import os -import uuid -import asyncio -from typing import Any - -import pytest -import pytest_asyncio -from test_utils.async_utils import ( - poll_messages, - stream_agent_response, - send_event_and_poll_yielding, -) - -from agentex import AsyncAgentex -from agentex.types import TaskMessage -from agentex.types.agent_rpc_params import ParamsCreateTaskRequest -from agentex.types.text_content_param import TextContentParam - -# Configuration from environment variables -AGENTEX_API_BASE_URL = os.environ.get("AGENTEX_API_BASE_URL", "http://localhost:5003") -AGENT_NAME = os.environ.get("AGENT_NAME", "at000-hello-acp") - - -@pytest_asyncio.fixture -async def client(): - """Create an AgentEx client instance for testing.""" - client = AsyncAgentex(base_url=AGENTEX_API_BASE_URL) - yield client - await client.close() - - -@pytest.fixture -def agent_name(): - """Return the agent name for testing.""" - return AGENT_NAME - - -@pytest_asyncio.fixture -async def agent_id(client: AsyncAgentex, agent_name): - """Retrieve the agent ID based on the agent name.""" - agents = await client.agents.list() - for agent in agents: - if agent.name == agent_name: - return agent.id - raise ValueError(f"Agent with name {agent_name} not found.") - - -class TestNonStreamingEvents: - """Test non-streaming event sending and polling.""" - - @pytest.mark.asyncio - async def test_send_event_and_poll(self, client: AsyncAgentex, agent_id: str): - """Test sending an event and polling for the response.""" - # Create a task for this conversation - task_response = await client.agents.create_task(agent_id, params=ParamsCreateTaskRequest(name=uuid.uuid1().hex)) - task = task_response.result - assert task is not None - task_creation_found = False - # Poll for the initial task creation message - async for message in poll_messages( - client=client, - task_id=task.id, - timeout=30, - sleep_interval=1.0, - ): - assert isinstance(message, TaskMessage) - if message.content and message.content.type == "text" and message.content.author == "agent": - assert "Hello! I've received your task" in message.content.content - task_creation_found = True - break - - assert task_creation_found, "Task creation message not found in poll" - await asyncio.sleep(1.5) - # Send an event and poll for response - user_message = "Hello, this is a test message!" - async for message in send_event_and_poll_yielding( - client=client, - agent_id=agent_id, - task_id=task.id, - user_message=user_message, - timeout=30, - sleep_interval=1.0, - ): - if message.content and message.content.type == "text" and message.content.author == "agent": - assert "Hello! 
I've received your message" in message.content.content - break - - -class TestStreamingEvents: - """Test streaming event sending.""" - - @pytest.mark.asyncio - async def test_send_event_and_stream(self, client: AsyncAgentex, agent_id: str): - """Test sending an event and streaming the response.""" - task_response = await client.agents.create_task(agent_id, params=ParamsCreateTaskRequest(name=uuid.uuid1().hex)) - task = task_response.result - assert task is not None - - task_creation_found = False - async for message in poll_messages( - client=client, - task_id=task.id, - timeout=30, - sleep_interval=1.0, - ): - assert isinstance(message, TaskMessage) - if message.content and message.content.type == "text" and message.content.author == "agent": - assert "Hello! I've received your task" in message.content.content - task_creation_found = True - break - - assert task_creation_found, "Task creation message not found in poll" - - user_message = "Hello, this is a test message!" - - # Collect events from stream - all_events: list[dict[str, Any]] = [] - - # Flags to track what we've received - user_echo_found = False - agent_response_found = False - stream_timeout = 30 - async def collect_stream_events(): #noqa: ANN101 - nonlocal user_echo_found, agent_response_found - - async for event in stream_agent_response( - client=client, - task_id=task.id, - timeout=stream_timeout, - ): - # Check events as they arrive - event_type = event.get("type") - if event_type == "full": - content = event.get("content", {}) - if content.get("content") is None: - continue # Skip empty content - if content.get("type") == "text" and content.get("author") == "agent": - # Check for agent response to user message - if "Hello! I've received your message" in content.get("content", ""): - # Agent response should come after user echo - assert user_echo_found, "Agent response arrived before user message echo (incorrect order)" - agent_response_found = True - elif content.get("type") == "text" and content.get("author") == "user": - # Check for user message echo - if content.get("content") == user_message: - user_echo_found = True - - # Exit early if we've found all expected messages - if user_echo_found and agent_response_found: - break - # Start streaming task - stream_task = asyncio.create_task(collect_stream_events()) - - # Send the event - event_content = TextContentParam(type="text", author="user", content=user_message) - await client.agents.send_event(agent_id=agent_id, params={"task_id": task.id, "content": event_content}) - - # Wait for the stream to complete (with timeout) - try: - await asyncio.wait_for(stream_task, timeout=stream_timeout) - except asyncio.TimeoutError: - pytest.fail(f"Stream timed out after {stream_timeout}s waiting for expected messages") - - # Verify all expected messages were received (fail if stream ended without finding them) - - assert user_echo_found, "User message echo not found in stream" - assert agent_response_found, "Agent response not found in stream" - # Wait for streaming to complete - await stream_task - -if __name__ == "__main__": - pytest.main([__file__, "-v"]) \ No newline at end of file diff --git a/examples/tutorials/10_async/10_temporal/010_agent_chat/.dockerignore b/examples/tutorials/10_async/10_temporal/010_agent_chat/.dockerignore deleted file mode 100644 index c2d7fca4..00000000 --- a/examples/tutorials/10_async/10_temporal/010_agent_chat/.dockerignore +++ /dev/null @@ -1,43 +0,0 @@ -# Python -__pycache__/ -*.py[cod] -*$py.class -*.so -.Python -build/ -develop-eggs/ -dist/ 
-downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -*.egg-info/ -.installed.cfg -*.egg - -# Environments -.env** -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# IDE -.idea/ -.vscode/ -*.swp -*.swo - -# Git -.git -.gitignore - -# Misc -.DS_Store diff --git a/examples/tutorials/10_async/10_temporal/010_agent_chat/Dockerfile b/examples/tutorials/10_async/10_temporal/010_agent_chat/Dockerfile deleted file mode 100644 index 5ecf911b..00000000 --- a/examples/tutorials/10_async/10_temporal/010_agent_chat/Dockerfile +++ /dev/null @@ -1,59 +0,0 @@ -# syntax=docker/dockerfile:1.3 -FROM python:3.12-slim -COPY --from=ghcr.io/astral-sh/uv:0.6.4 /uv /uvx /bin/ - -# Install system dependencies -RUN apt-get update && apt-get install -y \ - htop \ - vim \ - curl \ - tar \ - python3-dev \ - postgresql-client \ - build-essential \ - libpq-dev \ - gcc \ - cmake \ - netcat-openbsd \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - -# Install tctl (Temporal CLI) -RUN curl -L https://github.com/temporalio/tctl/releases/download/v1.18.1/tctl_1.18.1_linux_arm64.tar.gz -o /tmp/tctl.tar.gz && \ - tar -xzf /tmp/tctl.tar.gz -C /usr/local/bin && \ - chmod +x /usr/local/bin/tctl && \ - rm /tmp/tctl.tar.gz - -RUN uv pip install --system --upgrade pip setuptools wheel - -ENV UV_HTTP_TIMEOUT=1000 - -# Copy pyproject.toml and README.md to install dependencies -COPY 10_async/10_temporal/010_agent_chat/pyproject.toml /app/010_agent_chat/pyproject.toml -COPY 10_async/10_temporal/010_agent_chat/README.md /app/010_agent_chat/README.md - -WORKDIR /app/010_agent_chat - -# Copy the project code -COPY 10_async/10_temporal/010_agent_chat/project /app/010_agent_chat/project - -# Copy the test files -COPY 10_async/10_temporal/010_agent_chat/tests /app/010_agent_chat/tests - -# Copy shared test utilities -COPY test_utils /app/test_utils - -# Install the required Python packages with dev dependencies (includes pytest) -RUN uv pip install --system .[dev] pytest-asyncio httpx - -# Set environment variables -ENV PYTHONPATH=/app - -# Set test environment variables -ENV AGENT_NAME=at010-agent-chat - -# Run the ACP server using uvicorn -CMD ["uvicorn", "project.acp:acp", "--host", "0.0.0.0", "--port", "8000"] - -# When we deploy the worker, we will replace the CMD with the following -# CMD ["python", "-m", "run_worker"] \ No newline at end of file diff --git a/examples/tutorials/10_async/10_temporal/010_agent_chat/README.md b/examples/tutorials/10_async/10_temporal/010_agent_chat/README.md deleted file mode 100644 index 37c31f13..00000000 --- a/examples/tutorials/10_async/10_temporal/010_agent_chat/README.md +++ /dev/null @@ -1,47 +0,0 @@ -# [Temporal] Agent Chat - -Combine streaming responses, multi-turn chat, tool calling, and tracing - all with Temporal's durability guarantees. This shows how to build a complete conversational agent that can survive failures. 
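As a taste of the shape this takes (a hedged sketch modeled on the hello-acp workflow; `self._history` and `self._respond` are illustrative placeholders rather than this tutorial's exact implementation, and the real LLM call runs through adk activities):

```python
@workflow.signal(name=SignalName.RECEIVE_EVENT)
async def on_task_event_send(self, params: SendEventParams) -> None:
    # Conversation history lives in workflow state, so Temporal persists it durably.
    self._history.append(params.event.content)

    # Echo the user's message, then post the agent's reply.
    await adk.messages.create(task_id=params.task.id, content=params.event.content)
    reply_text = await self._respond(self._history)  # placeholder for the LLM + tool-calling loop
    await adk.messages.create(
        task_id=params.task.id,
        content=TextContent(author="agent", content=reply_text),
    )
```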
- -## What You'll Learn -- Building a complete conversational agent with Temporal -- Combining streaming, multiturn, tools, and tracing -- How all agent capabilities work together with durability -- Production-ready conversational patterns - -## Prerequisites -- Development environment set up (see [main repo README](https://github.com/scaleapi/scale-agentex)) -- Backend services running: `make dev` from repository root -- Temporal UI available at http://localhost:8233 -- Understanding of Temporal basics (see [000_hello_acp](../000_hello_acp/)) - -## Quick Start - -```bash -cd examples/tutorials/10_async/10_temporal/010_agent_chat -uv run agentex agents run --manifest manifest.yaml -``` - -## Key Pattern - -- **Streaming**: Progressive response generation with `adk.messages.create()` -- **Multi-turn**: Conversation history maintained in durable workflow state -- **Tools**: Agent can call functions to perform actions -- **Tracing**: Full observability of tool calls and LLM interactions -- **Durability**: All of the above survives worker restarts - -**Monitor:** Open Temporal UI at http://localhost:8233 to see the workflow and all tool call activities. - -## Key Insight - -In base async agents, all this state lives in memory and is lost on crash. With Temporal, the entire conversation - history, tool calls, intermediate state - is durably persisted. The agent can pick up a conversation that paused days ago as if no time passed. - -## When to Use -- Production chatbots with tool capabilities -- Long-running customer service conversations -- Agents that need both reliability and rich features -- Any conversational agent handling real user traffic - -## Why This Matters -This is the pattern for real production agents. By combining all capabilities (streaming, tools, tracing) with Temporal's durability, you get an agent that's both feature-rich and reliable. This is what enterprise conversational AI looks like. - -**Next:** [020_state_machine](../020_state_machine/) - Add complex multi-phase workflows diff --git a/examples/tutorials/10_async/10_temporal/010_agent_chat/dev.ipynb b/examples/tutorials/10_async/10_temporal/010_agent_chat/dev.ipynb deleted file mode 100644 index 3cb9b822..00000000 --- a/examples/tutorials/10_async/10_temporal/010_agent_chat/dev.ipynb +++ /dev/null @@ -1,1562 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "from agentex import Agentex\n", - "\n", - "client = Agentex(base_url=\"http://localhost:5003\")" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "1", - "metadata": {}, - "outputs": [], - "source": [ - "AGENT_NAME = \"at010-agent-chat\"" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "2", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Task(id='e5333f10-5fe2-4862-8c89-7b422e27a471', created_at=datetime.datetime(2025, 10, 2, 0, 17, 9, 695914, tzinfo=TzInfo(UTC)), name='ffba53be-task', params={}, status='RUNNING', status_reason='Task created, forwarding to ACP server', task_metadata=None, updated_at=datetime.datetime(2025, 10, 2, 0, 17, 9, 695914, tzinfo=TzInfo(UTC)))\n" - ] - } - ], - "source": [ - "# (REQUIRED) Create a new task. 
For Agentic agents, you must create a task for messages to be associated with.\n", - "import uuid\n", - "\n", - "rpc_response = client.agents.create_task(\n", - " agent_name=AGENT_NAME,\n", - " params={\n", - " \"name\": f\"{str(uuid.uuid4())[:8]}-task\",\n", - " \"params\": {}\n", - " }\n", - ")\n", - "\n", - "task = rpc_response.result\n", - "print(task)" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "3", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Event(id='76ab0b9a-b107-4199-a5f6-31315dc43ee2', agent_id='2f4d3b3d-6a59-46ff-993e-afd6c1f1c7ab', sequence_id=131, task_id='e5333f10-5fe2-4862-8c89-7b422e27a471', content=TextContent(author='user', content='Tell me about recent AI news for today only.', attachments=None, format='plain', style='static', type='text'), created_at=datetime.datetime(2025, 10, 2, 0, 17, 9, 776113, tzinfo=TzInfo(UTC)))\n" - ] - } - ], - "source": [ - "# Send an event to the agent\n", - "\n", - "# The response is expected to be a list of TaskMessage objects, which is a union of the following types:\n", - "# - TextContent: A message with just text content \n", - "# - DataContent: A message with JSON-serializable data content\n", - "# - ToolRequestContent: A message with a tool request, which contains a JSON-serializable request to call a tool\n", - "# - ToolResponseContent: A message with a tool response, which contains response object from a tool call in its content\n", - "\n", - "# When processing the message/send response, if you are expecting more than TextContent, such as DataContent, ToolRequestContent, or ToolResponseContent, you can process them as well\n", - "\n", - "rpc_response = client.agents.send_event(\n", - " agent_name=AGENT_NAME,\n", - " params={\n", - " \"content\": {\"type\": \"text\", \"author\": \"user\", \"content\": \"Tell me about recent AI news for today only.\"},\n", - " \"task_id\": task.id,\n", - " }\n", - ")\n", - "\n", - "event = rpc_response.result\n", - "print(event)" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "4", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
╭───────────────────────── USER [10/02/2025 00:17:09] ─────────────────────────╮\n",
-       "│ Tell me about recent AI news for today only.                                 │\n",
-       "╰──────────────────────────────────────────────────────────────────────────────╯\n",
-       "
\n" - ], - "text/plain": [ - "\u001b[96mโ•ญโ”€\u001b[0m\u001b[96mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[96m \u001b[0m\u001b[1;96mUSER\u001b[0m\u001b[96m [10/02/2025 00:17:09] \u001b[0m\u001b[96mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[96mโ”€โ•ฎ\u001b[0m\n", - "\u001b[96mโ”‚\u001b[0m Tell me about recent AI news for today only. \u001b[96mโ”‚\u001b[0m\n", - "\u001b[96mโ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ\u001b[0m\n" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - " \n" - ] - }, - { - "data": { - "text/html": [ - "
╭──────────────────────── AGENT [10/02/2025 00:17:13] ─────────────────────────╮\n",
-       "│ 🧠 Reasoning                                                                 │\n",
-       "│                                                                              │\n",
-       "│ Searching for AI news                                                        │\n",
-       "│                                                                              │\n",
-       "│ The user wants an update on recent AI news specifically for today, October   │\n",
-       "│ 2, 2025. I’ll use the web search tool to find this information quickly,      │\n",
-       "│ aiming for outlets that are major or well-known. I plan to use a query like  │\n",
-       "│ \"AI news October 2 2025\" or \"AI news today October 2 2025\". It’ll help to    │\n",
-       "│ set the search context size high to ensure thorough coverage on this topic.  │\n",
-       "│ Let's go ahead and get this done!                                            │\n",
-       "╰──────────────────────────────────────────────────────────────────────────────╯\n",
-       "
\n" - ], - "text/plain": [ - "\u001b[95mโ•ญโ”€\u001b[0m\u001b[95mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[95m \u001b[0m\u001b[1;95mAGENT\u001b[0m\u001b[95m [10/02/2025 00:17:13] \u001b[0m\u001b[95mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[95mโ”€โ•ฎ\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m ๐Ÿง  \u001b[1mReasoning\u001b[0m \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m \u001b[1mSearching for AI news\u001b[0m \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m The user wants an update on recent AI news specifically for today, October \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m 2, 2025. Iโ€™ll use the web search tool to find this information quickly, \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m aiming for outlets that are major or well-known. I plan to use a query like \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m \"AI news October 2 2025\" or \"AI news today October 2 2025\". Itโ€™ll help to \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m set the search context size high to ensure thorough coverage on this topic. \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m Let's go ahead and get this done! \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ\u001b[0m\n" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - " \n" - ] - }, - { - "data": { - "text/html": [ - "
╭──────────────────────── AGENT [10/02/2025 00:17:16] ─────────────────────────╮\n",
-       "│ 🔧 Tool Request: openai_web_search                                           │\n",
-       "│                                                                              │\n",
-       "│ Arguments:                                                                   │\n",
-       "│                                                                              │\n",
-       "│                                                                              │\n",
-       "│  {                                                                           │\n",
-       "│    \"arguments\": \"{\\\"input\\\":\\\"AI news October 2 2025 or 'today' Oct 2 2025   │\n",
-       "│  major AI                                                                    │\n",
-       "│  announcements\\\",\\\"model\\\":\\\"gpt-5-mini\\\",\\\"reasoning_effort\\\":\\\"low\\\",\\\"ty  │\n",
-       "│  \\\":\\\"web_search_preview\\\",\\\"search_context_size\\\":\\\"high\\\"}\",               │\n",
-       "│    \"call_id\": \"call_tKw9WT0BYQyCqEcgJ2rNjoy3\",                               │\n",
-       "│    \"name\": \"openai_web_search\",                                              │\n",
-       "│    \"type\": \"function_call\",                                                  │\n",
-       "│    \"id\": \"fc_021a54d0dc6d53340068ddc48d04c481a393f03e37e300827e\",            │\n",
-       "│    \"status\": \"completed\"                                                     │\n",
-       "│  }                                                                           │\n",
-       "│                                                                              │\n",
-       "╰──────────────────────────────────────────────────────────────────────────────╯\n",
-       "
\n" - ], - "text/plain": [ - "\u001b[33mโ•ญโ”€\u001b[0m\u001b[33mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[33m \u001b[0m\u001b[1;32mAGENT\u001b[0m\u001b[33m [10/02/2025 00:17:16] \u001b[0m\u001b[33mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[33mโ”€โ•ฎ\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m ๐Ÿ”ง \u001b[1mTool Request: openai_web_search\u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[1mArguments:\u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m{\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m\"arguments\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m:\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m\"{\\\"input\\\":\\\"AI news October 2 2025 or 'today' Oct 2 2025 \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mmajor AI \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mannouncements\\\",\\\"model\\\":\\\"gpt-5-mini\\\",\\\"reasoning_effort\\\":\\\"low\\\",\\\"ty\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m\\\":\\\"web_search_preview\\\",\\\"search_context_size\\\":\\\"high\\\"}\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m\"call_id\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m:\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m\"call_tKw9WT0BYQyCqEcgJ2rNjoy3\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m\"name\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m:\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m\"openai_web_search\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m\"type\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m:\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m\"function_call\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[33mโ”‚\u001b[0m\n", - 
"\u001b[33mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m\"id\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m:\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m\"fc_021a54d0dc6d53340068ddc48d04c481a393f03e37e300827e\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m\"status\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m:\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m\"completed\"\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m}\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ\u001b[0m\n" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - " \n" - ] - }, - { - "data": { - "text/html": [ - "
╭──────────────────────── AGENT [10/02/2025 00:17:34] ─────────────────────────╮\n",
-       "│ ✅ Tool Response: openai_web_search                                          │\n",
-       "│                                                                              │\n",
-       "│ Response:                                                                    │\n",
-       "│                                                                              │\n",
-       "│                                                                              │\n",
-       "│  {                                                                           │\n",
-       "│    \"type\": \"text\",                                                           │\n",
-       "│    \"text\": \"Here are the major AI-related announcements and headlines aroun  │\n",
-       "│  today (October 2, 2025). I focused on the biggest product, infrastructure,  │\n",
-       "│  and partnership news; sources are listed after each item.\\n\\n- Microsoft    │\n",
-       "│  launches Microsoft 365 Premium (Copilot-integrated) for individuals \\u2014  │\n",
-       "│  new $19.99/month plan that bundles Copilot across Outlook/Word/Excel, rais  │\n",
-       "│  Copilot usage limits for some existing Personal/Family customers, and       │\n",
-       "│  replaces Copilot Pro. (Reported Oct 1\\u20132, 2025).                        │\n",
-       "│  ([reuters.com](https://www.reuters.com/technology/microsoft-launches-ai-po  │\n",
-       "│  red-365-premium-bundle-1999-per-month-2025-10-01/?utm_source=openai))\\n\\n-  │\n",
-       "│  OpenAI announces expanded Stargate partnerships in South Korea with Samsun  │\n",
-       "│  and SK (memory and data\\u2011center collaboration) as part of its large     │\n",
-       "│  \\u201cStargate\\u201d infrastructure push and related global datacenter      │\n",
-       "│  plans. (Reported Oct 2, 2025).                                              │\n",
-       "│  ([apnews.com](https://apnews.com/article/a65fd1a21a8587c991cc30b94b1dfe89?  │\n",
-       "│  m_source=openai))\\n\\n- Google launches new Nest cameras and a redesigned    │\n",
-       "│  Google Home app built for Gemini for Home, introducing Gemini-integrated    │\n",
-       "│  home features (descriptive alerts, \\u201cHome Brief,\\u201d Ask Home chatbo  │\n",
-       "│  and new subscription tiers. (Reported Oct 1\\u20132, 2025).                  │\n",
-       "│  ([theverge.com](https://www.theverge.com/news/789412/new-nest-cams-nest-do  │\n",
-       "│  bell-launch-price-specs-release-date?utm_source=openai))\\n\\n- NVIDIA        │\n",
-       "│  continues major infrastructure/product rollouts: recent announcements arou  │\n",
-       "│  Rubin CPX (GPU class for massive-context inference), Dynamo                 │\n",
-       "│  inference-serving software, Blackwell Ultra/AI Factory platform, and        │\n",
-       "│  availability plans for AI foundation models on RTX AI PCs \\u2014 signaling  │\n",
-       "│  heavy pushes on inference scale, large-context models, and on\\u2011device   │\n",
-       "│  for creators/enterprises. (Company releases Sept\\u2013Oct 2025).            │\n",
-       "│  ([investor.nvidia.com](https://investor.nvidia.com/news/press-release-deta  │\n",
-       "│  s/2025/NVIDIA-Unveils-Rubin-CPX-A-New-Class-of-GPU-Designed-for-Massive-Co  │\n",
-       "│  ext-Inference/default.aspx?utm_source=openai))\\n\\n- Industry events /       │\n",
-       "│  company summits with product demos and roadmaps: OpenAI DevDay scheduled O  │\n",
-       "│  6, 2025 (preview teasers and DevDay announcements expected), and Anthropic  │\n",
-       "│  held a London Builder Summit Oct 1 with demos of Claude and discussions of  │\n",
-       "│  autonomous-agent work. These events are driving near-term product and       │\n",
-       "│  developer announcements.                                                    │\n",
-       "│  ([openai.com](https://openai.com/index/announcing-devday-2025/?utm_source=  │\n",
-       "│  enai))\\n\\nIf you want, I can:\\n- Pull full articles and timelines for any   │\n",
-       "│  the items above (e.g., full Reuters/AP/Verge/NVIDIA coverage).\\n- Summariz  │\n",
-       "│  the expected user impact, pricing, or migration steps (e.g., what Microsof  │\n",
-       "│  365 Premium means if you\\u2019re a Copilot Pro or Personal subscriber).\\n-  │\n",
-       "│  Track other outlets for any breaking follow-ups today (Oct 2,               │\n",
-       "│  2025).\\n\\nWhich of these would you like more detail on?\",                   │\n",
-       "│    \"annotations\": null,                                                      │\n",
-       "│    \"meta\": null                                                              │\n",
-       "│  }                                                                           │\n",
-       "│                                                                              │\n",
-       "╰──────────────────────────────────────────────────────────────────────────────╯\n",
-       "
\n" - ], - "text/plain": [ - "\u001b[92mโ•ญโ”€\u001b[0m\u001b[92mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[92m \u001b[0m\u001b[1;32mAGENT\u001b[0m\u001b[92m [10/02/2025 00:17:34] \u001b[0m\u001b[92mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[92mโ”€โ•ฎ\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m โœ… \u001b[1mTool Response: openai_web_search\u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[1mResponse:\u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m{\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m\"type\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m:\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m\"text\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m\"text\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m:\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m\"Here are the major AI-related announcements and headlines aroun\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mtoday (October 2, 2025). I focused on the biggest product, infrastructure,\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mand partnership news; sources are listed after each item.\\n\\n- Microsoft \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mlaunches Microsoft 365 Premium (Copilot-integrated) for individuals \\u2014\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mnew $19.99/month plan that bundles Copilot across Outlook/Word/Excel, rais\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mCopilot usage limits for some existing Personal/Family customers, and \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mreplaces Copilot Pro. (Reported Oct 1\\u20132, 2025). 
\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m([reuters.com](https://www.reuters.com/technology/microsoft-launches-ai-po\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mred-365-premium-bundle-1999-per-month-2025-10-01/?utm_source=openai))\\n\\n-\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mOpenAI announces expanded Stargate partnerships in South Korea with Samsun\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mand SK (memory and data\\u2011center collaboration) as part of its large \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m\\u201cStargate\\u201d infrastructure push and related global datacenter \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mplans. (Reported Oct 2, 2025). \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m([apnews.com](https://apnews.com/article/a65fd1a21a8587c991cc30b94b1dfe89?\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mm_source=openai))\\n\\n- Google launches new Nest cameras and a redesigned \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mGoogle Home app built for Gemini for Home, introducing Gemini-integrated \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mhome features (descriptive alerts, \\u201cHome Brief,\\u201d Ask Home chatbo\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mand new subscription tiers. (Reported Oct 1\\u20132, 2025). 
\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m([theverge.com](https://www.theverge.com/news/789412/new-nest-cams-nest-do\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mbell-launch-price-specs-release-date?utm_source=openai))\\n\\n- NVIDIA \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mcontinues major infrastructure/product rollouts: recent announcements arou\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mRubin CPX (GPU class for massive-context inference), Dynamo \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34minference-serving software, Blackwell Ultra/AI Factory platform, and \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mavailability plans for AI foundation models on RTX AI PCs \\u2014 signaling\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mheavy pushes on inference scale, large-context models, and on\\u2011device \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mfor creators/enterprises. (Company releases Sept\\u2013Oct 2025). 
\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m([investor.nvidia.com](https://investor.nvidia.com/news/press-release-deta\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34ms/2025/NVIDIA-Unveils-Rubin-CPX-A-New-Class-of-GPU-Designed-for-Massive-Co\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mext-Inference/default.aspx?utm_source=openai))\\n\\n- Industry events / \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mcompany summits with product demos and roadmaps: OpenAI DevDay scheduled O\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m6, 2025 (preview teasers and DevDay announcements expected), and Anthropic\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mheld a London Builder Summit Oct 1 with demos of Claude and discussions of\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mautonomous-agent work. These events are driving near-term product and \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mdeveloper announcements. 
\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m([openai.com](https://openai.com/index/announcing-devday-2025/?utm_source=\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34menai))\\n\\nIf you want, I can:\\n- Pull full articles and timelines for any \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mthe items above (e.g., full Reuters/AP/Verge/NVIDIA coverage).\\n- Summariz\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mthe expected user impact, pricing, or migration steps (e.g., what Microsof\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m365 Premium means if you\\u2019re a Copilot Pro or Personal subscriber).\\n-\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mTrack other outlets for any breaking follow-ups today (Oct 2, \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m2025).\\n\\nWhich of these would you like more detail on?\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m\"annotations\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m:\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;102;217;239;48;2;39;40;34mnull\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m\"meta\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m:\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;102;217;239;48;2;39;40;34mnull\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m}\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ\u001b[0m\n" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - " \n" - ] - }, - { - "data": { - "text/html": [ - "
╭──────────────────────── AGENT [10/02/2025 00:17:37] ─────────────────────────╮\n",
-       "│ 🧠 Reasoning                                                                 │\n",
-       "│                                                                              │\n",
-       "│ Ensuring news accuracy                                                       │\n",
-       "│                                                                              │\n",
-       "│ I found a summary that included items from both October 1 and 2, but since   │\n",
-       "│ the user specifically asked for news from October 2, I'm needing to clarify  │\n",
-       "│ that. The summary flagged items related to OpenAI partnerships specifically  │\n",
-       "│ from October 2, while Microsoft and Google Nest news fell within the range   │\n",
-       "│ of October 1–2. To refine this, I should run a new search focused on         │\n",
-       "│ \"October 2, 2025 AI news\" for accuracy. Let’s go for high-context results!   │\n",
-       "╰──────────────────────────────────────────────────────────────────────────────╯\n",
-       "
\n" - ], - "text/plain": [ - "\u001b[95mโ•ญโ”€\u001b[0m\u001b[95mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[95m \u001b[0m\u001b[1;95mAGENT\u001b[0m\u001b[95m [10/02/2025 00:17:37] \u001b[0m\u001b[95mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[95mโ”€โ•ฎ\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m ๐Ÿง  \u001b[1mReasoning\u001b[0m \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m \u001b[1mEnsuring news accuracy\u001b[0m \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m I found a summary that included items from both October 1 and 2, but since \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m the user specifically asked for news from October 2, I'm needing to clarify \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m that. The summary flagged items related to OpenAI partnerships specifically \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m from October 2, while Microsoft and Google Nest news fell within the range \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m of October 1โ€“2. To refine this, I should run a new search focused on \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m \"October 2, 2025 AI news\" for accuracy. Letโ€™s go for high-context results! \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ\u001b[0m\n" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - " \n" - ] - }, - { - "data": { - "text/html": [ - "
╭──────────────────────── AGENT [10/02/2025 00:17:39] ─────────────────────────╮\n",
-       "│ 🔧 Tool Request: openai_web_search                                           │\n",
-       "│                                                                              │\n",
-       "│ Arguments:                                                                   │\n",
-       "│                                                                              │\n",
-       "│                                                                              │\n",
-       "│  {                                                                           │\n",
-       "│    \"arguments\": \"{\\\"input\\\":\\\"October 2 2025 AI news \\\\\\\"Oct 2\\\\\\\" 2025 'AI  │\n",
-       "│  'October 2, 2025'                                                           │\n",
-       "│  headlines\\\",\\\"model\\\":\\\"gpt-5-mini\\\",\\\"reasoning_effort\\\":\\\"low\\\",\\\"type\\\"  │\n",
-       "│  \"web_search_preview\\\",\\\"search_context_size\\\":\\\"high\\\"}\",                   │\n",
-       "│    \"call_id\": \"call_yPkg4tWTQozDGti1938f9WNV\",                               │\n",
-       "│    \"name\": \"openai_web_search\",                                              │\n",
-       "│    \"type\": \"function_call\",                                                  │\n",
-       "│    \"id\": \"fc_021a54d0dc6d53340068ddc4a4413881a3a6b57998c7e7a360\",            │\n",
-       "│    \"status\": \"completed\"                                                     │\n",
-       "│  }                                                                           │\n",
-       "│                                                                              │\n",
-       "╰──────────────────────────────────────────────────────────────────────────────╯\n",
-       "
\n" - ], - "text/plain": [ - "\u001b[33mโ•ญโ”€\u001b[0m\u001b[33mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[33m \u001b[0m\u001b[1;32mAGENT\u001b[0m\u001b[33m [10/02/2025 00:17:39] \u001b[0m\u001b[33mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[33mโ”€โ•ฎ\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m ๐Ÿ”ง \u001b[1mTool Request: openai_web_search\u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[1mArguments:\u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m{\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m\"arguments\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m:\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m\"{\\\"input\\\":\\\"October 2 2025 AI news \\\\\\\"Oct 2\\\\\\\" 2025 'AI\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m'October 2, 2025' \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mheadlines\\\",\\\"model\\\":\\\"gpt-5-mini\\\",\\\"reasoning_effort\\\":\\\"low\\\",\\\"type\\\"\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m\"web_search_preview\\\",\\\"search_context_size\\\":\\\"high\\\"}\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m\"call_id\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m:\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m\"call_yPkg4tWTQozDGti1938f9WNV\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m\"name\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m:\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m\"openai_web_search\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m\"type\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m:\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m\"function_call\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[33mโ”‚\u001b[0m\n", - 
"\u001b[33mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m\"id\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m:\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m\"fc_021a54d0dc6d53340068ddc4a4413881a3a6b57998c7e7a360\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m\"status\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m:\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m\"completed\"\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m}\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ\u001b[0m\n" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - " \n" - ] - }, - { - "data": { - "text/html": [ - "
╭──────────────────────── AGENT [10/02/2025 00:17:55] ─────────────────────────╮\n",
-       "│ ✅ Tool Response: openai_web_search                                          │\n",
-       "│                                                                              │\n",
-       "│ Response:                                                                    │\n",
-       "│                                                                              │\n",
-       "│                                                                              │\n",
-       "│  {                                                                           │\n",
-       "│    \"type\": \"text\",                                                           │\n",
-       "│    \"text\": \"Here are major AI-related headlines for October 2, 2025 (Oct 2,  │\n",
-       "│  2025), with one-line summaries and sources:\\n\\n1) Meta to use AI-chatbot    │\n",
-       "│  conversations to target ads and content (policy announced; rollout details  │\n",
-       "│  and exclusions described).                                                  │\n",
-       "│  ([wsj.com](https://www.wsj.com/tech/ai/meta-will-begin-using-ai-chatbot-co  │\n",
-       "│  ersations-to-target-ads-291093d3?utm_source=openai))\\n\\n2) OpenAI launches  │\n",
-       "│  Sora (generative-AI short-video app) and debuts an upgraded generative vid  │\n",
-       "│  model; also rolls out a ChatGPT shopping feature (starts with Etsy sellers  │\n",
-       "│  ([sfgate.com](https://www.sfgate.com/tech/article/openai-takes-on-google-m  │\n",
-       "│  a-21076572.php?utm_source=openai))\\n\\n3) New AI-directed feature film \\\"Th  │\n",
-       "│  Sweet Idleness\\\" (claimed as first feature-length AI-generated film) tease  │\n",
-       "│  trailer release.                                                            │\n",
-       "│  ([en.wikipedia.org](https://en.wikipedia.org/wiki/The_Sweet_Idleness?utm_s  │\n",
-       "│  rce=openai))\\n\\n4) Reports of Elon Musk / xAI creating an AI-only software  │\n",
-       "│  company (projected to simulate traditional software firms) \\u2014 coverage  │\n",
-       "│  and industry reaction.                                                      │\n",
-       "│  ([fladgate.com](https://www.fladgate.com/insights/ai-round-up-october-2025  │\n",
-       "│  tm_source=openai))\\n\\n5) Ongoing international/regulatory moves: continued  │\n",
-       "│  reporting on AI governance (Framework Convention on AI, national executive  │\n",
-       "│  actions and proposed U.S. AI bills discussed in recent coverage).           │\n",
-       "│  ([en.wikipedia.org](https://en.wikipedia.org/wiki/Framework_Convention_on_  │\n",
-       "│  tificial_Intelligence?utm_source=openai))\\n\\nIf you\\u2019d like, I can:\\n-  │\n",
-       "│  Expand any headline into a short summary (2\\u20134 paragraphs) with         │\n",
-       "│  additional sources.\\n- Provide links to the full articles or a timeline of  │\n",
-       "│  Oct 2 coverage.\\n- Filter headlines by topic (policy, industry product      │\n",
-       "│  launches, legal, entertainment).\",                                          │\n",
-       "│    \"annotations\": null,                                                      │\n",
-       "│    \"meta\": null                                                              │\n",
-       "│  }                                                                           │\n",
-       "│                                                                              │\n",
-       "╰──────────────────────────────────────────────────────────────────────────────╯\n",
-       "
\n" - ], - "text/plain": [ - "\u001b[92mโ•ญโ”€\u001b[0m\u001b[92mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[92m \u001b[0m\u001b[1;32mAGENT\u001b[0m\u001b[92m [10/02/2025 00:17:55] \u001b[0m\u001b[92mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[92mโ”€โ•ฎ\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m โœ… \u001b[1mTool Response: openai_web_search\u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[1mResponse:\u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m{\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m\"type\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m:\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m\"text\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m\"text\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m:\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m\"Here are major AI-related headlines for October 2, 2025 (Oct 2,\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m2025), with one-line summaries and sources:\\n\\n1) Meta to use AI-chatbot \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mconversations to target ads and content (policy announced; rollout details\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mand exclusions described). 
\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m([wsj.com](https://www.wsj.com/tech/ai/meta-will-begin-using-ai-chatbot-co\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mersations-to-target-ads-291093d3?utm_source=openai))\\n\\n2) OpenAI launches\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mSora (generative-AI short-video app) and debuts an upgraded generative vid\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mmodel; also rolls out a ChatGPT shopping feature (starts with Etsy sellers\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m([sfgate.com](https://www.sfgate.com/tech/article/openai-takes-on-google-m\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34ma-21076572.php?utm_source=openai))\\n\\n3) New AI-directed feature film \\\"Th\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mSweet Idleness\\\" (claimed as first feature-length AI-generated film) tease\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mtrailer release. \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m([en.wikipedia.org](https://en.wikipedia.org/wiki/The_Sweet_Idleness?utm_s\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mrce=openai))\\n\\n4) Reports of Elon Musk / xAI creating an AI-only software\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mcompany (projected to simulate traditional software firms) \\u2014 coverage\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mand industry reaction. 
\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m([fladgate.com](https://www.fladgate.com/insights/ai-round-up-october-2025\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mtm_source=openai))\\n\\n5) Ongoing international/regulatory moves: continued\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mreporting on AI governance (Framework Convention on AI, national executive\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mactions and proposed U.S. AI bills discussed in recent coverage). \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m([en.wikipedia.org](https://en.wikipedia.org/wiki/Framework_Convention_on_\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mtificial_Intelligence?utm_source=openai))\\n\\nIf you\\u2019d like, I can:\\n-\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mExpand any headline into a short summary (2\\u20134 paragraphs) with \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34madditional sources.\\n- Provide links to the full articles or a timeline of\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mOct 2 coverage.\\n- Filter headlines by topic (policy, industry product \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mlaunches, legal, entertainment).\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m\"annotations\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m:\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;102;217;239;48;2;39;40;34mnull\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m\"meta\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m:\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;102;217;239;48;2;39;40;34mnull\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m 
\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m}\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ\u001b[0m\n" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - " \n", - " \n", - " \n", - " \n" - ] - }, - { - "data": { - "text/html": [ - "
-       "╭──────────────────────── AGENT [10/02/2025 00:18:10] ─────────────────────────╮\n",
-       "│ 🧠 Reasoning                                                                 │\n",
-       "│                                                                              │\n",
-       "│ Confirming AI news dates                                                     │\n",
-       "│                                                                              │\n",
-       "│ I’m considering the upcoming events like the Anthropic London Builder Summit │\n",
-       "│ and OpenAI DevDay, but since they’re not specifically Oct 2 news, I think    │\n",
-       "│ it’s best to skip them. There’s also a report about an AI-generated film     │\n",
-       "│ teaser that seems weaker, so I’ll avoid including that. I want to focus on   │\n",
-       "│ providing 3-4 solid items from reliable sources like AP, WSJ, and SFGate.    │\n",
-       "│ I’ll run searches to verify each article's date by using web search to check │\n",
-       "│ the headlines specifically for Oct 2!                                        │\n",
-       "╰──────────────────────────────────────────────────────────────────────────────╯\n",
-       "╭──────────────────────── AGENT [10/02/2025 00:18:13] ─────────────────────────╮\n",
-       "│ 🔧 Tool Request: openai_web_search                                           │\n",
-       "│                                                                              │\n",
-       "│ Arguments:                                                                   │\n",
-       "│                                                                              │\n",
-       "│                                                                              │\n",
-       "│  {                                                                           │\n",
-       "│    \"arguments\": \"{\\\"input\\\":\\\"Meta will begin using AI chatbot              │\n",
-       "│  conversations to target ads WSJ October 2                                   │\n",
-       "│  2025\\\",\\\"model\\\":\\\"gpt-5-mini\\\",\\\"reasoning_effort\\\":\\\"low\\\",\\\"type\\\":      │\n",
-       "│  \\\"web_search_preview\\\",\\\"search_context_size\\\":\\\"high\\\"}\",                  │\n",
-       "│    \"call_id\": \"call_Gf1YdPUxylTVI6pvznxqX0Si\",                               │\n",
-       "│    \"name\": \"openai_web_search\",                                              │\n",
-       "│    \"type\": \"function_call\",                                                  │\n",
-       "│    \"id\": \"fc_021a54d0dc6d53340068ddc4c5d2e481a3a52d44867b4e838b\",            │\n",
-       "│    \"status\": \"completed\"                                                     │\n",
-       "│  }                                                                           │\n",
-       "│                                                                              │\n",
-       "╰──────────────────────────────────────────────────────────────────────────────╯\n",
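Reading note: the `arguments` value in these tool-request panels is itself a JSON-encoded string nested inside the function-call envelope, so it must be decoded twice before use. A minimal sketch of that decoding (the field names are copied from the panel above; nothing beyond them is assumed):

```python
import json

# Envelope as rendered in the panel above; note that "arguments" is a
# JSON *string*, not a JSON object.
envelope = {
    "name": "openai_web_search",
    "type": "function_call",
    "arguments": (
        '{"input":"Meta will begin using AI chatbot conversations to target '
        'ads WSJ October 2 2025","model":"gpt-5-mini","reasoning_effort":"low",'
        '"type":"web_search_preview","search_context_size":"high"}'
    ),
}

# Second decode: turn the nested string into the actual search payload.
payload = json.loads(envelope["arguments"])
print(payload["type"])                 # -> web_search_preview
print(payload["search_context_size"])  # -> high
```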
-       "╭──────────────────────── AGENT [10/02/2025 00:18:27] ─────────────────────────╮\n",
-       "│ ✅ Tool Response: openai_web_search                                          │\n",
-       "│                                                                              │\n",
-       "│ Response:                                                                    │\n",
-       "│                                                                              │\n",
-       "│                                                                              │\n",
-       "│  {                                                                           │\n",
-       "│    \"type\": \"text\",                                                           │\n",
-       "│    \"text\": \"Yes \\u2014 several outlets (including The Wall Street Journal)   │\n",
-       "│  report that Meta announced it will begin using users\\u2019 conversations    │\n",
-       "│  with its AI assistant to personalize ads and content. Key points:\\n\\n- What │\n",
-       "│  Meta announced: interactions with Meta AI (text and voice) will be added to │\n",
-       "│  the signals Meta uses to personalize feeds and ads across Facebook,         │\n",
-       "│  Instagram and other Meta apps.                                              │\n",
-       "│  ([wsj.com](https://www.wsj.com/tech/ai/meta-will-begin-using-ai-chatbot-    │\n",
-       "│  conversations-to-target-ads-291093d3?utm_source=openai))  \\n- When it       │\n",
-       "│  starts: Meta will notify users beginning October 7, 2025, and the change    │\n",
-       "│  takes effect on December 16, 2025. Conversations before December 16         │\n",
-       "│  won\\u2019t be used.                                                         │\n",
-       "│  ([wsj.com](https://www.wsj.com/tech/ai/meta-will-begin-using-ai-chatbot-    │\n",
-       "│  conversations-to-target-ads-291093d3?utm_source=openai))  \\n- Opt-out and   │\n",
-       "│  scope: Users reportedly will not be able to opt out of using AI-chat data   │\n",
-       "│  for personalization; the change applies only to people who use Meta AI.     │\n",
-       "│  Meta says it will exclude certain \\u201csensitive\\u201d topics (examples    │\n",
-       "│  listed include politics, religion, sexual orientation, health, race) from   │\n",
-       "│  being used for ad targeting. The rollout initially excludes the U.K., the   │\n",
-       "│  EU and South Korea.                                                         │\n",
-       "│  ([wsj.com](https://www.wsj.com/tech/ai/meta-will-begin-using-ai-chatbot-    │\n",
-       "│  conversations-to-target-ads-291093d3?utm_source=openai))  \\n- Why: Meta     │\n",
-       "│  frames this as part of funding and improving its AI/assistant strategy and  │\n",
-       "│  making recommendations more personalized; the company has large ad revenues │\n",
-       "│  and many monthly Meta AI users.                                             │\n",
-       "│  ([reuters.com](https://www.reuters.com/business/media-telecom/meta-use-ai-  │\n",
-       "│  chats-personalize-content-ads-december-2025-10-01/?utm_source=openai))\\n\\n  │\n",
-       "│  If you want, I can:\\n- Pull the full WSJ article and quote the most         │\n",
-       "│  relevant passages (with source citation), or  \\n- Summarize differences in  │\n",
-       "│  how major outlets are reporting this (WSJ vs. Reuters vs. Bloomberg), or    │\n",
-       "│  \\n- Explain privacy implications and practical steps you can take (e.g.,    │\n",
-       "│  stop using Meta AI, adjust ad preferences, limit cross-account linking).    │\n",
-       "│  Which would you prefer?\",                                                   │\n",
-       "│    \"annotations\": null,                                                      │\n",
-       "│    \"meta\": null                                                              │\n",
-       "│  }                                                                           │\n",
-       "│                                                                              │\n",
-       "╰──────────────────────────────────────────────────────────────────────────────╯\n",
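Likewise, each tool response above arrives as a text block with `type`, `text`, `annotations`, and `meta` fields. A small helper for pulling the text out of that envelope; this is a sketch whose shape is read off the panels here, not an official client type:

```python
from typing import Any, Optional

def response_text(block: dict[str, Any]) -> Optional[str]:
    # Panels show blocks of the form
    # {"type": "text", "text": "...", "annotations": null, "meta": null};
    # return the text only when the block really is a text block.
    if block.get("type") == "text":
        return block.get("text")
    return None

# Usage against the envelope shape shown above:
assert response_text(
    {"type": "text", "text": "hi", "annotations": None, "meta": None}
) == "hi"
```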
-       "╭──────────────────────── AGENT [10/02/2025 00:18:29] ─────────────────────────╮\n",
-       "│ 🔧 Tool Request: openai_web_search                                           │\n",
-       "│                                                                              │\n",
-       "│ Arguments:                                                                   │\n",
-       "│                                                                              │\n",
-       "│                                                                              │\n",
-       "│  {                                                                           │\n",
-       "│    \"arguments\": \"{\\\"input\\\":\\\"OpenAI Sora generative short-video app 'Sora'  │\n",
-       "│  launch October 2 2025 SFGate OpenAI generative video model ChatGPT shopping │\n",
-       "│  Etsy Oct 2                                                                  │\n",
-       "│  2025\\\",\\\"model\\\":\\\"gpt-5-mini\\\",\\\"reasoning_effort\\\":\\\"low\\\",\\\"type\\\":      │\n",
-       "│  \\\"web_search_preview\\\",\\\"search_context_size\\\":\\\"high\\\"}\",                  │\n",
-       "│    \"call_id\": \"call_uW4nB1OU4OkqRFJXTnjiDIfl\",                               │\n",
-       "│    \"name\": \"openai_web_search\",                                              │\n",
-       "│    \"type\": \"function_call\",                                                  │\n",
-       "│    \"id\": \"fc_021a54d0dc6d53340068ddc4d626d481a3b1995f20b3b6bbb1\",            │\n",
-       "│    \"status\": \"completed\"                                                     │\n",
-       "│  }                                                                           │\n",
-       "│                                                                              │\n",
-       "╰──────────────────────────────────────────────────────────────────────────────╯\n",
-       "╭──────────────────────── AGENT [10/02/2025 00:18:48] ─────────────────────────╮\n",
-       "│ ✅ Tool Response: openai_web_search                                          │\n",
-       "│                                                                              │\n",
-       "│ Response:                                                                    │\n",
-       "│                                                                              │\n",
-       "│                                                                              │\n",
-       "│  {                                                                           │\n",
-       "│    \"type\": \"text\",                                                           │\n",
-       "│    \"text\": \"Looks like you\\u2019re referencing two related OpenAI            │\n",
-       "│  announcements today (October 2, 2025). Here\\u2019s a short, sourced         │\n",
-       "│  summary and what I could confirm:\\n\\n- OpenAI launched a new short-video /  │\n",
-       "│  social app called Sora (invite-only, iOS) that uses its Sora video model    │\n",
-       "│  (Sora 2) to generate short videos with synchronized audio,                  │\n",
-       "│  \\u201ccameo\\u201d likeness features, and feed-style sharing. Multiple      │\n",
-       "│  outlets reported the rollout on/around Oct 1\\u20132, 2025.                  │\n",
-       "│  ([macrumors.com](https://www.macrumors.com/2025/09/30/openai-sora-ai-       │\n",
-       "│  video-app/?utm_source=openai))\\n\\n- Separately (and related to ChatGPT),    │\n",
-       "│  OpenAI announced an Instant Checkout shopping feature in ChatGPT that       │\n",
-       "│  initially lets U.S. users buy single items from Etsy sellers directly       │\n",
-       "│  inside ChatGPT and will expand to many Shopify merchants. Etsy has a        │\n",
-       "│  partnership page explaining purchases through ChatGPT and CNBC/other        │\n",
-       "│  outlets covered the Instant Checkout announcement (late September / rolling │\n",
-       "│  into early October 2025).                                                   │\n",
-       "│  ([help.etsy.com](https://help.etsy.com/hc/en-us/articles/34208252828695-    │\n",
-       "│  Purchasing-Etsy-Items-Through-ChatGPT?utm_source=openai))\\n\\nNote about     │\n",
-       "│  SFGate: I didn\\u2019t find an SFGate article in the searches I ran; major   │\n",
-       "│  outlets (MacRumors, CNBC, TechCrunch, Wired, Indian Express, Etsy\\u2019s    │\n",
-       "│  own help/news page) are reporting these items. If you want, I can           │\n",
-       "│  specifically search SFGate (or fetch any SFGate link) and pull that article │\n",
-       "│  for you. Would you like me to do that or summarize anything in more detail  │\n",
-       "│  (privacy controls, \\u201ccameo\\u201d flow, Instant Checkout payment         │\n",
-       "│  methods, availability, etc.)?\",                                             │\n",
-       "│    \"annotations\": null,                                                      │\n",
-       "│    \"meta\": null                                                              │\n",
-       "│  }                                                                           │\n",
-       "│                                                                              │\n",
-       "╰──────────────────────────────────────────────────────────────────────────────╯\n",
-       "╭──────────────────────── AGENT [10/02/2025 00:18:50] ─────────────────────────╮\n",
-       "│ 🔧 Tool Request: openai_web_search                                           │\n",
-       "│                                                                              │\n",
-       "│ Arguments:                                                                   │\n",
-       "│                                                                              │\n",
-       "│                                                                              │\n",
-       "│  {                                                                           │\n",
-       "│    \"arguments\": \"{\\\"input\\\":\\\"OpenAI Stargate partnerships South Korea       │\n",
-       "│  Samsung SK Oct 2 2025 AP News 'Stargate' Oct 2 2025 OpenAI South Korea      │\n",
-       "│  partnership\\\",\\\"model\\\":\\\"gpt-5-mini\\\",\\\"reasoning_effort\\\":\\\"low\\\",        │\n",
-       "│  \\\"type\\\":\\\"web_search_preview\\\",\\\"search_context_size\\\":\\\"high\\\"}\",         │\n",
-       "│    \"call_id\": \"call_VW52QZ6thugsAMWsvZe3SwfN\",                               │\n",
-       "│    \"name\": \"openai_web_search\",                                              │\n",
-       "│    \"type\": \"function_call\",                                                  │\n",
-       "│    \"id\": \"fc_021a54d0dc6d53340068ddc4eaaf5081a39fac1af70f131d0d\",            │\n",
-       "│    \"status\": \"completed\"                                                     │\n",
-       "│  }                                                                           │\n",
-       "│                                                                              │\n",
-       "╰──────────────────────────────────────────────────────────────────────────────╯\n",
-       "╭──────────────────────── AGENT [10/02/2025 00:19:07] ─────────────────────────╮\n",
-       "│ ✅ Tool Response: openai_web_search                                          │\n",
-       "│                                                                              │\n",
-       "│ Response:                                                                    │\n",
-       "│                                                                              │\n",
-       "│                                                                              │\n",
-       "│  {                                                                           │\n",
-       "│    \"type\": \"text\",                                                           │\n",
-       "│    \"text\": \"Yes \\u2014 OpenAI announced this partnership with major South    │\n",
-       "│  Korean firms at the start of October 2025. Short summary of the key points  │\n",
-       "│  (with sources):\\n\\n- What was announced: OpenAI signed letters of intent /  │\n",
-       "│  memoranda of understanding with Samsung Electronics and SK Group (including │\n",
-       "│  SK hynix and SK Telecom) to supply memory chips and explore                 │\n",
-       "│  data\\u2011center collaboration as part of OpenAI\\u2019s large              │\n",
-       "│  \\u201cStargate\\u201d infrastructure initiative.                             │\n",
-       "│  ([apnews.com](https://apnews.com/article/a65fd1a21a8587c991cc30b94b1dfe89?  │\n",
-       "│  utm_source=openai))\\n\\n- Supply plans: Samsung and SK hynix are expected to │\n",
-       "│  scale production of advanced memory (HBM/DRAM) to support Stargate, with    │\n",
-       "│  reports quoting demand estimates up to about 900,000 DRAM wafers per month. │\n",
-       "│  Specific delivery schedules and final contract volumes were not finalized   │\n",
-       "│  in the announcements.                                                       │\n",
-       "│  ([reuters.com](https://www.reuters.com/business/media-telecom/samsung-sk-   │\n",
-       "│  hynix-supply-memory-chips-openais-stargate-project-2025-10-01/?utm_source=  │\n",
-       "│  openai))\\n\\n- Data\\u2011center cooperation: OpenAI and SK Telecom signed an │\n",
-       "│  MOU to explore building an AI data center in South Korea (referred to in    │\n",
-       "│  coverage as \\u201cStargate Korea\\u201d), and Samsung affiliates will        │\n",
-       "│  explore data\\u2011center technologies (including discussions reported      │\n",
-       "│  about floating data\\u2011center concepts).                                  │\n",
-       "│  ([apnews.com](https://apnews.com/article/a65fd1a21a8587c991cc30b94b1dfe89?  │\n",
-       "│  utm_source=openai))\\n\\n- Context / meetings: The public announcements       │\n",
-       "│  followed meetings in Seoul between OpenAI CEO Sam Altman, South Korean      │\n",
-       "│  President Lee Jae\\u2011myung, and senior leaders of Samsung and SK. The     │\n",
-       "│  coverage ties the deals to OpenAI\\u2019s broader Stargate expansion and     │\n",
-       "│  efforts to secure large-scale compute and memory supply.                    │\n",
-       "│  ([apnews.com](https://apnews.com/article/a65fd1a21a8587c991cc30b94b1dfe89?  │\n",
-       "│  utm_source=openai))\\n\\nIf you want, I can:\\n- Pull up the full AP article   │\n",
-       "│  (published Oct 2, 2025) or Reuters/Samsung/SK press releases for exact      │\n",
-       "│  wording and timestamps.\\n- Extract and compare direct quotes from Sam       │\n",
-       "│  Altman, Samsung and SK executives.\\n- Track whether these LOIs/MOUs later   │\n",
-       "│  convert into firm purchase agreements and delivery timelines (would require │\n",
-       "│  follow-up searches). Which would you like?\",                                │\n",
-       "│    \"annotations\": null,                                                      │\n",
-       "│    \"meta\": null                                                              │\n",
-       "│  }                                                                           │\n",
-       "│                                                                              │\n",
-       "╰──────────────────────────────────────────────────────────────────────────────╯\n",
-       "
\n" - ], - "text/plain": [ - "\u001b[92mโ•ญโ”€\u001b[0m\u001b[92mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[92m \u001b[0m\u001b[1;32mAGENT\u001b[0m\u001b[92m [10/02/2025 00:19:07] \u001b[0m\u001b[92mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[92mโ”€โ•ฎ\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m โœ… \u001b[1mTool Response: openai_web_search\u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[1mResponse:\u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m{\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m\"type\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m:\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m\"text\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m\"text\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m:\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m\"Yes \\u2014 OpenAI announced this partnership with major South \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mKorean firms at the start of October 2025. Short summary of the key points\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m(with sources):\\n\\n- What was announced: OpenAI signed letters of intent /\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mmemoranda of understanding with Samsung Electronics and SK Group (includin\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mSK hynix and SK Telecom) to supply memory chips and explore data\\u2011cent\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mcollaboration as part of OpenAI\\u2019s large \\u201cStargate\\u201d \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34minfrastructure initiative. 
\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m([apnews.com](https://apnews.com/article/a65fd1a21a8587c991cc30b94b1dfe89?\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mm_source=openai))\\n\\n- Supply plans: Samsung and SK hynix are expected to \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mscale production of advanced memory (HBM/DRAM) to support Stargate, with \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mreports quoting demand estimates up to about 900,000 DRAM wafers per month\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mSpecific delivery schedules and final contract volumes were not finalized \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mthe announcements. \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m([reuters.com](https://www.reuters.com/business/media-telecom/samsung-sk-h\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mix-supply-memory-chips-openais-stargate-project-2025-10-01/?utm_source=ope\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mi))\\n\\n- Data\\u2011center cooperation: OpenAI and SK Telecom signed an MOU\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mto explore building an AI data center in South Korea (referred to in \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mcoverage as \\u201cStargate Korea\\u201d), and Samsung affiliates will explo\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mdata\\u2011center technologies (including discussions reported about floati\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mdata\\u2011center concepts). 
\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m([apnews.com](https://apnews.com/article/a65fd1a21a8587c991cc30b94b1dfe89?\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mm_source=openai))\\n\\n- Context / meetings: The public announcements follow\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mmeetings in Seoul between OpenAI CEO Sam Altman, South Korean President Le\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mJae\\u2011myung, and senior leaders of Samsung and SK. The coverage ties th\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mdeals to OpenAI\\u2019s broader Stargate expansion and efforts to secure \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mlarge-scale compute and memory supply. \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m([apnews.com](https://apnews.com/article/a65fd1a21a8587c991cc30b94b1dfe89?\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mm_source=openai))\\n\\nIf you want, I can:\\n- Pull up the full AP article \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m(published Oct 2, 2025) or Reuters/Samsung/SK press releases for exact \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mwording and timestamps.\\n- Extract and compare direct quotes from Sam \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mAltman, Samsung and SK executives.\\n- Track whether these LOIs/MOUs later \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mconvert into firm purchase agreements and delivery timelines (would requir\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mfollow-up searches). 
Which would you like?\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m\"annotations\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m:\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;102;217;239;48;2;39;40;34mnull\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m\"meta\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m:\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;102;217;239;48;2;39;40;34mnull\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m}\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ\u001b[0m\n" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - " \n" - ] - }, - { - "data": { - "text/html": [ - "
โ•ญโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ AGENT [10/02/2025 00:19:09] โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฎ\n",
-       "โ”‚ ๐Ÿง  Reasoning                                                                 โ”‚\n",
-       "โ”‚                                                                              โ”‚\n",
-       "โ”‚ Looking for relevant news                                                    โ”‚\n",
-       "โ”‚                                                                              โ”‚\n",
-       "โ”‚ Iโ€™m considering if there are any major items from October 2. The second      โ”‚\n",
-       "โ”‚ search generated some reports about film and Elon Musk, which seem less      โ”‚\n",
-       "โ”‚ central. I might add a brief note about ongoing regulatory actions. To be    โ”‚\n",
-       "โ”‚ thorough, I'll search for โ€œAI regulation news from October 2, 2025,โ€ as that โ”‚\n",
-       "โ”‚ might provide more focused and relevant updates regarding current            โ”‚\n",
-       "โ”‚ discussions on regulation in AI. Letโ€™s see what I find!                      โ”‚\n",
-       "โ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ\n",
-       "
\n" - ], - "text/plain": [ - "\u001b[95mโ•ญโ”€\u001b[0m\u001b[95mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[95m \u001b[0m\u001b[1;95mAGENT\u001b[0m\u001b[95m [10/02/2025 00:19:09] \u001b[0m\u001b[95mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[95mโ”€โ•ฎ\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m ๐Ÿง  \u001b[1mReasoning\u001b[0m \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m \u001b[1mLooking for relevant news\u001b[0m \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m Iโ€™m considering if there are any major items from October 2. The second \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m search generated some reports about film and Elon Musk, which seem less \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m central. I might add a brief note about ongoing regulatory actions. To be \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m thorough, I'll search for โ€œAI regulation news from October 2, 2025,โ€ as that \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m might provide more focused and relevant updates regarding current \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m discussions on regulation in AI. Letโ€™s see what I find! \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ\u001b[0m\n" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - " \n" - ] - }, - { - "data": { - "text/html": [ - "
โ•ญโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ AGENT [10/02/2025 00:19:11] โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฎ\n",
-       "โ”‚ ๐Ÿ”ง Tool Request: openai_web_search                                           โ”‚\n",
-       "โ”‚                                                                              โ”‚\n",
-       "โ”‚ Arguments:                                                                   โ”‚\n",
-       "โ”‚                                                                              โ”‚\n",
-       "โ”‚                                                                              โ”‚\n",
-       "โ”‚  {                                                                           โ”‚\n",
-       "โ”‚    \"arguments\": \"{\\\"input\\\":\\\"AI regulation news October 2 2025 'Oct 2' AI   โ”‚\n",
-       "โ”‚  governance today Oct 2 2025                                                 โ”‚\n",
-       "โ”‚  headlines\\\",\\\"model\\\":\\\"gpt-5-mini\\\",\\\"reasoning_effort\\\":\\\"low\\\",\\\"type\\\"  โ”‚\n",
-       "โ”‚  \"web_search_preview\\\",\\\"search_context_size\\\":\\\"high\\\"}\",                   โ”‚\n",
-       "โ”‚    \"call_id\": \"call_C5Uie1v4Y0iShinSml9ekyUr\",                               โ”‚\n",
-       "โ”‚    \"name\": \"openai_web_search\",                                              โ”‚\n",
-       "โ”‚    \"type\": \"function_call\",                                                  โ”‚\n",
-       "โ”‚    \"id\": \"fc_021a54d0dc6d53340068ddc500466481a3901b511309490a71\",            โ”‚\n",
-       "โ”‚    \"status\": \"completed\"                                                     โ”‚\n",
-       "โ”‚  }                                                                           โ”‚\n",
-       "โ”‚                                                                              โ”‚\n",
-       "โ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ\n",
-       "
\n" - ], - "text/plain": [ - "\u001b[33mโ•ญโ”€\u001b[0m\u001b[33mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[33m \u001b[0m\u001b[1;32mAGENT\u001b[0m\u001b[33m [10/02/2025 00:19:11] \u001b[0m\u001b[33mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[33mโ”€โ•ฎ\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m ๐Ÿ”ง \u001b[1mTool Request: openai_web_search\u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[1mArguments:\u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m{\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m\"arguments\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m:\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m\"{\\\"input\\\":\\\"AI regulation news October 2 2025 'Oct 2' AI \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mgovernance today Oct 2 2025 \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mheadlines\\\",\\\"model\\\":\\\"gpt-5-mini\\\",\\\"reasoning_effort\\\":\\\"low\\\",\\\"type\\\"\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m\"web_search_preview\\\",\\\"search_context_size\\\":\\\"high\\\"}\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m\"call_id\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m:\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m\"call_C5Uie1v4Y0iShinSml9ekyUr\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m\"name\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m:\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m\"openai_web_search\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m\"type\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m:\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m\"function_call\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[33mโ”‚\u001b[0m\n", - 
"\u001b[33mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m\"id\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m:\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m\"fc_021a54d0dc6d53340068ddc500466481a3901b511309490a71\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m\"status\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m:\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m\"completed\"\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m}\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ\u001b[0m\n" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - " \n" - ] - }, - { - "data": { - "text/html": [ - "
โ•ญโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ AGENT [10/02/2025 00:19:20] โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฎ\n",
-       "โ”‚ โœ… Tool Response: openai_web_search                                          โ”‚\n",
-       "โ”‚                                                                              โ”‚\n",
-       "โ”‚ Response:                                                                    โ”‚\n",
-       "โ”‚                                                                              โ”‚\n",
-       "โ”‚                                                                              โ”‚\n",
-       "โ”‚  {                                                                           โ”‚\n",
-       "โ”‚    \"type\": \"text\",                                                           โ”‚\n",
-       "โ”‚    \"text\": \"Here are the top AI-regulation/governance headlines for October  โ”‚\n",
-       "โ”‚  2, 2025 (sources cited):\\n\\n- U.S. administration pushes back on            โ”‚\n",
-       "โ”‚  industry-led health\\u2011AI oversight (Coalition for Health AI): Trump      โ”‚\n",
-       "โ”‚  administration officials and some GOP lawmakers criticized the CHAI         โ”‚\n",
-       "โ”‚  private-sector oversight initiative as potentially monopolistic and are     โ”‚\n",
-       "โ”‚  moving to distance federal endorsement.                                     โ”‚\n",
-       "โ”‚  ([politico.com](https://www.politico.com/news/2025/10/01/trump-ai-artifici  โ”‚\n",
-       "โ”‚  -intelligence-regulation-hhs-00590902?utm_source=openai))\\n\\n- European     โ”‚\n",
-       "โ”‚  Commission transparency consultation for AI-generated content closes today  โ”‚\n",
-       "โ”‚  (Oct 2, 2025): the consultation on Article 50 transparency guidelines \\u20  โ”‚\n",
-       "โ”‚  covering labeling of AI\\u2011generated content, deepfake disclosure and     โ”‚\n",
-       "โ”‚  related rules \\u2014 runs through Oct 2, 2025 and will feed mandatory       โ”‚\n",
-       "โ”‚  transparency obligations that take effect next year.                        โ”‚\n",
-       "โ”‚  ([euairisk.com](https://euairisk.com/news/2025-09-13?utm_source=openai))\\n  โ”‚\n",
-       "โ”‚  - Google Europe executive urges EU to simplify overlapping AI rules:        โ”‚\n",
-       "โ”‚  Alphabet/Google\\u2019s Europe president called for streamlining the         โ”‚\n",
-       "โ”‚  EU\\u2019s growing patchwork of internet- and AI-related laws, warning       โ”‚\n",
-       "โ”‚  complexity risks harming innovation.                                        โ”‚\n",
-       "โ”‚  ([timesofindia.indiatimes.com](https://timesofindia.indiatimes.com/technol  โ”‚\n",
-       "โ”‚  y/tech-news/google-europe-president-debbie-weinstein-on-eus-ai-laws-there-  โ”‚\n",
-       "โ”‚  -a-real-need-for-/articleshow/124264273.cms?utm_source=openai))\\n\\n-        โ”‚\n",
-       "โ”‚  Implementation issues and timeline pressure continue around the EU AI Act:  โ”‚\n",
-       "โ”‚  standards bodies and others have flagged delays and calls from industry fo  โ”‚\n",
-       "โ”‚  more time or simplification as key AI\\u2011Act technical standards and      โ”‚\n",
-       "โ”‚  compliance guidance are still being finalized.                              โ”‚\n",
-       "โ”‚  ([euronews.com](https://www.euronews.com/next/2025/04/16/eu-standards-bodi  โ”‚\n",
-       "โ”‚  -flag-delays-to-work-on-ai-act?utm_source=openai))\\n\\n- Broader geopolitic  โ”‚\n",
-       "โ”‚  / market context: policymakers\\u2019 moves and regulatory uncertainty are   โ”‚\n",
-       "โ”‚  taking place amid continued large capital flows into AI infrastructure and  โ”‚\n",
-       "โ”‚  products, which is keeping AI governance high on legislative and corporate  โ”‚\n",
-       "โ”‚  agendas.                                                                    โ”‚\n",
-       "โ”‚  ([theaustralian.com.au](https://www.theaustralian.com.au/business/markets/  โ”‚\n",
-       "โ”‚  -shutdown-the-real-story-behind-surging-ai-investment-numbers/news-story/9  โ”‚\n",
-       "โ”‚  f4249381f557d13c947d037a6e905?utm_source=openai))\\n\\nWould you like a deep  โ”‚\n",
-       "โ”‚  summary for any of these items (timeline, who\\u2019s involved, likely next  โ”‚\n",
-       "โ”‚  steps), or links to the full articles?\",                                    โ”‚\n",
-       "โ”‚    \"annotations\": null,                                                      โ”‚\n",
-       "โ”‚    \"meta\": null                                                              โ”‚\n",
-       "โ”‚  }                                                                           โ”‚\n",
-       "โ”‚                                                                              โ”‚\n",
-       "โ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ\n",
-       "
\n" - ], - "text/plain": [ - "\u001b[92mโ•ญโ”€\u001b[0m\u001b[92mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[92m \u001b[0m\u001b[1;32mAGENT\u001b[0m\u001b[92m [10/02/2025 00:19:20] \u001b[0m\u001b[92mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[92mโ”€โ•ฎ\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m โœ… \u001b[1mTool Response: openai_web_search\u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[1mResponse:\u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m{\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m\"type\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m:\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m\"text\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m\"text\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m:\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m\"Here are the top AI-regulation/governance headlines for October\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m2, 2025 (sources cited):\\n\\n- U.S. administration pushes back on \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mindustry-led health\\u2011AI oversight (Coalition for Health AI): Trump \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34madministration officials and some GOP lawmakers criticized the CHAI \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mprivate-sector oversight initiative as potentially monopolistic and are \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mmoving to distance federal endorsement. 
\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m([politico.com](https://www.politico.com/news/2025/10/01/trump-ai-artifici\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m-intelligence-regulation-hhs-00590902?utm_source=openai))\\n\\n- European \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mCommission transparency consultation for AI-generated content closes today\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m(Oct 2, 2025): the consultation on Article 50 transparency guidelines \\u20\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mcovering labeling of AI\\u2011generated content, deepfake disclosure and \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mrelated rules \\u2014 runs through Oct 2, 2025 and will feed mandatory \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mtransparency obligations that take effect next year. \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m([euairisk.com](https://euairisk.com/news/2025-09-13?utm_source=openai))\\n\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m- Google Europe executive urges EU to simplify overlapping AI rules: \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mAlphabet/Google\\u2019s Europe president called for streamlining the \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mEU\\u2019s growing patchwork of internet- and AI-related laws, warning \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mcomplexity risks harming innovation. 
\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m([timesofindia.indiatimes.com](https://timesofindia.indiatimes.com/technol\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34my/tech-news/google-europe-president-debbie-weinstein-on-eus-ai-laws-there-\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m-a-real-need-for-/articleshow/124264273.cms?utm_source=openai))\\n\\n- \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mImplementation issues and timeline pressure continue around the EU AI Act:\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mstandards bodies and others have flagged delays and calls from industry fo\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mmore time or simplification as key AI\\u2011Act technical standards and \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mcompliance guidance are still being finalized. \u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m([euronews.com](https://www.euronews.com/next/2025/04/16/eu-standards-bodi\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m-flag-delays-to-work-on-ai-act?utm_source=openai))\\n\\n- Broader geopolitic\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m/ market context: policymakers\\u2019 moves and regulatory uncertainty are \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mtaking place amid continued large capital flows into AI infrastructure and\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mproducts, which is keeping AI governance high on legislative and corporate\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34magendas. 
\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m([theaustralian.com.au](https://www.theaustralian.com.au/business/markets/\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m-shutdown-the-real-story-behind-surging-ai-investment-numbers/news-story/9\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34mf4249381f557d13c947d037a6e905?utm_source=openai))\\n\\nWould you like a deep\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34msummary for any of these items (timeline, who\\u2019s involved, likely next\u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34msteps), or links to the full articles?\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m\"annotations\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m:\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;102;217;239;48;2;39;40;34mnull\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m\"meta\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m:\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;102;217;239;48;2;39;40;34mnull\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m}\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ\u001b[0m\n" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - " \n" - ] - }, - { - "data": { - "text/html": [ - "
โ•ญโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ AGENT [10/02/2025 00:19:23] โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฎ\n",
-       "โ”‚ ๐Ÿง  Reasoning                                                                 โ”‚\n",
-       "โ”‚                                                                              โ”‚\n",
-       "โ”‚ Gathering AI news summaries                                                  โ”‚\n",
-       "โ”‚                                                                              โ”‚\n",
-       "โ”‚ I noticed that some items are from earlier dates but were listed as closing  โ”‚\n",
-       "โ”‚ on October 2. The user probably wants a brief list of the top AI news items  โ”‚\n",
-       "โ”‚ reported today. I should create concise bullet points with sources and 1-2   โ”‚\n",
-       "โ”‚ sentence summaries. It might be a good idea to offer to expand on any item   โ”‚\n",
-       "โ”‚ or provide full article links. Plus, Iโ€™ll mention that coverage evolves and  โ”‚\n",
-       "โ”‚ offer to keep monitoring for updates.                                        โ”‚\n",
-       "โ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ\n",
-       "
\n" - ], - "text/plain": [ - "\u001b[95mโ•ญโ”€\u001b[0m\u001b[95mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[95m \u001b[0m\u001b[1;95mAGENT\u001b[0m\u001b[95m [10/02/2025 00:19:23] \u001b[0m\u001b[95mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[95mโ”€โ•ฎ\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m ๐Ÿง  \u001b[1mReasoning\u001b[0m \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m \u001b[1mGathering AI news summaries\u001b[0m \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m I noticed that some items are from earlier dates but were listed as closing \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m on October 2. The user probably wants a brief list of the top AI news items \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m reported today. I should create concise bullet points with sources and 1-2 \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m sentence summaries. It might be a good idea to offer to expand on any item \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m or provide full article links. Plus, Iโ€™ll mention that coverage evolves and \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m offer to keep monitoring for updates. \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ\u001b[0m\n" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - " \n" - ] - }, - { - "data": { - "text/html": [ - "
โ•ญโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ AGENT [10/02/2025 00:19:26] โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฎ\n",
-       "โ”‚ ๐Ÿง  Reasoning                                                                 โ”‚\n",
-       "โ”‚                                                                              โ”‚\n",
-       "โ”‚ Summarizing AI stories for today                                             โ”‚\n",
-       "โ”‚                                                                              โ”‚\n",
-       "โ”‚ Iโ€™m crafting the final answer by starting with, \"Here are the main AI        โ”‚\n",
-       "โ”‚ stories reported today (Oct 2, 2025):\" First, Meta plans to use AI-chatbot   โ”‚\n",
-       "โ”‚ conversations to target ads, starting December 16, with notifications on     โ”‚\n",
-       "โ”‚ October 7, although it excludes the UK, EU, and South Korea initially and    โ”‚\n",
-       "โ”‚ sensitive topics.                                                            โ”‚\n",
-       "โ”‚                                                                              โ”‚\n",
-       "โ”‚ Next, thereโ€™s news about OpenAI expanding partnerships in South Korea with   โ”‚\n",
-       "โ”‚ Samsung and SK, and launching the Sora short-video app. Finally, although    โ”‚\n",
-       "โ”‚ the user wants \"today only,\" I might mention the EU consultation on          โ”‚\n",
-       "โ”‚ transparency is relevant too.                                                โ”‚\n",
-       "โ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ\n",
-       "
\n" - ], - "text/plain": [ - "\u001b[95mโ•ญโ”€\u001b[0m\u001b[95mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[95m \u001b[0m\u001b[1;95mAGENT\u001b[0m\u001b[95m [10/02/2025 00:19:26] \u001b[0m\u001b[95mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[95mโ”€โ•ฎ\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m ๐Ÿง  \u001b[1mReasoning\u001b[0m \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m \u001b[1mSummarizing AI stories for today\u001b[0m \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m Iโ€™m crafting the final answer by starting with, \"Here are the main AI \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m stories reported today (Oct 2, 2025):\" First, Meta plans to use AI-chatbot \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m conversations to target ads, starting December 16, with notifications on \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m October 7, although it excludes the UK, EU, and South Korea initially and \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m sensitive topics. \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m Next, thereโ€™s news about OpenAI expanding partnerships in South Korea with \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m Samsung and SK, and launching the Sora short-video app. Finally, although \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m the user wants \"today only,\" I might mention the EU consultation on \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m transparency is relevant too. \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ\u001b[0m\n" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - " \n" - ] - }, - { - "data": { - "text/html": [ - "
โ•ญโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ AGENT [10/02/2025 00:19:28] โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฎ\n",
-       "โ”‚ ๐Ÿง  Reasoning                                                                 โ”‚\n",
-       "โ”‚                                                                              โ”‚\n",
-       "โ”‚ Crafting the final message                                                   โ”‚\n",
-       "โ”‚                                                                              โ”‚\n",
-       "โ”‚ I could ask the user if they want the full articles in addition to the       โ”‚\n",
-       "โ”‚ summaries. I need to keep things concise and make direct offers like, \"Do    โ”‚\n",
-       "โ”‚ you want full articles, a deeper summary, or tracking updates?\" Itโ€™s         โ”‚\n",
-       "โ”‚ important to mention sources as well, and since the search provided URLs in  โ”‚\n",
-       "โ”‚ the tool outputs, I can include those in parentheses. Alright, Iโ€™ll put all  โ”‚\n",
-       "โ”‚ this together to create the final message!                                   โ”‚\n",
-       "โ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ\n",
-       "
\n" - ], - "text/plain": [ - "\u001b[95mโ•ญโ”€\u001b[0m\u001b[95mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[95m \u001b[0m\u001b[1;95mAGENT\u001b[0m\u001b[95m [10/02/2025 00:19:28] \u001b[0m\u001b[95mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[95mโ”€โ•ฎ\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m ๐Ÿง  \u001b[1mReasoning\u001b[0m \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m \u001b[1mCrafting the final message\u001b[0m \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m I could ask the user if they want the full articles in addition to the \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m summaries. I need to keep things concise and make direct offers like, \"Do \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m you want full articles, a deeper summary, or tracking updates?\" Itโ€™s \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m important to mention sources as well, and since the search provided URLs in \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m the tool outputs, I can include those in parentheses. Alright, Iโ€™ll put all \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m this together to create the final message! \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ\u001b[0m\n" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - " \n" - ] - }, - { - "data": { - "text/html": [ - "
โ•ญโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ AGENT [10/02/2025 00:19:31] โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฎ\n",
-       "โ”‚ Here are the main AI stories reported today (October 2, 2025):               โ”‚\n",
-       "โ”‚                                                                              โ”‚\n",
-       "โ”‚  โ€ข Meta will begin using conversations with its Meta AI assistant to         โ”‚\n",
-       "โ”‚    personalize ads and content (WSJ / Reuters). Key points: Meta says it     โ”‚\n",
-       "โ”‚    will add signals from AI chats to ad/content personalization (excluding   โ”‚\n",
-       "โ”‚    certain โ€œsensitiveโ€ topics), notify users starting Oct 7, and the change  โ”‚\n",
-       "โ”‚    takes effect Dec 16; rollout initially excludes the U.K., EU and South    โ”‚\n",
-       "โ”‚    Korea. Sources: Wall Street Journal (Oct 2) and Reuters coverage.         โ”‚\n",
-       "โ”‚    Links:                                                                    โ”‚\n",
-       "โ”‚    https://www.wsj.com/tech/ai/meta-will-begin-using-ai-chatbot-conversation โ”‚\n",
-       "โ”‚    s-to-target-ads-291093d3 and                                              โ”‚\n",
-       "โ”‚    https://www.reuters.com/business/media-telecom/meta-use-ai-chats-personal โ”‚\n",
-       "โ”‚    ize-content-ads-december-2025-10-01/                                      โ”‚\n",
-       "โ”‚  โ€ข OpenAI expands its โ€œStargateโ€ infrastructure partnerships in South Korea  โ”‚\n",
-       "โ”‚    (AP / Reuters). OpenAI signed MOUs/LOIs with Samsung and SK Group to      โ”‚\n",
-       "โ”‚    secure advanced memory supply (HBM/DRAM) and explore dataโ€‘center          โ”‚\n",
-       "โ”‚    collaboration as part of its global Stargate buildโ€‘out. Coverage          โ”‚\n",
-       "โ”‚    highlights meetings in Seoul between OpenAI leadership and South Korean   โ”‚\n",
-       "โ”‚    officials/companies. Source: AP News (Oct 2) and Reuters.                 โ”‚\n",
-       "โ”‚    Link: https://apnews.com/article/a65fd1a21a8587c991cc30b94b1dfe89         โ”‚\n",
-       "โ”‚  โ€ข OpenAI launches Sora (short-video / generative-video app) and related     โ”‚\n",
-       "โ”‚    generative-video features; ChatGPT shopping/Instant Checkout pilots       โ”‚\n",
-       "โ”‚    continue (tech press reports). Outlets report OpenAI rolling out an       โ”‚\n",
-       "โ”‚    invite-style short-video app (Sora) built on its video-generation models  โ”‚\n",
-       "โ”‚    and expanding ChatGPT shopping integrations (initial merchants include    โ”‚\n",
-       "โ”‚    Etsy sellers). Sources: major tech outlets reporting Oct 1โ€“2 (examples:   โ”‚\n",
-       "โ”‚    MacRumors, CNBC/tech sites).                                              โ”‚\n",
-       "โ”‚    Example link:                                                             โ”‚\n",
-       "โ”‚    https://www.macrumors.com/2025/09/30/openai-sora-ai-video-app/            โ”‚\n",
-       "โ”‚  โ€ข AI governance/regulatory items tied to Oct 2: EU transparency             โ”‚\n",
-       "โ”‚    consultation on AIโ€‘generated content and labeling closes today (feeds     โ”‚\n",
-       "โ”‚    ongoing EU transparency/labeling work), and the U.S. press continues      โ”‚\n",
-       "โ”‚    debate about private-sector AI oversight initiatives. Sources include EU  โ”‚\n",
-       "โ”‚    filings and Politico / other regulatory coverage.                         โ”‚\n",
-       "โ”‚                                                                              โ”‚\n",
-       "โ”‚ Would you like me to:                                                        โ”‚\n",
-       "โ”‚                                                                              โ”‚\n",
-       "โ”‚  โ€ข Pull the full articles for any of the items above and summarize them in   โ”‚\n",
-       "โ”‚    more detail?                                                              โ”‚\n",
-       "โ”‚  โ€ข Focus on one topic (privacy implications for Metaโ€™s change,               โ”‚\n",
-       "โ”‚    technical/market impact of OpenAIโ€™s Stargate deals, how Sora works and    โ”‚\n",
-       "โ”‚    availability)?                                                            โ”‚\n",
-       "โ”‚  โ€ข Monitor the rest of today for any breaking updates (I can check again and โ”‚\n",
-       "โ”‚    send any new items)?                                                      โ”‚\n",
-       "โ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ\n",
-       "
\n" - ], - "text/plain": [ - "\u001b[32mโ•ญโ”€\u001b[0m\u001b[32mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[32m \u001b[0m\u001b[1;32mAGENT\u001b[0m\u001b[32m [10/02/2025 00:19:31] \u001b[0m\u001b[32mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[32mโ”€โ•ฎ\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m Here are the main AI stories reported today (October 2, 2025): \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mMeta will begin using conversations with its Meta AI assistant to \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mpersonalize ads and content (WSJ / Reuters). Key points: Meta says it \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mwill add signals from AI chats to ad/content personalization (excluding \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mcertain โ€œsensitiveโ€ topics), notify users starting Oct 7, and the change \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mtakes effect Dec 16; rollout initially excludes the U.K., EU and South \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mKorea. Sources: Wall Street Journal (Oct 2) and Reuters coverage. \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mLinks: \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mhttps://www.wsj.com/tech/ai/meta-will-begin-using-ai-chatbot-conversation \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0ms-to-target-ads-291093d3 and \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mhttps://www.reuters.com/business/media-telecom/meta-use-ai-chats-personal \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mize-content-ads-december-2025-10-01/ \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mOpenAI expands its โ€œStargateโ€ infrastructure partnerships in South Korea \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0m(AP / Reuters). OpenAI signed MOUs/LOIs with Samsung and SK Group to \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0msecure advanced memory supply (HBM/DRAM) and explore dataโ€‘center \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mcollaboration as part of its global Stargate buildโ€‘out. Coverage \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mhighlights meetings in Seoul between OpenAI leadership and South Korean \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mofficials/companies. Source: AP News (Oct 2) and Reuters. \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mLink: https://apnews.com/article/a65fd1a21a8587c991cc30b94b1dfe89 \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mOpenAI launches Sora (short-video / generative-video app) and related \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mgenerative-video features; ChatGPT shopping/Instant Checkout pilots \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mcontinue (tech press reports). 
Outlets report OpenAI rolling out an \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0minvite-style short-video app (Sora) built on its video-generation models \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mand expanding ChatGPT shopping integrations (initial merchants include \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mEtsy sellers). Sources: major tech outlets reporting Oct 1โ€“2 (examples: \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mMacRumors, CNBC/tech sites). \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mExample link: \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mhttps://www.macrumors.com/2025/09/30/openai-sora-ai-video-app/ \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mAI governance/regulatory items tied to Oct 2: EU transparency \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mconsultation on AIโ€‘generated content and labeling closes today (feeds \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mongoing EU transparency/labeling work), and the U.S. press continues \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mdebate about private-sector AI oversight initiatives. Sources include EU \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mfilings and Politico / other regulatory coverage. \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m Would you like me to: \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mPull the full articles for any of the items above and summarize them in \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mmore detail? \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mFocus on one topic (privacy implications for Metaโ€™s change, \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mtechnical/market impact of OpenAIโ€™s Stargate deals, how Sora works and \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mavailability)? \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mMonitor the rest of today for any breaking updates (I can check again and \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0msend any new items)? 
\u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ\u001b[0m\n" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Streaming timed out after 120 seconds - returning collected messages\n" - ] - } - ], - "source": [ - "# Subscribe to the async task messages produced by the agent\n", - "from agentex.lib.utils.dev_tools import subscribe_to_async_task_messages\n", - "\n", - "task_messages = subscribe_to_async_task_messages(\n", - " client=client,\n", - " task=task, \n", - " only_after_timestamp=event.created_at, \n", - " print_messages=True,\n", - " rich_print=True,\n", - " timeout=120,\n", - ")" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.11" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/examples/tutorials/10_async/10_temporal/010_agent_chat/manifest.yaml b/examples/tutorials/10_async/10_temporal/010_agent_chat/manifest.yaml deleted file mode 100644 index 1d53a7c2..00000000 --- a/examples/tutorials/10_async/10_temporal/010_agent_chat/manifest.yaml +++ /dev/null @@ -1,139 +0,0 @@ -# Agent Manifest Configuration -# --------------------------- -# This file defines how your agent should be built and deployed. - -# Build Configuration -# ------------------ -# The build config defines what gets packaged into your agent's Docker image. -# This same configuration is used whether building locally or remotely. -# -# When building: -# 1. All files from include_paths are collected into a build context -# 2. The context is filtered by dockerignore rules -# 3. The Dockerfile uses this context to build your agent's image -# 4. 
The image is pushed to a registry and used to run your agent -build: - context: - # Root directory for the build context - root: ../../../ # Up to tutorials level to include test_utils - - # Paths to include in the Docker build context - # Must include: - # - Your agent's directory (your custom agent code) - # These paths are collected and sent to the Docker daemon for building - include_paths: - - 10_async/10_temporal/010_agent_chat - - test_utils - - # Path to your agent's Dockerfile - # This defines how your agent's image is built from the context - # Relative to the root directory - dockerfile: 10_async/10_temporal/010_agent_chat/Dockerfile - - # Path to your agent's .dockerignore - # Filters unnecessary files from the build context - # Helps keep build context small and builds fast - dockerignore: 10_async/10_temporal/010_agent_chat/.dockerignore - - -# Local Development Configuration -# ----------------------------- -# Only used when running the agent locally -local_development: - agent: - port: 8000 # Port where your local ACP server is running - host_address: host.docker.internal # Host address for Docker networking (host.docker.internal for Docker, localhost for direct) - - # File paths for local development (relative to this manifest.yaml) - paths: - # Path to ACP server file - # Examples: - # project/acp.py (standard) - # src/server.py (custom structure) - # ../shared/acp.py (shared across projects) - # /absolute/path/acp.py (absolute path) - acp: project/acp.py - - # Path to temporal worker file - # Examples: - # project/run_worker.py (standard) - # workers/temporal.py (custom structure) - # ../shared/worker.py (shared across projects) - worker: project/run_worker.py - - -# Agent Configuration -# ----------------- -agent: - # Type of agent - either sync or async - acp_type: async - - # Unique name for your agent - # Used for task routing and monitoring - name: at010-agent-chat - - # Description of what your agent does - # Helps with documentation and discovery - description: An AgentEx agent that streams multi-turn tool-enabled chat with tracing - - # Temporal workflow configuration - # This enables your agent to run as a Temporal workflow for long-running tasks - temporal: - enabled: true - workflows: - # Name of the workflow class - # Must match the @workflow.defn name in your workflow.py - - name: at010-agent-chat - - # Queue name for task distribution - # Used by Temporal to route tasks to your agent - # Convention: _task_queue - queue_name: 010_agent_chat_queue - - # Optional: Credentials mapping - # Maps Kubernetes secrets to environment variables - # Common credentials include: - # credentials: - # - env_var_name: OPENAI_API_KEY - # secret_name: openai-api-key - # secret_key: api-key - - # Optional: Set environment variables for running your agent locally as well - # as for deployment later on - # env: - # - name: OPENAI_BASE_URL - # value: "https://api.openai.com/v1" - # - name: ACCOUNT_ID - # value: "your_account_id_here" - - -# Deployment Configuration -# ----------------------- -# Configuration for deploying your agent to Kubernetes clusters -deployment: - # Container image configuration - image: - repository: "" # Update with your container registry - tag: "latest" # Default tag, should be versioned in production - - imagePullSecrets: - - name: my-registry-secret # Update with your image pull secret name - - # Global deployment settings that apply to all clusters - # These can be overridden using --override-file with custom configuration files - global: - agent: - 
name: "at010-agent-chat" - description: "An AgentEx agentthat streams multiturn tool-enabled chat with tracing" - - # Default replica count - replicaCount: 1 - - # Default resource requirements - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "1000m" - memory: "2Gi" \ No newline at end of file diff --git a/examples/tutorials/10_async/10_temporal/010_agent_chat/project/__init__.py b/examples/tutorials/10_async/10_temporal/010_agent_chat/project/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/examples/tutorials/10_async/10_temporal/010_agent_chat/project/acp.py b/examples/tutorials/10_async/10_temporal/010_agent_chat/project/acp.py deleted file mode 100644 index 744068d7..00000000 --- a/examples/tutorials/10_async/10_temporal/010_agent_chat/project/acp.py +++ /dev/null @@ -1,30 +0,0 @@ -import os - -from agentex.lib.types.fastacp import TemporalACPConfig -from agentex.lib.sdk.fastacp.fastacp import FastACP - -# Create the ACP server -acp = FastACP.create( - acp_type="async", - config=TemporalACPConfig( - # When deployed to the cluster, the Temporal address will automatically be set to the cluster address - # For local development, we set the address manually to talk to the local Temporal service set up via docker compose - type="temporal", - temporal_address=os.getenv("TEMPORAL_ADDRESS", "localhost:7233") - ) -) - - -# Notice that we don't need to register any handlers when we use type="temporal" -# If you look at the code in agentex.sdk.fastacp.impl.temporal_acp -# You can see that these handlers are automatically registered when the ACP is created - -# @acp.on_task_create -# This will be handled by the method in your workflow that is decorated with @workflow.run - -# @acp.on_task_event_send -# This will be handled by the method in your workflow that is decorated with @workflow.signal(name=SignalName.RECEIVE_MESSAGE) - -# @acp.on_task_cancel -# This does not need to be handled by your workflow. 
-# It is automatically handled by the temporal client which cancels the workflow directly \ No newline at end of file diff --git a/examples/tutorials/10_async/10_temporal/010_agent_chat/project/run_worker.py b/examples/tutorials/10_async/10_temporal/010_agent_chat/project/run_worker.py deleted file mode 100644 index 31a3c98c..00000000 --- a/examples/tutorials/10_async/10_temporal/010_agent_chat/project/run_worker.py +++ /dev/null @@ -1,34 +0,0 @@ -import asyncio - -from project.workflow import At010AgentChatWorkflow -from agentex.lib.utils.debug import setup_debug_if_enabled -from agentex.lib.utils.logging import make_logger -from agentex.lib.environment_variables import EnvironmentVariables -from agentex.lib.core.temporal.activities import get_all_activities -from agentex.lib.core.temporal.workers.worker import AgentexWorker - -environment_variables = EnvironmentVariables.refresh() - -logger = make_logger(__name__) - - -async def main(): - # Setup debug mode if enabled - setup_debug_if_enabled() - - task_queue_name = environment_variables.WORKFLOW_TASK_QUEUE - if task_queue_name is None: - raise ValueError("WORKFLOW_TASK_QUEUE is not set") - - # Create a worker with automatic tracing - worker = AgentexWorker( - task_queue=task_queue_name, - ) - - await worker.run( - activities=get_all_activities(), - workflow=At010AgentChatWorkflow, - ) - -if __name__ == "__main__": - asyncio.run(main()) \ No newline at end of file diff --git a/examples/tutorials/10_async/10_temporal/010_agent_chat/project/workflow.py b/examples/tutorials/10_async/10_temporal/010_agent_chat/project/workflow.py deleted file mode 100644 index 3e3ac5b2..00000000 --- a/examples/tutorials/10_async/10_temporal/010_agent_chat/project/workflow.py +++ /dev/null @@ -1,276 +0,0 @@ -import os -import json -from typing import Any, Dict, List, override - -from mcp import StdioServerParameters -from agents import ModelSettings, RunContextWrapper -from dotenv import load_dotenv -from temporalio import workflow -from openai.types.shared import Reasoning - -from agentex.lib import adk -from agentex.lib.types.acp import SendEventParams, CreateTaskParams -from agentex.lib.types.tracing import SGPTracingProcessorConfig -from agentex.lib.utils.logging import make_logger -from agentex.types.text_content import TextContent -from agentex.lib.utils.model_utils import BaseModel -from agentex.lib.environment_variables import EnvironmentVariables -from agentex.lib.core.temporal.types.workflow import SignalName -from agentex.lib.core.temporal.workflows.workflow import BaseWorkflow -from agentex.lib.core.tracing.tracing_processor_manager import ( - add_tracing_processor_config, -) -from agentex.lib.core.temporal.activities.adk.providers.openai_activities import ( - FunctionTool, -) - -environment_variables = EnvironmentVariables.refresh() -load_dotenv(dotenv_path=".env") - -add_tracing_processor_config( - SGPTracingProcessorConfig( - sgp_api_key=os.environ.get("SCALE_GP_API_KEY", ""), - sgp_account_id=os.environ.get("SCALE_GP_ACCOUNT_ID", ""), - ) -) - -if not environment_variables.WORKFLOW_NAME: - raise ValueError("Environment variable WORKFLOW_NAME is not set") - -if not environment_variables.AGENT_NAME: - raise ValueError("Environment variable AGENT_NAME is not set") - -logger = make_logger(__name__) - - -class StateModel(BaseModel): - input_list: List[Dict[str, Any]] - turn_number: int - - -MCP_SERVERS = [ # The sequential-thinking server below is commented out; it is no longer needed now that the model handles reasoning itself - # StdioServerParameters( - # command="npx", - # args=["-y",
"@modelcontextprotocol/server-sequential-thinking"], - # ), - StdioServerParameters( - command="uvx", - args=["openai-websearch-mcp"], - env={"OPENAI_API_KEY": os.environ.get("OPENAI_API_KEY", "")}, - ), -] - - -async def calculator(context: RunContextWrapper, args: str) -> str: # noqa: ARG001 - """ - Simple calculator that can perform basic arithmetic operations. - - Args: - context: The run context wrapper - args: JSON string containing the operation and operands - - Returns: - String representation of the calculation result - """ - try: - # Parse the JSON arguments - parsed_args = json.loads(args) - operation = parsed_args.get("operation") - a = parsed_args.get("a") - b = parsed_args.get("b") - - if operation is None or a is None or b is None: - return ( - "Error: Missing required parameters. " - "Please provide 'operation', 'a', and 'b'." - ) - - # Convert to numbers - try: - a = float(a) - b = float(b) - except (ValueError, TypeError): - return "Error: 'a' and 'b' must be valid numbers." - - # Perform the calculation - if operation == "add": - result = a + b - elif operation == "subtract": - result = a - b - elif operation == "multiply": - result = a * b - elif operation == "divide": - if b == 0: - return "Error: Division by zero is not allowed." - result = a / b - else: - supported_ops = "add, subtract, multiply, divide" - return ( - f"Error: Unknown operation '{operation}'. " - f"Supported operations: {supported_ops}." - ) - - # Format the result nicely - if result == int(result): - return f"The result of {a} {operation} {b} is {int(result)}" - else: - formatted = f"{result:.6f}".rstrip("0").rstrip(".") - return f"The result of {a} {operation} {b} is {formatted}" - - except json.JSONDecodeError: - return "Error: Invalid JSON format in arguments." - except Exception as e: - return f"Error: An unexpected error occurred: {str(e)}" - - -# Create the calculator tool -CALCULATOR_TOOL = FunctionTool( - name="calculator", - description=( - "Performs basic arithmetic operations (add, subtract, multiply, divide) " - "on two numbers." - ), - params_json_schema={ - "type": "object", - "properties": { - "operation": { - "type": "string", - "enum": ["add", "subtract", "multiply", "divide"], - "description": "The arithmetic operation to perform", - }, - "a": {"type": "number", "description": "The first number"}, - "b": {"type": "number", "description": "The second number"}, - }, - "required": ["operation", "a", "b"], - "additionalProperties": False, - }, - strict_json_schema=True, - on_invoke_tool=calculator, -) - - -@workflow.defn(name=environment_variables.WORKFLOW_NAME) -class At010AgentChatWorkflow(BaseWorkflow): - """ - Minimal async workflow template for AgentEx Temporal agents. 
- """ - - def __init__(self): - super().__init__(display_name=environment_variables.AGENT_NAME) - self._complete_task = False - self._state: StateModel | None = None - - @workflow.signal(name=SignalName.RECEIVE_EVENT) - @override - async def on_task_event_send(self, params: SendEventParams) -> None: - logger.info(f"Received task message instruction: {params}") - - if not params.event.content: - return - if params.event.content.type != "text": - raise ValueError(f"Expected text message, got {params.event.content.type}") - - if params.event.content.author != "user": - raise ValueError( - f"Expected user message, got {params.event.content.author}" - ) - - if self._state is None: - raise ValueError("State is not initialized") - - # Increment the turn number - self._state.turn_number += 1 - # Add the new user message to the message history - self._state.input_list.append( - {"role": "user", "content": params.event.content.content} - ) - - async with adk.tracing.span( - trace_id=params.task.id, - name=f"Turn {self._state.turn_number}", - input=self._state, - ) as span: - # Echo back the user's message so it shows up in the UI. This is not done by default so the agent developer has full control over what is shown to the user. - await adk.messages.create( - task_id=params.task.id, - trace_id=params.task.id, - content=params.event.content, - parent_span_id=span.id if span else None, - ) - - if not os.environ.get("OPENAI_API_KEY"): - await adk.messages.create( - task_id=params.task.id, - trace_id=params.task.id, - content=TextContent( - author="agent", - content=( - "Hey, sorry I'm unable to respond to your message " - "because you're running this example without an " - "OpenAI API key. Please set the OPENAI_API_KEY " - "environment variable to run this example. Do this " - "by either by adding a .env file to the project/ " - "directory or by setting the environment variable " - "in your terminal." - ), - ), - parent_span_id=span.id if span else None, - ) - - # Call an LLM to respond to the user's message - # When send_as_agent_task_message=True, returns a TaskMessage - run_result = await adk.providers.openai.run_agent_streamed_auto_send( - task_id=params.task.id, - trace_id=params.task.id, - input_list=self._state.input_list, - mcp_server_params=MCP_SERVERS, - agent_name="Tool-Enabled Assistant", - agent_instructions=( - "You are a helpful assistant that can answer questions " - "using various tools. You have access to sequential " - "thinking and web search capabilities through MCP servers, " - "as well as a calculator tool for performing basic " - "arithmetic operations. Use these tools when appropriate " - "to provide accurate and well-reasoned responses." 
- ), - parent_span_id=span.id if span else None, - model="gpt-5", - model_settings=ModelSettings( - # Include reasoning items in the response (IDs, summaries) - # response_include=["reasoning.encrypted_content"], - # Ask the model to include a short reasoning summary - reasoning=Reasoning(effort="medium", summary="detailed"), - ), - # tools=[CALCULATOR_TOOL], - ) - if self._state: - # Update the state with the final input list if available - final_list = getattr(run_result, "final_input_list", None) - if final_list is not None: - self._state.input_list = final_list - - # Set the span output to the state for the next turn - if span and self._state: - span.output = self._state.model_dump() - - @workflow.run - @override - async def on_task_create(self, params: CreateTaskParams) -> None: - logger.info(f"Received task create params: {params}") - - # 1. Initialize the state. You can either do this here or in the __init__ method. - # This function is triggered whenever a client creates a task for this agent. - # It is not re-triggered when a new event is sent to the task. - self._state = StateModel( - input_list=[], - turn_number=0, - ) - - # 2. Wait for the task to be completed indefinitely. If we don't do this, the workflow will close as soon as this function returns. Temporal can run hundreds of millions of workflows in parallel, so you don't need to worry about too many workflows running at once. - - # Thus, if you want this agent to field events indefinitely (or for a long time) you need to wait for a condition to be met. - - await workflow.wait_condition( - lambda: self._complete_task, - timeout=None, # Set a timeout if you want to prevent the task from running indefinitely. Generally this is not needed; only set one if you have a specific reason to do so. - ) diff --git a/examples/tutorials/10_async/10_temporal/010_agent_chat/pyproject.toml b/examples/tutorials/10_async/10_temporal/010_agent_chat/pyproject.toml deleted file mode 100644 index 799fa5fe..00000000 --- a/examples/tutorials/10_async/10_temporal/010_agent_chat/pyproject.toml +++ /dev/null @@ -1,35 +0,0 @@ -[build-system] -requires = ["hatchling"] -build-backend = "hatchling.build" - -[project] -name = "at010-agent-chat" -version = "0.1.0" -description = "An AgentEx agent that streams multi-turn tool-enabled chat with tracing" -readme = "README.md" -requires-python = ">=3.12" -dependencies = [ - "agentex-sdk", - "debugpy>=1.8.15", - "scale-gp", - "yaspin>=3.1.0", -] - -[project.optional-dependencies] -dev = [ - "pytest", - "black", - "isort", - "flake8", -] - -[tool.hatch.build.targets.wheel] -packages = ["project"] - -[tool.black] -line-length = 88 -target-version = ['py312'] - -[tool.isort] -profile = "black" -line_length = 88 diff --git a/examples/tutorials/10_async/10_temporal/010_agent_chat/tests/test_agent.py b/examples/tutorials/10_async/10_temporal/010_agent_chat/tests/test_agent.py deleted file mode 100644 index 6eb03f72..00000000 --- a/examples/tutorials/10_async/10_temporal/010_agent_chat/tests/test_agent.py +++ /dev/null @@ -1,285 +0,0 @@ -""" -Sample tests for AgentEx Temporal agent with OpenAI Agents SDK integration. 
- -This test suite demonstrates how to test agents that integrate: -- OpenAI Agents SDK with streaming (via Temporal workflows) -- MCP (Model Context Protocol) servers for tool access -- Multi-turn conversations with state management -- Tool usage (calculator and web search via MCP) - -Key differences from base async (040_other_sdks): -1. Temporal Integration: Uses Temporal workflows for durable execution -2. State Management: State is managed within the workflow instance -3. No Race Conditions: Temporal ensures sequential event processing -4. Durable Execution: Workflow state survives restarts - -To run these tests: -1. Make sure the agent is running (via docker-compose or `agentex agents run`) -2. Set the AGENTEX_API_BASE_URL environment variable if not using default -3. Ensure OPENAI_API_KEY is set in the environment -4. Run: pytest test_agent.py -v - -Configuration: -- AGENTEX_API_BASE_URL: Base URL for the AgentEx server (default: http://localhost:5003) -- AGENT_NAME: Name of the agent to test (default: at010-agent-chat) -""" - -import os -import uuid -import asyncio - -import pytest -import pytest_asyncio -from test_utils.async_utils import ( - stream_agent_response, - send_event_and_poll_yielding, -) - -from agentex import AsyncAgentex -from agentex.types import TaskMessage, TextContent -from agentex.types.agent_rpc_params import ParamsCreateTaskRequest -from agentex.types.agent_rpc_result import StreamTaskMessageDone, StreamTaskMessageFull -from agentex.types.text_content_param import TextContentParam - -# Configuration from environment variables -AGENTEX_API_BASE_URL = os.environ.get("AGENTEX_API_BASE_URL", "http://localhost:5003") -AGENT_NAME = os.environ.get("AGENT_NAME", "at010-agent-chat") - - -@pytest_asyncio.fixture -async def client(): - """Create an AsyncAgentex client instance for testing.""" - client = AsyncAgentex(base_url=AGENTEX_API_BASE_URL) - yield client - await client.close() - - -@pytest.fixture -def agent_name(): - """Return the agent name for testing.""" - return AGENT_NAME - - -@pytest_asyncio.fixture -async def agent_id(client, agent_name): - """Retrieve the agent ID based on the agent name.""" - agents = await client.agents.list() - for agent in agents: - if agent.name == agent_name: - return agent.id - raise ValueError(f"Agent with name {agent_name} not found.") - - -class TestNonStreamingEvents: - """Test non-streaming event sending and polling with OpenAI Agents SDK.""" - - @pytest.mark.asyncio - async def test_send_event_and_poll_simple_query(self, client: AsyncAgentex, agent_id: str): - """Test sending a simple event and polling for the response (no tool use).""" - # Create a task for this conversation - task_response = await client.agents.create_task(agent_id, params=ParamsCreateTaskRequest(name=uuid.uuid1().hex)) - task = task_response.result - assert task is not None - - # Wait for workflow to initialize - await asyncio.sleep(1) - - # Send a simple message that shouldn't require tool use - user_message = "Hello! Please introduce yourself briefly." 
- messages = [] - async for message in send_event_and_poll_yielding( - client=client, - agent_id=agent_id, - task_id=task.id, - user_message=user_message, - timeout=30, - sleep_interval=1.0, - ): - assert isinstance(message, TaskMessage) - messages.append(message) - - if len(messages) == 1: - assert message.content == TextContent( - author="user", - content=user_message, - type="text", - ) - break - - @pytest.mark.asyncio - async def test_send_event_and_poll_with_calculator(self, client: AsyncAgentex, agent_id: str): - """Test sending an event that triggers calculator tool usage and polling for the response.""" - # Create a task for this conversation - task_response = await client.agents.create_task(agent_id, params=ParamsCreateTaskRequest(name=uuid.uuid1().hex)) - task = task_response.result - assert task is not None - - # Wait for workflow to initialize - await asyncio.sleep(1) - - # Send a message that could trigger the calculator tool (though with reasoning, it may not need it) - user_message = "What is 15 multiplied by 37?" - has_final_agent_response = False - - async for message in send_event_and_poll_yielding( - client=client, - agent_id=agent_id, - task_id=task.id, - user_message=user_message, - timeout=60, # Longer timeout for tool use - sleep_interval=1.0, - ): - assert isinstance(message, TaskMessage) - if message.content and message.content.type == "text" and message.content.author == "agent": - # Check that the answer contains 555 (15 * 37) - if "555" in message.content.content: - has_final_agent_response = True - break - - assert has_final_agent_response, "Did not receive final agent text response with correct answer" - - @pytest.mark.asyncio - async def test_multi_turn_conversation(self, client: AsyncAgentex, agent_id: str): - """Test multiple turns of conversation with state preservation.""" - # Create a task for this conversation - task_response = await client.agents.create_task(agent_id, params=ParamsCreateTaskRequest(name=uuid.uuid1().hex)) - task = task_response.result - assert task is not None - - # Wait for workflow to initialize - await asyncio.sleep(1) - - # First turn - user_message_1 = "My favorite color is blue." - async for message in send_event_and_poll_yielding( - client=client, - agent_id=agent_id, - task_id=task.id, - user_message=user_message_1, - timeout=30, - sleep_interval=1.0, - ): - assert isinstance(message, TaskMessage) - if ( - message.content - and message.content.type == "text" - and message.content.author == "agent" - and message.content.content - ): - break - - # Wait a bit for state to update - await asyncio.sleep(2) - - # Second turn - reference previous context - found_response = False - user_message_2 = "What did I just tell you my favorite color was?" 
- async for message in send_event_and_poll_yielding( - client=client, - agent_id=agent_id, - task_id=task.id, - user_message=user_message_2, - timeout=30, - sleep_interval=1.0, - ): - if ( - message.content - and message.content.type == "text" - and message.content.author == "agent" - and message.content.content - ): - response_text = message.content.content.lower() - assert "blue" in response_text, f"Expected 'blue' in response but got: {response_text}" - found_response = True - break - - assert found_response, "Did not receive final agent text response with context recall" - - -class TestStreamingEvents: - """Test streaming event sending with OpenAI Agents SDK and tool usage.""" - - @pytest.mark.asyncio - async def test_send_event_and_stream_with_reasoning(self, client: AsyncAgentex, agent_id: str): - """Test streaming a simple response without tool usage.""" - # Create a task for this conversation - task_response = await client.agents.create_task(agent_id, params=ParamsCreateTaskRequest(name=uuid.uuid1().hex)) - task = task_response.result - assert task is not None - - # Wait for workflow to initialize - await asyncio.sleep(1) - - user_message = "Tell me a very short joke about programming." - - # Check for user message and agent response - user_message_found = False - agent_response_found = False - reasoning_found = False - - async def stream_messages() -> None: # noqa: ANN101 - nonlocal user_message_found, agent_response_found, reasoning_found - async for event in stream_agent_response( - client=client, - task_id=task.id, - timeout=90, # Increased timeout for CI environments - ): - msg_type = event.get("type") - if msg_type == "full": - task_message_update = StreamTaskMessageFull.model_validate(event) - if task_message_update.parent_task_message and task_message_update.parent_task_message.id: - finished_message = await client.messages.retrieve(task_message_update.parent_task_message.id) - if ( - finished_message.content - and finished_message.content.type == "text" - and finished_message.content.author == "user" - ): - user_message_found = True - elif ( - finished_message.content - and finished_message.content.type == "text" - and finished_message.content.author == "agent" - ): - agent_response_found = True - elif finished_message.content and finished_message.content.type == "reasoning": - reasoning_found = True - - # Exit early if we have what we need - if user_message_found and agent_response_found: - break - - elif msg_type == "done": - task_message_update = StreamTaskMessageDone.model_validate(event) - if task_message_update.parent_task_message and task_message_update.parent_task_message.id: - finished_message = await client.messages.retrieve(task_message_update.parent_task_message.id) - if finished_message.content and finished_message.content.type == "reasoning": - reasoning_found = True - elif ( - finished_message.content - and finished_message.content.type == "text" - and finished_message.content.author == "agent" - ): - agent_response_found = True - - # Exit early if we have what we need - if user_message_found and agent_response_found: - break - - stream_task = asyncio.create_task(stream_messages()) - - event_content = TextContentParam(type="text", author="user", content=user_message) - await client.agents.send_event(agent_id=agent_id, params={"task_id": task.id, "content": event_content}) - - # Wait for streaming to complete with timeout - try: - await asyncio.wait_for(stream_task, timeout=120) # Overall timeout for CI - except asyncio.TimeoutError: - stream_task.cancel() - 
pytest.fail("Test timed out waiting for streaming response") - - assert user_message_found, "User message not found in stream" - assert agent_response_found, "Agent response not found in stream" - - -if __name__ == "__main__": - pytest.main([__file__, "-v"]) diff --git a/examples/tutorials/10_async/10_temporal/020_state_machine/.dockerignore b/examples/tutorials/10_async/10_temporal/020_state_machine/.dockerignore deleted file mode 100644 index c2d7fca4..00000000 --- a/examples/tutorials/10_async/10_temporal/020_state_machine/.dockerignore +++ /dev/null @@ -1,43 +0,0 @@ -# Python -__pycache__/ -*.py[cod] -*$py.class -*.so -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -*.egg-info/ -.installed.cfg -*.egg - -# Environments -.env** -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# IDE -.idea/ -.vscode/ -*.swp -*.swo - -# Git -.git -.gitignore - -# Misc -.DS_Store diff --git a/examples/tutorials/10_async/10_temporal/020_state_machine/Dockerfile b/examples/tutorials/10_async/10_temporal/020_state_machine/Dockerfile deleted file mode 100644 index 59051b4b..00000000 --- a/examples/tutorials/10_async/10_temporal/020_state_machine/Dockerfile +++ /dev/null @@ -1,60 +0,0 @@ -# syntax=docker/dockerfile:1.3 -FROM python:3.12-slim -COPY --from=ghcr.io/astral-sh/uv:0.6.4 /uv /uvx /bin/ - -# Install system dependencies -RUN apt-get update && apt-get install -y \ - htop \ - vim \ - curl \ - tar \ - python3-dev \ - postgresql-client \ - build-essential \ - libpq-dev \ - gcc \ - cmake \ - netcat-openbsd \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - -# Install tctl (Temporal CLI) -RUN curl -L https://github.com/temporalio/tctl/releases/download/v1.18.1/tctl_1.18.1_linux_arm64.tar.gz -o /tmp/tctl.tar.gz && \ - tar -xzf /tmp/tctl.tar.gz -C /usr/local/bin && \ - chmod +x /usr/local/bin/tctl && \ - rm /tmp/tctl.tar.gz - -RUN uv pip install --system --upgrade pip setuptools wheel - -ENV UV_HTTP_TIMEOUT=1000 - -# Copy pyproject.toml and README.md to install dependencies -COPY 10_async/10_temporal/020_state_machine/pyproject.toml /app/020_state_machine/pyproject.toml -COPY 10_async/10_temporal/020_state_machine/README.md /app/020_state_machine/README.md - -WORKDIR /app/020_state_machine - -# Copy the project code -COPY 10_async/10_temporal/020_state_machine/project /app/020_state_machine/project - -# Copy the test files -COPY 10_async/10_temporal/020_state_machine/tests /app/020_state_machine/tests - -# Copy shared test utilities -COPY test_utils /app/test_utils - -# Install the required Python packages with dev dependencies (includes pytest) -RUN uv pip install --system .[dev] pytest-asyncio httpx - -WORKDIR /app/020_state_machine - -ENV PYTHONPATH=/app - -# Set test environment variables -ENV AGENT_NAME=at020-state-machine - -# Run the ACP server using uvicorn -CMD ["uvicorn", "project.acp:acp", "--host", "0.0.0.0", "--port", "8000"] - -# When we deploy the worker, we will replace the CMD with the following -# CMD ["python", "-m", "run_worker"] diff --git a/examples/tutorials/10_async/10_temporal/020_state_machine/README.md b/examples/tutorials/10_async/10_temporal/020_state_machine/README.md deleted file mode 100644 index 49814000..00000000 --- a/examples/tutorials/10_async/10_temporal/020_state_machine/README.md +++ /dev/null @@ -1,70 +0,0 @@ -# [Temporal] State Machine - -Build complex multi-state workflows using state machines with Temporal. 
This tutorial shows a "deep research" agent that transitions through states: clarify query โ†’ wait for input โ†’ perform research โ†’ wait for follow-ups. - -## What You'll Learn -- Building state machines with Temporal sub-workflows -- Explicit state transitions and phase management -- When to use state machines vs simple workflows -- Handling complex multi-phase agent behaviors - -## Prerequisites -- Development environment set up (see [main repo README](https://github.com/scaleapi/scale-agentex)) -- Backend services running: `make dev` from repository root -- Temporal UI available at http://localhost:8233 -- Understanding of Temporal workflows (see [010_agent_chat](../010_agent_chat/)) - -## Quick Start - -```bash -cd examples/tutorials/10_async/10_temporal/020_state_machine -uv run agentex agents run --manifest manifest.yaml -``` - -**Monitor:** Open Temporal UI at http://localhost:8233 to see state transitions and sub-workflows. - -## Architecture - -The workflow uses three sub-workflows, each handling a specific state: -- `ClarifyUserQueryWorkflow` - Asks follow-up questions to understand user intent -- `WaitingForUserInputWorkflow` - Waits for user responses -- `PerformingDeepResearchWorkflow` - Executes the research with full context - -State transitions are explicit and tracked, with each sub-workflow handling its own logic. - -## Why State Machines Matter - -Complex agents often need to: -- Wait for user input at specific points -- Branch behavior based on conditions -- Orchestrate multiple steps with clear transitions -- Resume at the exact state after failures - -State machines provide this structure. Each state is a sub-workflow, and Temporal ensures transitions are durable and resumable. - -## Key Pattern - -```python -self.state_machine = DeepResearchStateMachine( - initial_state=DeepResearchState.WAITING_FOR_USER_INPUT, - states=[ - State(name=DeepResearchState.CLARIFYING, workflow=ClarifyWorkflow()), - State(name=DeepResearchState.RESEARCHING, workflow=ResearchWorkflow()), - ] -) - -await self.state_machine.transition(DeepResearchState.RESEARCHING) -``` - -This is an advanced pattern - only needed when your agent has complex, multi-phase behavior. - -## When to Use -- Multi-step processes with clear phases -- Workflows that wait for user input at specific points -- Operations with branching logic based on state -- Complex coordination patterns requiring explicit transitions - -## Why This Matters -State machines provide structure for complex agent behaviors. While simple agents can use basic workflows, complex agents benefit from explicit state management. Temporal ensures state transitions are durable and resumable, even after failures. 
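Each state's logic lives in a `StateWorkflow` whose `execute` method returns the name of the next state; the state machine then runs whichever sub-workflow is registered for that state until the terminal condition is met. Below is a condensed sketch of this pattern, based on `project/workflows/deep_research/clarify_user_query.py` (the LLM call that generates the follow-up question is elided, and the class name is shortened to match the snippet above):

```python
from typing import Optional, override

from project.state_machines.deep_research import DeepResearchData, DeepResearchState

from agentex.lib.sdk.state_machine.state_machine import StateMachine
from agentex.lib.sdk.state_machine.state_workflow import StateWorkflow


class ClarifyWorkflow(StateWorkflow):
    """Ask follow-up questions until enough context has been gathered."""

    @override
    async def execute(
        self,
        state_machine: StateMachine,
        state_machine_data: Optional[DeepResearchData] = None,
    ) -> str:
        # Nothing left to clarify: hand off to the research phase
        if state_machine_data is None or state_machine_data.n_follow_up_questions_to_ask == 0:
            return DeepResearchState.PERFORMING_DEEP_RESEARCH

        # Ask one follow-up question here (LLM call elided), then
        # return control to the user to collect their answer
        state_machine_data.n_follow_up_questions_to_ask -= 1
        return DeepResearchState.WAITING_FOR_USER_INPUT
```

Because `execute` returns a plain state name, every transition is explicit and shows up as a traceable step in the Temporal UI.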
- -**Next:** [030_custom_activities](../030_custom_activities/) - Extend workflows with custom activities diff --git a/examples/tutorials/10_async/10_temporal/020_state_machine/dev.ipynb b/examples/tutorials/10_async/10_temporal/020_state_machine/dev.ipynb deleted file mode 100644 index 8f9f4dff..00000000 --- a/examples/tutorials/10_async/10_temporal/020_state_machine/dev.ipynb +++ /dev/null @@ -1,167 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "from agentex import Agentex\n", - "\n", - "client = Agentex(base_url=\"http://localhost:5003\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1", - "metadata": {}, - "outputs": [], - "source": [ - "AGENT_NAME = \"at020-state-machine\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2", - "metadata": {}, - "outputs": [], - "source": [ - "# (REQUIRED) Create a new task. For Agentic agents, you must create a task for messages to be associated with.\n", - "import uuid\n", - "\n", - "rpc_response = client.agents.create_task(\n", - " agent_name=AGENT_NAME,\n", - " params={\n", - " \"name\": f\"{str(uuid.uuid4())[:8]}-task\",\n", - " \"params\": {}\n", - " }\n", - ")\n", - "\n", - "task = rpc_response.result\n", - "print(task)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": {}, - "outputs": [], - "source": [ - "# Send an event to the agent\n", - "\n", - "# The response is expected to be a list of TaskMessage objects, which is a union of the following types:\n", - "# - TextContent: A message with just text content \n", - "# - DataContent: A message with JSON-serializable data content\n", - "# - ToolRequestContent: A message with a tool request, which contains a JSON-serializable request to call a tool\n", - "# - ToolResponseContent: A message with a tool response, which contains response object from a tool call in its content\n", - "\n", - "# When processing the message/send response, if you are expecting more than TextContent, such as DataContent, ToolRequestContent, or ToolResponseContent, you can process them as well\n", - "\n", - "rpc_response = client.agents.send_event(\n", - " agent_name=AGENT_NAME,\n", - " params={\n", - " \"content\": {\"type\": \"text\", \"author\": \"user\", \"content\": \"Hello tell me the latest news about AI and AI startups\"},\n", - " \"task_id\": task.id,\n", - " }\n", - ")\n", - "\n", - "event = rpc_response.result\n", - "print(event)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4", - "metadata": {}, - "outputs": [], - "source": [ - "# Subscribe to the async task messages produced by the agent\n", - "from agentex.lib.utils.dev_tools import subscribe_to_async_task_messages\n", - "\n", - "task_messages = subscribe_to_async_task_messages(\n", - " client=client,\n", - " task=task, \n", - " only_after_timestamp=event.created_at, \n", - " print_messages=True,\n", - " rich_print=True,\n", - " timeout=5,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5", - "metadata": {}, - "outputs": [], - "source": [ - "# Send a follow up event to the agent in response to the agent's follow up question\n", - "\n", - "rpc_response = client.agents.send_event(\n", - " agent_name=AGENT_NAME,\n", - " params={\n", - " \"content\": {\"type\": \"text\", \"author\": \"user\", \"content\": \"I want to know what viral news came up and which startups failed, got acquired, or became very successful or 
popular in the last 3 months\"},\n", - " \"task_id\": task.id,\n", - " }\n", - ")\n", - "\n", - "event = rpc_response.result\n", - "print(event)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6", - "metadata": {}, - "outputs": [], - "source": [ - "# Subscribe to the async task messages produced by the agent\n", - "from agentex.lib.utils.dev_tools import subscribe_to_async_task_messages\n", - "\n", - "task_messages = subscribe_to_async_task_messages(\n", - " client=client,\n", - " task=task, \n", - " only_after_timestamp=event.created_at, \n", - " print_messages=True,\n", - " rich_print=True,\n", - " timeout=30, # Notice the longer timeout to give time for the agent to respond\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.11" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/examples/tutorials/10_async/10_temporal/020_state_machine/manifest.yaml b/examples/tutorials/10_async/10_temporal/020_state_machine/manifest.yaml deleted file mode 100644 index 8b2bca14..00000000 --- a/examples/tutorials/10_async/10_temporal/020_state_machine/manifest.yaml +++ /dev/null @@ -1,138 +0,0 @@ -# Agent Manifest Configuration -# --------------------------- -# This file defines how your agent should be built and deployed. - -# Build Configuration -# ------------------ -# The build config defines what gets packaged into your agent's Docker image. -# This same configuration is used whether building locally or remotely. -# -# When building: -# 1. All files from include_paths are collected into a build context -# 2. The context is filtered by dockerignore rules -# 3. The Dockerfile uses this context to build your agent's image -# 4. 
The image is pushed to a registry and used to run your agent -build: - context: - # Root directory for the build context - root: ../../../ # Up to tutorials level to include test_utils - - # Paths to include in the Docker build context - # Must include: - # - Your agent's directory (your custom agent code) - # These paths are collected and sent to the Docker daemon for building - include_paths: - - 10_async/10_temporal/020_state_machine - - test_utils - - # Path to your agent's Dockerfile - # This defines how your agent's image is built from the context - # Relative to the root directory - dockerfile: 10_async/10_temporal/020_state_machine/Dockerfile - - # Path to your agent's .dockerignore - # Filters unnecessary files from the build context - # Helps keep build context small and builds fast - dockerignore: 10_async/10_temporal/020_state_machine/.dockerignore - - -# Local Development Configuration -# ----------------------------- -# Only used when running the agent locally -local_development: - agent: - port: 8000 # Port where your local ACP server is running - host_address: host.docker.internal # Host address for Docker networking (host.docker.internal for Docker, localhost for direct) - - # File paths for local development (relative to this manifest.yaml) - paths: - # Path to ACP server file - # Examples: - # project/acp.py (standard) - # src/server.py (custom structure) - # ../shared/acp.py (shared across projects) - # /absolute/path/acp.py (absolute path) - acp: project/acp.py - - # Path to temporal worker file - # Examples: - # project/run_worker.py (standard) - # workers/temporal.py (custom structure) - # ../shared/worker.py (shared across projects) - worker: project/run_worker.py - - -# Agent Configuration -# ----------------- -agent: - # Type of agent - either sync or async - acp_type: async - - # Unique name for your agent - # Used for task routing and monitoring - name: at020-state-machine - - # Description of what your agent does - # Helps with documentation and discovery - description: An AgentEx agent that demonstrates how to use state machines to manage complex async workflows - - # Temporal workflow configuration - # This enables your agent to run as a Temporal workflow for long-running tasks - temporal: - enabled: true - workflows: - # Name of the workflow class - # Must match the @workflow.defn name in your workflow.py - - name: at020-state-machine - - # Queue name for task distribution - # Used by Temporal to route tasks to your agent - # Convention: _task_queue - queue_name: 020_state_machine_queue - - # Optional: Credentials mapping - # Maps Kubernetes secrets to environment variables - # Common credentials include: - # credentials: - # - env_var_name: OPENAI_API_KEY - # secret_name: openai-api-key - # secret_key: api-key - - # Optional: Set environment variables for running your agent locally as well - # as for deployment later on - # env: - # OPENAI_API_KEY: "" - # OPENAI_BASE_URL: "" - # OPENAI_ORG_ID: "" - - -# Deployment Configuration -# ----------------------- -# Configuration for deploying your agent to Kubernetes clusters -deployment: - # Container image configuration - image: - repository: "" # Update with your container registry - tag: "latest" # Default tag, should be versioned in production - - imagePullSecrets: - - name: my-registry-secret # Update with your image pull secret name - - # Global deployment settings that apply to all clusters - # These can be overridden using --override-file with custom configuration files - global: - agent: - name: 
"at020-state-machine" - description: "An AgentEx agentthat demonstrates how to uose state machines to manage complex async workflows" - - # Default replica count - replicaCount: 1 - - # Default resource requirements - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "1000m" - memory: "2Gi" \ No newline at end of file diff --git a/examples/tutorials/10_async/10_temporal/020_state_machine/project/__init__.py b/examples/tutorials/10_async/10_temporal/020_state_machine/project/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/examples/tutorials/10_async/10_temporal/020_state_machine/project/acp.py b/examples/tutorials/10_async/10_temporal/020_state_machine/project/acp.py deleted file mode 100644 index 744068d7..00000000 --- a/examples/tutorials/10_async/10_temporal/020_state_machine/project/acp.py +++ /dev/null @@ -1,30 +0,0 @@ -import os - -from agentex.lib.types.fastacp import TemporalACPConfig -from agentex.lib.sdk.fastacp.fastacp import FastACP - -# Create the ACP server -acp = FastACP.create( - acp_type="async", - config=TemporalACPConfig( - # When deployed to the cluster, the Temporal address will automatically be set to the cluster address - # For local development, we set the address manually to talk to the local Temporal service set up via docker compose - type="temporal", - temporal_address=os.getenv("TEMPORAL_ADDRESS", "localhost:7233") - ) -) - - -# Notice that we don't need to register any handlers when we use type="temporal" -# If you look at the code in agentex.sdk.fastacp.impl.temporal_acp -# You can see that these handlers are automatically registered when the ACP is created - -# @acp.on_task_create -# This will be handled by the method in your workflow that is decorated with @workflow.run - -# @acp.on_task_event_send -# This will be handled by the method in your workflow that is decorated with @workflow.signal(name=SignalName.RECEIVE_MESSAGE) - -# @acp.on_task_cancel -# This does not need to be handled by your workflow. 
-# It is automatically handled by the temporal client which cancels the workflow directly \ No newline at end of file diff --git a/examples/tutorials/10_async/10_temporal/020_state_machine/project/run_worker.py b/examples/tutorials/10_async/10_temporal/020_state_machine/project/run_worker.py deleted file mode 100644 index 2f0059d5..00000000 --- a/examples/tutorials/10_async/10_temporal/020_state_machine/project/run_worker.py +++ /dev/null @@ -1,34 +0,0 @@ -import asyncio - -from project.workflow import At020StateMachineWorkflow -from agentex.lib.utils.debug import setup_debug_if_enabled -from agentex.lib.utils.logging import make_logger -from agentex.lib.environment_variables import EnvironmentVariables -from agentex.lib.core.temporal.activities import get_all_activities -from agentex.lib.core.temporal.workers.worker import AgentexWorker - -environment_variables = EnvironmentVariables.refresh() - -logger = make_logger(__name__) - - -async def main(): - # Setup debug mode if enabled - setup_debug_if_enabled() - - task_queue_name = environment_variables.WORKFLOW_TASK_QUEUE - if task_queue_name is None: - raise ValueError("WORKFLOW_TASK_QUEUE is not set") - - # Create a worker with automatic tracing - worker = AgentexWorker( - task_queue=task_queue_name, - ) - - await worker.run( - activities=get_all_activities(), - workflow=At020StateMachineWorkflow, - ) - -if __name__ == "__main__": - asyncio.run(main()) \ No newline at end of file diff --git a/examples/tutorials/10_async/10_temporal/020_state_machine/project/state_machines/deep_research.py b/examples/tutorials/10_async/10_temporal/020_state_machine/project/state_machines/deep_research.py deleted file mode 100644 index d1c4df00..00000000 --- a/examples/tutorials/10_async/10_temporal/020_state_machine/project/state_machines/deep_research.py +++ /dev/null @@ -1,41 +0,0 @@ -from enum import Enum -from typing import Dict, List, Optional, override - -from pydantic import BaseModel - -from agentex.types.span import Span -from agentex.lib.sdk.state_machine import StateMachine - - -class DeepResearchState(str, Enum): - """States for the deep research workflow.""" - CLARIFYING_USER_QUERY = "clarifying_user_query" - PERFORMING_DEEP_RESEARCH = "performing_deep_research" - WAITING_FOR_USER_INPUT = "waiting_for_user_input" - COMPLETED = "completed" - FAILED = "failed" - - -class DeepResearchData(BaseModel): - """Data model for the deep research state machine - everything is one continuous research report.""" - task_id: Optional[str] = None - current_span: Optional[Span] = None - current_turn: int = 1 - - # Research report data - user_query: str = "" - follow_up_questions: List[str] = [] - follow_up_responses: List[str] = [] - n_follow_up_questions_to_ask: int = 1 - agent_input_list: List[Dict[str, str]] = [] - research_report: str = "" - research_iteration: int = 0 - - -class DeepResearchStateMachine(StateMachine[DeepResearchData]): - """State machine for the deep research workflow.""" - - @override - async def terminal_condition(self) -> bool: - """Check if the state machine has reached a terminal state.""" - return self.get_current_state() == DeepResearchState.COMPLETED diff --git a/examples/tutorials/10_async/10_temporal/020_state_machine/project/workflow.py b/examples/tutorials/10_async/10_temporal/020_state_machine/project/workflow.py deleted file mode 100644 index aa88de68..00000000 --- a/examples/tutorials/10_async/10_temporal/020_state_machine/project/workflow.py +++ /dev/null @@ -1,154 +0,0 @@ -import asyncio -from typing import override - 
-from temporalio import workflow - -from agentex.lib import adk -from agentex.lib.types.acp import SendEventParams, CreateTaskParams -from agentex.lib.utils.logging import make_logger -from agentex.types.text_content import TextContent -from agentex.lib.environment_variables import EnvironmentVariables -from agentex.lib.sdk.state_machine.state import State -from project.state_machines.deep_research import DeepResearchData, DeepResearchState, DeepResearchStateMachine -from agentex.lib.core.temporal.types.workflow import SignalName -from agentex.lib.core.temporal.workflows.workflow import BaseWorkflow -from project.workflows.deep_research.clarify_user_query import ClarifyUserQueryWorkflow -from project.workflows.deep_research.waiting_for_user_input import WaitingForUserInputWorkflow -from project.workflows.deep_research.performing_deep_research import PerformingDeepResearchWorkflow - -environment_variables = EnvironmentVariables.refresh() - -if environment_variables.WORKFLOW_NAME is None: - raise ValueError("Environment variable WORKFLOW_NAME is not set") - -if environment_variables.AGENT_NAME is None: - raise ValueError("Environment variable AGENT_NAME is not set") - - -logger = make_logger(__name__) - -@workflow.defn(name=environment_variables.WORKFLOW_NAME) -class At020StateMachineWorkflow(BaseWorkflow): - """ - Minimal async workflow template for AgentEx Temporal agents. - """ - def __init__(self): - super().__init__(display_name=environment_variables.AGENT_NAME) - self.state_machine = DeepResearchStateMachine( - initial_state=DeepResearchState.WAITING_FOR_USER_INPUT, - states=[ - State(name=DeepResearchState.CLARIFYING_USER_QUERY, workflow=ClarifyUserQueryWorkflow()), - State(name=DeepResearchState.WAITING_FOR_USER_INPUT, workflow=WaitingForUserInputWorkflow()), - State(name=DeepResearchState.PERFORMING_DEEP_RESEARCH, workflow=PerformingDeepResearchWorkflow()), - ], - state_machine_data=DeepResearchData(), - trace_transitions=True - ) - - @override - @workflow.signal(name=SignalName.RECEIVE_EVENT) - async def on_task_event_send(self, params: SendEventParams) -> None: - deep_research_data = self.state_machine.get_state_machine_data() - task = params.task - message = params.event.content - - # If waiting for user input, handle the message - if self.state_machine.get_current_state() == DeepResearchState.WAITING_FOR_USER_INPUT: - if not deep_research_data.user_query: - # First time - initialize research data - deep_research_data.user_query = message.content - deep_research_data.current_turn += 1 - - if not deep_research_data.current_span: - deep_research_data.current_span = await adk.tracing.start_span( - trace_id=task.id, - name=f"Turn {deep_research_data.current_turn}", - input={ - "task_id": task.id, - "message": message.content, - } - ) - else: - # Check if we're in the middle of follow-up questions - if deep_research_data.n_follow_up_questions_to_ask > 0: - # User is responding to a follow-up question - # Safely extract content from message - content_text = "" - if hasattr(message, 'content'): - content_val = getattr(message, 'content', '') - if isinstance(content_val, str): - content_text = content_val - deep_research_data.follow_up_responses.append(content_text) - - # Add the Q&A to the agent input list as context - if deep_research_data.follow_up_questions: - last_question = deep_research_data.follow_up_questions[-1] - qa_context = f"Q: {last_question}\nA: {message.content}" - deep_research_data.agent_input_list.append({ - "role": "user", - "content": qa_context - }) - else: - # 
User is asking a new follow-up question about the same research topic - # Add the user's follow-up question to the agent input list as context - if deep_research_data.agent_input_list: - # Add user's follow-up question to the conversation - deep_research_data.agent_input_list.append({ - "role": "user", - "content": f"Additional question: {message.content}" - }) - else: - # Initialize agent input list with the follow-up question - deep_research_data.agent_input_list = [{ - "role": "user", - "content": f"Original query: {deep_research_data.user_query}\nAdditional question: {message.content}" - }] - - deep_research_data.current_turn += 1 - - if not deep_research_data.current_span: - deep_research_data.current_span = await adk.tracing.start_span( - trace_id=task.id, - name=f"Turn {deep_research_data.current_turn}", - input={ - "task_id": task.id, - "message": message.content, - } - ) - - # Always go to clarifying user query to ask follow-up questions - # This ensures we gather more context before doing deep research - await self.state_machine.transition(DeepResearchState.CLARIFYING_USER_QUERY) - - # Echo back the user's message - # Safely extract content from message for display - message_content = "" - if hasattr(message, 'content'): - content_val = getattr(message, 'content', '') - if isinstance(content_val, str): - message_content = content_val - - await adk.messages.create( - task_id=task.id, - content=TextContent( - author="user", - content=message_content, - ), - trace_id=task.id, - parent_span_id=deep_research_data.current_span.id if deep_research_data.current_span else None, - ) - - @override - @workflow.run - async def on_task_create(self, params: CreateTaskParams) -> None: - task = params.task - - self.state_machine.set_task_id(task.id) - deep_research_data = self.state_machine.get_state_machine_data() - deep_research_data.task_id = task.id - - try: - await self.state_machine.run() - except asyncio.CancelledError as error: - logger.warning(f"Task canceled by user: {task.id}") - raise error \ No newline at end of file diff --git a/examples/tutorials/10_async/10_temporal/020_state_machine/project/workflows/deep_research/clarify_user_query.py b/examples/tutorials/10_async/10_temporal/020_state_machine/project/workflows/deep_research/clarify_user_query.py deleted file mode 100644 index c8e756b2..00000000 --- a/examples/tutorials/10_async/10_temporal/020_state_machine/project/workflows/deep_research/clarify_user_query.py +++ /dev/null @@ -1,89 +0,0 @@ -from typing import Optional, override - -from project.state_machines.deep_research import DeepResearchData, DeepResearchState - -from agentex.lib import adk -from agentex.lib.utils.logging import make_logger -from agentex.lib.types.llm_messages import LLMConfig, UserMessage, SystemMessage -from agentex.lib.sdk.state_machine.state_machine import StateMachine -from agentex.lib.sdk.state_machine.state_workflow import StateWorkflow - -logger = make_logger(__name__) - - -FOLLOW_UP_QUESTION_TEMPLATE = """ -Given the following research query from the user, ask a follow up question to clarify the research direction. - -{{ user_query }} - - -{% if follow_up_questions|length > 0 %} -The following are follow up questions and answers that have been asked/given so far: -{% for q in follow_up_questions %} -Q: {{ follow_up_questions[loop.index0] }} -A: {{ follow_up_responses[loop.index0] }} -{% endfor %} -{% endif %} - -Return the follow up question and nothing else. 
-Follow up question: -""" - -class ClarifyUserQueryWorkflow(StateWorkflow): - """Workflow for engaging in follow-up questions.""" - - @override - async def execute(self, state_machine: StateMachine, state_machine_data: Optional[DeepResearchData] = None) -> str: - """Execute the workflow.""" - if state_machine_data is None: - return DeepResearchState.PERFORMING_DEEP_RESEARCH - - if state_machine_data.n_follow_up_questions_to_ask == 0: - # No more follow-up questions to ask, proceed to deep research - return DeepResearchState.PERFORMING_DEEP_RESEARCH - - # Generate follow-up question prompt - if state_machine_data.task_id and state_machine_data.current_span: - follow_up_question_generation_prompt = await adk.utils.templating.render_jinja( - trace_id=state_machine_data.task_id, - template=FOLLOW_UP_QUESTION_TEMPLATE, - variables={ - "user_query": state_machine_data.user_query, - "follow_up_questions": state_machine_data.follow_up_questions, - "follow_up_responses": state_machine_data.follow_up_responses - }, - parent_span_id=state_machine_data.current_span.id, - ) - - task_message = await adk.providers.litellm.chat_completion_stream_auto_send( - task_id=state_machine_data.task_id, - llm_config=LLMConfig( - model="gpt-4o-mini", - messages=[ - SystemMessage(content="You are an assistant that follows exact instructions without outputting any other text except your response to the user's exact request."), - UserMessage(content=follow_up_question_generation_prompt), - ], - stream=True, - ), - trace_id=state_machine_data.task_id, - parent_span_id=state_machine_data.current_span.id, - ) - # Safely extract content from task message - follow_up_question = "" - if task_message.content and hasattr(task_message.content, 'content'): - content_val = getattr(task_message.content, 'content', '') - if isinstance(content_val, str): - follow_up_question = content_val - - # Update with follow-up question - state_machine_data.follow_up_questions.append(follow_up_question) - - # Decrement the number of follow-up questions to ask - state_machine_data.n_follow_up_questions_to_ask -= 1 - - logger.info(f"Current research data: {state_machine_data}") - - # Always go back to waiting for user input to get their response - return DeepResearchState.WAITING_FOR_USER_INPUT - else: - return DeepResearchState.PERFORMING_DEEP_RESEARCH \ No newline at end of file diff --git a/examples/tutorials/10_async/10_temporal/020_state_machine/project/workflows/deep_research/performing_deep_research.py b/examples/tutorials/10_async/10_temporal/020_state_machine/project/workflows/deep_research/performing_deep_research.py deleted file mode 100644 index 954a7566..00000000 --- a/examples/tutorials/10_async/10_temporal/020_state_machine/project/workflows/deep_research/performing_deep_research.py +++ /dev/null @@ -1,155 +0,0 @@ -import os -from typing import Optional, override -from datetime import datetime - -from mcp import StdioServerParameters -from project.state_machines.deep_research import DeepResearchData, DeepResearchState - -from agentex.lib import adk -from agentex.lib.utils.logging import make_logger -from agentex.types.text_content import TextContent -from agentex.lib.sdk.state_machine.state_machine import StateMachine -from agentex.lib.sdk.state_machine.state_workflow import StateWorkflow - -logger = make_logger(__name__) -
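-# Each StdioServerParameters entry below launches an MCP server as a
-# subprocess over stdio via `uvx`, giving the research agent three tool
-# groups: current-time lookup, OpenAI-backed web search, and URL fetching.
-# This assumes `uvx` is on PATH and OPENAI_API_KEY is exported for the
-# web search server.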
-MCP_SERVERS = [ - StdioServerParameters( - command="uvx", - args=["mcp-server-time", "--local-timezone", "America/Los_Angeles"], - ), - StdioServerParameters( - command="uvx", - args=["openai-websearch-mcp"], - env={ - "OPENAI_API_KEY": os.environ.get("OPENAI_API_KEY", "") - } - ), - StdioServerParameters( - command="uvx", - args=["mcp-server-fetch"], - ), -] - -class PerformingDeepResearchWorkflow(StateWorkflow): - """Workflow for performing deep research.""" - - @override - async def execute(self, state_machine: StateMachine, state_machine_data: Optional[DeepResearchData] = None) -> str: - """Execute the workflow.""" - if state_machine_data is None: - return DeepResearchState.CLARIFYING_USER_QUERY - - if not state_machine_data.user_query: - return DeepResearchState.CLARIFYING_USER_QUERY - - # Construct initial research instruction - follow_up_qa_str = "" - for q, r in zip(state_machine_data.follow_up_questions, state_machine_data.follow_up_responses): - follow_up_qa_str += f"Q: {q}\nA: {r}\n" - - # Increment research iteration - state_machine_data.research_iteration += 1 - - # Create research instruction based on whether this is the first iteration or a continuation - if state_machine_data.research_iteration == 1: - initial_instruction = ( - f"Initial Query: {state_machine_data.user_query}\n" - f"Follow-up Q&A:\n{follow_up_qa_str}" - ) - - # Notify user that deep research is starting - if state_machine_data.task_id and state_machine_data.current_span: - await adk.messages.create( - task_id=state_machine_data.task_id, - content=TextContent( - author="agent", - content="Starting deep research process based on your query and follow-up responses...", - ), - trace_id=state_machine_data.task_id, - parent_span_id=state_machine_data.current_span.id, - ) - else: - initial_instruction = ( - f"Initial Query: {state_machine_data.user_query}\n" - f"Follow-up Q&A:\n{follow_up_qa_str}\n" - f"Current Research Report (Iteration {state_machine_data.research_iteration - 1}):\n{state_machine_data.research_report}" - ) - - # Notify user that research is continuing - if state_machine_data.task_id and state_machine_data.current_span: - await adk.messages.create( - task_id=state_machine_data.task_id, - content=TextContent( - author="agent", - content=f"Continuing deep research (iteration {state_machine_data.research_iteration}) to expand and refine the research report...", - ), - trace_id=state_machine_data.task_id, - parent_span_id=state_machine_data.current_span.id, - ) - - # Fetch the current time in human-readable format - current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S %Z") - - # Deep Research Loop - if not state_machine_data.agent_input_list: - state_machine_data.agent_input_list = [ - {"role": "user", "content": f""" -Here is my initial query, clarified with the following follow-up questions and answers: -{initial_instruction} - -You should now perform an in-depth search to get a more detailed understanding of the most promising areas. - -The current time is {current_time}. -"""} - ] - - if state_machine_data.task_id and state_machine_data.current_span: - result = await adk.providers.openai.run_agent_streamed_auto_send( - task_id=state_machine_data.task_id, - trace_id=state_machine_data.task_id, - input_list=state_machine_data.agent_input_list, - mcp_server_params=MCP_SERVERS, - agent_name="Deep Research Agent", - agent_instructions=f"""You are a deep research expert that can search the web for information. -You should use the tools you have access to in order to write an extensive report on the user's query. - -You must use the web search tool at least 10 times before writing your report. -Use the fetch tool to open links you want to read. 
-Then use web search again repeatedly to dig deeper into the most promising areas of search results. - -Be very targeted with your searches; make sure all search queries are relevant to either the initial user query or dig deeper into the most promising areas of search results. All searches should tie back to the original query though. Remember your searches are stateless, so there is no context shared between search queries. - -Always cite your sources in the format [source](link). Do not hallucinate. Your internal knowledge is not likely to be up to date. - -If this is a continuation of previous research (iteration {state_machine_data.research_iteration}), focus on: -1. Expanding areas that need more detail -2. Adding new relevant information discovered -3. Removing outdated or incorrect information -4. Improving the overall structure and clarity of the report -""", - parent_span_id=state_machine_data.current_span.id, - mcp_timeout_seconds=180, - ) - - # Update state with conversation history - state_machine_data.agent_input_list = result.final_input_list - - # Extract the research report from the last assistant message - if result.final_input_list: - for message in reversed(result.final_input_list): - if message.get("role") == "assistant": - state_machine_data.research_report = message.get("content", "") - break - - # Keep the research data active for future iterations - - if state_machine_data.task_id and state_machine_data.current_span: - await adk.tracing.end_span( - trace_id=state_machine_data.task_id, - span=state_machine_data.current_span, - ) - state_machine_data.current_span = None - - # Transition to waiting for user input state - return DeepResearchState.WAITING_FOR_USER_INPUT \ No newline at end of file diff --git a/examples/tutorials/10_async/10_temporal/020_state_machine/project/workflows/deep_research/waiting_for_user_input.py b/examples/tutorials/10_async/10_temporal/020_state_machine/project/workflows/deep_research/waiting_for_user_input.py deleted file mode 100644 index 842c5c42..00000000 --- a/examples/tutorials/10_async/10_temporal/020_state_machine/project/workflows/deep_research/waiting_for_user_input.py +++ /dev/null @@ -1,21 +0,0 @@ -from __future__ import annotations - -from typing import override - -from temporalio import workflow -from project.state_machines.deep_research import DeepResearchData, DeepResearchState - -from agentex.lib.utils.logging import make_logger -from agentex.lib.sdk.state_machine import StateMachine, StateWorkflow - -logger = make_logger(__name__) - -class WaitingForUserInputWorkflow(StateWorkflow): - @override - async def execute(self, state_machine: StateMachine, state_machine_data: DeepResearchData | None = None) -> str: - logger.info("WaitingForUserInputWorkflow: waiting for user input...") - def condition(): - current_state = state_machine.get_current_state() - return current_state != DeepResearchState.WAITING_FOR_USER_INPUT - await workflow.wait_condition(condition) - return state_machine.get_current_state() \ No newline at end of file diff --git a/examples/tutorials/10_async/10_temporal/020_state_machine/pyproject.toml b/examples/tutorials/10_async/10_temporal/020_state_machine/pyproject.toml deleted file mode 100644 index e018b322..00000000 --- a/examples/tutorials/10_async/10_temporal/020_state_machine/pyproject.toml +++ /dev/null @@ -1,34 +0,0 @@ -[build-system] -requires = ["hatchling"] -build-backend = "hatchling.build" - -[project] -name = "at020-state-machine" -version = "0.1.0" -description = "An AgentEx agent that 
demonstrates how to use state machines to manage complex async workflows" -readme = "README.md" -requires-python = ">=3.12" -dependencies = [ - "agentex-sdk", - "scale-gp", -] - -[project.optional-dependencies] -dev = [ - "pytest", - "black", - "isort", - "flake8", - "debugpy>=1.8.15", -] - -[tool.hatch.build.targets.wheel] -packages = ["project"] - -[tool.black] -line-length = 88 -target-version = ['py312'] - -[tool.isort] -profile = "black" -line_length = 88 \ No newline at end of file diff --git a/examples/tutorials/10_async/10_temporal/020_state_machine/tests/test_agent.py b/examples/tutorials/10_async/10_temporal/020_state_machine/tests/test_agent.py deleted file mode 100644 index 5c458fe8..00000000 --- a/examples/tutorials/10_async/10_temporal/020_state_machine/tests/test_agent.py +++ /dev/null @@ -1,187 +0,0 @@ -""" -Sample tests for the AgentEx Temporal state machine agent. - -This test suite demonstrates how to test a state machine-based agent that: -- Uses state transitions (WAITING → CLARIFYING → PERFORMING_DEEP_RESEARCH) -- Asks follow-up questions before performing research -- Performs deep web research using MCP servers -- Handles multi-turn conversations with context preservation - -Key features tested: -1. State Machine Flow: Agent transitions through multiple states -2. Follow-up Questions: Agent clarifies queries before research -3. Deep Research: Agent performs extensive web research -4. Multi-turn Support: User can ask follow-ups about research - -To run these tests: -1. Make sure the agent is running (via docker-compose or `agentex agents run`) -2. Set the AGENTEX_API_BASE_URL environment variable if not using default -3. Ensure OPENAI_API_KEY is set in the environment -4. Run: pytest test_agent.py -v - -Configuration: -- AGENTEX_API_BASE_URL: Base URL for the AgentEx server (default: http://localhost:5003) -- AGENT_NAME: Name of the agent to test (default: at020-state-machine) -""" - -import os -import uuid -import asyncio - -import pytest -import pytest_asyncio -from test_utils.async_utils import ( - stream_task_messages, - send_event_and_poll_yielding, -) - -from agentex import AsyncAgentex -from agentex.types.agent_rpc_params import ParamsCreateTaskRequest -from agentex.types.text_content_param import TextContentParam -from agentex.types.tool_request_content import ToolRequestContent - -# Configuration from environment variables -AGENTEX_API_BASE_URL = os.environ.get("AGENTEX_API_BASE_URL", "http://localhost:5003") -AGENT_NAME = os.environ.get("AGENT_NAME", "at020-state-machine") - - -@pytest_asyncio.fixture -async def client(): - """Create an AsyncAgentex client instance for testing.""" - client = AsyncAgentex(base_url=AGENTEX_API_BASE_URL) - yield client - await client.close() - - -@pytest.fixture -def agent_name(): - """Return the agent name for testing.""" - return AGENT_NAME - - -@pytest_asyncio.fixture -async def agent_id(client, agent_name): - """Retrieve the agent ID based on the agent name.""" - agents = await client.agents.list() - for agent in agents: - if agent.name == agent_name: - return agent.id - raise ValueError(f"Agent with name {agent_name} not found.") - -
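-# The tests below drive the full state machine loop: the first user message
-# should come back with a clarifying question from the agent
-# (WAITING_FOR_USER_INPUT -> CLARIFYING_USER_QUERY), and the follow-up answer
-# should trigger the "starting deep research" notification plus tool requests.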
-class TestNonStreamingEvents: - """Test non-streaming event sending and polling with state machine workflow.""" - @pytest.mark.asyncio - async def test_send_event_and_poll_simple_query(self, client: AsyncAgentex, agent_id: str): - """Test sending a simple event and polling for the response (no tool use).""" - # Create a task for this conversation - task_response = await client.agents.create_task(agent_id, params=ParamsCreateTaskRequest(name=uuid.uuid1().hex)) - task = task_response.result - assert task is not None - - # Wait for workflow to initialize - await asyncio.sleep(1) - - # Send a simple message that shouldn't require tool use - user_message = "Hello! Please tell me the latest news about AI and AI startups." - messages = [] - found_agent_message = False - async for message in send_event_and_poll_yielding( - client=client, - agent_id=agent_id, - task_id=task.id, - user_message=user_message, - timeout=30, - sleep_interval=1.0, - ): - # We should expect to get a clarifying question from the agent - if message.content.type == "text" and message.content.author == "agent": - found_agent_message = True - break - - assert found_agent_message, "Did not find an agent message" - - # Now we want to clarify that message - await asyncio.sleep(2) - next_user_message = "I want to know what viral news came up and which startups failed, got acquired, or became very successful or popular in the last 3 months" - starting_deep_research_message = False - uses_tool_requests = False - async for message in send_event_and_poll_yielding( - client=client, - agent_id=agent_id, - task_id=task.id, - user_message=next_user_message, - timeout=30, - sleep_interval=1.0, - ): - if message.content.type == "text" and message.content.author == "agent": - if "starting deep research" in message.content.content.lower(): - starting_deep_research_message = True - if isinstance(message.content, ToolRequestContent): - uses_tool_requests = True - break - - assert starting_deep_research_message, "Did not start deep research" - assert uses_tool_requests, "Did not use tool requests" - -class TestStreamingEvents: - """Test streaming event sending with state machine workflow.""" - @pytest.mark.asyncio - async def test_send_event_and_stream(self, client: AsyncAgentex, agent_id: str): - """Test sending an event and streaming the response.""" - # Create a task for this conversation - task_response = await client.agents.create_task(agent_id, params=ParamsCreateTaskRequest(name=uuid.uuid1().hex)) - task = task_response.result - assert task is not None - - found_agent_message = False - async def poll_message_in_background() -> None: - nonlocal found_agent_message - async for message in stream_task_messages( - client=client, - task_id=task.id, - timeout=30, - ): - if message.content.type == "text" and message.content.author == "agent": - found_agent_message = True - break - - assert found_agent_message, "Did not find an agent message" - - poll_task = asyncio.create_task(poll_message_in_background()) - # Send the first user message - user_message = "Hello! Please tell me the latest news about AI and AI startups." - await client.agents.send_event(agent_id=agent_id, params={"task_id": task.id, "content": TextContentParam(type="text", author="user", content=user_message)}) - - await poll_task - - await asyncio.sleep(2) - starting_deep_research_message = False - uses_tool_requests = False - async def poll_message_in_background_2() -> None: - nonlocal starting_deep_research_message, uses_tool_requests - async for message in stream_task_messages( - client=client, - task_id=task.id, - timeout=30, - ): - # Apply the same checks as in the non-streaming events test 
- if message.content.type == "text" and message.content.author == "agent": - if "starting deep research" in message.content.content.lower(): - starting_deep_research_message = True - if isinstance(message.content, ToolRequestContent): - uses_tool_requests = True - break - - assert starting_deep_research_message, "Did not start deep research" - assert uses_tool_requests, "Did not use tool requests" - - poll_task_2 = asyncio.create_task(poll_message_in_background_2()) - - next_user_message = "I want to know what viral news came up and which startups failed, got acquired, or became very successful or popular in the last 3 months" - await client.agents.send_event(agent_id=agent_id, params={"task_id": task.id, "content": TextContentParam(type="text", author="user", content=next_user_message)}) - await poll_task_2 - - -if __name__ == "__main__": - pytest.main([__file__, "-v"]) diff --git a/examples/tutorials/10_async/10_temporal/030_custom_activities/.dockerignore b/examples/tutorials/10_async/10_temporal/030_custom_activities/.dockerignore deleted file mode 100644 index c4f7a8b4..00000000 --- a/examples/tutorials/10_async/10_temporal/030_custom_activities/.dockerignore +++ /dev/null @@ -1,43 +0,0 @@ -# Python -__pycache__/ -*.py[cod] -*$py.class -*.so -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -*.egg-info/ -.installed.cfg -*.egg - -# Environments -.env** -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# IDE -.idea/ -.vscode/ -*.swp -*.swo - -# Git -.git -.gitignore - -# Misc -.DS_Store \ No newline at end of file diff --git a/examples/tutorials/10_async/10_temporal/030_custom_activities/Dockerfile b/examples/tutorials/10_async/10_temporal/030_custom_activities/Dockerfile deleted file mode 100644 index 752ad8e9..00000000 --- a/examples/tutorials/10_async/10_temporal/030_custom_activities/Dockerfile +++ /dev/null @@ -1,59 +0,0 @@ -# syntax=docker/dockerfile:1.3 -FROM python:3.12-slim -COPY --from=ghcr.io/astral-sh/uv:0.6.4 /uv /uvx /bin/ - -# Install system dependencies -RUN apt-get update && apt-get install -y \ - htop \ - vim \ - curl \ - tar \ - python3-dev \ - postgresql-client \ - build-essential \ - libpq-dev \ - gcc \ - cmake \ - netcat-openbsd \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - -# Install tctl (Temporal CLI) -RUN curl -L https://github.com/temporalio/tctl/releases/download/v1.18.1/tctl_1.18.1_linux_arm64.tar.gz -o /tmp/tctl.tar.gz && \ - tar -xzf /tmp/tctl.tar.gz -C /usr/local/bin && \ - chmod +x /usr/local/bin/tctl && \ - rm /tmp/tctl.tar.gz - -RUN uv pip install --system --upgrade pip setuptools wheel - -ENV UV_HTTP_TIMEOUT=1000 - -# Copy pyproject.toml and README.md to install dependencies -COPY 10_async/10_temporal/030_custom_activities/pyproject.toml /app/030_custom_activities/pyproject.toml -COPY 10_async/10_temporal/030_custom_activities/README.md /app/030_custom_activities/README.md - -WORKDIR /app/030_custom_activities - -# Copy the project code -COPY 10_async/10_temporal/030_custom_activities/project /app/030_custom_activities/project - -# Copy the test files -COPY 10_async/10_temporal/030_custom_activities/tests /app/030_custom_activities/tests - -# Copy shared test utilities -COPY test_utils /app/test_utils - -# Install the required Python packages with dev dependencies (includes pytest) -RUN uv pip install --system .[dev] pytest-asyncio httpx - -# Set environment variables -ENV PYTHONPATH=/app - -# Set test environment variables -ENV AGENT_NAME=at030-custom-activities - -# Run the ACP 
server using uvicorn -CMD ["uvicorn", "project.acp:acp", "--host", "0.0.0.0", "--port", "8000"] - -# When we deploy the worker, we will replace the CMD with the following -# CMD ["python", "-m", "run_worker"] \ No newline at end of file diff --git a/examples/tutorials/10_async/10_temporal/030_custom_activities/README.md b/examples/tutorials/10_async/10_temporal/030_custom_activities/README.md deleted file mode 100644 index 28a08c21..00000000 --- a/examples/tutorials/10_async/10_temporal/030_custom_activities/README.md +++ /dev/null @@ -1,106 +0,0 @@ -# [Temporal] Custom Activities - -Learn how to extend Temporal workflows with custom activities for external operations like API calls, database queries, or complex computations. - -## What You'll Learn -- How to define custom Temporal activities -- When to use activities vs inline workflow code -- Activity retry and timeout configuration -- Integrating external services into workflows - -## Prerequisites -- Development environment set up (see [main repo README](https://github.com/scaleapi/scale-agentex)) -- Backend services running: `make dev` from repository root -- Temporal UI available at http://localhost:8233 -- Understanding of basic Temporal workflows (see [000_hello_acp](../000_hello_acp/)) - -## Quick Start - -**Terminal 1 - Start Worker:** -```bash -cd examples/tutorials/10_async/10_temporal/030_custom_activities -uv run python project/run_worker.py -``` - -**Terminal 2 - Run Agent:** -```bash -uv run agentex agents run --manifest manifest.yaml -``` - -**Terminal 3 - Test via Notebook:** -```bash -jupyter notebook dev.ipynb -``` - -## Key Concepts - -### Activities vs Workflow Code - -**Use activities for:** -- External API calls -- Database operations -- File I/O or network operations -- Non-deterministic operations (random, time, external state) - -**Use workflow code for:** -- Orchestration logic -- State management -- Decision making based on activity results - -### Defining a Custom Activity - -```python -# In project/activities.py -from temporalio import activity - -@activity.defn -async def call_external_api(endpoint: str, data: dict) -> dict: - """Activities can perform non-deterministic operations.""" - import httpx - async with httpx.AsyncClient() as client: - response = await client.post(endpoint, json=data) - return response.json() -``` - -### Using Activities in Workflows - -```python -# In project/workflow.py -from datetime import timedelta - -from temporalio import workflow -from temporalio.common import RetryPolicy - -from agentex.lib.core.temporal.workflows.workflow import BaseWorkflow -from project.activities import call_external_api - -@workflow.defn -class MyWorkflow(BaseWorkflow): - @workflow.run - async def run(self, input: dict): - # Activities are executed with retry and timeout policies - result = await workflow.execute_activity( - call_external_api, - args=["https://api.example.com", input], - start_to_close_timeout=timedelta(seconds=30), - retry_policy=RetryPolicy(maximum_attempts=3) - ) - return result -``` - -## Try It - -1. Modify `project/activities.py` to add a new activity -2. Update `project/workflow.py` to call your activity -3. Register the activity in `project/run_worker.py` -4. Restart the worker and test via the notebook -5. Check Temporal UI at http://localhost:8233 to see activity execution and retries - -## When to Use -- Integrating external services (OpenAI, databases, APIs) -- Operations that may fail and need automatic retries -- Long-running computations that should be checkpointed -- Separating business logic from orchestration - -## Why This Matters -Activities are Temporal's way of handling the real world's messiness: network failures, API rate limits, and transient errors. 
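-For reference, wiring a custom activity into the worker might look like the sketch below. It is modeled on this tutorial's `project/run_worker.py`; the `MyWorkflow` class, activity name, and queue name are placeholders: - -```python -# project/run_worker.py (sketch) -import asyncio - -from agentex.lib.core.temporal.activities import get_all_activities -from agentex.lib.core.temporal.workers.worker import AgentexWorker - -from project.activities import call_external_api -from project.workflow import MyWorkflow - - -async def main(): -    worker = AgentexWorker(task_queue="my_task_queue") -    # Pass custom activities alongside the built-in AgentEx activities -    await worker.run( -        activities=[call_external_api, *get_all_activities()], -        workflow=MyWorkflow, -    ) - - -if __name__ == "__main__": -    asyncio.run(main()) -``` -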
They provide automatic retries, timeouts, and observability for operations that would otherwise require extensive error handling code. - ---- - -**For detailed setup instructions, see [TEMPLATE_GUIDE.md](./TEMPLATE_GUIDE.md)** - -**Next:** [050_agent_chat_guardrails](../050_agent_chat_guardrails/) - Add safety and validation to your workflows diff --git a/examples/tutorials/10_async/10_temporal/030_custom_activities/dev.ipynb b/examples/tutorials/10_async/10_temporal/030_custom_activities/dev.ipynb deleted file mode 100644 index b0806369..00000000 --- a/examples/tutorials/10_async/10_temporal/030_custom_activities/dev.ipynb +++ /dev/null @@ -1,228 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 38, - "id": "36834357", - "metadata": {}, - "outputs": [], - "source": [ - "from agentex import Agentex\n", - "\n", - "client = Agentex(base_url=\"http://localhost:5003\")" - ] - }, - { - "cell_type": "code", - "execution_count": 39, - "id": "d1c309d6", - "metadata": {}, - "outputs": [], - "source": [ - "AGENT_NAME = \"at030-custom-activities\"" - ] - }, - { - "cell_type": "code", - "execution_count": 40, - "id": "9f6e6ef0", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Task(id='0927b469-5aed-4804-aa53-79a6af70f76f', created_at=datetime.datetime(2025, 8, 14, 5, 54, 44, 734709, tzinfo=TzInfo(UTC)), name='26d1fa25-task', status='RUNNING', status_reason='Task created, forwarding to ACP server', updated_at=datetime.datetime(2025, 8, 14, 5, 54, 44, 734709, tzinfo=TzInfo(UTC)))\n" - ] - } - ], - "source": [ - "# (REQUIRED) Create a new task. For Agentic agents, you must create a task for messages to be associated with.\n", - "import uuid\n", - "\n", - "rpc_response = client.agents.create_task(\n", - " agent_name=AGENT_NAME,\n", - " params={\n", - " \"name\": f\"{str(uuid.uuid4())[:8]}-task\",\n", - " \"params\": {}\n", - " }\n", - ")\n", - "\n", - "task = rpc_response.result\n", - "print(task)" - ] - }, - { - "cell_type": "code", - "execution_count": 41, - "id": "b03b0d37", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Event(id='5f402c77-ed37-4f56-b161-50f3ceb87685', agent_id='c9fc2e91-df7a-42b4-bc79-fe154bd4db5a', sequence_id=247, task_id='0927b469-5aed-4804-aa53-79a6af70f76f', content=TextContent(author='user', content='Hello what can you do? EVENT NUM: 0', attachments=None, format='plain', style='static', type='text'), created_at=datetime.datetime(2025, 8, 14, 5, 54, 52, 106969, tzinfo=TzInfo(UTC)))\n", - "Event(id='f71c4b80-6d93-4167-bdf9-d2f407bde759', agent_id='c9fc2e91-df7a-42b4-bc79-fe154bd4db5a', sequence_id=248, task_id='0927b469-5aed-4804-aa53-79a6af70f76f', content=TextContent(author='user', content='Hello what can you do? EVENT NUM: 1', attachments=None, format='plain', style='static', type='text'), created_at=datetime.datetime(2025, 8, 14, 5, 54, 53, 141757, tzinfo=TzInfo(UTC)))\n", - "Event(id='797aca62-6260-4c4d-a89b-43e33ecfdc30', agent_id='c9fc2e91-df7a-42b4-bc79-fe154bd4db5a', sequence_id=249, task_id='0927b469-5aed-4804-aa53-79a6af70f76f', content=TextContent(author='user', content='Hello what can you do? 
EVENT NUM: 2', attachments=None, format='plain', style='static', type='text'), created_at=datetime.datetime(2025, 8, 14, 5, 54, 54, 200724, tzinfo=TzInfo(UTC)))\n", - "Event(id='f207d685-789c-4538-b1c2-03c5a93028d8', agent_id='c9fc2e91-df7a-42b4-bc79-fe154bd4db5a', sequence_id=250, task_id='0927b469-5aed-4804-aa53-79a6af70f76f', content=TextContent(author='user', content='Hello what can you do? EVENT NUM: 3', attachments=None, format='plain', style='static', type='text'), created_at=datetime.datetime(2025, 8, 14, 5, 54, 55, 264489, tzinfo=TzInfo(UTC)))\n", - "Event(id='9685fc87-d38b-4d4a-94e2-ac148b3b060f', agent_id='c9fc2e91-df7a-42b4-bc79-fe154bd4db5a', sequence_id=251, task_id='0927b469-5aed-4804-aa53-79a6af70f76f', content=TextContent(author='user', content='Hello what can you do? EVENT NUM: 4', attachments=None, format='plain', style='static', type='text'), created_at=datetime.datetime(2025, 8, 14, 5, 54, 56, 352169, tzinfo=TzInfo(UTC)))\n", - "Event(id='f2a134c7-5dac-4643-acee-259dc076c953', agent_id='c9fc2e91-df7a-42b4-bc79-fe154bd4db5a', sequence_id=252, task_id='0927b469-5aed-4804-aa53-79a6af70f76f', content=TextContent(author='user', content='Hello what can you do? EVENT NUM: 5', attachments=None, format='plain', style='static', type='text'), created_at=datetime.datetime(2025, 8, 14, 5, 54, 57, 419635, tzinfo=TzInfo(UTC)))\n", - "Event(id='68dcfc1f-71b1-4876-907a-4282d12b1c54', agent_id='c9fc2e91-df7a-42b4-bc79-fe154bd4db5a', sequence_id=253, task_id='0927b469-5aed-4804-aa53-79a6af70f76f', content=TextContent(author='user', content='Hello what can you do? EVENT NUM: 6', attachments=None, format='plain', style='static', type='text'), created_at=datetime.datetime(2025, 8, 14, 5, 54, 58, 476724, tzinfo=TzInfo(UTC)))\n", - "Event(id='f8644089-1b20-49ca-b8bd-706feeeed096', agent_id='c9fc2e91-df7a-42b4-bc79-fe154bd4db5a', sequence_id=254, task_id='0927b469-5aed-4804-aa53-79a6af70f76f', content=TextContent(author='user', content='Hello what can you do? EVENT NUM: 7', attachments=None, format='plain', style='static', type='text'), created_at=datetime.datetime(2025, 8, 14, 5, 54, 59, 527430, tzinfo=TzInfo(UTC)))\n", - "Event(id='11300500-4843-4c15-bfa9-7de6e1f29017', agent_id='c9fc2e91-df7a-42b4-bc79-fe154bd4db5a', sequence_id=255, task_id='0927b469-5aed-4804-aa53-79a6af70f76f', content=TextContent(author='user', content='Hello what can you do? EVENT NUM: 8', attachments=None, format='plain', style='static', type='text'), created_at=datetime.datetime(2025, 8, 14, 5, 55, 0, 584924, tzinfo=TzInfo(UTC)))\n", - "Event(id='27d8f4f3-7e25-4d17-aaa8-a4c9d39bfcda', agent_id='c9fc2e91-df7a-42b4-bc79-fe154bd4db5a', sequence_id=256, task_id='0927b469-5aed-4804-aa53-79a6af70f76f', content=TextContent(author='user', content='Hello what can you do? EVENT NUM: 9', attachments=None, format='plain', style='static', type='text'), created_at=datetime.datetime(2025, 8, 14, 5, 55, 1, 637711, tzinfo=TzInfo(UTC)))\n", - "Event(id='7b964f5c-504c-43c5-a1ea-84f96ce1a696', agent_id='c9fc2e91-df7a-42b4-bc79-fe154bd4db5a', sequence_id=257, task_id='0927b469-5aed-4804-aa53-79a6af70f76f', content=TextContent(author='user', content='Hello what can you do? 
EVENT NUM: 10', attachments=None, format='plain', style='static', type='text'), created_at=datetime.datetime(2025, 8, 14, 5, 55, 2, 693531, tzinfo=TzInfo(UTC)))\n", - "Event(id='dc70e5c3-75d7-4b76-9a1b-171f755440c4', agent_id='c9fc2e91-df7a-42b4-bc79-fe154bd4db5a', sequence_id=258, task_id='0927b469-5aed-4804-aa53-79a6af70f76f', content=TextContent(author='user', content='Hello what can you do? EVENT NUM: 11', attachments=None, format='plain', style='static', type='text'), created_at=datetime.datetime(2025, 8, 14, 5, 55, 3, 724789, tzinfo=TzInfo(UTC)))\n", - "Event(id='adb547d2-5ac8-4c45-86f0-85703434568a', agent_id='c9fc2e91-df7a-42b4-bc79-fe154bd4db5a', sequence_id=259, task_id='0927b469-5aed-4804-aa53-79a6af70f76f', content=TextContent(author='user', content='Hello what can you do? EVENT NUM: 12', attachments=None, format='plain', style='static', type='text'), created_at=datetime.datetime(2025, 8, 14, 5, 55, 4, 773604, tzinfo=TzInfo(UTC)))\n", - "Event(id='575b7dbc-d884-42cf-b67b-47f752862b43', agent_id='c9fc2e91-df7a-42b4-bc79-fe154bd4db5a', sequence_id=260, task_id='0927b469-5aed-4804-aa53-79a6af70f76f', content=TextContent(author='user', content='Hello what can you do? EVENT NUM: 13', attachments=None, format='plain', style='static', type='text'), created_at=datetime.datetime(2025, 8, 14, 5, 55, 5, 825423, tzinfo=TzInfo(UTC)))\n", - "Event(id='de7f328a-03f2-44a3-9ee0-111ba41b48c4', agent_id='c9fc2e91-df7a-42b4-bc79-fe154bd4db5a', sequence_id=261, task_id='0927b469-5aed-4804-aa53-79a6af70f76f', content=TextContent(author='user', content='Hello what can you do? EVENT NUM: 14', attachments=None, format='plain', style='static', type='text'), created_at=datetime.datetime(2025, 8, 14, 5, 55, 6, 873700, tzinfo=TzInfo(UTC)))\n", - "Event(id='639fc12d-867a-4739-a5d6-71e3db5cb3db', agent_id='c9fc2e91-df7a-42b4-bc79-fe154bd4db5a', sequence_id=262, task_id='0927b469-5aed-4804-aa53-79a6af70f76f', content=TextContent(author='user', content='Hello what can you do? EVENT NUM: 15', attachments=None, format='plain', style='static', type='text'), created_at=datetime.datetime(2025, 8, 14, 5, 55, 7, 920757, tzinfo=TzInfo(UTC)))\n", - "Event(id='d7c93e13-8d49-4ba9-88c9-642176bea019', agent_id='c9fc2e91-df7a-42b4-bc79-fe154bd4db5a', sequence_id=263, task_id='0927b469-5aed-4804-aa53-79a6af70f76f', content=TextContent(author='user', content='Hello what can you do? EVENT NUM: 16', attachments=None, format='plain', style='static', type='text'), created_at=datetime.datetime(2025, 8, 14, 5, 55, 8, 952535, tzinfo=TzInfo(UTC)))\n", - "Event(id='177047ce-bd57-47e4-b1ab-ca6a2c8333fe', agent_id='c9fc2e91-df7a-42b4-bc79-fe154bd4db5a', sequence_id=264, task_id='0927b469-5aed-4804-aa53-79a6af70f76f', content=TextContent(author='user', content='Hello what can you do? EVENT NUM: 17', attachments=None, format='plain', style='static', type='text'), created_at=datetime.datetime(2025, 8, 14, 5, 55, 9, 986904, tzinfo=TzInfo(UTC)))\n", - "Event(id='f081c0c8-f9c4-4cb8-90d3-aa1fb662e065', agent_id='c9fc2e91-df7a-42b4-bc79-fe154bd4db5a', sequence_id=265, task_id='0927b469-5aed-4804-aa53-79a6af70f76f', content=TextContent(author='user', content='Hello what can you do? 
EVENT NUM: 18', attachments=None, format='plain', style='static', type='text'), created_at=datetime.datetime(2025, 8, 14, 5, 55, 11, 34936, tzinfo=TzInfo(UTC)))\n", - "Event(id='45c49d20-de58-4f29-be75-b4169f30ff5c', agent_id='c9fc2e91-df7a-42b4-bc79-fe154bd4db5a', sequence_id=266, task_id='0927b469-5aed-4804-aa53-79a6af70f76f', content=TextContent(author='user', content='Hello what can you do? EVENT NUM: 19', attachments=None, format='plain', style='static', type='text'), created_at=datetime.datetime(2025, 8, 14, 5, 55, 12, 64206, tzinfo=TzInfo(UTC)))\n", - "Event(id='23ef7bb3-f1a7-41cd-ba33-3513724ecb9a', agent_id='c9fc2e91-df7a-42b4-bc79-fe154bd4db5a', sequence_id=267, task_id='0927b469-5aed-4804-aa53-79a6af70f76f', content=TextContent(author='user', content='Hello what can you do? EVENT NUM: 20', attachments=None, format='plain', style='static', type='text'), created_at=datetime.datetime(2025, 8, 14, 5, 55, 13, 117837, tzinfo=TzInfo(UTC)))\n", - "Event(id='f6d5dbda-ca45-42a9-a15e-3726f15a42ce', agent_id='c9fc2e91-df7a-42b4-bc79-fe154bd4db5a', sequence_id=268, task_id='0927b469-5aed-4804-aa53-79a6af70f76f', content=TextContent(author='user', content='Hello what can you do? EVENT NUM: 21', attachments=None, format='plain', style='static', type='text'), created_at=datetime.datetime(2025, 8, 14, 5, 55, 14, 172609, tzinfo=TzInfo(UTC)))\n", - "Event(id='f8c0e226-c950-4173-9b63-00530d5a5a62', agent_id='c9fc2e91-df7a-42b4-bc79-fe154bd4db5a', sequence_id=269, task_id='0927b469-5aed-4804-aa53-79a6af70f76f', content=TextContent(author='user', content='Hello what can you do? EVENT NUM: 22', attachments=None, format='plain', style='static', type='text'), created_at=datetime.datetime(2025, 8, 14, 5, 55, 15, 242257, tzinfo=TzInfo(UTC)))\n", - "Event(id='0cc1591b-0e5f-4044-9b08-8a7f8977f35b', agent_id='c9fc2e91-df7a-42b4-bc79-fe154bd4db5a', sequence_id=270, task_id='0927b469-5aed-4804-aa53-79a6af70f76f', content=TextContent(author='user', content='Hello what can you do? EVENT NUM: 23', attachments=None, format='plain', style='static', type='text'), created_at=datetime.datetime(2025, 8, 14, 5, 55, 16, 287209, tzinfo=TzInfo(UTC)))\n", - "Event(id='562ad0d5-695e-46ca-a3f1-c918f44d8dce', agent_id='c9fc2e91-df7a-42b4-bc79-fe154bd4db5a', sequence_id=271, task_id='0927b469-5aed-4804-aa53-79a6af70f76f', content=TextContent(author='user', content='Hello what can you do? EVENT NUM: 24', attachments=None, format='plain', style='static', type='text'), created_at=datetime.datetime(2025, 8, 14, 5, 55, 17, 330180, tzinfo=TzInfo(UTC)))\n", - "Event(id='c35b5515-a51c-486b-a46a-47cbd31b7b98', agent_id='c9fc2e91-df7a-42b4-bc79-fe154bd4db5a', sequence_id=272, task_id='0927b469-5aed-4804-aa53-79a6af70f76f', content=TextContent(author='user', content='Hello what can you do? EVENT NUM: 25', attachments=None, format='plain', style='static', type='text'), created_at=datetime.datetime(2025, 8, 14, 5, 55, 18, 373297, tzinfo=TzInfo(UTC)))\n", - "Event(id='83b99f74-27d0-443f-8929-ce6f3fea1868', agent_id='c9fc2e91-df7a-42b4-bc79-fe154bd4db5a', sequence_id=273, task_id='0927b469-5aed-4804-aa53-79a6af70f76f', content=TextContent(author='user', content='Hello what can you do? 
EVENT NUM: 26', attachments=None, format='plain', style='static', type='text'), created_at=datetime.datetime(2025, 8, 14, 5, 55, 19, 446678, tzinfo=TzInfo(UTC)))\n", - "Event(id='291784dd-0722-47d2-913e-27d2c328d972', agent_id='c9fc2e91-df7a-42b4-bc79-fe154bd4db5a', sequence_id=274, task_id='0927b469-5aed-4804-aa53-79a6af70f76f', content=TextContent(author='user', content='Hello what can you do? EVENT NUM: 27', attachments=None, format='plain', style='static', type='text'), created_at=datetime.datetime(2025, 8, 14, 5, 55, 20, 508485, tzinfo=TzInfo(UTC)))\n", - "Event(id='36fff710-3617-4a58-827f-baee95ef0d6d', agent_id='c9fc2e91-df7a-42b4-bc79-fe154bd4db5a', sequence_id=275, task_id='0927b469-5aed-4804-aa53-79a6af70f76f', content=TextContent(author='user', content='Hello what can you do? EVENT NUM: 28', attachments=None, format='plain', style='static', type='text'), created_at=datetime.datetime(2025, 8, 14, 5, 55, 21, 603524, tzinfo=TzInfo(UTC)))\n", - "Event(id='40058df6-7226-47c3-9e11-eb6aeb304ff2', agent_id='c9fc2e91-df7a-42b4-bc79-fe154bd4db5a', sequence_id=276, task_id='0927b469-5aed-4804-aa53-79a6af70f76f', content=TextContent(author='user', content='Hello what can you do? EVENT NUM: 29', attachments=None, format='plain', style='static', type='text'), created_at=datetime.datetime(2025, 8, 14, 5, 55, 22, 651817, tzinfo=TzInfo(UTC)))\n" - ] - } - ], - "source": [ - "# Send an event to the agent\n", - "\n", - "# The response is expected to be a list of TaskMessage objects, which is a union of the following types:\n", - "# - TextContent: A message with just text content \n", - "# - DataContent: A message with JSON-serializable data content\n", - "# - ToolRequestContent: A message with a tool request, which contains a JSON-serializable request to call a tool\n", - "# - ToolResponseContent: A message with a tool response, which contains response object from a tool call in its content\n", - "\n", - "# When processing the message/send response, if you are expecting more than TextContent, such as DataContent, ToolRequestContent, or ToolResponseContent, you can process them as well\n", - "\n", - "num_events = 30\n", - "for i in range(num_events):\n", - " rpc_response = client.agents.send_event(\n", - " agent_name=AGENT_NAME,\n", - " params={\n", - " \"content\": {\"type\": \"text\", \"author\": \"user\", \"content\": f\"Hello what can you do? 
EVENT NUM: {i}\"},\n", - " \"task_id\": task.id,\n", - " }\n", - " )\n", - " \n", - " event = rpc_response.result\n", - " print(event)" - ] - }, - { - "cell_type": "code", - "execution_count": 42, - "id": "a2c269df-a33a-422e-a2bf-1cab514080e8", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Event(id='e4204e25-dba1-428a-a278-e5bf16464cbc', agent_id='c9fc2e91-df7a-42b4-bc79-fe154bd4db5a', sequence_id=277, task_id='0927b469-5aed-4804-aa53-79a6af70f76f', content=DataContent(author='user', data={'clear_queue': True, 'cancel_running_tasks': True}, style='static', type='data'), created_at=datetime.datetime(2025, 8, 14, 5, 55, 23, 716517, tzinfo=TzInfo(UTC)))\n" - ] - } - ], - "source": [ - "\n", - "rpc_response = client.agents.send_event(\n", - " agent_name=AGENT_NAME,\n", - " params={\n", - " \"content\": {\"type\": \"data\", \"author\": \"user\", \"data\": {\"clear_queue\": True, \"cancel_running_tasks\": True}},\n", - " \"task_id\": task.id,\n", - " }\n", - ")\n", - "\n", - "event = rpc_response.result\n", - "print(event)" - ] - }, - { - "cell_type": "code", - "execution_count": 30, - "id": "a6927cc0", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
╭──────────────────────── AGENT [08/14/2025 05:49:07] ─────────────────────────╮\n",
-       "│ I just cleared the queue of events that were received. Total cleared events: │\n",
-       "│ 1                                                                            │\n",
-       "╰──────────────────────────────────────────────────────────────────────────────╯\n",
-       "
\n" - ], - "text/plain": [ - "\u001b[32mโ•ญโ”€\u001b[0m\u001b[32mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[32m \u001b[0m\u001b[1;32mAGENT\u001b[0m\u001b[32m [08/14/2025 05:49:07] \u001b[0m\u001b[32mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[32mโ”€โ•ฎ\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m I just cleared the queue of events that were received. Total cleared events: \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m 1 \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ\u001b[0m\n" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Streaming timed out after 5 seconds - returning collected messages\n" - ] - } - ], - "source": [ - "# Subscribe to the async task messages produced by the agent\n", - "from agentex.lib.utils.dev_tools import subscribe_to_async_task_messages\n", - "\n", - "task_messages = subscribe_to_async_task_messages(\n", - " client=client,\n", - " task=task, \n", - " only_after_timestamp=event.created_at, \n", - " print_messages=True,\n", - " rich_print=True,\n", - " timeout=5,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4864e354", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.8" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/examples/tutorials/10_async/10_temporal/030_custom_activities/manifest.yaml b/examples/tutorials/10_async/10_temporal/030_custom_activities/manifest.yaml deleted file mode 100644 index 40af196f..00000000 --- a/examples/tutorials/10_async/10_temporal/030_custom_activities/manifest.yaml +++ /dev/null @@ -1,138 +0,0 @@ -# Agent Manifest Configuration -# --------------------------- -# This file defines how your agent should be built and deployed. - -# Build Configuration -# ------------------ -# The build config defines what gets packaged into your agent's Docker image. -# This same configuration is used whether building locally or remotely. -# -# When building: -# 1. All files from include_paths are collected into a build context -# 2. The context is filtered by dockerignore rules -# 3. The Dockerfile uses this context to build your agent's image -# 4. 
The image is pushed to a registry and used to run your agent -build: - context: - # Root directory for the build context - root: ../../../ # Up to tutorials level to include test_utils - - # Paths to include in the Docker build context - # Must include: - # - Your agent's directory (your custom agent code) - # These paths are collected and sent to the Docker daemon for building - include_paths: - - 10_async/10_temporal/030_custom_activities - - test_utils - - # Path to your agent's Dockerfile - # This defines how your agent's image is built from the context - # Relative to the root directory - dockerfile: 10_async/10_temporal/030_custom_activities/Dockerfile - - # Path to your agent's .dockerignore - # Filters unnecessary files from the build context - # Helps keep build context small and builds fast - dockerignore: 10_async/10_temporal/030_custom_activities/.dockerignore - - -# Local Development Configuration -# ----------------------------- -# Only used when running the agent locally -local_development: - agent: - port: 8000 # Port where your local ACP server is running - host_address: host.docker.internal # Host address for Docker networking (host.docker.internal for Docker, localhost for direct) - - # File paths for local development (relative to this manifest.yaml) - paths: - # Path to ACP server file - # Examples: - # project/acp.py (standard) - # src/server.py (custom structure) - # ../shared/acp.py (shared across projects) - # /absolute/path/acp.py (absolute path) - acp: project/acp.py - - # Path to temporal worker file - # Examples: - # project/run_worker.py (standard) - # workers/temporal.py (custom structure) - # ../shared/worker.py (shared across projects) - worker: project/run_worker.py - - -# Agent Configuration -# ----------------- -agent: - # Type of agent - either sync or async - acp_type: async - - # Unique name for your agent - # Used for task routing and monitoring - name: at030-custom-activities - - # Description of what your agent does - # Helps with documentation and discovery - description: An AgentEx agent with custom activities - - # Temporal workflow configuration - # This enables your agent to run as a Temporal workflow for long-running tasks - temporal: - enabled: true - workflows: - # Name of the workflow class - # Must match the @workflow.defn name in your workflow.py - - name: at030-custom-activities - - # Queue name for task distribution - # Used by Temporal to route tasks to your agent - # Convention: _task_queue - queue_name: 030_custom_activities_queue - - # Optional: Credentials mapping - # Maps Kubernetes secrets to environment variables - # Common credentials include: - # credentials: - # - env_var_name: OPENAI_API_KEY - # secret_name: openai-api-key - # secret_key: api-key - - # Optional: Set Environment variables for running your agent locally as well - # as for deployment later on - # env: - # OPENAI_API_KEY: "" - # OPENAI_BASE_URL: "" - # OPENAI_ORG_ID: "" - - -# Deployment Configuration -# ----------------------- -# Configuration for deploying your agent to Kubernetes clusters -deployment: - # Container image configuration - image: - repository: "" # Update with your container registry - tag: "latest" # Default tag, should be versioned in production - - imagePullSecrets: - - name: my-registry-secret # Update with your image pull secret name - - # Global deployment settings that apply to all clusters - # These can be overridden using --override-file with custom configuration files - global: - agent: - name: "at030-custom-activities" - description: 
"An AgentEx agent with custom activities" - - # Default replica count - replicaCount: 1 - - # Default resource requirements - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "1000m" - memory: "2Gi" \ No newline at end of file diff --git a/examples/tutorials/10_async/10_temporal/030_custom_activities/project/__init__.py b/examples/tutorials/10_async/10_temporal/030_custom_activities/project/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/examples/tutorials/10_async/10_temporal/030_custom_activities/project/acp.py b/examples/tutorials/10_async/10_temporal/030_custom_activities/project/acp.py deleted file mode 100644 index 819b119c..00000000 --- a/examples/tutorials/10_async/10_temporal/030_custom_activities/project/acp.py +++ /dev/null @@ -1,60 +0,0 @@ -import os -import sys - -# === DEBUG SETUP (AgentEx CLI Debug Support) === -if os.getenv("AGENTEX_DEBUG_ENABLED") == "true": - try: - import debugpy - debug_port = int(os.getenv("AGENTEX_DEBUG_PORT", "5679")) - debug_type = os.getenv("AGENTEX_DEBUG_TYPE", "acp") - wait_for_attach = os.getenv("AGENTEX_DEBUG_WAIT_FOR_ATTACH", "false").lower() == "true" - - # Configure debugpy - debugpy.configure(subProcess=False) - debugpy.listen(debug_port) - - print(f"๐Ÿ› [{debug_type.upper()}] Debug server listening on port {debug_port}") - - if wait_for_attach: - print(f"โณ [{debug_type.upper()}] Waiting for debugger to attach...") - debugpy.wait_for_client() - print(f"โœ… [{debug_type.upper()}] Debugger attached!") - else: - print(f"๐Ÿ“ก [{debug_type.upper()}] Ready for debugger attachment") - - except ImportError: - print("โŒ debugpy not available. Install with: pip install debugpy") - sys.exit(1) - except Exception as e: - print(f"โŒ Debug setup failed: {e}") - sys.exit(1) -# === END DEBUG SETUP === - -from agentex.lib.types.fastacp import TemporalACPConfig -from agentex.lib.sdk.fastacp.fastacp import FastACP - -# Create the ACP server -acp = FastACP.create( - acp_type="async", - config=TemporalACPConfig( - # When deployed to the cluster, the Temporal address will automatically be set to the cluster address - # For local development, we set the address manually to talk to the local Temporal service set up via docker compose - type="temporal", - temporal_address=os.getenv("TEMPORAL_ADDRESS", "localhost:7233") - ) -) - - -# Notice that we don't need to register any handlers when we use type="temporal" -# If you look at the code in agentex.sdk.fastacp.impl.temporal_acp -# You can see that these handlers are automatically registered when the ACP is created - -# @acp.on_task_create -# This will be handled by the method in your workflow that is decorated with @workflow.run - -# @acp.on_task_event_send -# This will be handled by the method in your workflow that is decorated with @workflow.signal(name=SignalName.RECEIVE_MESSAGE) - -# @acp.on_task_cancel -# This does not need to be handled by your workflow. 
-# It is automatically handled by the temporal client which cancels the workflow directly \ No newline at end of file diff --git a/examples/tutorials/10_async/10_temporal/030_custom_activities/project/custom_activites.py b/examples/tutorials/10_async/10_temporal/030_custom_activities/project/custom_activites.py deleted file mode 100644 index 36b5c9d2..00000000 --- a/examples/tutorials/10_async/10_temporal/030_custom_activities/project/custom_activites.py +++ /dev/null @@ -1,111 +0,0 @@ -import asyncio -from typing import Any, List - -from pydantic import BaseModel -from temporalio import activity - -from agentex.lib import adk -from agentex.lib.utils.logging import make_logger -from agentex.types.text_content import TextContent - -logger = make_logger(__name__) - - -PROCESS_BATCH_EVENTS_ACTIVITY = "process_batch_events" -class ProcessBatchEventsActivityParams(BaseModel): - events: List[Any] - batch_number: int - - -REPORT_PROGRESS_ACTIVITY = "report_progress" -class ReportProgressActivityParams(BaseModel): - num_batches_processed: int - num_batches_failed: int - num_batches_running: int - task_id: str - - -COMPLETE_WORKFLOW_ACTIVITY = "complete_workflow" -class CompleteWorkflowActivityParams(BaseModel): - task_id: str - - -class CustomActivities: - def __init__(self): - self._batch_size = 5 - - - @activity.defn(name=PROCESS_BATCH_EVENTS_ACTIVITY) - async def process_batch_events(self, params: ProcessBatchEventsActivityParams) -> bool: - """ - This activity will take a list of events and process them. - - This is a simple example that demonstrates how to: - 1. Create a custom Temporal activity - 2. Accept structured parameters via Pydantic models - 3. Process batched data - 4. Simulate work with async sleep - 5. Return results back to the workflow - - In a real-world scenario, you could: - - Make database calls (batch inserts, updates) - - Call external APIs (payment processing, email sending) - - Perform heavy computations (ML model inference, data analysis) - - Generate reports or files - - Any other business logic that benefits from Temporal's reliability - - The key benefit is that this activity will automatically: - - Retry on failures (with configurable retry policies) - - Be durable across worker restarts - - Provide observability and metrics - - Handle timeouts and cancellations gracefully - """ - logger.info(f"[Batch {params.batch_number}] 🚀 Starting to process batch of {len(params.events)} events") - - # Process each event with some simulated work - for i, event in enumerate(params.events): - logger.info(f"[Batch {params.batch_number}] 📄 Processing event {i+1}/{len(params.events)}: {event}") - - # Simulate processing time - in reality this could be: - # - Database operations, API calls, file processing, ML inference, etc. - await asyncio.sleep(2) - - logger.info(f"[Batch {params.batch_number}] ✅ Event {i+1} processed successfully") - - logger.info(f"[Batch {params.batch_number}] 🎉 Batch processing complete! Processed {len(params.events)} events") - - # Return success - in reality you might return processing results, IDs, stats, etc. - return True - - @activity.defn(name=REPORT_PROGRESS_ACTIVITY) - async def report_progress(self, params: ReportProgressActivityParams) -> None: - """ - This activity will report progress to an external system. - - NORMALLY, this would be a call to an external system to report progress. For example, this could - be a call to an email service to send an update email to the user. - - In this example, we'll just log the progress to the console. 
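-
-        From the workflow side, an invocation might look like this (the
-        activity name and params class are the ones defined above; the
-        timeout value is illustrative):
-
-            await workflow.execute_activity(
-                REPORT_PROGRESS_ACTIVITY,
-                ReportProgressActivityParams(
-                    num_batches_processed=1,
-                    num_batches_failed=0,
-                    num_batches_running=2,
-                    task_id=task_id,
-                ),
-                start_to_close_timeout=timedelta(seconds=30),
-            )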
- """ - logger.info(f"๐Ÿ“Š Progress Update - num_batches_processed: {params.num_batches_processed}, num_batches_failed: {params.num_batches_failed}, num_batches_running: {params.num_batches_running}") - - await adk.messages.create( - task_id=params.task_id, - content=TextContent( - author="agent", - content=f"๐Ÿ“Š Progress Update - num_batches_processed: {params.num_batches_processed}, num_batches_failed: {params.num_batches_failed}, num_batches_running: {params.num_batches_running}", - ), - ) - - @activity.defn(name=COMPLETE_WORKFLOW_ACTIVITY) - async def complete_workflow(self, params: CompleteWorkflowActivityParams) -> None: - """ - This activity will complete the workflow. - - Typically here you may do anything like: - - Send a final email to the user - - Send a final message to the user - - Update a job status in a database to completed - """ - logger.info(f"๐ŸŽ‰ Workflow Complete! Task ID: {params.task_id}") - diff --git a/examples/tutorials/10_async/10_temporal/030_custom_activities/project/run_worker.py b/examples/tutorials/10_async/10_temporal/030_custom_activities/project/run_worker.py deleted file mode 100644 index 44ff5530..00000000 --- a/examples/tutorials/10_async/10_temporal/030_custom_activities/project/run_worker.py +++ /dev/null @@ -1,44 +0,0 @@ -import asyncio - -from project.workflow import At030CustomActivitiesWorkflow -from agentex.lib.utils.debug import setup_debug_if_enabled -from project.custom_activites import CustomActivities -from agentex.lib.utils.logging import make_logger -from agentex.lib.environment_variables import EnvironmentVariables -from agentex.lib.core.temporal.activities import get_all_activities -from agentex.lib.core.temporal.workers.worker import AgentexWorker - -environment_variables = EnvironmentVariables.refresh() - -logger = make_logger(__name__) - - -async def main(): - # Setup debug mode if enabled - setup_debug_if_enabled() - - task_queue_name = environment_variables.WORKFLOW_TASK_QUEUE - if task_queue_name is None: - raise ValueError("WORKFLOW_TASK_QUEUE is not set") - - # Create a worker with automatic tracing - worker = AgentexWorker( - task_queue=task_queue_name, - ) - - agentex_activities = get_all_activities() - - custom_activities_use_case = CustomActivities() - all_activites = [ - custom_activities_use_case.report_progress, - custom_activities_use_case.process_batch_events, - *agentex_activities, - ] - - await worker.run( - activities=all_activites, - workflow=At030CustomActivitiesWorkflow, - ) - -if __name__ == "__main__": - asyncio.run(main()) \ No newline at end of file diff --git a/examples/tutorials/10_async/10_temporal/030_custom_activities/project/shared_models.py b/examples/tutorials/10_async/10_temporal/030_custom_activities/project/shared_models.py deleted file mode 100644 index 2d894a9f..00000000 --- a/examples/tutorials/10_async/10_temporal/030_custom_activities/project/shared_models.py +++ /dev/null @@ -1,14 +0,0 @@ -from pydantic import BaseModel - - -class StateModel(BaseModel): - num_batches_processed: int = 0 - num_batches_failed: int = 0 - total_events_processed: int = 0 - total_events_dropped: int = 0 - total_events_enqueued: int = 0 - - -class IncomingEventData(BaseModel): - clear_queue: bool = False - cancel_running_tasks: bool = False \ No newline at end of file diff --git a/examples/tutorials/10_async/10_temporal/030_custom_activities/project/workflow.py b/examples/tutorials/10_async/10_temporal/030_custom_activities/project/workflow.py deleted file mode 100644 index 0fa85bbb..00000000 --- 
a/examples/tutorials/10_async/10_temporal/030_custom_activities/project/workflow.py +++ /dev/null @@ -1,216 +0,0 @@ -import asyncio -from typing import Any, List, override -from datetime import timedelta - -from temporalio import workflow -from temporalio.common import RetryPolicy - -from agentex.lib import adk -from agentex.lib.types.acp import SendEventParams, CreateTaskParams -from project.shared_models import StateModel, IncomingEventData -from project.workflow_utils import BatchProcessingUtils -from project.custom_activites import ( - REPORT_PROGRESS_ACTIVITY, - COMPLETE_WORKFLOW_ACTIVITY, - ReportProgressActivityParams, - CompleteWorkflowActivityParams, -) -from agentex.lib.utils.logging import make_logger -from agentex.types.text_content import TextContent -from agentex.lib.environment_variables import EnvironmentVariables -from agentex.lib.core.temporal.types.workflow import SignalName -from agentex.lib.core.temporal.workflows.workflow import BaseWorkflow - -environment_variables = EnvironmentVariables.refresh() - -if environment_variables.WORKFLOW_NAME is None: - raise ValueError("Environment variable WORKFLOW_NAME is not set") - -if not environment_variables.AGENT_NAME: - raise ValueError("Environment variable AGENT_NAME is not set") - -logger = make_logger(__name__) - - -WAIT_TIMEOUT = 300 -BATCH_SIZE = 5 -MAX_QUEUE_DEPTH = 50 - - -@workflow.defn(name=environment_variables.WORKFLOW_NAME) -class At030CustomActivitiesWorkflow(BaseWorkflow): - """ - Simple tutorial workflow demonstrating custom activities with concurrent processing. - - Key Learning Points: - 1. Queue incoming events using Temporal signals - 2. Process events in batches when enough arrive - 3. Use asyncio.create_task() for concurrent processing - 4. Execute custom activities from within workflows - 5. Handle workflow completion cleanly - """ - def __init__(self): - super().__init__(display_name=environment_variables.AGENT_NAME) - self._incoming_queue: asyncio.Queue[Any] = asyncio.Queue() - self._processing_tasks: List[asyncio.Task[Any]] = [] - self._batch_size = BATCH_SIZE - self._state: StateModel = StateModel() # initialized eagerly so signals that arrive before on_task_create runs can update it safely - - - @workflow.signal(name=SignalName.RECEIVE_EVENT) - @override - async def on_task_event_send(self, params: SendEventParams) -> None: - if params.event.content is None: - return - - if params.event.content.type == "text": - if self._incoming_queue.qsize() >= MAX_QUEUE_DEPTH: - logger.warning(f"Queue is at max depth of {MAX_QUEUE_DEPTH}. Dropping event.") - if self._state: - self._state.total_events_dropped += 1 - else: - await self._incoming_queue.put(params.event.content) - return - - elif params.event.content.type == "data": - received_data = params.event.content.data - try: - received_data = IncomingEventData.model_validate(received_data) - except Exception as e: - logger.error(f"Error parsing received data: {e}. Dropping event.") - return - - if received_data.clear_queue: - await BatchProcessingUtils.handle_queue_clear(self._incoming_queue, params.task.id) - - if received_data.cancel_running_tasks: - await BatchProcessingUtils.handle_task_cancellation(self._processing_tasks, params.task.id) - - if not received_data.clear_queue and not received_data.cancel_running_tasks: - logger.info(f"Received IncomingEventData: {received_data} with no known action.") - else: - logger.info(f"Received event: {params.event.content} with no action.") - - - @workflow.run - @override - async def on_task_create(self, params: CreateTaskParams) -> None: - logger.info(f"Received task create params: {params}") - - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content=f"๐Ÿš€ Starting batch processing! I'll collect events into batches of {self._batch_size} and process them using custom activities. I'll also report progress to you as I go...", - ), - ) - - batch_number = 0 - - # Simple event processing loop with progress tracking - while True: - # Check for completed tasks and update progress - self._processing_tasks = await BatchProcessingUtils.update_progress(self._processing_tasks, self._state, params.task.id) - - # Wait for enough events to form a batch, or timeout - try: - await workflow.wait_condition( - lambda: self._incoming_queue.qsize() >= self._batch_size, - timeout=WAIT_TIMEOUT - ) - except asyncio.TimeoutError: - logger.info(f"โฐ Timeout after {WAIT_TIMEOUT} seconds - ending workflow") - break - - # We have enough events - start processing them as a batch - data_to_process: List[Any] = [] - await BatchProcessingUtils.dequeue_pending_data(self._incoming_queue, data_to_process, self._batch_size) - - if data_to_process: - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content=f"๐Ÿ“ฆ Starting batch #{batch_number} with {len(data_to_process)} events using asyncio.create_task()", - ), - ) - - # Create concurrent task for this batch - this is the key learning point!
- task = asyncio.create_task( - BatchProcessingUtils.process_batch_concurrent( - events=data_to_process, - batch_number=batch_number, - task_id=params.task.id - ) - ) - self._processing_tasks.append(task) - - logger.info(f"๐Ÿ“ Tutorial Note: Created asyncio.create_task() for batch #{batch_number} to run asynchronously") - batch_number += 1 - - # Check progress again immediately to show real-time updates - self._processing_tasks = await BatchProcessingUtils.update_progress(self._processing_tasks, self._state, params.task.id) - - # Process any remaining events that didn't form a complete batch - if self._incoming_queue.qsize() > 0: - data_to_process: List[Any] = [] - await BatchProcessingUtils.dequeue_pending_data(self._incoming_queue, data_to_process, self._incoming_queue.qsize()) - - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content=f"๐Ÿ”„ Processing final {len(data_to_process)} events that didn't form a complete batch.", - ), - ) - - # Now, add another batch to process the remaining events - task = asyncio.create_task( - BatchProcessingUtils.process_batch_concurrent( - events=data_to_process, - batch_number=batch_number, - task_id=params.task.id - ) - ) - self._processing_tasks.append(task) - batch_number += 1 - - # Wait for all remaining tasks to complete, with real-time progress updates - await BatchProcessingUtils.wait_for_remaining_tasks(self._processing_tasks, self._state, params.task.id) - await workflow.execute_activity( - REPORT_PROGRESS_ACTIVITY, - ReportProgressActivityParams( - num_batches_processed=self._state.num_batches_processed, - num_batches_failed=self._state.num_batches_failed, - num_batches_running=0, - task_id=params.task.id - ), - start_to_close_timeout=timedelta(minutes=1), - retry_policy=RetryPolicy(maximum_attempts=3) - ) - - final_summary = ( - f"โœ… Workflow Complete! Final Summary:\n" - f"โ€ข Batches completed successfully: {self._state.num_batches_processed} โœ…\n" - f"โ€ข Batches failed: {self._state.num_batches_failed} โŒ\n" - f"โ€ข Total events processed: {self._state.total_events_processed}\n" - f"โ€ข Events dropped (queue full): {self._state.total_events_dropped}\n" - f"๐Ÿ“ Tutorial completed - you learned how to use asyncio.create_task() with Temporal custom activities!"
- ) - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content=final_summary - ), - ) - - await workflow.execute_activity( - COMPLETE_WORKFLOW_ACTIVITY, - CompleteWorkflowActivityParams( - task_id=params.task.id - ), - start_to_close_timeout=timedelta(minutes=1), - retry_policy=RetryPolicy(maximum_attempts=3) - ) - diff --git a/examples/tutorials/10_async/10_temporal/030_custom_activities/project/workflow_utils.py b/examples/tutorials/10_async/10_temporal/030_custom_activities/project/workflow_utils.py deleted file mode 100644 index da04a8da..00000000 --- a/examples/tutorials/10_async/10_temporal/030_custom_activities/project/workflow_utils.py +++ /dev/null @@ -1,204 +0,0 @@ -import asyncio -from typing import Any, Dict, List -from datetime import timedelta - -from temporalio import workflow -from temporalio.common import RetryPolicy - -from agentex.lib import adk -from project.shared_models import StateModel -from project.custom_activites import ( - REPORT_PROGRESS_ACTIVITY, - PROCESS_BATCH_EVENTS_ACTIVITY, - ReportProgressActivityParams, - ProcessBatchEventsActivityParams, -) -from agentex.lib.utils.logging import make_logger -from agentex.types.text_content import TextContent - -logger = make_logger(__name__) - - -class BatchProcessingUtils: - """ - Utility class containing batch processing logic extracted from the main workflow. - This keeps the workflow clean while maintaining all the same functionality. - """ - - @staticmethod - async def dequeue_pending_data(queue: asyncio.Queue[Any], data_to_process: List[Any], max_items: int) -> None: - """ - Dequeue exactly the number of items requested, maintaining FIFO order. - This is much cleaner than dequeuing everything and putting items back. - """ - items_dequeued = 0 - while items_dequeued < max_items and not queue.empty(): - try: - item = queue.get_nowait() - data_to_process.append(item) - items_dequeued += 1 - except Exception: - # Queue became empty while we were dequeuing - break - - @staticmethod - async def process_batch_concurrent(events: List[Any], batch_number: int, task_id: str) -> Dict[str, Any]: - """ - Process a single batch using a custom activity. - This demonstrates how asyncio.create_task() allows multiple batches to run concurrently. - Returns batch info for state tracking by the main workflow thread. - """ - try: - logger.info(f"๐Ÿš€ Batch #{batch_number}: Starting concurrent processing of {len(events)} events") - - # This is the key: calling a custom activity from within the workflow - await workflow.execute_activity( - PROCESS_BATCH_EVENTS_ACTIVITY, - ProcessBatchEventsActivityParams( - events=events, - batch_number=batch_number - ), - start_to_close_timeout=timedelta(minutes=5), - retry_policy=RetryPolicy(maximum_attempts=3) - ) - - await adk.messages.create( - task_id=task_id, - content=TextContent( - author="agent", - content=f"โœ… Batch #{batch_number} completed! 
Processed {len(events)} events using custom activity.", - ), - ) - - logger.info(f"โœ… Batch #{batch_number}: Processing completed successfully") - return {"success": True, "events_processed": len(events), "batch_number": batch_number} - - except Exception as e: - await adk.messages.create( - task_id=task_id, - content=TextContent( - author="agent", - content=f"โŒ Batch #{batch_number} failed: {str(e)}", - ), - ) - logger.error(f"โŒ Batch #{batch_number} failed: {str(e)}") - return {"success": False, "events_processed": 0, "batch_number": batch_number, "error": str(e)} - - @staticmethod - async def update_progress(processing_tasks: List[asyncio.Task[Any]], state: StateModel, task_id: str) -> List[asyncio.Task[Any]]: - """ - Check for completed tasks and update progress in real-time. - This is key for tutorials - showing progress as things happen! - - Returns the updated list of still-running tasks. - """ - if not processing_tasks: - return processing_tasks - - # Check which tasks have completed - completed_tasks: List[asyncio.Task[Any]] = [] - still_running: List[asyncio.Task[Any]] = [] - - for task in processing_tasks: - if task.done(): - completed_tasks.append(task) - else: - still_running.append(task) - - # Update state based on completed tasks - if completed_tasks: - for task in completed_tasks: - try: - result = await task # Get the result - if isinstance(result, dict) and result.get("success"): - # Successful processing - update state - state.num_batches_processed += 1 - state.total_events_processed += result.get("events_processed", 0) - else: - # Failed processing - state.num_batches_failed += 1 - except Exception: - # Task failed with exception - state.num_batches_failed += 1 - - await workflow.execute_activity( - REPORT_PROGRESS_ACTIVITY, - ReportProgressActivityParams( - num_batches_processed=state.num_batches_processed, - num_batches_failed=state.num_batches_failed, - num_batches_running=len(still_running), - task_id=task_id, - ), - start_to_close_timeout=timedelta(minutes=1), - retry_policy=RetryPolicy(maximum_attempts=3) - ) - return still_running - - @staticmethod - async def handle_queue_clear(queue: asyncio.Queue[Any], task_id: str) -> int: - """ - Handle clearing the event queue and return the number of events cleared. - """ - num_events = queue.qsize() - logger.info(f"Clearing queue of size: {num_events}") - while not queue.empty(): - queue.get_nowait() - - await adk.messages.create( - task_id=task_id, - content=TextContent( - author="agent", - content=f"I just cleared the queue of events that were received. Total cleared events: {num_events}", - ), - ) - return num_events - - @staticmethod - async def handle_task_cancellation(processing_tasks: List[asyncio.Task[Any]], task_id: str) -> int: - """ - Handle cancelling all running batch processing tasks. - Returns the number of tasks cancelled. - """ - # Simple cancellation for tutorial purposes - cancelled_count = len([task for task in processing_tasks if not task.done()]) - for task in processing_tasks: - if not task.done(): - task.cancel() - - processing_tasks.clear() - await adk.messages.create( - task_id=task_id, - content=TextContent( - author="agent", - content=f"โ›” Cancelled {cancelled_count} running tasks. This shows how asyncio.create_task() tasks can be cancelled!", - ), - ) - return cancelled_count - - @staticmethod - async def wait_for_remaining_tasks(processing_tasks: List[asyncio.Task[Any]], state: Any, task_id: str) -> None: - """ - Wait for all remaining tasks to complete, with real-time progress updates. 
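- Uses workflow.wait_condition with a short timeout so progress is re-reported every few seconds until every batch task has finished.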
- """ - while processing_tasks: - await adk.messages.create( - task_id=task_id, - content=TextContent( - author="agent", - content=f"โณ Waiting for {len(processing_tasks)} remaining batches to complete...", - ), - ) - - # Wait a bit, then update progress - try: - await workflow.wait_condition( - lambda: not any(task for task in processing_tasks if not task.done()), - timeout=10 # Check progress every 10 seconds - ) - # All tasks are done! - processing_tasks[:] = await BatchProcessingUtils.update_progress(processing_tasks, state, task_id) - break - except asyncio.TimeoutError: - # Some tasks still running, update progress and continue waiting - processing_tasks[:] = await BatchProcessingUtils.update_progress(processing_tasks, state, task_id) - continue \ No newline at end of file diff --git a/examples/tutorials/10_async/10_temporal/030_custom_activities/pyproject.toml b/examples/tutorials/10_async/10_temporal/030_custom_activities/pyproject.toml deleted file mode 100644 index cc53d065..00000000 --- a/examples/tutorials/10_async/10_temporal/030_custom_activities/pyproject.toml +++ /dev/null @@ -1,42 +0,0 @@ -[build-system] -requires = ["hatchling"] -build-backend = "hatchling.build" - -[project] -name = "030_custom_activities" -version = "0.1.0" -description = "An AgentEx agent with custom activities" -requires-python = ">=3.12" -dependencies = [ - "agentex-sdk", - "ipykernel>=6.30.1", - "jupyter-server>=2.16.0", - "jupyterlab>=4.4.5", - "nbconvert>=7.16.6", - "nbformat>=5.10.4", - "notebook>=7.4.5", - "scale-gp", - "temporalio", - "yaspin>=3.1.0", -] - -[project.optional-dependencies] -dev = [ - "jupyter", - "pytest", - "black", - "isort", - "flake8", - "debugpy>=1.8.15", -] - -[tool.hatch.build.targets.wheel] -packages = ["project"] - -[tool.black] -line-length = 88 -target-version = ['py312'] - -[tool.isort] -profile = "black" -line_length = 88 diff --git a/examples/tutorials/10_async/10_temporal/030_custom_activities/tests/test_agent.py b/examples/tutorials/10_async/10_temporal/030_custom_activities/tests/test_agent.py deleted file mode 100644 index b839332c..00000000 --- a/examples/tutorials/10_async/10_temporal/030_custom_activities/tests/test_agent.py +++ /dev/null @@ -1,136 +0,0 @@ -""" -Sample tests for AgentEx ACP agent. - -This test suite demonstrates how to test the main AgentEx API functions: -- Non-streaming event sending and polling -- Streaming event sending - -To run these tests: -1. Make sure the agent is running (via docker-compose or `agentex agents run`) -2. Set the AGENTEX_API_BASE_URL environment variable if not using default -3. 
Run: pytest test_agent.py -v - -Configuration: -- AGENTEX_API_BASE_URL: Base URL for the AgentEx server (default: http://localhost:5003) -- AGENT_NAME: Name of the agent to test (default: at030-custom-activities) -""" - -import os - -import pytest -import pytest_asyncio - -from agentex import AsyncAgentex - -# Configuration from environment variables -AGENTEX_API_BASE_URL = os.environ.get("AGENTEX_API_BASE_URL", "http://localhost:5003") -AGENT_NAME = os.environ.get("AGENT_NAME", "at030-custom-activities") - - -@pytest_asyncio.fixture -async def client(): - """Create an AsyncAgentex client instance for testing.""" - client = AsyncAgentex(base_url=AGENTEX_API_BASE_URL) - yield client - await client.close() - - -@pytest.fixture -def agent_name(): - """Return the agent name for testing.""" - return AGENT_NAME - - -@pytest_asyncio.fixture -async def agent_id(client, agent_name): - """Retrieve the agent ID based on the agent name.""" - agents = await client.agents.list() - for agent in agents: - if agent.name == agent_name: - return agent.id - raise ValueError(f"Agent with name {agent_name} not found.") - - -class TestNonStreamingEvents: - """Test non-streaming event sending and polling.""" - - @pytest.mark.asyncio - async def test_send_event_and_poll(self, client: AsyncAgentex, agent_id: str): - """Test sending an event and polling for the response.""" - # TODO: Create a task for this conversation - # task_response = await client.agents.create_task(agent_id, params=ParamsCreateTaskRequest(name=uuid.uuid1().hex)) - # task = task_response.result - # assert task is not None - - # TODO: Poll for the initial task creation message (if your agent sends one) - # async for message in poll_messages( - # client=client, - # task_id=task.id, - # timeout=30, - # sleep_interval=1.0, - # ): - # assert isinstance(message, TaskMessage) - # if message.content and message.content.type == "text" and message.content.author == "agent": - # # Check for your expected initial message - # assert "expected initial text" in message.content.content - # break - - # TODO: Send an event and poll for response using the yielding helper function - # user_message = "Your test message here" - # async for message in send_event_and_poll_yielding( - # client=client, - # agent_id=agent_id, - # task_id=task.id, - # user_message=user_message, - # timeout=30, - # sleep_interval=1.0, - # ): - # assert isinstance(message, TaskMessage) - # if message.content and message.content.type == "text" and message.content.author == "agent": - # # Check for your expected response - # assert "expected response text" in message.content.content - # break - pass - - -class TestStreamingEvents: - """Test streaming event sending.""" - - @pytest.mark.asyncio - async def test_send_event_and_stream(self, client: AsyncAgentex, agent_id: str): - """Test sending an event and streaming the response.""" - # TODO: Create a task for this conversation - # task_response = await client.agents.create_task(agent_id, params=ParamsCreateTaskRequest(name=uuid.uuid1().hex)) - # task = task_response.result - # assert task is not None - - # user_message = "Your test message here" - - # # Collect events from stream - # all_events = [] - - # async def collect_stream_events(): - # async for event in stream_agent_response( - # client=client, - # task_id=task.id, - # timeout=30, - # ): - # all_events.append(event) - - # # Start streaming task - # stream_task = asyncio.create_task(collect_stream_events()) - - # # Send the event - # event_content = TextContentParam(type="text", 
author="user", content=user_message) - # await client.agents.send_event(agent_id=agent_id, params={"task_id": task.id, "content": event_content}) - - # # Wait for streaming to complete - # await stream_task - - # # TODO: Add your validation here - # assert len(all_events) > 0, "No events received in streaming response" - pass - - -if __name__ == "__main__": - pytest.main([__file__, "-v"]) \ No newline at end of file diff --git a/examples/tutorials/10_async/10_temporal/050_agent_chat_guardrails/.dockerignore b/examples/tutorials/10_async/10_temporal/050_agent_chat_guardrails/.dockerignore deleted file mode 100644 index c2d7fca4..00000000 --- a/examples/tutorials/10_async/10_temporal/050_agent_chat_guardrails/.dockerignore +++ /dev/null @@ -1,43 +0,0 @@ -# Python -__pycache__/ -*.py[cod] -*$py.class -*.so -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -*.egg-info/ -.installed.cfg -*.egg - -# Environments -.env** -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# IDE -.idea/ -.vscode/ -*.swp -*.swo - -# Git -.git -.gitignore - -# Misc -.DS_Store diff --git a/examples/tutorials/10_async/10_temporal/050_agent_chat_guardrails/Dockerfile b/examples/tutorials/10_async/10_temporal/050_agent_chat_guardrails/Dockerfile deleted file mode 100644 index ef1ea0bf..00000000 --- a/examples/tutorials/10_async/10_temporal/050_agent_chat_guardrails/Dockerfile +++ /dev/null @@ -1,59 +0,0 @@ -# syntax=docker/dockerfile:1.3 -FROM python:3.12-slim -COPY --from=ghcr.io/astral-sh/uv:0.6.4 /uv /uvx /bin/ - -# Install system dependencies -RUN apt-get update && apt-get install -y \ - htop \ - vim \ - curl \ - tar \ - python3-dev \ - postgresql-client \ - build-essential \ - libpq-dev \ - gcc \ - cmake \ - netcat-openbsd \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - -# Install tctl (Temporal CLI) -RUN curl -L https://github.com/temporalio/tctl/releases/download/v1.18.1/tctl_1.18.1_linux_arm64.tar.gz -o /tmp/tctl.tar.gz && \ - tar -xzf /tmp/tctl.tar.gz -C /usr/local/bin && \ - chmod +x /usr/local/bin/tctl && \ - rm /tmp/tctl.tar.gz - -RUN uv pip install --system --upgrade pip setuptools wheel - -ENV UV_HTTP_TIMEOUT=1000 - -# Copy pyproject.toml and README.md to install dependencies -COPY 10_async/10_temporal/050_agent_chat_guardrails/pyproject.toml /app/050_agent_chat_guardrails/pyproject.toml -COPY 10_async/10_temporal/050_agent_chat_guardrails/README.md /app/050_agent_chat_guardrails/README.md - -WORKDIR /app/050_agent_chat_guardrails - -# Copy the project code -COPY 10_async/10_temporal/050_agent_chat_guardrails/project /app/050_agent_chat_guardrails/project - -# Copy the test files -COPY 10_async/10_temporal/050_agent_chat_guardrails/tests /app/050_agent_chat_guardrails/tests - -# Copy shared test utilities -COPY test_utils /app/test_utils - -# Install the required Python packages with dev dependencies (includes pytest) -RUN uv pip install --system .[dev] pytest-asyncio httpx - -# Set environment variables -ENV PYTHONPATH=/app - -# Set test environment variables -ENV AGENT_NAME=at050-agent-chat-guardrails - -# Run the ACP server using uvicorn -CMD ["uvicorn", "project.acp:acp", "--host", "0.0.0.0", "--port", "8000"] - -# When we deploy the worker, we will replace the CMD with the following -# CMD ["python", "-m", "run_worker"] \ No newline at end of file diff --git a/examples/tutorials/10_async/10_temporal/050_agent_chat_guardrails/README.md b/examples/tutorials/10_async/10_temporal/050_agent_chat_guardrails/README.md deleted file mode 
100644 index b6e192b5..00000000 --- a/examples/tutorials/10_async/10_temporal/050_agent_chat_guardrails/README.md +++ /dev/null @@ -1,70 +0,0 @@ -# [Temporal] Agent Chat with Guardrails - -This tutorial demonstrates how to implement streaming, multi-turn, tool-enabled chat with input and output guardrails using Temporal workflows in AgentEx agents. - -## What You'll Learn -- Adding safety guardrails to conversational agents -- Input validation and output filtering -- Implementing content moderation with Temporal -- When to block vs warn vs allow content - -## Prerequisites -- Development environment set up (see [main repo README](https://github.com/scaleapi/scale-agentex)) -- Backend services running: `make dev` from repository root -- Temporal UI available at http://localhost:8233 -- Understanding of agent chat patterns (see [010_agent_chat](../010_agent_chat/)) - -## Quick Start - -```bash -cd examples/tutorials/10_async/10_temporal/050_agent_chat_guardrails -uv run agentex agents run --manifest manifest.yaml -``` - -**Monitor:** Open Temporal UI at http://localhost:8233 to see guardrail checks as workflow activities. - -## Guardrails - -### Input Guardrails -- **Spaghetti Guardrail**: Blocks any mention of "spaghetti" in user messages -- **Soup Guardrail**: Blocks any mention of "soup" in user messages - -### Output Guardrails -- **Pizza Guardrail**: Prevents the AI from mentioning "pizza" in responses -- **Sushi Guardrail**: Prevents the AI from mentioning "sushi" in responses - -## Testing the Guardrails - -To see the guardrails in action: - -1. **Test Input Guardrails:** - - Try: "Tell me about spaghetti" - - Try: "What's your favorite soup?" - - The guardrails will block these messages before they reach the AI - -2. **Test Output Guardrails:** - - Ask: "What are popular Italian foods?" (may trigger pizza guardrail) - - Ask: "What are popular Japanese foods?" (may trigger sushi guardrail) - - The AI may generate responses containing these words, but the guardrails will block them - -## Implementation Details - -The guardrails are implemented as functions that: -- Check the input/output for specific content -- Return a `GuardrailFunctionOutput` with: - - `tripwire_triggered`: Whether to block the content - - `output_info`: Metadata about the check - - `rejection_message`: Custom message shown when content is blocked - -See `workflow.py` for the complete implementation; a minimal sketch of the guardrail shape appears at the end of this README. - -## When to Use -- Content moderation and safety requirements -- Compliance with regulatory restrictions -- Brand safety and reputation protection -- Preventing agents from discussing sensitive topics - -## Why This Matters -Production agents need safety rails. This pattern shows how to implement content filtering without sacrificing the benefits of Temporal workflows. Guardrail checks become durable activities, visible in Temporal UI for audit and debugging.
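-## Guardrail Sketch
-
-For reference, here is a minimal, hypothetical sketch of the guardrail shape described under Implementation Details. The `GuardrailFunctionOutput` dataclass below is only a stand-in that mirrors the fields listed above; the real class, its import path, and how guardrails are wired into the workflow live in `workflow.py`.
-
-```python
-# Minimal sketch, assuming only the fields listed under Implementation Details.
-# The dataclass is a stand-in for the project's real GuardrailFunctionOutput.
-from dataclasses import dataclass, field
-from typing import Any, Optional
-
-
-@dataclass
-class GuardrailFunctionOutput:
-    tripwire_triggered: bool                          # whether to block the content
-    output_info: dict[str, Any] = field(default_factory=dict)  # metadata about the check
-    rejection_message: Optional[str] = None           # message shown when blocked
-
-
-def spaghetti_input_guardrail(user_message: str) -> GuardrailFunctionOutput:
-    """Trip the guardrail when the user message mentions spaghetti."""
-    tripped = "spaghetti" in user_message.lower()
-    return GuardrailFunctionOutput(
-        tripwire_triggered=tripped,
-        output_info={"matched_term": "spaghetti"} if tripped else {},
-        rejection_message=(
-            "I'm sorry, but I cannot process messages about spaghetti."
-            if tripped
-            else None
-        ),
-    )
-```
-
-Output guardrails follow the same shape, but run against the model's response before it is shown to the user.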
- -**Next:** [060_open_ai_agents_sdk_hello_world](../060_open_ai_agents_sdk_hello_world/) - Integrate OpenAI Agents SDK with Temporal \ No newline at end of file diff --git a/examples/tutorials/10_async/10_temporal/050_agent_chat_guardrails/dev.ipynb b/examples/tutorials/10_async/10_temporal/050_agent_chat_guardrails/dev.ipynb deleted file mode 100644 index ab87b676..00000000 --- a/examples/tutorials/10_async/10_temporal/050_agent_chat_guardrails/dev.ipynb +++ /dev/null @@ -1,1196 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "id": "0", - "metadata": {}, - "outputs": [], - "source": [ - "from agentex import Agentex\n", - "\n", - "client = Agentex(base_url=\"http://localhost:5003\")" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "1", - "metadata": {}, - "outputs": [], - "source": [ - "AGENT_NAME = \"at050-agent-chat-guardrails\"" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "2", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Task(id='0577cdc8-6c6a-4ef7-bc5c-85d27b9327e7', created_at=datetime.datetime(2025, 8, 27, 21, 33, 21, 976210, tzinfo=TzInfo(UTC)), name='7ff11264-task', params={}, status='RUNNING', status_reason='Task created, forwarding to ACP server', updated_at=datetime.datetime(2025, 8, 27, 21, 33, 21, 976210, tzinfo=TzInfo(UTC)))\n" - ] - } - ], - "source": [ - "# (REQUIRED) Create a new task. For Agentic agents, you must create a task for messages to be associated with.\n", - "import uuid\n", - "\n", - "rpc_response = client.agents.create_task(\n", - " agent_name=AGENT_NAME,\n", - " params={\n", - " \"name\": f\"{str(uuid.uuid4())[:8]}-task\",\n", - " \"params\": {}\n", - " }\n", - ")\n", - "\n", - "task = rpc_response.result\n", - "print(task)" - ] - }, - { - "cell_type": "markdown", - "id": "645fb612", - "metadata": {}, - "source": [ - "## Testing Guardrails\n", - "\n", - "We have configured 4 guardrails:\n", - "- **Input Guardrails**: Spaghetti, Soup\n", - "- **Output Guardrails**: Pizza, Sushi\n" - ] - }, - { - "cell_type": "markdown", - "id": "11d260f4", - "metadata": {}, - "source": [ - "### Test 2: Spaghetti Input Guardrail\n" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "3", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Event(id='b243f073-a7cb-4420-b513-305c2b6aae5d', agent_id='a1abb90e-c673-4448-a4e2-841170568840', sequence_id=1844, task_id='0577cdc8-6c6a-4ef7-bc5c-85d27b9327e7', content=TextContent(author='user', content='Find me a recipe on spaghetti', attachments=None, format='plain', style='static', type='text'), created_at=datetime.datetime(2025, 8, 27, 21, 33, 22, 16063, tzinfo=TzInfo(UTC)))\n" - ] - } - ], - "source": [ - "# Send an event to the agent\n", - "\n", - "# The response is expected to be a list of TaskMessage objects, which is a union of the following types:\n", - "# - TextContent: A message with just text content \n", - "# - DataContent: A message with JSON-serializable data content\n", - "# - ToolRequestContent: A message with a tool request, which contains a JSON-serializable request to call a tool\n", - "# - ToolResponseContent: A message with a tool response, which contains the response object from a tool call\n", - "# - ReasoningContent: A message with reasoning content, which contains the model's reasoning for a step\n", - "\n", - "# When processing the message/send response, if you are expecting more than
TextContent, such as DataContent, ToolRequestContent, or ToolResponseContent, you can process them as well\n", - "\n", - "rpc_response = client.agents.send_event(\n", - " agent_name=AGENT_NAME,\n", - " params={\n", - " \"content\": {\"type\": \"text\", \"author\": \"user\", \"content\": \"Find me a recipe on spaghetti\"},\n", - " \"task_id\": task.id,\n", - " }\n", - ")\n", - "\n", - "event = rpc_response.result\n", - "print(event)" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "4", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
โ•ญโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ USER [08/27/2025 21:33:22] โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฎ\n",
-       "โ”‚ Find me a recipe on spaghetti                                                โ”‚\n",
-       "โ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ\n",
-       "
\n" - ], - "text/plain": [ - "\u001b[96mโ•ญโ”€\u001b[0m\u001b[96mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[96m \u001b[0m\u001b[1;96mUSER\u001b[0m\u001b[96m [08/27/2025 21:33:22] \u001b[0m\u001b[96mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[96mโ”€โ•ฎ\u001b[0m\n", - "\u001b[96mโ”‚\u001b[0m Find me a recipe on spaghetti \u001b[96mโ”‚\u001b[0m\n", - "\u001b[96mโ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ\u001b[0m\n" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - " \r" - ] - }, - { - "data": { - "text/html": [ - "
โ•ญโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ AGENT [08/27/2025 21:33:25] โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฎ\n",
-       "โ”‚ I'm sorry, but I cannot process messages about spaghetti. This guardrail was โ”‚\n",
-       "โ”‚ put in place for demonstration purposes. Please ask me about something else! โ”‚\n",
-       "โ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ\n",
-       "
\n" - ], - "text/plain": [ - "\u001b[32mโ•ญโ”€\u001b[0m\u001b[32mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[32m \u001b[0m\u001b[1;32mAGENT\u001b[0m\u001b[32m [08/27/2025 21:33:25] \u001b[0m\u001b[32mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[32mโ”€โ•ฎ\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m I'm sorry, but I cannot process messages about spaghetti. This guardrail was \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m put in place for demonstration purposes. Please ask me about something else! \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ\u001b[0m\n" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Streaming timed out after 60 seconds - returning collected messages\n" - ] - } - ], - "source": [ - "# Subscribe to the async task messages produced by the agent\n", - "from agentex.lib.utils.dev_tools import subscribe_to_async_task_messages\n", - "\n", - "task_messages = subscribe_to_async_task_messages(\n", - " client=client,\n", - " task=task, \n", - " only_after_timestamp=event.created_at, \n", - " print_messages=True,\n", - " rich_print=True,\n", - " timeout=60,\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "ff7cf427", - "metadata": {}, - "source": [ - "### Test 3: Soup Input Guardrail\n" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "ea464eea", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Task(id='b34a414a-5753-4c6c-a6f5-aa8eabb6a731', created_at=datetime.datetime(2025, 8, 27, 21, 34, 25, 397654, tzinfo=TzInfo(UTC)), name='66fd90bb-soup-test', params={}, status='RUNNING', status_reason='Task created, forwarding to ACP server', updated_at=datetime.datetime(2025, 8, 27, 21, 34, 25, 397654, tzinfo=TzInfo(UTC)))\n" - ] - } - ], - "source": [ - "# Create a new task for soup guardrail test\n", - "rpc_response = client.agents.create_task(\n", - " agent_name=AGENT_NAME,\n", - " params={\n", - " \"name\": f\"{str(uuid.uuid4())[:8]}-soup-test\",\n", - " \"params\": {}\n", - " }\n", - ")\n", - "\n", - "task_soup = rpc_response.result\n", - "print(task_soup)\n" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "48d40391", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Event(id='90d002ac-ff06-4d36-8af7-b764420ae2ff', agent_id='a1abb90e-c673-4448-a4e2-841170568840', sequence_id=1845, task_id='b34a414a-5753-4c6c-a6f5-aa8eabb6a731', content=TextContent(author='user', content=\"What's your favorite soup recipe?\", attachments=None, format='plain', style='static', type='text'), created_at=datetime.datetime(2025, 8, 27, 21, 34, 25, 427792, tzinfo=TzInfo(UTC)))\n" - ] - } - ], - "source": [ - "# Send event that triggers soup guardrail\n", - "rpc_response = client.agents.send_event(\n", - " agent_name=AGENT_NAME,\n", - " params={\n", - " \"content\": {\"type\": \"text\", \"author\": \"user\", \"content\": \"What's your favorite soup recipe?\"},\n", - " \"task_id\": task_soup.id,\n", - " }\n", - ")\n", - "\n", - "event_soup = rpc_response.result\n", - "print(event_soup)\n" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "154c6498", - 
"metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
โ•ญโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ USER [08/27/2025 21:34:25] โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฎ\n",
-       "โ”‚ What's your favorite soup recipe?                                            โ”‚\n",
-       "โ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ\n",
-       "
\n" - ], - "text/plain": [ - "\u001b[96mโ•ญโ”€\u001b[0m\u001b[96mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[96m \u001b[0m\u001b[1;96mUSER\u001b[0m\u001b[96m [08/27/2025 21:34:25] \u001b[0m\u001b[96mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[96mโ”€โ•ฎ\u001b[0m\n", - "\u001b[96mโ”‚\u001b[0m What's your favorite soup recipe? \u001b[96mโ”‚\u001b[0m\n", - "\u001b[96mโ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ\u001b[0m\n" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - " \r" - ] - }, - { - "data": { - "text/html": [ - "
โ•ญโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ AGENT [08/27/2025 21:34:26] โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฎ\n",
-       "โ”‚ I'm sorry, but I cannot process messages about soup. This is a demonstration โ”‚\n",
-       "โ”‚ guardrail for testing purposes. Please ask about something other than soup!  โ”‚\n",
-       "โ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ\n",
-       "
\n" - ], - "text/plain": [ - "\u001b[32mโ•ญโ”€\u001b[0m\u001b[32mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[32m \u001b[0m\u001b[1;32mAGENT\u001b[0m\u001b[32m [08/27/2025 21:34:26] \u001b[0m\u001b[32mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[32mโ”€โ•ฎ\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m I'm sorry, but I cannot process messages about soup. This is a demonstration \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m guardrail for testing purposes. Please ask about something other than soup! \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ\u001b[0m\n" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Streaming timed out after 30 seconds - returning collected messages\n" - ] - } - ], - "source": [ - "# Subscribe to see the soup guardrail response\n", - "task_messages_soup = subscribe_to_async_task_messages(\n", - " client=client,\n", - " task=task_soup, \n", - " only_after_timestamp=event_soup.created_at, \n", - " print_messages=True,\n", - " rich_print=True,\n", - " timeout=30,\n", - ")\n" - ] - }, - { - "cell_type": "markdown", - "id": "dae8d0be", - "metadata": {}, - "source": [ - "### Test 4: Pizza Output Guardrail\n" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "id": "1abbe06b", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Task(id='ca2d107e-4f21-48f6-830a-61a03779895f', created_at=datetime.datetime(2025, 8, 27, 21, 34, 56, 922244, tzinfo=TzInfo(UTC)), name='fbd68764-pizza-test', params={}, status='RUNNING', status_reason='Task created, forwarding to ACP server', updated_at=datetime.datetime(2025, 8, 27, 21, 34, 56, 922244, tzinfo=TzInfo(UTC)))\n" - ] - } - ], - "source": [ - "# Create a new task for pizza guardrail test\n", - "rpc_response = client.agents.create_task(\n", - " agent_name=AGENT_NAME,\n", - " params={\n", - " \"name\": f\"{str(uuid.uuid4())[:8]}-pizza-test\",\n", - " \"params\": {}\n", - " }\n", - ")\n", - "\n", - "task_pizza = rpc_response.result\n", - "print(task_pizza)\n" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "id": "ea6b58b5", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Event(id='2b39425a-f3c0-409b-b725-2ee88e6ae178', agent_id='a1abb90e-c673-4448-a4e2-841170568840', sequence_id=1846, task_id='ca2d107e-4f21-48f6-830a-61a03779895f', content=TextContent(author='user', content='What are some popular Italian dishes?', attachments=None, format='plain', style='static', type='text'), created_at=datetime.datetime(2025, 8, 27, 21, 34, 56, 969021, tzinfo=TzInfo(UTC)))\n" - ] - } - ], - "source": [ - "# Send event that might trigger pizza output guardrail\n", - "rpc_response = client.agents.send_event(\n", - " agent_name=AGENT_NAME,\n", - " params={\n", - " \"content\": {\"type\": \"text\", \"author\": \"user\", \"content\": \"What are some popular Italian dishes?\"},\n", - " \"task_id\": task_pizza.id,\n", - " }\n", - ")\n", - "\n", - "event_pizza = rpc_response.result\n", - "print(event_pizza)\n" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "id": "899be668", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ 
- "
โ•ญโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ USER [08/27/2025 21:34:57] โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฎ\n",
-       "โ”‚ What are some popular Italian dishes?                                        โ”‚\n",
-       "โ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ\n",
-       "
\n" - ], - "text/plain": [ - "\u001b[96mโ•ญโ”€\u001b[0m\u001b[96mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[96m \u001b[0m\u001b[1;96mUSER\u001b[0m\u001b[96m [08/27/2025 21:34:57] \u001b[0m\u001b[96mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[96mโ”€โ•ฎ\u001b[0m\n", - "\u001b[96mโ”‚\u001b[0m What are some popular Italian dishes? \u001b[96mโ”‚\u001b[0m\n", - "\u001b[96mโ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ\u001b[0m\n" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - " \r" - ] - }, - { - "data": { - "text/html": [ - "
โ•ญโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ AGENT [08/27/2025 21:35:01] โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฎ\n",
-       "โ”‚ ๐Ÿง  Reasoning                                                                 โ”‚\n",
-       "โ”‚                                                                              โ”‚\n",
-       "โ”‚ Listing popular Italian dishes                                               โ”‚\n",
-       "โ”‚                                                                              โ”‚\n",
-       "โ”‚ The user is asking about popular Italian dishes, which is simple enough!     โ”‚\n",
-       "โ”‚ Iโ€™ll create a list that spans across various courses: antipasti, primi (like โ”‚\n",
-       "โ”‚ pasta and risotto), secondi (meat and fish), contorni, and dolci. I think I  โ”‚\n",
-       "โ”‚ should mention regional specialties, aiming for 15-20 items. Key dishes will โ”‚\n",
-       "โ”‚ include pizza, several pasta types like spaghetti alla carbonara and         โ”‚\n",
-       "โ”‚ bolognese, risotto alla milanese, and more. I can also offer recipes or      โ”‚\n",
-       "โ”‚ recommendations if theyโ€™d like.                                              โ”‚\n",
-       "โ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ\n",
-       "
\n" - ], - "text/plain": [ - "\u001b[95mโ•ญโ”€\u001b[0m\u001b[95mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[95m \u001b[0m\u001b[1;32mAGENT\u001b[0m\u001b[95m [08/27/2025 21:35:01] \u001b[0m\u001b[95mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[95mโ”€โ•ฎ\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m ๐Ÿง  \u001b[1mReasoning\u001b[0m \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m \u001b[1mListing popular Italian dishes\u001b[0m \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m The user is asking about popular Italian dishes, which is simple enough! \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m Iโ€™ll create a list that spans across various courses: antipasti, primi (like \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m pasta and risotto), secondi (meat and fish), contorni, and dolci. I think I \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m should mention regional specialties, aiming for 15-20 items. Key dishes will \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m include pizza, several pasta types like spaghetti alla carbonara and \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m bolognese, risotto alla milanese, and more. I can also offer recipes or \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m recommendations if theyโ€™d like. \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ\u001b[0m\n" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - " \r" - ] - }, - { - "data": { - "text/html": [ - "
โ•ญโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ AGENT [08/27/2025 21:35:03] โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฎ\n",
-       "โ”‚ Here are some popular Italian dishes, grouped by course with a short         โ”‚\n",
-       "โ”‚ description for each:                                                        โ”‚\n",
-       "โ”‚                                                                              โ”‚\n",
-       "โ”‚ Antipasti (starters)                                                         โ”‚\n",
-       "โ”‚                                                                              โ”‚\n",
-       "โ”‚  โ€ข Bruschetta: grilled bread rubbed with garlic and topped (commonly) with   โ”‚\n",
-       "โ”‚    tomatoes, basil, olive oil.                                               โ”‚\n",
-       "โ”‚  โ€ข Caprese: fresh tomatoes, mozzarella, basil and olive oil (from Campania). โ”‚\n",
-       "โ”‚  โ€ข Carpaccio: thinly sliced raw beef or fish, dressed with lemon/olive oil   โ”‚\n",
-       "โ”‚    and parmesan.                                                             โ”‚\n",
-       "โ”‚  โ€ข Prosciutto e melone: cured ham served with cantaloupe.                    โ”‚\n",
-       "โ”‚                                                                              โ”‚\n",
-       "โ”‚ Primi (first courses โ€” usually pasta, rice or soup)                          โ”‚\n",
-       "โ”‚                                                                              โ”‚\n",
-       "โ”‚  โ€ข Spaghetti alla Carbonara: eggs, Pecorino/Romano cheese, guanciale (cured  โ”‚\n",
-       "โ”‚    pork) and black pepper (Roman classic).                                   โ”‚\n",
-       "โ”‚  โ€ข Spaghetti alla Bolognese / Ragรน: meat-based sauce (Emilia-Romagna).       โ”‚\n",
-       "โ”‚  โ€ข Pasta allโ€™Amatriciana: tomato, guanciale and pecorino (from Amatrice).    โ”‚\n",
-       "โ”‚  โ€ข Cacio e Pepe: very simple pasta with Pecorino cheese and black pepper     โ”‚\n",
-       "โ”‚    (Roman).                                                                  โ”‚\n",
-       "โ”‚  โ€ข Lasagna alla Bolognese: layered pasta with ragรน, bรฉchamel and cheese.     โ”‚\n",
-       "โ”‚  โ€ข Risotto alla Milanese: creamy saffron risotto (Milan).                    โ”‚\n",
-       "โ”‚  โ€ข Gnocchi: potato dumplings served with various sauces.                     โ”‚\n",
-       "โ”‚  โ€ข Minestrone: hearty vegetable soup.                                        โ”‚\n",
-       "โ”‚                                                                              โ”‚\n",
-       "โ”‚ Secondi (main courses)                                                       โ”‚\n",
-       "โ”‚                                                                              โ”‚\n",
-       "โ”‚  โ€ข Pollo alla Cacciatora (chicken cacciatore): chicken stewed with tomatoes, โ”‚\n",
-       "โ”‚    herbs, wine.                                                              โ”‚\n",
-       "โ”‚  โ€ข Saltimbocca alla Romana: veal topped with prosciutto and sage, cooked in  โ”‚\n",
-       "โ”‚    wine/butter (Rome).                                                       โ”‚\n",
-       "โ”‚  โ€ข Osso Buco: braised veal shanks, often served with risotto alla Milanese.  โ”‚\n",
-       "โ”‚  โ€ข Branzino al forno: roast sea bass (common coastal dish).                  โ”‚\n",
-       "โ”‚  โ€ข Parmigiana di Melanzane (Eggplant Parmesan): fried eggplant layered with  โ”‚\n",
-       "โ”‚    tomato sauce and cheese (Southern Italy).                                 โ”‚\n",
-       "โ”‚                                                                              โ”‚\n",
-       "โ”‚ Contorni (sides)                                                             โ”‚\n",
-       "โ”‚                                                                              โ”‚\n",
-       "โ”‚  โ€ข Focaccia: flat oven-baked bread from Liguria (often seasoned with olive   โ”‚\n",
-       "โ”‚    oil, rosemary).                                                           โ”‚\n",
-       "โ”‚  โ€ข Polenta: cornmeal porridge, served soft or grilled (Northern Italy).      โ”‚\n",
-       "โ”‚                                                                              โ”‚\n",
-       "โ”‚ Dolci (desserts)                                                             โ”‚\n",
-       "โ”‚                                                                              โ”‚\n",
-       "โ”‚  โ€ข Tiramisu: coffee-soaked ladyfingers layered with mascarpone cream.        โ”‚\n",
-       "โ”‚  โ€ข Gelato: Italian-style ice cream, denser and more intense than many ice    โ”‚\n",
-       "โ”‚    creams.                                                                   โ”‚\n",
-       "โ”‚  โ€ข Panna Cotta: creamy set dessert, often served with fruit coulis.          โ”‚\n",
-       "โ”‚  โ€ข Cannoli: Sicilian fried pastry tubes filled with sweet ricotta.           โ”‚\n",
-       "โ”‚                                                                              โ”‚\n",
-       "โ”‚ Regional specialties worth noting                                            โ”‚\n",
-       "โ”‚                                                                              โ”‚\n",
-       "โ”‚  โ€ข Pizza Margherita (Naples): tomato, mozzarella, basil โ€” the classic        โ”‚\n",
-       "โ”‚    Neapolitan pizza.                                                         โ”‚\n",
-       "โ”‚  โ€ข Arancini (Sicily): fried rice balls usually filled with ragรน, peas and    โ”‚\n",
-       "โ”‚    cheese.                                                                   โ”‚\n",
-       "โ”‚                                                                              โ”‚\n",
-       "โ”‚ If youโ€™d like, I can:                                                        โ”‚\n",
-       "โ”‚                                                                              โ”‚\n",
-       "โ”‚  โ€ข Give recipes for any of these dishes,                                     โ”‚\n",
-       "โ”‚  โ€ข Suggest restaurants or regional variations, or                            โ”‚\n",
-       "โ”‚  โ€ข Provide wine-pairing ideas. Which would you prefer?                       โ”‚\n",
-       "โ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ\n",
-       "
\n" - ], - "text/plain": [ - "\u001b[32mโ•ญโ”€\u001b[0m\u001b[32mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[32m \u001b[0m\u001b[1;32mAGENT\u001b[0m\u001b[32m [08/27/2025 21:35:03] \u001b[0m\u001b[32mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[32mโ”€โ•ฎ\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m Here are some popular Italian dishes, grouped by course with a short \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m description for each: \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m Antipasti (starters) \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mBruschetta: grilled bread rubbed with garlic and topped (commonly) with \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mtomatoes, basil, olive oil. \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mCaprese: fresh tomatoes, mozzarella, basil and olive oil (from Campania). \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mCarpaccio: thinly sliced raw beef or fish, dressed with lemon/olive oil \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mand parmesan. \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mProsciutto e melone: cured ham served with cantaloupe. \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m Primi (first courses โ€” usually pasta, rice or soup) \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mSpaghetti alla Carbonara: eggs, Pecorino/Romano cheese, guanciale (cured \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mpork) and black pepper (Roman classic). \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mSpaghetti alla Bolognese / Ragรน: meat-based sauce (Emilia-Romagna). \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mPasta allโ€™Amatriciana: tomato, guanciale and pecorino (from Amatrice). \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mCacio e Pepe: very simple pasta with Pecorino cheese and black pepper \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0m(Roman). \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mLasagna alla Bolognese: layered pasta with ragรน, bรฉchamel and cheese. \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mRisotto alla Milanese: creamy saffron risotto (Milan). \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mGnocchi: potato dumplings served with various sauces. \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mMinestrone: hearty vegetable soup. \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m Secondi (main courses) \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mPollo alla Cacciatora (chicken cacciatore): chicken stewed with tomatoes, \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mherbs, wine. 
\u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mSaltimbocca alla Romana: veal topped with prosciutto and sage, cooked in \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mwine/butter (Rome). \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mOsso Buco: braised veal shanks, often served with risotto alla Milanese. \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mBranzino al forno: roast sea bass (common coastal dish). \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mParmigiana di Melanzane (Eggplant Parmesan): fried eggplant layered with \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mtomato sauce and cheese (Southern Italy). \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m Contorni (sides) \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mFocaccia: flat oven-baked bread from Liguria (often seasoned with olive \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0moil, rosemary). \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mPolenta: cornmeal porridge, served soft or grilled (Northern Italy). \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m Dolci (desserts) \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mTiramisu: coffee-soaked ladyfingers layered with mascarpone cream. \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mGelato: Italian-style ice cream, denser and more intense than many ice \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mcreams. \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mPanna Cotta: creamy set dessert, often served with fruit coulis. \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mCannoli: Sicilian fried pastry tubes filled with sweet ricotta. \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m Regional specialties worth noting \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mPizza Margherita (Naples): tomato, mozzarella, basil โ€” the classic \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mNeapolitan pizza. \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mArancini (Sicily): fried rice balls usually filled with ragรน, peas and \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mcheese. \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m If youโ€™d like, I can: \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mGive recipes for any of these dishes, \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mSuggest restaurants or regional variations, or \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mProvide wine-pairing ideas. Which would you prefer? 
\u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ\u001b[0m\n" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - " \r" - ] - }, - { - "data": { - "text/html": [ - "
╭──────────────────────── AGENT [08/27/2025 21:35:10] ─────────────────────────╮\n",
-       "│ I cannot provide this response as it mentions pizza. Due to content          │\n",
-       "│ policies, I need to avoid discussing pizza. Let me provide a different       │\n",
-       "│ response.                                                                    │\n",
-       "╰──────────────────────────────────────────────────────────────────────────────╯\n",
-       "
\n" - ], - "text/plain": [ - "\u001b[32mโ•ญโ”€\u001b[0m\u001b[32mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[32m \u001b[0m\u001b[1;32mAGENT\u001b[0m\u001b[32m [08/27/2025 21:35:10] \u001b[0m\u001b[32mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[32mโ”€โ•ฎ\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m I cannot provide this response as it mentions pizza. Due to content \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m policies, I need to avoid discussing pizza. Let me provide a different \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m response. \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ\u001b[0m\n" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Streaming timed out after 30 seconds - returning collected messages\n" - ] - } - ], - "source": [ - "# Subscribe to see if pizza output guardrail triggers\n", - "task_messages_pizza = subscribe_to_async_task_messages(\n", - " client=client,\n", - " task=task_pizza, \n", - " only_after_timestamp=event_pizza.created_at, \n", - " print_messages=True,\n", - " rich_print=True,\n", - " timeout=30,\n", - ")\n" - ] - }, - { - "cell_type": "markdown", - "id": "d59c0cfc", - "metadata": {}, - "source": [ - "### Test 5: Sushi Output Guardrail\n" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "id": "0443e640", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Task(id='1b3e7c18-b2a7-4980-be10-c8e50aac8643', created_at=datetime.datetime(2025, 8, 27, 21, 35, 48, 956144, tzinfo=TzInfo(UTC)), name='3bd766f1-sushi-test', params={}, status='RUNNING', status_reason='Task created, forwarding to ACP server', updated_at=datetime.datetime(2025, 8, 27, 21, 35, 48, 956144, tzinfo=TzInfo(UTC)))\n" - ] - } - ], - "source": [ - "# Create a new task for sushi guardrail test\n", - "rpc_response = client.agents.create_task(\n", - " agent_name=AGENT_NAME,\n", - " params={\n", - " \"name\": f\"{str(uuid.uuid4())[:8]}-sushi-test\",\n", - " \"params\": {}\n", - " }\n", - ")\n", - "\n", - "task_sushi = rpc_response.result\n", - "print(task_sushi)\n" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "id": "7e7feb64", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Event(id='ab1f8ec6-5bdb-4b75-9999-f6b193de3772', agent_id='a1abb90e-c673-4448-a4e2-841170568840', sequence_id=1847, task_id='1b3e7c18-b2a7-4980-be10-c8e50aac8643', content=TextContent(author='user', content='What are some popular Japanese foods?', attachments=None, format='plain', style='static', type='text'), created_at=datetime.datetime(2025, 8, 27, 21, 35, 48, 983826, tzinfo=TzInfo(UTC)))\n" - ] - } - ], - "source": [ - "# Send event that might trigger sushi output guardrail\n", - "rpc_response = client.agents.send_event(\n", - " agent_name=AGENT_NAME,\n", - " params={\n", - " \"content\": {\"type\": \"text\", \"author\": \"user\", \"content\": \"What are some popular Japanese foods?\"},\n", - " \"task_id\": task_sushi.id,\n", - " }\n", - ")\n", - "\n", - "event_sushi = rpc_response.result\n", - "print(event_sushi)\n" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "id": "33d8b0f6", - 
"metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
╭───────────────────────── USER [08/27/2025 21:35:49] ─────────────────────────╮\n",
-       "│ What are some popular Japanese foods?                                        │\n",
-       "╰──────────────────────────────────────────────────────────────────────────────╯\n",
-       "
\n" - ], - "text/plain": [ - "\u001b[96mโ•ญโ”€\u001b[0m\u001b[96mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[96m \u001b[0m\u001b[1;96mUSER\u001b[0m\u001b[96m [08/27/2025 21:35:49] \u001b[0m\u001b[96mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[96mโ”€โ•ฎ\u001b[0m\n", - "\u001b[96mโ”‚\u001b[0m What are some popular Japanese foods? \u001b[96mโ”‚\u001b[0m\n", - "\u001b[96mโ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ\u001b[0m\n" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - " \r" - ] - }, - { - "data": { - "text/html": [ - "
╭──────────────────────── AGENT [08/27/2025 21:35:59] ─────────────────────────╮\n",
-       "│ 🧠 Reasoning                                                                 │\n",
-       "│                                                                              │\n",
-       "│ Compiling popular Japanese foods                                             │\n",
-       "│                                                                              │\n",
-       "│ The user is asking for a list of popular Japanese foods, likely with brief   │\n",
-       "│ descriptions. I don’t need any tools for this, so I’ll compile a             │\n",
-       "│ well-rounded list that covers items like sushi, sashimi, ramen, udon,        │\n",
-       "│ tempura, and more, along with regional specialties and brief notes on        │\n",
-       "│ etiquette like using chopsticks. I’ll keep it concise for a casual reader    │\n",
-       "│ while including around 20 items with short descriptions and suggestions for  │\n",
-       "│ where to try them. This will help create a great summary!                    │\n",
-       "╰──────────────────────────────────────────────────────────────────────────────╯\n",
-       "
\n" - ], - "text/plain": [ - "\u001b[95mโ•ญโ”€\u001b[0m\u001b[95mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[95m \u001b[0m\u001b[1;32mAGENT\u001b[0m\u001b[95m [08/27/2025 21:35:59] \u001b[0m\u001b[95mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[95mโ”€โ•ฎ\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m ๐Ÿง  \u001b[1mReasoning\u001b[0m \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m \u001b[1mCompiling popular Japanese foods\u001b[0m \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m The user is asking for a list of popular Japanese foods, likely with brief \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m descriptions. I donโ€™t need any tools for this, so Iโ€™ll compile a \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m well-rounded list that covers items like sushi, sashimi, ramen, udon, \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m tempura, and more, along with regional specialties and brief notes on \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m etiquette like using chopsticks. Iโ€™ll keep it concise for a casual reader \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m while including around 20 items with short descriptions and suggestions for \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m where to try them. This will help create a great summary! \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ\u001b[0m\n" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - " \r" - ] - }, - { - "data": { - "text/html": [ - "
╭──────────────────────── AGENT [08/27/2025 21:36:00] ─────────────────────────╮\n",
-       "│ Here are many popular Japanese foods, with a short description of each so    │\n",
-       "│ you know what to look for:                                                   │\n",
-       "│                                                                              │\n",
-       "│  • Sushi — Vinegared rice with raw fish or other toppings (nigiri, maki      │\n",
-       "│    rolls, chirashi).                                                         │\n",
-       "│  • Sashimi — Thinly sliced raw fish served with soy sauce and wasabi.        │\n",
-       "│  • Ramen — Wheat noodles in flavorful broth (shoyu, miso, shio, tonkotsu)    │\n",
-       "│    with toppings like chashu pork and egg.                                   │\n",
-       "│  • Tempura — Lightly battered and deep-fried seafood or vegetables.          │\n",
-       "│  • Udon — Thick wheat noodles served hot in broth or chilled with a dipping  │\n",
-       "│    sauce.                                                                    │\n",
-       "│  • Soba — Buckwheat noodles, served hot or cold (zaru soba is a cold,        │\n",
-       "│    dipping style).                                                           │\n",
-       "│  • Yakitori — Skewered grilled chicken (various parts) usually seasoned with │\n",
-       "│    tare or salt.                                                             │\n",
-       "│  • Okonomiyaki — Savory pancake with cabbage and choice of fillings (Osaka   │\n",
-       "│    and Hiroshima styles).                                                    │\n",
-       "│  • Takoyaki — Octopus-filled batter balls, topped with sauce, mayo and       │\n",
-       "│    bonito flakes—common street food.                                         │\n",
-       "│  • Tonkatsu — Breaded, deep-fried pork cutlet served with shredded cabbage   │\n",
-       "│    and tonkatsu sauce.                                                       │\n",
-       "│  • Gyoza — Pan-fried dumplings filled with pork and vegetables (also boiled  │\n",
-       "│    or steamed).                                                              │\n",
-       "│  • Karaage — Japanese-style fried chicken, marinated then deep-fried—crispy  │\n",
-       "│    and juicy.                                                                │\n",
-       "│  • Onigiri — Rice balls often wrapped in nori and filled with pickled plum,  │\n",
-       "│    salmon, or tuna mayo.                                                     │\n",
-       "│  • Miso soup — Soup made from miso paste with tofu, wakame seaweed and       │\n",
-       "│    scallions.                                                                │\n",
-       "│  • Bento — Packed meal box with rice, protein and side dishes—convenient and │\n",
-       "│    varied.                                                                   │\n",
-       "│  • Shabu-shabu — Hot-pot where thin meat and veggies are briefly cooked in   │\n",
-       "│    boiling broth and dipped in sauces.                                       │\n",
-       "│  • Sukiyaki — Hot-pot cooked with soy-sugar broth, sliced beef and           │\n",
-       "│    vegetables, often dipped in raw egg.                                      │\n",
-       "│  • Yakiniku — Japanese-style barbecue where you grill slices of meat at the  │\n",
-       "│    table.                                                                    │\n",
-       "│  • Kaiseki — Multi-course traditional meal emphasizing seasonal ingredients  │\n",
-       "│    and presentation (formal dining).                                         │\n",
-       "│  • Natto — Fermented soybeans with a sticky texture and strong flavor (often │\n",
-       "│    eaten with rice).                                                         │\n",
-       "│                                                                              │\n",
-       "│ Regional specialties to try:                                                 │\n",
-       "│                                                                              │\n",
-       "│  • Hakata (Fukuoka) tonkotsu ramen, Osaka takoyaki/okonomiyaki, Hokkaido     │\n",
-       "│    seafood and miso ramen, Kyoto kaiseki and yudofu (tofu hot dish).         │\n",
-       "│                                                                              │\n",
-       "│ Tips:                                                                        │\n",
-       "│                                                                              │\n",
-       "│  • Many dishes have vegetarian/vegan variations (ask about dashi, which      │\n",
-       "│    often contains fish).                                                     │\n",
-       "│  • Try street-food stalls, izakayas (pubs), ramen shops, and traditional     │\n",
-       "│    ryokan or kaiseki restaurants for authentic experiences.                  │\n",
-       "│                                                                              │\n",
-       "│ If you want, I can suggest: typical places to try any of these, simple       │\n",
-       "│ recipes, or a short list of must-tries for a first-time visitor. Which would │\n",
-       "│ you prefer?                                                                  │\n",
-       "╰──────────────────────────────────────────────────────────────────────────────╯\n",
-       "
\n" - ], - "text/plain": [ - "\u001b[32mโ•ญโ”€\u001b[0m\u001b[32mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[32m \u001b[0m\u001b[1;32mAGENT\u001b[0m\u001b[32m [08/27/2025 21:36:00] \u001b[0m\u001b[32mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[32mโ”€โ•ฎ\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m Here are many popular Japanese foods, with a short description of each so \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m you know what to look for: \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mSushi โ€” Vinegared rice with raw fish or other toppings (nigiri, maki \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mrolls, chirashi). \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mSashimi โ€” Thinly sliced raw fish served with soy sauce and wasabi. \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mRamen โ€” Wheat noodles in flavorful broth (shoyu, miso, shio, tonkotsu) \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mwith toppings like chashu pork and egg. \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mTempura โ€” Lightly battered and deep-fried seafood or vegetables. \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mUdon โ€” Thick wheat noodles served hot in broth or chilled with a dipping \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0msauce. \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mSoba โ€” Buckwheat noodles, served hot or cold (zaru soba is a cold, \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mdipping style). \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mYakitori โ€” Skewered grilled chicken (various parts) usually seasoned with \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mtare or salt. \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mOkonomiyaki โ€” Savory pancake with cabbage and choice of fillings (Osaka \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mand Hiroshima styles). \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mTakoyaki โ€” Octopus-filled batter balls, topped with sauce, mayo and \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mbonito flakesโ€”common street food. \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mTonkatsu โ€” Breaded, deep-fried pork cutlet served with shredded cabbage \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mand tonkatsu sauce. \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mGyoza โ€” Pan-fried dumplings filled with pork and vegetables (also boiled \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mor steamed). \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mKaraage โ€” Japanese-style fried chicken, marinated then deep-friedโ€”crispy \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mand juicy. 
\u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mOnigiri โ€” Rice balls often wrapped in nori and filled with pickled plum, \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0msalmon, or tuna mayo. \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mMiso soup โ€” Soup made from miso paste with tofu, wakame seaweed and \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mscallions. \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mBento โ€” Packed meal box with rice, protein and side dishesโ€”convenient and \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mvaried. \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mShabu-shabu โ€” Hot-pot where thin meat and veggies are briefly cooked in \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mboiling broth and dipped in sauces. \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mSukiyaki โ€” Hot-pot cooked with soy-sugar broth, sliced beef and \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mvegetables, often dipped in raw egg. \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mYakiniku โ€” Japanese-style barbecue where you grill slices of meat at the \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mtable. \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mKaiseki โ€” Multi-course traditional meal emphasizing seasonal ingredients \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mand presentation (formal dining). \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mNatto โ€” Fermented soybeans with a sticky texture and strong flavor (often \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0meaten with rice). \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m Regional specialties to try: \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mHakata (Fukuoka) tonkotsu ramen, Osaka takoyaki/okonomiyaki, Hokkaido \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mseafood and miso ramen, Kyoto kaiseki and yudofu (tofu hot dish). \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m Tips: \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mMany dishes have vegetarian/vegan variations (ask about dashi, which \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0moften contains fish). \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m โ€ข \u001b[0mTry street-food stalls, izakayas (pubs), ramen shops, and traditional \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[1;33m \u001b[0mryokan or kaiseki restaurants for authentic experiences. \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m If you want, I can suggest: typical places to try any of these, simple \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m recipes, or a short list of must-tries for a first-time visitor. Which would \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m you prefer? 
\u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ\u001b[0m\n" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - " \r" - ] - }, - { - "data": { - "text/html": [ - "
╭──────────────────────── AGENT [08/27/2025 21:36:07] ─────────────────────────╮\n",
-       "│ I cannot mention sushi in my response. This guardrail prevents discussions   │\n",
-       "│ about sushi for demonstration purposes. Please let me provide information    │\n",
-       "│ about other topics.                                                          │\n",
-       "╰──────────────────────────────────────────────────────────────────────────────╯\n",
-       "
\n" - ], - "text/plain": [ - "\u001b[32mโ•ญโ”€\u001b[0m\u001b[32mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[32m \u001b[0m\u001b[1;32mAGENT\u001b[0m\u001b[32m [08/27/2025 21:36:07] \u001b[0m\u001b[32mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[32mโ”€โ•ฎ\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m I cannot mention sushi in my response. This guardrail prevents discussions \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m about sushi for demonstration purposes. Please let me provide information \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ”‚\u001b[0m about other topics. \u001b[32mโ”‚\u001b[0m\n", - "\u001b[32mโ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ\u001b[0m\n" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Streaming timed out after 30 seconds - returning collected messages\n" - ] - } - ], - "source": [ - "# Subscribe to see if sushi output guardrail triggers\n", - "task_messages_sushi = subscribe_to_async_task_messages(\n", - " client=client,\n", - " task=task_sushi, \n", - " only_after_timestamp=event_sushi.created_at, \n", - " print_messages=True,\n", - " rich_print=True,\n", - " timeout=30,\n", - ")\n" - ] - }, - { - "cell_type": "markdown", - "id": "5ade7d59", - "metadata": {}, - "source": [ - "### Test 6: Normal Conversation (No Guardrails Triggered)\n" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "id": "096a8784", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Task(id='e14d5602-bc80-4023-b523-354af82dcdc2', created_at=datetime.datetime(2025, 8, 27, 21, 36, 46, 563649, tzinfo=TzInfo(UTC)), name='e8618275-normal-test', params={}, status='RUNNING', status_reason='Task created, forwarding to ACP server', updated_at=datetime.datetime(2025, 8, 27, 21, 36, 46, 563649, tzinfo=TzInfo(UTC)))\n" - ] - } - ], - "source": [ - "# Create a new task for normal conversation\n", - "rpc_response = client.agents.create_task(\n", - " agent_name=AGENT_NAME,\n", - " params={\n", - " \"name\": f\"{str(uuid.uuid4())[:8]}-normal-test\",\n", - " \"params\": {}\n", - " }\n", - ")\n", - "\n", - "task_normal = rpc_response.result\n", - "print(task_normal)\n" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "id": "ec04822d", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Event(id='406c31f1-5eb4-4a90-bd8d-825ddbddcfcd', agent_id='a1abb90e-c673-4448-a4e2-841170568840', sequence_id=1848, task_id='e14d5602-bc80-4023-b523-354af82dcdc2', content=TextContent(author='user', content='What is 5 + 3? Use the calculator tool.', attachments=None, format='plain', style='static', type='text'), created_at=datetime.datetime(2025, 8, 27, 21, 36, 46, 593485, tzinfo=TzInfo(UTC)))\n" - ] - } - ], - "source": [ - "# Send event that won't trigger any guardrails\n", - "rpc_response = client.agents.send_event(\n", - " agent_name=AGENT_NAME,\n", - " params={\n", - " \"content\": {\"type\": \"text\", \"author\": \"user\", \"content\": \"What is 5 + 3? 
Use the calculator tool.\"},\n", - " \"task_id\": task_normal.id,\n", - " }\n", - ")\n", - "\n", - "event_normal = rpc_response.result\n", - "print(event_normal)\n" - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "id": "3ab67e94", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
╭───────────────────────── USER [08/27/2025 21:36:46] ─────────────────────────╮\n",
-       "│ What is 5 + 3? Use the calculator tool.                                      │\n",
-       "╰──────────────────────────────────────────────────────────────────────────────╯\n",
-       "
\n" - ], - "text/plain": [ - "\u001b[96mโ•ญโ”€\u001b[0m\u001b[96mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[96m \u001b[0m\u001b[1;96mUSER\u001b[0m\u001b[96m [08/27/2025 21:36:46] \u001b[0m\u001b[96mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[96mโ”€โ•ฎ\u001b[0m\n", - "\u001b[96mโ”‚\u001b[0m What is 5 + 3? Use the calculator tool. \u001b[96mโ”‚\u001b[0m\n", - "\u001b[96mโ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ\u001b[0m\n" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - " \r" - ] - }, - { - "data": { - "text/html": [ - "
╭──────────────────────── AGENT [08/27/2025 21:36:49] ─────────────────────────╮\n",
-       "│ 🧠 Reasoning                                                                 │\n",
-       "│                                                                              │\n",
-       "│ I see the user wants to do a simple addition and prefers using the           │\n",
-       "│ calculator tool. I'll call the functions.calculator with parameters a=5,     │\n",
-       "│ b=3, and the operation set to \"add.\" It's pretty straightforward, and        │\n",
-       "│ there's no need for sequential thinking here. Just a direct call to the tool │\n",
-       "│ will do the job efficiently. So, I'll go ahead and call that function to get │\n",
-       "│ the result for the user!                                                     │\n",
-       "╰──────────────────────────────────────────────────────────────────────────────╯\n",
-       "
\n" - ], - "text/plain": [ - "\u001b[95mโ•ญโ”€\u001b[0m\u001b[95mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[95m \u001b[0m\u001b[1;32mAGENT\u001b[0m\u001b[95m [08/27/2025 21:36:49] \u001b[0m\u001b[95mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[95mโ”€โ•ฎ\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m ๐Ÿง  \u001b[1mReasoning\u001b[0m \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m I see the user wants to do a simple addition and prefers using the \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m calculator tool. I'll call the functions.calculator with parameters a=5, \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m b=3, and the operation set to \"add.\" It's pretty straightforward, and \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m there's no need for sequential thinking here. Just a direct call to the tool \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m will do the job efficiently. So, I'll go ahead and call that function to get \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ”‚\u001b[0m the result for the user! \u001b[95mโ”‚\u001b[0m\n", - "\u001b[95mโ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ\u001b[0m\n" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - " \r" - ] - }, - { - "data": { - "text/html": [ - "
╭──────────────────────── AGENT [08/27/2025 21:36:51] ─────────────────────────╮\n",
-       "│ 🔧 Tool Request: calculator                                                  │\n",
-       "│                                                                              │\n",
-       "│ Arguments:                                                                   │\n",
-       "│                                                                              │\n",
-       "│                                                                              │\n",
-       "│  {                                                                           │\n",
-       "│    \"a\": 5,                                                                   │\n",
-       "│    \"b\": 3,                                                                   │\n",
-       "│    \"operation\": \"add\"                                                        │\n",
-       "│  }                                                                           │\n",
-       "│                                                                              │\n",
-       "╰──────────────────────────────────────────────────────────────────────────────╯\n",
-       "
\n" - ], - "text/plain": [ - "\u001b[33mโ•ญโ”€\u001b[0m\u001b[33mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[33m \u001b[0m\u001b[1;32mAGENT\u001b[0m\u001b[33m [08/27/2025 21:36:51] \u001b[0m\u001b[33mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[33mโ”€โ•ฎ\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m ๐Ÿ”ง \u001b[1mTool Request: calculator\u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[1mArguments:\u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m{\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m\"a\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m:\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;174;129;255;48;2;39;40;34m5\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m\"b\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m:\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;174;129;255;48;2;39;40;34m3\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m,\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;255;70;137;48;2;39;40;34m\"operation\"\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m:\u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m \u001b[0m\u001b[38;2;230;219;116;48;2;39;40;34m\"add\"\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m\u001b[38;2;248;248;242;48;2;39;40;34m}\u001b[0m\u001b[48;2;39;40;34m \u001b[0m\u001b[48;2;39;40;34m \u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ”‚\u001b[0m \u001b[48;2;39;40;34m \u001b[0m \u001b[33mโ”‚\u001b[0m\n", - "\u001b[33mโ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ\u001b[0m\n" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - " \r" - ] - }, - { - "data": { - "text/html": [ - "
╭──────────────────────── AGENT [08/27/2025 21:36:51] ─────────────────────────╮\n",
-       "│ ✅ Tool Response: calculator                                                 │\n",
-       "│                                                                              │\n",
-       "│ The result of 5.0 add 3.0 is 8                                               │\n",
-       "╰──────────────────────────────────────────────────────────────────────────────╯\n",
-       "
\n" - ], - "text/plain": [ - "\u001b[92mโ•ญโ”€\u001b[0m\u001b[92mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[92m \u001b[0m\u001b[1;32mAGENT\u001b[0m\u001b[92m [08/27/2025 21:36:51] \u001b[0m\u001b[92mโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€\u001b[0m\u001b[92mโ”€โ•ฎ\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m โœ… \u001b[1mTool Response: calculator\u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ”‚\u001b[0m The result of 5.0 add 3.0 is 8 \u001b[92mโ”‚\u001b[0m\n", - "\u001b[92mโ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ\u001b[0m\n" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Streaming timed out after 30 seconds - returning collected messages\n" - ] - } - ], - "source": [ - "# Subscribe to see normal response without guardrails\n", - "task_messages_normal = subscribe_to_async_task_messages(\n", - " client=client,\n", - " task=task_normal, \n", - " only_after_timestamp=event_normal.created_at, \n", - " print_messages=True,\n", - " rich_print=True,\n", - " timeout=30,\n", - ")\n" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.11" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/examples/tutorials/10_async/10_temporal/050_agent_chat_guardrails/manifest.yaml b/examples/tutorials/10_async/10_temporal/050_agent_chat_guardrails/manifest.yaml deleted file mode 100644 index 3fe94a00..00000000 --- a/examples/tutorials/10_async/10_temporal/050_agent_chat_guardrails/manifest.yaml +++ /dev/null @@ -1,139 +0,0 @@ -# Agent Manifest Configuration -# --------------------------- -# This file defines how your agent should be built and deployed. - -# Build Configuration -# ------------------ -# The build config defines what gets packaged into your agent's Docker image. -# This same configuration is used whether building locally or remotely. -# -# When building: -# 1. All files from include_paths are collected into a build context -# 2. The context is filtered by dockerignore rules -# 3. The Dockerfile uses this context to build your agent's image -# 4. 
The image is pushed to a registry and used to run your agent -build: - context: - # Root directory for the build context - root: ../../../ # Up to tutorials level to include test_utils - - # Paths to include in the Docker build context - # Must include: - # - Your agent's directory (your custom agent code) - # These paths are collected and sent to the Docker daemon for building - include_paths: - - 10_async/10_temporal/050_agent_chat_guardrails - - test_utils - - # Path to your agent's Dockerfile - # This defines how your agent's image is built from the context - # Relative to the root directory - dockerfile: 10_async/10_temporal/050_agent_chat_guardrails/Dockerfile - - # Path to your agent's .dockerignore - # Filters unnecessary files from the build context - # Helps keep build context small and builds fast - dockerignore: 10_async/10_temporal/050_agent_chat_guardrails/.dockerignore - - -# Local Development Configuration -# ----------------------------- -# Only used when running the agent locally -local_development: - agent: - port: 8000 # Port where your local ACP server is running - host_address: host.docker.internal # Host address for Docker networking (host.docker.internal for Docker, localhost for direct) - - # File paths for local development (relative to this manifest.yaml) - paths: - # Path to ACP server file - # Examples: - # project/acp.py (standard) - # src/server.py (custom structure) - # ../shared/acp.py (shared across projects) - # /absolute/path/acp.py (absolute path) - acp: project/acp.py - - # Path to temporal worker file - # Examples: - # project/run_worker.py (standard) - # workers/temporal.py (custom structure) - # ../shared/worker.py (shared across projects) - worker: project/run_worker.py - - -# Agent Configuration -# ----------------- -agent: - # Type of agent - either sync or async - acp_type: async - - # Unique name for your agent - # Used for task routing and monitoring - name: at050-agent-chat-guardrails - - # Description of what your agent does - # Helps with documentation and discovery - description: An AgentEx agent that demonstrates guardrails with tool-enabled multiturn chat - - # Temporal workflow configuration - # This enables your agent to run as a Temporal workflow for long-running tasks - temporal: - enabled: true - workflows: - # Name of the workflow class - # Must match the @workflow.defn name in your workflow.py - - name: at050-agent-chat-guardrails - - # Queue name for task distribution - # Used by Temporal to route tasks to your agent - # Convention: _task_queue - queue_name: 050_agent_chat_guardrails_queue - - # Optional: Credentials mapping - # Maps Kubernetes secrets to environment variables - # Common credentials include: - # credentials: - # - env_var_name: OPENAI_API_KEY - # secret_name: openai-api-key - # secret_key: api-key - - # Optional: Set Environment variables for running your agent locally as well - # as for deployment later on - # env: - # - name: OPENAI_BASE_URL - # value: "https://api.openai.com/v1" - # - name: ACCOUNT_ID - # value: "your_account_id_here" - - -# Deployment Configuration -# ----------------------- -# Configuration for deploying your agent to Kubernetes clusters -deployment: - # Container image configuration - image: - repository: "" # Update with your container registry - tag: "latest" # Default tag, should be versioned in production - - imagePullSecrets: - - name: my-registry-secret # Update with your image pull secret name - - # Global deployment settings that apply to all clusters - # These can be overridden 
using --override-file with custom configuration files - global: - agent: - name: "at050-agent-chat-guardrails" - description: "An AgentEx agent that demonstrates guardrails with tool-enabled multiturn chat" - - # Default replica count - replicaCount: 1 - - # Default resource requirements - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "1000m" - memory: "2Gi" \ No newline at end of file diff --git a/examples/tutorials/10_async/10_temporal/050_agent_chat_guardrails/project/__init__.py b/examples/tutorials/10_async/10_temporal/050_agent_chat_guardrails/project/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/examples/tutorials/10_async/10_temporal/050_agent_chat_guardrails/project/acp.py b/examples/tutorials/10_async/10_temporal/050_agent_chat_guardrails/project/acp.py deleted file mode 100644 index 744068d7..00000000 --- a/examples/tutorials/10_async/10_temporal/050_agent_chat_guardrails/project/acp.py +++ /dev/null @@ -1,30 +0,0 @@ -import os - -from agentex.lib.types.fastacp import TemporalACPConfig -from agentex.lib.sdk.fastacp.fastacp import FastACP - -# Create the ACP server -acp = FastACP.create( - acp_type="async", - config=TemporalACPConfig( - # When deployed to the cluster, the Temporal address will automatically be set to the cluster address - # For local development, we set the address manually to talk to the local Temporal service set up via docker compose - type="temporal", - temporal_address=os.getenv("TEMPORAL_ADDRESS", "localhost:7233") - ) -) - - -# Notice that we don't need to register any handlers when we use type="temporal" -# If you look at the code in agentex.sdk.fastacp.impl.temporal_acp -# You can see that these handlers are automatically registered when the ACP is created - -# @acp.on_task_create -# This will be handled by the method in your workflow that is decorated with @workflow.run - -# @acp.on_task_event_send -# This will be handled by the method in your workflow that is decorated with @workflow.signal(name=SignalName.RECEIVE_MESSAGE) - -# @acp.on_task_cancel -# This does not need to be handled by your workflow. 
-# It is automatically handled by the temporal client which cancels the workflow directly \ No newline at end of file diff --git a/examples/tutorials/10_async/10_temporal/050_agent_chat_guardrails/project/run_worker.py b/examples/tutorials/10_async/10_temporal/050_agent_chat_guardrails/project/run_worker.py deleted file mode 100644 index 636e9977..00000000 --- a/examples/tutorials/10_async/10_temporal/050_agent_chat_guardrails/project/run_worker.py +++ /dev/null @@ -1,34 +0,0 @@ -import asyncio - -from project.workflow import At050AgentChatGuardrailsWorkflow -from agentex.lib.utils.debug import setup_debug_if_enabled -from agentex.lib.utils.logging import make_logger -from agentex.lib.environment_variables import EnvironmentVariables -from agentex.lib.core.temporal.activities import get_all_activities -from agentex.lib.core.temporal.workers.worker import AgentexWorker - -environment_variables = EnvironmentVariables.refresh() - -logger = make_logger(__name__) - - -async def main(): - # Setup debug mode if enabled - setup_debug_if_enabled() - - task_queue_name = environment_variables.WORKFLOW_TASK_QUEUE - if task_queue_name is None: - raise ValueError("WORKFLOW_TASK_QUEUE is not set") - - # Create a worker with automatic tracing - worker = AgentexWorker( - task_queue=task_queue_name, - ) - - await worker.run( - activities=get_all_activities(), - workflow=At050AgentChatGuardrailsWorkflow, - ) - -if __name__ == "__main__": - asyncio.run(main()) \ No newline at end of file diff --git a/examples/tutorials/10_async/10_temporal/050_agent_chat_guardrails/project/workflow.py b/examples/tutorials/10_async/10_temporal/050_agent_chat_guardrails/project/workflow.py deleted file mode 100644 index c6d2f11f..00000000 --- a/examples/tutorials/10_async/10_temporal/050_agent_chat_guardrails/project/workflow.py +++ /dev/null @@ -1,517 +0,0 @@ -# ruff: noqa: ARG001 -from __future__ import annotations - -import os -import json -from typing import Any, Dict, List, override - -from mcp import StdioServerParameters -from dotenv import load_dotenv - -# Simple guardrail output model for this example -from pydantic import BaseModel -from temporalio import workflow -from openai.types.shared import Reasoning - -from agentex.lib import adk -from agentex.lib.types.acp import SendEventParams, CreateTaskParams -from agentex.lib.adk.models import ModelSettings -from agentex.lib.types.tracing import SGPTracingProcessorConfig -from agentex.lib.utils.logging import make_logger -from agentex.types.text_content import TextContent -from agentex.lib.utils.model_utils import BaseModel -from agentex.lib.core.base.run_context import RunContextWrapper -from agentex.lib.environment_variables import EnvironmentVariables -from agentex.lib.core.temporal.types.workflow import SignalName -from agentex.lib.core.temporal.workflows.workflow import BaseWorkflow -from agentex.lib.core.tracing.tracing_processor_manager import ( - add_tracing_processor_config, -) -from agentex.lib.core.temporal.activities.adk.providers.openai_activities import ( # noqa: E501 - FunctionTool, - TemporalInputGuardrail, - TemporalOutputGuardrail, -) - - -class GuardrailFunctionOutput(BaseModel): - """Output from a guardrail function.""" - output_info: Dict[str, Any] - tripwire_triggered: bool - - -# Type alias for the agent parameter in guardrail functions -Agent = Any - -environment_variables = EnvironmentVariables.refresh() -load_dotenv(dotenv_path=".env") - -add_tracing_processor_config( - SGPTracingProcessorConfig( - sgp_api_key=os.environ.get("SCALE_GP_API_KEY", 
""), - sgp_account_id=os.environ.get("SCALE_GP_ACCOUNT_ID", ""), - ) -) - -if not environment_variables.WORKFLOW_NAME: - raise ValueError("Environment variable WORKFLOW_NAME is not set") - -if not environment_variables.AGENT_NAME: - raise ValueError("Environment variable AGENT_NAME is not set") - -logger = make_logger(__name__) - - -class StateModel(BaseModel): - input_list: List[Dict[str, Any]] - turn_number: int - - -MCP_SERVERS = [ - StdioServerParameters( - command="npx", - args=["-y", "@modelcontextprotocol/server-sequential-thinking"], - ), - StdioServerParameters( - command="uvx", - args=["openai-websearch-mcp"], - env={"OPENAI_API_KEY": os.environ.get("OPENAI_API_KEY", "")}, - ), -] - - -async def calculator(context: RunContextWrapper, args: str) -> str: # noqa: ARG001 - """ - Simple calculator that can perform basic arithmetic operations. - - Args: - context: The run context wrapper - args: JSON string containing the operation and operands - - Returns: - String representation of the calculation result - """ - try: - # Parse the JSON arguments - parsed_args = json.loads(args) - operation = parsed_args.get("operation") - a = parsed_args.get("a") - b = parsed_args.get("b") - - if operation is None or a is None or b is None: - return ( - "Error: Missing required parameters. " - "Please provide 'operation', 'a', and 'b'." - ) - - # Convert to numbers - try: - a = float(a) - b = float(b) - except (ValueError, TypeError): - return "Error: 'a' and 'b' must be valid numbers." - - # Perform the calculation - if operation == "add": - result = a + b - elif operation == "subtract": - result = a - b - elif operation == "multiply": - result = a * b - elif operation == "divide": - if b == 0: - return "Error: Division by zero is not allowed." - result = a / b - else: - supported_ops = "add, subtract, multiply, divide" - return ( - f"Error: Unknown operation '{operation}'. " - f"Supported operations: {supported_ops}." - ) - - # Format the result nicely - if result == int(result): - return f"The result of {a} {operation} {b} is {int(result)}" - else: - formatted = f"{result:.6f}".rstrip("0").rstrip(".") - return f"The result of {a} {operation} {b} is {formatted}" - - except json.JSONDecodeError: - return "Error: Invalid JSON format in arguments." - except Exception as e: - return f"Error: An unexpected error occurred: {str(e)}" - - -""" -Guardrails for Testing: -- Input Guardrails: - - Spaghetti: Blocks any mention of "spaghetti" in user messages - - Soup: Blocks any mention of "soup" in user messages -- Output Guardrails: - - Pizza: Blocks the AI from mentioning "pizza" in responses - - Sushi: Blocks the AI from mentioning "sushi" in responses - -To test: -- Input: "Tell me about spaghetti" or "What's your favorite soup?" -- Output: Ask "What are popular Italian foods?" (might trigger pizza guardrail) - or "What are popular Japanese foods?" (might trigger sushi guardrail) -""" - - -# Define the spaghetti guardrail function -async def check_spaghetti_guardrail( - ctx: RunContextWrapper[None], - agent: Agent, - input: str | list -) -> GuardrailFunctionOutput: - """ - A simple guardrail that checks if 'spaghetti' is mentioned in the input. 
- """ - # Convert input to string to check - input_text = "" - if isinstance(input, str): - input_text = input.lower() - elif isinstance(input, list): - # For list of messages, check all user messages - for msg in input: - if isinstance(msg, dict) and msg.get("role") == "user": - content = msg.get("content", "") - if isinstance(content, str): - input_text += " " + content.lower() - - # Check if spaghetti is mentioned - contains_spaghetti = "spaghetti" in input_text - - return GuardrailFunctionOutput( - output_info={ - "contains_spaghetti": contains_spaghetti, - "checked_text": ( - input_text[:200] + "..." - if len(input_text) > 200 else input_text - ), - "rejection_message": ( - "I'm sorry, but I cannot process messages about spaghetti. " - "This guardrail was put in place for demonstration purposes. " - "Please ask me about something else!" - ) if contains_spaghetti else None - }, - tripwire_triggered=contains_spaghetti - ) - - -# Define soup input guardrail function -async def check_soup_guardrail( - ctx: RunContextWrapper[None], - agent: Agent, - input: str | list -) -> GuardrailFunctionOutput: - """ - A guardrail that checks if 'soup' is mentioned in the input. - """ - # Convert input to string to check - input_text = "" - if isinstance(input, str): - input_text = input.lower() - elif isinstance(input, list): - # For list of messages, check all user messages - for msg in input: - if isinstance(msg, dict) and msg.get("role") == "user": - content = msg.get("content", "") - if isinstance(content, str): - input_text += " " + content.lower() - - # Check if soup is mentioned - contains_soup = "soup" in input_text - - return GuardrailFunctionOutput( - output_info={ - "contains_soup": contains_soup, - "checked_text": ( - input_text[:200] + "..." - if len(input_text) > 200 else input_text - ), - "rejection_message": ( - "I'm sorry, but I cannot process messages about soup. " - "This is a demonstration guardrail for testing purposes. " - "Please ask about something other than soup!" - ) if contains_soup else None - }, - tripwire_triggered=contains_soup - ) - - -# Create the input guardrails -SPAGHETTI_GUARDRAIL = TemporalInputGuardrail( - guardrail_function=check_spaghetti_guardrail, - name="spaghetti_guardrail" -) - -SOUP_GUARDRAIL = TemporalInputGuardrail( - guardrail_function=check_soup_guardrail, - name="soup_guardrail" -) - - -# Define pizza output guardrail function -async def check_pizza_guardrail( - ctx: RunContextWrapper[None], - agent: Agent, - output: str -) -> GuardrailFunctionOutput: - """ - An output guardrail that prevents mentioning pizza. - """ - output_text = output.lower() if isinstance(output, str) else "" - contains_pizza = "pizza" in output_text - - return GuardrailFunctionOutput( - output_info={ - "contains_pizza": contains_pizza, - "rejection_message": ( - "I cannot provide this response as it mentions pizza. " - "Due to content policies, I need to avoid discussing pizza. " - "Let me provide a different response." - ) if contains_pizza else None - }, - tripwire_triggered=contains_pizza - ) - - -# Define sushi output guardrail function -async def check_sushi_guardrail( - ctx: RunContextWrapper[None], - agent: Agent, - output: str -) -> GuardrailFunctionOutput: - """ - An output guardrail that prevents mentioning sushi. 
- """ - output_text = output.lower() if isinstance(output, str) else "" - contains_sushi = "sushi" in output_text - - return GuardrailFunctionOutput( - output_info={ - "contains_sushi": contains_sushi, - "rejection_message": ( - "I cannot mention sushi in my response. " - "This guardrail prevents discussions about sushi for demonstration purposes. " - "Please let me provide information about other topics." - ) if contains_sushi else None - }, - tripwire_triggered=contains_sushi - ) - - -# Create the output guardrails -PIZZA_GUARDRAIL = TemporalOutputGuardrail( - guardrail_function=check_pizza_guardrail, - name="pizza_guardrail" -) - -SUSHI_GUARDRAIL = TemporalOutputGuardrail( - guardrail_function=check_sushi_guardrail, - name="sushi_guardrail" -) - - -# Example output guardrail function (kept for reference) -async def check_output_length_guardrail( - ctx: RunContextWrapper[None], - agent: Agent, - output: str -) -> GuardrailFunctionOutput: - """ - A simple output guardrail that checks if the response is too long. - """ - # Check the length of the output - max_length = 1000 # Maximum allowed characters - is_too_long = len(output) > max_length if isinstance(output, str) else False - - return GuardrailFunctionOutput( - output_info={ - "output_length": len(output) if isinstance(output, str) else 0, - "max_length": max_length, - "is_too_long": is_too_long, - "rejection_message": ( - f"I'm sorry, but my response is too long ({len(output)} characters). " - f"Please ask a more specific question so I can provide a concise answer " - f"(max {max_length} characters)." - ) if is_too_long else None - }, - tripwire_triggered=is_too_long - ) - - -# Uncomment to use the output guardrail -# from agentex.lib.core.temporal.activities.adk.providers.openai_activities import TemporalOutputGuardrail -# OUTPUT_LENGTH_GUARDRAIL = TemporalOutputGuardrail( -# guardrail_function=check_output_length_guardrail, -# name="output_length_guardrail" -# ) - - -# Create the calculator tool -CALCULATOR_TOOL = FunctionTool( - name="calculator", - description=( - "Performs basic arithmetic operations (add, subtract, multiply, " - "divide) on two numbers." - ), - params_json_schema={ - "type": "object", - "properties": { - "operation": { - "type": "string", - "enum": ["add", "subtract", "multiply", "divide"], - "description": "The arithmetic operation to perform", - }, - "a": {"type": "number", "description": "The first number"}, - "b": {"type": "number", "description": "The second number"}, - }, - "required": ["operation", "a", "b"], - "additionalProperties": False, - }, - strict_json_schema=True, - on_invoke_tool=calculator, -) - - -@workflow.defn(name=environment_variables.WORKFLOW_NAME) -class At050AgentChatGuardrailsWorkflow(BaseWorkflow): - """ - Minimal async workflow template for AgentEx Temporal agents. 
- """ - - def __init__(self): - super().__init__(display_name=environment_variables.AGENT_NAME) - self._complete_task = False - self._state: StateModel | None = None - - @workflow.signal(name=SignalName.RECEIVE_EVENT) - @override - async def on_task_event_send(self, params: SendEventParams) -> None: - - if not params.event.content: - return - if params.event.content.type != "text": - raise ValueError(f"Expected text message, got {params.event.content.type}") - - if params.event.content.author != "user": - raise ValueError( - f"Expected user message, got {params.event.content.author}" - ) - - if self._state is None: - raise ValueError("State is not initialized") - - # Increment the turn number - self._state.turn_number += 1 - # Add the new user message to the message history - self._state.input_list.append( - {"role": "user", "content": params.event.content.content} - ) - - async with adk.tracing.span( - trace_id=params.task.id, - name=f"Turn {self._state.turn_number}", - input=self._state, - ) as span: - # Echo back the user's message so it shows up in the UI. - # This is not done by default so the agent developer has full - # control over what is shown to the user. - await adk.messages.create( - task_id=params.task.id, - trace_id=params.task.id, - content=params.event.content, - parent_span_id=span.id if span else None, - ) - - if not os.environ.get("OPENAI_API_KEY"): - await adk.messages.create( - task_id=params.task.id, - trace_id=params.task.id, - content=TextContent( - author="agent", - content=( - "Hey, sorry I'm unable to respond to your message " - "because you're running this example without an " - "OpenAI API key. Please set the OPENAI_API_KEY " - "environment variable to run this example. Do this " - "by either by adding a .env file to the project/ " - "directory or by setting the environment variable " - "in your terminal." - ), - ), - parent_span_id=span.id if span else None, - ) - - # Call an LLM to respond to the user's message - # When send_as_agent_task_message=True, returns a TaskMessage - result = await adk.providers.openai.run_agent_streamed_auto_send( - task_id=params.task.id, - trace_id=params.task.id, - input_list=self._state.input_list, - mcp_server_params=MCP_SERVERS, - agent_name="Tool-Enabled Assistant", - agent_instructions=( - "You are a helpful assistant that can answer " - "questions using various tools. You have access to " - "sequential thinking and web search capabilities " - "through MCP servers, as well as a calculator tool " - "for performing basic arithmetic operations. Use " - "these tools when appropriate to provide accurate " - "and well-reasoned responses." 
-                ),
-                parent_span_id=span.id if span else None,
-                model="gpt-5-mini",
-                model_settings=ModelSettings(
-                    # Include reasoning items in the response
-                    # (IDs, summaries)
-                    # response_include=["reasoning.encrypted_content"],
-                    # Ask the model to include a short reasoning summary
-                    reasoning=Reasoning(effort="medium", summary="detailed"),
-                ),
-                tools=[CALCULATOR_TOOL],
-                input_guardrails=[SPAGHETTI_GUARDRAIL, SOUP_GUARDRAIL],
-                output_guardrails=[PIZZA_GUARDRAIL, SUSHI_GUARDRAIL],
-            )
-
-            # Update state with the final input list from result
-            if self._state and result:
-                final_list = getattr(result, "final_input_list", None)
-                if final_list is not None:
-                    self._state.input_list = final_list
-
-            # Set the span output to the state for the next turn
-            if span and self._state:
-                span.output = self._state.model_dump()
-
-    @workflow.run
-    @override
-    async def on_task_create(self, params: CreateTaskParams) -> None:
-        logger.info(f"Received task create params: {params}")
-
-        # 1. Initialize the state. You can either do this here or in the
-        # __init__ method. This function is triggered whenever a client
-        # creates a task for this agent. It is not re-triggered when a new
-        # event is sent to the task.
-        self._state = StateModel(
-            input_list=[],
-            turn_number=0,
-        )
-
-        # 2. Wait for the task to be completed indefinitely. If we don't do
-        # this, the workflow will close as soon as this function returns.
-        # Temporal can run hundreds of millions of workflows in parallel,
-        # so you don't need to worry about too many workflows running at once.
-        # Thus, if you want this agent to field events indefinitely (or for
-        # a long time), you need to wait for a condition to be met.
-        await workflow.wait_condition(
-            lambda: self._complete_task,
-            timeout=None,  # Set a timeout only if you have a specific reason
-            # to prevent the task from running indefinitely. Generally this
-            # is not needed.
-        )
diff --git a/examples/tutorials/10_async/10_temporal/050_agent_chat_guardrails/pyproject.toml b/examples/tutorials/10_async/10_temporal/050_agent_chat_guardrails/pyproject.toml
deleted file mode 100644
index d3815934..00000000
--- a/examples/tutorials/10_async/10_temporal/050_agent_chat_guardrails/pyproject.toml
+++ /dev/null
@@ -1,34 +0,0 @@
-[build-system]
-requires = ["hatchling"]
-build-backend = "hatchling.build"
-
-[project]
-name = "at010-agent-chat"
-version = "0.1.0"
-description = "An AgentEx agent that streams multi-turn tool-enabled chat with tracing"
-readme = "README.md"
-requires-python = ">=3.12"
-dependencies = [
-    "agentex-sdk",
-    "debugpy>=1.8.15",
-    "scale-gp",
-]
-
-[project.optional-dependencies]
-dev = [
-    "pytest",
-    "black",
-    "isort",
-    "flake8",
-]
-
-[tool.hatch.build.targets.wheel]
-packages = ["project"]
-
-[tool.black]
-line-length = 88
-target-version = ['py312']
-
-[tool.isort]
-profile = "black"
-line_length = 88
diff --git a/examples/tutorials/10_async/10_temporal/050_agent_chat_guardrails/tests/test_agent.py b/examples/tutorials/10_async/10_temporal/050_agent_chat_guardrails/tests/test_agent.py
deleted file mode 100644
index 1b1f7a40..00000000
--- a/examples/tutorials/10_async/10_temporal/050_agent_chat_guardrails/tests/test_agent.py
+++ /dev/null
@@ -1,136 +0,0 @@
-"""
-Sample tests for AgentEx ACP agent.
-
-This test suite demonstrates how to test the main AgentEx API functions:
-- Non-streaming event sending and polling
-- Streaming event sending
-
-To run these tests:
-1.
Make sure the agent is running (via docker-compose or `agentex agents run`) -2. Set the AGENTEX_API_BASE_URL environment variable if not using default -3. Run: pytest test_agent.py -v - -Configuration: -- AGENTEX_API_BASE_URL: Base URL for the AgentEx server (default: http://localhost:5003) -- AGENT_NAME: Name of the agent to test (default: at050-agent-chat-guardrails) -""" - -import os - -import pytest -import pytest_asyncio - -from agentex import AsyncAgentex - -# Configuration from environment variables -AGENTEX_API_BASE_URL = os.environ.get("AGENTEX_API_BASE_URL", "http://localhost:5003") -AGENT_NAME = os.environ.get("AGENT_NAME", "at050-agent-chat-guardrails") - - -@pytest_asyncio.fixture -async def client(): - """Create an AsyncAgentex client instance for testing.""" - client = AsyncAgentex(base_url=AGENTEX_API_BASE_URL) - yield client - await client.close() - - -@pytest.fixture -def agent_name(): - """Return the agent name for testing.""" - return AGENT_NAME - - -@pytest_asyncio.fixture -async def agent_id(client, agent_name): - """Retrieve the agent ID based on the agent name.""" - agents = await client.agents.list() - for agent in agents: - if agent.name == agent_name: - return agent.id - raise ValueError(f"Agent with name {agent_name} not found.") - - -class TestNonStreamingEvents: - """Test non-streaming event sending and polling.""" - - @pytest.mark.asyncio - async def test_send_event_and_poll(self, client: AsyncAgentex, agent_id: str): - """Test sending an event and polling for the response.""" - # TODO: Create a task for this conversation - # task_response = await client.agents.create_task(agent_id, params=ParamsCreateTaskRequest(name=uuid.uuid1().hex)) - # task = task_response.result - # assert task is not None - - # TODO: Poll for the initial task creation message (if your agent sends one) - # async for message in poll_messages( - # client=client, - # task_id=task.id, - # timeout=30, - # sleep_interval=1.0, - # ): - # assert isinstance(message, TaskMessage) - # if message.content and message.content.type == "text" and message.content.author == "agent": - # # Check for your expected initial message - # assert "expected initial text" in message.content.content - # break - - # TODO: Send an event and poll for response using the yielding helper function - # user_message = "Your test message here" - # async for message in send_event_and_poll_yielding( - # client=client, - # agent_id=agent_id, - # task_id=task.id, - # user_message=user_message, - # timeout=30, - # sleep_interval=1.0, - # ): - # assert isinstance(message, TaskMessage) - # if message.content and message.content.type == "text" and message.content.author == "agent": - # # Check for your expected response - # assert "expected response text" in message.content.content - # break - pass - - -class TestStreamingEvents: - """Test streaming event sending.""" - - @pytest.mark.asyncio - async def test_send_event_and_stream(self, client: AsyncAgentex, agent_id: str): - """Test sending an event and streaming the response.""" - # TODO: Create a task for this conversation - # task_response = await client.agents.create_task(agent_id, params=ParamsCreateTaskRequest(name=uuid.uuid1().hex)) - # task = task_response.result - # assert task is not None - - # user_message = "Your test message here" - - # # Collect events from stream - # all_events = [] - - # async def collect_stream_events(): - # async for event in stream_agent_response( - # client=client, - # task_id=task.id, - # timeout=30, - # ): - # all_events.append(event) - - # # 
Start streaming task - # stream_task = asyncio.create_task(collect_stream_events()) - - # # Send the event - # event_content = TextContentParam(type="text", author="user", content=user_message) - # await client.agents.send_event(agent_id=agent_id, params={"task_id": task.id, "content": event_content}) - - # # Wait for streaming to complete - # await stream_task - - # # TODO: Add your validation here - # assert len(all_events) > 0, "No events received in streaming response" - pass - - -if __name__ == "__main__": - pytest.main([__file__, "-v"]) \ No newline at end of file diff --git a/examples/tutorials/10_async/10_temporal/060_open_ai_agents_sdk_hello_world/.dockerignore b/examples/tutorials/10_async/10_temporal/060_open_ai_agents_sdk_hello_world/.dockerignore deleted file mode 100644 index c4948947..00000000 --- a/examples/tutorials/10_async/10_temporal/060_open_ai_agents_sdk_hello_world/.dockerignore +++ /dev/null @@ -1,43 +0,0 @@ -# Python -__pycache__/ -*.py[cod] -*$py.class -*.so -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -*.egg-info/ -.installed.cfg -*.egg - -# Environments -.env** -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# IDE -.idea/ -.vscode/ -*.swp -*.swo - -# Git -.git -.gitignore - -# Misc -.DS_Store diff --git a/examples/tutorials/10_async/10_temporal/060_open_ai_agents_sdk_hello_world/Dockerfile b/examples/tutorials/10_async/10_temporal/060_open_ai_agents_sdk_hello_world/Dockerfile deleted file mode 100644 index d38075e5..00000000 --- a/examples/tutorials/10_async/10_temporal/060_open_ai_agents_sdk_hello_world/Dockerfile +++ /dev/null @@ -1,62 +0,0 @@ -# syntax=docker/dockerfile:1.3 -FROM python:3.12-slim -COPY --from=ghcr.io/astral-sh/uv:0.6.4 /uv /uvx /bin/ - -# Install system dependencies -RUN apt-get update && apt-get install -y \ - htop \ - vim \ - curl \ - tar \ - python3-dev \ - postgresql-client \ - build-essential \ - libpq-dev \ - gcc \ - cmake \ - netcat-openbsd \ - nodejs \ - npm \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/** - -# Install tctl (Temporal CLI) -RUN curl -L https://github.com/temporalio/tctl/releases/download/v1.18.1/tctl_1.18.1_linux_arm64.tar.gz -o /tmp/tctl.tar.gz && \ - tar -xzf /tmp/tctl.tar.gz -C /usr/local/bin && \ - chmod +x /usr/local/bin/tctl && \ - rm /tmp/tctl.tar.gz - -RUN uv pip install --system --upgrade pip setuptools wheel - -ENV UV_HTTP_TIMEOUT=1000 - -# Copy pyproject.toml and README.md to install dependencies -COPY 10_async/10_temporal/060_open_ai_agents_sdk_hello_world/pyproject.toml /app/060_open_ai_agents_sdk_hello_world/pyproject.toml -COPY 10_async/10_temporal/060_open_ai_agents_sdk_hello_world/README.md /app/060_open_ai_agents_sdk_hello_world/README.md - -WORKDIR /app/060_open_ai_agents_sdk_hello_world - -# Copy the project code -COPY 10_async/10_temporal/060_open_ai_agents_sdk_hello_world/project /app/060_open_ai_agents_sdk_hello_world/project - -# Copy the test files -COPY 10_async/10_temporal/060_open_ai_agents_sdk_hello_world/tests /app/060_open_ai_agents_sdk_hello_world/tests - -# Copy shared test utilities -COPY test_utils /app/test_utils - -# Install the required Python packages with dev dependencies -RUN uv pip install --system .[dev] - -WORKDIR /app/060_open_ai_agents_sdk_hello_world - -ENV PYTHONPATH=/app - -# Set test environment variables -ENV AGENT_NAME=at060-open-ai-agents-sdk-hello-world - -# Run the ACP server using uvicorn -CMD ["uvicorn", "project.acp:acp", "--host", "0.0.0.0", "--port", "8000"] - -# When we deploy the 
worker, we will replace the CMD with the following -# CMD ["python", "-m", "run_worker"] diff --git a/examples/tutorials/10_async/10_temporal/060_open_ai_agents_sdk_hello_world/README.md b/examples/tutorials/10_async/10_temporal/060_open_ai_agents_sdk_hello_world/README.md deleted file mode 100644 index 00b1fcea..00000000 --- a/examples/tutorials/10_async/10_temporal/060_open_ai_agents_sdk_hello_world/README.md +++ /dev/null @@ -1,105 +0,0 @@ -# [Temporal] OpenAI Agents SDK - Hello World - -**Part of the [OpenAI SDK + Temporal integration series](../README.md)** - -## What You'll Learn - -The OpenAI Agents SDK plugin automatically converts LLM calls into durable Temporal activities. When `Runner.run()` executes, the LLM invocation becomes an `invoke_model_activity` visible in Temporal UI with full observability, automatic retries, and durability. - -**Key insight:** You don't need to wrap agent calls in activities manually - the plugin handles this automatically, making non-deterministic LLM calls work seamlessly in Temporal workflows. - -## Prerequisites -- Development environment set up (see [main repo README](https://github.com/scaleapi/scale-agentex)) -- Backend services running: `make dev` from repository root (includes Temporal) -- Temporal UI available at http://localhost:8233 -- OpenAI API key configured (see setup below) -- Understanding of Temporal workflows (see [000_hello_acp](../000_hello_acp/)) - -## Setup - -This tutorial uses the OpenAI Agents SDK plugin, which needs to be added in two places: - -### 1. Add Plugin to ACP (`project/acp.py`) -```python -from agentex.lib.plugins.openai_agents import OpenAIAgentsPlugin - -acp = FastACP.create( - config=TemporalACPConfig( - plugins=[OpenAIAgentsPlugin()] # Add this - ) -) -``` - -### 2. Add Plugin to Worker (`project/run_worker.py`) -```python -from agentex.lib.plugins.openai_agents import OpenAIAgentsPlugin - -worker = AgentexWorker( - task_queue=task_queue_name, - plugins=[OpenAIAgentsPlugin()], # Add this -) -``` - -### 3. Configure OpenAI API Key -Add to `manifest.yaml`: -```yaml -secrets: - - name: OPENAI_API_KEY - value: "your-openai-api-key-here" -``` - -Or set in `.env` file: `OPENAI_API_KEY=your-key-here` - -## Quick Start - -```bash -cd examples/tutorials/10_async/10_temporal/060_open_ai_agents_sdk_hello_world -uv run agentex agents run --manifest manifest.yaml -``` - -**Monitor:** Open Temporal UI at http://localhost:8233 to see automatic activity creation. - -## Try It - -1. Send a message to the agent (it responds in haikus) -2. Check the agent response: - -![Agent Response](../_images/hello_world_response.png) - -3. Open Temporal UI at http://localhost:8233 -4. Find your workflow execution -5. Look for the `invoke_model_activity` - this was created automatically: - -![Temporal UI](../_images/hello_world_temporal.png) - -6. Inspect the activity to see: - - Input parameters (your message) - - Output (agent's haiku response) - - Execution time - - Retry attempts (if any failures occurred) - -## Key Code - -```python -# This simple call automatically becomes a durable Temporal activity: -agent = Agent(name="Haiku Assistant", instructions="...") -result = await Runner.run(agent, user_message) -``` - -The magic happens behind the scenes - no manual activity wrapping needed. The conversation is now durable and survives process restarts. - -## Why This Matters - -**Durability:** If your worker crashes mid-conversation, Temporal resumes exactly where it left off. No lost context, no repeated work. 
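-
-The sketch below shows the shape this takes in a workflow, assuming the plugin from the Setup section is registered. The class and signal names are illustrative, not part of this tutorial's code; the point is that the conversation history is ordinary workflow state, so after a crash Temporal replays the event history and rebuilds the exact same list before the next `Runner.run()` call. (`RunResult.to_input_list()` is the Agents SDK helper that returns the conversation including the new assistant turn.)
-
-```python
-from agents import Agent, Runner
-from temporalio import workflow
-
-
-@workflow.defn
-class DurableChatWorkflow:  # illustrative name, not part of this tutorial
-    def __init__(self) -> None:
-        self._history: list = []  # replay rebuilds this after a crash
-
-    @workflow.signal
-    async def on_user_message(self, text: str) -> None:
-        agent = Agent(name="Haiku Assistant", instructions="Respond in haikus.")
-        self._history.append({"role": "user", "content": text})
-        # The plugin turns this call into a durable invoke_model_activity
-        result = await Runner.run(agent, self._history)
-        self._history = result.to_input_list()  # history incl. the new reply
-
-    @workflow.run
-    async def run(self) -> None:
-        await workflow.wait_condition(lambda: False)  # stay alive for signals
-```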
- -**Observability:** Every LLM call is tracked as an activity with full execution history. - -**Reliability:** Failed LLM calls are automatically retried with exponential backoff. - -## When to Use -- Building agents with OpenAI's SDK -- Need durability for LLM calls -- Want automatic activity creation without manual wrapping -- Leveraging OpenAI's agent patterns with Temporal's durability - -**Next:** [070_open_ai_agents_sdk_tools](../070_open_ai_agents_sdk_tools/) - Add durable tools to your agents diff --git a/examples/tutorials/10_async/10_temporal/060_open_ai_agents_sdk_hello_world/dev.ipynb b/examples/tutorials/10_async/10_temporal/060_open_ai_agents_sdk_hello_world/dev.ipynb deleted file mode 100644 index ae143b89..00000000 --- a/examples/tutorials/10_async/10_temporal/060_open_ai_agents_sdk_hello_world/dev.ipynb +++ /dev/null @@ -1,124 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "36834357", - "metadata": {}, - "outputs": [], - "source": [ - "from agentex import Agentex\n", - "\n", - "client = Agentex(base_url=\"http://localhost:5003\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d1c309d6", - "metadata": {}, - "outputs": [], - "source": "AGENT_NAME = \"at060-open-ai-agents-sdk-hello-world\"" - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9f6e6ef0", - "metadata": {}, - "outputs": [], - "source": [ - "# (REQUIRED) Create a new task. For Agentic agents, you must create a task for messages to be associated with.\n", - "import uuid\n", - "\n", - "rpc_response = client.agents.create_task(\n", - " agent_name=AGENT_NAME,\n", - " params={\n", - " \"name\": f\"{str(uuid.uuid4())[:8]}-task\",\n", - " \"params\": {}\n", - " }\n", - ")\n", - "\n", - "task = rpc_response.result\n", - "print(task)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b03b0d37", - "metadata": {}, - "outputs": [], - "source": [ - "# Send an event to the agent\n", - "\n", - "# The response is expected to be a list of TaskMessage objects, which is a union of the following types:\n", - "# - TextContent: A message with just text content \n", - "# - DataContent: A message with JSON-serializable data content\n", - "# - ToolRequestContent: A message with a tool request, which contains a JSON-serializable request to call a tool\n", - "# - ToolResponseContent: A message with a tool response, which contains response object from a tool call in its content\n", - "\n", - "# When processing the message/send response, if you are expecting more than TextContent, such as DataContent, ToolRequestContent, or ToolResponseContent, you can process them as well\n", - "\n", - "rpc_response = client.agents.send_event(\n", - " agent_name=AGENT_NAME,\n", - " params={\n", - " \"content\": {\"type\": \"text\", \"author\": \"user\", \"content\": \"Hello what can you do?\"},\n", - " \"task_id\": task.id,\n", - " }\n", - ")\n", - "\n", - "event = rpc_response.result\n", - "print(event)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a6927cc0", - "metadata": {}, - "outputs": [], - "source": [ - "# Subscribe to the async task messages produced by the agent\n", - "from agentex.lib.utils.dev_tools import subscribe_to_async_task_messages\n", - "\n", - "task_messages = subscribe_to_async_task_messages(\n", - " client=client,\n", - " task=task, \n", - " only_after_timestamp=event.created_at, \n", - " print_messages=True,\n", - " rich_print=True,\n", - " timeout=5,\n", - ")" - ] - }, - { - "cell_type": "code", - 
"execution_count": null, - "id": "4864e354", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.9" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} \ No newline at end of file diff --git a/examples/tutorials/10_async/10_temporal/060_open_ai_agents_sdk_hello_world/environments.yaml b/examples/tutorials/10_async/10_temporal/060_open_ai_agents_sdk_hello_world/environments.yaml deleted file mode 100644 index f9051191..00000000 --- a/examples/tutorials/10_async/10_temporal/060_open_ai_agents_sdk_hello_world/environments.yaml +++ /dev/null @@ -1,64 +0,0 @@ -# Agent Environment Configuration -# ------------------------------ -# This file defines environment-specific settings for your agent. -# This DIFFERS from the manifest.yaml file in that it is used to program things that are ONLY per environment. - -# ********** EXAMPLE ********** -# schema_version: "v1" # This is used to validate the file structure and is not used by the agentex CLI -# environments: -# dev: -# auth: -# principal: -# user_id: "1234567890" -# user_name: "John Doe" -# user_email: "john.doe@example.com" -# user_role: "admin" -# user_permissions: "read, write, delete" -# helm_overrides: # This is used to override the global helm values.yaml file in the agentex-agent helm charts -# replicas: 3 -# resources: -# requests: -# cpu: "1000m" -# memory: "2Gi" -# limits: -# cpu: "2000m" -# memory: "4Gi" -# env: -# - name: LOG_LEVEL -# value: "DEBUG" -# - name: ENVIRONMENT -# value: "staging" -# -# kubernetes: -# # OPTIONAL - Otherwise it will be derived from separately. However, this can be used to override the derived -# # namespace and deploy it with in the same namespace that already exists for a separate agent. -# namespace: "team-example-tutorial" -# ********** END EXAMPLE ********** - -schema_version: "v1" # This is used to validate the file structure and is not used by the agentex CLI -environments: - dev: - auth: - principal: - user_id: # TODO: Fill in - account_id: # TODO: Fill in - helm_overrides: - # This is used to override the global helm values.yaml file in the agentex-agent helm charts - replicaCount: 2 - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "1000m" - memory: "2Gi" - temporal-worker: - enabled: true - replicaCount: 2 - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "1000m" - memory: "2Gi" \ No newline at end of file diff --git a/examples/tutorials/10_async/10_temporal/060_open_ai_agents_sdk_hello_world/manifest.yaml b/examples/tutorials/10_async/10_temporal/060_open_ai_agents_sdk_hello_world/manifest.yaml deleted file mode 100644 index 773d5ba4..00000000 --- a/examples/tutorials/10_async/10_temporal/060_open_ai_agents_sdk_hello_world/manifest.yaml +++ /dev/null @@ -1,141 +0,0 @@ -# Agent Manifest Configuration -# --------------------------- -# This file defines how your agent should be built and deployed. - -# Build Configuration -# ------------------ -# The build config defines what gets packaged into your agent's Docker image. -# This same configuration is used whether building locally or remotely. -# -# When building: -# 1. All files from include_paths are collected into a build context -# 2. 
The context is filtered by dockerignore rules -# 3. The Dockerfile uses this context to build your agent's image -# 4. The image is pushed to a registry and used to run your agent -build: - context: - # Root directory for the build context - root: ../../../ # Up to tutorials level to include test_utils - - # Paths to include in the Docker build context - # Must include: - # - Your agent's directory (your custom agent code) - # These paths are collected and sent to the Docker daemon for building - include_paths: - - 10_async/10_temporal/060_open_ai_agents_sdk_hello_world - - test_utils - - # Path to your agent's Dockerfile - # This defines how your agent's image is built from the context - # Relative to the root directory - dockerfile: 10_async/10_temporal/060_open_ai_agents_sdk_hello_world/Dockerfile - - # Path to your agent's .dockerignore - # Filters unnecessary files from the build context - # Helps keep build context small and builds fast - dockerignore: 10_async/10_temporal/060_open_ai_agents_sdk_hello_world/.dockerignore - - -# Local Development Configuration -# ----------------------------- -# Only used when running the agent locally -local_development: - agent: - port: 8000 # Port where your local ACP server is running - host_address: host.docker.internal # Host address for Docker networking (host.docker.internal for Docker, localhost for direct) - - # File paths for local development (relative to this manifest.yaml) - paths: - # Path to ACP server file - # Examples: - # project/acp.py (standard) - # src/server.py (custom structure) - # ../shared/acp.py (shared across projects) - # /absolute/path/acp.py (absolute path) - acp: project/acp.py - - # Path to temporal worker file - # Examples: - # project/run_worker.py (standard) - # workers/temporal.py (custom structure) - # ../shared/worker.py (shared across projects) - worker: project/run_worker.py - - -# Agent Configuration -# ----------------- -agent: - # Type of agent - either sync or async - acp_type: async - - # Unique name for your agent - # Used for task routing and monitoring - name: at060-open-ai-agents-sdk-hello-world - - # Description of what your agent does - # Helps with documentation and discovery - description: An AgentEx agent - - # Temporal workflow configuration - # This enables your agent to run as a Temporal workflow for long-running tasks - temporal: - enabled: true - workflows: - # Name of the workflow class - # Must match the @workflow.defn name in your workflow.py - - name: at060-open-ai-agents-sdk-hello-world - - # Queue name for task distribution - # Used by Temporal to route tasks to your agent - # Convention: _task_queue - queue_name: at060_open_ai_agents_sdk_hello_world_queue - - # Optional: Credentials mapping - # Maps Kubernetes secrets to environment variables - # Common credentials include: - credentials: - - env_var_name: REDIS_URL - secret_name: redis-url-secret - secret_key: url - # - env_var_name: OPENAI_API_KEY - # secret_name: openai-api-key - # secret_key: api-key - - # Optional: Set Environment variables for running your agent locally as well - # as for deployment later on - env: - OPENAI_API_KEY: "" - # OPENAI_BASE_URL: "" - OPENAI_ORG_ID: "" - - -# Deployment Configuration -# ----------------------- -# Configuration for deploying your agent to Kubernetes clusters -deployment: - # Container image configuration - image: - repository: "" # Update with your container registry - tag: "latest" # Default tag, should be versioned in production - - imagePullSecrets: - - name: my-registry-secret # 
Update with your image pull secret name - - # Global deployment settings that apply to all clusters - # These can be overridden using --override-file with custom configuration files - global: - agent: - name: "at060-open-ai-agents-sdk-hello-world" - description: "An AgentEx agent" - - # Default replica count - replicaCount: 1 - - # Default resource requirements - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "1000m" - memory: "2Gi" diff --git a/examples/tutorials/10_async/10_temporal/060_open_ai_agents_sdk_hello_world/project/__init__.py b/examples/tutorials/10_async/10_temporal/060_open_ai_agents_sdk_hello_world/project/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/examples/tutorials/10_async/10_temporal/060_open_ai_agents_sdk_hello_world/project/acp.py b/examples/tutorials/10_async/10_temporal/060_open_ai_agents_sdk_hello_world/project/acp.py deleted file mode 100644 index fcdbba15..00000000 --- a/examples/tutorials/10_async/10_temporal/060_open_ai_agents_sdk_hello_world/project/acp.py +++ /dev/null @@ -1,72 +0,0 @@ -import os -import sys - -from temporalio.contrib.openai_agents import OpenAIAgentsPlugin - -# === DEBUG SETUP (AgentEx CLI Debug Support) === -if os.getenv("AGENTEX_DEBUG_ENABLED") == "true": - try: - import debugpy - debug_port = int(os.getenv("AGENTEX_DEBUG_PORT", "5679")) - debug_type = os.getenv("AGENTEX_DEBUG_TYPE", "acp") - wait_for_attach = os.getenv("AGENTEX_DEBUG_WAIT_FOR_ATTACH", "false").lower() == "true" - - # Configure debugpy - debugpy.configure(subProcess=False) - debugpy.listen(debug_port) - - print(f"๐Ÿ› [{debug_type.upper()}] Debug server listening on port {debug_port}") - - if wait_for_attach: - print(f"โณ [{debug_type.upper()}] Waiting for debugger to attach...") - debugpy.wait_for_client() - print(f"โœ… [{debug_type.upper()}] Debugger attached!") - else: - print(f"๐Ÿ“ก [{debug_type.upper()}] Ready for debugger attachment") - - except ImportError: - print("โŒ debugpy not available. Install with: pip install debugpy") - sys.exit(1) - except Exception as e: - print(f"โŒ Debug setup failed: {e}") - sys.exit(1) -# === END DEBUG SETUP === - -from agentex.lib.types.fastacp import TemporalACPConfig -from agentex.lib.sdk.fastacp.fastacp import FastACP -from agentex.lib.core.temporal.plugins.openai_agents.models.temporal_streaming_model import ( - TemporalStreamingModelProvider, -) -from agentex.lib.core.temporal.plugins.openai_agents.interceptors.context_interceptor import ContextInterceptor - -context_interceptor = ContextInterceptor() -temporal_streaming_model_provider = TemporalStreamingModelProvider() - -# Create the ACP server -acp = FastACP.create( - acp_type="async", - config=TemporalACPConfig( - # When deployed to the cluster, the Temporal address will automatically be set to the cluster address - # For local development, we set the address manually to talk to the local Temporal service set up via docker compose - # We are also adding the Open AI Agents SDK plugin to the ACP. 
- type="temporal", - temporal_address=os.getenv("TEMPORAL_ADDRESS", "localhost:7233"), - plugins=[OpenAIAgentsPlugin(model_provider=temporal_streaming_model_provider)], - interceptors=[context_interceptor] - ) -) - - -# Notice that we don't need to register any handlers when we use type="temporal" -# If you look at the code in agentex.sdk.fastacp.impl.temporal_acp -# You can see that these handlers are automatically registered when the ACP is created - -# @acp.on_task_create -# This will be handled by the method in your workflow that is decorated with @workflow.run - -# @acp.on_task_event_send -# This will be handled by the method in your workflow that is decorated with @workflow.signal(name=SignalName.RECEIVE_MESSAGE) - -# @acp.on_task_cancel -# This does not need to be handled by your workflow. -# It is automatically handled by the temporal client which cancels the workflow directly \ No newline at end of file diff --git a/examples/tutorials/10_async/10_temporal/060_open_ai_agents_sdk_hello_world/project/run_worker.py b/examples/tutorials/10_async/10_temporal/060_open_ai_agents_sdk_hello_world/project/run_worker.py deleted file mode 100644 index df281b58..00000000 --- a/examples/tutorials/10_async/10_temporal/060_open_ai_agents_sdk_hello_world/project/run_worker.py +++ /dev/null @@ -1,69 +0,0 @@ -import asyncio - -from temporalio.contrib.openai_agents import OpenAIAgentsPlugin - -from project.workflow import At060OpenAiAgentsSdkHelloWorldWorkflow -from agentex.lib.utils.debug import setup_debug_if_enabled -from agentex.lib.utils.logging import make_logger -from agentex.lib.environment_variables import EnvironmentVariables -from agentex.lib.core.temporal.activities import get_all_activities -from agentex.lib.core.temporal.workers.worker import AgentexWorker -from agentex.lib.core.temporal.plugins.openai_agents.models.temporal_streaming_model import ( - TemporalStreamingModelProvider, -) -from agentex.lib.core.temporal.plugins.openai_agents.interceptors.context_interceptor import ContextInterceptor - -environment_variables = EnvironmentVariables.refresh() - -logger = make_logger(__name__) - - -async def main(): - # Setup debug mode if enabled - setup_debug_if_enabled() - - task_queue_name = environment_variables.WORKFLOW_TASK_QUEUE - if task_queue_name is None: - raise ValueError("WORKFLOW_TASK_QUEUE is not set") - - # Add activities to the worker - all_activities = get_all_activities() + [] # add your own activities here - - # ============================================================================ - # STREAMING SETUP: Interceptor + Model Provider - # ============================================================================ - # This is where the streaming magic is configured! Two key components: - # - # 1. ContextInterceptor - # - Threads task_id through activity headers using Temporal's interceptor pattern - # - Outbound: Reads _task_id from workflow instance, injects into activity headers - # - Inbound: Extracts task_id from headers, sets streaming_task_id ContextVar - # - This enables runtime context without forking the Temporal plugin! - # - # 2. 
TemporalStreamingModelProvider - # - Returns TemporalStreamingModel instances that read task_id from ContextVar - # - TemporalStreamingModel.get_response() streams tokens to Redis in real-time - # - Still returns complete response to Temporal for determinism/replay safety - # - Uses AgentEx ADK streaming infrastructure (Redis XADD to stream:{task_id}) - # - # Together, these enable real-time LLM streaming while maintaining Temporal's - # durability guarantees. No forked components - uses STANDARD OpenAIAgentsPlugin! - context_interceptor = ContextInterceptor() - temporal_streaming_model_provider = TemporalStreamingModelProvider() - - # Create a worker with automatic tracing - # IMPORTANT: We use the STANDARD temporalio.contrib.openai_agents.OpenAIAgentsPlugin - # No forking needed! The interceptor + model provider handle all streaming logic. - worker = AgentexWorker( - task_queue=task_queue_name, - plugins=[OpenAIAgentsPlugin(model_provider=temporal_streaming_model_provider)], - interceptors=[context_interceptor] - ) - - await worker.run( - activities=all_activities, - workflow=At060OpenAiAgentsSdkHelloWorldWorkflow, - ) - -if __name__ == "__main__": - asyncio.run(main()) \ No newline at end of file diff --git a/examples/tutorials/10_async/10_temporal/060_open_ai_agents_sdk_hello_world/project/workflow.py b/examples/tutorials/10_async/10_temporal/060_open_ai_agents_sdk_hello_world/project/workflow.py deleted file mode 100644 index e01f40ce..00000000 --- a/examples/tutorials/10_async/10_temporal/060_open_ai_agents_sdk_hello_world/project/workflow.py +++ /dev/null @@ -1,313 +0,0 @@ -""" -OpenAI Agents SDK + Temporal Integration: Hello World Tutorial - -This tutorial demonstrates the fundamental integration between OpenAI Agents SDK and Temporal workflows. -It shows how to: - -1. Set up a basic Temporal workflow with OpenAI Agents SDK -2. Create a simple agent that responds to user messages -3. See how agent conversations become durable through Temporal -4. Understand the automatic activity creation for model invocations - -KEY CONCEPTS DEMONSTRATED: -- Basic agent creation with OpenAI Agents SDK -- Temporal workflow durability for agent conversations -- Automatic activity creation for LLM calls (visible in Temporal UI) -- Long-running agent workflows that can survive restarts - -This is the foundation before moving to more advanced patterns with tools and activities. 
-""" - -import os -import json -from typing import Any, Dict, List - -from agents import Agent, Runner -from temporalio import workflow - -from agentex.lib import adk -from agentex.lib.types.acp import SendEventParams, CreateTaskParams -from agentex.lib.types.tracing import SGPTracingProcessorConfig -from agentex.lib.utils.logging import make_logger -from agentex.types.text_content import TextContent -from agentex.lib.utils.model_utils import BaseModel -from agentex.lib.environment_variables import EnvironmentVariables -from agentex.lib.core.temporal.types.workflow import SignalName -from agentex.lib.core.temporal.workflows.workflow import BaseWorkflow -from agentex.lib.core.tracing.tracing_processor_manager import ( - add_tracing_processor_config, -) - -# Configure tracing processor (optional - only if you have SGP credentials) -add_tracing_processor_config( - SGPTracingProcessorConfig( - sgp_api_key=os.environ.get("SGP_API_KEY", ""), - sgp_account_id=os.environ.get("SGP_ACCOUNT_ID", ""), - ) -) - -environment_variables = EnvironmentVariables.refresh() - -if environment_variables.WORKFLOW_NAME is None: - raise ValueError("Environment variable WORKFLOW_NAME is not set") - -if environment_variables.AGENT_NAME is None: - raise ValueError("Environment variable AGENT_NAME is not set") - -# Validate OpenAI API key is set -if not os.environ.get("OPENAI_API_KEY"): - raise ValueError( - "OPENAI_API_KEY environment variable is not set. " - "This tutorial requires an OpenAI API key to run the OpenAI Agents SDK. " - "Please set OPENAI_API_KEY in your environment or manifest.yaml file." - ) - -logger = make_logger(__name__) - - -class StateModel(BaseModel): - """ - State model for preserving conversation history across turns. - - This allows the agent to maintain context throughout the conversation, - making it possible to reference previous messages and build on the discussion. - """ - - input_list: List[Dict[str, Any]] - turn_number: int - - -@workflow.defn(name=environment_variables.WORKFLOW_NAME) -class At060OpenAiAgentsSdkHelloWorldWorkflow(BaseWorkflow): - """ - Hello World Temporal Workflow with OpenAI Agents SDK Integration - - This workflow demonstrates the basic pattern for integrating OpenAI Agents SDK - with Temporal workflows. It shows how agent conversations become durable and - observable through Temporal's workflow engine. - - KEY FEATURES: - - Durable agent conversations that survive process restarts - - Automatic activity creation for LLM calls (visible in Temporal UI) - - Long-running workflows that can handle multiple user interactions - - Full observability and monitoring through Temporal dashboard - """ - - def __init__(self): - super().__init__(display_name=environment_variables.AGENT_NAME) - self._complete_task = False - self._state: StateModel | None = None - self._task_id = None - self._trace_id = None - self._parent_span_id = None - - @workflow.signal(name=SignalName.RECEIVE_EVENT) - async def on_task_event_send(self, params: SendEventParams) -> None: - """ - Handle incoming user messages and respond using OpenAI Agents SDK - - This signal handler demonstrates the basic integration pattern: - 1. Receive user message through Temporal signal - 2. Echo message back to UI for visibility - 3. Create and run OpenAI agent (automatically becomes a Temporal activity) - 4. 
Return agent's response to user - - TEMPORAL INTEGRATION MAGIC: - - When Runner.run() executes, it automatically creates a "invoke_model_activity" - - This activity is visible in Temporal UI with full observability - - If the LLM call fails, Temporal automatically retries it - - The entire conversation is durable and survives process restarts - """ - logger.info(f"Received task message instruction: {params}") - - if self._state is None: - raise ValueError("State is not initialized") - - # Increment turn number for tracing - self._state.turn_number += 1 - - self._task_id = params.task.id - self._trace_id = params.task.id - - # Add the user message to conversation history - self._state.input_list.append({"role": "user", "content": params.event.content.content}) - - # ============================================================================ - # STEP 1: Echo User Message - # ============================================================================ - # Echo back the client's message to show it in the UI. This is not done by default - # so the agent developer has full control over what is shown to the user. - await adk.messages.create(task_id=params.task.id, content=params.event.content) - - # ============================================================================ - # STEP 2: Wrap execution in tracing span - # ============================================================================ - # Create a span to track this turn of the conversation - async with adk.tracing.span( - trace_id=params.task.id, - name=f"Turn {self._state.turn_number}", - input=self._state.model_dump(), - ) as span: - self._parent_span_id = span.id if span else None - - # ============================================================================ - # STEP 3: Create OpenAI Agent - # ============================================================================ - # Create a simple agent using OpenAI Agents SDK. This agent will respond in haikus - # to demonstrate the basic functionality. No tools needed for this hello world example. - # - # IMPORTANT: The OpenAI Agents SDK plugin (configured in acp.py and run_worker.py) - # automatically converts agent interactions into Temporal activities for durability. - - agent = Agent( - name="Haiku Assistant", - instructions="You are a friendly assistant who always responds in the form of a haiku. " - "Each response should be exactly 3 lines following the 5-7-5 syllable pattern.", - ) - - # ============================================================================ - # STEP 4: Run Agent with Temporal Durability + Streaming + Conversation History - # ============================================================================ - # This is where the magic happens! When Runner.run() executes: - # 1. The OpenAI Agents SDK makes LLM calls to generate responses - # 2. The plugin automatically wraps these calls as Temporal activities - # 3. You'll see "invoke_model_activity" appear in the Temporal UI - # 4. If the LLM call fails, Temporal retries it automatically - # 5. 
The conversation state is preserved even if the worker restarts - # - # STREAMING MAGIC (via Interceptors + Model Provider): - # - The ContextInterceptor threads task_id through activity headers - # - The TemporalStreamingModelProvider returns a model that streams to Redis - # - The model streams tokens in real-time while maintaining determinism - # - Complete response is still returned to Temporal for replay safety - # - # CONVERSATION HISTORY: - # - We pass self._state.input_list which contains the full conversation history - # - This allows the agent to maintain context across multiple turns - # - The agent can reference previous messages and build on the discussion - - # IMPORTANT NOTE ABOUT AGENT RUN CALLS: - # ===================================== - # Notice that we don't need to wrap the Runner.run() call in an activity! - # This might feel weird for anyone who has used Temporal before, as typically - # non-deterministic operations like LLM calls would need to be wrapped in activities. - # However, the OpenAI Agents SDK plugin is handling all of this automatically - # behind the scenes. - # - # Another benefit of this approach is that we don't have to serialize the arguments, - # which would typically be the case with Temporal activities - the plugin handles - # all of this for us, making the developer experience much smoother. - - # Pass the conversation history to Runner.run to maintain context - # The input_list contains all previous messages in OpenAI format - result = await Runner.run(agent, self._state.input_list) - - # Update the state with the assistant's response for the next turn - # The result contains the full updated conversation including the assistant's response - if hasattr(result, "messages") and result.messages: - # Extract the assistant message from the result - # OpenAI Agents SDK returns the full conversation including the new assistant message - for msg in result.messages: - # Add new assistant messages to history - # Skip messages we already have (user messages we just added) - if msg.get("role") == "assistant" and msg not in self._state.input_list: - self._state.input_list.append(msg) - - # Set span output for tracing - include full state - span.output = self._state.model_dump() - - # ============================================================================ - # WHAT YOU'LL SEE IN TEMPORAL UI: - # ============================================================================ - # After running this: - # 1. Go to localhost:8080 (Temporal UI) - # 2. Find your workflow execution - # 3. You'll see an "invoke_model_activity" that shows: - # - Execution time for the LLM call - # - Input parameters (user message) - # - Output (agent's haiku response) - # - Retry attempts (if any failures occurred) - # - # This gives you full observability into your agent's LLM interactions! - # ============================================================================ - - @workflow.run - async def on_task_create(self, params: CreateTaskParams) -> str: - """ - Temporal Workflow Entry Point - Long-Running Agent Conversation - - This method runs when the workflow starts and keeps the agent conversation alive. - It demonstrates Temporal's ability to run workflows for extended periods (minutes, - hours, days, or even years) while maintaining full durability. - - TEMPORAL WORKFLOW LIFECYCLE: - 1. Workflow starts when a task is created - 2. Sends initial acknowledgment message to user - 3. Waits indefinitely for user messages (handled by on_task_event_send signal) - 4. 
Each user message triggers the signal handler which runs the OpenAI agent - 5. Workflow continues running until explicitly completed or canceled - - DURABILITY BENEFITS: - - Workflow survives worker restarts, deployments, infrastructure failures - - All agent conversation history is preserved in Temporal's event store - - Can resume from exact point of failure without losing context - - Scales to handle millions of concurrent agent conversations - """ - logger.info(f"Received task create params: {params}") - - # ============================================================================ - # WORKFLOW INITIALIZATION: Initialize State - # ============================================================================ - # Initialize the conversation state with an empty history - # This will be populated as the conversation progresses - self._state = StateModel( - input_list=[], - turn_number=0, - ) - - # ============================================================================ - # WORKFLOW INITIALIZATION: Send Welcome Message - # ============================================================================ - # Acknowledge that the task has been created and the agent is ready. - # This message appears once when the conversation starts. - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content=f"๐ŸŒธ Hello! I'm your Haiku Assistant, powered by OpenAI Agents SDK + Temporal! ๐ŸŒธ\n\n" - f"I'll respond to all your messages in beautiful haiku form. " - f"This conversation is now durable - even if I restart, our chat continues!\n\n" - f"Task created with params:\n{json.dumps(params.params, indent=2)}\n\n" - f"Send me a message and I'll respond with a haiku! ๐ŸŽ‹", - ), - ) - - # ============================================================================ - # WORKFLOW PERSISTENCE: Wait for Completion Signal - # ============================================================================ - # This is the key to Temporal's power: the workflow runs indefinitely, - # handling user messages through signals (on_task_event_send) until - # explicitly told to complete. - # - # IMPORTANT: This wait_condition keeps the workflow alive and durable: - # - No timeout = workflow can run forever (perfect for ongoing conversations) - # - Temporal can handle millions of such concurrent workflows - # - If worker crashes, workflow resumes exactly where it left off - # - All conversation state is preserved in Temporal's event log - await workflow.wait_condition( - lambda: self._complete_task, - timeout=None, # No timeout = truly long-running agent conversation - ) - return "Agent conversation completed" - - @workflow.signal - async def complete_task_signal(self) -> None: - """ - Signal to gracefully complete the agent conversation workflow - - This signal can be sent to end the workflow cleanly. In a real application, - you might trigger this when a user ends the conversation or after a period - of inactivity. 
- """ - logger.info("Received signal to complete the agent conversation") - self._complete_task = True diff --git a/examples/tutorials/10_async/10_temporal/060_open_ai_agents_sdk_hello_world/pyproject.toml b/examples/tutorials/10_async/10_temporal/060_open_ai_agents_sdk_hello_world/pyproject.toml deleted file mode 100644 index 5a1cd08d..00000000 --- a/examples/tutorials/10_async/10_temporal/060_open_ai_agents_sdk_hello_world/pyproject.toml +++ /dev/null @@ -1,35 +0,0 @@ -[build-system] -requires = ["hatchling"] -build-backend = "hatchling.build" - -[project] -name = "at060_open_ai_agents_sdk_hello_world" -version = "0.1.0" -description = "An AgentEx agent" -requires-python = ">=3.12" -dependencies = [ - "agentex-sdk>=0.6.0", - "openai-agents>=0.4.2", - "temporalio>=1.18.2", - "scale-gp", -] - -[project.optional-dependencies] -dev = [ - "pytest", - "black", - "isort", - "flake8", - "debugpy>=1.8.15", -] - -[tool.hatch.build.targets.wheel] -packages = ["project"] - -[tool.black] -line-length = 88 -target-version = ['py312'] - -[tool.isort] -profile = "black" -line_length = 88 diff --git a/examples/tutorials/10_async/10_temporal/060_open_ai_agents_sdk_hello_world/tests/test_agent.py b/examples/tutorials/10_async/10_temporal/060_open_ai_agents_sdk_hello_world/tests/test_agent.py deleted file mode 100644 index d571e0e7..00000000 --- a/examples/tutorials/10_async/10_temporal/060_open_ai_agents_sdk_hello_world/tests/test_agent.py +++ /dev/null @@ -1,137 +0,0 @@ -""" -Sample tests for AgentEx ACP agent. - -This test suite demonstrates how to test the main AgentEx API functions: -- Non-streaming event sending and polling -- Streaming event sending - -To run these tests: -1. Make sure the agent is running (via docker-compose or `agentex agents run`) -2. Set the AGENTEX_API_BASE_URL environment variable if not using default -3. 
Run: pytest test_agent.py -v
-
-Configuration:
-- AGENTEX_API_BASE_URL: Base URL for the AgentEx server (default: http://localhost:5003)
-- AGENT_NAME: Name of the agent to test (default: at060-open-ai-agents-sdk-hello-world)
-"""
-
-import os
-import uuid
-
-import pytest
-import pytest_asyncio
-from test_utils.async_utils import (
-    poll_messages,
-    send_event_and_poll_yielding,
-)
-
-from agentex import AsyncAgentex
-from agentex.types.task_message import TaskMessage
-from agentex.types.agent_rpc_params import ParamsCreateTaskRequest
-
-# Configuration from environment variables
-AGENTEX_API_BASE_URL = os.environ.get("AGENTEX_API_BASE_URL", "http://localhost:5003")
-AGENT_NAME = os.environ.get("AGENT_NAME", "at060-open-ai-agents-sdk-hello-world")
-
-
-@pytest_asyncio.fixture
-async def client():
-    """Create an AsyncAgentex client instance for testing."""
-    client = AsyncAgentex(base_url=AGENTEX_API_BASE_URL)
-    yield client
-    await client.close()
-
-
-@pytest.fixture
-def agent_name():
-    """Return the agent name for testing."""
-    return AGENT_NAME
-
-
-@pytest_asyncio.fixture
-async def agent_id(client, agent_name):
-    """Retrieve the agent ID based on the agent name."""
-    agents = await client.agents.list()
-    for agent in agents:
-        if agent.name == agent_name:
-            return agent.id
-    raise ValueError(f"Agent with name {agent_name} not found.")
-
-
-class TestNonStreamingEvents:
-    """Test non-streaming event sending and polling."""
-
-    @pytest.mark.asyncio
-    async def test_send_event_and_poll(self, client: AsyncAgentex, agent_id: str):
-        """Test sending an event and polling for the response."""
-        task_response = await client.agents.create_task(agent_id, params=ParamsCreateTaskRequest(name=uuid.uuid1().hex))
-        task = task_response.result
-        assert task is not None
-
-        # Poll for the initial task creation message
-        async for message in poll_messages(
-            client=client,
-            task_id=task.id,
-            timeout=30,
-            sleep_interval=1.0,
-        ):
-            assert isinstance(message, TaskMessage)
-            if message.content and message.content.type == "text" and message.content.author == "agent":
-                # Check for the Haiku Assistant welcome message
-                assert "Haiku Assistant" in message.content.content
-                assert "Temporal" in message.content.content
-                break
-
-        # Send event and poll for response with streaming updates
-        user_message = "Hello how is life?"
- print(f"[DEBUG 060 POLL] Sending message: '{user_message}'") - - # Use yield_updates=True to get all streaming chunks as they're written - final_message = None - async for message in send_event_and_poll_yielding( - client=client, - agent_id=agent_id, - task_id=task.id, - user_message=user_message, - timeout=30, - sleep_interval=1.0, - yield_updates=True, # Get updates as streaming writes chunks - ): - if message.content and message.content.type == "text" and message.content.author == "agent": - print( - f"[DEBUG 060 POLL] Received update - Status: {message.streaming_status}, " - f"Content length: {len(message.content.content)}" - ) - final_message = message - - # Stop polling once we get a DONE message - if message.streaming_status == "DONE": - print(f"[DEBUG 060 POLL] Streaming complete!") - break - - # Verify the final message has content (the haiku) - assert final_message is not None, "Should have received an agent message" - assert final_message.content is not None, "Final message should have content" - assert len(final_message.content.content) > 0, "Final message should have haiku content" - - print(f"[DEBUG 060 POLL] โœ… Successfully received haiku response!") - print(f"[DEBUG 060 POLL] Final haiku:\n{final_message.content.content}") - pass - - -class TestStreamingEvents: - """Test streaming event sending (backend verification via polling).""" - - @pytest.mark.asyncio - async def test_send_event_and_stream(self, client: AsyncAgentex, agent_id: str): - """ - Streaming test placeholder. - - NOTE: SSE streaming is tested via the UI (agentex-ui subscribeTaskState). - Backend streaming functionality is verified in test_send_event_and_poll. - """ - pass - - -if __name__ == "__main__": - pytest.main([__file__, "-v"]) diff --git a/examples/tutorials/10_async/10_temporal/070_open_ai_agents_sdk_tools/.dockerignore b/examples/tutorials/10_async/10_temporal/070_open_ai_agents_sdk_tools/.dockerignore deleted file mode 100644 index c4948947..00000000 --- a/examples/tutorials/10_async/10_temporal/070_open_ai_agents_sdk_tools/.dockerignore +++ /dev/null @@ -1,43 +0,0 @@ -# Python -__pycache__/ -*.py[cod] -*$py.class -*.so -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -*.egg-info/ -.installed.cfg -*.egg - -# Environments -.env** -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# IDE -.idea/ -.vscode/ -*.swp -*.swo - -# Git -.git -.gitignore - -# Misc -.DS_Store diff --git a/examples/tutorials/10_async/10_temporal/070_open_ai_agents_sdk_tools/Dockerfile b/examples/tutorials/10_async/10_temporal/070_open_ai_agents_sdk_tools/Dockerfile deleted file mode 100644 index d4b34360..00000000 --- a/examples/tutorials/10_async/10_temporal/070_open_ai_agents_sdk_tools/Dockerfile +++ /dev/null @@ -1,63 +0,0 @@ -# syntax=docker/dockerfile:1.3 -FROM python:3.12-slim -COPY --from=ghcr.io/astral-sh/uv:0.6.4 /uv /uvx /bin/ - -# Install system dependencies -RUN apt-get update && apt-get install -y \ - htop \ - vim \ - curl \ - tar \ - python3-dev \ - postgresql-client \ - build-essential \ - libpq-dev \ - gcc \ - cmake \ - netcat-openbsd \ - nodejs \ - npm \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/** - -# Install tctl (Temporal CLI) -RUN curl -L https://github.com/temporalio/tctl/releases/download/v1.18.1/tctl_1.18.1_linux_arm64.tar.gz -o /tmp/tctl.tar.gz && \ - tar -xzf /tmp/tctl.tar.gz -C /usr/local/bin && \ - chmod +x /usr/local/bin/tctl && \ - rm /tmp/tctl.tar.gz - -RUN uv pip install --system --upgrade pip setuptools wheel - -ENV 
UV_HTTP_TIMEOUT=1000 - -# Copy pyproject.toml and README.md to install dependencies -COPY 10_async/10_temporal/070_open_ai_agents_sdk_tools/pyproject.toml /app/070_open_ai_agents_sdk_tools/pyproject.toml -COPY 10_async/10_temporal/070_open_ai_agents_sdk_tools/README.md /app/070_open_ai_agents_sdk_tools/README.md - -WORKDIR /app/070_open_ai_agents_sdk_tools - -# Copy the project code -COPY 10_async/10_temporal/070_open_ai_agents_sdk_tools/project /app/070_open_ai_agents_sdk_tools/project - -# Copy the test files -COPY 10_async/10_temporal/070_open_ai_agents_sdk_tools/tests /app/070_open_ai_agents_sdk_tools/tests - -# Copy shared test utilities -COPY test_utils /app/test_utils - -# Install the required Python packages with dev dependencies -RUN uv pip install --system .[dev] - -WORKDIR /app/070_open_ai_agents_sdk_tools - -# Set environment variables -ENV PYTHONPATH=/app - -# Set test environment variables -ENV AGENT_NAME=at070-open-ai-agents-sdk-tools - -# Run the ACP server using uvicorn -CMD ["uvicorn", "project.acp:acp", "--host", "0.0.0.0", "--port", "8000"] - -# When we deploy the worker, we will replace the CMD with the following -# CMD ["python", "-m", "run_worker"] diff --git a/examples/tutorials/10_async/10_temporal/070_open_ai_agents_sdk_tools/README.md b/examples/tutorials/10_async/10_temporal/070_open_ai_agents_sdk_tools/README.md deleted file mode 100644 index ea2c827a..00000000 --- a/examples/tutorials/10_async/10_temporal/070_open_ai_agents_sdk_tools/README.md +++ /dev/null @@ -1,180 +0,0 @@ -# [Temporal] OpenAI Agents SDK - Tools - -**Part of the [OpenAI SDK + Temporal integration series](../README.md)** โ†’ Previous: [060 Hello World](../060_open_ai_agents_sdk_hello_world/) - -## What You'll Learn - -Two patterns for making agent tools durable with Temporal: - -**Pattern 1: `activity_as_tool()`** - Single activity per tool call -- Use for: Single API calls, DB queries, external operations -- Example: `get_weather` tool โ†’ creates one `get_weather` activity -- 1:1 mapping between tool calls and activities - -**Pattern 2: Function tools with multiple activities** - Multiple activities per tool call -- Use for: Multi-step operations needing guaranteed sequencing -- Example: `move_money` tool โ†’ creates `withdraw_money` activity THEN `deposit_money` activity -- 1:many mapping - your code controls execution order, not the LLM -- Ensures atomic operations (withdraw always happens before deposit) - -## Prerequisites -- Development environment set up (see [main repo README](https://github.com/scaleapi/scale-agentex)) -- Backend services running: `make dev` from repository root -- Temporal UI available at http://localhost:8233 -- OpenAI Agents SDK plugin configured (see [060_hello_world](../060_open_ai_agents_sdk_hello_world/)) - -## Quick Start - -```bash -cd examples/tutorials/10_async/10_temporal/070_open_ai_agents_sdk_tools -uv run agentex agents run --manifest manifest.yaml -``` - -**Monitor:** Open Temporal UI at http://localhost:8233 to see tool calls as activities. - -## Try It - -### Pattern 1: Single Activity Tool - -Ask "What's the weather in San Francisco?" - -1. Check the agent response: - -![Weather Response](../_images/weather_response.png) - -2. Open Temporal UI (localhost:8233) -3. See a single `get_weather` activity created: - -![Weather Activity](../_images/weather_activity_tool.png) - -The activity shows the external call with retry capability. Each step (model invocation โ†’ tool call โ†’ model invocation) is durable. 
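-
-For Pattern 1 to work, the `get_weather` activity also has to be registered with the worker alongside the built-in AgentEx activities. A minimal sketch, following the `run_worker.py` pattern from tutorial 060; the workflow class name here is an assumed placeholder for this tutorial's workflow:
-
-```python
-import asyncio
-
-from temporalio import activity
-from temporalio.contrib.openai_agents import OpenAIAgentsPlugin
-
-from agentex.lib.environment_variables import EnvironmentVariables
-from agentex.lib.core.temporal.activities import get_all_activities
-from agentex.lib.core.temporal.workers.worker import AgentexWorker
-
-from project.workflow import At070OpenAiAgentsSdkToolsWorkflow  # assumed name
-
-
-@activity.defn
-async def get_weather(city: str) -> str:
-    """Stubbed external call; Temporal retries it on failure."""
-    return f"The weather in {city} is sunny"
-
-
-async def main():
-    env = EnvironmentVariables.refresh()
-    worker = AgentexWorker(
-        task_queue=env.WORKFLOW_TASK_QUEUE,
-        plugins=[OpenAIAgentsPlugin()],
-    )
-    await worker.run(
-        activities=get_all_activities() + [get_weather],  # register the tool's activity
-        workflow=At070OpenAiAgentsSdkToolsWorkflow,
-    )
-
-
-if __name__ == "__main__":
-    asyncio.run(main())
-```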
- -### Pattern 2: Multi-Activity Tool (Optional) - -To try the advanced banking example, uncomment the `move_money` sections in the code, then ask to move money. - -1. Check the agent response: - -![Money Transfer Response](../_images/move_money_response.png) - -2. Open Temporal UI and see TWO sequential activities: - -![Money Transfer Workflow](../_images/move_money_temporal.png) - -- First: `withdraw_money` activity executes -- Then: `deposit_money` activity executes -- Each activity shows its parameters and execution time - -**Critical insight:** If the system crashes after withdraw but before deposit, Temporal resumes exactly where it left off. The deposit will still happen - guaranteed transactional integrity. - -## Key Code - -### Pattern 1: Single Activity Tool -```python -# Define the activity -@activity.defn -async def get_weather(city: str) -> str: - """Get the weather for a given city""" - # This could be an API call - Temporal handles retries - return f"The weather in {city} is sunny" - -# Use activity_as_tool to convert it -weather_agent = Agent( - name="Weather Assistant", - instructions="Use the get_weather tool to answer weather questions.", - tools=[ - activity_as_tool(get_weather, start_to_close_timeout=timedelta(seconds=10)) - ] -) -``` - -### Pattern 2: Multi-Activity Tool -```python -# Define individual activities -@activity.defn -async def withdraw_money(from_account: str, amount: float) -> str: - # Simulate API call - await asyncio.sleep(5) - return f"Withdrew ${amount} from {from_account}" - -@activity.defn -async def deposit_money(to_account: str, amount: float) -> str: - # Simulate API call - await asyncio.sleep(10) - return f"Deposited ${amount} into {to_account}" - -# Create a function tool that orchestrates both activities -@function_tool -async def move_money(from_account: str, to_account: str, amount: float) -> str: - """Move money from one account to another""" - - # Step 1: Withdraw (becomes an activity) - await workflow.start_activity( - "withdraw_money", - args=[from_account, amount], - start_to_close_timeout=timedelta(days=1) - ) - - # Step 2: Deposit (becomes an activity) - await workflow.start_activity( - "deposit_money", - args=[to_account, amount], - start_to_close_timeout=timedelta(days=1) - ) - - return "Money transferred successfully" - -# Use the tool in your agent -money_agent = Agent( - name="Money Mover", - instructions="Use move_money to transfer funds between accounts.", - tools=[move_money] -) -``` - -## When to Use Each Pattern - -### Use Pattern 1 when: -- Tool performs a single external operation (API call, DB query) -- Operation is already idempotent -- No sequencing guarantees needed - -### Use Pattern 2 when: -- Tool requires multiple sequential operations -- Order must be guaranteed (withdraw THEN deposit) -- Operations need to be atomic from the agent's perspective -- You want transactional integrity across steps - -## Why This Matters - -**Without Temporal:** If you withdraw money but crash before depositing, you're stuck in a broken state. The money is gone from the source account with no way to recover. 
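To make the failure mode concrete, here is a naive, non-durable sketch (not part of the tutorial code) that reuses the tutorial's activity functions as plain coroutines; a crash between the two awaits leaves no record to resume from:

```python
# Naive version with no durability: nothing records that the withdrawal
# already happened, so a crash between the two awaits strands the money.
from project.activities import deposit_money, withdraw_money


async def move_money_naive(from_account: str, to_account: str, amount: float) -> str:
    await withdraw_money(from_account, amount)
    # A process crash here loses all state: on restart we cannot tell whether
    # to retry the withdrawal or go straight to the deposit.
    await deposit_money(to_account, amount)
    return f"Moved ${amount} from {from_account} to {to_account}"
```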
-
-**With Temporal (Pattern 2):**
-- Guaranteed execution with exact resumption after failures
-- If the system crashes after withdraw, Temporal resumes and completes deposit
-- Each step is tracked and retried independently
-- Full observability of the entire operation
-
-**Key insight:** Pattern 2 moves sequencing control from the LLM (which might call tools in the wrong order) to your deterministic code (which guarantees the correct order). The LLM still decides *when* to call the tool, but your code controls *how* the operations execute.
-
-This makes agents production-ready for:
-- Financial transactions
-- Order fulfillment workflows
-- Multi-step API integrations
-- Any operation where partial completion is dangerous
-
-**Next:** [080_open_ai_agents_sdk_human_in_the_loop](../080_open_ai_agents_sdk_human_in_the_loop/) - Add human approval workflows
diff --git a/examples/tutorials/10_async/10_temporal/070_open_ai_agents_sdk_tools/dev.ipynb b/examples/tutorials/10_async/10_temporal/070_open_ai_agents_sdk_tools/dev.ipynb
deleted file mode 100644
index bcfc7182..00000000
--- a/examples/tutorials/10_async/10_temporal/070_open_ai_agents_sdk_tools/dev.ipynb
+++ /dev/null
@@ -1,124 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "36834357",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from agentex import Agentex\n",
-    "\n",
-    "client = Agentex(base_url=\"http://localhost:5003\")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "d1c309d6",
-   "metadata": {},
-   "outputs": [],
-   "source": "AGENT_NAME = \"at070-open-ai-agents-sdk-tools\""
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "9f6e6ef0",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# (REQUIRED) Create a new task.
For Agentic agents, you must create a task for messages to be associated with.\n", - "import uuid\n", - "\n", - "rpc_response = client.agents.create_task(\n", - " agent_name=AGENT_NAME,\n", - " params={\n", - " \"name\": f\"{str(uuid.uuid4())[:8]}-task\",\n", - " \"params\": {}\n", - " }\n", - ")\n", - "\n", - "task = rpc_response.result\n", - "print(task)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b03b0d37", - "metadata": {}, - "outputs": [], - "source": [ - "# Send an event to the agent\n", - "\n", - "# The response is expected to be a list of TaskMessage objects, which is a union of the following types:\n", - "# - TextContent: A message with just text content \n", - "# - DataContent: A message with JSON-serializable data content\n", - "# - ToolRequestContent: A message with a tool request, which contains a JSON-serializable request to call a tool\n", - "# - ToolResponseContent: A message with a tool response, which contains response object from a tool call in its content\n", - "\n", - "# When processing the message/send response, if you are expecting more than TextContent, such as DataContent, ToolRequestContent, or ToolResponseContent, you can process them as well\n", - "\n", - "rpc_response = client.agents.send_event(\n", - " agent_name=AGENT_NAME,\n", - " params={\n", - " \"content\": {\"type\": \"text\", \"author\": \"user\", \"content\": \"Hello what can you do?\"},\n", - " \"task_id\": task.id,\n", - " }\n", - ")\n", - "\n", - "event = rpc_response.result\n", - "print(event)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a6927cc0", - "metadata": {}, - "outputs": [], - "source": [ - "# Subscribe to the async task messages produced by the agent\n", - "from agentex.lib.utils.dev_tools import subscribe_to_async_task_messages\n", - "\n", - "task_messages = subscribe_to_async_task_messages(\n", - " client=client,\n", - " task=task, \n", - " only_after_timestamp=event.created_at, \n", - " print_messages=True,\n", - " rich_print=True,\n", - " timeout=5,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4864e354", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.9" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} \ No newline at end of file diff --git a/examples/tutorials/10_async/10_temporal/070_open_ai_agents_sdk_tools/environments.yaml b/examples/tutorials/10_async/10_temporal/070_open_ai_agents_sdk_tools/environments.yaml deleted file mode 100644 index f9051191..00000000 --- a/examples/tutorials/10_async/10_temporal/070_open_ai_agents_sdk_tools/environments.yaml +++ /dev/null @@ -1,64 +0,0 @@ -# Agent Environment Configuration -# ------------------------------ -# This file defines environment-specific settings for your agent. -# This DIFFERS from the manifest.yaml file in that it is used to program things that are ONLY per environment. 
- -# ********** EXAMPLE ********** -# schema_version: "v1" # This is used to validate the file structure and is not used by the agentex CLI -# environments: -# dev: -# auth: -# principal: -# user_id: "1234567890" -# user_name: "John Doe" -# user_email: "john.doe@example.com" -# user_role: "admin" -# user_permissions: "read, write, delete" -# helm_overrides: # This is used to override the global helm values.yaml file in the agentex-agent helm charts -# replicas: 3 -# resources: -# requests: -# cpu: "1000m" -# memory: "2Gi" -# limits: -# cpu: "2000m" -# memory: "4Gi" -# env: -# - name: LOG_LEVEL -# value: "DEBUG" -# - name: ENVIRONMENT -# value: "staging" -# -# kubernetes: -# # OPTIONAL - Otherwise it will be derived from separately. However, this can be used to override the derived -# # namespace and deploy it with in the same namespace that already exists for a separate agent. -# namespace: "team-example-tutorial" -# ********** END EXAMPLE ********** - -schema_version: "v1" # This is used to validate the file structure and is not used by the agentex CLI -environments: - dev: - auth: - principal: - user_id: # TODO: Fill in - account_id: # TODO: Fill in - helm_overrides: - # This is used to override the global helm values.yaml file in the agentex-agent helm charts - replicaCount: 2 - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "1000m" - memory: "2Gi" - temporal-worker: - enabled: true - replicaCount: 2 - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "1000m" - memory: "2Gi" \ No newline at end of file diff --git a/examples/tutorials/10_async/10_temporal/070_open_ai_agents_sdk_tools/manifest.yaml b/examples/tutorials/10_async/10_temporal/070_open_ai_agents_sdk_tools/manifest.yaml deleted file mode 100644 index 40bf9e78..00000000 --- a/examples/tutorials/10_async/10_temporal/070_open_ai_agents_sdk_tools/manifest.yaml +++ /dev/null @@ -1,140 +0,0 @@ -# Agent Manifest Configuration -# --------------------------- -# This file defines how your agent should be built and deployed. - -# Build Configuration -# ------------------ -# The build config defines what gets packaged into your agent's Docker image. -# This same configuration is used whether building locally or remotely. -# -# When building: -# 1. All files from include_paths are collected into a build context -# 2. The context is filtered by dockerignore rules -# 3. The Dockerfile uses this context to build your agent's image -# 4. 
The image is pushed to a registry and used to run your agent -build: - context: - # Root directory for the build context - root: ../../../ # Up to tutorials level to include test_utils - - # Paths to include in the Docker build context - # Must include: - # - Your agent's directory (your custom agent code) - # These paths are collected and sent to the Docker daemon for building - include_paths: - - 10_async/10_temporal/070_open_ai_agents_sdk_tools - - test_utils - - # Path to your agent's Dockerfile - # This defines how your agent's image is built from the context - # Relative to the root directory - dockerfile: 10_async/10_temporal/070_open_ai_agents_sdk_tools/Dockerfile - - # Path to your agent's .dockerignore - # Filters unnecessary files from the build context - # Helps keep build context small and builds fast - dockerignore: 10_async/10_temporal/070_open_ai_agents_sdk_tools/.dockerignore - -# Local Development Configuration -# ----------------------------- -# Only used when running the agent locally -local_development: - agent: - port: 8000 # Port where your local ACP server is running - host_address: host.docker.internal # Host address for Docker networking (host.docker.internal for Docker, localhost for direct) - - # File paths for local development (relative to this manifest.yaml) - paths: - # Path to ACP server file - # Examples: - # project/acp.py (standard) - # src/server.py (custom structure) - # ../shared/acp.py (shared across projects) - # /absolute/path/acp.py (absolute path) - acp: project/acp.py - - # Path to temporal worker file - # Examples: - # project/run_worker.py (standard) - # workers/temporal.py (custom structure) - # ../shared/worker.py (shared across projects) - worker: project/run_worker.py - -# Agent Configuration -# ----------------- -agent: - # Type of agent - either sync or async - acp_type: async - - # Unique name for your agent - # Used for task routing and monitoring - name: at070-open-ai-agents-sdk-tools - - # Description of what your agent does - # Helps with documentation and discovery - description: An AgentEx agent - - # Temporal workflow configuration - # This enables your agent to run as a Temporal workflow for long-running tasks - temporal: - enabled: true - workflows: - # Name of the workflow class - # Must match the @workflow.defn name in your workflow.py - - name: at070-open-ai-agents-sdk-tools - - # Queue name for task distribution - # Used by Temporal to route tasks to your agent - # Convention: _task_queue - queue_name: at070_open_ai_agents_sdk_tools_queue - - # Optional: Credentials mapping - # Maps Kubernetes secrets to environment variables - # Common credentials include: - credentials: - - env_var_name: REDIS_URL - secret_name: redis-url-secret - secret_key: url - # - env_var_name: OPENAI_API_KEY - # secret_name: openai-api-key - # secret_key: api-key - - # Optional: Set Environment variables for running your agent locally as well - # as for deployment later on - env: - OPENAI_API_KEY: "" - # OPENAI_BASE_URL: "" - OPENAI_ORG_ID: "" - - -# Deployment Configuration -# ----------------------- -# Configuration for deploying your agent to Kubernetes clusters -deployment: - # Container image configuration - image: - repository: "" # Update with your container registry - tag: "latest" # Default tag, should be versioned in production - - imagePullSecrets: - - name: my-registry-secret # Update with your image pull secret name - - # Global deployment settings that apply to all clusters - # These can be overridden using --override-file with custom 
configuration files - global: - agent: - name: "at070-open-ai-agents-sdk-tools" - description: "An AgentEx agent" - - # Default replica count - replicaCount: 1 - - # Default resource requirements - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "1000m" - memory: "2Gi" - diff --git a/examples/tutorials/10_async/10_temporal/070_open_ai_agents_sdk_tools/project/__init__.py b/examples/tutorials/10_async/10_temporal/070_open_ai_agents_sdk_tools/project/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/examples/tutorials/10_async/10_temporal/070_open_ai_agents_sdk_tools/project/acp.py b/examples/tutorials/10_async/10_temporal/070_open_ai_agents_sdk_tools/project/acp.py deleted file mode 100644 index 3028093b..00000000 --- a/examples/tutorials/10_async/10_temporal/070_open_ai_agents_sdk_tools/project/acp.py +++ /dev/null @@ -1,72 +0,0 @@ -import os -import sys - -from temporalio.contrib.openai_agents import OpenAIAgentsPlugin - -# === DEBUG SETUP (AgentEx CLI Debug Support) === -if os.getenv("AGENTEX_DEBUG_ENABLED") == "true": - try: - import debugpy - debug_port = int(os.getenv("AGENTEX_DEBUG_PORT", "5679")) - debug_type = os.getenv("AGENTEX_DEBUG_TYPE", "acp") - wait_for_attach = os.getenv("AGENTEX_DEBUG_WAIT_FOR_ATTACH", "false").lower() == "true" - - # Configure debugpy - debugpy.configure(subProcess=False) - debugpy.listen(debug_port) - - print(f"๐Ÿ› [{debug_type.upper()}] Debug server listening on port {debug_port}") - - if wait_for_attach: - print(f"โณ [{debug_type.upper()}] Waiting for debugger to attach...") - debugpy.wait_for_client() - print(f"โœ… [{debug_type.upper()}] Debugger attached!") - else: - print(f"๐Ÿ“ก [{debug_type.upper()}] Ready for debugger attachment") - - except ImportError: - print("โŒ debugpy not available. Install with: pip install debugpy") - sys.exit(1) - except Exception as e: - print(f"โŒ Debug setup failed: {e}") - sys.exit(1) -# === END DEBUG SETUP === - -from agentex.lib.types.fastacp import TemporalACPConfig -from agentex.lib.sdk.fastacp.fastacp import FastACP -from agentex.lib.core.temporal.plugins.openai_agents.models.temporal_streaming_model import ( - TemporalStreamingModelProvider, -) -from agentex.lib.core.temporal.plugins.openai_agents.interceptors.context_interceptor import ContextInterceptor - -context_interceptor = ContextInterceptor() -temporal_streaming_model_provider = TemporalStreamingModelProvider() - -# Create the ACP server -acp = FastACP.create( - acp_type="async", - config=TemporalACPConfig( - # When deployed to the cluster, the Temporal address will automatically be set to the cluster address - # For local development, we set the address manually to talk to the local Temporal service set up via docker compose - # We are also adding the Open AI Agents SDK plugin to the ACP. 
- type="temporal", - temporal_address=os.getenv("TEMPORAL_ADDRESS", "localhost:7233"), - plugins=[OpenAIAgentsPlugin(model_provider=temporal_streaming_model_provider)], - interceptors=[context_interceptor] - ) -) - - -# Notice that we don't need to register any handlers when we use type="temporal" -# If you look at the code in agentex.sdk.fastacp.impl.temporal_acp -# You can see that these handlers are automatically registered when the ACP is created - -# @acp.on_task_create -# This will be handled by the method in your workflow that is decorated with @workflow.run - -# @acp.on_task_event_send -# This will be handled by the method in your workflow that is decorated with @workflow.signal(name=SignalName.RECEIVE_MESSAGE) - -# @acp.on_task_cancel -# This does not need to be handled by your workflow. -# It is automatically handled by the temporal client which cancels the workflow directly \ No newline at end of file diff --git a/examples/tutorials/10_async/10_temporal/070_open_ai_agents_sdk_tools/project/activities.py b/examples/tutorials/10_async/10_temporal/070_open_ai_agents_sdk_tools/project/activities.py deleted file mode 100644 index 35ab678d..00000000 --- a/examples/tutorials/10_async/10_temporal/070_open_ai_agents_sdk_tools/project/activities.py +++ /dev/null @@ -1,104 +0,0 @@ -import random -import asyncio - -from temporalio import activity - -from agentex.lib.utils.logging import make_logger - -logger = make_logger(__name__) -# ============================================================================ -# Temporal Activities for OpenAI Agents SDK Integration -# ============================================================================ -# This file defines Temporal activities that can be used in two different patterns: -# -# PATTERN 1: Direct conversion to agent tools using activity_as_tool() -# PATTERN 2: Called internally by function_tools for multi-step operations -# -# Activities represent NON-DETERMINISTIC operations that need durability: -# - API calls, database queries, file I/O, network operations -# - Any operation that could fail and needs automatic retries -# - Operations with variable latency or external dependencies - -# ============================================================================ -# PATTERN 1 EXAMPLE: Simple External Tool as Activity -# ============================================================================ -# This activity demonstrates PATTERN 1 usage: -# - Single non-deterministic operation (simulated API call) -# - Converted directly to an agent tool using activity_as_tool() -# - Each tool call creates exactly ONE activity in the workflow - -@activity.defn -async def get_weather(city: str) -> str: - """Get the weather for a given city. - - PATTERN 1 USAGE: This activity gets converted to an agent tool using: - activity_as_tool(get_weather, start_to_close_timeout=timedelta(seconds=10)) - - When the agent calls the weather tool: - 1. This activity runs with Temporal durability guarantees - 2. If it fails, Temporal automatically retries it - 3. 
The result is returned directly to the agent - """ - # Simulate API call to weather service - if city == "New York City": - return "The weather in New York City is 22 degrees Celsius" - else: - return "The weather is unknown" - -# ============================================================================ -# PATTERN 2 EXAMPLES: Activities Used Within Function Tools -# ============================================================================ -# These activities demonstrate PATTERN 2 usage: -# - Called internally by the move_money function tool (see tools.py) -# - Multiple activities coordinated by a single tool -# - Guarantees execution sequence and atomicity - -@activity.defn -async def withdraw_money(from_account: str, amount: float) -> str: - """Withdraw money from an account. - - PATTERN 2 USAGE: This activity is called internally by the move_money tool. - It's NOT converted to an agent tool directly - instead, it's orchestrated - by code inside the function_tool to guarantee proper sequencing. - """ - # Simulate variable API call latency (realistic for banking operations) - random_delay = random.randint(1, 5) - await asyncio.sleep(random_delay) - - # In a real implementation, this would make an API call to a banking service - logger.info(f"Withdrew ${amount} from {from_account}") - return f"Successfully withdrew ${amount} from {from_account}" - -@activity.defn -async def deposit_money(to_account: str, amount: float) -> str: - """Deposit money into an account. - - PATTERN 2 USAGE: This activity is called internally by the move_money tool - AFTER the withdraw_money activity succeeds. This guarantees the proper - sequence: withdraw โ†’ deposit, making the operation atomic. - """ - # Simulate banking API latency - await asyncio.sleep(2) - - # In a real implementation, this would make an API call to a banking service - logger.info(f"Successfully deposited ${amount} into {to_account}") - return f"Successfully deposited ${amount} into {to_account}" - -# ============================================================================ -# KEY INSIGHTS: -# ============================================================================ -# -# 1. ACTIVITY DURABILITY: All activities are automatically retried by Temporal -# if they fail, providing resilience against network issues, service outages, etc. -# -# 2. PATTERN 1 vs PATTERN 2 CHOICE: -# - Use Pattern 1 for simple, independent operations -# - Use Pattern 2 when you need guaranteed sequencing of multiple operations -# -# 3. OBSERVABILITY: Each activity execution appears in the Temporal UI with: -# - Execution time, retry attempts, input parameters, return values -# - Full traceability from agent tool call to activity execution -# -# 4. 
PARAMETERS: Notice how Pattern 2 activities now accept proper parameters -# (from_account, to_account, amount) that get passed through from the tool -# ============================================================================ diff --git a/examples/tutorials/10_async/10_temporal/070_open_ai_agents_sdk_tools/project/run_worker.py b/examples/tutorials/10_async/10_temporal/070_open_ai_agents_sdk_tools/project/run_worker.py deleted file mode 100644 index 4aa50e18..00000000 --- a/examples/tutorials/10_async/10_temporal/070_open_ai_agents_sdk_tools/project/run_worker.py +++ /dev/null @@ -1,71 +0,0 @@ -import asyncio - -from temporalio.contrib.openai_agents import OpenAIAgentsPlugin - -from project.workflow import At070OpenAiAgentsSdkToolsWorkflow -from project.activities import get_weather, deposit_money, withdraw_money -from agentex.lib.utils.debug import setup_debug_if_enabled -from agentex.lib.utils.logging import make_logger -from agentex.lib.environment_variables import EnvironmentVariables -from agentex.lib.core.temporal.activities import get_all_activities -from agentex.lib.core.temporal.workers.worker import AgentexWorker -from agentex.lib.core.temporal.plugins.openai_agents.hooks.activities import stream_lifecycle_content -from agentex.lib.core.temporal.plugins.openai_agents.models.temporal_streaming_model import ( - TemporalStreamingModelProvider, -) -from agentex.lib.core.temporal.plugins.openai_agents.interceptors.context_interceptor import ContextInterceptor - -environment_variables = EnvironmentVariables.refresh() - -logger = make_logger(__name__) - - -async def main(): - # Setup debug mode if enabled - setup_debug_if_enabled() - - task_queue_name = environment_variables.WORKFLOW_TASK_QUEUE - if task_queue_name is None: - raise ValueError("WORKFLOW_TASK_QUEUE is not set") - - # Add activities to the worker - all_activities = get_all_activities() + [withdraw_money, deposit_money, get_weather, stream_lifecycle_content] # add your own activities here - - # ============================================================================ - # STREAMING SETUP: Interceptor + Model Provider - # ============================================================================ - # This is where the streaming magic is configured! Two key components: - # - # 1. ContextInterceptor - # - Threads task_id through activity headers using Temporal's interceptor pattern - # - Outbound: Reads _task_id from workflow instance, injects into activity headers - # - Inbound: Extracts task_id from headers, sets streaming_task_id ContextVar - # - This enables runtime context without forking the Temporal plugin! - # - # 2. TemporalStreamingModelProvider - # - Returns TemporalStreamingModel instances that read task_id from ContextVar - # - TemporalStreamingModel.get_response() streams tokens to Redis in real-time - # - Still returns complete response to Temporal for determinism/replay safety - # - Uses AgentEx ADK streaming infrastructure (Redis XADD to stream:{task_id}) - # - # Together, these enable real-time LLM streaming while maintaining Temporal's - # durability guarantees. No forked components - uses STANDARD OpenAIAgentsPlugin! - context_interceptor = ContextInterceptor() - temporal_streaming_model_provider = TemporalStreamingModelProvider() - - # Create a worker with automatic tracing - # IMPORTANT: We use the STANDARD temporalio.contrib.openai_agents.OpenAIAgentsPlugin - # No forking needed! The interceptor + model provider handle all streaming logic. 
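    # For intuition, a stripped-down sketch of the inbound half of this pattern
    # (the real implementation lives in
    # agentex.lib.core.temporal.plugins.openai_agents.interceptors; the header key
    # and UTF-8 payload decoding below are illustrative assumptions, not the
    # actual wire format):
    #
    #     from contextvars import ContextVar
    #     from temporalio.worker import ActivityInboundInterceptor, ExecuteActivityInput
    #
    #     streaming_task_id: ContextVar[str | None] = ContextVar("streaming_task_id", default=None)
    #
    #     class SketchedContextActivityInboundInterceptor(ActivityInboundInterceptor):
    #         async def execute_activity(self, input: ExecuteActivityInput):
    #             payload = input.headers.get("task_id")
    #             if payload is not None:
    #                 streaming_task_id.set(payload.data.decode("utf-8"))
    #             return await self.next.execute_activity(input)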
- worker = AgentexWorker( - task_queue=task_queue_name, - plugins=[OpenAIAgentsPlugin(model_provider=temporal_streaming_model_provider)], - interceptors=[context_interceptor], - ) - - await worker.run( - activities=all_activities, - workflow=At070OpenAiAgentsSdkToolsWorkflow, - ) - -if __name__ == "__main__": - asyncio.run(main()) \ No newline at end of file diff --git a/examples/tutorials/10_async/10_temporal/070_open_ai_agents_sdk_tools/project/tools.py b/examples/tutorials/10_async/10_temporal/070_open_ai_agents_sdk_tools/project/tools.py deleted file mode 100644 index 142bcc55..00000000 --- a/examples/tutorials/10_async/10_temporal/070_open_ai_agents_sdk_tools/project/tools.py +++ /dev/null @@ -1,49 +0,0 @@ -from datetime import timedelta - -from agents import function_tool -from temporalio import workflow - -from project.activities import deposit_money, withdraw_money - -# ============================================================================ -# PATTERN 2 EXAMPLE: Multiple Activities Within Tools -# ============================================================================ -# This demonstrates how to create a single tool that orchestrates multiple -# Temporal activities internally. This pattern is ideal when you need to: -# 1. Guarantee the sequence of operations (withdraw THEN deposit) -# 2. Make the entire operation atomic from the agent's perspective -# 3. Avoid relying on the LLM to correctly sequence multiple tool calls - -@function_tool -async def move_money(from_account: str, to_account: str, amount: float) -> str: - """Move money from one account to another atomically. - - This tool demonstrates PATTERN 2: Instead of having the LLM make two separate - tool calls (withdraw + deposit), we create ONE tool that internally coordinates - multiple activities. 
This guarantees: - - withdraw_money activity runs first - - deposit_money activity only runs if withdrawal succeeds - - Both operations are durable and will retry on failure - - The entire operation appears atomic to the agent - """ - - # STEP 1: Start the withdrawal activity - # This creates a Temporal activity that will be retried if it fails - withdraw_result = await workflow.execute_activity( - withdraw_money, - args=[from_account, amount], - start_to_close_timeout=timedelta(days=1) # Long timeout for banking operations - ) - - # STEP 2: Only after successful withdrawal, start the deposit activity - # This guarantees the sequence: withdraw THEN deposit - deposit_result = await workflow.execute_activity( - deposit_money, - args=[to_account, amount], - start_to_close_timeout=timedelta(days=1) - ) - - # PATTERN 2 BENEFIT: From the agent's perspective, this was ONE tool call - # But in Temporal UI, you'll see TWO activities executed in sequence - # Each activity gets its own retry logic and durability guarantees - return f"Successfully moved ${amount} from {from_account} to {to_account}" diff --git a/examples/tutorials/10_async/10_temporal/070_open_ai_agents_sdk_tools/project/workflow.py b/examples/tutorials/10_async/10_temporal/070_open_ai_agents_sdk_tools/project/workflow.py deleted file mode 100644 index 2204d3a0..00000000 --- a/examples/tutorials/10_async/10_temporal/070_open_ai_agents_sdk_tools/project/workflow.py +++ /dev/null @@ -1,358 +0,0 @@ -""" -OpenAI Agents SDK + Temporal Integration Tutorial - -This tutorial demonstrates two key patterns for integrating OpenAI Agents SDK with Temporal workflows: - -PATTERN 1: Simple External Tools as Activities (activity_as_tool) -- Convert individual Temporal activities directly into agent tools -- 1:1 mapping between tool calls and activities -- Best for: single non-deterministic operations (API calls, DB queries) -- Example: get_weather activity โ†’ weather tool - -PATTERN 2: Multiple Activities Within Tools (function_tool with internal activities) -- Create function tools that coordinate multiple activities internally -- 1:many mapping between tool calls and activities -- Best for: complex multi-step operations that need guaranteed sequencing -- Example: move_money tool โ†’ withdraw_money + deposit_money activities - -Both patterns provide durability, automatic retries, and full observability through Temporal. - -WHY THIS APPROACH IS GAME-CHANGING: -=================================== -There's a crucial meta-point that should be coming through here: **why is this different?** -This approach is truly transactional because of how the `await` works in Temporal workflows. -Consider a "move money" example - if the operation fails between the withdraw and deposit, -Temporal will resume exactly where it left off - the agent gets real-world flexibility even -if systems die. - -**Why even use Temporal? Why are we adding complexity?** The gain is enormous when you -consider what happens without it: - -In a traditional approach without Temporal, if you withdraw money but then the system crashes -before depositing, you're stuck in a broken state. The money has been withdrawn, but never -deposited. In a banking scenario, you can't just "withdraw again" - the money is already gone -from the source account, and your agent has no way to recover or know what state it was in. - -This is why you can't build very complicated agents without this confidence in transactional -behavior. 
Temporal gives us: - -- **Guaranteed execution**: If the workflow starts, it will complete, even through failures -- **Exact resumption**: Pick up exactly where we left off, not start over -- **Transactional integrity**: Either both operations complete, or the workflow can be designed - to handle partial completion -- **Production reliability**: Build agents that can handle real-world complexity and failures - -Without this foundation, agents remain fragile toys. With Temporal, they become production-ready -systems that can handle the complexities of the real world. -""" - -import os -import json -import asyncio -from typing import Any, Dict, List -from datetime import timedelta - -from agents import Agent, Runner -from temporalio import workflow -from temporalio.contrib import openai_agents - -from agentex.lib import adk -from project.activities import get_weather -from agentex.lib.types.acp import SendEventParams, CreateTaskParams -from agentex.lib.types.tracing import SGPTracingProcessorConfig -from agentex.lib.utils.logging import make_logger -from agentex.types.text_content import TextContent -from agentex.lib.utils.model_utils import BaseModel -from agentex.lib.environment_variables import EnvironmentVariables -from agentex.lib.core.temporal.types.workflow import SignalName -from agentex.lib.core.temporal.workflows.workflow import BaseWorkflow -from agentex.lib.core.tracing.tracing_processor_manager import ( - add_tracing_processor_config, -) -from agentex.lib.core.temporal.plugins.openai_agents.hooks.hooks import TemporalStreamingHooks - -# Configure tracing processor (optional - only if you have SGP credentials) -add_tracing_processor_config( - SGPTracingProcessorConfig( - sgp_api_key=os.environ.get("SGP_API_KEY", ""), - sgp_account_id=os.environ.get("SGP_ACCOUNT_ID", ""), - ) -) - -environment_variables = EnvironmentVariables.refresh() - -if environment_variables.WORKFLOW_NAME is None: - raise ValueError("Environment variable WORKFLOW_NAME is not set") - -if environment_variables.AGENT_NAME is None: - raise ValueError("Environment variable AGENT_NAME is not set") - -# Validate OpenAI API key is set -if not os.environ.get("OPENAI_API_KEY"): - raise ValueError( - "OPENAI_API_KEY environment variable is not set. " - "This tutorial requires an OpenAI API key to run the OpenAI Agents SDK. " - "Please set OPENAI_API_KEY in your environment or manifest.yaml file." - ) - -logger = make_logger(__name__) - - -class StateModel(BaseModel): - """ - State model for preserving conversation history across turns. - - This allows the agent to maintain context throughout the conversation, - making it possible to reference previous messages and build on the discussion. - """ - - input_list: List[Dict[str, Any]] - turn_number: int - - -@workflow.defn(name=environment_variables.WORKFLOW_NAME) -class At070OpenAiAgentsSdkToolsWorkflow(BaseWorkflow): - """ - Minimal async workflow template for AgentEx Temporal agents. 
- """ - - def __init__(self): - super().__init__(display_name=environment_variables.AGENT_NAME) - self._complete_task = False - self._state: StateModel | None = None - self._pending_confirmation: asyncio.Queue[str] = asyncio.Queue() - self._task_id = None - self._trace_id = None - self._parent_span_id = None - - @workflow.signal(name=SignalName.RECEIVE_EVENT) - async def on_task_event_send(self, params: SendEventParams) -> None: - logger.info(f"Received task message instruction: {params}") - - if self._state is None: - raise ValueError("State is not initialized") - - # Increment turn number for tracing - self._state.turn_number += 1 - - self._task_id = params.task.id - self._trace_id = params.task.id - - # Add the user message to conversation history - self._state.input_list.append({"role": "user", "content": params.event.content.content}) - - # Echo back the client's message to show it in the UI. This is not done by default - # so the agent developer has full control over what is shown to the user. - await adk.messages.create(task_id=params.task.id, content=params.event.content) - - # ============================================================================ - # OpenAI Agents SDK + Temporal Integration: Two Patterns for Tool Creation - # ============================================================================ - - # #### When to Use Activities for Tools - # - # You'll want to use the activity pattern for tools in the following scenarios: - # - # - **API calls within the tool**: Whenever your tool makes an API call (external - # service, database, etc.), you must wrap it as an activity since these are - # non-deterministic operations that could fail or return different results - # - **Idempotent single operations**: When the tool performs an already idempotent - # single call that you want to ensure gets executed reliably with Temporal's retry - # guarantees - # - # Let's start with the case where it is non-deterministic. If this is the case, we - # want this tool to be an activity to guarantee that it will be executed. The way to - # do this is to add some syntax to make the tool call an activity. Let's create a tool - # that gives us the weather and create a weather agent. For this example, we will just - # return a hard-coded string but we can easily imagine this being an API call to a - # weather service which would make it non-deterministic. First we will create a new - # file called `activities.py`. Here we will create a function to get the weather and - # simply add an activity annotation on top. - - # There are TWO key patterns for integrating tools with the OpenAI Agents SDK in Temporal: - # - # PATTERN 1: Simple External Tools as Activities - # PATTERN 2: Multiple Activities Within Tools - # - # Choose the right pattern based on your use case: - - # ============================================================================ - # PATTERN 1: Simple External Tools as Activities - # ============================================================================ - # Use this pattern when: - # - You have a single non-deterministic operation (API call, DB query, etc.) - # - You want each tool call to be a single Temporal activity - # - You want simple 1:1 mapping between tool calls and activities - # - # HOW IT WORKS: - # 1. Define your function as a Temporal activity with @activity.defn (see activities.py) - # 2. Convert the activity to a tool using activity_as_tool() - # 3. 
Each time the agent calls this tool, it creates ONE activity in the workflow - # - # BENEFITS: - # - Automatic retries and durability for each tool call - # - Clear observability - each tool call shows as an activity in Temporal UI - # - Temporal handles all the failure recovery automatically - - weather_agent = Agent( - name="Weather Assistant", - instructions="You are a helpful weather agent. Use the get_weather tool to get the weather for a given city.", - tools=[ - # activity_as_tool() converts a Temporal activity into an agent tool - # The get_weather activity will be executed with durability guarantees - openai_agents.workflow.activity_as_tool( - get_weather, # This is defined in activities.py as @activity.defn - start_to_close_timeout=timedelta(seconds=10), - ), - ], - ) - - # ============================================================================ - # STREAMING SETUP: Store task_id for the Interceptor - # ============================================================================ - # These instance variables are read by ContextWorkflowOutboundInterceptor - # which injects them into activity headers. This enables streaming without - # forking the Temporal plugin! - # - # How streaming works (Interceptor + Model Provider + Hooks): - # 1. We store task_id in workflow instance variable (here) - # 2. ContextWorkflowOutboundInterceptor reads it via workflow.instance() - # 3. Interceptor injects task_id into activity headers - # 4. ContextActivityInboundInterceptor extracts from headers - # 5. Sets streaming_task_id ContextVar inside the activity - # 6. TemporalStreamingModel reads from ContextVar and streams to Redis - # 7. TemporalStreamingHooks creates placeholder messages for tool calls - # - # This approach uses STANDARD Temporal components - no forked plugin needed! - self._task_id = params.task.id - self._trace_id = params.task.id - self._parent_span_id = params.task.id - - # ============================================================================ - # HOOKS: Create Streaming Lifecycle Messages - # ============================================================================ - # TemporalStreamingHooks integrates with OpenAI Agents SDK lifecycle events - # to create messages in the database for tool calls, reasoning, etc. - # - # What hooks do: - # - on_tool_call_start(): Creates tool_request message with arguments - # - on_tool_call_done(): Creates tool_response message with result - # - on_model_stream_part(): Called for each streaming chunk (handled by TemporalStreamingModel) - # - on_run_done(): Marks the final response as complete - # - # These hooks create the messages you see in the test output: - # - Type: tool_request - Agent deciding to call get_weather - # - Type: tool_response - Result from get_weather activity - # - Type: text - Final agent response with weather info - # - # The hooks work alongside the interceptor/model streaming to provide - # a complete view of the agent's execution in the UI. 
-        hooks = TemporalStreamingHooks(task_id=params.task.id)
-
-        # Run the agent - when it calls the weather tool, it will create a get_weather activity
-        # Hooks will create messages for tool calls, interceptor enables token streaming
-        # Wrap in tracing span to track this turn
-        async with adk.tracing.span(
-            trace_id=params.task.id,
-            name=f"Turn {self._state.turn_number}",
-            input=self._state.model_dump(),
-        ) as span:
-            self._parent_span_id = span.id if span else None
-            # Pass the conversation history to Runner.run to maintain context
-            result = await Runner.run(weather_agent, self._state.input_list, hooks=hooks)
-
-            # Update the state for the next turn. to_input_list() returns the
-            # original input plus every newly generated item (assistant turns,
-            # tool calls and results) in input format, so the next turn sees
-            # the full conversation.
-            self._state.input_list = result.to_input_list()
-
-            # Set span output for tracing - include full state
-            if span is not None:
-                span.output = self._state.model_dump()
-
-        # ============================================================================
-        # PATTERN 2: Multiple Activities Within Tools
-        # ============================================================================
-        # Use this pattern when:
-        # - You need multiple sequential non-deterministic operations within one tool
-        # - You want to guarantee the sequence of operations (not rely on LLM sequencing)
-        # - You need atomic operations that involve multiple steps
-        #
-        # HOW IT WORKS:
-        # 1. Create individual activities for each non-deterministic step (see activities.py)
-        # 2. Create a function tool using @function_tool that calls multiple activities internally
-        # 3. Each activity call uses workflow.execute_activity() for durability (see tools.py)
-        # 4. The tool coordinates the sequence deterministically (not the LLM)
-        #
-        # BENEFITS:
-        # - Guaranteed execution order (withdraw THEN deposit)
-        # - Each step is durable and retryable individually
-        # - Atomic operations from the agent's perspective
-        # - Better than having the LLM make multiple separate tool calls
-
-        # UNCOMMENT THIS SECTION TO SEE PATTERN 2 IN ACTION:
-        # money_mover_agent = Agent(
-        #     name="Money Mover",
-        #     instructions="You are a helpful money mover agent. Use the move_money tool to move money from one account to another.",
-        #     tools=[
-        #         # move_money is defined in tools.py as @function_tool
-        #         # Internally, it calls withdraw_money activity THEN deposit_money activity
-        #         # This guarantees the sequence and makes both operations durable
-        #         move_money,
-        #     ],
-        # )
-
-        # # Run the agent - when it calls the move_money tool, it will create TWO activities:
-        # # 1. withdraw_money activity
-        # # 2.
deposit_money activity (only after withdraw succeeds) - # result = await Runner.run(money_mover_agent, params.event.content.content) - - # ============================================================================ - # PATTERN COMPARISON SUMMARY: - # ============================================================================ - # - # Pattern 1 (activity_as_tool): | Pattern 2 (function_tool with activities): - # - Single activity per tool call | - Multiple activities per tool call - # - 1:1 tool to activity mapping | - 1:many tool to activity mapping - # - Simple non-deterministic ops | - Complex multi-step operations - # - Let LLM sequence multiple tools | - Code controls activity sequencing - # - Example: get_weather, db_lookup | - Example: money_transfer, multi_step_workflow - # - # BOTH patterns provide: - # - Automatic retries and failure recovery - # - Full observability in Temporal UI - # - Durable execution guarantees - # - Seamless integration with OpenAI Agents SDK - # ============================================================================ - - @workflow.run - async def on_task_create(self, params: CreateTaskParams) -> str: - logger.info(f"Received task create params: {params}") - - # Initialize the conversation state with an empty history - self._state = StateModel( - input_list=[], - turn_number=0, - ) - - # 1. Acknowledge that the task has been created. - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content=f"Hello! I've received your task. Normally you can do some state initialization here, or just pass and do nothing until you get your first event. For now I'm just acknowledging that I've received a task with the following params:\n\n{json.dumps(params.params, indent=2)}.\n\nYou should only see this message once, when the task is created. All subsequent events will be handled by the `on_task_event_send` handler.", - ), - ) - - await workflow.wait_condition( - lambda: self._complete_task, - timeout=None, # Set a timeout if you want to prevent the task from running indefinitely. Generally this is not needed. Temporal can run hundreds of millions of workflows in parallel and more. Only do this if you have a specific reason to do so. 
- ) - return "Task completed" - - @workflow.signal - async def fulfill_order_signal(self, success: bool) -> None: - if success == True: - await self._pending_confirmation.put(True) diff --git a/examples/tutorials/10_async/10_temporal/070_open_ai_agents_sdk_tools/pyproject.toml b/examples/tutorials/10_async/10_temporal/070_open_ai_agents_sdk_tools/pyproject.toml deleted file mode 100644 index 22f4e008..00000000 --- a/examples/tutorials/10_async/10_temporal/070_open_ai_agents_sdk_tools/pyproject.toml +++ /dev/null @@ -1,35 +0,0 @@ -[build-system] -requires = ["hatchling"] -build-backend = "hatchling.build" - -[project] -name = "at070_open_ai_agents_sdk_tools" -version = "0.1.0" -description = "An AgentEx agent" -requires-python = ">=3.12" -dependencies = [ - "agentex-sdk>=0.6.0", - "openai-agents>=0.4.2", - "temporalio>=1.18.2", - "scale-gp", -] - -[project.optional-dependencies] -dev = [ - "pytest", - "black", - "isort", - "flake8", - "debugpy>=1.8.15", -] - -[tool.hatch.build.targets.wheel] -packages = ["project"] - -[tool.black] -line-length = 88 -target-version = ['py312'] - -[tool.isort] -profile = "black" -line_length = 88 diff --git a/examples/tutorials/10_async/10_temporal/070_open_ai_agents_sdk_tools/tests/test_agent.py b/examples/tutorials/10_async/10_temporal/070_open_ai_agents_sdk_tools/tests/test_agent.py deleted file mode 100644 index d6fdc6ff..00000000 --- a/examples/tutorials/10_async/10_temporal/070_open_ai_agents_sdk_tools/tests/test_agent.py +++ /dev/null @@ -1,155 +0,0 @@ -""" -Sample tests for AgentEx ACP agent. - -This test suite demonstrates how to test the main AgentEx API functions: -- Non-streaming event sending and polling -- Streaming event sending - -To run these tests: -1. Make sure the agent is running (via docker-compose or `agentex agents run`) -2. Set the AGENTEX_API_BASE_URL environment variable if not using default -3. 
Run: pytest test_agent.py -v - -Configuration: -- AGENTEX_API_BASE_URL: Base URL for the AgentEx server (default: http://localhost:5003) -- AGENT_NAME: Name of the agent to test (default: example-tutorial) -""" - -import os -import uuid - -import pytest -import pytest_asyncio -from test_utils.async_utils import ( - poll_messages, - send_event_and_poll_yielding, -) - -from agentex import AsyncAgentex -from agentex.types.task_message import TaskMessage -from agentex.types.agent_rpc_params import ParamsCreateTaskRequest - -# Configuration from environment variables -AGENTEX_API_BASE_URL = os.environ.get("AGENTEX_API_BASE_URL", "http://localhost:5003") -AGENT_NAME = os.environ.get("AGENT_NAME", "at070-open-ai-agents-sdk-tools") - - -@pytest_asyncio.fixture -async def client(): - """Create an AsyncAgentex client instance for testing.""" - client = AsyncAgentex(base_url=AGENTEX_API_BASE_URL) - yield client - await client.close() - - -@pytest.fixture -def agent_name(): - """Return the agent name for testing.""" - return AGENT_NAME - - -@pytest_asyncio.fixture -async def agent_id(client, agent_name): - """Retrieve the agent ID based on the agent name.""" - agents = await client.agents.list() - for agent in agents: - if agent.name == agent_name: - return agent.id - raise ValueError(f"Agent with name {agent_name} not found.") - - -class TestNonStreamingEvents: - """Test non-streaming event sending and polling.""" - - @pytest.mark.asyncio - async def test_send_event_and_poll(self, client: AsyncAgentex, agent_id: str): - """Test sending an event and polling for the response.""" - # Create a task for this conversation - task_response = await client.agents.create_task(agent_id, params=ParamsCreateTaskRequest(name=uuid.uuid1().hex)) - task = task_response.result - assert task is not None - - # Poll for the initial task creation message - print(f"[DEBUG 070 POLL] Polling for initial task creation message...") - async for message in poll_messages( - client=client, - task_id=task.id, - timeout=30, - sleep_interval=1.0, - ): - assert isinstance(message, TaskMessage) - if message.content and message.content.type == "text" and message.content.author == "agent": - # Check for the initial acknowledgment message - print(f"[DEBUG 070 POLL] Initial message: {message.content.content[:100]}") - assert "task" in message.content.content.lower() or "received" in message.content.content.lower() - break - - # Send an event asking about the weather in NYC and poll for response with streaming - user_message = "What is the weather in New York City?" 
- print(f"[DEBUG 070 POLL] Sending message: '{user_message}'") - - # Track what we've seen to ensure tool calls happened - seen_tool_request = False - seen_tool_response = False - final_message = None - - async for message in send_event_and_poll_yielding( - client=client, - agent_id=agent_id, - task_id=task.id, - user_message=user_message, - timeout=60, - sleep_interval=1.0 - ): - assert isinstance(message, TaskMessage) - print(f"[DEBUG 070 POLL] Received message - Type: {message.content.type if message.content else 'None'}, Author: {message.content.author if message.content else 'None'}, Status: {message.streaming_status}") - - # Track tool_request messages (agent calling get_weather) - if message.content and message.content.type == "tool_request": - print(f"[DEBUG 070 POLL] โœ… Saw tool_request - agent is calling get_weather tool") - seen_tool_request = True - - # Track tool_response messages (get_weather result) - if message.content and message.content.type == "tool_response": - print(f"[DEBUG 070 POLL] โœ… Saw tool_response - get_weather returned result") - seen_tool_response = True - - # Track agent text messages and their streaming updates - if message.content and message.content.type == "text" and message.content.author == "agent": - content_length = len(message.content.content) if message.content.content else 0 - print(f"[DEBUG 070 POLL] Agent text update - Status: {message.streaming_status}, Length: {content_length}") - final_message = message - - # Stop when we get DONE status - if message.streaming_status == "DONE" and content_length > 0: - print(f"[DEBUG 070 POLL] โœ… Streaming complete!") - break - - # Verify we got all the expected pieces - assert seen_tool_request, "Expected to see tool_request message (agent calling get_weather)" - assert seen_tool_response, "Expected to see tool_response message (get_weather result)" - assert final_message is not None, "Expected to see final agent text message" - assert final_message.content is not None and len(final_message.content.content) > 0, "Final message should have content" - - # Check that the response contains the temperature (22 degrees) - # The get_weather activity returns "The weather in New York City is 22 degrees Celsius" - print(f"[DEBUG 070 POLL] Final response: {final_message.content.content}") - assert "22" in final_message.content.content, "Expected weather response to contain temperature (22 degrees)" - - -class TestStreamingEvents: - """Test streaming event sending (backend verification via polling).""" - - @pytest.mark.asyncio - async def test_send_event_and_stream(self, client: AsyncAgentex, agent_id: str): - """ - Streaming test placeholder. - - NOTE: SSE streaming is tested via the UI (agentex-ui subscribeTaskState). - Backend streaming functionality is verified in test_send_event_and_poll. 
- """ - pass - - -if __name__ == "__main__": - pytest.main([__file__, "-v"]) \ No newline at end of file diff --git a/examples/tutorials/10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/.dockerignore b/examples/tutorials/10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/.dockerignore deleted file mode 100644 index c4948947..00000000 --- a/examples/tutorials/10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/.dockerignore +++ /dev/null @@ -1,43 +0,0 @@ -# Python -__pycache__/ -*.py[cod] -*$py.class -*.so -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -*.egg-info/ -.installed.cfg -*.egg - -# Environments -.env** -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# IDE -.idea/ -.vscode/ -*.swp -*.swo - -# Git -.git -.gitignore - -# Misc -.DS_Store diff --git a/examples/tutorials/10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/Dockerfile b/examples/tutorials/10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/Dockerfile deleted file mode 100644 index cc4c06bf..00000000 --- a/examples/tutorials/10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/Dockerfile +++ /dev/null @@ -1,62 +0,0 @@ -# syntax=docker/dockerfile:1.3 -FROM python:3.12-slim -COPY --from=ghcr.io/astral-sh/uv:0.6.4 /uv /uvx /bin/ - -# Install system dependencies -RUN apt-get update && apt-get install -y \ - htop \ - vim \ - curl \ - tar \ - python3-dev \ - postgresql-client \ - build-essential \ - libpq-dev \ - gcc \ - cmake \ - netcat-openbsd \ - nodejs \ - npm \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/** - -# Install tctl (Temporal CLI) -RUN curl -L https://github.com/temporalio/tctl/releases/download/v1.18.1/tctl_1.18.1_linux_arm64.tar.gz -o /tmp/tctl.tar.gz && \ - tar -xzf /tmp/tctl.tar.gz -C /usr/local/bin && \ - chmod +x /usr/local/bin/tctl && \ - rm /tmp/tctl.tar.gz - -RUN uv pip install --system --upgrade pip setuptools wheel - -ENV UV_HTTP_TIMEOUT=1000 - -# Copy pyproject.toml and README.md to install dependencies -COPY 10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/pyproject.toml /app/080_open_ai_agents_sdk_human_in_the_loop/pyproject.toml -COPY 10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/README.md /app/080_open_ai_agents_sdk_human_in_the_loop/README.md - -WORKDIR /app/080_open_ai_agents_sdk_human_in_the_loop - -# Copy the project code -COPY 10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/project /app/080_open_ai_agents_sdk_human_in_the_loop/project - -# Copy the test files -COPY 10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/tests /app/080_open_ai_agents_sdk_human_in_the_loop/tests - -# Copy shared test utilities -COPY test_utils /app/test_utils - -# Install the required Python packages with dev dependencies -RUN uv pip install --system .[dev] - -WORKDIR /app/080_open_ai_agents_sdk_human_in_the_loop - -ENV PYTHONPATH=/app - -# Set test environment variables -ENV AGENT_NAME=at080-open-ai-agents-sdk-human-in-the-loop - -# Run the ACP server using uvicorn -CMD ["uvicorn", "project.acp:acp", "--host", "0.0.0.0", "--port", "8000"] - -# When we deploy the worker, we will replace the CMD with the following -# CMD ["python", "-m", "run_worker"] diff --git a/examples/tutorials/10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/README.md b/examples/tutorials/10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/README.md deleted file mode 100644 index 8ba2b678..00000000 --- 
a/examples/tutorials/10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/README.md
+++ /dev/null
@@ -1,199 +0,0 @@
-# [Temporal] OpenAI Agents SDK - Human in the Loop
-
-**Part of the [OpenAI SDK + Temporal integration series](../README.md)** → Previous: [070 Tools](../070_open_ai_agents_sdk_tools/)
-
-## What You'll Learn
-
-How to pause agent execution and wait indefinitely for human approval using Temporal's child workflows and signals. The agent can wait for hours, days, or weeks for human input without consuming resources - and if the system crashes, it resumes exactly where it left off.
-
-**Pattern:**
-1. Agent calls `wait_for_confirmation` tool
-2. Tool spawns a child workflow that waits for a signal
-3. Human approves/rejects via Temporal CLI or web UI
-4. Child workflow completes, agent continues with the response
-
-## New Temporal Concepts
-
-### Signals
-Signals are a way for external systems to interact with running workflows. Think of them as secure, durable messages sent to your workflow from the outside world.
-
-**Use cases:**
-- User approving/rejecting an action in a web app
-- Payment confirmation triggering shipping
-- Live data feeds (stock prices) triggering trades
-- Webhooks from external services updating workflow state
-
-**How it works:** Define a function in your workflow class with the `@workflow.signal` decorator. External systems can then send signals using:
-- Temporal SDK (by workflow ID)
-- Another Temporal workflow
-- Temporal CLI
-- Temporal Web UI
-
-[Learn more about signals](https://docs.temporal.io/develop/python/message-passing#send-signal-from-client)
-
-### Child Workflows
-Child workflows are like spawning a new workflow from within your current workflow. Similar to calling a function in traditional programming, but the child workflow:
-- Runs independently with its own execution history
-- Inherits all Temporal durability guarantees
-- Can be monitored separately in Temporal UI
-- Continues running even if the parent has issues
-
-**Why use child workflows for human-in-the-loop?**
-- The parent workflow can continue processing while waiting
-- The child workflow can wait indefinitely for human input
-- Full isolation between waiting logic and main agent logic
-- Clean separation of concerns
-
-[Learn more about child workflows](https://docs.temporal.io/develop/python/child-workflows)
-
-## Prerequisites
-- Development environment set up (see [main repo README](https://github.com/scaleapi/scale-agentex))
-- Backend services running: `make dev` from repository root
-- Temporal UI available at http://localhost:8233
-- OpenAI Agents SDK plugin configured (see [060_hello_world](../060_open_ai_agents_sdk_hello_world/))
-- Understanding of tools (see [070_tools](../070_open_ai_agents_sdk_tools/))
-
-## Quick Start
-
-```bash
-cd examples/tutorials/10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop
-uv run agentex agents run --manifest manifest.yaml
-```
-
-**Monitor:** Open Temporal UI at http://localhost:8233 to see child workflows and signals.
-
-## Try It
-
-1. Ask the agent to do something that requires approval (e.g., "Order 100 widgets")
-2. The agent will call `wait_for_confirmation` and pause
-3. Open Temporal UI (localhost:8233)
-4. Find the parent workflow - you'll see it's waiting on the child workflow:
-
-![Parent Workflow Waiting](../_images/human_in_the_loop_workflow.png)
-
-5. Find the child workflow - it's waiting for a signal:
-
-![Child Workflow Waiting](../_images/human_in_the_loop_child_workflow.png)
-
-6. Send approval signal via CLI:
-
-```bash
-temporal workflow signal \
-    --workflow-id="child-workflow-id" \
-    --name="fulfill_order_signal" \
-    --input=true
-```
-
-7. Watch both workflows complete - the agent resumes and finishes the action
-
-## Key Code
-
-### The Tool: Spawning a Child Workflow
-```python
-from agents import function_tool
-from temporalio import workflow
-from temporalio.workflow import ParentClosePolicy
-
-from project.child_workflow import ChildWorkflow
-from agentex.lib.environment_variables import EnvironmentVariables
-
-environment_variables = EnvironmentVariables.refresh()
-
-@function_tool
-async def wait_for_confirmation() -> str:
-    """Wait for human confirmation before proceeding"""
-
-    # Spawn a child workflow that will wait for a signal
-    result = await workflow.execute_child_workflow(
-        ChildWorkflow.on_task_create,
-        environment_variables.WORKFLOW_NAME + "_child",
-        id="child-workflow-id",
-        parent_close_policy=ParentClosePolicy.TERMINATE,
-    )
-
-    return result
-```
-
-### The Child Workflow: Waiting for Signals
-```python
-import asyncio
-
-from temporalio import workflow
-
-from agentex.lib.utils.logging import make_logger
-from agentex.lib.environment_variables import EnvironmentVariables
-
-environment_variables = EnvironmentVariables.refresh()
-logger = make_logger(__name__)
-
-@workflow.defn(name=environment_variables.WORKFLOW_NAME + "_child")
-class ChildWorkflow:
-    def __init__(self):
-        # Queue to hold signals
-        self._pending_confirmation: asyncio.Queue[bool] = asyncio.Queue()
-
-    @workflow.run
-    async def on_task_create(self, name: str) -> str:
-        logger.info(f"Child workflow started: {name}")
-
-        # Wait indefinitely until we receive a signal
-        await workflow.wait_condition(
-            lambda: not self._pending_confirmation.empty()
-        )
-
-        # Signal received - complete the workflow
-        return "Task completed"
-
-    @workflow.signal
-    async def fulfill_order_signal(self, success: bool) -> None:
-        """External systems call this to approve/reject"""
-        if success:
-            await self._pending_confirmation.put(True)
-```
-
-### Using the Tool in Your Agent
-```python
-confirm_order_agent = Agent(
-    name="Confirm Order",
-    instructions="When user asks to confirm an order, use wait_for_confirmation tool.",
-    tools=[wait_for_confirmation]
-)
-
-result = await Runner.run(confirm_order_agent, params.event.content.content)
-```
-
-## How It Works
-
-1. **Agent calls tool**: The LLM decides to call `wait_for_confirmation`
-2. **Child workflow spawned**: A new workflow is created with its own ID
-3. **Child waits**: Uses `workflow.wait_condition()` to block until signal arrives
-4. **Parent waits**: Parent workflow is blocked waiting for child to complete
-5. **Signal sent**: External system (CLI, web app, API) sends signal with workflow ID
-6. **Signal received**: Child workflow's `fulfill_order_signal()` method is called
-7. **Queue updated**: Signal handler adds item to queue
-8. **Wait condition satisfied**: `wait_condition()` unblocks
-9. **Child completes**: Returns result to parent
-10. **Parent resumes**: Agent continues with the response
-
-**Critical insight:** At any point, if the system crashes:
-- Both workflows are durable and will resume
-- No context is lost
-- The moment the signal arrives, execution continues
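-
-The signal in step 5 can come from anything holding a Temporal client, not just the CLI. A minimal sketch of approving programmatically with the Temporal Python SDK (the address, workflow ID, and signal name follow this tutorial's defaults; this mirrors what the test suite does):
-
-```python
-import asyncio
-
-from temporalio.client import Client
-
-
-async def approve_order() -> None:
-    # Connect to the local Temporal service (same address the worker uses)
-    client = await Client.connect("localhost:7233")
-
-    # The child workflow is started with the fixed ID "child-workflow-id"
-    handle = client.get_workflow_handle("child-workflow-id")
-
-    # Equivalent to the CLI command above: fulfill_order_signal with input=true
-    await handle.signal("fulfill_order_signal", True)
-
-
-asyncio.run(approve_order())
-```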
-
-## Why This Matters
-
-**Without Temporal:** If your system crashes while waiting for human approval, you lose all context about what was being approved. The user has to start over.
-
-**With Temporal:**
-- The workflow waits durably (hours, days, weeks)
-- If the system crashes and restarts, context is preserved
-- The moment a human sends approval, workflow resumes exactly where it left off
-- Full audit trail of who approved what and when
-
-**Production use cases:**
-- **Financial transactions**: Agent initiates transfer, human approves
-- **Legal document processing**: AI extracts data, lawyer reviews
-- **Multi-step purchasing**: Agent negotiates, manager approves
-- **Compliance workflows**: System flags issue, human decides action
-- **High-stakes decisions**: Any operation requiring human judgment
-
-This pattern transforms agents from fully automated systems into **collaborative AI assistants** that know when to ask for help.
-
-## When to Use
-- Financial transactions requiring approval
-- High-stakes decisions needing human judgment
-- Compliance workflows with mandatory review steps
-- Legal or contractual operations
-- Any operation where errors have serious consequences
-- Workflows where AI assists but humans decide
-
-**Congratulations!** You've completed all AgentEx tutorials. You now know how to build production-ready agents from simple sync patterns to complex durable workflows with human oversight.
diff --git a/examples/tutorials/10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/dev.ipynb b/examples/tutorials/10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/dev.ipynb
deleted file mode 100644
index 3e93e183..00000000
--- a/examples/tutorials/10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/dev.ipynb
+++ /dev/null
@@ -1,124 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "36834357",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from agentex import Agentex\n",
-    "\n",
-    "client = Agentex(base_url=\"http://localhost:5003\")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "d1c309d6",
-   "metadata": {},
-   "outputs": [],
-   "source": "AGENT_NAME = \"at080-open-ai-agents-sdk-human-in-the-loop\""
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "9f6e6ef0",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# (REQUIRED) Create a new task. 
For Async agents, you must create a task for messages to be associated with.\n", - "import uuid\n", - "\n", - "rpc_response = client.agents.create_task(\n", - " agent_name=AGENT_NAME,\n", - " params={\n", - " \"name\": f\"{str(uuid.uuid4())[:8]}-task\",\n", - " \"params\": {}\n", - " }\n", - ")\n", - "\n", - "task = rpc_response.result\n", - "print(task)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b03b0d37", - "metadata": {}, - "outputs": [], - "source": [ - "# Send an event to the agent\n", - "\n", - "# The response is expected to be a list of TaskMessage objects, which is a union of the following types:\n", - "# - TextContent: A message with just text content \n", - "# - DataContent: A message with JSON-serializable data content\n", - "# - ToolRequestContent: A message with a tool request, which contains a JSON-serializable request to call a tool\n", - "# - ToolResponseContent: A message with a tool response, which contains response object from a tool call in its content\n", - "\n", - "# When processing the message/send response, if you are expecting more than TextContent, such as DataContent, ToolRequestContent, or ToolResponseContent, you can process them as well\n", - "\n", - "rpc_response = client.agents.send_event(\n", - " agent_name=AGENT_NAME,\n", - " params={\n", - " \"content\": {\"type\": \"text\", \"author\": \"user\", \"content\": \"Hello what can you do?\"},\n", - " \"task_id\": task.id,\n", - " }\n", - ")\n", - "\n", - "event = rpc_response.result\n", - "print(event)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a6927cc0", - "metadata": {}, - "outputs": [], - "source": [ - "# Subscribe to the async task messages produced by the agent\n", - "from agentex.lib.utils.dev_tools import subscribe_to_async_task_messages\n", - "\n", - "task_messages = subscribe_to_async_task_messages(\n", - " client=client,\n", - " task=task, \n", - " only_after_timestamp=event.created_at, \n", - " print_messages=True,\n", - " rich_print=True,\n", - " timeout=5,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4864e354", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.9" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} \ No newline at end of file diff --git a/examples/tutorials/10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/environments.yaml b/examples/tutorials/10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/environments.yaml deleted file mode 100644 index f9051191..00000000 --- a/examples/tutorials/10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/environments.yaml +++ /dev/null @@ -1,64 +0,0 @@ -# Agent Environment Configuration -# ------------------------------ -# This file defines environment-specific settings for your agent. -# This DIFFERS from the manifest.yaml file in that it is used to program things that are ONLY per environment. 
- -# ********** EXAMPLE ********** -# schema_version: "v1" # This is used to validate the file structure and is not used by the agentex CLI -# environments: -# dev: -# auth: -# principal: -# user_id: "1234567890" -# user_name: "John Doe" -# user_email: "john.doe@example.com" -# user_role: "admin" -# user_permissions: "read, write, delete" -# helm_overrides: # This is used to override the global helm values.yaml file in the agentex-agent helm charts -# replicas: 3 -# resources: -# requests: -# cpu: "1000m" -# memory: "2Gi" -# limits: -# cpu: "2000m" -# memory: "4Gi" -# env: -# - name: LOG_LEVEL -# value: "DEBUG" -# - name: ENVIRONMENT -# value: "staging" -# -# kubernetes: -# # OPTIONAL - Otherwise it will be derived from separately. However, this can be used to override the derived -# # namespace and deploy it with in the same namespace that already exists for a separate agent. -# namespace: "team-example-tutorial" -# ********** END EXAMPLE ********** - -schema_version: "v1" # This is used to validate the file structure and is not used by the agentex CLI -environments: - dev: - auth: - principal: - user_id: # TODO: Fill in - account_id: # TODO: Fill in - helm_overrides: - # This is used to override the global helm values.yaml file in the agentex-agent helm charts - replicaCount: 2 - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "1000m" - memory: "2Gi" - temporal-worker: - enabled: true - replicaCount: 2 - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "1000m" - memory: "2Gi" \ No newline at end of file diff --git a/examples/tutorials/10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/manifest.yaml b/examples/tutorials/10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/manifest.yaml deleted file mode 100644 index 09f49c3e..00000000 --- a/examples/tutorials/10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/manifest.yaml +++ /dev/null @@ -1,141 +0,0 @@ -# Agent Manifest Configuration -# --------------------------- -# This file defines how your agent should be built and deployed. - -# Build Configuration -# ------------------ -# The build config defines what gets packaged into your agent's Docker image. -# This same configuration is used whether building locally or remotely. -# -# When building: -# 1. All files from include_paths are collected into a build context -# 2. The context is filtered by dockerignore rules -# 3. The Dockerfile uses this context to build your agent's image -# 4. 
The image is pushed to a registry and used to run your agent -build: - context: - # Root directory for the build context - root: ../../../ # Up to tutorials level to include test_utils - - # Paths to include in the Docker build context - # Must include: - # - Your agent's directory (your custom agent code) - # These paths are collected and sent to the Docker daemon for building - include_paths: - - 10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop - - test_utils - - # Path to your agent's Dockerfile - # This defines how your agent's image is built from the context - # Relative to the root directory - dockerfile: 10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/Dockerfile - - # Path to your agent's .dockerignore - # Filters unnecessary files from the build context - # Helps keep build context small and builds fast - dockerignore: 10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/.dockerignore - - -# Local Development Configuration -# ----------------------------- -# Only used when running the agent locally -local_development: - agent: - port: 8000 # Port where your local ACP server is running - host_address: host.docker.internal # Host address for Docker networking (host.docker.internal for Docker, localhost for direct) - - # File paths for local development (relative to this manifest.yaml) - paths: - # Path to ACP server file - # Examples: - # project/acp.py (standard) - # src/server.py (custom structure) - # ../shared/acp.py (shared across projects) - # /absolute/path/acp.py (absolute path) - acp: project/acp.py - - # Path to temporal worker file - # Examples: - # project/run_worker.py (standard) - # workers/temporal.py (custom structure) - # ../shared/worker.py (shared across projects) - worker: project/run_worker.py - - -# Agent Configuration -# ----------------- -agent: - # Type of agent - either sync or async - acp_type: async - - # Unique name for your agent - # Used for task routing and monitoring - name: at080-open-ai-agents-sdk-human-in-the-loop - - # Description of what your agent does - # Helps with documentation and discovery - description: An AgentEx agent - - # Temporal workflow configuration - # This enables your agent to run as a Temporal workflow for long-running tasks - temporal: - enabled: true - workflows: - # Name of the workflow class - # Must match the @workflow.defn name in your workflow.py - - name: at080-open-ai-agents-sdk-human-in-the-loop - - # Queue name for task distribution - # Used by Temporal to route tasks to your agent - # Convention: _task_queue - queue_name: at080_open_ai_agents_sdk_human_in_the_loop_queue - - # Optional: Credentials mapping - # Maps Kubernetes secrets to environment variables - # Common credentials include: - credentials: - - env_var_name: REDIS_URL - secret_name: redis-url-secret - secret_key: url - # - env_var_name: OPENAI_API_KEY - # secret_name: openai-api-key - # secret_key: api-key - - # Optional: Set Environment variables for running your agent locally as well - # as for deployment later on - env: - OPENAI_API_KEY: "" - # OPENAI_BASE_URL: "" - OPENAI_ORG_ID: "" - - -# Deployment Configuration -# ----------------------- -# Configuration for deploying your agent to Kubernetes clusters -deployment: - # Container image configuration - image: - repository: "" # Update with your container registry - tag: "latest" # Default tag, should be versioned in production - - imagePullSecrets: - - name: my-registry-secret # Update with your image pull secret name - - # Global deployment settings that apply to 
all clusters
-  # These can be overridden using --override-file with custom configuration files
-  global:
-    agent:
-      name: "at080-open-ai-agents-sdk-human-in-the-loop"
-      description: "An AgentEx agent"
-
-    # Default replica count
-    replicaCount: 1
-
-    # Default resource requirements
-    resources:
-      requests:
-        cpu: "500m"
-        memory: "1Gi"
-      limits:
-        cpu: "1000m"
-        memory: "2Gi"
diff --git a/examples/tutorials/10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/project/__init__.py b/examples/tutorials/10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/project/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/examples/tutorials/10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/project/acp.py b/examples/tutorials/10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/project/acp.py
deleted file mode 100644
index c05effdb..00000000
--- a/examples/tutorials/10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/project/acp.py
+++ /dev/null
@@ -1,95 +0,0 @@
-import os
-import sys
-
-from temporalio.contrib.openai_agents import OpenAIAgentsPlugin
-
-# === DEBUG SETUP (AgentEx CLI Debug Support) ===
-if os.getenv("AGENTEX_DEBUG_ENABLED") == "true":
-    try:
-        import debugpy
-        debug_port = int(os.getenv("AGENTEX_DEBUG_PORT", "5679"))
-        debug_type = os.getenv("AGENTEX_DEBUG_TYPE", "acp")
-        wait_for_attach = os.getenv("AGENTEX_DEBUG_WAIT_FOR_ATTACH", "false").lower() == "true"
-
-        # Configure debugpy
-        debugpy.configure(subProcess=False)
-        debugpy.listen(debug_port)
-
-        print(f"🐛 [{debug_type.upper()}] Debug server listening on port {debug_port}")
-
-        if wait_for_attach:
-            print(f"⏳ [{debug_type.upper()}] Waiting for debugger to attach...")
-            debugpy.wait_for_client()
-            print(f"✅ [{debug_type.upper()}] Debugger attached!")
-        else:
-            print(f"📡 [{debug_type.upper()}] Ready for debugger attachment")
-
-    except ImportError:
-        print("❌ debugpy not available. Install with: pip install debugpy")
-        sys.exit(1)
-    except Exception as e:
-        print(f"❌ Debug setup failed: {e}")
-        sys.exit(1)
-# === END DEBUG SETUP ===
-
-from agentex.lib.types.fastacp import TemporalACPConfig
-from agentex.lib.sdk.fastacp.fastacp import FastACP
-from agentex.lib.core.temporal.plugins.openai_agents.models.temporal_streaming_model import (
-    TemporalStreamingModelProvider,
-)
-from agentex.lib.core.temporal.plugins.openai_agents.interceptors.context_interceptor import ContextInterceptor
-
-# ============================================================================
-# STREAMING SETUP: Interceptor + Model Provider
-# ============================================================================
-# This is where the streaming magic is configured! Two key components:
-#
-# 1. ContextInterceptor
-#    - Threads task_id through activity headers using Temporal's interceptor pattern
-#    - Outbound: Reads _task_id from workflow instance, injects into activity headers
-#    - Inbound: Extracts task_id from headers, sets streaming_task_id ContextVar
-#    - This enables runtime context without forking the Temporal plugin!
-#
-# 2. 
TemporalStreamingModelProvider -# - Returns TemporalStreamingModel instances that read task_id from ContextVar -# - TemporalStreamingModel.get_response() streams tokens to Redis in real-time -# - Still returns complete response to Temporal for determinism/replay safety -# - Uses AgentEx ADK streaming infrastructure (Redis XADD to stream:{task_id}) -# -# Together, these enable real-time LLM streaming while maintaining Temporal's -# durability guarantees. No forked components - uses STANDARD OpenAIAgentsPlugin! -context_interceptor = ContextInterceptor() -temporal_streaming_model_provider = TemporalStreamingModelProvider() - -# Create the ACP server -# IMPORTANT: We use the STANDARD temporalio.contrib.openai_agents.OpenAIAgentsPlugin -# No forking needed! The interceptor + model provider handle all streaming logic. -# -# Note: ModelActivityParameters with long timeout allows child workflows to wait -# indefinitely for human input without timing out -acp = FastACP.create( - acp_type="async", - config=TemporalACPConfig( - # When deployed to the cluster, the Temporal address will automatically be set to the cluster address - # For local development, we set the address manually to talk to the local Temporal service set up via docker compose - type="temporal", - temporal_address=os.getenv("TEMPORAL_ADDRESS", "localhost:7233"), - plugins=[OpenAIAgentsPlugin(model_provider=temporal_streaming_model_provider)], - interceptors=[context_interceptor], - ) -) - - -# Notice that we don't need to register any handlers when we use type="temporal" -# If you look at the code in agentex.sdk.fastacp.impl.temporal_acp -# You can see that these handlers are automatically registered when the ACP is created - -# @acp.on_task_create -# This will be handled by the method in your workflow that is decorated with @workflow.run - -# @acp.on_task_event_send -# This will be handled by the method in your workflow that is decorated with @workflow.signal(name=SignalName.RECEIVE_MESSAGE) - -# @acp.on_task_cancel -# This does not need to be handled by your workflow. 
-
-# It is automatically handled by the temporal client which cancels the workflow directly
\ No newline at end of file
diff --git a/examples/tutorials/10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/project/activities.py b/examples/tutorials/10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/project/activities.py
deleted file mode 100644
index 4cb05654..00000000
--- a/examples/tutorials/10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/project/activities.py
+++ /dev/null
@@ -1,45 +0,0 @@
-import random
-import asyncio
-
-from temporalio import activity, workflow
-from temporalio.workflow import ParentClosePolicy
-
-from project.child_workflow import ChildWorkflow
-from agentex.lib.environment_variables import EnvironmentVariables
-
-environment_variables = EnvironmentVariables.refresh()
-
-@activity.defn
-async def get_weather(city: str) -> str:
-    """Get the weather for a given city"""
-    if city == "New York City":
-        return "The weather in New York City is 22 degrees Celsius"
-    else:
-        return "The weather is unknown"
-
-@activity.defn
-async def withdraw_money() -> None:
-    """Withdraw money from an account"""
-    random_number = random.randint(0, 100)
-    await asyncio.sleep(random_number)
-    print("Withdrew money from account")
-
-@activity.defn
-async def deposit_money() -> None:
-    """Deposit money into an account"""
-    await asyncio.sleep(10)
-    print("Deposited money into account")
-
-
-# NOTE: This helper is registered with the worker but is not used by the agent.
-# Child workflows can only be started from workflow code, so calling
-# workflow.execute_child_workflow() inside an activity would fail at runtime;
-# the workflow-safe version of this pattern lives in project/tools.py.
-@activity.defn
-async def confirm_order() -> bool:
-    """Confirm order"""
-    result = await workflow.execute_child_workflow(
-        ChildWorkflow.on_task_create,
-        environment_variables.WORKFLOW_NAME + "_child",
-        id="child-workflow-id",
-        parent_close_policy=ParentClosePolicy.TERMINATE,
-    )
-
-    print(result)
-    return True
diff --git a/examples/tutorials/10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/project/child_workflow.py b/examples/tutorials/10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/project/child_workflow.py
deleted file mode 100644
index 3dc8520a..00000000
--- a/examples/tutorials/10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/project/child_workflow.py
+++ /dev/null
@@ -1,68 +0,0 @@
-"""
-Child Workflow for Human-in-the-Loop Pattern
-
-Child workflow that waits indefinitely for external human input via Temporal signals.
-Benefits: Durable waiting, survives system failures, can wait days/weeks without resource consumption.
-
-Usage: External systems send signals to trigger workflow completion.
-Production: Replace CLI with web dashboards, mobile apps, or API integrations.
-"""
-
-import asyncio
-
-from temporalio import workflow
-
-from agentex.lib.utils.logging import make_logger
-from agentex.lib.environment_variables import EnvironmentVariables
-
-environment_variables = EnvironmentVariables.refresh()
-logger = make_logger(__name__)
-
-
-@workflow.defn(name=environment_variables.WORKFLOW_NAME + "_child")
-class ChildWorkflow:
-    """
-    Child workflow that waits for human approval via external signals.
-
-    Lifecycle: Spawned by parent → waits for signal → human approves → completes.
-    Signal: temporal workflow signal --workflow-id="child-workflow-id" --name="fulfill_order_signal" --input=true
-    """
-
-    def __init__(self):
-        # Queue to handle signals from external systems (human input)
-        self._pending_confirmation: asyncio.Queue[bool] = asyncio.Queue()
-
-    @workflow.run
-    async def on_task_create(self, name: str) -> str:
-        """
-        Wait indefinitely for human approval signal.
-
-        Uses workflow.wait_condition() to pause until external signal received.
-        Survives system failures and resumes exactly where it left off.
-        """
-        logger.info(f"Child workflow started: {name}")
-
-        # Wait until human sends approval signal (queue becomes non-empty)
-        await workflow.wait_condition(
-            lambda: not self._pending_confirmation.empty()
-        )
-
-        # Signal received - consume the decision and complete the workflow
-        self._pending_confirmation.get_nowait()
-
-        return "Task completed"
-
-    @workflow.signal
-    async def fulfill_order_signal(self, success: bool) -> None:
-        """
-        Receive human approval decision and trigger workflow completion.
-
-        External systems send this signal to provide human input.
-        CLI: temporal workflow signal --workflow-id="child-workflow-id" --name="fulfill_order_signal" --input=true
-        Production: Use Temporal SDK from web apps, mobile apps, APIs, etc.
-        """
-        # Add human decision to queue, which triggers wait_condition to resolve
-        if success:
-            await self._pending_confirmation.put(True)
diff --git a/examples/tutorials/10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/project/run_worker.py b/examples/tutorials/10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/project/run_worker.py
deleted file mode 100644
index a07439fd..00000000
--- a/examples/tutorials/10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/project/run_worker.py
+++ /dev/null
@@ -1,73 +0,0 @@
-import asyncio
-
-from temporalio.contrib.openai_agents import OpenAIAgentsPlugin
-
-from project.workflow import At080OpenAiAgentsSdkHumanInTheLoopWorkflow
-from project.activities import confirm_order, deposit_money, withdraw_money
-from project.child_workflow import ChildWorkflow
-from agentex.lib.utils.debug import setup_debug_if_enabled
-from agentex.lib.utils.logging import make_logger
-from agentex.lib.environment_variables import EnvironmentVariables
-from agentex.lib.core.temporal.activities import get_all_activities
-from agentex.lib.core.temporal.workers.worker import AgentexWorker
-from agentex.lib.core.temporal.plugins.openai_agents.hooks.activities import stream_lifecycle_content
-from agentex.lib.core.temporal.plugins.openai_agents.models.temporal_streaming_model import (
-    TemporalStreamingModelProvider,
-)
-from agentex.lib.core.temporal.plugins.openai_agents.interceptors.context_interceptor import ContextInterceptor
-
-environment_variables = EnvironmentVariables.refresh()
-
-logger = make_logger(__name__)
-
-
-async def main():
-    # Setup debug mode if enabled
-    setup_debug_if_enabled()
-
-    task_queue_name = environment_variables.WORKFLOW_TASK_QUEUE
-    if task_queue_name is None:
-        raise ValueError("WORKFLOW_TASK_QUEUE is not set")
-
-    # Add activities to the worker
-    # stream_lifecycle_content is required for hooks to work (creates tool_request/tool_response messages)
-    all_activities = get_all_activities() + [withdraw_money, deposit_money, confirm_order, stream_lifecycle_content]  # add your own activities here
-
-    # ============================================================================
-    # STREAMING SETUP: Interceptor + Model Provider
-    # ============================================================================
-    # This is where the streaming magic is configured! Two key components:
-    #
-    # 1. ContextInterceptor
-    #    - Threads task_id through activity headers using Temporal's interceptor pattern
-    #    - Outbound: Reads _task_id from workflow instance, injects into activity headers
-    #    - Inbound: Extracts task_id from headers, sets streaming_task_id ContextVar
-    #    - This enables runtime context without forking the Temporal plugin!
-    #
-    # 2. TemporalStreamingModelProvider
-    #    - Returns TemporalStreamingModel instances that read task_id from ContextVar
-    #    - TemporalStreamingModel.get_response() streams tokens to Redis in real-time
-    #    - Still returns complete response to Temporal for determinism/replay safety
-    #    - Uses AgentEx ADK streaming infrastructure (Redis XADD to stream:{task_id})
-    #
-    # Together, these enable real-time LLM streaming while maintaining Temporal's
-    # durability guarantees. No forked components - uses STANDARD OpenAIAgentsPlugin!
-    context_interceptor = ContextInterceptor()
-    temporal_streaming_model_provider = TemporalStreamingModelProvider()
-
-    # Create a worker with automatic tracing
-    # IMPORTANT: We use the STANDARD temporalio.contrib.openai_agents.OpenAIAgentsPlugin
-    # No forking needed! The interceptor + model provider handle all streaming logic.
-    worker = AgentexWorker(
-        task_queue=task_queue_name,
-        plugins=[OpenAIAgentsPlugin(model_provider=temporal_streaming_model_provider)],
-        interceptors=[context_interceptor],
-    )
-
-    await worker.run(
-        activities=all_activities,
-        workflows=[At080OpenAiAgentsSdkHumanInTheLoopWorkflow, ChildWorkflow]
-    )
-
-if __name__ == "__main__":
-    asyncio.run(main())
\ No newline at end of file
diff --git a/examples/tutorials/10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/project/tools.py b/examples/tutorials/10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/project/tools.py
deleted file mode 100644
index 92208ac4..00000000
--- a/examples/tutorials/10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/project/tools.py
+++ /dev/null
@@ -1,37 +0,0 @@
-"""
-Human-in-the-Loop Tools for OpenAI Agents SDK + Temporal Integration
-
-Tools that pause agent execution and wait for human input using child workflows and signals.
-Pattern: Agent calls tool → spawns child workflow → waits for signal → human approves → continues.
-"""
-
-from agents import function_tool
-from temporalio import workflow
-from temporalio.workflow import ParentClosePolicy
-
-from project.child_workflow import ChildWorkflow
-from agentex.lib.environment_variables import EnvironmentVariables
-
-environment_variables = EnvironmentVariables.refresh()
-
-@function_tool
-async def wait_for_confirmation() -> str:
-    """
-    Pause agent execution and wait for human approval via child workflow.
-
-    Spawns a child workflow that waits for external signal. Human approves via:
-    temporal workflow signal --workflow-id="child-workflow-id" --name="fulfill_order_signal" --input=true
-
-    Benefits: Durable waiting, survives system failures, scalable to millions of workflows.
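-
-    Note: The child workflow ID is hardcoded to "child-workflow-id", so only one
-    confirmation can be pending at a time (Temporal requires workflow IDs to be
-    unique among running workflows). A production system would derive the ID from
-    the task so that concurrent approvals do not collide.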
- """ - - # Spawn child workflow that waits for human signal - # Child workflow has fixed ID "child-workflow-id" so external systems can signal it - result = await workflow.execute_child_workflow( - ChildWorkflow.on_task_create, - environment_variables.WORKFLOW_NAME + "_child", - id="child-workflow-id", - parent_close_policy=ParentClosePolicy.TERMINATE, - ) - - return result \ No newline at end of file diff --git a/examples/tutorials/10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/project/workflow.py b/examples/tutorials/10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/project/workflow.py deleted file mode 100644 index 4f11ac4c..00000000 --- a/examples/tutorials/10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/project/workflow.py +++ /dev/null @@ -1,248 +0,0 @@ -""" -OpenAI Agents SDK + Temporal Integration: Human-in-the-Loop Tutorial - -This tutorial demonstrates how to pause agent execution and wait for human approval -using Temporal's child workflows and signals. - -KEY CONCEPTS: -- Child workflows: Independent workflows spawned by parent for human interaction -- Signals: External systems can send messages to running workflows -- Durable waiting: Agents can wait indefinitely for human input without losing state - -WHY THIS MATTERS: -Without Temporal, if your system crashes while waiting for human approval, you lose -all context. With Temporal, the agent resumes exactly where it left off after -system failures, making human-in-the-loop workflows production-ready. - -PATTERN: -1. Agent calls wait_for_confirmation tool -2. Tool spawns child workflow that waits for signal -3. Human approves via CLI/web app -4. Child workflow completes, agent continues - -Usage: `temporal workflow signal --workflow-id="child-workflow-id" --name="fulfill_order_signal" --input=true` -""" - -import os -import json -import asyncio -from typing import Any, Dict, List - -from agents import Agent, Runner -from temporalio import workflow - -from agentex.lib import adk -from project.tools import wait_for_confirmation -from agentex.lib.types.acp import SendEventParams, CreateTaskParams -from agentex.lib.types.tracing import SGPTracingProcessorConfig -from agentex.lib.utils.logging import make_logger -from agentex.types.text_content import TextContent -from agentex.lib.utils.model_utils import BaseModel -from agentex.lib.environment_variables import EnvironmentVariables -from agentex.lib.core.temporal.types.workflow import SignalName -from agentex.lib.core.temporal.workflows.workflow import BaseWorkflow -from agentex.lib.core.tracing.tracing_processor_manager import ( - add_tracing_processor_config, -) -from agentex.lib.core.temporal.plugins.openai_agents.hooks.hooks import TemporalStreamingHooks - -# Configure tracing processor (optional - only if you have SGP credentials) -add_tracing_processor_config( - SGPTracingProcessorConfig( - sgp_api_key=os.environ.get("SGP_API_KEY", ""), - sgp_account_id=os.environ.get("SGP_ACCOUNT_ID", ""), - ) -) - -environment_variables = EnvironmentVariables.refresh() - -if environment_variables.WORKFLOW_NAME is None: - raise ValueError("Environment variable WORKFLOW_NAME is not set") - -if environment_variables.AGENT_NAME is None: - raise ValueError("Environment variable AGENT_NAME is not set") - -# Validate OpenAI API key is set -if not os.environ.get("OPENAI_API_KEY"): - raise ValueError( - "OPENAI_API_KEY environment variable is not set. " - "This tutorial requires an OpenAI API key to run the OpenAI Agents SDK. 
" - "Please set OPENAI_API_KEY in your environment or manifest.yaml file." - ) - -logger = make_logger(__name__) - - -class StateModel(BaseModel): - """ - State model for preserving conversation history across turns. - - This allows the agent to maintain context throughout the conversation, - making it possible to reference previous messages and build on the discussion. - """ - - input_list: List[Dict[str, Any]] - turn_number: int - - -@workflow.defn(name=environment_variables.WORKFLOW_NAME) -class At080OpenAiAgentsSdkHumanInTheLoopWorkflow(BaseWorkflow): - """ - Human-in-the-Loop Temporal Workflow - - Demonstrates agents that can pause execution and wait for human approval. - When approval is needed, the agent spawns a child workflow that waits for - external signals (human input) before continuing. - - Benefits: Durable waiting, survives system failures, scalable to millions of workflows. - """ - - def __init__(self): - super().__init__(display_name=environment_variables.AGENT_NAME) - self._complete_task = False - self._state: StateModel | None = None - self._pending_confirmation: asyncio.Queue[str] = asyncio.Queue() - self._task_id = None - self._trace_id = None - self._parent_span_id = None - - @workflow.signal(name=SignalName.RECEIVE_EVENT) - async def on_task_event_send(self, params: SendEventParams) -> None: - """ - Handle user messages with human-in-the-loop approval capability. - - When the agent needs human approval, it calls wait_for_confirmation which spawns - a child workflow that waits for external signals before continuing. - """ - logger.info(f"Received task message instruction: {params}") - - if self._state is None: - raise ValueError("State is not initialized") - - # Increment turn number for tracing - self._state.turn_number += 1 - - self._task_id = params.task.id - self._trace_id = params.task.id - - # Add the user message to conversation history - self._state.input_list.append({"role": "user", "content": params.event.content.content}) - - # Echo user message back to UI - await adk.messages.create(task_id=params.task.id, content=params.event.content) - - # ============================================================================ - # STREAMING SETUP: Store task_id for the Interceptor - # ============================================================================ - # These instance variables are read by ContextWorkflowOutboundInterceptor - # which injects them into activity headers. This enables streaming without - # forking the Temporal plugin! - # - # How streaming works (Interceptor + Model Provider + Hooks): - # 1. We store task_id in workflow instance variable (here) - # 2. ContextWorkflowOutboundInterceptor reads it via workflow.instance() - # 3. Interceptor injects task_id into activity headers - # 4. ContextActivityInboundInterceptor extracts from headers - # 5. Sets streaming_task_id ContextVar inside the activity - # 6. TemporalStreamingModel reads from ContextVar and streams to Redis - # 7. TemporalStreamingHooks creates placeholder messages for tool calls - # - # This approach uses STANDARD Temporal components - no forked plugin needed! 
-        self._task_id = params.task.id
-        self._trace_id = params.task.id
-        self._parent_span_id = params.task.id
-
-        # ============================================================================
-        # HOOKS: Create Streaming Lifecycle Messages
-        # ============================================================================
-        # TemporalStreamingHooks integrates with OpenAI Agents SDK lifecycle events
-        # to create messages in the database for tool calls, reasoning, etc.
-        #
-        # What hooks do:
-        # - on_tool_call_start(): Creates tool_request message with arguments
-        # - on_tool_call_done(): Creates tool_response message with result
-        # - on_model_stream_part(): Called for each streaming chunk (handled by TemporalStreamingModel)
-        # - on_run_done(): Marks the final response as complete
-        #
-        # For human-in-the-loop workflows, hooks create messages showing:
-        # - Type: tool_request - Agent deciding to call wait_for_confirmation
-        # - Type: tool_response - Result after human approval (child workflow completion)
-        # - Type: text - Final agent response after approval received
-        #
-        # The hooks work alongside the interceptor/model streaming to provide
-        # a complete view of the agent's execution in the UI.
-        hooks = TemporalStreamingHooks(task_id=params.task.id)
-
-        # Create agent with human-in-the-loop capability
-        # The wait_for_confirmation tool spawns a child workflow that waits for external signals
-        confirm_order_agent = Agent(
-            name="Confirm Order",
-            instructions="You are a helpful confirm order agent. When a user asks you to confirm an order, use the wait_for_confirmation tool to wait for confirmation.",
-            tools=[
-                wait_for_confirmation,
-            ],
-        )
-
-        # Run agent - when human approval is needed, it will spawn child workflow and wait
-        # Hooks will create messages for tool calls, interceptor enables token streaming
-        # Wrap in tracing span to track this turn
-        async with adk.tracing.span(
-            trace_id=params.task.id,
-            name=f"Turn {self._state.turn_number}",
-            input=self._state.model_dump(),
-        ) as span:
-            self._parent_span_id = span.id if span else None
-            # Pass the conversation history to Runner.run to maintain context
-            result = await Runner.run(confirm_order_agent, self._state.input_list, hooks=hooks)
-
-            # Update the state with the conversation history for the next turn.
-            # RunResult exposes the full transcript via to_input_list() (the input
-            # items plus everything generated during this run).
-            self._state.input_list = result.to_input_list()
-
-            # Set span output for tracing - include full state
-            span.output = self._state.model_dump()
-
-    @workflow.run
-    async def on_task_create(self, params: CreateTaskParams) -> str:
-        """
-        Workflow entry point - starts the long-running human-in-the-loop agent.
-
-        Handles both automated decisions and human approval workflows durably.
-        To approve waiting actions: temporal workflow signal --workflow-id="child-workflow-id" --name="fulfill_order_signal" --input=true
-        """
-        logger.info(f"Received task create params: {params}")
-
-        # Initialize the conversation state with an empty history
-        self._state = StateModel(
-            input_list=[],
-            turn_number=0,
-        )
-
-        # Send welcome message when task is created
-        await adk.messages.create(
-            task_id=params.task.id,
-            content=TextContent(
-                author="agent",
-                content=f"Hello! I've received your task. Normally you can do some state initialization here, or just pass and do nothing until you get your first event. For now I'm just acknowledging that I've received a task with the following params:\n\n{json.dumps(params.params, indent=2)}.\n\nYou should only see this message once, when the task is created. All subsequent events will be handled by the `on_task_event_send` handler.",
-            ),
-        )
-
-        # Keep workflow running indefinitely to handle user messages and human approvals
-        # This survives system failures and can resume exactly where it left off
-        await workflow.wait_condition(
-            lambda: self._complete_task,
-            timeout=None,  # No timeout for long-running human-in-the-loop workflows
-        )
-        return "Task completed"
-
-    # TEMPORAL UI (localhost:8080):
-    # - Main workflow shows agent activities + ChildWorkflow activity when approval needed
-    # - Child workflow appears as separate "child-workflow-id" that waits for signal
-    # - Timeline: invoke_model_activity → ChildWorkflow (waiting) → invoke_model_activity (after approval)
-    #
-    # To approve: temporal workflow signal --workflow-id="child-workflow-id" --name="fulfill_order_signal" --input=true
-    # Production: Replace CLI with web dashboards/APIs that send signals programmatically
diff --git a/examples/tutorials/10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/pyproject.toml b/examples/tutorials/10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/pyproject.toml
deleted file mode 100644
index b38ee6e6..00000000
--- a/examples/tutorials/10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/pyproject.toml
+++ /dev/null
@@ -1,35 +0,0 @@
-[build-system]
-requires = ["hatchling"]
-build-backend = "hatchling.build"
-
-[project]
-name = "at080_open_ai_agents_sdk_human_in_the_loop"
-version = "0.1.0"
-description = "An AgentEx agent"
-requires-python = ">=3.12"
-dependencies = [
-    "agentex-sdk>=0.6.0",
-    "openai-agents>=0.4.2",
-    "temporalio>=1.18.2",
-    "scale-gp",
-]
-
-[project.optional-dependencies]
-dev = [
-    "pytest",
-    "black",
-    "isort",
-    "flake8",
-    "debugpy>=1.8.15",
-]
-
-[tool.hatch.build.targets.wheel]
-packages = ["project"]
-
-[tool.black]
-line-length = 88
-target-version = ['py312']
-
-[tool.isort]
-profile = "black"
-line_length = 88
diff --git a/examples/tutorials/10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/tests/test_agent.py b/examples/tutorials/10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/tests/test_agent.py
deleted file mode 100644
index 5b0c2f74..00000000
--- a/examples/tutorials/10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/tests/test_agent.py
+++ /dev/null
@@ -1,201 +0,0 @@
-"""
-Sample tests for AgentEx ACP agent with Human-in-the-Loop workflow.
-
-This test suite demonstrates how to test human-in-the-loop workflows:
-- Non-streaming event sending and polling
-- Detecting when workflow is waiting for human approval
-- Sending Temporal signals to approve/reject
-- Verifying workflow completes after approval
-
-To run these tests:
-1. Make sure the agent is running (via docker-compose or `agentex agents run`)
-2. Make sure Temporal is running (localhost:7233)
-3. Set the AGENTEX_API_BASE_URL environment variable if not using default
-4. 
Run: pytest test_agent.py -v - -Configuration: -- AGENTEX_API_BASE_URL: Base URL for the AgentEx server (default: http://localhost:5003) -- AGENT_NAME: Name of the agent to test (default: example-tutorial) -- TEMPORAL_ADDRESS: Temporal server address (default: localhost:7233) -""" - -import os -import uuid -import asyncio - -import pytest -import pytest_asyncio - -# Temporal imports for signaling child workflows -from temporalio.client import Client as TemporalClient -from test_utils.async_utils import ( - poll_messages, - send_event_and_poll_yielding, -) - -from agentex import AsyncAgentex -from agentex.types.task_message import TaskMessage -from agentex.types.agent_rpc_params import ParamsCreateTaskRequest - -# Configuration from environment variables -AGENTEX_API_BASE_URL = os.environ.get("AGENTEX_API_BASE_URL", "http://localhost:5003") -AGENT_NAME = os.environ.get("AGENT_NAME", "at080-open-ai-agents-sdk-human-in-the-loop") -TEMPORAL_ADDRESS = os.environ.get("TEMPORAL_ADDRESS", "localhost:7233") - - -@pytest_asyncio.fixture -async def client(): - """Create an AsyncAgentex client instance for testing.""" - client = AsyncAgentex(base_url=AGENTEX_API_BASE_URL) - yield client - await client.close() - - -@pytest_asyncio.fixture -async def temporal_client(): - """Create a Temporal client for sending signals to workflows.""" - client = await TemporalClient.connect(TEMPORAL_ADDRESS) - yield client - # Temporal client doesn't need explicit close in recent versions - - -@pytest.fixture -def agent_name(): - """Return the agent name for testing.""" - return AGENT_NAME - - -@pytest_asyncio.fixture -async def agent_id(client, agent_name): - """Retrieve the agent ID based on the agent name.""" - agents = await client.agents.list() - for agent in agents: - if agent.name == agent_name: - return agent.id - raise ValueError(f"Agent with name {agent_name} not found.") - - -class TestNonStreamingEvents: - """Test non-streaming event sending and polling with human-in-the-loop.""" - - @pytest.mark.asyncio - async def test_send_event_and_poll_with_human_approval(self, client: AsyncAgentex, agent_id: str, temporal_client: TemporalClient): - """Test sending an event that triggers human approval workflow.""" - # Create a task for this conversation - task_response = await client.agents.create_task(agent_id, params=ParamsCreateTaskRequest(name=uuid.uuid1().hex)) - task = task_response.result - assert task is not None - - # Poll for the initial task creation message - print(f"[DEBUG 080 POLL] Polling for initial task creation message...") - async for message in poll_messages( - client=client, - task_id=task.id, - timeout=30, - sleep_interval=1.0, - ): - assert isinstance(message, TaskMessage) - if message.content and message.content.type == "text" and message.content.author == "agent": - # Check for the initial acknowledgment message - print(f"[DEBUG 080 POLL] Initial message: {message.content.content[:100]}") - assert "task" in message.content.content.lower() or "received" in message.content.content.lower() - break - - # Send an event asking to confirm an order (triggers human-in-the-loop) - user_message = "Please confirm my order" - print(f"[DEBUG 080 POLL] Sending message: '{user_message}'") - - # Track what we've seen to ensure human-in-the-loop flow happened - seen_tool_request = False - seen_tool_response = False - found_final_response = False - child_workflow_detected = False - - # Start polling for messages in the background - async def poll_and_detect(): - nonlocal seen_tool_request, seen_tool_response, 
found_final_response, child_workflow_detected
-
-            async for message in send_event_and_poll_yielding(
-                client=client,
-                agent_id=agent_id,
-                task_id=task.id,
-                user_message=user_message,
-                timeout=120,  # Longer timeout for human-in-the-loop
-                sleep_interval=1.0,
-                yield_updates=True,  # Get all streaming chunks
-            ):
-                assert isinstance(message, TaskMessage)
-                print(f"[DEBUG 080 POLL] Received message - Type: {message.content.type if message.content else 'None'}, Author: {message.content.author if message.content else 'None'}, Status: {message.streaming_status}")
-
-                # Track tool_request messages (agent calling wait_for_confirmation)
-                if message.content and message.content.type == "tool_request":
-                    print(f"[DEBUG 080 POLL] ✅ Saw tool_request - agent is calling wait_for_confirmation tool")
-                    print(f"[DEBUG 080 POLL] 🔔 Child workflow should be spawned - will signal it to approve")
-                    seen_tool_request = True
-                    child_workflow_detected = True
-
-                # Track tool_response messages (child workflow completion)
-                if message.content and message.content.type == "tool_response":
-                    print(f"[DEBUG 080 POLL] ✅ Saw tool_response - child workflow completed after approval")
-                    seen_tool_response = True
-
-                # Track agent text messages and their streaming updates
-                if message.content and message.content.type == "text" and message.content.author == "agent":
-                    content_length = len(message.content.content) if message.content.content else 0
-                    print(f"[DEBUG 080 POLL] Agent text update - Status: {message.streaming_status}, Length: {content_length}")
-
-                    # Stop when we get DONE status with actual content
-                    if message.streaming_status == "DONE" and content_length > 0:
-                        print(f"[DEBUG 080 POLL] ✅ Streaming complete!")
-                        found_final_response = True
-                        break
-
-        # Start polling task
-        polling_task = asyncio.create_task(poll_and_detect())
-
-        # Wait a bit for the child workflow to be created
-        print(f"[DEBUG 080 POLL] Waiting for child workflow to spawn...")
-        await asyncio.sleep(5)
-
-        # Send signal to child workflow to approve the order
-        # The child workflow ID is fixed as "child-workflow-id" (see tools.py)
-        try:
-            print(f"[DEBUG 080 POLL] Sending approval signal to child workflow...")
-            handle = temporal_client.get_workflow_handle("child-workflow-id")
-            await handle.signal("fulfill_order_signal", True)
-            print(f"[DEBUG 080 POLL] ✅ Approval signal sent successfully!")
-        except Exception as e:
-            print(f"[DEBUG 080 POLL] ⚠️ Warning: Could not send signal to child workflow: {e}")
-            print(f"[DEBUG 080 POLL] This may be expected if workflow completed before signal could be sent")
-
-        # Wait for polling to complete
-        try:
-            await asyncio.wait_for(polling_task, timeout=60)
-        except asyncio.TimeoutError:
-            print(f"[DEBUG 080 POLL] ⚠️ Polling timed out - workflow may still be waiting")
-            polling_task.cancel()
-
-        # Verify that we saw the complete flow: tool_request -> human approval -> tool_response -> final answer
-        assert seen_tool_request, "Expected to see tool_request message (agent calling wait_for_confirmation)"
-        assert seen_tool_response, "Expected to see tool_response message (child workflow completion after approval)"
-        assert found_final_response, "Expected to see final text response after human approval"
-
-        print(f"[DEBUG 080 POLL] ✅ Human-in-the-loop workflow completed successfully!")
-
-
-class TestStreamingEvents:
-    """Test streaming event sending (backend verification via polling)."""
-
-    @pytest.mark.asyncio
-    async def test_send_event_and_stream(self, client: AsyncAgentex, agent_id: str):
""" - Streaming test placeholder. - - NOTE: SSE streaming is tested via the UI (agentex-ui subscribeTaskState). - Backend streaming functionality is verified in test_send_event_and_poll_with_human_approval. - """ - pass - - -if __name__ == "__main__": - pytest.main([__file__, "-v"]) diff --git a/examples/tutorials/10_async/10_temporal/090_claude_agents_sdk_mvp/.dockerignore b/examples/tutorials/10_async/10_temporal/090_claude_agents_sdk_mvp/.dockerignore deleted file mode 100644 index c4948947..00000000 --- a/examples/tutorials/10_async/10_temporal/090_claude_agents_sdk_mvp/.dockerignore +++ /dev/null @@ -1,43 +0,0 @@ -# Python -__pycache__/ -*.py[cod] -*$py.class -*.so -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -*.egg-info/ -.installed.cfg -*.egg - -# Environments -.env** -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# IDE -.idea/ -.vscode/ -*.swp -*.swo - -# Git -.git -.gitignore - -# Misc -.DS_Store diff --git a/examples/tutorials/10_async/10_temporal/090_claude_agents_sdk_mvp/.gitignore b/examples/tutorials/10_async/10_temporal/090_claude_agents_sdk_mvp/.gitignore deleted file mode 100644 index 4d50da2f..00000000 --- a/examples/tutorials/10_async/10_temporal/090_claude_agents_sdk_mvp/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -# Local environment variables (contains secrets) -.env.local - -# Workspace directory (created at runtime) -workspace/ diff --git a/examples/tutorials/10_async/10_temporal/090_claude_agents_sdk_mvp/Dockerfile b/examples/tutorials/10_async/10_temporal/090_claude_agents_sdk_mvp/Dockerfile deleted file mode 100644 index 5428e814..00000000 --- a/examples/tutorials/10_async/10_temporal/090_claude_agents_sdk_mvp/Dockerfile +++ /dev/null @@ -1,62 +0,0 @@ -# syntax=docker/dockerfile:1.3 -FROM python:3.12-slim -COPY --from=ghcr.io/astral-sh/uv:0.6.4 /uv /uvx /bin/ - -# Install system dependencies -RUN apt-get update && apt-get install -y \ - htop \ - vim \ - curl \ - tar \ - python3-dev \ - postgresql-client \ - build-essential \ - libpq-dev \ - gcc \ - cmake \ - netcat-openbsd \ - nodejs \ - npm \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/** - -# Install tctl (Temporal CLI) -RUN curl -L https://github.com/temporalio/tctl/releases/download/v1.18.1/tctl_1.18.1_linux_arm64.tar.gz -o /tmp/tctl.tar.gz && \ - tar -xzf /tmp/tctl.tar.gz -C /usr/local/bin && \ - chmod +x /usr/local/bin/tctl && \ - rm /tmp/tctl.tar.gz - -RUN uv pip install --system --upgrade pip setuptools wheel - -ENV UV_HTTP_TIMEOUT=1000 - -# Copy pyproject.toml and README.md to install dependencies -COPY 10_async/10_temporal/090_claude_agents_sdk_mvp/pyproject.toml /app/090_claude_agents_sdk_mvp/pyproject.toml -COPY 10_async/10_temporal/090_claude_agents_sdk_mvp/README.md /app/090_claude_agents_sdk_mvp/README.md - -WORKDIR /app/090_claude_agents_sdk_mvp - -# Copy the project code -COPY 10_async/10_temporal/090_claude_agents_sdk_mvp/project /app/090_claude_agents_sdk_mvp/project - -# Copy the test files -COPY 10_async/10_temporal/090_claude_agents_sdk_mvp/tests /app/090_claude_agents_sdk_mvp/tests - -# Copy shared test utilities -COPY test_utils /app/test_utils - -# Install the required Python packages with dev dependencies -RUN uv pip install --system .[dev] - -WORKDIR /app/090_claude_agents_sdk_mvp - -# Set environment variables -ENV PYTHONPATH=/app - -# Set test environment variables -ENV AGENT_NAME=at090-claude-agents-sdk-mvp -# Run the ACP server using uvicorn -CMD ["uvicorn", "project.acp:acp", "--host", "0.0.0.0", "--port", 
"8000"] - -# When we deploy the worker, we will replace the CMD with the following -# CMD ["python", "-m", "run_worker"] diff --git a/examples/tutorials/10_async/10_temporal/090_claude_agents_sdk_mvp/README.md b/examples/tutorials/10_async/10_temporal/090_claude_agents_sdk_mvp/README.md deleted file mode 100644 index 2f40e53c..00000000 --- a/examples/tutorials/10_async/10_temporal/090_claude_agents_sdk_mvp/README.md +++ /dev/null @@ -1,338 +0,0 @@ -# Claude Agents SDK Integration with AgentEx - -Integration of Claude Agents SDK with AgentEx's Temporal-based orchestration platform. Claude agents run in durable workflows with real-time streaming to the AgentEx UI. - -> โš ๏ธ **Note**: This integration is designed for local agent development and single-worker deployments. For distributed multi-worker Kubernetes deployments, additional infrastructure is required (see [Deployment Considerations](#deployment-considerations) below). - -## Features - -- **Durable Execution** - Workflows survive restarts via Temporal's event sourcing (single-worker) -- **Session Resume** - Conversation context maintained across turns via `session_id` -- **Workspace Isolation** - Each task gets dedicated directory for file operations -- **Real-time Streaming** - Text and tool calls stream to UI via Redis -- **Tool Execution** - Read, Write, Edit, Bash, Grep, Glob with visibility in UI -- **Subagents** - Specialized agents (code-reviewer, file-organizer) with nested tracing -- **Cost Tracking** - Token usage and API costs logged per turn -- **Automatic Retries** - Temporal policies handle transient failures - -## How It Works - -### Architecture - -``` -โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” -โ”‚ Temporal Workflow โ”‚ -โ”‚ - Stores session_id in state โ”‚ -โ”‚ - Tracks turn number โ”‚ -โ”‚ - Sets _task_id, _trace_id โ”‚ -โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ - โ”‚ execute_activity - โ†“ -โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” -โ”‚ run_claude_agent_activity โ”‚ -โ”‚ - Reads context from ContextVarโ”‚ -โ”‚ - Configures Claude SDK โ”‚ -โ”‚ - Processes messages via hooks โ”‚ -โ”‚ - Returns session_id โ”‚ -โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ - โ”‚ ClaudeSDKClient - โ†“ -โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” -โ”‚ Claude SDK โ”‚ -โ”‚ - Maintains session โ”‚ -โ”‚ - Calls Anthropic API โ”‚ -โ”‚ - Executes tools in workspace โ”‚ -โ”‚ - Triggers hooks โ”‚ -โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ -``` - -### Context Threading - -The integration reuses AgentEx's `ContextInterceptor` pattern (originally built for OpenAI): - -1. **Workflow** stores `_task_id`, `_trace_id`, `_parent_span_id` as instance variables -2. **ContextInterceptor (outbound)** reads these from workflow instance, injects into activity headers -3. **ContextInterceptor (inbound)** extracts from headers, sets `ContextVar` values -4. **Activity** reads `ContextVar` to get task_id for streaming - -This enables real-time streaming without breaking Temporal's determinism requirements. - -### Session Management - -Claude SDK sessions are preserved across turns: - -1. **First turn**: Claude SDK creates session, returns `session_id` in `SystemMessage` -2. 
-
-### Tool Streaming via Hooks
-
-Tool lifecycle events are handled by Claude SDK hooks:
-
-**PreToolUse Hook**:
-- Called before tool execution
-- Streams `ToolRequestContent` to the UI → shows "Using tool: Write"
-- Creates a nested span for the Task tool (subagents)
-
-**PostToolUse Hook**:
-- Called after tool execution
-- Streams `ToolResponseContent` to the UI → shows "Used tool: Write"
-- Closes subagent spans with results
-
-### Subagent Execution
-
-Subagents are defined as `AgentDefinition` objects passed to the Claude SDK:
-
-```python
-agents={
-    'code-reviewer': AgentDefinition(
-        description='Expert code review specialist...',
-        prompt='You are a code reviewer...',
-        tools=['Read', 'Grep', 'Glob'],  # Read-only
-        model='sonnet',
-    )
-}
-```
-
-When Claude uses the Task tool, the SDK routes to the appropriate subagent based on description matching. Subagent execution is tracked via nested tracing spans.
-
-## Code Structure
-
-```
-claude_agents/
-├── __init__.py           # Public exports
-├── activities.py         # Temporal activities
-│   ├── create_workspace_directory
-│   └── run_claude_agent_activity
-├── message_handler.py    # Message processing
-│   └── ClaudeMessageHandler
-│       ├── Streams text blocks
-│       ├── Extracts session_id
-│       └── Extracts usage/cost
-└── hooks/
-    └── hooks.py          # Claude SDK hooks
-        └── TemporalStreamingHooks
-            ├── pre_tool_use
-            └── post_tool_use
-```
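-
-For orientation, here is a condensed sketch of how these pieces could fit together inside `run_claude_agent_activity`. This is illustrative only: it is simplified from the real module, and the `claude_agent_sdk` call shapes (`HookMatcher`, `receive_response`) are assumed from the snippets elsewhere in this README.
-
-```python
-from claude_agent_sdk import ClaudeAgentOptions, ClaudeSDKClient, HookMatcher
-
-async def _run_turn(prompt, workspace, session_id, hooks, agent_defs):
-    # Sketch only - the real activity also reads task_id/trace_id from ContextVars.
-    options = ClaudeAgentOptions(
-        cwd=workspace,            # workspace isolation per task
-        resume=session_id,        # None on the first turn
-        agents=agent_defs,        # AgentDefinition subagents
-        hooks={
-            "PreToolUse": [HookMatcher(hooks=[hooks.pre_tool_use])],
-            "PostToolUse": [HookMatcher(hooks=[hooks.post_tool_use])],
-        },
-    )
-    async with ClaudeSDKClient(options=options) as client:
-        await client.query(prompt)
-        async for message in client.receive_response():  # must iterate to completion
-            ...  # ClaudeMessageHandler streams text and extracts session_id/usage here
-```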
-
-## Deployment Considerations
-
-This integration works well for local development and single-worker deployments. For distributed multi-worker production deployments, consider the following:
-
-### ⚠️ Session Persistence (Multi-Worker)
-
-**Current behavior**: Claude SDK sessions are tied to the worker process.
-
-- **Local dev**: ✅ Works - the session persists within a single worker
-- **K8s multi-pod**: ⚠️ The session ID is stored in Temporal state, but the session itself lives in the Claude CLI process
-- **Impact**: If a task moves to a different pod, the session becomes invalid
-- **Infrastructure needed**: A session persistence layer, or sticky routing to the same pod
-
-### ⚠️ Workspace Storage (Multi-Worker)
-
-**Current behavior**: Workspaces are local directories (`./workspace/{task_id}`).
-
-- **Local dev**: ✅ Works - a single worker accesses all files
-- **K8s multi-pod**: ⚠️ Each pod has an isolated filesystem
-- **Impact**: Files created by one pod are invisible to other pods
-- **Infrastructure needed**: Shared storage (NFS, EFS, GCS Fuse) via the `CLAUDE_WORKSPACE_ROOT` env var
-
-**Solution for production**:
-```bash
-# Mount shared filesystem (NFS, EFS, etc.) to all pods
-export CLAUDE_WORKSPACE_ROOT=/mnt/shared/workspaces
-
-# All workers will now share workspace access
-```
-
-### ℹ️ Filesystem-Based Configuration
-
-**Current approach**: Agents and configuration are defined programmatically in code.
-
-- **Not used**: `.claude/agents/`, `.claude/skills/`, `CLAUDE.md` files
-- **Why**: Aligns with AgentEx's code-as-configuration philosophy
-- **Trade-off**: More explicit and version-controlled, but can't leverage existing Claude configs
-- **To enable**: Would need to add `setting_sources=["project"]` to `ClaudeAgentOptions`
-
-**Current approach** (programmatic config in workflow.py):
-```python
-subagents = {
-    'code-reviewer': AgentDefinition(
-        description='...',
-        prompt='...',
-        tools=['Read', 'Grep', 'Glob'],
-        model='sonnet',
-    ),
-}
-```
-
----
-
-**Summary**: The integration is production-ready for **single-worker deployments**. Multi-worker deployments require additional infrastructure for session persistence and workspace sharing.
-
-## Quick Start
-
-### Prerequisites
-
-- Temporal server (localhost:7233)
-- Redis (localhost:6379)
-- Anthropic API key
-
-### Run
-
-```bash
-# Install
-rye sync --all-features
-
-# Configure
-export ANTHROPIC_API_KEY="your-key"
-export REDIS_URL="redis://localhost:6379"
-export TEMPORAL_ADDRESS="localhost:7233"
-
-# Run from repository root
-uv run agentex agents run --manifest examples/tutorials/10_async/10_temporal/090_claude_agents_sdk_mvp/manifest.yaml
-```
-
-## Example Interactions
-
-### Context Preservation
-
-```
-User: "Your name is Jose"
-Claude: "Nice to meet you! I'm Jose..."
-
-User: "What name did I assign to you?"
-Claude: "You asked me to go by Jose!"  ← Remembers context
-```
-
-### Tool Usage
-
-```
-User: "Create a hello.c file with Hello World"
-Claude: *streams response*
-[Tool card appears: "Using tool: Write"]
-[Tool card updates: "Used tool: Write"]
-"Done! I've created hello.c..."
-```
-
-### Subagents
-
-```
-User: "Review the code quality in hello.c"
-Claude: *delegates to code-reviewer*
-[Tool card: "Using tool: Task" with subagent_type: "code-reviewer"]
-[Traces view shows: "Subagent: code-reviewer" nested under turn]
-```
-
-## Behind the Scenes
-
-### Message Flow
-
-When a user sends a message:
-
-1. **Signal received** (`on_task_event_send`) - Workflow increments the turn and echoes the message
-2. **Span created** - A tracing span wraps the turn and stores `parent_span_id` for the interceptor
-3. **Activity called** - Workflow passes the prompt, workspace, session_id, and subagent defs
-4. **Context threaded** - The interceptor injects task_id/trace_id into activity headers
-5. **Activity starts** - Reads context from the ContextVar and creates hooks
-6. **Claude executes** - The SDK uses hooks to stream tools; message_handler streams text
-7. **Results returned** - The activity returns session_id, usage, and cost
-8. **State updated** - Workflow stores the session_id for the next turn
-
-### Streaming Pipeline
-
-**Text streaming**:
-```
-Claude SDK → TextBlock → ClaudeMessageHandler._handle_text_block()
-→ TextDelta → adk.streaming.stream_update()
-→ Redis XADD → AgentEx UI
-```
-
-**Tool streaming**:
-```
-Claude SDK → PreToolUse hook → ToolRequestContent
-→ adk.streaming (via hook) → Redis → UI ("Using tool...")
-
-Tool executes...
-
-Claude SDK → PostToolUse hook → ToolResponseContent
-→ adk.streaming (via hook) → Redis → UI ("Used tool...")
-```
-
-### Subagent Tracing
-
-When the Task tool is detected in the PreToolUse hook:
-
-```python
-# Create nested span
-span_ctx = adk.tracing.span(
-    trace_id=trace_id,
-    parent_id=parent_span_id,
-    name=f"Subagent: {subagent_type}",
-    input=tool_input,
-)
-span = await span_ctx.__aenter__()
-
-# Store for PostToolUse to close
-self.subagent_spans[tool_use_id] = (span_ctx, span)
-```
-
-In the PostToolUse hook, the span is closed with results, creating a complete nested trace.
-
-## Key Implementation Details
-
-### Temporal Determinism
-
-- **File I/O in activities**: `create_workspace_directory` is an activity (not workflow code)
-- **Message iteration completes**: Use `receive_response()` (not `receive_messages()`)
-- **State is serializable**: `StateModel` uses a Pydantic BaseModel
-
-### AgentDefinition Serialization
-
-Temporal serializes activity arguments to JSON. AgentDefinition dataclasses become dicts, so the activity reconstructs them:
-
-```python
-agent_defs = {
-    name: AgentDefinition(**agent_data)
-    for name, agent_data in agents.items()
-}
-```
-
-### Hook Callback Signatures
-
-Claude SDK expects specific signatures:
-
-```python
-async def pre_tool_use(
-    input_data: dict[str, Any],   # Contains tool_name, tool_input
-    tool_use_id: str | None,      # Unique ID for this call
-    context: Any,                 # HookContext (currently unused)
-) -> dict[str, Any]:              # Return {} to allow, or modify behavior
-```
-
-## Comparison with OpenAI Integration
-
-| Aspect | OpenAI | Claude |
-|--------|--------|--------|
-| **Plugin** | `OpenAIAgentsPlugin` (official) | Manual activity wrapper |
-| **Streaming** | Token-level deltas | Message block-level |
-| **Tool Results** | `ToolResultBlock` | `UserMessage` (with acceptEdits) |
-| **Hooks** | `RunHooks` class | `HookMatcher` with callbacks |
-| **Context Threading** | ContextInterceptor | ContextInterceptor (reused!) |
-| **Subagents** | Agent handoffs | AgentDefinition config |
-
-## Notes
-
-**Message Block Streaming**: Claude SDK returns complete text blocks, not individual tokens. Text appears instantly rather than animating character-by-character. This is inherent to Claude SDK's API design.
-
-**In-Process Subagents**: Subagents run within the Claude SDK via config-based routing, not as separate Temporal workflows. This is by design - subagents are specializations, not independent agents.
-
-**Manual Activity Calls**: Unlike OpenAI, which has an official Temporal plugin, the Claude integration requires explicit `workflow.execute_activity()` calls. A future enhancement could create an automatic plugin.
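-
-To make the callback contract above concrete, here is a trimmed, illustrative skeleton in the spirit of `TemporalStreamingHooks` (the real implementation lives in `hooks/hooks.py`; the actual streaming call is elided because its exact signature is not shown in this README):
-
-```python
-from typing import Any
-
-class StreamingHooksSketch:
-    """Sketch only - mirrors the shape of TemporalStreamingHooks."""
-
-    def __init__(self, task_id: str):
-        self.task_id = task_id
-        self.subagent_spans: dict[str, Any] = {}  # tool_use_id -> (span_ctx, span)
-
-    async def pre_tool_use(
-        self,
-        input_data: dict[str, Any],   # contains tool_name, tool_input
-        tool_use_id: str | None,
-        context: Any,                 # HookContext (unused here)
-    ) -> dict[str, Any]:
-        tool_name = input_data.get("tool_name", "")
-        # Stream a ToolRequestContent here so the UI shows "Using tool: <name>",
-        # and open a nested tracing span when tool_name == "Task" (subagent).
-        return {}  # empty dict = allow the tool call unchanged
-```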
- -## License - -Apache 2.0 (same as AgentEx SDK) diff --git a/examples/tutorials/10_async/10_temporal/090_claude_agents_sdk_mvp/manifest.yaml b/examples/tutorials/10_async/10_temporal/090_claude_agents_sdk_mvp/manifest.yaml deleted file mode 100644 index 2c1ce21d..00000000 --- a/examples/tutorials/10_async/10_temporal/090_claude_agents_sdk_mvp/manifest.yaml +++ /dev/null @@ -1,74 +0,0 @@ -kind: Agent - -# Build Configuration -build: - context: - # Root directory for the build context - root: ../../../ # Up to tutorials level to include test_utils - - # Paths to include in the Docker build context - # Must include: - # - Your agent's directory (your custom agent code) - # These paths are collected and sent to the Docker daemon for building - include_paths: - - 10_async/10_temporal/090_claude_agents_sdk_mvp - - test_utils - - # Path to your agent's Dockerfile - # This defines how your agent's image is built from the context - # Relative to the root directory - dockerfile: 10_async/10_temporal/090_claude_agents_sdk_mvp/Dockerfile - - # Path to your agent's .dockerignore - # Filters unnecessary files from the build context - # Helps keep build context small and builds fast - dockerignore: 10_async/10_temporal/090_claude_agents_sdk_mvp/.dockerignore - -# Local Development Configuration -local_development: - agent: - port: 8000 - host_address: host.docker.internal - paths: - acp: project/acp.py - worker: project/run_worker.py - -# Agent Configuration -agent: - acp_type: async - name: claude-mvp-agent - description: Claude Agents SDK MVP - proof of concept integration with AgentEx - - temporal: - enabled: true - workflows: - - name: ClaudeMvpWorkflow - queue_name: claude-mvp-queue - - credentials: - - env_var_name: ANTHROPIC_API_KEY - secret_name: anthropic-api-key - secret_key: api-key - - env_var_name: REDIS_URL - secret_name: redis-url-secret - secret_key: url - -# Deployment Configuration -deployment: - image: - repository: "" - tag: "latest" - imagePullSecrets: - - name: my-registry-secret - global: - agent: - name: "claude-mvp-agent" - description: "Claude Agents SDK MVP" - replicaCount: 1 - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "1000m" - memory: "2Gi" diff --git a/examples/tutorials/10_async/10_temporal/090_claude_agents_sdk_mvp/project/acp.py b/examples/tutorials/10_async/10_temporal/090_claude_agents_sdk_mvp/project/acp.py deleted file mode 100644 index fdb08ded..00000000 --- a/examples/tutorials/10_async/10_temporal/090_claude_agents_sdk_mvp/project/acp.py +++ /dev/null @@ -1,75 +0,0 @@ -import os -import sys - -from temporalio.contrib.openai_agents import OpenAIAgentsPlugin - -# === DEBUG SETUP (AgentEx CLI Debug Support) === -if os.getenv("AGENTEX_DEBUG_ENABLED") == "true": - print("test me") - try: - import debugpy - - debug_port = int(os.getenv("AGENTEX_DEBUG_PORT", "5679")) - debug_type = os.getenv("AGENTEX_DEBUG_TYPE", "acp") - wait_for_attach = os.getenv("AGENTEX_DEBUG_WAIT_FOR_ATTACH", "false").lower() == "true" - - # Configure debugpy - debugpy.configure(subProcess=False) - debugpy.listen(debug_port) - - print(f"๐Ÿ› [{debug_type.upper()}] Debug server listening on port {debug_port}") - - if wait_for_attach: - print(f"โณ [{debug_type.upper()}] Waiting for debugger to attach...") - debugpy.wait_for_client() - print(f"โœ… [{debug_type.upper()}] Debugger attached!") - else: - print(f"๐Ÿ“ก [{debug_type.upper()}] Ready for debugger attachment") - - except ImportError: - print("โŒ debugpy not available. 
Install with: pip install debugpy") - sys.exit(1) - except Exception as e: - print(f"โŒ Debug setup failed: {e}") - sys.exit(1) -# === END DEBUG SETUP === - -from agentex.lib.types.fastacp import TemporalACPConfig -from agentex.lib.sdk.fastacp.fastacp import FastACP -from agentex.lib.core.temporal.plugins.openai_agents.models.temporal_streaming_model import ( - TemporalStreamingModelProvider, -) -from agentex.lib.core.temporal.plugins.openai_agents.interceptors.context_interceptor import ContextInterceptor - -context_interceptor = ContextInterceptor() -temporal_streaming_model_provider = TemporalStreamingModelProvider() - -# Create the ACP server -acp = FastACP.create( - acp_type="async", - config=TemporalACPConfig( - # When deployed to the cluster, the Temporal address will automatically be set to the cluster address - # For local development, we set the address manually to talk to the local Temporal service set up via docker compose - # We are also adding the Open AI Agents SDK plugin to the ACP. - type="temporal", - temporal_address=os.getenv("TEMPORAL_ADDRESS", "localhost:7233"), - plugins=[OpenAIAgentsPlugin(model_provider=temporal_streaming_model_provider)], - interceptors=[context_interceptor], - ), -) - - -# Notice that we don't need to register any handlers when we use type="temporal" -# If you look at the code in agentex.sdk.fastacp.impl.temporal_acp -# You can see that these handlers are automatically registered when the ACP is created - -# @acp.on_task_create -# This will be handled by the method in your workflow that is decorated with @workflow.run - -# @acp.on_task_event_send -# This will be handled by the method in your workflow that is decorated with @workflow.signal(name=SignalName.RECEIVE_MESSAGE) - -# @acp.on_task_cancel -# This does not need to be handled by your workflow. -# It is automatically handled by the temporal client which cancels the workflow directly - diff --git a/examples/tutorials/10_async/10_temporal/090_claude_agents_sdk_mvp/project/run_worker.py b/examples/tutorials/10_async/10_temporal/090_claude_agents_sdk_mvp/project/run_worker.py deleted file mode 100644 index a969cd76..00000000 --- a/examples/tutorials/10_async/10_temporal/090_claude_agents_sdk_mvp/project/run_worker.py +++ /dev/null @@ -1,85 +0,0 @@ -"""Claude MVP Worker - Minimal setup - -This worker demonstrates the minimal setup needed to run Claude agents -in AgentEx's Temporal architecture. - -Key components: -- ClaudeSDKClient activity (run_claude_agent_activity) -- ContextInterceptor (reused from OpenAI - threads task_id) -- Standard AgentEx activities (messages, streaming, tracing) -""" - -import os -import asyncio - -# Import workflow -from project.workflow import ClaudeMvpWorkflow - -from agentex.lib.utils.logging import make_logger -from agentex.lib.environment_variables import EnvironmentVariables -from agentex.lib.core.temporal.activities import get_all_activities -from agentex.lib.core.temporal.workers.worker import AgentexWorker - -# Import Claude components -from agentex.lib.core.temporal.plugins.claude_agents import ( - ContextInterceptor, # Reuse from OpenAI! 
- run_claude_agent_activity, - create_workspace_directory, -) - -logger = make_logger(__name__) - - -async def main(): - """Start the Claude MVP worker""" - - environment_variables = EnvironmentVariables.refresh() - - logger.info("=" * 80) - logger.info("CLAUDE MVP WORKER STARTING") - logger.info("=" * 80) - logger.info(f"Workflow: {environment_variables.WORKFLOW_NAME}") - logger.info(f"Task Queue: {environment_variables.WORKFLOW_TASK_QUEUE}") - logger.info(f"Temporal Address: {environment_variables.TEMPORAL_ADDRESS}") - logger.info(f"Redis URL: {environment_variables.REDIS_URL}") - logger.info(f"Workspace Root: {environment_variables.CLAUDE_WORKSPACE_ROOT}") - logger.info(f"ANTHROPIC_API_KEY: {'SET' if os.environ.get('ANTHROPIC_API_KEY') else 'NOT SET (will fail when activity runs)'}") - - # Get all standard AgentEx activities - activities = get_all_activities() - - # Add Claude-specific activities - activities.append(run_claude_agent_activity) - activities.append(create_workspace_directory) - - logger.info(f"Registered {len(activities)} activities (including Claude activity)") - - # Create context interceptor (reuse from OpenAI!) - context_interceptor = ContextInterceptor() - - # Create worker with interceptor - worker = AgentexWorker( - task_queue=environment_variables.WORKFLOW_TASK_QUEUE, - interceptors=[context_interceptor], # Threads task_id to activities! - plugins=[], # No plugin for MVP - manual activity wrapping - ) - - logger.info("=" * 80) - logger.info("๐Ÿš€ WORKER READY - Listening for tasks...") - logger.info("=" * 80) - - # Run worker - await worker.run( - activities=activities, - workflow=ClaudeMvpWorkflow, - ) - - -if __name__ == "__main__": - try: - asyncio.run(main()) - except KeyboardInterrupt: - logger.info("\n๐Ÿ›‘ Worker stopped by user") - except Exception as e: - logger.error(f"โŒ Worker failed: {e}", exc_info=True) - raise diff --git a/examples/tutorials/10_async/10_temporal/090_claude_agents_sdk_mvp/project/workflow.py b/examples/tutorials/10_async/10_temporal/090_claude_agents_sdk_mvp/project/workflow.py deleted file mode 100644 index c2204515..00000000 --- a/examples/tutorials/10_async/10_temporal/090_claude_agents_sdk_mvp/project/workflow.py +++ /dev/null @@ -1,240 +0,0 @@ -"""Claude Agents SDK MVP - Minimal working example - -This workflow demonstrates the basic integration pattern between Claude Agents SDK -and AgentEx's Temporal architecture. 
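-
-The signal handler below stores the Claude session_id returned by the activity
-in StateModel (checkpointed by Temporal) and passes it back as resume on the
-next turn, so conversation context survives worker restarts.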
- -What this proves: -- โœ… Claude agent runs in Temporal workflow -- โœ… File operations isolated to workspace -- โœ… Basic text streaming to UI -- โœ… Visible in Temporal UI as activities -- โœ… Temporal retry policies work - -What's missing (see NEXT_STEPS.md): -- Tool call streaming -- Proper plugin architecture -- Subagents -- Tracing -""" -from __future__ import annotations - -import os -from datetime import timedelta - -from temporalio import workflow -from temporalio.common import RetryPolicy -from claude_agent_sdk.types import AgentDefinition - -from agentex.lib import adk -from agentex.lib.types.acp import SendEventParams, CreateTaskParams -from agentex.lib.utils.logging import make_logger -from agentex.types.text_content import TextContent -from agentex.lib.utils.model_utils import BaseModel -from agentex.lib.environment_variables import EnvironmentVariables -from agentex.lib.core.temporal.types.workflow import SignalName -from agentex.lib.core.temporal.workflows.workflow import BaseWorkflow - -# Import Claude activities -from agentex.lib.core.temporal.plugins.claude_agents import ( - run_claude_agent_activity, - create_workspace_directory, -) - -environment_variables = EnvironmentVariables.refresh() - -if environment_variables.WORKFLOW_NAME is None: - raise ValueError("Environment variable WORKFLOW_NAME is not set") - -if environment_variables.AGENT_NAME is None: - raise ValueError("Environment variable AGENT_NAME is not set") - -logger = make_logger(__name__) - - -class StateModel(BaseModel): - """Workflow state for Claude session tracking - - Stores Claude session ID to maintain conversation context across turns. - This allows Claude to remember previous messages and answer follow-up questions. - """ - claude_session_id: str | None = None - turn_number: int = 0 - - -@workflow.defn(name=environment_variables.WORKFLOW_NAME) -class ClaudeMvpWorkflow(BaseWorkflow): - """Minimal Claude agent workflow - MVP v0 - - This workflow: - 1. Creates isolated workspace for task - 2. Receives user messages via signals - 3. Runs Claude via Temporal activity - 4. Returns responses to user - - Key features: - - Durable execution (survives restarts) - - Workspace isolation - - Automatic retries - - Visible in Temporal UI - """ - - def __init__(self): - super().__init__(display_name=environment_variables.AGENT_NAME) - self._complete_task = False - self._state: StateModel | None = None - self._task_id = None - self._trace_id = None - self._parent_span_id = None - self._workspace_path = None - - @workflow.signal(name=SignalName.RECEIVE_EVENT) - async def on_task_event_send(self, params: SendEventParams): - """Handle user message - run Claude agent""" - - logger.info(f"Received task message: {params.event.content.content[:100]}...") - - if self._state is None: - raise ValueError("State is not initialized") - - self._task_id = params.task.id - self._trace_id = params.task.id - self._state.turn_number += 1 - - # Echo user message to UI - await adk.messages.create( - task_id=params.task.id, - content=params.event.content - ) - - # Wrap in tracing span - THIS IS REQUIRED for ContextInterceptor to work! 
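-        # Why this matters: the outbound ContextInterceptor cannot see local
-        # variables. It reads self._task_id / self._trace_id / self._parent_span_id
-        # off the workflow instance and injects them into activity headers, so the
-        # span below must set _parent_span_id before the activity call.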
- async with adk.tracing.span( - trace_id=params.task.id, - name=f"Turn {self._state.turn_number}", - input={ - "prompt": params.event.content.content, - "session_id": self._state.claude_session_id, - }, - ) as span: - self._parent_span_id = span.id if span else None - - try: - # Define subagents for specialized tasks - subagents = { - 'code-reviewer': AgentDefinition( - description='Expert code review specialist. Use when analyzing code quality, security, or best practices.', - prompt='You are a code review expert. Analyze code for bugs, security issues, and best practices. Be thorough but concise.', - tools=['Read', 'Grep', 'Glob'], # Read-only - model='sonnet', - ), - 'file-organizer': AgentDefinition( - description='File organization specialist. Use when creating multiple files or organizing project layout.', - prompt='You are a file organization expert. Create well-structured projects with clear naming.', - tools=['Write', 'Read', 'Bash', 'Glob'], - model='haiku', # Faster model - ), - } - - # Run Claude via activity (manual wrapper for MVP) - # ContextInterceptor reads _task_id, _trace_id, _parent_span_id and threads to activity! - result = await workflow.execute_activity( - run_claude_agent_activity, - args=[ - params.event.content.content, # prompt - self._workspace_path, # workspace - ["Read", "Write", "Edit", "Bash", "Grep", "Glob", "Task"], # allowed tools (Task for subagents!) - "acceptEdits", # permission mode - "You are a helpful coding assistant. Be concise.", # system prompt - self._state.claude_session_id, # resume session for context! - subagents, # subagent definitions! - ], - start_to_close_timeout=timedelta(minutes=5), - retry_policy=RetryPolicy( - maximum_attempts=3, - initial_interval=timedelta(seconds=1), - maximum_interval=timedelta(seconds=10), - backoff_coefficient=2.0, - ), - ) - - # Update session_id for next turn (maintains conversation context) - new_session_id = result.get("session_id") - if new_session_id: - self._state.claude_session_id = new_session_id - logger.info( - f"Turn {self._state.turn_number}: " - f"session_id={'STARTED' if self._state.turn_number == 1 else 'CONTINUED'} " - f"({new_session_id[:16]}...)" - ) - else: - logger.warning(f"No session_id returned - context may not persist") - - # Response already streamed to UI by activity - no need to send again - logger.debug(f"Turn {self._state.turn_number} completed successfully") - - except Exception as e: - logger.error(f"Error running Claude agent: {e}", exc_info=True) - # Send error message to user - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content=f"โŒ Error: {str(e)}", - ) - ) - raise - - @workflow.run - async def on_task_create(self, params: CreateTaskParams): - """Initialize workflow - create workspace and send welcome""" - - logger.info(f"Creating Claude MVP workflow for task: {params.task.id}") - - # Initialize state with session tracking - self._state = StateModel( - claude_session_id=None, - turn_number=0, - ) - - # Create workspace via activity (avoids determinism issues with file I/O) - workspace_root = os.environ.get("CLAUDE_WORKSPACE_ROOT") - self._workspace_path = await workflow.execute_activity( - create_workspace_directory, - args=[params.task.id, workspace_root], - start_to_close_timeout=timedelta(seconds=10), - ) - - logger.info(f"Workspace ready: {self._workspace_path}") - - # Send welcome message - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content=( - "๐Ÿš€ **Claude 
MVP Agent Ready!**\n\n" - f"Workspace: `{self._workspace_path}`\n\n" - "I'm powered by Claude Agents SDK + Temporal. Try asking me to:\n" - "- Create files: *'Create a hello.py file'*\n" - "- Read files: *'What's in hello.py?'*\n" - "- Run commands: *'List files in the workspace'*\n\n" - "Send me a message to get started! ๐Ÿ’ฌ" - ), - format="markdown", - ) - ) - - # Wait for completion signal - logger.info("Waiting for task completion...") - await workflow.wait_condition( - lambda: self._complete_task, - timeout=None, # Long-running workflow - ) - - logger.info("Claude MVP workflow completed") - return "Task completed successfully" - - @workflow.signal - async def complete_task_signal(self): - """Signal to gracefully complete the workflow""" - logger.info("Received complete_task signal") - self._complete_task = True diff --git a/examples/tutorials/10_async/10_temporal/090_claude_agents_sdk_mvp/pyproject.toml b/examples/tutorials/10_async/10_temporal/090_claude_agents_sdk_mvp/pyproject.toml deleted file mode 100644 index 12573a90..00000000 --- a/examples/tutorials/10_async/10_temporal/090_claude_agents_sdk_mvp/pyproject.toml +++ /dev/null @@ -1,35 +0,0 @@ -[build-system] -requires = ["hatchling"] -build-backend = "hatchling.build" - -[project] -name = "at090_claude_agents_sdk_mvp" -version = "0.1.0" -description = "Claude Agents SDK integration with AgentEx Temporal workflows - MVP proof of concept" -requires-python = ">=3.12" -dependencies = [ - "agentex-sdk>=0.6.0", - "claude-agent-sdk>=0.1.0", - "temporalio>=1.18.2", - "anthropic>=0.40.0", -] - -[project.optional-dependencies] -dev = [ - "pytest", - "black", - "isort", - "flake8", - "debugpy>=1.8.15", -] - -[tool.hatch.build.targets.wheel] -packages = ["project"] - -[tool.black] -line-length = 88 -target-version = ['py312'] - -[tool.isort] -profile = "black" -line_length = 88 \ No newline at end of file diff --git a/examples/tutorials/10_async/10_temporal/090_claude_agents_sdk_mvp/tests/test_agent.py b/examples/tutorials/10_async/10_temporal/090_claude_agents_sdk_mvp/tests/test_agent.py deleted file mode 100644 index aae1cb91..00000000 --- a/examples/tutorials/10_async/10_temporal/090_claude_agents_sdk_mvp/tests/test_agent.py +++ /dev/null @@ -1,67 +0,0 @@ -import os - -# import uuid -# import asyncio -import pytest -import pytest_asyncio - -# from test_utils.async_utils import ( -# poll_messages, -# stream_agent_response, -# send_event_and_poll_yielding, -# ) -from agentex import AsyncAgentex - -# from agentex.types import TaskMessage -# from agentex.types.agent_rpc_params import ParamsCreateTaskRequest -# from agentex.types.text_content_param import TextContentParam - -# Configuration from environment variables -AGENTEX_API_BASE_URL = os.environ.get("AGENTEX_API_BASE_URL", "http://localhost:5003") -AGENT_NAME = os.environ.get("AGENT_NAME", "claude_") - - -@pytest_asyncio.fixture -async def client(): - """Create an AgentEx client instance for testing.""" - client = AsyncAgentex(base_url=AGENTEX_API_BASE_URL) - yield client - await client.close() - - -@pytest.fixture -def agent_name(): - """Return the agent name for testing.""" - return AGENT_NAME - - -@pytest_asyncio.fixture -async def agent_id(client: AsyncAgentex, agent_name): - """Retrieve the agent ID based on the agent name.""" - agents = await client.agents.list() - for agent in agents: - if agent.name == agent_name: - return agent.id - raise ValueError(f"Agent with name {agent_name} not found.") - - -class TestNonStreamingEvents: - """Test non-streaming event sending and 
polling.""" - - @pytest.mark.asyncio - async def test_send_event_and_poll(self, client: AsyncAgentex, agent_id: str): - """Test sending an event and polling for the response.""" - pass - - -class TestStreamingEvents: - """Test streaming event sending.""" - - @pytest.mark.asyncio - async def test_send_event_and_stream(self, client: AsyncAgentex, agent_id: str): - """Test sending an event and streaming the response.""" - pass - - -if __name__ == "__main__": - pytest.main([__file__, "-v"]) diff --git a/examples/tutorials/README.md b/examples/tutorials/README.md deleted file mode 100644 index c9231685..00000000 --- a/examples/tutorials/README.md +++ /dev/null @@ -1,155 +0,0 @@ -# AgentEx Tutorials - -Progressive tutorials for learning AgentEx from basics to production-ready patterns. - -## Prerequisites - -**Before starting any tutorial:** -1. Set up your development environment following the [main repo README](https://github.com/scaleapi/scale-agentex#setup) -2. Start backend services from repository root: - ```bash - cd /path/to/agentex-python - make dev - ``` -3. Verify Temporal UI is accessible at http://localhost:8233 - -For troubleshooting, see the [AgentEx debugging guide](https://github.com/scaleapi/scale-agentex#troubleshooting). - -## Learning Path - -```mermaid -graph TD - A[๐Ÿ‘‹ Start Here] --> B[00_sync/000_hello_acp] - B --> C[00_sync/010_multiturn] - C --> D[00_sync/020_streaming] - - D --> E{Need Task
Management?} - E -->|Yes| F[10_async/00_base/
000_hello_acp] - E -->|No| G[Continue with
sync patterns] - - F --> H[00_base/010_multiturn] - H --> I[00_base/020_streaming] - I --> J[00_base/030_tracing] - J --> K[00_base/040_other_sdks] - K --> L[00_base/080_batch_events] - - L --> M{Building for
Production?} - M -->|Yes| N[10_temporal/
000_hello_acp] - M -->|No| O[00_base/090_multi_agent] - - N --> P[10_temporal/010_agent_chat] - P --> Q[10_temporal/020_state_machine] - Q --> R[10_temporal/030_custom_activities] - R --> S[10_temporal/050_guardrails] - - S --> T{Using
OpenAI SDK?} - T -->|Yes| U[10_temporal/060_openai_hello] - U --> V[10_temporal/070_openai_tools] - V --> W[10_temporal/080_openai_hitl] - T -->|No| X[๐ŸŽ‰ Production Ready!] - W --> X - - style A fill:#e1f5e1 - style X fill:#fff3cd - style E fill:#e3f2fd - style M fill:#e3f2fd - style T fill:#e3f2fd -``` - -## Tutorial Structure - -### 00_sync/ - Synchronous Agents -Simple request-response patterns without task management. Start here if you're new to AgentEx. - -- **[000_hello_acp](00_sync/000_hello_acp/)** - Your first agent -- **[010_multiturn](00_sync/010_multiturn/)** - Maintaining conversation context -- **[020_streaming](00_sync/020_streaming/)** - Real-time response streaming - -**When to use:** Simple chatbots, stateless Q&A, quick prototypes - ---- - -### 10_async/ - Task-Based Agents - -#### 00_base/ - Non-Temporal Patterns -Task-based architecture without workflow orchestration. Adds task management on top of sync patterns. - -- **[000_hello_acp](10_async/00_base/000_hello_acp/)** - Task-based hello world -- **[010_multiturn](10_async/00_base/010_multiturn/)** - Multiturn with task management -- **[020_streaming](10_async/00_base/020_streaming/)** - Streaming with tasks -- **[030_tracing](10_async/00_base/030_tracing/)** - Observability with Scale Groundplane -- **[040_other_sdks](10_async/00_base/040_other_sdks/)** - Integrating OpenAI, Anthropic, etc. -- **[080_batch_events](10_async/00_base/080_batch_events/)** - Event batching (shows limitations โ†’ Temporal) -- **[090_multi_agent_non_temporal](10_async/00_base/090_multi_agent_non_temporal/)** - Complex multi-agent coordination - -**When to use:** Task tracking needed but workflows are simple, no durability requirements - ---- - -#### 10_temporal/ - Production Workflows -Durable, fault-tolerant agents with Temporal workflow orchestration. - -**Core Patterns:** -- **[000_hello_acp](10_async/10_temporal/000_hello_acp/)** - Temporal basics -- **[010_agent_chat](10_async/10_temporal/010_agent_chat/)** - Stateful conversations -- **[020_state_machine](10_async/10_temporal/020_state_machine/)** - Structured state management -- **[030_custom_activities](10_async/10_temporal/030_custom_activities/)** - Custom Temporal activities -- **[050_agent_chat_guardrails](10_async/10_temporal/050_agent_chat_guardrails/)** - Safety & validation - -**OpenAI Agents SDK Series:** -- **[060_openai_hello_world](10_async/10_temporal/060_open_ai_agents_sdk_hello_world/)** - Plugin-based agents -- **[070_openai_tools](10_async/10_temporal/070_open_ai_agents_sdk_tools/)** - Tool integration patterns -- **[080_openai_hitl](10_async/10_temporal/080_open_ai_agents_sdk_human_in_the_loop/)** - Human oversight workflows - -**When to use:** Production systems requiring durability, fault tolerance, long-running workflows, or complex state management - ---- - -## Quick Start - -```bash -# 1. Start backend services (from repo root) -make dev - -# 2. Navigate to a tutorial -cd examples/tutorials/00_sync/000_hello_acp - -# 3. 
Run it -uv run python hello_acp.py -``` - -## Common Commands - -```bash -# Format tutorial code (always scope to specific files you're modifying) -rye run format examples/tutorials/00_sync/000_hello_acp/ - -# Run all async tutorial tests -cd examples/tutorials -./run_all_async_tests.sh - -# Run specific tutorial test -cd examples/tutorials -uv run pytest 00_sync/000_hello_acp/ -v - -# Check Temporal UI (when running temporal tutorials) -open http://localhost:8233 -``` - -## Tutorial Categories at a Glance - -| Category | Tutorials | Focus | Use When | -|----------|-----------|-------|----------| -| **Sync** | 3 | Request-response basics | Learning fundamentals, simple chatbots | -| **Async Base** | 7 | Task management without workflows | Need task tracking, simple coordination | -| **Temporal** | 8 | Production-grade workflows | Need durability, fault tolerance, complex state | - -## Getting Help - -- **Each tutorial includes:** README explaining concepts, annotated source code, and tests -- **Common issues?** See [AgentEx troubleshooting guide](https://github.com/scaleapi/scale-agentex#troubleshooting) -- **Need more context?** Check the [main AgentEx documentation](https://github.com/scaleapi/scale-agentex) - ---- - -**Ready to start?** โ†’ Begin with [00_sync/000_hello_acp](00_sync/000_hello_acp/) diff --git a/examples/tutorials/TEST_RUNNER_README.md b/examples/tutorials/TEST_RUNNER_README.md deleted file mode 100644 index de8fcf66..00000000 --- a/examples/tutorials/TEST_RUNNER_README.md +++ /dev/null @@ -1,142 +0,0 @@ -# Tutorial Test Runner - -This directory contains a test runner script that automates the process of starting an agent and running its tests. - -## Prerequisites - -- Python 3.12+ -- `uv` installed and available in PATH -- `httpx` Python package (for health checks) - -## Usage - -From the `tutorials/` directory, run: - -```bash -python run_tutorial_test.py -``` - -### Examples - -```bash -# Test a sync tutorial -python run_tutorial_test.py 00_sync/000_hello_acp - -# Test an async tutorial -python run_tutorial_test.py 10_async/00_base/000_hello_acp -python run_tutorial_test.py 10_async/00_base/010_multiturn -python run_tutorial_test.py 10_async/00_base/020_streaming - -# Test with custom base URL -python run_tutorial_test.py 10_async/00_base/000_hello_acp --base-url http://localhost:5003 -``` - -## What the Script Does - -1. **Validates Paths**: Checks that the tutorial directory, manifest.yaml, and tests directory exist -2. **Starts Agent**: Runs `uv run agentex agents run --manifest manifest.yaml` in the tutorial directory -3. **Health Check**: Polls the agent's health endpoint (default: http://localhost:5003/health) until it's live -4. **Runs Tests**: Executes `uv run pytest tests/ -v --tb=short` in the tutorial directory -5. 
**Cleanup**: Gracefully stops the agent process (or kills it if necessary) - -## Options - -``` -positional arguments: - tutorial_dir Path to the tutorial directory (relative to current directory) - -optional arguments: - -h, --help Show help message and exit - --base-url BASE_URL Base URL for the AgentEx server (default: http://localhost:5003) -``` - -## Exit Codes - -- `0`: All tests passed successfully -- `1`: Tests failed or error occurred -- `130`: Interrupted by user (Ctrl+C) - -## Example Output - -``` -================================================================================ -AgentEx Tutorial Test Runner -================================================================================ - -๐Ÿš€ Starting agent from: 10_async/00_base/000_hello_acp -๐Ÿ“„ Manifest: 10_async/00_base/000_hello_acp/manifest.yaml -๐Ÿ’ป Running command: uv run agentex agents run --manifest manifest.yaml -๐Ÿ“ Working directory: 10_async/00_base/000_hello_acp -โœ… Agent process started (PID: 12345) - -๐Ÿ” Checking agent health at http://localhost:5003/health... -โณ Waiting for agent... (attempt 1/30) -โณ Waiting for agent... (attempt 2/30) -โœ… Agent is live! (attempt 3/30) - -โณ Waiting 2 seconds for agent to fully initialize... - -๐Ÿงช Running tests from: 10_async/00_base/000_hello_acp/tests -๐Ÿ’ป Running command: uv run pytest tests/ -v --tb=short -๐Ÿ“ Working directory: 10_async/00_base/000_hello_acp - -============================= test session starts ============================== -... -============================= X passed in Y.YYs ================================ - -โœ… All tests passed! - -๐Ÿ›‘ Stopping agent (PID: 12345)... -โœ… Agent stopped gracefully - -================================================================================ -โœ… Test run completed successfully! -================================================================================ -``` - -## Troubleshooting - -### Agent doesn't become live - -If the health check times out: -- Check that port 5003 is not already in use -- Look at the agent logs to see if there are startup errors -- Try increasing the timeout by modifying the `max_attempts` parameter in the script - -### Tests fail - -- Ensure the agent is properly configured in manifest.yaml -- Check that all dependencies are installed in the tutorial's virtual environment -- Review test output for specific failure reasons - -### "uv: command not found" - -Install `uv`: -```bash -curl -LsSf https://astral.sh/uv/install.sh | sh -``` - -### Missing httpx package - -The script requires `httpx` for health checks. It should be installed automatically via the tutorial's dependencies, but if needed: -```bash -pip install httpx -``` - -## Integration with CI/CD - -This script is designed to be CI/CD friendly: - -```bash -# Run all async tutorials -for tutorial in 10_async/00_base/*/; do - python run_tutorial_test.py "$tutorial" || exit 1 -done -``` - -## Notes - -- The script automatically sets `AGENTEX_API_BASE_URL` environment variable when running tests -- Agent processes are always cleaned up, even if tests fail or the script is interrupted -- The script uses line-buffered output for real-time feedback -- Health checks poll every 1 second for up to 30 seconds (configurable in the code) diff --git a/examples/tutorials/pytest.ini b/examples/tutorials/pytest.ini deleted file mode 100644 index 7be1a076..00000000 --- a/examples/tutorials/pytest.ini +++ /dev/null @@ -1,4 +0,0 @@ -[pytest] -pythonpath = . -testpaths = . 
-addopts = --import-mode=importlib diff --git a/examples/tutorials/run_agent_test.sh b/examples/tutorials/run_agent_test.sh deleted file mode 100755 index f396cfd0..00000000 --- a/examples/tutorials/run_agent_test.sh +++ /dev/null @@ -1,429 +0,0 @@ -#!/bin/bash -# -# Run a single agent tutorial test -# -# This script runs the test for a single agent tutorial. -# It starts the agent, runs tests against it, then stops the agent. -# -# Usage: -# ./run_agent_test.sh # Run single tutorial test -# ./run_agent_test.sh --build-cli # Build CLI from source and run test -# ./run_agent_test.sh --view-logs # View logs for specific tutorial -# ./run_agent_test.sh --view-logs # View most recent agent logs -# - -set -e # Exit on error - -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -cd "$SCRIPT_DIR" - -# Colors for output -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -NC='\033[0m' # No Color - -# Parse arguments -TUTORIAL_PATH="" -VIEW_LOGS=false -BUILD_CLI=false - -for arg in "$@"; do - if [[ "$arg" == "--view-logs" ]]; then - VIEW_LOGS=true - elif [[ "$arg" == "--build-cli" ]]; then - BUILD_CLI=true - else - TUTORIAL_PATH="$arg" - fi -done - -# Function to check prerequisites for running this test suite -check_prerequisites() { - # Check that we are in the examples/tutorials directory - if [[ "$PWD" != */examples/tutorials ]]; then - echo -e "${RED}โŒ Please run this script from the examples/tutorials directory${NC}" - exit 1 - fi - - # Check if uv is available - if ! command -v uv &> /dev/null; then - echo -e "${RED}โŒ uv is required but not installed${NC}" - echo "Please install uv: curl -LsSf https://astral.sh/uv/install.sh | sh" - exit 1 - fi - - echo -e "${GREEN}โœ… Prerequisites check passed${NC}" -} - -# Function to wait for agent to be ready -wait_for_agent_ready() { - local name=$1 - local logfile="/tmp/agentex-${name}.log" - local timeout=45 # seconds - increased to account for package installation time - local elapsed=0 - - echo -e "${YELLOW}โณ Waiting for ${name} agent to be ready...${NC}" - - while [ $elapsed -lt $timeout ]; do - # Check if agent is successfully registered - if grep -q "Successfully registered agent" "$logfile" 2>/dev/null; then - - # For temporal agents, also wait for workers to be ready - if [[ "$tutorial_path" == *"temporal"* ]]; then - # This is a temporal agent - wait for workers too - if grep -q "Running workers for task queue" "$logfile" 2>/dev/null; then - return 0 - fi - else - return 0 - fi - fi - sleep 1 - ((elapsed++)) - done - - echo -e "${RED}โŒ Timeout waiting for ${name} agent to be ready${NC}" - echo -e "${YELLOW}๐Ÿ“‹ Agent logs:${NC}" - if [[ -f "$logfile" ]]; then - echo "----------------------------------------" - tail -50 "$logfile" - echo "----------------------------------------" - else - echo "โŒ Log file not found: $logfile" - fi - return 1 -} - -# Function to start agent in background -start_agent() { - local tutorial_path=$1 - local name=$(basename "$tutorial_path") - local logfile="/tmp/agentex-${name}.log" - - echo -e "${YELLOW}๐Ÿš€ Starting ${name} agent...${NC}" - - # Check if tutorial directory exists - if [[ ! -d "$tutorial_path" ]]; then - echo -e "${RED}โŒ Tutorial directory not found: $tutorial_path${NC}" - return 1 - fi - - # Check if manifest exists - if [[ ! 
-f "$tutorial_path/manifest.yaml" ]]; then - echo -e "${RED}โŒ Manifest not found: $tutorial_path/manifest.yaml${NC}" - return 1 - fi - - # Save current directory - local original_dir="$PWD" - - # Change to tutorial directory - cd "$tutorial_path" || return 1 - - # Start the agent in background and capture PID - local manifest_path="$PWD/manifest.yaml" # Always use full path - - if [ "$BUILD_CLI" = true ]; then - - # Use wheel from dist directory at repo root - local wheel_file=$(ls /home/runner/work/*/*/dist/agentex_sdk-*.whl 2>/dev/null | head -n1) - if [[ -z "$wheel_file" ]]; then - echo -e "${RED}โŒ No built wheel found in dist/agentex_sdk-*.whl${NC}" - echo -e "${YELLOW}๐Ÿ’ก Please build the local SDK first by running: uv build${NC}" - echo -e "${YELLOW}๐Ÿ’ก From the repo root directory${NC}" - cd "$original_dir" - return 1 - fi - - # Use the built wheel - uv run --with "$wheel_file" agentex agents run --manifest "$manifest_path" > "$logfile" 2>&1 & - else - uv run agentex agents run --manifest manifest.yaml > "$logfile" 2>&1 & - fi - local pid=$! - - # Return to original directory - cd "$original_dir" - - echo "$pid" > "/tmp/agentex-${name}.pid" - echo -e "${GREEN}โœ… ${name} agent started (PID: $pid, logs: $logfile)${NC}" - - # Wait for agent to be ready - if ! wait_for_agent_ready "$name"; then - kill -9 $pid 2>/dev/null - return 1 - fi - - return 0 -} - -# Helper function to view agent container logs -view_agent_logs() { - local tutorial_path=$1 - - # If tutorial path is provided, view logs for that specific tutorial - if [[ -n "$tutorial_path" ]]; then - local name=$(basename "$tutorial_path") - local logfile="/tmp/agentex-${name}.log" - - echo -e "${YELLOW}๐Ÿ“‹ Viewing logs for ${name}...${NC}" - echo -e "${YELLOW}Log file: $logfile${NC}" - echo "" - - if [[ ! -f "$logfile" ]]; then - echo -e "${RED}โŒ Log file not found: $logfile${NC}" - return 1 - fi - - # Display the logs - tail -f "$logfile" - else - # No specific tutorial, find the most recent log file - local latest_log=$(ls -t /tmp/agentex-*.log 2>/dev/null | head -1) - - if [[ -z "$latest_log" ]]; then - echo -e "${RED}โŒ No agent log files found in /tmp/agentex-*.log${NC}" - echo -e "${YELLOW}Available log files:${NC}" - ls -lht /tmp/agentex-*.log 2>/dev/null || echo " (none)" - return 1 - fi - - echo -e "${YELLOW}๐Ÿ“‹ Viewing most recent agent logs...${NC}" - echo -e "${YELLOW}Log file: $latest_log${NC}" - echo "" - - # Display the logs - tail -f "$latest_log" - fi -} - -# Function to stop agent -stop_agent() { - local tutorial_path=$1 - local name=$(basename "$tutorial_path") - local pidfile="/tmp/agentex-${name}.pid" - local logfile="/tmp/agentex-${name}.log" - - echo -e "${YELLOW}๐Ÿ›‘ Stopping ${name} agent...${NC}" - - # Check if PID file exists - if [[ ! 
-f "$pidfile" ]]; then - echo -e "${YELLOW}โš ๏ธ No PID file found for ${name} agent${NC}" - return 0 - fi - - # Read PID from file - local pid=$(cat "$pidfile") - - # Check if process is running and kill it - if kill -0 "$pid" 2>/dev/null; then - echo -e "${YELLOW}Stopping ${name} agent (PID: $pid)${NC}" - kill "$pid" 2>/dev/null || true - rm -f "$pidfile" - else - echo -e "${YELLOW}โš ๏ธ ${name} agent was not running${NC}" - rm -f "$pidfile" - fi - - echo -e "${GREEN}โœ… ${name} agent stopped${NC}" - echo -e "${YELLOW}Logs available at: $logfile${NC}" - - return 0 -} - - -# Function to run test for a tutorial -run_test() { - local tutorial_path=$1 - local name=$(basename "$tutorial_path") - - echo -e "${YELLOW}๐Ÿงช Running tests for ${name}...${NC}" - - # Check if tutorial directory exists - if [[ ! -d "$tutorial_path" ]]; then - echo -e "${RED}โŒ Tutorial directory not found: $tutorial_path${NC}" - return 1 - fi - - # Check if test file exists - if [[ ! -f "$tutorial_path/tests/test_agent.py" ]]; then - echo -e "${RED}โŒ Test file not found: $tutorial_path/tests/test_agent.py${NC}" - return 1 - fi - - # Save current directory - local original_dir="$PWD" - - # Change to tutorial directory - cd "$tutorial_path" || return 1 - - - # Run the tests with retry mechanism - local max_retries=5 - local retry_count=0 - local exit_code=1 - - while [ $retry_count -lt $max_retries ]; do - if [ $retry_count -gt 0 ]; then - echo -e "${YELLOW}๐Ÿ”„ Retrying tests (attempt $((retry_count + 1))/$max_retries)...${NC}" - fi - - # Stream pytest output directly in real-time - uv run pytest tests/test_agent.py -v -s - exit_code=$? - - if [ $exit_code -eq 0 ]; then - break - else - retry_count=$((retry_count + 1)) - if [ $retry_count -lt $max_retries ]; then - sleep 5 - fi - fi - done - - # Return to original directory - cd "$original_dir" - - if [ $exit_code -eq 0 ]; then - echo -e "${GREEN}โœ… Tests passed for ${name}${NC}" - return 0 - else - echo -e "${RED}โŒ Tests failed for ${name}${NC}" - return 1 - fi -} - -# Function to execute test flow for a single tutorial -execute_tutorial_test() { - local tutorial=$1 - - echo "" - echo "================================================================================" - echo "Testing: $tutorial" - echo "================================================================================" - - # Start the agent - if ! 
start_agent "$tutorial"; then - echo -e "${RED}โŒ FAILED to start agent: $tutorial${NC}" - return 1 - fi - - # Run the tests - local test_passed=false - if run_test "$tutorial"; then - echo -e "${GREEN}โœ… PASSED: $tutorial${NC}" - test_passed=true - else - echo -e "${RED}โŒ FAILED: $tutorial${NC}" - fi - - # Stop the agent - stop_agent "$tutorial" - - echo "" - - if [ "$test_passed" = true ]; then - return 0 - else - return 1 - fi -} - -# Function to check if built wheel is available -check_built_wheel() { - - # Navigate to the repo root (two levels up from examples/tutorials) - local repo_root="../../" - local original_dir="$PWD" - - cd "$repo_root" || { - echo -e "${RED}โŒ Failed to navigate to repo root${NC}" - return 1 - } - - # Check if wheel exists in dist directory at repo root - local wheel_file=$(ls /home/runner/work/*/*/dist/agentex_sdk-*.whl 2>/dev/null | head -n1) - if [[ -z "$wheel_file" ]]; then - echo -e "${RED}โŒ No built wheel found in dist/agentex_sdk-*.whl${NC}" - echo -e "${YELLOW}๐Ÿ’ก Please build the local SDK first by running: uv build${NC}" - echo -e "${YELLOW}๐Ÿ’ก From the repo root directory${NC}" - cd "$original_dir" - return 1 - fi - - # Test the wheel by running agentex --help - if ! uv run --with "$wheel_file" agentex --help >/dev/null 2>&1; then - echo -e "${RED}โŒ Failed to run agentex with built wheel${NC}" - cd "$original_dir" - return 1 - fi - cd "$original_dir" - return 0 -} - - -# Main execution function -main() { - # Handle --view-logs flag - if [ "$VIEW_LOGS" = true ]; then - if [[ -n "$TUTORIAL_PATH" ]]; then - view_agent_logs "$TUTORIAL_PATH" - else - view_agent_logs - fi - exit 0 - fi - # Require tutorial path - if [[ -z "$TUTORIAL_PATH" ]]; then - echo -e "${RED}โŒ Error: Tutorial path is required${NC}" - echo "" - echo "Usage:" - echo " ./run_agent_test.sh # Run single tutorial test" - echo " ./run_agent_test.sh --build-cli # Build CLI from source and run test" - echo " ./run_agent_test.sh --view-logs # View logs for specific tutorial" - echo " ./run_agent_test.sh --view-logs # View most recent agent logs" - echo "" - echo "Examples:" - echo " ./run_agent_test.sh 00_sync/000_hello_acp" - echo " ./run_agent_test.sh --build-cli 00_sync/000_hello_acp" - exit 1 - fi - - echo "================================================================================" - echo "Running Tutorial Test: $TUTORIAL_PATH" - echo "================================================================================" - - # Check prerequisites - check_prerequisites - - echo "" - - # Check built wheel if requested - if [ "$BUILD_CLI" = true ]; then - if ! 
check_built_wheel; then - echo -e "${RED}โŒ Failed to find or verify built wheel${NC}" - exit 1 - fi - echo "" - fi - - # Execute the single tutorial test - if execute_tutorial_test "$TUTORIAL_PATH"; then - echo "" - echo "================================================================================" - echo -e "${GREEN}๐ŸŽ‰ Test passed for: $TUTORIAL_PATH${NC}" - echo "================================================================================" - exit 0 - else - echo "" - echo "================================================================================" - echo -e "${RED}โŒ Test failed for: $TUTORIAL_PATH${NC}" - echo "================================================================================" - exit 1 - fi -} - -# Run main function -main diff --git a/examples/tutorials/test_utils/async_utils.py b/examples/tutorials/test_utils/async_utils.py deleted file mode 100644 index effe8778..00000000 --- a/examples/tutorials/test_utils/async_utils.py +++ /dev/null @@ -1,254 +0,0 @@ -""" -Utility functions for testing AgentEx async agents. - -This module provides helper functions for working with async (non-temporal) agents, -including task creation, event sending, response polling, and streaming. -""" - -import json -import time -import asyncio -from typing import Optional, AsyncGenerator -from datetime import datetime, timezone - -from agentex._client import AsyncAgentex -from agentex.types.task_message import TaskMessage -from agentex.types.agent_rpc_params import ParamsSendEventRequest -from agentex.types.agent_rpc_result import StreamTaskMessageDone, StreamTaskMessageFull -from agentex.types.text_content_param import TextContentParam - - -async def send_event_and_poll_yielding( - client: AsyncAgentex, - agent_id: str, - task_id: str, - user_message: str, - timeout: int = 30, - sleep_interval: float = 1.0, - yield_updates: bool = True, -) -> AsyncGenerator[TaskMessage, None]: - """ - Send an event to an agent and poll for responses, yielding messages as they arrive. - - Polls continuously until timeout is hit or the caller exits the loop. 
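-
-    Example (illustrative):
-        async for message in send_event_and_poll_yielding(
-            client=client, agent_id=agent_id, task_id=task.id, user_message="hi"
-        ):
-            if message.streaming_status == "DONE":
-                break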
- - Args: - client: AgentEx client instance - agent_id: The agent ID - task_id: The task ID - user_message: The message content to send - timeout: Maximum seconds to wait for a response (default: 30) - sleep_interval: Seconds to sleep between polls (default: 1.0) - yield_updates: If True, yield messages again when their content changes (default: True for streaming) - - Yields: - TaskMessage objects as they are discovered during polling - """ - # Send the event - event_content = TextContentParam(type="text", author="user", content=user_message) - - # Capture timestamp before sending to account for clock skew - # Subtract 2 second buffer to ensure we don't filter out messages we just created - # (accounts for clock skew between client and server) - messages_created_after = time.time() - 2.0 - - await client.agents.send_event( - agent_id=agent_id, params=ParamsSendEventRequest(task_id=task_id, content=event_content) - ) - # Poll continuously until timeout - # Poll for messages created after we sent the event - async for message in poll_messages( - client=client, - task_id=task_id, - timeout=timeout, - sleep_interval=sleep_interval, - messages_created_after=messages_created_after, - yield_updates=yield_updates, - ): - yield message - - -async def poll_messages( - client: AsyncAgentex, - task_id: str, - timeout: int = 30, - sleep_interval: float = 1.0, - messages_created_after: Optional[float] = None, - yield_updates: bool = False, -) -> AsyncGenerator[TaskMessage, None]: - """ - Poll for messages continuously until timeout. - - Args: - client: AgentEx client instance - task_id: The task ID to poll messages for - timeout: Maximum seconds to poll (default: 30) - sleep_interval: Seconds to sleep between polls (default: 1.0) - messages_created_after: Optional timestamp to filter messages (Unix timestamp) - yield_updates: If True, yield messages again when their content changes (for streaming) - If False, only yield each message ID once (default: False) - - Yields: - TaskMessage objects as they are discovered or updated - """ - # Keep track of messages we've already yielded - seen_message_ids = set() - # Track message content hashes to detect updates (for streaming) - message_content_hashes = {} - start_time = datetime.now() - - # Poll continuously until timeout - while (datetime.now() - start_time).seconds < timeout: - messages = await client.messages.list(task_id=task_id) - - # Sort messages by created_at to ensure chronological order - # Use datetime.min for messages without created_at timestamp - sorted_messages = sorted( - messages, - key=lambda m: m.created_at if m.created_at else datetime.min.replace(tzinfo=timezone.utc) - ) - - new_messages_found = 0 - for message in sorted_messages: - # Check if message passes timestamp filter - if messages_created_after and message.created_at: - # If message.created_at is timezone-naive, assume it's UTC - if message.created_at.tzinfo is None: - msg_timestamp = message.created_at.replace(tzinfo=timezone.utc).timestamp() - else: - msg_timestamp = message.created_at.timestamp() - if msg_timestamp < messages_created_after: - continue - - # Check if this is a new message or an update to existing message - is_new_message = message.id not in seen_message_ids - - if yield_updates: - # For streaming: track content changes - # Use getattr to safely extract content and convert to string - # This handles various content structures at runtime - raw_content = getattr(message.content, 'content', message.content) if message.content else None - content_str = 
str(raw_content) if raw_content is not None else "" - - # Ensure streaming_status is also properly converted to string - streaming_status_str = str(message.streaming_status) if message.streaming_status is not None else "" - content_hash = hash(content_str + streaming_status_str) - is_updated = message.id in message_content_hashes and message_content_hashes[message.id] != content_hash - - if is_new_message or is_updated: - message_content_hashes[message.id] = content_hash - seen_message_ids.add(message.id) - new_messages_found += 1 - yield message - else: - # Original behavior: only yield each message ID once - if is_new_message: - seen_message_ids.add(message.id) - new_messages_found += 1 - yield message - - # Sleep before next poll - await asyncio.sleep(sleep_interval) - - -async def send_event_and_stream( - client: AsyncAgentex, - agent_id: str, - task_id: str, - user_message: str, - timeout: int = 30, -): - """ - Send an event to an agent and stream the response, yielding events as they arrive. - - This function now uses stream_agent_response() under the hood and yields events - up the stack as they arrive. - - Args: - client: AgentEx client instance - agent_id: The agent ID - task_id: The task ID - user_message: The message content to send - timeout: Maximum seconds to wait for stream completion (default: 30) - - Yields: - Parsed event dictionaries as they arrive from the stream - - Raises: - Exception: If streaming fails - """ - # Send the event - event_content = TextContentParam(type="text", author="user", content=user_message) - - await client.agents.send_event(agent_id=agent_id, params={"task_id": task_id, "content": event_content}) - - # Stream the response using stream_agent_response and yield events up the stack - async for event in stream_agent_response( - client=client, - task_id=task_id, - timeout=timeout, - ): - yield event - - -async def stream_agent_response( - client: AsyncAgentex, - task_id: str, - timeout: int = 30, -): - """ - Stream the agent response for a given task, yielding events as they arrive. - - Args: - client: AgentEx client instance - task_id: The task ID to stream messages from - timeout: Maximum seconds to wait for stream completion (default: 30) - - Yields: - Parsed event dictionaries as they arrive from the stream - """ - try: - # Add explicit timeout wrapper to force exit after timeout seconds - async with asyncio.timeout(timeout): - async with client.tasks.with_streaming_response.stream_events(task_id=task_id, timeout=timeout) as stream: - async for line in stream.iter_lines(): - if line.startswith("data: "): - # Parse the SSE data - data = line.strip()[6:] # Remove "data: " prefix - event = json.loads(data) - # Yield each event immediately as it arrives - yield event - - except asyncio.TimeoutError: - print(f"[DEBUG] Stream timed out after {timeout}s") - except Exception as e: - print(f"[DEBUG] Stream error: {e}") - - -async def stream_task_messages( - client: AsyncAgentex, - task_id: str, - timeout: int = 30, -) -> AsyncGenerator[TaskMessage, None]: - """ - Stream the task messages for a given task, yielding messages as they arrive. 
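-
-    Example (an illustrative sketch; the task ID is hypothetical and ``client``
-    is assumed to be an existing AsyncAgentex instance):
-
-        async for message in stream_task_messages(client, task_id="task_123"):
-            print(message.id, message.content)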
- """ - async for event in stream_agent_response( - client=client, - task_id=task_id, - timeout=timeout, - ): - msg_type = event.get("type") - task_message: Optional[TaskMessage] = None - if msg_type == "full": - task_message_update_full = StreamTaskMessageFull.model_validate(event) - if task_message_update_full.parent_task_message and task_message_update_full.parent_task_message.id: - finished_message = await client.messages.retrieve(task_message_update_full.parent_task_message.id) - task_message = finished_message - elif msg_type == "done": - task_message_update_done = StreamTaskMessageDone.model_validate(event) - if task_message_update_done.parent_task_message and task_message_update_done.parent_task_message.id: - finished_message = await client.messages.retrieve(task_message_update_done.parent_task_message.id) - task_message = finished_message - if task_message: - yield task_message diff --git a/examples/tutorials/test_utils/sync.py b/examples/tutorials/test_utils/sync.py deleted file mode 100644 index 808ee0af..00000000 --- a/examples/tutorials/test_utils/sync.py +++ /dev/null @@ -1,95 +0,0 @@ -""" -Utility functions for testing AgentEx agents. - -This module provides helper functions for validating agent responses -in both streaming and non-streaming scenarios. -""" -from __future__ import annotations - -from typing import List, Callable, Optional, Generator - -from agentex.types import TextDelta, TextContent -from agentex.types.agent_rpc_result import StreamTaskMessageDone -from agentex.types.agent_rpc_response import SendMessageResponse -from agentex.types.task_message_update import StreamTaskMessageFull, StreamTaskMessageDelta - - -def validate_text_content(content: TextContent, validator: Optional[Callable[[str], bool]] = None) -> str: - """ - Validate that content is TextContent and optionally run a custom validator. - - Args: - content: The content to validate - validator: Optional function that takes the content string and returns True if valid - - Returns: - The text content as a string - - Raises: - AssertionError: If validation fails - """ - assert isinstance(content, TextContent), f"Expected TextContent, got {type(content)}" - assert isinstance(content.content, str), "Content should be a string" - - if validator: - assert validator(content.content), f"Content validation failed: {content.content}" - - return content.content - - -def validate_text_in_string(text_to_find: str, text: str): - """ - Validate that text is a string and optionally run a custom validator. - - Args: - text: The text to validate - validator: Optional function that takes the text string and returns True if valid - """ - - assert text_to_find in text, f"Expected to find '{text_to_find}' in text." - - -def collect_streaming_response( - stream_generator: Generator[SendMessageResponse, None, None], -) -> tuple[str, List[SendMessageResponse]]: - """ - Collect and validate a streaming response. 
- - Args: - stream_generator: The generator yielding streaming chunks - - Returns: - Tuple of (the aggregated text content, the list of all chunks received) - - Raises: - AssertionError: If no chunks are received or no content is found - """ - aggregated_content = "" - chunks = [] - - for chunk in stream_generator: - task_message_update = chunk.result - chunks.append(chunk) - # Collect text deltas as they arrive - if isinstance(task_message_update, StreamTaskMessageDelta) and task_message_update.delta is not None: - delta = task_message_update.delta - if isinstance(delta, TextDelta) and delta.text_delta is not None: - aggregated_content += delta.text_delta - - # Or collect full messages - elif isinstance(task_message_update, StreamTaskMessageFull): - content = task_message_update.content - if isinstance(content, TextContent): - aggregated_content = content.content - - elif isinstance(task_message_update, StreamTaskMessageDone): - # A done update marks the end of the stream (also covers the non-streaming case) - break - # Validate we received something - if not chunks: - raise AssertionError("No streaming chunks were received, when at least 1 was expected.") - - if not aggregated_content: - raise AssertionError("No content was received in the streaming response.") - - return aggregated_content, chunks diff --git a/pyproject.toml b/pyproject.toml index 738a20a7..529f6faf 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -9,51 +9,21 @@ authors = [ ] dependencies = [ - "httpx>=0.27.2,<0.28", - "pydantic>=2.0.0, <3", + "httpx>=0.23.0, <1", + "pydantic>=1.9.0, <3", "typing-extensions>=4.10, <5", "anyio>=3.5.0, <5", "distro>=1.7.0, <2", "sniffio", - "typer>=0.16,<0.17", - "questionary>=2.0.1,<3", - "rich>=13.9.2,<14", - "fastapi>=0.115.0,<0.116", - "uvicorn>=0.31.1", - "watchfiles>=0.24.0,<1.0", - "python-on-whales>=0.73.0,<0.74", - "pyyaml>=6.0.2,<7", - "jsonschema>=4.23.0,<5", - "jsonref>=1.1.0,<2", - "temporalio>=1.18.2,<2", - "aiohttp>=3.10.10,<4", - "redis>=5.2.0,<6", - "litellm>=1.66.0,<2", - "kubernetes>=25.0.0,<29.0.0", - "jinja2>=3.1.3,<4", - "mcp[cli]>=1.4.1", - "scale-gp>=0.1.0a59", - "openai-agents==0.4.2", - "tzlocal>=5.3.1", - "tzdata>=2025.2", - "pytest>=8.4.0", - "json_log_formatter>=1.1.1", - "pytest-asyncio>=1.0.0", - "scale-gp-beta==0.1.0a20", - "ipykernel>=6.29.5", - "openai>=2.2,<3", # Required by openai-agents 0.4.2; litellm now supports openai 2.x (issue #13711 resolved: https://github.com/BerriAI/litellm/issues/13711) - "cloudpickle>=3.1.1", - "datadog>=0.52.1", - "ddtrace>=3.13.0", - "yaspin>=3.1.0", - "claude-agent-sdk>=0.1.0", - "anthropic>=0.40.0", ] -requires-python = ">= 3.12,<4" +requires-python = ">= 3.9" classifiers = [ "Typing :: Typed", "Intended Audience :: Developers", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.14", @@ -72,12 +42,6 @@ Repository = "https://github.com/scaleapi/scale-agentex-python" [project.optional-dependencies] aiohttp = ["aiohttp", "httpx_aiohttp>=0.1.9"] -dev = [ - "ruff>=0.3.4", -] - -[project.scripts] -agentex = "agentex.lib.cli.commands.main:app" [tool.rye] managed = true @@ -94,9 +58,7 @@ dev-dependencies = [ "dirty-equals>=0.6.0", "importlib-metadata>=6.7.0", "rich>=13.7.1", - "nest_asyncio==1.6.0", "pytest-xdist>=3.6.1", - "debugpy>=1.8.15", ] [tool.rye.scripts] @@ -121,7 +83,8 @@ format = { chain = [ "check:importable" = "python -c 'import agentex'" typecheck
= { chain = [ - "typecheck:pyright" + "typecheck:pyright", + "typecheck:mypy" ]} "typecheck:pyright" = "pyright" "typecheck:verify-types" = "pyright --verifytypes agentex --ignoreexternal" @@ -131,13 +94,6 @@ typecheck = { chain = [ requires = ["hatchling==1.26.3", "hatch-fancy-pypi-readme"] build-backend = "hatchling.build" -[dependency-groups] -dev = [ - "ipywidgets>=8.1.7", - "nbstripout>=0.8.1", - "yaspin>=3.1.0", -] - [tool.hatch.build] include = [ "src/*" @@ -183,20 +139,17 @@ filterwarnings = [ ] [tool.pyright] -# Default to basic type checking, but override for specific directories -typeCheckingMode = "basic" -pythonVersion = "3.12" +# this enables practically every flag given by pyright. +# there are a couple of flags that are still disabled by +# default in strict mode as they are experimental and niche. +typeCheckingMode = "strict" +pythonVersion = "3.9" exclude = [ "_dev", ".venv", ".nox", ".git", - "agentex-server", - "examples/tutorials", - # Exclude autogenerated Stainless code from type checking - "src/agentex/resources", - "src/agentex/types", ] reportImplicitOverride = true @@ -205,33 +158,6 @@ reportOverlappingOverload = false reportImportCycles = false reportPrivateUsage = false -# Ignore common issues in generated SDK code -reportMissingTypeStubs = false -reportUnknownParameterType = false -reportUnknownMemberType = false -reportUnknownArgumentType = false -reportUnknownVariableType = false - -# Enable strict type checking only for hand-written code -[[tool.pyright.executionEnvironments]] -root = "src/agentex/lib" -typeCheckingMode = "strict" -# But allow some flexibility in OpenAI module for complex type boundaries -reportArgumentType = false - -[[tool.pyright.executionEnvironments]] -root = "examples" -typeCheckingMode = "strict" -# Allow type ignores in tutorials for readability -reportUnnecessaryTypeIgnoreComment = false - -[[tool.pyright.executionEnvironments]] -root = "tests" -typeCheckingMode = "basic" -# Be loose on typing in tests unless testing types specifically -reportOptionalMemberAccess = false -reportArgumentType = false - [tool.mypy] pretty = true show_error_codes = true @@ -242,7 +168,7 @@ show_error_codes = true # # We also exclude our `tests` as mypy doesn't always infer # types correctly and Pyright will still catch any type errors. -exclude = ['src/agentex/_files.py', '_dev/.*.py', 'tests/.*', 'examples/tutorials/.*'] +exclude = ['src/agentex/_files.py', '_dev/.*.py', 'tests/.*'] strict_equality = true implicit_reexport = true @@ -259,7 +185,7 @@ warn_unused_ignores = false warn_redundant_casts = false disallow_any_generics = true -# disallow_untyped_defs = true +disallow_untyped_defs = true disallow_untyped_calls = true disallow_subclassing_any = true disallow_incomplete_defs = true @@ -275,7 +201,7 @@ cache_fine_grained = true # ``` # Changing this codegen to make mypy happy would increase complexity # and would not be worth it. 
-disable_error_code = "func-returns-value,overload-cannot-match,no-untyped-def" +disable_error_code = "func-returns-value,overload-cannot-match" # https://github.com/python/mypy/issues/12162 [[tool.mypy.overrides]] @@ -337,16 +263,7 @@ extra-standard-library = ["typing_extensions"] known-first-party = ["agentex", "tests"] [tool.ruff.lint.per-file-ignores] -# Exclude autogenerated files from future annotations requirement -"src/agentex/resources/**.py" = ["FA102"] -"src/agentex/types/**.py" = ["FA102"] -"src/agentex/_*.py" = ["FA102"] "bin/**.py" = ["T201", "T203"] "scripts/**.py" = ["T201", "T203"] -"tests/**.py" = ["T201", "T203", "ARG001", "ARG002", "ARG005"] +"tests/**.py" = ["T201", "T203"] "examples/**.py" = ["T201", "T203"] -"examples/**.ipynb" = ["T201", "T203"] -"examples/tutorials/**.py" = ["T201", "T203"] -"examples/tutorials/**.ipynb" = ["T201", "T203"] -"**/run_tests.py" = ["T201", "T203"] -"**/dev_tools/**.py" = ["T201", "T203"] diff --git a/requirements-dev.lock b/requirements-dev.lock index 1078b30d..8debae92 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -15,164 +15,65 @@ aiohappyeyeballs==2.6.1 aiohttp==3.13.2 # via agentex-sdk # via httpx-aiohttp - # via litellm -aiosignal==1.3.2 +aiosignal==1.4.0 # via aiohttp annotated-types==0.7.0 # via pydantic -anyio==4.10.0 +anyio==4.12.0 # via agentex-sdk # via httpx - # via mcp - # via openai - # via scale-gp - # via scale-gp-beta - # via sse-starlette - # via starlette - # via watchfiles -appnope==0.1.4 - # via ipykernel -argcomplete==3.1.2 +argcomplete==3.6.3 # via nox -asttokens==3.0.0 - # via stack-data -attrs==25.3.0 +async-timeout==5.0.1 # via aiohttp - # via jsonschema - # via referencing -bytecode==0.17.0 - # via ddtrace -cachetools==5.5.2 - # via google-auth -certifi==2023.7.22 +attrs==25.4.0 + # via aiohttp + # via nox +backports-asyncio-runner==1.2.0 + # via pytest-asyncio +certifi==2025.11.12 # via httpcore # via httpx - # via kubernetes - # via requests -charset-normalizer==3.4.3 - # via requests -click==8.2.1 - # via litellm - # via typer - # via uvicorn -cloudpickle==3.1.1 - # via agentex-sdk -colorama==0.4.6 - # via griffe -colorlog==6.7.0 +colorlog==6.10.1 # via nox -comm==0.2.3 - # via ipykernel -datadog==0.52.1 - # via agentex-sdk -ddtrace==3.15.0 - # via agentex-sdk -debugpy==1.8.16 - # via ipykernel -decorator==5.2.1 - # via ipython -dirty-equals==0.6.0 -distlib==0.3.7 +dependency-groups==1.3.1 + # via nox +dirty-equals==0.11 +distlib==0.4.0 # via virtualenv distro==1.9.0 # via agentex-sdk - # via openai - # via scale-gp - # via scale-gp-beta -envier==0.6.1 - # via ddtrace -execnet==2.1.1 +exceptiongroup==1.3.1 + # via anyio + # via pytest +execnet==2.1.2 # via pytest-xdist -executing==2.2.0 - # via stack-data -fastapi==0.115.14 - # via agentex-sdk -filelock==3.12.4 - # via huggingface-hub +filelock==3.19.1 # via virtualenv frozenlist==1.8.0 # via aiohttp # via aiosignal -fsspec==2025.7.0 - # via huggingface-hub -google-auth==2.40.3 - # via kubernetes -griffe==1.12.0 - # via openai-agents h11==0.16.0 # via httpcore - # via uvicorn -hf-xet==1.1.7 - # via huggingface-hub httpcore==1.0.9 # via httpx -httpx==0.27.2 +httpx==0.28.1 # via agentex-sdk # via httpx-aiohttp - # via litellm - # via mcp - # via openai # via respx - # via scale-gp - # via scale-gp-beta httpx-aiohttp==0.1.9 # via agentex-sdk -httpx-sse==0.4.1 - # via mcp -huggingface-hub==0.34.4 - # via tokenizers -idna==3.4 +humanize==4.13.0 + # via nox +idna==3.11 # via anyio # via httpx - # via requests # via yarl 
-importlib-metadata==7.0.0 - # via litellm - # via opentelemetry-api -iniconfig==2.0.0 +importlib-metadata==8.7.0 +iniconfig==2.1.0 # via pytest -ipykernel==6.30.1 - # via agentex-sdk -ipython==9.4.0 - # via ipykernel -ipython-pygments-lexers==1.1.1 - # via ipython -jedi==0.19.2 - # via ipython -jinja2==3.1.6 - # via agentex-sdk - # via litellm -jiter==0.10.0 - # via openai -json-log-formatter==1.1.1 - # via agentex-sdk -jsonref==1.1.0 - # via agentex-sdk -jsonschema==4.25.0 - # via agentex-sdk - # via litellm - # via mcp -jsonschema-specifications==2025.4.1 - # via jsonschema -jupyter-client==8.6.3 - # via ipykernel -jupyter-core==5.8.1 - # via ipykernel - # via jupyter-client -kubernetes==28.1.0 - # via agentex-sdk -litellm==1.75.5.post1 - # via agentex-sdk markdown-it-py==3.0.0 # via rich -markupsafe==3.0.2 - # via jinja2 -matplotlib-inline==0.1.7 - # via ipykernel - # via ipython -mcp==1.12.4 - # via agentex-sdk - # via openai-agents mdurl==0.1.2 # via markdown-it-py multidict==6.7.0 @@ -181,242 +82,68 @@ multidict==6.7.0 mypy==1.17.0 mypy-extensions==1.1.0 # via mypy -nest-asyncio==1.6.0 - # via ipykernel -nexus-rpc==1.1.0 - # via temporalio -nodeenv==1.8.0 +nodeenv==1.9.1 # via pyright -nox==2023.4.22 -oauthlib==3.3.1 - # via kubernetes - # via requests-oauthlib -openai==2.7.1 - # via agentex-sdk - # via litellm - # via openai-agents -openai-agents==0.4.2 - # via agentex-sdk -opentelemetry-api==1.37.0 - # via ddtrace -packaging==23.2 - # via huggingface-hub - # via ipykernel +nox==2025.11.12 +packaging==25.0 + # via dependency-groups # via nox # via pytest pathspec==0.12.1 # via mypy -parso==0.8.4 - # via jedi -pexpect==4.9.0 - # via ipython -platformdirs==3.11.0 - # via jupyter-core +platformdirs==4.4.0 # via virtualenv pluggy==1.6.0 # via pytest -prompt-toolkit==3.0.51 - # via ipython - # via questionary -propcache==0.3.1 +propcache==0.4.1 # via aiohttp # via yarl -protobuf==5.29.5 - # via ddtrace - # via temporalio -psutil==7.0.0 - # via ipykernel -ptyprocess==0.7.0 - # via pexpect -pure-eval==0.2.3 - # via stack-data -pyasn1==0.6.1 - # via pyasn1-modules - # via rsa -pyasn1-modules==0.4.2 - # via google-auth -pydantic==2.11.9 +pydantic==2.12.5 # via agentex-sdk - # via fastapi - # via litellm - # via mcp - # via openai - # via openai-agents - # via pydantic-settings - # via python-on-whales - # via scale-gp - # via scale-gp-beta -pydantic-core==2.33.2 +pydantic-core==2.41.5 # via pydantic -pydantic-settings==2.10.1 - # via mcp -pygments==2.18.0 - # via ipython - # via ipython-pygments-lexers +pygments==2.19.2 # via pytest # via rich -pyjwt==2.10.1 - # via redis pyright==1.1.399 -pytest==8.4.1 - # via agentex-sdk +pytest==8.4.2 # via pytest-asyncio # via pytest-xdist -pytest-asyncio==1.1.0 - # via agentex-sdk -pytest-xdist==3.7.0 -python-dateutil==2.8.2 - # via jupyter-client - # via kubernetes +pytest-asyncio==1.2.0 +pytest-xdist==3.8.0 +python-dateutil==2.9.0.post0 # via time-machine -python-dotenv==1.1.1 - # via litellm - # via mcp - # via pydantic-settings -python-multipart==0.0.20 - # via mcp -python-on-whales==0.73.0 - # via agentex-sdk -pytz==2023.3.post1 - # via dirty-equals -pyyaml==6.0.2 - # via agentex-sdk - # via huggingface-hub - # via kubernetes -pyzmq==27.0.1 - # via ipykernel - # via jupyter-client -questionary==2.1.0 - # via agentex-sdk -redis==5.3.1 - # via agentex-sdk -referencing==0.36.2 - # via jsonschema - # via jsonschema-specifications -regex==2025.7.34 - # via tiktoken -requests==2.32.4 - # via datadog - # via huggingface-hub - # via kubernetes - # via 
openai-agents - # via python-on-whales - # via requests-oauthlib - # via tiktoken -requests-oauthlib==2.0.0 - # via kubernetes respx==0.22.0 -rich==13.9.4 - # via agentex-sdk - # via typer -rpds-py==0.27.0 - # via jsonschema - # via referencing -rsa==4.9.1 - # via google-auth -ruff==0.9.4 - # via agentex-sdk -scale-gp==0.1.0a59 - # via agentex-sdk -scale-gp-beta==0.1.0a20 - # via agentex-sdk -setuptools==68.2.2 - # via nodeenv -shellingham==1.5.4 - # via typer -six==1.16.0 - # via kubernetes +rich==14.2.0 +ruff==0.14.7 +six==1.17.0 # via python-dateutil sniffio==1.3.1 # via agentex-sdk - # via anyio - # via httpx - # via openai - # via scale-gp - # via scale-gp-beta -sse-starlette==3.0.2 - # via mcp -stack-data==0.6.3 - # via ipython -starlette==0.46.2 - # via fastapi - # via mcp -temporalio==1.18.2 - # via agentex-sdk -tiktoken==0.11.0 - # via litellm -time-machine==2.9.0 -tokenizers==0.21.4 - # via litellm -tornado==6.5.2 - # via ipykernel - # via jupyter-client -tqdm==4.67.1 - # via huggingface-hub - # via openai - # via python-on-whales -traitlets==5.14.3 - # via ipykernel - # via ipython - # via jupyter-client - # via jupyter-core - # via matplotlib-inline -typer==0.16.0 - # via agentex-sdk - # via mcp - # via python-on-whales -types-protobuf==6.30.2.20250809 - # via temporalio -types-requests==2.31.0.6 - # via openai-agents -types-urllib3==1.26.25.14 - # via types-requests -typing-extensions==4.12.2 +time-machine==2.19.0 +tomli==2.3.0 + # via dependency-groups + # via mypy + # via nox + # via pytest +typing-extensions==4.15.0 # via agentex-sdk # via aiosignal # via anyio - # via fastapi - # via huggingface-hub + # via exceptiongroup + # via multidict # via mypy - # via nexus-rpc - # via openai - # via openai-agents - # via opentelemetry-api # via pydantic # via pydantic-core # via pyright - # via python-on-whales - # via referencing - # via scale-gp - # via scale-gp-beta - # via temporalio - # via typer + # via pytest-asyncio # via typing-inspection # via virtualenv typing-inspection==0.4.2 # via pydantic - # via pydantic-settings -tzdata==2025.2 - # via agentex-sdk -tzlocal==5.3.1 - # via agentex-sdk -urllib3==1.26.20 - # via kubernetes - # via requests -uvicorn==0.35.0 - # via agentex-sdk - # via mcp -virtualenv==20.24.5 +virtualenv==20.35.4 # via nox -watchfiles==0.24.0 - # via agentex-sdk -wcwidth==0.2.13 - # via prompt-toolkit -websocket-client==1.8.0 - # via kubernetes -wrapt==1.17.3 - # via ddtrace -yarl==1.20.0 +yarl==1.22.0 # via aiohttp zipp==3.23.0 # via importlib-metadata diff --git a/requirements.lock b/requirements.lock index 79519671..27f5fe33 100644 --- a/requirements.lock +++ b/requirements.lock @@ -15,362 +15,62 @@ aiohappyeyeballs==2.6.1 aiohttp==3.13.2 # via agentex-sdk # via httpx-aiohttp - # via litellm -aiosignal==1.3.2 +aiosignal==1.4.0 # via aiohttp annotated-types==0.7.0 # via pydantic -anyio==4.10.0 +anyio==4.12.0 # via agentex-sdk # via httpx - # via mcp - # via openai - # via scale-gp - # via scale-gp-beta - # via sse-starlette - # via starlette - # via watchfiles -appnope==0.1.4 - # via ipykernel -asttokens==3.0.0 - # via stack-data -attrs==25.3.0 +async-timeout==5.0.1 # via aiohttp - # via jsonschema - # via referencing -bytecode==0.17.0 - # via ddtrace -cachetools==5.5.2 - # via google-auth -certifi==2023.7.22 +attrs==25.4.0 + # via aiohttp +certifi==2025.11.12 # via httpcore # via httpx - # via kubernetes - # via requests -charset-normalizer==3.4.3 - # via requests -click==8.2.1 - # via litellm - # via typer - # via uvicorn -cloudpickle==3.1.1 - # via 
agentex-sdk -colorama==0.4.6 - # via griffe -comm==0.2.3 - # via ipykernel -datadog==0.52.1 - # via agentex-sdk -ddtrace==3.15.0 - # via agentex-sdk -debugpy==1.8.16 - # via ipykernel -decorator==5.2.1 - # via ipython -distro==1.8.0 - # via agentex-sdk - # via openai - # via scale-gp - # via scale-gp-beta -envier==0.6.1 - # via ddtrace -executing==2.2.0 - # via stack-data -fastapi==0.115.14 +distro==1.9.0 # via agentex-sdk -filelock==3.19.1 - # via huggingface-hub -frozenlist==1.6.2 +exceptiongroup==1.3.1 + # via anyio +frozenlist==1.8.0 # via aiohttp # via aiosignal -fsspec==2025.7.0 - # via huggingface-hub -google-auth==2.40.3 - # via kubernetes -griffe==1.12.0 - # via openai-agents h11==0.16.0 # via httpcore - # via uvicorn -hf-xet==1.1.7 - # via huggingface-hub httpcore==1.0.9 # via httpx -httpx==0.27.2 +httpx==0.28.1 # via agentex-sdk # via httpx-aiohttp - # via litellm - # via mcp - # via openai - # via scale-gp - # via scale-gp-beta httpx-aiohttp==0.1.9 # via agentex-sdk -httpx-sse==0.4.1 - # via mcp -huggingface-hub==0.34.4 - # via tokenizers -idna==3.4 +idna==3.11 # via anyio # via httpx - # via requests # via yarl -importlib-metadata==8.7.0 - # via litellm - # via opentelemetry-api -iniconfig==2.1.0 - # via pytest -ipykernel==6.30.1 - # via agentex-sdk -ipython==9.4.0 - # via ipykernel -ipython-pygments-lexers==1.1.1 - # via ipython -jedi==0.19.2 - # via ipython -jinja2==3.1.6 - # via agentex-sdk - # via litellm -jiter==0.10.0 - # via openai -json-log-formatter==1.1.1 - # via agentex-sdk -jsonref==1.1.0 - # via agentex-sdk -jsonschema==4.25.0 - # via agentex-sdk - # via litellm - # via mcp -jsonschema-specifications==2025.4.1 - # via jsonschema -jupyter-client==8.6.3 - # via ipykernel -jupyter-core==5.8.1 - # via ipykernel - # via jupyter-client -kubernetes==28.1.0 - # via agentex-sdk -litellm==1.75.5.post1 - # via agentex-sdk -markdown-it-py==4.0.0 - # via rich -markupsafe==3.0.2 - # via jinja2 -matplotlib-inline==0.1.7 - # via ipykernel - # via ipython -mcp==1.12.4 - # via agentex-sdk - # via openai-agents -mdurl==0.1.2 - # via markdown-it-py -multidict==6.4.4 +multidict==6.7.0 # via aiohttp # via yarl -nest-asyncio==1.6.0 - # via ipykernel -nexus-rpc==1.1.0 - # via temporalio -oauthlib==3.3.1 - # via kubernetes - # via requests-oauthlib -openai==2.7.1 - # via agentex-sdk - # via litellm - # via openai-agents -openai-agents==0.4.2 - # via agentex-sdk -opentelemetry-api==1.37.0 - # via ddtrace -packaging==25.0 - # via huggingface-hub - # via ipykernel - # via pytest -parso==0.8.4 - # via jedi -pexpect==4.9.0 - # via ipython -platformdirs==4.3.8 - # via jupyter-core -pluggy==1.6.0 - # via pytest -prompt-toolkit==3.0.51 - # via ipython - # via questionary -propcache==0.3.1 +propcache==0.4.1 # via aiohttp # via yarl pydantic==2.12.5 # via agentex-sdk pydantic-core==2.41.5 -protobuf==5.29.5 - # via ddtrace - # via temporalio -psutil==7.0.0 - # via ipykernel -ptyprocess==0.7.0 - # via pexpect -pure-eval==0.2.3 - # via stack-data -pyasn1==0.6.1 - # via pyasn1-modules - # via rsa -pyasn1-modules==0.4.2 - # via google-auth # via pydantic -pydantic-settings==2.10.1 - # via mcp -pygments==2.19.2 - # via ipython - # via ipython-pygments-lexers - # via pytest - # via rich -pyjwt==2.10.1 - # via redis -pytest==8.4.1 - # via agentex-sdk - # via pytest-asyncio -pytest-asyncio==1.1.0 - # via agentex-sdk -python-dateutil==2.9.0.post0 - # via jupyter-client - # via kubernetes -python-dotenv==1.1.1 - # via litellm - # via mcp - # via pydantic-settings -python-multipart==0.0.20 - # via mcp 
-python-on-whales==0.73.0 - # via agentex-sdk -pyyaml==6.0.2 - # via agentex-sdk - # via huggingface-hub - # via kubernetes -pyzmq==27.0.1 - # via ipykernel - # via jupyter-client -questionary==2.1.0 - # via agentex-sdk -redis==5.3.1 - # via agentex-sdk -referencing==0.36.2 - # via jsonschema - # via jsonschema-specifications -regex==2025.7.34 - # via tiktoken -requests==2.32.4 - # via datadog - # via huggingface-hub - # via kubernetes - # via openai-agents - # via python-on-whales - # via requests-oauthlib - # via tiktoken -requests-oauthlib==2.0.0 - # via kubernetes -rich==13.9.4 - # via agentex-sdk - # via typer -rpds-py==0.27.0 - # via jsonschema - # via referencing -rsa==4.9.1 - # via google-auth -ruff==0.12.9 - # via agentex-sdk -scale-gp==0.1.0a59 - # via agentex-sdk -scale-gp-beta==0.1.0a20 - # via agentex-sdk -shellingham==1.5.4 - # via typer -six==1.17.0 - # via kubernetes - # via python-dateutil -sniffio==1.3.0 +sniffio==1.3.1 # via agentex-sdk typing-extensions==4.15.0 - # via httpx - # via openai - # via scale-gp - # via scale-gp-beta -sse-starlette==3.0.2 - # via mcp -stack-data==0.6.3 - # via ipython -starlette==0.46.2 - # via fastapi - # via mcp -temporalio==1.18.2 - # via agentex-sdk -tiktoken==0.11.0 - # via litellm -tokenizers==0.21.4 - # via litellm -tornado==6.5.2 - # via ipykernel - # via jupyter-client -tqdm==4.67.1 - # via huggingface-hub - # via openai - # via python-on-whales -traitlets==5.14.3 - # via ipykernel - # via ipython - # via jupyter-client - # via jupyter-core - # via matplotlib-inline -typer==0.16.0 - # via agentex-sdk - # via mcp - # via python-on-whales -types-protobuf==6.30.2.20250809 - # via temporalio -types-requests==2.31.0.6 - # via openai-agents -types-urllib3==1.26.25.14 - # via types-requests # via agentex-sdk # via aiosignal # via anyio - # via fastapi - # via huggingface-hub - # via nexus-rpc - # via openai - # via openai-agents - # via opentelemetry-api + # via exceptiongroup + # via multidict # via pydantic # via pydantic-core - # via python-on-whales - # via referencing - # via scale-gp - # via scale-gp-beta - # via temporalio - # via typer # via typing-inspection typing-inspection==0.4.2 # via pydantic - # via pydantic-settings -tzdata==2025.2 - # via agentex-sdk -tzlocal==5.3.1 - # via agentex-sdk -urllib3==1.26.20 - # via kubernetes - # via requests -uvicorn==0.35.0 - # via agentex-sdk - # via mcp -watchfiles==0.24.0 - # via agentex-sdk -wcwidth==0.2.13 - # via prompt-toolkit -websocket-client==1.8.0 - # via kubernetes -wrapt==1.17.3 - # via ddtrace -yarl==1.20.0 +yarl==1.22.0 # via aiohttp -zipp==3.23.0 - # via importlib-metadata diff --git a/scripts/test b/scripts/test index 2c69d995..dbeda2d2 100755 --- a/scripts/test +++ b/scripts/test @@ -56,3 +56,6 @@ export DEFER_PYDANTIC_BUILD=false echo "==> Running tests" rye run pytest "$@" + +echo "==> Running Pydantic v1 tests" +rye run nox -s test-pydantic-v1 -- "$@" diff --git a/src/agentex/_base_client.py b/src/agentex/_base_client.py index 108a8a52..2d022cda 100644 --- a/src/agentex/_base_client.py +++ b/src/agentex/_base_client.py @@ -1247,9 +1247,12 @@ def patch( *, cast_to: Type[ResponseT], body: Body | None = None, + files: RequestFiles | None = None, options: RequestOptions = {}, ) -> ResponseT: - opts = FinalRequestOptions.construct(method="patch", url=path, json_data=body, **options) + opts = FinalRequestOptions.construct( + method="patch", url=path, json_data=body, files=to_httpx_files(files), **options + ) return self.request(cast_to, opts) def put( @@ -1767,9 +1770,12 @@ 
async def patch( *, cast_to: Type[ResponseT], body: Body | None = None, + files: RequestFiles | None = None, options: RequestOptions = {}, ) -> ResponseT: - opts = FinalRequestOptions.construct(method="patch", url=path, json_data=body, **options) + opts = FinalRequestOptions.construct( + method="patch", url=path, json_data=body, files=to_httpx_files(files), **options + ) return await self.request(cast_to, opts) async def put( diff --git a/src/agentex/_client.py b/src/agentex/_client.py index 170ea0ef..0cb9fc45 100644 --- a/src/agentex/_client.py +++ b/src/agentex/_client.py @@ -3,7 +3,7 @@ from __future__ import annotations import os -from typing import Any, Dict, Mapping, cast +from typing import TYPE_CHECKING, Any, Dict, Mapping, cast from typing_extensions import Self, Literal, override import httpx @@ -20,8 +20,8 @@ not_given, ) from ._utils import is_given, get_async_library +from ._compat import cached_property from ._version import __version__ -from .resources import spans, tasks, agents, events, states, tracker, deployment_history from ._streaming import Stream as Stream, AsyncStream as AsyncStream from ._exceptions import APIStatusError from ._base_client import ( @@ -29,7 +29,16 @@ SyncAPIClient, AsyncAPIClient, ) -from .resources.messages import messages + +if TYPE_CHECKING: + from .resources import spans, tasks, agents, events, states, tracker, messages + from .resources.spans import SpansResource, AsyncSpansResource + from .resources.tasks import TasksResource, AsyncTasksResource + from .resources.agents import AgentsResource, AsyncAgentsResource + from .resources.events import EventsResource, AsyncEventsResource + from .resources.states import StatesResource, AsyncStatesResource + from .resources.tracker import TrackerResource, AsyncTrackerResource + from .resources.messages.messages import MessagesResource, AsyncMessagesResource __all__ = [ "ENVIRONMENTS", @@ -50,17 +59,6 @@ class Agentex(SyncAPIClient): - agents: agents.AgentsResource - tasks: tasks.TasksResource - messages: messages.MessagesResource - spans: spans.SpansResource - states: states.StatesResource - events: events.EventsResource - tracker: tracker.TrackerResource - deployment_history: deployment_history.DeploymentHistoryResource - with_raw_response: AgentexWithRawResponse - with_streaming_response: AgentexWithStreamedResponse - # client options api_key: str | None @@ -135,16 +133,55 @@ def __init__( _strict_response_validation=_strict_response_validation, ) - self.agents = agents.AgentsResource(self) - self.tasks = tasks.TasksResource(self) - self.messages = messages.MessagesResource(self) - self.spans = spans.SpansResource(self) - self.states = states.StatesResource(self) - self.events = events.EventsResource(self) - self.tracker = tracker.TrackerResource(self) - self.deployment_history = deployment_history.DeploymentHistoryResource(self) - self.with_raw_response = AgentexWithRawResponse(self) - self.with_streaming_response = AgentexWithStreamedResponse(self) + @cached_property + def agents(self) -> AgentsResource: + from .resources.agents import AgentsResource + + return AgentsResource(self) + + @cached_property + def tasks(self) -> TasksResource: + from .resources.tasks import TasksResource + + return TasksResource(self) + + @cached_property + def messages(self) -> MessagesResource: + from .resources.messages import MessagesResource + + return MessagesResource(self) + + @cached_property + def spans(self) -> SpansResource: + from .resources.spans import SpansResource + + return SpansResource(self) + + 
@cached_property + def states(self) -> StatesResource: + from .resources.states import StatesResource + + return StatesResource(self) + + @cached_property + def events(self) -> EventsResource: + from .resources.events import EventsResource + + return EventsResource(self) + + @cached_property + def tracker(self) -> TrackerResource: + from .resources.tracker import TrackerResource + + return TrackerResource(self) + + @cached_property + def with_raw_response(self) -> AgentexWithRawResponse: + return AgentexWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AgentexWithStreamedResponse: + return AgentexWithStreamedResponse(self) @property @override @@ -256,17 +293,6 @@ def _make_status_error( class AsyncAgentex(AsyncAPIClient): - agents: agents.AsyncAgentsResource - tasks: tasks.AsyncTasksResource - messages: messages.AsyncMessagesResource - spans: spans.AsyncSpansResource - states: states.AsyncStatesResource - events: events.AsyncEventsResource - tracker: tracker.AsyncTrackerResource - deployment_history: deployment_history.AsyncDeploymentHistoryResource - with_raw_response: AsyncAgentexWithRawResponse - with_streaming_response: AsyncAgentexWithStreamedResponse - # client options api_key: str | None @@ -341,16 +367,55 @@ def __init__( _strict_response_validation=_strict_response_validation, ) - self.agents = agents.AsyncAgentsResource(self) - self.tasks = tasks.AsyncTasksResource(self) - self.messages = messages.AsyncMessagesResource(self) - self.spans = spans.AsyncSpansResource(self) - self.states = states.AsyncStatesResource(self) - self.events = events.AsyncEventsResource(self) - self.tracker = tracker.AsyncTrackerResource(self) - self.deployment_history = deployment_history.AsyncDeploymentHistoryResource(self) - self.with_raw_response = AsyncAgentexWithRawResponse(self) - self.with_streaming_response = AsyncAgentexWithStreamedResponse(self) + @cached_property + def agents(self) -> AsyncAgentsResource: + from .resources.agents import AsyncAgentsResource + + return AsyncAgentsResource(self) + + @cached_property + def tasks(self) -> AsyncTasksResource: + from .resources.tasks import AsyncTasksResource + + return AsyncTasksResource(self) + + @cached_property + def messages(self) -> AsyncMessagesResource: + from .resources.messages import AsyncMessagesResource + + return AsyncMessagesResource(self) + + @cached_property + def spans(self) -> AsyncSpansResource: + from .resources.spans import AsyncSpansResource + + return AsyncSpansResource(self) + + @cached_property + def states(self) -> AsyncStatesResource: + from .resources.states import AsyncStatesResource + + return AsyncStatesResource(self) + + @cached_property + def events(self) -> AsyncEventsResource: + from .resources.events import AsyncEventsResource + + return AsyncEventsResource(self) + + @cached_property + def tracker(self) -> AsyncTrackerResource: + from .resources.tracker import AsyncTrackerResource + + return AsyncTrackerResource(self) + + @cached_property + def with_raw_response(self) -> AsyncAgentexWithRawResponse: + return AsyncAgentexWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncAgentexWithStreamedResponse: + return AsyncAgentexWithStreamedResponse(self) @property @override @@ -462,57 +527,199 @@ def _make_status_error( class AgentexWithRawResponse: + _client: Agentex + def __init__(self, client: Agentex) -> None: - self.agents = agents.AgentsResourceWithRawResponse(client.agents) - self.tasks = tasks.TasksResourceWithRawResponse(client.tasks) - 
self.messages = messages.MessagesResourceWithRawResponse(client.messages) - self.spans = spans.SpansResourceWithRawResponse(client.spans) - self.states = states.StatesResourceWithRawResponse(client.states) - self.events = events.EventsResourceWithRawResponse(client.events) - self.tracker = tracker.TrackerResourceWithRawResponse(client.tracker) - self.deployment_history = deployment_history.DeploymentHistoryResourceWithRawResponse(client.deployment_history) + self._client = client + + @cached_property + def agents(self) -> agents.AgentsResourceWithRawResponse: + from .resources.agents import AgentsResourceWithRawResponse + + return AgentsResourceWithRawResponse(self._client.agents) + + @cached_property + def tasks(self) -> tasks.TasksResourceWithRawResponse: + from .resources.tasks import TasksResourceWithRawResponse + + return TasksResourceWithRawResponse(self._client.tasks) + + @cached_property + def messages(self) -> messages.MessagesResourceWithRawResponse: + from .resources.messages import MessagesResourceWithRawResponse + + return MessagesResourceWithRawResponse(self._client.messages) + + @cached_property + def spans(self) -> spans.SpansResourceWithRawResponse: + from .resources.spans import SpansResourceWithRawResponse + + return SpansResourceWithRawResponse(self._client.spans) + + @cached_property + def states(self) -> states.StatesResourceWithRawResponse: + from .resources.states import StatesResourceWithRawResponse + + return StatesResourceWithRawResponse(self._client.states) + + @cached_property + def events(self) -> events.EventsResourceWithRawResponse: + from .resources.events import EventsResourceWithRawResponse + + return EventsResourceWithRawResponse(self._client.events) + + @cached_property + def tracker(self) -> tracker.TrackerResourceWithRawResponse: + from .resources.tracker import TrackerResourceWithRawResponse + + return TrackerResourceWithRawResponse(self._client.tracker) class AsyncAgentexWithRawResponse: + _client: AsyncAgentex + def __init__(self, client: AsyncAgentex) -> None: - self.agents = agents.AsyncAgentsResourceWithRawResponse(client.agents) - self.tasks = tasks.AsyncTasksResourceWithRawResponse(client.tasks) - self.messages = messages.AsyncMessagesResourceWithRawResponse(client.messages) - self.spans = spans.AsyncSpansResourceWithRawResponse(client.spans) - self.states = states.AsyncStatesResourceWithRawResponse(client.states) - self.events = events.AsyncEventsResourceWithRawResponse(client.events) - self.tracker = tracker.AsyncTrackerResourceWithRawResponse(client.tracker) - self.deployment_history = deployment_history.AsyncDeploymentHistoryResourceWithRawResponse( - client.deployment_history - ) + self._client = client + + @cached_property + def agents(self) -> agents.AsyncAgentsResourceWithRawResponse: + from .resources.agents import AsyncAgentsResourceWithRawResponse + + return AsyncAgentsResourceWithRawResponse(self._client.agents) + + @cached_property + def tasks(self) -> tasks.AsyncTasksResourceWithRawResponse: + from .resources.tasks import AsyncTasksResourceWithRawResponse + + return AsyncTasksResourceWithRawResponse(self._client.tasks) + + @cached_property + def messages(self) -> messages.AsyncMessagesResourceWithRawResponse: + from .resources.messages import AsyncMessagesResourceWithRawResponse + + return AsyncMessagesResourceWithRawResponse(self._client.messages) + + @cached_property + def spans(self) -> spans.AsyncSpansResourceWithRawResponse: + from .resources.spans import AsyncSpansResourceWithRawResponse + + return 
AsyncSpansResourceWithRawResponse(self._client.spans) + + @cached_property + def states(self) -> states.AsyncStatesResourceWithRawResponse: + from .resources.states import AsyncStatesResourceWithRawResponse + + return AsyncStatesResourceWithRawResponse(self._client.states) + + @cached_property + def events(self) -> events.AsyncEventsResourceWithRawResponse: + from .resources.events import AsyncEventsResourceWithRawResponse + + return AsyncEventsResourceWithRawResponse(self._client.events) + + @cached_property + def tracker(self) -> tracker.AsyncTrackerResourceWithRawResponse: + from .resources.tracker import AsyncTrackerResourceWithRawResponse + + return AsyncTrackerResourceWithRawResponse(self._client.tracker) class AgentexWithStreamedResponse: + _client: Agentex + def __init__(self, client: Agentex) -> None: - self.agents = agents.AgentsResourceWithStreamingResponse(client.agents) - self.tasks = tasks.TasksResourceWithStreamingResponse(client.tasks) - self.messages = messages.MessagesResourceWithStreamingResponse(client.messages) - self.spans = spans.SpansResourceWithStreamingResponse(client.spans) - self.states = states.StatesResourceWithStreamingResponse(client.states) - self.events = events.EventsResourceWithStreamingResponse(client.events) - self.tracker = tracker.TrackerResourceWithStreamingResponse(client.tracker) - self.deployment_history = deployment_history.DeploymentHistoryResourceWithStreamingResponse( - client.deployment_history - ) + self._client = client + + @cached_property + def agents(self) -> agents.AgentsResourceWithStreamingResponse: + from .resources.agents import AgentsResourceWithStreamingResponse + + return AgentsResourceWithStreamingResponse(self._client.agents) + + @cached_property + def tasks(self) -> tasks.TasksResourceWithStreamingResponse: + from .resources.tasks import TasksResourceWithStreamingResponse + + return TasksResourceWithStreamingResponse(self._client.tasks) + + @cached_property + def messages(self) -> messages.MessagesResourceWithStreamingResponse: + from .resources.messages import MessagesResourceWithStreamingResponse + + return MessagesResourceWithStreamingResponse(self._client.messages) + + @cached_property + def spans(self) -> spans.SpansResourceWithStreamingResponse: + from .resources.spans import SpansResourceWithStreamingResponse + + return SpansResourceWithStreamingResponse(self._client.spans) + + @cached_property + def states(self) -> states.StatesResourceWithStreamingResponse: + from .resources.states import StatesResourceWithStreamingResponse + + return StatesResourceWithStreamingResponse(self._client.states) + + @cached_property + def events(self) -> events.EventsResourceWithStreamingResponse: + from .resources.events import EventsResourceWithStreamingResponse + + return EventsResourceWithStreamingResponse(self._client.events) + + @cached_property + def tracker(self) -> tracker.TrackerResourceWithStreamingResponse: + from .resources.tracker import TrackerResourceWithStreamingResponse + + return TrackerResourceWithStreamingResponse(self._client.tracker) class AsyncAgentexWithStreamedResponse: + _client: AsyncAgentex + def __init__(self, client: AsyncAgentex) -> None: - self.agents = agents.AsyncAgentsResourceWithStreamingResponse(client.agents) - self.tasks = tasks.AsyncTasksResourceWithStreamingResponse(client.tasks) - self.messages = messages.AsyncMessagesResourceWithStreamingResponse(client.messages) - self.spans = spans.AsyncSpansResourceWithStreamingResponse(client.spans) - self.states = 
states.AsyncStatesResourceWithStreamingResponse(client.states) - self.events = events.AsyncEventsResourceWithStreamingResponse(client.events) - self.tracker = tracker.AsyncTrackerResourceWithStreamingResponse(client.tracker) - self.deployment_history = deployment_history.AsyncDeploymentHistoryResourceWithStreamingResponse( - client.deployment_history - ) + self._client = client + + @cached_property + def agents(self) -> agents.AsyncAgentsResourceWithStreamingResponse: + from .resources.agents import AsyncAgentsResourceWithStreamingResponse + + return AsyncAgentsResourceWithStreamingResponse(self._client.agents) + + @cached_property + def tasks(self) -> tasks.AsyncTasksResourceWithStreamingResponse: + from .resources.tasks import AsyncTasksResourceWithStreamingResponse + + return AsyncTasksResourceWithStreamingResponse(self._client.tasks) + + @cached_property + def messages(self) -> messages.AsyncMessagesResourceWithStreamingResponse: + from .resources.messages import AsyncMessagesResourceWithStreamingResponse + + return AsyncMessagesResourceWithStreamingResponse(self._client.messages) + + @cached_property + def spans(self) -> spans.AsyncSpansResourceWithStreamingResponse: + from .resources.spans import AsyncSpansResourceWithStreamingResponse + + return AsyncSpansResourceWithStreamingResponse(self._client.spans) + + @cached_property + def states(self) -> states.AsyncStatesResourceWithStreamingResponse: + from .resources.states import AsyncStatesResourceWithStreamingResponse + + return AsyncStatesResourceWithStreamingResponse(self._client.states) + + @cached_property + def events(self) -> events.AsyncEventsResourceWithStreamingResponse: + from .resources.events import AsyncEventsResourceWithStreamingResponse + + return AsyncEventsResourceWithStreamingResponse(self._client.events) + + @cached_property + def tracker(self) -> tracker.AsyncTrackerResourceWithStreamingResponse: + from .resources.tracker import AsyncTrackerResourceWithStreamingResponse + + return AsyncTrackerResourceWithStreamingResponse(self._client.tracker) Client = Agentex diff --git a/src/agentex/_constants.py b/src/agentex/_constants.py index ccb3ec52..6ddf2c71 100644 --- a/src/agentex/_constants.py +++ b/src/agentex/_constants.py @@ -6,9 +6,9 @@ OVERRIDE_CAST_TO_HEADER = "____stainless_override_cast_to" # default timeout is 1 minute -DEFAULT_TIMEOUT = httpx.Timeout(timeout=300, connect=5.0) +DEFAULT_TIMEOUT = httpx.Timeout(timeout=60, connect=5.0) DEFAULT_MAX_RETRIES = 2 -DEFAULT_CONNECTION_LIMITS = httpx.Limits(max_connections=1000, max_keepalive_connections=1000) +DEFAULT_CONNECTION_LIMITS = httpx.Limits(max_connections=100, max_keepalive_connections=20) INITIAL_RETRY_DELAY = 0.5 MAX_RETRY_DELAY = 8.0 diff --git a/src/agentex/_utils/_typing.py b/src/agentex/_utils/_typing.py index e548aa2d..193109f3 100644 --- a/src/agentex/_utils/_typing.py +++ b/src/agentex/_utils/_typing.py @@ -53,9 +53,7 @@ def is_typevar(typ: type) -> bool: _TYPE_ALIAS_TYPES: tuple[type[typing_extensions.TypeAliasType], ...] = (typing_extensions.TypeAliasType,) if sys.version_info >= (3, 12): - # NOTE: This type ignore will be overwritten by Stainless generator. 
- # TODO: Update Stainless config to include this type ignore or move to lib/ - _TYPE_ALIAS_TYPES = (*_TYPE_ALIAS_TYPES, typing.TypeAliasType) # type: ignore[assignment] + _TYPE_ALIAS_TYPES = (*_TYPE_ALIAS_TYPES, typing.TypeAliasType) def is_type_alias_type(tp: Any, /) -> TypeIs[typing_extensions.TypeAliasType]: diff --git a/src/agentex/lib/__init__.py b/src/agentex/lib/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/agentex/lib/adk/__init__.py b/src/agentex/lib/adk/__init__.py deleted file mode 100644 index cc4e83db..00000000 --- a/src/agentex/lib/adk/__init__.py +++ /dev/null @@ -1,44 +0,0 @@ -# ruff: noqa: I001 -# Import order matters here to avoid circular imports -# The _modules must be imported before providers/utils - -from agentex.lib.adk._modules.acp import ACPModule -from agentex.lib.adk._modules.agents import AgentsModule -from agentex.lib.adk._modules.agent_task_tracker import AgentTaskTrackerModule -from agentex.lib.adk._modules.events import EventsModule -from agentex.lib.adk._modules.messages import MessagesModule -from agentex.lib.adk._modules.state import StateModule -from agentex.lib.adk._modules.streaming import StreamingModule -from agentex.lib.adk._modules.tasks import TasksModule -from agentex.lib.adk._modules.tracing import TracingModule - -from agentex.lib.adk import providers -from agentex.lib.adk import utils - -acp = ACPModule() -agents = AgentsModule() -tasks = TasksModule() -messages = MessagesModule() -state = StateModule() -streaming = StreamingModule() -tracing = TracingModule() -events = EventsModule() -agent_task_tracker = AgentTaskTrackerModule() - -__all__ = [ - # Core - "acp", - "agents", - "tasks", - "messages", - "state", - "streaming", - "tracing", - "events", - "agent_task_tracker", - - # Providers - "providers", - # Utils - "utils", -] diff --git a/src/agentex/lib/adk/_modules/__init__.py b/src/agentex/lib/adk/_modules/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/agentex/lib/adk/_modules/acp.py b/src/agentex/lib/adk/_modules/acp.py deleted file mode 100644 index 0c8cff05..00000000 --- a/src/agentex/lib/adk/_modules/acp.py +++ /dev/null @@ -1,290 +0,0 @@ -# ruff: noqa: I001 -# Import order matters - AsyncTracer must come after client import to avoid circular imports -from __future__ import annotations -from datetime import timedelta -from typing import Any, List - -from agentex.types import Event -from temporalio.common import RetryPolicy - -from agentex import AsyncAgentex # noqa: F401 -from agentex.lib.adk.utils._modules.client import create_async_agentex_client -from agentex.lib.core.services.adk.acp.acp import ACPService -from agentex.lib.core.temporal.activities.activity_helpers import ActivityHelpers -from agentex.lib.core.temporal.activities.adk.acp.acp_activities import ( - ACPActivityName, - EventSendParams, - MessageSendParams, - TaskCancelParams, - TaskCreateParams, -) -from agentex.lib.core.tracing.tracer import AsyncTracer -from agentex.types.task_message import TaskMessage -from agentex.types.task import Task -from agentex.lib.utils.logging import make_logger -from agentex.lib.utils.temporal import in_temporal_workflow -from agentex.types.task_message_content import TaskMessageContent - -logger = make_logger(__name__) - -DEFAULT_RETRY_POLICY = RetryPolicy(maximum_attempts=0) - - -class ACPModule: - """ - Module for managing Agent to Client Protocol (ACP) agent operations in Agentex. 
- - This interface provides high-level methods for interacting with the agent through the ACP. - """ - - def __init__(self, acp_service: ACPService | None = None): - """ - Initialize the ACP module. - - Args: - acp_service (Optional[ACPService]): Optional pre-configured ACP service. If None, one will be auto-initialized. - """ - if acp_service is None: - agentex_client = create_async_agentex_client() - tracer = AsyncTracer(agentex_client) - self._acp_service = ACPService(agentex_client=agentex_client, tracer=tracer) - else: - self._acp_service = acp_service - - async def create_task( - self, - name: str | None = None, - agent_id: str | None = None, - agent_name: str | None = None, - params: dict[str, Any] | None = None, - trace_id: str | None = None, - parent_span_id: str | None = None, - start_to_close_timeout: timedelta = timedelta(seconds=5), - heartbeat_timeout: timedelta = timedelta(seconds=5), - retry_policy: RetryPolicy = DEFAULT_RETRY_POLICY, - request: dict[str, Any] | None = None, - ) -> Task: - """ - Create a new task. - - Args: - name: The name of the task. - agent_id: The ID of the agent to create the task for. - agent_name: The name of the agent to create the task for. - params: The parameters for the task. - trace_id: The trace ID for the task. - parent_span_id: The parent span ID for the task. - start_to_close_timeout: The start to close timeout for the task. - heartbeat_timeout: The heartbeat timeout for the task. - retry_policy: The retry policy for the task. - request: Additional request context including headers to forward to the agent. - - Returns: - The task entry. - """ - if in_temporal_workflow(): - return await ActivityHelpers.execute_activity( - activity_name=ACPActivityName.TASK_CREATE, - request=TaskCreateParams( - name=name, - agent_id=agent_id, - agent_name=agent_name, - params=params, - trace_id=trace_id, - parent_span_id=parent_span_id, - request=request, - ), - response_type=Task, - start_to_close_timeout=start_to_close_timeout, - retry_policy=retry_policy, - heartbeat_timeout=heartbeat_timeout, - ) - else: - return await self._acp_service.task_create( - name=name, - agent_id=agent_id, - agent_name=agent_name, - params=params, - trace_id=trace_id, - parent_span_id=parent_span_id, - request=request, - ) - - async def send_event( - self, - task_id: str, - content: TaskMessageContent, - agent_id: str | None = None, - agent_name: str | None = None, - trace_id: str | None = None, - parent_span_id: str | None = None, - start_to_close_timeout: timedelta = timedelta(seconds=5), - heartbeat_timeout: timedelta = timedelta(seconds=5), - retry_policy: RetryPolicy = DEFAULT_RETRY_POLICY, - request: dict[str, Any] | None = None, - ) -> Event: - """ - Send an event to a task. - - Args: - task_id: The ID of the task to send the event to. - content: The content of the event. - agent_id: The ID of the agent to send the event to. - agent_name: The name of the agent to send the event to. - trace_id: The trace ID for the event. - parent_span_id: The parent span ID for the event. - start_to_close_timeout: The start to close timeout for the event. - heartbeat_timeout: The heartbeat timeout for the event. - retry_policy: The retry policy for the event. - request: Additional request context including headers to forward to the agent. - - Returns: - The event entry.
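-
-        Example (an illustrative sketch; the task ID and agent name are
-        hypothetical, and the content construction is illustrative):
-
-            event = await acp.send_event(
-                task_id="task_123",
-                agent_name="my-agent",
-                content=TextContent(type="text", author="user", content="hi"),
-            )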
- """ - if in_temporal_workflow(): - return await ActivityHelpers.execute_activity( - activity_name=ACPActivityName.EVENT_SEND, - request=EventSendParams( - agent_id=agent_id, - agent_name=agent_name, - task_id=task_id, - content=content, - trace_id=trace_id, - parent_span_id=parent_span_id, - request=request, - ), - response_type=None, - start_to_close_timeout=start_to_close_timeout, - retry_policy=retry_policy, - heartbeat_timeout=heartbeat_timeout, - ) - else: - return await self._acp_service.event_send( - agent_id=agent_id, - agent_name=agent_name, - task_id=task_id, - content=content, - trace_id=trace_id, - parent_span_id=parent_span_id, - request=request, - ) - - async def send_message( - self, - content: TaskMessageContent, - task_id: str | None = None, - agent_id: str | None = None, - agent_name: str | None = None, - trace_id: str | None = None, - parent_span_id: str | None = None, - start_to_close_timeout: timedelta = timedelta(seconds=5), - heartbeat_timeout: timedelta = timedelta(seconds=5), - retry_policy: RetryPolicy = DEFAULT_RETRY_POLICY, - request: dict[str, Any] | None = None, - ) -> List[TaskMessage]: - """ - Send a message to a task. - - Args: - content: The task message content to send to the task. - task_id: The ID of the task to send the message to. - agent_id: The ID of the agent to send the message to. - agent_name: The name of the agent to send the message to. - trace_id: The trace ID for the message. - parent_span_id: The parent span ID for the message. - start_to_close_timeout: The start to close timeout for the message. - heartbeat_timeout: The heartbeat timeout for the message. - retry_policy: The retry policy for the message. - request: Additional request context including headers to forward to the agent. - - Returns: - The message entry. - """ - if in_temporal_workflow(): - return await ActivityHelpers.execute_activity( - activity_name=ACPActivityName.MESSAGE_SEND, - request=MessageSendParams( - agent_id=agent_id, - agent_name=agent_name, - task_id=task_id, - content=content, - trace_id=trace_id, - parent_span_id=parent_span_id, - request=request, - ), - response_type=TaskMessage, - start_to_close_timeout=start_to_close_timeout, - retry_policy=retry_policy, - heartbeat_timeout=heartbeat_timeout, - ) - else: - return await self._acp_service.message_send( - agent_id=agent_id, - agent_name=agent_name, - task_id=task_id, - content=content, - trace_id=trace_id, - parent_span_id=parent_span_id, - request=request, - ) - - async def cancel_task( - self, - task_id: str | None = None, - task_name: str | None = None, - agent_id: str | None = None, - agent_name: str | None = None, - trace_id: str | None = None, - parent_span_id: str | None = None, - start_to_close_timeout: timedelta = timedelta(seconds=5), - heartbeat_timeout: timedelta = timedelta(seconds=5), - retry_policy: RetryPolicy = DEFAULT_RETRY_POLICY, - request: dict[str, Any] | None = None, - ) -> Task: - """ - Cancel a task by sending cancel request to the agent that owns the task. - - Args: - task_id: ID of the task to cancel. - task_name: Name of the task to cancel. - agent_id: ID of the agent that owns the task. - agent_name: Name of the agent that owns the task. - trace_id: The trace ID for the task. - parent_span_id: The parent span ID for the task. - start_to_close_timeout: The start to close timeout for the task. - heartbeat_timeout: The heartbeat timeout for the task. - retry_policy: The retry policy for the task. - request: Additional request context including headers to forward to the agent. 
- - Returns: - The task entry. - - Raises: - ValueError: If neither agent_name nor agent_id is provided, - or if neither task_name nor task_id is provided - """ - if in_temporal_workflow(): - return await ActivityHelpers.execute_activity( - activity_name=ACPActivityName.TASK_CANCEL, - request=TaskCancelParams( - task_id=task_id, - task_name=task_name, - agent_id=agent_id, - agent_name=agent_name, - trace_id=trace_id, - parent_span_id=parent_span_id, - request=request, - ), - response_type=None, - start_to_close_timeout=start_to_close_timeout, - retry_policy=retry_policy, - heartbeat_timeout=heartbeat_timeout, - ) - else: - return await self._acp_service.task_cancel( - task_id=task_id, - task_name=task_name, - agent_id=agent_id, - agent_name=agent_name, - trace_id=trace_id, - parent_span_id=parent_span_id, - request=request, - ) diff --git a/src/agentex/lib/adk/_modules/agent_task_tracker.py b/src/agentex/lib/adk/_modules/agent_task_tracker.py deleted file mode 100644 index 733372ec..00000000 --- a/src/agentex/lib/adk/_modules/agent_task_tracker.py +++ /dev/null @@ -1,180 +0,0 @@ -# ruff: noqa: I001 -# Import order matters - AsyncTracer must come after client import to avoid circular imports -from __future__ import annotations -from datetime import timedelta - -from temporalio.common import RetryPolicy - -from agentex import AsyncAgentex # noqa: F401 -from agentex.lib.adk.utils._modules.client import create_async_agentex_client -from agentex.lib.core.services.adk.agent_task_tracker import AgentTaskTrackerService -from agentex.lib.core.temporal.activities.activity_helpers import ActivityHelpers -from agentex.lib.core.temporal.activities.adk.agent_task_tracker_activities import ( - AgentTaskTrackerActivityName, - GetAgentTaskTrackerByTaskAndAgentParams, - GetAgentTaskTrackerParams, - UpdateAgentTaskTrackerParams, -) -from agentex.lib.core.tracing.tracer import AsyncTracer -from agentex.types.agent_task_tracker import AgentTaskTracker -from agentex.lib.utils.logging import make_logger -from agentex.lib.utils.temporal import in_temporal_workflow - -logger = make_logger(__name__) - -# Default retry policy for all agent task tracker operations -DEFAULT_RETRY_POLICY = RetryPolicy(maximum_attempts=1) - - -class AgentTaskTrackerModule: - """ - Module for managing agent task trackers in Agentex. - Provides high-level async methods for retrieving, filtering, and updating agent task trackers. - """ - - def __init__( - self, - agent_task_tracker_service: AgentTaskTrackerService | None = None, - ): - if agent_task_tracker_service is None: - agentex_client = create_async_agentex_client() - tracer = AsyncTracer(agentex_client) - self._agent_task_tracker_service = AgentTaskTrackerService( - agentex_client=agentex_client, tracer=tracer - ) - else: - self._agent_task_tracker_service = agent_task_tracker_service - - async def get( - self, - tracker_id: str, - trace_id: str | None = None, - parent_span_id: str | None = None, - start_to_close_timeout: timedelta = timedelta(seconds=5), - heartbeat_timeout: timedelta = timedelta(seconds=5), - retry_policy: RetryPolicy = DEFAULT_RETRY_POLICY, - ) -> AgentTaskTracker: - """ - Get an agent task tracker by ID. - - Args: - tracker_id (str): The ID of the tracker. - trace_id (Optional[str]): The trace ID for tracing. - parent_span_id (Optional[str]): The parent span ID for tracing. - start_to_close_timeout (timedelta): The start to close timeout. - heartbeat_timeout (timedelta): The heartbeat timeout. - retry_policy (RetryPolicy): The retry policy. 
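`cancel_task` accepts either IDs or names for both the task and the owning agent, and raises `ValueError` when either identifier is missing entirely. A sketch, reusing the hypothetical `acp` instance from the earlier example:

```python
# Sketch; identifiers are placeholders. One of task_id/task_name and one of
# agent_id/agent_name must be provided, otherwise ValueError is raised.
cancelled = await acp.cancel_task(
    task_name="demo-task",
    agent_name="my-agent",
)
```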
-
-        Returns:
-            AgentTaskTracker: The agent task tracker.
-        """
-        params = GetAgentTaskTrackerParams(
-            tracker_id=tracker_id,
-            trace_id=trace_id,
-            parent_span_id=parent_span_id,
-        )
-        if in_temporal_workflow():
-            return await ActivityHelpers.execute_activity(
-                activity_name=AgentTaskTrackerActivityName.GET_AGENT_TASK_TRACKER,
-                request=params,
-                response_type=AgentTaskTracker,
-                start_to_close_timeout=start_to_close_timeout,
-                retry_policy=retry_policy,
-                heartbeat_timeout=heartbeat_timeout,
-            )
-        else:
-            return await self._agent_task_tracker_service.get_agent_task_tracker(
-                tracker_id=tracker_id,
-                trace_id=trace_id,
-                parent_span_id=parent_span_id,
-            )
-
-    async def get_by_task_and_agent(
-        self,
-        task_id: str,
-        agent_id: str,
-        trace_id: str | None = None,
-        parent_span_id: str | None = None,
-        start_to_close_timeout: timedelta = timedelta(seconds=5),
-        heartbeat_timeout: timedelta = timedelta(seconds=5),
-        retry_policy: RetryPolicy = DEFAULT_RETRY_POLICY,
-    ) -> AgentTaskTracker | None:
-        """
-        Get an agent task tracker by task ID and agent ID.
-        """
-        params = GetAgentTaskTrackerByTaskAndAgentParams(
-            task_id=task_id,
-            agent_id=agent_id,
-            trace_id=trace_id,
-            parent_span_id=parent_span_id,
-        )
-        if in_temporal_workflow():
-            return await ActivityHelpers.execute_activity(
-                activity_name=AgentTaskTrackerActivityName.GET_AGENT_TASK_TRACKER_BY_TASK_AND_AGENT,
-                request=params,
-                response_type=AgentTaskTracker,
-                start_to_close_timeout=start_to_close_timeout,
-                retry_policy=retry_policy,
-                heartbeat_timeout=heartbeat_timeout,
-            )
-        else:
-            return await self._agent_task_tracker_service.get_by_task_and_agent(
-                task_id=task_id,
-                agent_id=agent_id,
-                trace_id=trace_id,
-                parent_span_id=parent_span_id,
-            )
-
-    async def update(
-        self,
-        tracker_id: str,
-        last_processed_event_id: str | None = None,
-        status: str | None = None,
-        status_reason: str | None = None,
-        trace_id: str | None = None,
-        parent_span_id: str | None = None,
-        start_to_close_timeout: timedelta = timedelta(seconds=5),
-        heartbeat_timeout: timedelta = timedelta(seconds=5),
-        retry_policy: RetryPolicy = DEFAULT_RETRY_POLICY,
-    ) -> AgentTaskTracker:
-        """
-        Update an agent task tracker.
-
-        Args:
-            tracker_id (str): The ID of the tracker to update.
-            last_processed_event_id (Optional[str]): The new event cursor, if any.
-            status (Optional[str]): The new status, if any.
-            status_reason (Optional[str]): The reason for the status change, if any.
-            trace_id (Optional[str]): The trace ID for tracing.
-            parent_span_id (Optional[str]): The parent span ID for tracing.
-            start_to_close_timeout (timedelta): The start to close timeout.
-            heartbeat_timeout (timedelta): The heartbeat timeout.
-            retry_policy (RetryPolicy): The retry policy.
-
-        Returns:
-            AgentTaskTracker: The updated agent task tracker.
- """ - params = UpdateAgentTaskTrackerParams( - tracker_id=tracker_id, - last_processed_event_id=last_processed_event_id, - status=status, - status_reason=status_reason, - trace_id=trace_id, - parent_span_id=parent_span_id, - ) - if in_temporal_workflow(): - return await ActivityHelpers.execute_activity( - activity_name=AgentTaskTrackerActivityName.UPDATE_AGENT_TASK_TRACKER, - request=params, - response_type=AgentTaskTracker, - start_to_close_timeout=start_to_close_timeout, - retry_policy=retry_policy, - heartbeat_timeout=heartbeat_timeout, - ) - else: - return await self._agent_task_tracker_service.update_agent_task_tracker( - tracker_id=tracker_id, - last_processed_event_id=last_processed_event_id, - status=status, - status_reason=status_reason, - trace_id=trace_id, - parent_span_id=parent_span_id, - ) diff --git a/src/agentex/lib/adk/_modules/agents.py b/src/agentex/lib/adk/_modules/agents.py deleted file mode 100644 index eee8b9f7..00000000 --- a/src/agentex/lib/adk/_modules/agents.py +++ /dev/null @@ -1,80 +0,0 @@ -# ruff: noqa: I001 -# Import order matters - AsyncTracer must come after client import to avoid circular imports -from datetime import timedelta -from typing import Optional - -from agentex.lib.adk.utils._modules.client import create_async_agentex_client -from agentex.lib.core.temporal.activities.adk.agents_activities import AgentsActivityName, GetAgentParams -from temporalio.common import RetryPolicy - -from agentex import AsyncAgentex # noqa: F401 -from agentex.lib.core.services.adk.agents import AgentsService -from agentex.lib.core.temporal.activities.activity_helpers import ActivityHelpers -from agentex.lib.core.tracing.tracer import AsyncTracer -from agentex.types.agent import Agent -from agentex.lib.utils.logging import make_logger -from agentex.lib.utils.temporal import in_temporal_workflow - -logger = make_logger(__name__) - -DEFAULT_RETRY_POLICY = RetryPolicy(maximum_attempts=1) - - -class AgentsModule: - """ - Module for managing agents in Agentex. - Provides high-level async methods for retrieving, listing, and deleting agents. - """ - - def __init__( - self, - agents_service: Optional[AgentsService] = None, - ): - if agents_service is None: - agentex_client = create_async_agentex_client() - tracer = AsyncTracer(agentex_client) - self._agents_service = AgentsService(agentex_client=agentex_client, tracer=tracer) - else: - self._agents_service = agents_service - - async def get( - self, - *, - agent_id: Optional[str] = None, - agent_name: Optional[str] = None, - trace_id: Optional[str] = None, - parent_span_id: Optional[str] = None, - start_to_close_timeout: timedelta = timedelta(seconds=5), - heartbeat_timeout: timedelta = timedelta(seconds=5), - retry_policy: RetryPolicy = DEFAULT_RETRY_POLICY, - ) -> Agent: - """ - Get an agent by ID or name. - Args: - agent_id: The ID of the agent to retrieve. - agent_name: The name of the agent to retrieve. - Returns: - The agent entry. 
- """ - params = GetAgentParams( - agent_id=agent_id, - agent_name=agent_name, - trace_id=trace_id, - parent_span_id=parent_span_id, - ) - if in_temporal_workflow(): - return await ActivityHelpers.execute_activity( - activity_name=AgentsActivityName.GET_AGENT, - request=params, - response_type=Agent, - start_to_close_timeout=start_to_close_timeout, - retry_policy=retry_policy, - heartbeat_timeout=heartbeat_timeout, - ) - else: - return await self._agents_service.get_agent( - agent_id=agent_id, - agent_name=agent_name, - trace_id=trace_id, - parent_span_id=parent_span_id, - ) diff --git a/src/agentex/lib/adk/_modules/events.py b/src/agentex/lib/adk/_modules/events.py deleted file mode 100644 index 4995ae17..00000000 --- a/src/agentex/lib/adk/_modules/events.py +++ /dev/null @@ -1,145 +0,0 @@ -# ruff: noqa: I001 -# Import order matters - AsyncTracer must come after client import to avoid circular imports -from __future__ import annotations -from datetime import timedelta - -from temporalio.common import RetryPolicy - -from agentex import AsyncAgentex # noqa: F401 -from agentex.lib.adk.utils._modules.client import create_async_agentex_client -from agentex.lib.core.services.adk.events import EventsService -from agentex.lib.core.temporal.activities.activity_helpers import ActivityHelpers -from agentex.lib.core.temporal.activities.adk.events_activities import ( - EventsActivityName, - GetEventParams, - ListEventsParams, -) -from agentex.lib.core.tracing.tracer import AsyncTracer -from agentex.types.event import Event -from agentex.lib.utils.logging import make_logger -from agentex.lib.utils.temporal import in_temporal_workflow - -logger = make_logger(__name__) - -# Default retry policy for all events operations -DEFAULT_RETRY_POLICY = RetryPolicy(maximum_attempts=1) - - -class EventsModule: - """ - Module for managing events in Agentex. - Provides high-level async methods for retrieving and listing events. - """ - - def __init__( - self, - events_service: EventsService | None = None, - ): - if events_service is None: - agentex_client = create_async_agentex_client() - tracer = AsyncTracer(agentex_client) - self._events_service = EventsService( - agentex_client=agentex_client, tracer=tracer - ) - else: - self._events_service = events_service - - async def get( - self, - event_id: str, - trace_id: str | None = None, - parent_span_id: str | None = None, - start_to_close_timeout: timedelta = timedelta(seconds=5), - heartbeat_timeout: timedelta = timedelta(seconds=5), - retry_policy: RetryPolicy = DEFAULT_RETRY_POLICY, - ) -> Event | None: - """ - Get an event by ID. - - Args: - event_id (str): The ID of the event. - trace_id (Optional[str]): The trace ID for tracing. - parent_span_id (Optional[str]): The parent span ID for tracing. - start_to_close_timeout (timedelta): The start to close timeout. - heartbeat_timeout (timedelta): The heartbeat timeout. - retry_policy (RetryPolicy): The retry policy. - - Returns: - Optional[Event]: The event if found, None otherwise. 
- """ - params = GetEventParams( - event_id=event_id, - trace_id=trace_id, - parent_span_id=parent_span_id, - ) - if in_temporal_workflow(): - return await ActivityHelpers.execute_activity( - activity_name=EventsActivityName.GET_EVENT, - request=params, - response_type=Event, - start_to_close_timeout=start_to_close_timeout, - retry_policy=retry_policy, - heartbeat_timeout=heartbeat_timeout, - ) - else: - return await self._events_service.get_event( - event_id=event_id, - trace_id=trace_id, - parent_span_id=parent_span_id, - ) - - async def list_events( - self, - task_id: str, - agent_id: str, - last_processed_event_id: str | None = None, - limit: int | None = None, - trace_id: str | None = None, - parent_span_id: str | None = None, - start_to_close_timeout: timedelta = timedelta(seconds=5), - heartbeat_timeout: timedelta = timedelta(seconds=5), - retry_policy: RetryPolicy = DEFAULT_RETRY_POLICY, - ) -> list[Event]: - """ - List events for a specific task and agent. - - Args: - task_id (str): The ID of the task. - agent_id (str): The ID of the agent. - last_processed_event_id (Optional[str]): Optional event ID to get events after this ID. - limit (Optional[int]): Optional limit on number of results. - trace_id (Optional[str]): The trace ID for tracing. - parent_span_id (Optional[str]): The parent span ID for tracing. - start_to_close_timeout (timedelta): The start to close timeout. - heartbeat_timeout (timedelta): The heartbeat timeout. - retry_policy (RetryPolicy): The retry policy. - - Returns: - List[Event]: List of events ordered by sequence_id. - """ - params = ListEventsParams( - task_id=task_id, - agent_id=agent_id, - last_processed_event_id=last_processed_event_id, - limit=limit, - trace_id=trace_id, - parent_span_id=parent_span_id, - ) - if in_temporal_workflow(): - return await ActivityHelpers.execute_activity( - activity_name=EventsActivityName.LIST_EVENTS, - request=params, - response_type=list[Event], - start_to_close_timeout=start_to_close_timeout, - retry_policy=retry_policy, - heartbeat_timeout=heartbeat_timeout, - ) - else: - return await self._events_service.list_events( - task_id=task_id, - agent_id=agent_id, - last_processed_event_id=last_processed_event_id, - limit=limit, - trace_id=trace_id, - parent_span_id=parent_span_id, - ) diff --git a/src/agentex/lib/adk/_modules/messages.py b/src/agentex/lib/adk/_modules/messages.py deleted file mode 100644 index e81749b9..00000000 --- a/src/agentex/lib/adk/_modules/messages.py +++ /dev/null @@ -1,289 +0,0 @@ -# ruff: noqa: I001 -# Import order matters - AsyncTracer must come after client import to avoid circular imports -from __future__ import annotations -from datetime import timedelta - -from temporalio.common import RetryPolicy - -from agentex import AsyncAgentex # noqa: F401 -from agentex.lib.adk.utils._modules.client import create_async_agentex_client -from agentex.lib.core.adapters.streams.adapter_redis import RedisStreamRepository -from agentex.lib.core.services.adk.messages import MessagesService -from agentex.lib.core.services.adk.streaming import StreamingService -from agentex.lib.core.temporal.activities.activity_helpers import ActivityHelpers -from agentex.lib.core.temporal.activities.adk.messages_activities import ( - CreateMessageParams, - CreateMessagesBatchParams, - ListMessagesParams, - MessagesActivityName, - UpdateMessageParams, - UpdateMessagesBatchParams, -) -from agentex.lib.core.tracing.tracer import AsyncTracer -from agentex.types.task_message import TaskMessage, TaskMessageContent -from 
agentex.lib.utils.logging import make_logger
-from agentex.lib.utils.temporal import in_temporal_workflow
-
-logger = make_logger(__name__)
-
-# Default retry policy for all message operations
-DEFAULT_RETRY_POLICY = RetryPolicy(maximum_attempts=1)
-
-
-class MessagesModule:
-    """
-    Module for managing task messages in Agentex.
-    Provides high-level async methods for creating, retrieving, updating, and deleting messages.
-    """
-
-    def __init__(
-        self,
-        messages_service: MessagesService | None = None,
-    ):
-        if messages_service is None:
-            agentex_client = create_async_agentex_client()
-            stream_repository = RedisStreamRepository()
-            streaming_service = StreamingService(
-                agentex_client=agentex_client,
-                stream_repository=stream_repository,
-            )
-            tracer = AsyncTracer(agentex_client)
-            self._messages_service = MessagesService(
-                agentex_client=agentex_client,
-                streaming_service=streaming_service,
-                tracer=tracer,
-            )
-        else:
-            self._messages_service = messages_service
-
-    async def create(
-        self,
-        task_id: str,
-        content: TaskMessageContent,
-        emit_updates: bool = True,
-        trace_id: str | None = None,
-        parent_span_id: str | None = None,
-        start_to_close_timeout: timedelta = timedelta(seconds=5),
-        heartbeat_timeout: timedelta = timedelta(seconds=5),
-        retry_policy: RetryPolicy = DEFAULT_RETRY_POLICY,
-    ) -> TaskMessage:
-        """
-        Create a new message for a task.
-
-        Args:
-            task_id (str): The ID of the task.
-            content (TaskMessageContent): The content of the message to create.
-            emit_updates (bool): Whether to emit streaming updates for the new message.
-            trace_id (Optional[str]): The trace ID for tracing.
-            parent_span_id (Optional[str]): The parent span ID for tracing.
-            start_to_close_timeout (timedelta): The start to close timeout.
-            heartbeat_timeout (timedelta): The heartbeat timeout.
-            retry_policy (RetryPolicy): The retry policy.
-
-        Returns:
-            TaskMessage: The created message.
-        """
-        params = CreateMessageParams(
-            trace_id=trace_id,
-            parent_span_id=parent_span_id,
-            task_id=task_id,
-            content=content,
-            emit_updates=emit_updates,
-        )
-        if in_temporal_workflow():
-            return await ActivityHelpers.execute_activity(
-                activity_name=MessagesActivityName.CREATE_MESSAGE,
-                request=params,
-                response_type=TaskMessage,
-                start_to_close_timeout=start_to_close_timeout,
-                retry_policy=retry_policy,
-                heartbeat_timeout=heartbeat_timeout,
-            )
-        else:
-            return await self._messages_service.create_message(
-                task_id=task_id,
-                content=content,
-                emit_updates=emit_updates,
-            )
-
-    async def update(
-        self,
-        task_id: str,
-        message_id: str,
-        content: TaskMessageContent,
-        trace_id: str | None = None,
-        parent_span_id: str | None = None,
-        start_to_close_timeout: timedelta = timedelta(seconds=5),
-        heartbeat_timeout: timedelta = timedelta(seconds=5),
-        retry_policy: RetryPolicy = DEFAULT_RETRY_POLICY,
-    ) -> TaskMessage:
-        """
-        Update a message for a task.
-
-        Args:
-            task_id (str): The ID of the task.
-            message_id (str): The ID of the message.
-            content (TaskMessageContent): The new content for the message.
-            trace_id (Optional[str]): The trace ID for tracing.
-            parent_span_id (Optional[str]): The parent span ID for tracing.
-            start_to_close_timeout (timedelta): The start to close timeout.
-            heartbeat_timeout (timedelta): The heartbeat timeout.
-            retry_policy (RetryPolicy): The retry policy.
-
-        Returns:
-            TaskMessage: The updated message.
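Putting the events module above together with message creation gives a simple consume-and-echo loop. A sketch, assuming instances `events` and `messages`, placeholder IDs, and a hypothetical `make_text_content` helper that builds a `TaskMessageContent`:

```python
# Sketch; `events`, `messages`, the IDs, and make_text_content are all
# illustrative assumptions rather than names from the original files.
new_events = await events.list_events(
    task_id="task-123",
    agent_id="agent-456",
    last_processed_event_id=None,  # None means start from the beginning
    limit=10,
)
for event in new_events:
    await messages.create(
        task_id="task-123",
        content=make_text_content(f"saw event {event.id}"),  # hypothetical helper
        emit_updates=True,  # also publish the message on the task's stream
    )
```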
- """ - params = UpdateMessageParams( - task_id=task_id, - message_id=message_id, - content=content, - trace_id=trace_id, - parent_span_id=parent_span_id, - ) - if in_temporal_workflow(): - return await ActivityHelpers.execute_activity( - activity_name=MessagesActivityName.UPDATE_MESSAGE, - request=params, - response_type=TaskMessage, - start_to_close_timeout=start_to_close_timeout, - retry_policy=retry_policy, - heartbeat_timeout=heartbeat_timeout, - ) - else: - return await self._messages_service.update_message( - task_id=task_id, - message_id=message_id, - content=content, - ) - - async def create_batch( - self, - task_id: str, - contents: list[TaskMessageContent], - emit_updates: bool = True, - trace_id: str | None = None, - parent_span_id: str | None = None, - start_to_close_timeout: timedelta = timedelta(seconds=5), - heartbeat_timeout: timedelta = timedelta(seconds=5), - retry_policy: RetryPolicy = DEFAULT_RETRY_POLICY, - ) -> list[TaskMessage]: - """ - Create a batch of messages for a task. - - Args: - task_id (str): The ID of the task. - messages (List[TaskMessage]): The messages to create. - start_to_close_timeout (timedelta): The start to close timeout. - heartbeat_timeout (timedelta): The heartbeat timeout. - retry_policy (RetryPolicy): The retry policy. - - Returns: - List[TaskMessageEntity]: The created messages. - """ - params = CreateMessagesBatchParams( - task_id=task_id, - contents=contents, - emit_updates=emit_updates, - trace_id=trace_id, - parent_span_id=parent_span_id, - ) - if in_temporal_workflow(): - return await ActivityHelpers.execute_activity( - activity_name=MessagesActivityName.CREATE_MESSAGES_BATCH, - request=params, - response_type=list[TaskMessage], - start_to_close_timeout=start_to_close_timeout, - retry_policy=retry_policy, - heartbeat_timeout=heartbeat_timeout, - ) - else: - return await self._messages_service.create_messages_batch( - task_id=task_id, - contents=contents, - emit_updates=emit_updates, - ) - - async def update_batch( - self, - task_id: str, - updates: dict[str, TaskMessageContent], - trace_id: str | None = None, - parent_span_id: str | None = None, - start_to_close_timeout: timedelta = timedelta(seconds=5), - heartbeat_timeout: timedelta = timedelta(seconds=5), - retry_policy: RetryPolicy = DEFAULT_RETRY_POLICY, - ) -> list[TaskMessage]: - """ - Update a batch of messages for a task. - - Args: - task_id (str): The ID of the task. - updates (Dict[str, TaskMessage]): The updates to apply to the messages. - start_to_close_timeout (timedelta): The start to close timeout. - heartbeat_timeout (timedelta): The heartbeat timeout. - retry_policy (RetryPolicy): The retry policy. - - Returns: - List[TaskMessageEntity]: The updated messages. 
- """ - params = UpdateMessagesBatchParams( - task_id=task_id, - updates=updates, - trace_id=trace_id, - parent_span_id=parent_span_id, - ) - if in_temporal_workflow(): - return await ActivityHelpers.execute_activity( - activity_name=MessagesActivityName.UPDATE_MESSAGES_BATCH, - request=params, - response_type=list[TaskMessage], - start_to_close_timeout=start_to_close_timeout, - retry_policy=retry_policy, - heartbeat_timeout=heartbeat_timeout, - ) - else: - return await self._messages_service.update_messages_batch( - task_id=task_id, - updates=updates, - ) - - async def list( - self, - task_id: str, - limit: int | None = None, - trace_id: str | None = None, - parent_span_id: str | None = None, - start_to_close_timeout: timedelta = timedelta(seconds=5), - heartbeat_timeout: timedelta = timedelta(seconds=5), - retry_policy: RetryPolicy = DEFAULT_RETRY_POLICY, - ) -> list[TaskMessage]: - """ - List messages for a task. - - Args: - task_id (str): The ID of the task. - limit (Optional[int]): The maximum number of messages to return. - start_to_close_timeout (timedelta): The start to close timeout. - heartbeat_timeout (timedelta): The heartbeat timeout. - retry_policy (RetryPolicy): The retry policy. - - Returns: - List[TaskMessageEntity]: The list of messages. - """ - params = ListMessagesParams( - task_id=task_id, - limit=limit, - trace_id=trace_id, - parent_span_id=parent_span_id, - ) - if in_temporal_workflow(): - return await ActivityHelpers.execute_activity( - activity_name=MessagesActivityName.LIST_MESSAGES, - request=params, - response_type=list[TaskMessage], - start_to_close_timeout=start_to_close_timeout, - retry_policy=retry_policy, - heartbeat_timeout=heartbeat_timeout, - ) - else: - return await self._messages_service.list_messages( - task_id=task_id, - limit=limit, - ) diff --git a/src/agentex/lib/adk/_modules/state.py b/src/agentex/lib/adk/_modules/state.py deleted file mode 100644 index a5a343e9..00000000 --- a/src/agentex/lib/adk/_modules/state.py +++ /dev/null @@ -1,295 +0,0 @@ -# ruff: noqa: I001 -# Import order matters - AsyncTracer must come after client import to avoid circular imports -from __future__ import annotations -from datetime import timedelta -from typing import Any - -from pydantic import BaseModel -from temporalio.common import RetryPolicy - -from agentex import AsyncAgentex # noqa: F401 -from agentex.lib.adk.utils._modules.client import create_async_agentex_client -from agentex.lib.core.services.adk.state import StateService -from agentex.lib.core.temporal.activities.activity_helpers import ActivityHelpers -from agentex.lib.core.temporal.activities.adk.state_activities import ( - CreateStateParams, - DeleteStateParams, - GetStateParams, - StateActivityName, - UpdateStateParams, -) -from agentex.lib.core.tracing.tracer import AsyncTracer -from agentex.types.state import State -from agentex.lib.utils.logging import make_logger -from agentex.lib.utils.temporal import in_temporal_workflow - -logger = make_logger(__name__) - -# Default retry policy for all state operations -DEFAULT_RETRY_POLICY = RetryPolicy(maximum_attempts=1) - - -class StateModule: - """ - Module for managing task state in Agentex. - Provides high-level async methods for creating, retrieving, updating, and deleting state. 
- """ - - def __init__( - self, - state_service: StateService | None = None, - ): - if state_service is None: - agentex_client = create_async_agentex_client() - tracer = AsyncTracer(agentex_client) - self._state_service = StateService( - agentex_client=agentex_client, tracer=tracer - ) - else: - self._state_service = state_service - - async def create( - self, - task_id: str, - agent_id: str, - state: dict[str, Any] | BaseModel, - trace_id: str | None = None, - parent_span_id: str | None = None, - start_to_close_timeout: timedelta = timedelta(seconds=5), - heartbeat_timeout: timedelta = timedelta(seconds=5), - retry_policy: RetryPolicy = DEFAULT_RETRY_POLICY, - ) -> State: - """ - Create a new state for a task and agent. - - Args: - task_id (str): The ID of the task. - agent_id (str): The ID of the agent. - state (Dict[str, Any]): The state to create. - trace_id (Optional[str]): The trace ID for tracing. - parent_span_id (Optional[str]): The parent span ID for tracing. - start_to_close_timeout (timedelta): The start to close timeout. - heartbeat_timeout (timedelta): The heartbeat timeout. - retry_policy (RetryPolicy): The retry policy. - - Returns: - State: The created state. - """ - state_dict = state.model_dump() if isinstance(state, BaseModel) else state - params = CreateStateParams( - task_id=task_id, - agent_id=agent_id, - state=state_dict, - trace_id=trace_id, - parent_span_id=parent_span_id, - ) - if in_temporal_workflow(): - return await ActivityHelpers.execute_activity( - activity_name=StateActivityName.CREATE_STATE, - request=params, - response_type=State, - start_to_close_timeout=start_to_close_timeout, - retry_policy=retry_policy, - heartbeat_timeout=heartbeat_timeout, - ) - else: - return await self._state_service.create_state( - task_id=task_id, - agent_id=agent_id, - state=state_dict, - trace_id=trace_id, - parent_span_id=parent_span_id, - ) - - async def get( - self, - state_id: str, - trace_id: str | None = None, - parent_span_id: str | None = None, - start_to_close_timeout: timedelta = timedelta(seconds=5), - heartbeat_timeout: timedelta = timedelta(seconds=5), - retry_policy: RetryPolicy = DEFAULT_RETRY_POLICY, - ) -> State | None: - """ - Get a state by ID. - - Args: - state_id (str): The ID of the state. - trace_id (Optional[str]): The trace ID for tracing. - parent_span_id (Optional[str]): The parent span ID for tracing. - start_to_close_timeout (timedelta): The start to close timeout. - heartbeat_timeout (timedelta): The heartbeat timeout. - retry_policy (RetryPolicy): The retry policy. - - Returns: - Optional[State]: The state if found, None otherwise. - """ - params = GetStateParams( - state_id=state_id, - trace_id=trace_id, - parent_span_id=parent_span_id, - ) - if in_temporal_workflow(): - return await ActivityHelpers.execute_activity( - activity_name=StateActivityName.GET_STATE, - request=params, - response_type=State, - start_to_close_timeout=start_to_close_timeout, - retry_policy=retry_policy, - heartbeat_timeout=heartbeat_timeout, - ) - else: - return await self._state_service.get_state( - state_id=state_id, - trace_id=trace_id, - parent_span_id=parent_span_id, - ) - - async def get_by_task_and_agent( - self, - task_id: str, - agent_id: str, - trace_id: str | None = None, - parent_span_id: str | None = None, - start_to_close_timeout: timedelta = timedelta(seconds=5), - heartbeat_timeout: timedelta = timedelta(seconds=5), - retry_policy: RetryPolicy = DEFAULT_RETRY_POLICY, - ) -> State | None: - """ - Get a state by task and agent ID. 
A state is uniquely identified by task and the agent that created it. - - Args: - task_id (str): The ID of the task. - agent_id (str): The ID of the agent. - trace_id (Optional[str]): The trace ID for tracing. - parent_span_id (Optional[str]): The parent span ID for tracing. - start_to_close_timeout (timedelta): The start to close timeout. - heartbeat_timeout (timedelta): The heartbeat timeout. - retry_policy (RetryPolicy): The retry policy. - - Returns: - Optional[State]: The state if found, None otherwise. - """ - params = GetStateParams( - task_id=task_id, - agent_id=agent_id, - trace_id=trace_id, - parent_span_id=parent_span_id, - ) - if in_temporal_workflow(): - return await ActivityHelpers.execute_activity( - activity_name=StateActivityName.GET_STATE, - request=params, - response_type=State, - start_to_close_timeout=start_to_close_timeout, - retry_policy=retry_policy, - heartbeat_timeout=heartbeat_timeout, - ) - else: - return await self._state_service.get_state( - task_id=task_id, - agent_id=agent_id, - trace_id=trace_id, - parent_span_id=parent_span_id, - ) - - async def update( - self, - state_id: str, - task_id: str, - agent_id: str, - state: dict[str, Any] | BaseModel, - trace_id: str | None = None, - parent_span_id: str | None = None, - start_to_close_timeout: timedelta = timedelta(seconds=5), - heartbeat_timeout: timedelta = timedelta(seconds=5), - retry_policy: RetryPolicy = DEFAULT_RETRY_POLICY, - ) -> State: - """ - Update a state by ID. - - Args: - state_id (str): The ID of the state. - task_id (str): The ID of the task. - agent_id (str): The ID of the agent. - state (Dict[str, Any]): The state to update. - trace_id (Optional[str]): The trace ID for tracing. - parent_span_id (Optional[str]): The parent span ID for tracing. - start_to_close_timeout (timedelta): The start to close timeout. - heartbeat_timeout (timedelta): The heartbeat timeout. - retry_policy (RetryPolicy): The retry policy. - - Returns: - State: The updated state. - """ - state_dict = state.model_dump() if isinstance(state, BaseModel) else state - params = UpdateStateParams( - state_id=state_id, - task_id=task_id, - agent_id=agent_id, - state=state_dict, - trace_id=trace_id, - parent_span_id=parent_span_id, - ) - if in_temporal_workflow(): - return await ActivityHelpers.execute_activity( - activity_name=StateActivityName.UPDATE_STATE, - request=params, - response_type=State, - start_to_close_timeout=start_to_close_timeout, - retry_policy=retry_policy, - heartbeat_timeout=heartbeat_timeout, - ) - else: - return await self._state_service.update_state( - state_id=state_id, - task_id=task_id, - agent_id=agent_id, - state=state_dict, - trace_id=trace_id, - parent_span_id=parent_span_id, - ) - - async def delete( - self, - state_id: str, - trace_id: str | None = None, - parent_span_id: str | None = None, - start_to_close_timeout: timedelta = timedelta(seconds=5), - heartbeat_timeout: timedelta = timedelta(seconds=5), - retry_policy: RetryPolicy = DEFAULT_RETRY_POLICY, - ) -> State: - """ - Delete a state by ID. - - Args: - state_id (str): The ID of the state. - trace_id (Optional[str]): The trace ID for tracing. - parent_span_id (Optional[str]): The parent span ID for tracing. - start_to_close_timeout (timedelta): The start to close timeout. - heartbeat_timeout (timedelta): The heartbeat timeout. - retry_policy (RetryPolicy): The retry policy. - - Returns: - State: The deleted state. 
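Because `create` and `update` accept either a plain dict or a pydantic `BaseModel` (dumped via `model_dump()` before sending), typed state round-trips are straightforward. A sketch, assuming a `StateModule` instance `state_module` and placeholder IDs:

```python
# Sketch; `state_module` and the IDs are assumptions.
from pydantic import BaseModel

class ScratchPad(BaseModel):
    notes: list[str] = []

created = await state_module.create(
    task_id="task-123",
    agent_id="agent-456",
    state=ScratchPad(notes=["first pass"]),  # dumped to a dict internally
)
# A state is unique per (task, agent) pair, so it can be re-fetched without
# remembering the state ID.
fetched = await state_module.get_by_task_and_agent(task_id="task-123", agent_id="agent-456")
```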
- """ - params = DeleteStateParams( - state_id=state_id, - trace_id=trace_id, - parent_span_id=parent_span_id, - ) - if in_temporal_workflow(): - return await ActivityHelpers.execute_activity( - activity_name=StateActivityName.DELETE_STATE, - request=params, - response_type=State, - start_to_close_timeout=start_to_close_timeout, - retry_policy=retry_policy, - heartbeat_timeout=heartbeat_timeout, - ) - else: - return await self._state_service.delete_state( - state_id=state_id, - trace_id=trace_id, - parent_span_id=parent_span_id, - ) diff --git a/src/agentex/lib/adk/_modules/streaming.py b/src/agentex/lib/adk/_modules/streaming.py deleted file mode 100644 index ab53ed68..00000000 --- a/src/agentex/lib/adk/_modules/streaming.py +++ /dev/null @@ -1,79 +0,0 @@ -# ruff: noqa: I001 -# Import order matters - AsyncTracer must come after client import to avoid circular imports -from __future__ import annotations -from temporalio.common import RetryPolicy - -from agentex import AsyncAgentex # noqa: F401 -from agentex.lib.adk.utils._modules.client import create_async_agentex_client -from agentex.lib.core.adapters.streams.adapter_redis import RedisStreamRepository -from agentex.lib.core.services.adk.streaming import ( - StreamingService, - StreamingTaskMessageContext, -) -from agentex.types.task_message_content import TaskMessageContent -from agentex.lib.utils.logging import make_logger -from agentex.lib.utils.temporal import in_temporal_workflow - -logger = make_logger(__name__) - -DEFAULT_RETRY_POLICY = RetryPolicy(maximum_attempts=1) - - -class StreamingModule: - """ - Module for streaming content to clients in Agentex. - - This interface wraps around the StreamingService and provides a high-level API - for streaming events to clients, supporting both synchronous and asynchronous - (Temporal workflow) contexts. - """ - - def __init__(self, streaming_service: StreamingService | None = None): - """ - Initialize the streaming interface. - - Args: - streaming_service (Optional[StreamingService]): Optional StreamingService instance. If not provided, - a new service will be created with default parameters. - """ - if streaming_service is None: - stream_repository = RedisStreamRepository() - agentex_client = create_async_agentex_client() - self._streaming_service = StreamingService( - agentex_client=agentex_client, - stream_repository=stream_repository, - ) - else: - self._streaming_service = streaming_service - - def streaming_task_message_context( - self, - task_id: str, - initial_content: TaskMessageContent, - ) -> StreamingTaskMessageContext: - """ - Create a streaming context for managing TaskMessage lifecycle. - - This is a context manager that automatically creates a TaskMessage, sends START event, - and sends DONE event when the context exits. Perfect for simple streaming scenarios. - - Args: - task_id: The ID of the task - initial_content: The initial content for the TaskMessage - agentex_client: The agentex client for creating/updating messages - - Returns: - StreamingTaskMessageContext: Context manager for streaming operations - """ - # Note: We don't support Temporal activities for streaming context methods yet - # since they involve complex state management across multiple activity calls - if in_temporal_workflow(): - logger.warning( - "Streaming context methods are not yet supported in Temporal workflows. " - "You should wrap the entire streaming context in an activity. 
All nondeterministic network calls should be wrapped in an activity and generators cannot operate across activities and workflows." - ) - - return self._streaming_service.streaming_task_message_context( - task_id=task_id, - initial_content=initial_content, - ) diff --git a/src/agentex/lib/adk/_modules/tasks.py b/src/agentex/lib/adk/_modules/tasks.py deleted file mode 100644 index 522f7daf..00000000 --- a/src/agentex/lib/adk/_modules/tasks.py +++ /dev/null @@ -1,130 +0,0 @@ -# ruff: noqa: I001 -# Import order matters - AsyncTracer must come after client import to avoid circular imports -from __future__ import annotations -from datetime import timedelta - -from temporalio.common import RetryPolicy - -from agentex import AsyncAgentex # noqa: F401 -from agentex.lib.adk.utils._modules.client import create_async_agentex_client -from agentex.lib.core.services.adk.tasks import TasksService -from agentex.lib.core.temporal.activities.activity_helpers import ActivityHelpers -from agentex.lib.core.temporal.activities.adk.tasks_activities import ( - DeleteTaskParams, - GetTaskParams, - TasksActivityName, -) -from agentex.lib.core.tracing.tracer import AsyncTracer -from agentex.types.task import Task -from agentex.types.task_retrieve_response import TaskRetrieveResponse -from agentex.types.task_retrieve_by_name_response import TaskRetrieveByNameResponse -from agentex.lib.utils.logging import make_logger -from agentex.lib.utils.temporal import in_temporal_workflow - -logger = make_logger(__name__) - -DEFAULT_RETRY_POLICY = RetryPolicy(maximum_attempts=1) - - -class TasksModule: - """ - Module for managing tasks in Agentex. - Provides high-level async methods for retrieving, listing, and deleting tasks. - """ - - def __init__( - self, - tasks_service: TasksService | None = None, - ): - if tasks_service is None: - agentex_client = create_async_agentex_client() - tracer = AsyncTracer(agentex_client) - self._tasks_service = TasksService( - agentex_client=agentex_client, tracer=tracer - ) - else: - self._tasks_service = tasks_service - - async def get( - self, - *, - task_id: str | None = None, - task_name: str | None = None, - trace_id: str | None = None, - parent_span_id: str | None = None, - start_to_close_timeout: timedelta = timedelta(seconds=5), - heartbeat_timeout: timedelta = timedelta(seconds=5), - retry_policy: RetryPolicy = DEFAULT_RETRY_POLICY, - ) -> TaskRetrieveResponse | TaskRetrieveByNameResponse: - """ - Get a task by ID or name. - Args: - task_id: The ID of the task to retrieve. - task_name: The name of the task to retrieve. - Returns: - The task entry. - """ - params = GetTaskParams( - task_id=task_id, - task_name=task_name, - trace_id=trace_id, - parent_span_id=parent_span_id, - ) - if in_temporal_workflow(): - return await ActivityHelpers.execute_activity( - activity_name=TasksActivityName.GET_TASK, - request=params, - response_type=Task, - start_to_close_timeout=start_to_close_timeout, - retry_policy=retry_policy, - heartbeat_timeout=heartbeat_timeout, - ) - else: - return await self._tasks_service.get_task( - task_id=task_id, - task_name=task_name, - trace_id=trace_id, - parent_span_id=parent_span_id, - ) - - async def delete( - self, - *, - task_id: str | None = None, - task_name: str | None = None, - trace_id: str | None = None, - parent_span_id: str | None = None, - start_to_close_timeout: timedelta = timedelta(seconds=5), - heartbeat_timeout: timedelta = timedelta(seconds=5), - retry_policy: RetryPolicy = DEFAULT_RETRY_POLICY, - ) -> Task: - """ - Delete a task by ID or name. 
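Outside a workflow, the streaming context above manages the whole TaskMessage lifecycle: it creates the message and emits START on entry and DONE on exit. A sketch, assuming a `StreamingModule` instance `streaming`, a prebuilt `initial` content value, use as an async context manager, and a hypothetical per-chunk method name on the returned context:

```python
# Sketch; `streaming`, `initial`, and ctx.stream_update are assumptions
# (the per-chunk method name in particular is hypothetical).
async with streaming.streaming_task_message_context(
    task_id="task-123",
    initial_content=initial,
) as ctx:  # START is emitted when the context is entered
    for chunk in ["Hello, ", "world"]:
        await ctx.stream_update(chunk)  # hypothetical update call
# DONE is emitted automatically on exit
```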
-        Args:
-            task_id: The ID of the task to delete.
-            task_name: The name of the task to delete.
-        Returns:
-            The deleted task entry.
-        """
-        params = DeleteTaskParams(
-            task_id=task_id,
-            task_name=task_name,
-            trace_id=trace_id,
-            parent_span_id=parent_span_id,
-        )
-        if in_temporal_workflow():
-            return await ActivityHelpers.execute_activity(
-                activity_name=TasksActivityName.DELETE_TASK,
-                request=params,
-                response_type=Task,
-                start_to_close_timeout=start_to_close_timeout,
-                retry_policy=retry_policy,
-                heartbeat_timeout=heartbeat_timeout,
-            )
-        else:
-            return await self._tasks_service.delete_task( # type: ignore[return-value]
-                task_id=task_id,
-                task_name=task_name,
-                trace_id=trace_id,
-                parent_span_id=parent_span_id,
-            )
diff --git a/src/agentex/lib/adk/_modules/tracing.py b/src/agentex/lib/adk/_modules/tracing.py
deleted file mode 100644
index 93fd2365..00000000
--- a/src/agentex/lib/adk/_modules/tracing.py
+++ /dev/null
@@ -1,198 +0,0 @@
-# ruff: noqa: I001
-# Import order matters - AsyncTracer must come after client import to avoid circular imports
-from __future__ import annotations
-from collections.abc import AsyncGenerator
-from contextlib import asynccontextmanager
-from datetime import timedelta
-from typing import Any
-
-from temporalio.common import RetryPolicy
-
-from agentex import AsyncAgentex # noqa: F401
-from agentex.lib.adk.utils._modules.client import create_async_agentex_client
-from agentex.lib.core.services.adk.tracing import TracingService
-from agentex.lib.core.temporal.activities.activity_helpers import ActivityHelpers
-from agentex.lib.core.temporal.activities.adk.tracing_activities import (
-    EndSpanParams,
-    StartSpanParams,
-    TracingActivityName,
-)
-from agentex.lib.core.tracing.tracer import AsyncTracer
-from agentex.types.span import Span
-from agentex.lib.utils.logging import make_logger
-from agentex.lib.utils.model_utils import BaseModel
-from agentex.lib.utils.temporal import in_temporal_workflow
-
-logger = make_logger(__name__)
-
-DEFAULT_RETRY_POLICY = RetryPolicy(maximum_attempts=1)
-
-
-class TracingModule:
-    """
-    Module for managing tracing and span operations in Agentex.
-    Provides high-level async methods for starting, ending, and managing spans for distributed tracing.
-    """
-
-    def __init__(self, tracing_service: TracingService | None = None):
-        """
-        Initialize the tracing interface.
-
-        Args:
-            tracing_service (Optional[TracingService]): Optional pre-configured tracing service. If None, will be auto-initialized.
-        """
-        if tracing_service is None:
-            agentex_client = create_async_agentex_client()
-            tracer = AsyncTracer(agentex_client)
-            self._tracing_service = TracingService(tracer=tracer)
-        else:
-            self._tracing_service = tracing_service
-
-    @asynccontextmanager
-    async def span(
-        self,
-        trace_id: str,
-        name: str,
-        input: list[Any] | dict[str, Any] | BaseModel | None = None,
-        data: list[Any] | dict[str, Any] | BaseModel | None = None,
-        parent_id: str | None = None,
-        start_to_close_timeout: timedelta = timedelta(seconds=5),
-        heartbeat_timeout: timedelta = timedelta(seconds=5),
-        retry_policy: RetryPolicy = DEFAULT_RETRY_POLICY,
-    ) -> AsyncGenerator[Span | None, None]:
-        """
-        Async context manager for creating and automatically closing a span.
-        Yields the started span object. The span is automatically ended when the context exits.
-
-        If trace_id is falsy, acts as a no-op context manager.
-
-        Args:
-            trace_id (str): The trace ID for the span.
-            name (str): The name of the span.
- input (Union[List, Dict, BaseModel]): The input for the span. - parent_id (Optional[str]): The parent span ID for the span. - data (Optional[Union[List, Dict, BaseModel]]): The data for the span. - start_to_close_timeout (timedelta): The start to close timeout for the span. - heartbeat_timeout (timedelta): The heartbeat timeout for the span. - retry_policy (RetryPolicy): The retry policy for the span. - - Returns: - AsyncGenerator[Optional[Span], None]: An async generator that yields the started span object. - """ - if not trace_id: - yield None - return - - span: Span | None = await self.start_span( - trace_id=trace_id, - name=name, - input=input, - parent_id=parent_id, - data=data, - start_to_close_timeout=start_to_close_timeout, - heartbeat_timeout=heartbeat_timeout, - retry_policy=retry_policy, - ) - try: - yield span - finally: - if span: - await self.end_span( - trace_id=trace_id, - span=span, - start_to_close_timeout=start_to_close_timeout, - heartbeat_timeout=heartbeat_timeout, - retry_policy=retry_policy, - ) - - async def start_span( - self, - trace_id: str, - name: str, - input: list[Any] | dict[str, Any] | BaseModel | None = None, - parent_id: str | None = None, - data: list[Any] | dict[str, Any] | BaseModel | None = None, - start_to_close_timeout: timedelta = timedelta(seconds=5), - heartbeat_timeout: timedelta = timedelta(seconds=1), - retry_policy: RetryPolicy = DEFAULT_RETRY_POLICY, - ) -> Span | None: - """ - Start a new span in the trace. - - Args: - trace_id (str): The trace ID for the span. - name (str): The name of the span. - input (Union[List, Dict, BaseModel]): The input for the span. - parent_id (Optional[str]): The parent span ID for the span. - data (Optional[Union[List, Dict, BaseModel]]): The data for the span. - start_to_close_timeout (timedelta): The start to close timeout for the span. - heartbeat_timeout (timedelta): The heartbeat timeout for the span. - retry_policy (RetryPolicy): The retry policy for the span. - - Returns: - Span: The started span object. - """ - params = StartSpanParams( - trace_id=trace_id, - parent_id=parent_id, - name=name, - input=input, - data=data, - ) - if in_temporal_workflow(): - return await ActivityHelpers.execute_activity( - activity_name=TracingActivityName.START_SPAN, - request=params, - response_type=Span, - start_to_close_timeout=start_to_close_timeout, - retry_policy=retry_policy, - heartbeat_timeout=heartbeat_timeout, - ) - else: - return await self._tracing_service.start_span( - trace_id=trace_id, - name=name, - input=input, - parent_id=parent_id, - data=data, - ) - - async def end_span( - self, - trace_id: str, - span: Span, - start_to_close_timeout: timedelta = timedelta(seconds=5), - heartbeat_timeout: timedelta = timedelta(seconds=1), - retry_policy: RetryPolicy = DEFAULT_RETRY_POLICY, - ) -> Span: - """ - End an existing span in the trace. - - Args: - trace_id (str): The trace ID for the span. - span (Span): The span to end. - start_to_close_timeout (timedelta): The start to close timeout for the span. - heartbeat_timeout (timedelta): The heartbeat timeout for the span. - retry_policy (RetryPolicy): The retry policy for the span. - - Returns: - Span: The ended span object. 
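The `span` context manager wraps `start_span`/`end_span` so instrumented code cannot leak an open span, and with a falsy `trace_id` it degrades to a no-op that yields `None`. A sketch, assuming a `TracingModule` instance `tracing`:

```python
# Sketch; `tracing`, trace_id, and fetch_data are assumptions. With a falsy
# trace_id the body still runs, but `span` is None.
async with tracing.span(trace_id=trace_id, name="fetch-data", input={"q": "hi"}) as span:
    result = await fetch_data()  # placeholder for the traced work
# end_span is invoked automatically when the context exits
```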
- """ - params = EndSpanParams( - trace_id=trace_id, - span=span, - ) - if in_temporal_workflow(): - return await ActivityHelpers.execute_activity( - activity_name=TracingActivityName.END_SPAN, - request=params, - response_type=Span, - start_to_close_timeout=start_to_close_timeout, - retry_policy=retry_policy, - heartbeat_timeout=heartbeat_timeout, - ) - else: - return await self._tracing_service.end_span( - trace_id=trace_id, - span=span, - ) diff --git a/src/agentex/lib/adk/providers/__init__.py b/src/agentex/lib/adk/providers/__init__.py deleted file mode 100644 index 9167396f..00000000 --- a/src/agentex/lib/adk/providers/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -from agentex.lib.adk.providers._modules.sgp import SGPModule -from agentex.lib.adk.providers._modules.openai import OpenAIModule -from agentex.lib.adk.providers._modules.litellm import LiteLLMModule - -openai = OpenAIModule() -litellm = LiteLLMModule() -sgp = SGPModule() - -__all__ = ["openai", "litellm", "sgp"] diff --git a/src/agentex/lib/adk/providers/_modules/__init__.py b/src/agentex/lib/adk/providers/_modules/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/agentex/lib/adk/providers/_modules/litellm.py b/src/agentex/lib/adk/providers/_modules/litellm.py deleted file mode 100644 index 2f012f7e..00000000 --- a/src/agentex/lib/adk/providers/_modules/litellm.py +++ /dev/null @@ -1,234 +0,0 @@ -from __future__ import annotations - -from datetime import timedelta -from collections.abc import AsyncGenerator - -from temporalio.common import RetryPolicy - -from agentex.lib.utils.logging import make_logger -from agentex.lib.utils.temporal import in_temporal_workflow -from agentex.types.task_message import TaskMessage -from agentex.lib.types.llm_messages import LLMConfig, Completion -from agentex.lib.core.tracing.tracer import AsyncTracer -from agentex.lib.adk.utils._modules.client import create_async_agentex_client -from agentex.lib.core.services.adk.streaming import StreamingService -from agentex.lib.core.adapters.llm.adapter_litellm import LiteLLMGateway -from agentex.lib.core.adapters.streams.adapter_redis import RedisStreamRepository -from agentex.lib.core.services.adk.providers.litellm import LiteLLMService -from agentex.lib.core.temporal.activities.activity_helpers import ActivityHelpers -from agentex.lib.core.temporal.activities.adk.providers.litellm_activities import ( - LiteLLMActivityName, - ChatCompletionParams, - ChatCompletionAutoSendParams, - ChatCompletionStreamAutoSendParams, -) - -logger = make_logger(__name__) - -# Default retry policy for all LiteLLM operations -DEFAULT_RETRY_POLICY = RetryPolicy(maximum_attempts=1) - - -class LiteLLMModule: - """ - Module for managing LiteLLM agent operations in Agentex. - Provides high-level methods for chat completion, streaming. 
- """ - - def __init__( - self, - litellm_service: LiteLLMService | None = None, - ): - if litellm_service is None: - # Create default service - agentex_client = create_async_agentex_client() - stream_repository = RedisStreamRepository() - streaming_service = StreamingService( - agentex_client=agentex_client, - stream_repository=stream_repository, - ) - litellm_gateway = LiteLLMGateway() - tracer = AsyncTracer(agentex_client) - self._litellm_service = LiteLLMService( - agentex_client=agentex_client, - llm_gateway=litellm_gateway, - streaming_service=streaming_service, - tracer=tracer, - ) - else: - self._litellm_service = litellm_service - - async def chat_completion( - self, - llm_config: LLMConfig, - trace_id: str | None = None, - parent_span_id: str | None = None, - start_to_close_timeout: timedelta = timedelta(seconds=120), - heartbeat_timeout: timedelta = timedelta(seconds=120), - retry_policy: RetryPolicy = DEFAULT_RETRY_POLICY, - ) -> Completion: - """ - Perform a chat completion using LiteLLM. - - Args: - llm_config (LLMConfig): The configuration for the LLM. - trace_id (Optional[str]): The trace ID for tracing. - parent_span_id (Optional[str]): The parent span ID for tracing. - start_to_close_timeout (timedelta): The start to close timeout. - heartbeat_timeout (timedelta): The heartbeat timeout. - retry_policy (RetryPolicy): The retry policy. - - Returns: - Completion: An OpenAI compatible Completion object - """ - if in_temporal_workflow(): - params = ChatCompletionParams( - trace_id=trace_id, parent_span_id=parent_span_id, llm_config=llm_config - ) - return await ActivityHelpers.execute_activity( - activity_name=LiteLLMActivityName.CHAT_COMPLETION, - request=params, - response_type=Completion, - start_to_close_timeout=start_to_close_timeout, - heartbeat_timeout=heartbeat_timeout, - retry_policy=retry_policy, - ) - else: - return await self._litellm_service.chat_completion( - llm_config=llm_config, - trace_id=trace_id, - parent_span_id=parent_span_id, - ) - - async def chat_completion_auto_send( - self, - task_id: str, - llm_config: LLMConfig, - trace_id: str | None = None, - parent_span_id: str | None = None, - start_to_close_timeout: timedelta = timedelta(seconds=120), - heartbeat_timeout: timedelta = timedelta(seconds=120), - retry_policy: RetryPolicy = DEFAULT_RETRY_POLICY, - ) -> TaskMessage | None: - """ - Chat completion with automatic TaskMessage creation. - - Args: - task_id (str): The ID of the task. - llm_config (LLMConfig): The configuration for the LLM (must have stream=False). - trace_id (Optional[str]): The trace ID for tracing. - parent_span_id (Optional[str]): The parent span ID for tracing. - start_to_close_timeout (timedelta): The start to close timeout. - heartbeat_timeout (timedelta): The heartbeat timeout. - retry_policy (RetryPolicy): The retry policy. 
-
-        Returns:
-            Optional[TaskMessage]: The final TaskMessage, if one was produced.
-        """
-        if in_temporal_workflow():
-            # Use streaming activity with stream=False for non-streaming auto-send
-            params = ChatCompletionAutoSendParams(
-                trace_id=trace_id,
-                parent_span_id=parent_span_id,
-                task_id=task_id,
-                llm_config=llm_config,
-            )
-            return await ActivityHelpers.execute_activity(
-                activity_name=LiteLLMActivityName.CHAT_COMPLETION_AUTO_SEND,
-                request=params,
-                response_type=TaskMessage,
-                start_to_close_timeout=start_to_close_timeout,
-                heartbeat_timeout=heartbeat_timeout,
-                retry_policy=retry_policy,
-            )
-        else:
-            return await self._litellm_service.chat_completion_auto_send(
-                task_id=task_id,
-                llm_config=llm_config,
-                trace_id=trace_id,
-                parent_span_id=parent_span_id,
-            )
-
-    async def chat_completion_stream(
-        self,
-        llm_config: LLMConfig,
-        trace_id: str | None = None,
-        parent_span_id: str | None = None,
-    ) -> AsyncGenerator[Completion, None]:
-        """
-        Stream chat completion chunks using LiteLLM.
-
-        DEFAULT: Returns raw streaming chunks for manual handling.
-
-        NOTE: This method does NOT work in Temporal workflows!
-        Temporal activities cannot return generators. Use chat_completion_stream_auto_send() instead.
-
-        Args:
-            llm_config (LLMConfig): The configuration for the LLM (must have stream=True).
-            trace_id (Optional[str]): The trace ID for tracing.
-            parent_span_id (Optional[str]): The parent span ID for tracing.
-
-        Returns:
-            AsyncGenerator[Completion, None]: Generator yielding completion chunks
-
-        Raises:
-            ValueError: If called from within a Temporal workflow
-        """
-        # Delegate to service - it handles temporal workflow checks
-        async for chunk in self._litellm_service.chat_completion_stream(
-            llm_config=llm_config,
-            trace_id=trace_id,
-            parent_span_id=parent_span_id,
-        ):
-            yield chunk
-
-    async def chat_completion_stream_auto_send(
-        self,
-        task_id: str,
-        llm_config: LLMConfig,
-        trace_id: str | None = None,
-        parent_span_id: str | None = None,
-        start_to_close_timeout: timedelta = timedelta(seconds=120),
-        heartbeat_timeout: timedelta = timedelta(seconds=120),
-        retry_policy: RetryPolicy = DEFAULT_RETRY_POLICY,
-    ) -> TaskMessage | None:
-        """
-        Stream chat completion with automatic TaskMessage creation and streaming.
-
-        Args:
-            task_id (str): The ID of the task to run the agent for.
-            llm_config (LLMConfig): The configuration for the LLM (must have stream=True).
-            trace_id (Optional[str]): The trace ID for tracing.
-            parent_span_id (Optional[str]): The parent span ID for tracing.
-            start_to_close_timeout (timedelta): The start to close timeout.
-            heartbeat_timeout (timedelta): The heartbeat timeout.
-            retry_policy (RetryPolicy): The retry policy.
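A usage sketch for the two call shapes, assuming a `LiteLLMModule` instance `litellm_module` and that `LLMConfig` carries model, messages, and stream fields as shown (the exact field names are assumptions):

```python
# Sketch; `litellm_module` and the LLMConfig field names are assumptions.
from agentex.lib.types.llm_messages import LLMConfig

# Non-streaming: returns an OpenAI-compatible Completion.
config = LLMConfig(model="gpt-4o-mini", messages=[{"role": "user", "content": "Hi"}])
completion = await litellm_module.chat_completion(llm_config=config)

# Streaming: only valid outside a Temporal workflow, since activities cannot
# return generators; inside a workflow use chat_completion_stream_auto_send.
stream_config = LLMConfig(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "Hi"}],
    stream=True,  # the docstring requires stream=True for this path
)
async for chunk in litellm_module.chat_completion_stream(llm_config=stream_config):
    ...
```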
- - Returns: - TaskMessage: The final TaskMessage after streaming is complete - """ - if in_temporal_workflow(): - params = ChatCompletionStreamAutoSendParams( - trace_id=trace_id, - parent_span_id=parent_span_id, - task_id=task_id, - llm_config=llm_config, - ) - return await ActivityHelpers.execute_activity( - activity_name=LiteLLMActivityName.CHAT_COMPLETION_STREAM_AUTO_SEND, - request=params, - response_type=TaskMessage, - start_to_close_timeout=start_to_close_timeout, - heartbeat_timeout=heartbeat_timeout, - retry_policy=retry_policy, - ) - else: - return await self._litellm_service.chat_completion_stream_auto_send( - task_id=task_id, - llm_config=llm_config, - trace_id=trace_id, - parent_span_id=parent_span_id, - ) diff --git a/src/agentex/lib/adk/providers/_modules/openai.py b/src/agentex/lib/adk/providers/_modules/openai.py deleted file mode 100644 index b9824227..00000000 --- a/src/agentex/lib/adk/providers/_modules/openai.py +++ /dev/null @@ -1,512 +0,0 @@ -from __future__ import annotations - -import sys -from typing import Any, Literal -from datetime import timedelta - -from mcp import StdioServerParameters -from agents import Agent, RunResult, RunResultStreaming -from agents.tool import Tool -from agents.agent import StopAtTools, ToolsToFinalOutputFunction -from agents.guardrail import InputGuardrail, OutputGuardrail -from temporalio.common import RetryPolicy -from agents.agent_output import AgentOutputSchemaBase -from agents.model_settings import ModelSettings - -# Use warnings.deprecated in Python 3.13+, typing_extensions.deprecated for older versions -if sys.version_info >= (3, 13): - from warnings import deprecated -else: - from typing_extensions import deprecated - -from agentex.lib.utils.logging import make_logger -from agentex.lib.utils.temporal import in_temporal_workflow -from agentex.lib.core.tracing.tracer import AsyncTracer -from agentex.lib.types.agent_results import ( - SerializableRunResult, - SerializableRunResultStreaming, -) -from agentex.lib.adk.utils._modules.client import create_async_agentex_client -from agentex.lib.core.services.adk.streaming import StreamingService -from agentex.lib.core.services.adk.providers.openai import OpenAIService -from agentex.lib.core.adapters.streams.adapter_redis import RedisStreamRepository -from agentex.lib.core.temporal.activities.activity_helpers import ActivityHelpers -from agentex.lib.core.temporal.activities.adk.providers.openai_activities import ( - RunAgentParams, - OpenAIActivityName, - RunAgentAutoSendParams, - RunAgentStreamedAutoSendParams, -) - -logger = make_logger(__name__) - -# Default retry policy for all OpenAI operations -DEFAULT_RETRY_POLICY = RetryPolicy(maximum_attempts=1) - - -class OpenAIModule: - """ - Module for managing OpenAI agent operations in Agentex. - Provides high-level methods for running agents with and without streaming. 
- """ - - def __init__( - self, - openai_service: OpenAIService | None = None, - ): - if openai_service is None: - # Create default service - agentex_client = create_async_agentex_client() - stream_repository = RedisStreamRepository() - streaming_service = StreamingService( - agentex_client=agentex_client, - stream_repository=stream_repository, - ) - tracer = AsyncTracer(agentex_client) - self._openai_service = OpenAIService( - agentex_client=agentex_client, - streaming_service=streaming_service, - tracer=tracer, - ) - else: - self._openai_service = openai_service - - async def run_agent( - self, - input_list: list[dict[str, Any]], - agent_name: str, - agent_instructions: str, - mcp_server_params: list[StdioServerParameters] | None = None, - trace_id: str | None = None, - parent_span_id: str | None = None, - start_to_close_timeout: timedelta = timedelta(seconds=600), - heartbeat_timeout: timedelta = timedelta(seconds=600), - retry_policy: RetryPolicy = DEFAULT_RETRY_POLICY, - handoff_description: str | None = None, - handoffs: list[Agent] | None = None, - model: str | None = None, - model_settings: ModelSettings | None = None, - tools: list[Tool] | None = None, - output_type: type[Any] | AgentOutputSchemaBase | None = None, - tool_use_behavior: ( - Literal["run_llm_again", "stop_on_first_tool"] | StopAtTools | ToolsToFinalOutputFunction - ) = "run_llm_again", - mcp_timeout_seconds: int | None = None, - input_guardrails: list[InputGuardrail] | None = None, - output_guardrails: list[OutputGuardrail] | None = None, - max_turns: int | None = None, - previous_response_id: str | None = None, - ) -> SerializableRunResult | RunResult: - """ - Run an agent without streaming or TaskMessage creation. - - DEFAULT: No TaskMessage creation, returns only the result. - - Args: - input_list: List of input data for the agent. - mcp_server_params: MCP server parameters for the agent. - agent_name: The name of the agent to run. - agent_instructions: Instructions for the agent. - trace_id: Optional trace ID for tracing. - parent_span_id: Optional parent span for tracing. - start_to_close_timeout: Maximum time allowed for the operation. - heartbeat_timeout: Maximum time between heartbeats. - retry_policy: Policy for retrying failed operations. - handoff_description: Optional description of the handoff. - handoffs: Optional list of handoffs. - model: Optional model to use. - model_settings: Optional model settings. - tools: Optional list of tools. - output_type: Optional output type. - tool_use_behavior: Optional tool use behavior. - mcp_timeout_seconds: Optional param to set the timeout threshold for the MCP servers. Defaults to 5 seconds. - input_guardrails: Optional list of input guardrails to run on initial user input. - output_guardrails: Optional list of output guardrails to run on final agent output. - max_turns: Maximum number of turns the agent can take. Uses Runner's default if None. - previous_response_id: Optional previous response ID for conversation continuity. - - Returns: - Union[SerializableRunResult, RunResult]: SerializableRunResult when in Temporal, RunResult otherwise. 
- """ - # Default to empty list if not provided - if mcp_server_params is None: - mcp_server_params = [] - - if in_temporal_workflow(): - params = RunAgentParams( - trace_id=trace_id, - parent_span_id=parent_span_id, - input_list=input_list, - mcp_server_params=mcp_server_params, - agent_name=agent_name, - agent_instructions=agent_instructions, - handoff_description=handoff_description, - handoffs=handoffs, # type: ignore[arg-type] - model=model, - model_settings=model_settings, # type: ignore[arg-type] - tools=tools, # type: ignore[arg-type] - output_type=output_type, - tool_use_behavior=tool_use_behavior, # type: ignore[arg-type] - mcp_timeout_seconds=mcp_timeout_seconds, - input_guardrails=input_guardrails, # type: ignore[arg-type] - output_guardrails=output_guardrails, # type: ignore[arg-type] - max_turns=max_turns, - previous_response_id=previous_response_id, - ) - return await ActivityHelpers.execute_activity( - activity_name=OpenAIActivityName.RUN_AGENT, - request=params, - response_type=SerializableRunResult, - start_to_close_timeout=start_to_close_timeout, - heartbeat_timeout=heartbeat_timeout, - retry_policy=retry_policy, - ) - else: - return await self._openai_service.run_agent( - input_list=input_list, - mcp_server_params=mcp_server_params, - agent_name=agent_name, - agent_instructions=agent_instructions, - trace_id=trace_id, - parent_span_id=parent_span_id, - handoff_description=handoff_description, - handoffs=handoffs, - model=model, - model_settings=model_settings, - tools=tools, - output_type=output_type, - tool_use_behavior=tool_use_behavior, - mcp_timeout_seconds=mcp_timeout_seconds, - input_guardrails=input_guardrails, - output_guardrails=output_guardrails, - max_turns=max_turns, - previous_response_id=previous_response_id, - ) - - async def run_agent_auto_send( - self, - task_id: str, - input_list: list[dict[str, Any]], - agent_name: str, - agent_instructions: str, - mcp_server_params: list[StdioServerParameters] | None = None, - trace_id: str | None = None, - parent_span_id: str | None = None, - start_to_close_timeout: timedelta = timedelta(seconds=600), - heartbeat_timeout: timedelta = timedelta(seconds=600), - retry_policy: RetryPolicy = DEFAULT_RETRY_POLICY, - handoff_description: str | None = None, - handoffs: list[Agent] | None = None, - model: str | None = None, - model_settings: ModelSettings | None = None, - tools: list[Tool] | None = None, - output_type: type[Any] | AgentOutputSchemaBase | None = None, - tool_use_behavior: ( - Literal["run_llm_again", "stop_on_first_tool"] | StopAtTools | ToolsToFinalOutputFunction - ) = "run_llm_again", - mcp_timeout_seconds: int | None = None, - input_guardrails: list[InputGuardrail] | None = None, - output_guardrails: list[OutputGuardrail] | None = None, - max_turns: int | None = None, - previous_response_id: str | None = None, - ) -> SerializableRunResult | RunResult: - """ - Run an agent with automatic TaskMessage creation. - - Args: - task_id: The ID of the task to run the agent for. - input_list: List of input data for the agent. - mcp_server_params: MCP server parameters for the agent. - agent_name: The name of the agent to run. - agent_instructions: Instructions for the agent. - trace_id: Optional trace ID for tracing. - parent_span_id: Optional parent span for tracing. - start_to_close_timeout: Maximum time allowed for the operation. - heartbeat_timeout: Maximum time between heartbeats. - retry_policy: Policy for retrying failed operations. - handoff_description: Optional description of the handoff. 
- handoffs: Optional list of handoffs. - model: Optional model to use. - model_settings: Optional model settings. - tools: Optional list of tools. - output_type: Optional output type. - tool_use_behavior: Optional tool use behavior. - mcp_timeout_seconds: Optional param to set the timeout threshold for the MCP servers. Defaults to 5 seconds. - input_guardrails: Optional list of input guardrails to run on initial user input. - output_guardrails: Optional list of output guardrails to run on final agent output. - max_turns: Maximum number of turns the agent can take. Uses Runner's default if None. - previous_response_id: Optional previous response ID for conversation continuity. - - Returns: - Union[SerializableRunResult, RunResult]: SerializableRunResult when in Temporal, RunResult otherwise. - """ - # Default to empty list if not provided - if mcp_server_params is None: - mcp_server_params = [] - - if in_temporal_workflow(): - params = RunAgentAutoSendParams( - trace_id=trace_id, - parent_span_id=parent_span_id, - task_id=task_id, - input_list=input_list, - mcp_server_params=mcp_server_params, - agent_name=agent_name, - agent_instructions=agent_instructions, - handoff_description=handoff_description, - handoffs=handoffs, # type: ignore[arg-type] - model=model, - model_settings=model_settings, # type: ignore[arg-type] - tools=tools, # type: ignore[arg-type] - output_type=output_type, - tool_use_behavior=tool_use_behavior, # type: ignore[arg-type] - mcp_timeout_seconds=mcp_timeout_seconds, - input_guardrails=input_guardrails, # type: ignore[arg-type] - output_guardrails=output_guardrails, # type: ignore[arg-type] - max_turns=max_turns, - previous_response_id=previous_response_id, - ) - return await ActivityHelpers.execute_activity( - activity_name=OpenAIActivityName.RUN_AGENT_AUTO_SEND, - request=params, - response_type=SerializableRunResult, - start_to_close_timeout=start_to_close_timeout, - heartbeat_timeout=heartbeat_timeout, - retry_policy=retry_policy, - ) - else: - return await self._openai_service.run_agent_auto_send( - task_id=task_id, - input_list=input_list, - mcp_server_params=mcp_server_params, - agent_name=agent_name, - agent_instructions=agent_instructions, - trace_id=trace_id, - parent_span_id=parent_span_id, - handoff_description=handoff_description, - handoffs=handoffs, - model=model, - model_settings=model_settings, - tools=tools, - output_type=output_type, - tool_use_behavior=tool_use_behavior, - mcp_timeout_seconds=mcp_timeout_seconds, - input_guardrails=input_guardrails, - output_guardrails=output_guardrails, - max_turns=max_turns, - previous_response_id=previous_response_id, - ) - - async def run_agent_streamed( - self, - input_list: list[dict[str, Any]], - agent_name: str, - agent_instructions: str, - mcp_server_params: list[StdioServerParameters] | None = None, - trace_id: str | None = None, - parent_span_id: str | None = None, - handoff_description: str | None = None, - handoffs: list[Agent] | None = None, - model: str | None = None, - model_settings: ModelSettings | None = None, - tools: list[Tool] | None = None, - output_type: type[Any] | AgentOutputSchemaBase | None = None, - tool_use_behavior: ( - Literal["run_llm_again", "stop_on_first_tool"] | StopAtTools | ToolsToFinalOutputFunction - ) = "run_llm_again", - mcp_timeout_seconds: int | None = None, - input_guardrails: list[InputGuardrail] | None = None, - output_guardrails: list[OutputGuardrail] | None = None, - max_turns: int | None = None, - previous_response_id: str | None = None, - ) -> RunResultStreaming: - 
""" - Run an agent with streaming enabled but no TaskMessage creation. - - DEFAULT: No TaskMessage creation, returns only the result. - - NOTE: This method does NOT work in Temporal workflows! - Use run_agent_streamed_auto_send() instead for Temporal workflows. - - Args: - input_list: List of input data for the agent. - mcp_server_params: MCP server parameters for the agent. - agent_name: The name of the agent to run. - agent_instructions: Instructions for the agent. - trace_id: Optional trace ID for tracing. - parent_span_id: Optional parent span for tracing. - start_to_close_timeout: Maximum time allowed for the operation. - heartbeat_timeout: Maximum time between heartbeats. - retry_policy: Policy for retrying failed operations. - handoff_description: Optional description of the handoff. - handoffs: Optional list of handoffs. - model: Optional model to use. - model_settings: Optional model settings. - tools: Optional list of tools. - output_type: Optional output type. - tool_use_behavior: Optional tool use behavior. - mcp_timeout_seconds: Optional param to set the timeout threshold for the MCP servers. Defaults to 5 seconds. - input_guardrails: Optional list of input guardrails to run on initial user input. - output_guardrails: Optional list of output guardrails to run on final agent output. - max_turns: Maximum number of turns the agent can take. Uses Runner's default if None. - previous_response_id: Optional previous response ID for conversation continuity. - - Returns: - RunResultStreaming: The result of the agent run with streaming. - - Raises: - ValueError: If called from within a Temporal workflow - """ - # Default to empty list if not provided - if mcp_server_params is None: - mcp_server_params = [] - - # Temporal workflows should use the auto_send variant - if in_temporal_workflow(): - raise ValueError( - "run_agent_streamed() cannot be used in Temporal workflows. " - "Use run_agent_streamed_auto_send() instead, which properly handles " - "TaskMessage creation and streaming through the streaming service." - ) - - return await self._openai_service.run_agent_streamed( - input_list=input_list, - mcp_server_params=mcp_server_params, - agent_name=agent_name, - agent_instructions=agent_instructions, - trace_id=trace_id, - parent_span_id=parent_span_id, - handoff_description=handoff_description, - handoffs=handoffs, - model=model, - model_settings=model_settings, - tools=tools, - output_type=output_type, - tool_use_behavior=tool_use_behavior, - mcp_timeout_seconds=mcp_timeout_seconds, - input_guardrails=input_guardrails, - output_guardrails=output_guardrails, - max_turns=max_turns, - previous_response_id=previous_response_id, - ) - - @deprecated( - "Use the OpenAI Agents SDK integration with Temporal instead. " - "See examples in tutorials/10_async/10_temporal/ for migration guidance." 
- ) - async def run_agent_streamed_auto_send( - self, - task_id: str, - input_list: list[dict[str, Any]], - agent_name: str, - agent_instructions: str, - mcp_server_params: list[StdioServerParameters] | None = None, - trace_id: str | None = None, - parent_span_id: str | None = None, - start_to_close_timeout: timedelta = timedelta(seconds=600), - heartbeat_timeout: timedelta = timedelta(seconds=600), - retry_policy: RetryPolicy = DEFAULT_RETRY_POLICY, - handoff_description: str | None = None, - handoffs: list[Agent] | None = None, - model: str | None = None, - model_settings: ModelSettings | None = None, - tools: list[Tool] | None = None, - output_type: type[Any] | AgentOutputSchemaBase | None = None, - tool_use_behavior: ( - Literal["run_llm_again", "stop_on_first_tool"] | StopAtTools | ToolsToFinalOutputFunction - ) = "run_llm_again", - mcp_timeout_seconds: int | None = None, - input_guardrails: list[InputGuardrail] | None = None, - output_guardrails: list[OutputGuardrail] | None = None, - max_turns: int | None = None, - previous_response_id: str | None = None, - ) -> SerializableRunResultStreaming | RunResultStreaming: - """ - Run an agent with streaming enabled and automatic TaskMessage creation. - - .. deprecated:: - Use the OpenAI Agents SDK integration with Temporal instead. - See examples in tutorials/10_async/10_temporal/ for migration guidance. - - Args: - task_id: The ID of the task to run the agent for. - input_list: List of input data for the agent. - mcp_server_params: MCP server parameters for the agent. - agent_name: The name of the agent to run. - agent_instructions: Instructions for the agent. - trace_id: Optional trace ID for tracing. - parent_span_id: Optional parent span for tracing. - start_to_close_timeout: Maximum time allowed for the operation. - heartbeat_timeout: Maximum time between heartbeats. - retry_policy: Policy for retrying failed operations. - handoff_description: Optional description of the handoff. - handoffs: Optional list of handoffs. - model: Optional model to use. - model_settings: Optional model settings. - tools: Optional list of tools. - input_guardrails: Optional list of input guardrails to run on initial user input. - output_guardrails: Optional list of output guardrails to run on final agent output. - output_type: Optional output type. - tool_use_behavior: Optional tool use behavior. - mcp_timeout_seconds: Optional param to set the timeout threshold for the MCP servers. Defaults to 5 seconds. - max_turns: Maximum number of turns the agent can take. Uses Runner's default if None. - previous_response_id: Optional previous response ID for conversation continuity. - - Returns: - Union[SerializableRunResultStreaming, RunResultStreaming]: SerializableRunResultStreaming when in Temporal, RunResultStreaming otherwise. 
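        Example (a sketch; the task id and agent settings are illustrative):

            result = await adk.providers.openai.run_agent_streamed_auto_send(
                task_id="task-123",
                input_list=[{"role": "user", "content": "hi"}],
                agent_name="demo-agent",
                agent_instructions="Answer briefly.",
            )
            # In a workflow this resolves to a SerializableRunResultStreaming once
            # the activity has finished streaming TaskMessages; see the deprecation
            # note above for the preferred Agents SDK + Temporal integration.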
- """ - # Default to empty list if not provided - if mcp_server_params is None: - mcp_server_params = [] - - if in_temporal_workflow(): - params = RunAgentStreamedAutoSendParams( - trace_id=trace_id, - parent_span_id=parent_span_id, - task_id=task_id, - input_list=input_list, - mcp_server_params=mcp_server_params, - agent_name=agent_name, - agent_instructions=agent_instructions, - handoff_description=handoff_description, - handoffs=handoffs, - model=model, - model_settings=model_settings, - tools=tools, - output_type=output_type, - tool_use_behavior=tool_use_behavior, - mcp_timeout_seconds=mcp_timeout_seconds, - input_guardrails=input_guardrails, - output_guardrails=output_guardrails, - max_turns=max_turns, - ) - return await ActivityHelpers.execute_activity( - activity_name=OpenAIActivityName.RUN_AGENT_STREAMED_AUTO_SEND, - request=params, - response_type=SerializableRunResultStreaming, - start_to_close_timeout=start_to_close_timeout, - heartbeat_timeout=heartbeat_timeout, - retry_policy=retry_policy, - ) - else: - return await self._openai_service.run_agent_streamed_auto_send( - task_id=task_id, - input_list=input_list, - mcp_server_params=mcp_server_params, - agent_name=agent_name, - agent_instructions=agent_instructions, - trace_id=trace_id, - parent_span_id=parent_span_id, - handoff_description=handoff_description, - handoffs=handoffs, - model=model, - model_settings=model_settings, - tools=tools, - output_type=output_type, - tool_use_behavior=tool_use_behavior, - mcp_timeout_seconds=mcp_timeout_seconds, - input_guardrails=input_guardrails, - output_guardrails=output_guardrails, - max_turns=max_turns, - previous_response_id=previous_response_id, - ) \ No newline at end of file diff --git a/src/agentex/lib/adk/providers/_modules/sgp.py b/src/agentex/lib/adk/providers/_modules/sgp.py deleted file mode 100644 index fab765b7..00000000 --- a/src/agentex/lib/adk/providers/_modules/sgp.py +++ /dev/null @@ -1,87 +0,0 @@ -from __future__ import annotations - -from datetime import timedelta - -from scale_gp import SGPClient, SGPClientError -from temporalio.common import RetryPolicy - -from agentex.lib.utils.logging import make_logger -from agentex.lib.utils.temporal import in_temporal_workflow -from agentex.lib.core.tracing.tracer import AsyncTracer -from agentex.lib.adk.utils._modules.client import create_async_agentex_client -from agentex.lib.core.services.adk.providers.sgp import SGPService -from agentex.lib.core.temporal.activities.activity_helpers import ActivityHelpers -from agentex.lib.core.temporal.activities.adk.providers.sgp_activities import ( - SGPActivityName, - DownloadFileParams, - FileContentResponse, -) - -logger = make_logger(__name__) - -DEFAULT_RETRY_POLICY = RetryPolicy(maximum_attempts=1) - - -class SGPModule: - """ - Module for managing SGP agent operations in Agentex. - Provides high-level methods for chat completion, streaming, and message classification. 
- """ - - def __init__( - self, - sgp_service: SGPService | None = None, - ): - if sgp_service is None: - try: - sgp_client = SGPClient() - agentex_client = create_async_agentex_client() - tracer = AsyncTracer(agentex_client) - self._sgp_service = SGPService(sgp_client=sgp_client, tracer=tracer) - except SGPClientError: - self._sgp_service = None - else: - self._sgp_service = sgp_service - - async def download_file_content( - self, - params: DownloadFileParams, - start_to_close_timeout: timedelta = timedelta(seconds=30), - heartbeat_timeout: timedelta = timedelta(seconds=30), - retry_policy: RetryPolicy = DEFAULT_RETRY_POLICY, - ) -> FileContentResponse: - """ - Download the content of a file from SGP. - - Args: - params (DownloadFileParams): The parameters for the download file content activity. - start_to_close_timeout (timedelta): The start to close timeout. - heartbeat_timeout (timedelta): The heartbeat timeout. - retry_policy (RetryPolicy): The retry policy. - - Returns: - FileContentResponse: The content of the file - """ - if self._sgp_service is None: - raise ValueError( - "SGP activities are disabled because the SGP client could not be initialized. Please check that the SGP_API_KEY environment variable is set." - ) - - params = DownloadFileParams( - file_id=params.file_id, - filename=params.filename, - ) - if in_temporal_workflow(): - return await ActivityHelpers.execute_activity( - activity_name=SGPActivityName.DOWNLOAD_FILE_CONTENT, - request=params, - response_type=FileContentResponse, - start_to_close_timeout=start_to_close_timeout, - heartbeat_timeout=heartbeat_timeout, - retry_policy=retry_policy, - ) - else: - return await self._sgp_service.download_file_content( - file_id=params.file_id, - filename=params.filename, - ) diff --git a/src/agentex/lib/adk/providers/_modules/sync_provider.py b/src/agentex/lib/adk/providers/_modules/sync_provider.py deleted file mode 100644 index a34cfcda..00000000 --- a/src/agentex/lib/adk/providers/_modules/sync_provider.py +++ /dev/null @@ -1,690 +0,0 @@ -"""Simple OpenAI Provider wrapper that adds logging to demonstrate streaming is working.""" - -from __future__ import annotations - -from typing import Any, Union, Optional, override - -from agents import ( - Tool, - Model, - Handoff, - ModelTracing, - ModelResponse, - ModelSettings, - TResponseInputItem, - AgentOutputSchemaBase, -) -from openai.types.responses import ( - ResponseTextDeltaEvent, - ResponseFunctionToolCall, - ResponseFunctionWebSearch, - ResponseOutputItemDoneEvent, - ResponseOutputItemAddedEvent, - ResponseCodeInterpreterToolCall, - ResponseReasoningSummaryPartAddedEvent, - ResponseReasoningSummaryTextDeltaEvent, -) -from agents.models.openai_provider import OpenAIProvider -from openai.types.responses.response_reasoning_text_done_event import ResponseReasoningTextDoneEvent -from openai.types.responses.response_reasoning_text_delta_event import ResponseReasoningTextDeltaEvent -from openai.types.responses.response_reasoning_summary_text_done_event import ResponseReasoningSummaryTextDoneEvent - -from agentex import AsyncAgentex -from agentex.lib.utils.logging import make_logger -from agentex.lib.core.tracing.tracer import AsyncTracer -from agentex.types.task_message_delta import TextDelta -from agentex.types.task_message_update import ( - StreamTaskMessageDone, - StreamTaskMessageFull, - StreamTaskMessageDelta, - StreamTaskMessageStart, -) -from agentex.types.task_message_content import TextContent -from agentex.types.tool_request_content import ToolRequestContent -from 
agentex.types.tool_response_content import ToolResponseContent -from agentex.types.reasoning_content_delta import ReasoningContentDelta -from agentex.types.reasoning_summary_delta import ReasoningSummaryDelta - -logger = make_logger(__name__) - - -def _serialize_item(item: Any) -> dict[str, Any]: - """ - Universal serializer for any item type from OpenAI Agents SDK. - - Uses model_dump() for Pydantic models, otherwise extracts attributes manually. - Filters out internal Pydantic fields that can't be serialized. - """ - if hasattr(item, 'model_dump'): - # Pydantic model - use model_dump for proper serialization - try: - return item.model_dump(mode='json', exclude_unset=True) - except Exception: - # Fallback to dict conversion - return dict(item) if hasattr(item, '__iter__') else {} - else: - # Not a Pydantic model - extract attributes manually - item_dict = {} - for attr_name in dir(item): - if not attr_name.startswith('_') and attr_name not in ('model_fields', 'model_config', 'model_computed_fields'): - try: - attr_value = getattr(item, attr_name, None) - # Skip methods and None values - if attr_value is not None and not callable(attr_value): - # Convert to JSON-serializable format - if hasattr(attr_value, 'model_dump'): - item_dict[attr_name] = attr_value.model_dump() - elif isinstance(attr_value, (str, int, float, bool, list, dict)): - item_dict[attr_name] = attr_value - else: - item_dict[attr_name] = str(attr_value) - except Exception: - # Skip attributes that can't be accessed - pass - return item_dict - - -class SyncStreamingModel(Model): - """Simple model wrapper that adds logging to stream_response and supports tracing.""" - - def __init__(self, original_model: Model, trace_id: str | None = None, parent_span_id: str | None = None, tracer: AsyncTracer | None = None): - """Initialize with the original OpenAI model to wrap. 
- Args: - original_model: The OpenAI model instance to wrap - trace_id: Optional trace ID for distributed tracing - parent_span_id: Optional parent span ID for tracing hierarchy - tracer: Optional AsyncTracer for distributed tracing - """ - self.original_model = original_model - self.trace_id = trace_id - self.parent_span_id = parent_span_id - self.tracer = tracer - - @override - async def get_response( - self, - system_instructions: Optional[str], - input: Union[str, list[TResponseInputItem]], - model_settings: ModelSettings, - tools: list[Tool], - output_schema: Optional[AgentOutputSchemaBase], - handoffs: list[Handoff], - tracing: ModelTracing, - *, - previous_response_id: Optional[str] = None, - conversation_id: Optional[str] = None, - prompt: Any = None, - ) -> ModelResponse: - """Pass through to the original model's get_response with tracing support.""" - - # Wrap the request in a tracing span if tracer is available - if self.tracer and self.trace_id: - trace = self.tracer.trace(self.trace_id) - async with trace.span( - parent_id=self.parent_span_id, - name="run_agent", - input={ - "system_instructions": system_instructions, - "input": input, - "model_settings": str(model_settings) if model_settings else None, - "tools": [tool.name for tool in tools] if tools else [], - "output_schema": str(output_schema) if output_schema else None, - "handoffs": [str(h) for h in handoffs] if handoffs else [], - "previous_response_id": previous_response_id, - }, - ) as span: - # Build kwargs, excluding conversation_id if not supported - kwargs = { - "system_instructions": system_instructions, - "input": input, - "model_settings": model_settings, - "tools": tools, - "output_schema": output_schema, - "handoffs": handoffs, - "tracing": tracing, - "previous_response_id": previous_response_id, - "prompt": prompt, - } - - # Only add conversation_id if the model supports it - if hasattr(self.original_model, 'supports_conversation_id'): - kwargs["conversation_id"] = conversation_id - - response = await self.original_model.get_response(**kwargs) - - # Set span output with structured data - if span and response: - new_items = [] - final_output = None - - # Extract final output text from response - response_final_output = getattr(response, 'final_output', None) - if response_final_output: - final_output = response_final_output - - # Extract items from the response output - response_output = getattr(response, 'output', None) - if response_output: - output_items = response_output if isinstance(response_output, list) else [response_output] - - for item in output_items: - try: - item_dict = _serialize_item(item) - if item_dict: - new_items.append(item_dict) - - # Extract final_output from message type if available - if item_dict.get('type') == 'message' and not final_output: - content = item_dict.get('content', []) - if content and isinstance(content, list): - for content_part in content: - if isinstance(content_part, dict) and 'text' in content_part: - final_output = content_part['text'] - break - except Exception as e: - logger.warning(f"Failed to serialize item in get_response: {e}") - continue - - span.output = { - "new_items": new_items, - "final_output": final_output, - } - - return response - else: - # No tracing, just call normally - # Build kwargs, excluding conversation_id if not supported - kwargs = { - "system_instructions": system_instructions, - "input": input, - "model_settings": model_settings, - "tools": tools, - "output_schema": output_schema, - "handoffs": handoffs, - "tracing": tracing, - 
"previous_response_id": previous_response_id, - "prompt": prompt, - } - - # Only add conversation_id if the model supports it - if hasattr(self.original_model, 'supports_conversation_id'): - kwargs["conversation_id"] = conversation_id - - return await self.original_model.get_response(**kwargs) - - @override - async def stream_response( - self, - system_instructions: Optional[str], - input: Union[str, list[TResponseInputItem]], - model_settings: ModelSettings, - tools: list[Tool], - output_schema: Optional[AgentOutputSchemaBase], - handoffs: list[Handoff], - tracing: ModelTracing, - *, - previous_response_id: Optional[str] = None, - conversation_id: Optional[str] = None, - prompt: Any = None, - ): # Return type is generic AsyncIterator for flexibility - """Wrap the original model's stream_response and pass through OpenAI events. - This method passes through the OpenAI stream events from the underlying model. - The conversion to AgentEx types happens in the ACP layer. - """ - - # Wrap the streaming in a tracing span if tracer is available - if self.tracer and self.trace_id: - trace = self.tracer.trace(self.trace_id) - - # Manually start the span instead of using context manager - span = await trace.start_span( - parent_id=self.parent_span_id, - name="run_agent_streamed", - input={ - "system_instructions": system_instructions, - "input": input, - "model_settings": str(model_settings) if model_settings else None, - "tools": [tool.name for tool in tools] if tools else [], - "output_schema": str(output_schema) if output_schema else None, - "handoffs": [str(h) for h in handoffs] if handoffs else [], - "previous_response_id": previous_response_id, - }, - ) - - try: - # Get the stream from the original model - stream_kwargs = { - "system_instructions": system_instructions, - "input": input, - "model_settings": model_settings, - "tools": tools, - "output_schema": output_schema, - "handoffs": handoffs, - "tracing": tracing, - "previous_response_id": previous_response_id, - "prompt": prompt, - } - - # Only add conversation_id if the model supports it - if hasattr(self.original_model, 'supports_conversation_id'): - stream_kwargs["conversation_id"] = conversation_id - - # Get the stream response from the original model and yield each event - stream_response = self.original_model.stream_response(**stream_kwargs) - - # Pass through each event from the original stream and track items - new_items = [] - final_response_text = "" - - async for event in stream_response: - event_type = getattr(event, 'type', 'no-type') - - # Handle response.output_item.done events which contain completed items - if event_type == 'response.output_item.done': - item = getattr(event, 'item', None) - if item is not None: - try: - item_dict = _serialize_item(item) - if item_dict: - new_items.append(item_dict) - - # Update final_response_text from message type if available - if item_dict.get('type') == 'message': - content = item_dict.get('content', []) - if content and isinstance(content, list): - for content_part in content: - if isinstance(content_part, dict) and 'text' in content_part: - final_response_text = content_part['text'] - break - except Exception as e: - logger.warning(f"Failed to serialize item in stream_response: {e}") - continue - - yield event - - # Set span output with structured data including tool calls and final response - span.output = { - "new_items": new_items, - "final_output": final_response_text if final_response_text else None, - } - finally: - # End the span after all events have been yielded - await 
trace.end_span(span) - else: - # No tracing, just stream normally - # Get the stream from the original model - stream_kwargs = { - "system_instructions": system_instructions, - "input": input, - "model_settings": model_settings, - "tools": tools, - "output_schema": output_schema, - "handoffs": handoffs, - "tracing": tracing, - "previous_response_id": previous_response_id, - "prompt": prompt, - } - - # Only add conversation_id if the model supports it - if hasattr(self.original_model, 'supports_conversation_id'): - stream_kwargs["conversation_id"] = conversation_id - - # Get the stream response from the original model and yield each event - stream_response = self.original_model.stream_response(**stream_kwargs) - - # Pass through each event from the original stream - async for event in stream_response: - yield event - -class SyncStreamingProvider(OpenAIProvider): - """Simple OpenAI provider wrapper that adds logging to streaming and supports tracing.""" - - def __init__(self, trace_id: str | None = None, parent_span_id: str | None = None, *args, **kwargs): - """Initialize the provider with tracing support. - Args: - trace_id: Optional trace ID for distributed tracing - parent_span_id: Optional parent span ID for tracing hierarchy - *args: Additional positional arguments for OpenAIProvider - **kwargs: Additional keyword arguments for OpenAIProvider - """ - super().__init__(*args, **kwargs) - self.trace_id = trace_id - self.parent_span_id = parent_span_id - - # Initialize AsyncTracer with client directly in the provider - if trace_id: - agentex_client = AsyncAgentex() - self.tracer = AsyncTracer(agentex_client) - else: - self.tracer = None - - @override - def get_model(self, model_name: Optional[str] = None) -> Model: - """Get a model wrapped with our logging capabilities and tracing. - Args: - model_name: The name of the model to retrieve - Returns: - A SyncStreamingModel that wraps the original OpenAI model - """ - # Get the original model from the parent class - original_model = super().get_model(model_name) - - # Wrap it with our logging capabilities and tracing info - wrapped_model = SyncStreamingModel(original_model, self.trace_id, self.parent_span_id, self.tracer) - - return wrapped_model - - -def _extract_tool_call_info(tool_call_item: Any) -> tuple[str, str, dict[str, Any]]: - """ - Extract call_id, tool_name, and tool_arguments from a tool call item. 
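    Example (a sketch; the field values are illustrative):

        call = ResponseFunctionToolCall(
            type="function_call",
            call_id="call_1",
            name="get_weather",
            arguments='{"city": "Paris"}',
        )
        _extract_tool_call_info(call)
        # -> ("call_1", "get_weather", {"city": "Paris"})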
- Args: - tool_call_item: The tool call item to process - Returns: - A tuple of (call_id, tool_name, tool_arguments) - """ - # Generic handling for different tool call types - # Try 'call_id' first, then 'id', then generate placeholder - if hasattr(tool_call_item, "call_id"): - call_id = tool_call_item.call_id - elif hasattr(tool_call_item, "id"): - call_id = tool_call_item.id - else: - call_id = f"unknown_call_{id(tool_call_item)}" - - if isinstance(tool_call_item, ResponseFunctionWebSearch): - tool_name = "web_search" - tool_arguments = {"action": tool_call_item.action.model_dump(), "status": tool_call_item.status} - elif isinstance(tool_call_item, ResponseCodeInterpreterToolCall): - tool_name = "code_interpreter" - tool_arguments = {"code": tool_call_item.code, "status": tool_call_item.status} - elif isinstance(tool_call_item, ResponseFunctionToolCall): - # Handle standard function tool calls - tool_name = tool_call_item.name - # Handle the arguments field which might be a string or None - if tool_call_item.arguments: - if isinstance(tool_call_item.arguments, str): - import json - tool_arguments = json.loads(tool_call_item.arguments) if tool_call_item.arguments else {} - else: - tool_arguments = tool_call_item.arguments - else: - tool_arguments = {} - else: - # Generic handling for any tool call type - tool_name = getattr(tool_call_item, "name", type(tool_call_item).__name__) - # Handle the arguments field which might be a string or None - if hasattr(tool_call_item, "arguments"): - arguments = tool_call_item.arguments - if isinstance(arguments, str): - import json - tool_arguments = json.loads(arguments) if arguments else {} - elif arguments is None: - tool_arguments = {} - else: - tool_arguments = arguments - else: - tool_arguments = tool_call_item.model_dump() - - return call_id, tool_name, tool_arguments - - -def _extract_tool_response_info(tool_map: dict[str, Any], tool_output_item: Any) -> tuple[str, str, str]: - """ - Extract call_id, tool_name, and content from a tool output item. - Args: - tool_map: Dictionary mapping call_ids to tool names - tool_output_item: The tool output item to process - Returns: - A tuple of (call_id, tool_name, content) - """ - - # Handle different formats of tool_output_item - if isinstance(tool_output_item, dict): - call_id = tool_output_item.get("call_id", tool_output_item.get("id", f"unknown_call_{id(tool_output_item)}")) - content = tool_output_item.get("output", str(tool_output_item)) - else: - # Try to get call_id from attributes - if hasattr(tool_output_item, "call_id"): - call_id = tool_output_item.call_id - elif hasattr(tool_output_item, "id"): - call_id = tool_output_item.id - else: - call_id = f"unknown_call_{id(tool_output_item)}" - - # Get content - if hasattr(tool_output_item, "output"): - content = tool_output_item.output - else: - content = str(tool_output_item) - - # Get tool name from map - tool_name = tool_map.get(call_id, "unknown_tool") - - return call_id, tool_name, content - - -async def convert_openai_to_agentex_events(stream_response): - """Convert OpenAI streaming events to AgentEx TaskMessageUpdate events with reasoning support. 
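    For a plain text reply, the converted sequence looks like this (a sketch;
    the delta text is illustrative):

        StreamTaskMessageStart(type="start", index=0, content=TextContent(type="text", author="agent", content=""))
        StreamTaskMessageDelta(type="delta", index=0, delta=TextDelta(type="text", text_delta="Hel"))
        StreamTaskMessageDelta(type="delta", index=0, delta=TextDelta(type="text", text_delta="lo"))
        StreamTaskMessageDone(type="done", index=0)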
- - This is an enhanced version of the base converter that includes support for: - - Reasoning content deltas (for o1 models) - - Reasoning summary deltas (for o1 models) - - Args: - stream_response: An async iterator of OpenAI streaming events - Yields: - TaskMessageUpdate: AgentEx streaming events (StreamTaskMessageDelta, StreamTaskMessageFull, or StreamTaskMessageDone) - """ - - tool_map = {} - event_count = 0 - message_index = 0 # Track message index for proper sequencing - seen_tool_output = False # Track if we've seen tool output to know when final text starts - item_id_to_index = {} # Map item_id to message index - item_id_to_type = {} # Map item_id to content type (text, reasoning_content, reasoning_summary) - - async for event in stream_response: - event_count += 1 - - # Check for raw response events which contain the actual OpenAI streaming events - if hasattr(event, 'type') and event.type == 'raw_response_event': - if hasattr(event, 'data'): - raw_event = event.data - - # Check for ResponseOutputItemAddedEvent which signals a new message starting - if isinstance(raw_event, ResponseOutputItemAddedEvent): - # Don't increment here - we'll increment when we see the actual text delta - # This is just a signal that a new message is starting - pass - - # Handle item completion - send done event to close the message - elif isinstance(raw_event, ResponseOutputItemDoneEvent): - item_id = raw_event.item.id - if item_id in item_id_to_index: - # Get the message type to decide whether to send done event - message_type = item_id_to_type.get(item_id, "text") - - # Don't send done events for reasoning content/summary - # They just end with their last delta - if message_type not in ("reasoning_content", "reasoning_summary"): - yield StreamTaskMessageDone( - type="done", - index=item_id_to_index[item_id], - ) - - # Skip reasoning summary part added events - we handle them on delta - elif isinstance(raw_event, ResponseReasoningSummaryPartAddedEvent): - pass - - # Handle reasoning summary text delta events - elif isinstance(raw_event, ResponseReasoningSummaryTextDeltaEvent): - item_id = raw_event.item_id - summary_index = raw_event.summary_index - - # If this is a new item_id we haven't seen, create a new message - if item_id and item_id not in item_id_to_index: - message_index += 1 - item_id_to_index[item_id] = message_index - item_id_to_type[item_id] = "reasoning_summary" - - # Send a start event for this new reasoning summary message - yield StreamTaskMessageStart( - type="start", - index=item_id_to_index[item_id], - content=TextContent( - type="text", - author="agent", - content="", # Start with empty content - ), - ) - - # Use the index for this item_id - current_index = item_id_to_index.get(item_id, message_index) - - # Yield reasoning summary delta - yield StreamTaskMessageDelta( - type="delta", - index=current_index, - delta=ReasoningSummaryDelta( - type="reasoning_summary", - summary_index=summary_index, - summary_delta=raw_event.delta, - ), - ) - - # Handle reasoning summary text done events - elif isinstance(raw_event, ResponseReasoningSummaryTextDoneEvent): - # We do NOT close the streaming context here - # as there can be multiple reasoning summaries. 
- # The context will be closed when the entire - # output item is done (ResponseOutputItemDoneEvent) - pass - - # Handle reasoning content text delta events - elif isinstance(raw_event, ResponseReasoningTextDeltaEvent): - item_id = raw_event.item_id - content_index = raw_event.content_index - - # If this is a new item_id we haven't seen, create a new message - if item_id and item_id not in item_id_to_index: - message_index += 1 - item_id_to_index[item_id] = message_index - item_id_to_type[item_id] = "reasoning_content" - - # Send a start event for this new reasoning content message - yield StreamTaskMessageStart( - type="start", - index=item_id_to_index[item_id], - content=TextContent( - type="text", - author="agent", - content="", # Start with empty content - ), - ) - - # Use the index for this item_id - current_index = item_id_to_index.get(item_id, message_index) - - # Yield reasoning content delta - yield StreamTaskMessageDelta( - type="delta", - index=current_index, - delta=ReasoningContentDelta( - type="reasoning_content", - content_index=content_index, - content_delta=raw_event.delta, - ), - ) - - # Handle reasoning content text done events - elif isinstance(raw_event, ResponseReasoningTextDoneEvent): - # We do NOT close the streaming context here - # as there can be multiple reasoning content texts. - # The context will be closed when the entire - # output item is done (ResponseOutputItemDoneEvent) - pass - - # Check if this is a text delta event from OpenAI - elif isinstance(raw_event, ResponseTextDeltaEvent): - # Check if this event has an item_id - item_id = getattr(raw_event, 'item_id', None) - - # If this is a new item_id we haven't seen, it's a new message - if item_id and item_id not in item_id_to_index: - # Check if this is truly a NEW text message after tools - # We need to differentiate between the first text and the final text after tools - if seen_tool_output: - # This is the final text message after tool execution - message_index += 1 - item_id_to_index[item_id] = message_index - else: - item_id_to_index[item_id] = message_index - - item_id_to_type[item_id] = "text" - - # Send a start event with empty content for this new text message - yield StreamTaskMessageStart( - type="start", - index=item_id_to_index[item_id], - content=TextContent( - type="text", - author="agent", - content="", # Start with empty content, deltas will fill it - ), - ) - - # Use the index for this item_id - current_index = item_id_to_index.get(item_id, message_index) - - delta_message = StreamTaskMessageDelta( - type="delta", - index=current_index, - delta=TextDelta( - type="text", - text_delta=raw_event.delta, - ), - ) - yield delta_message - - elif hasattr(event, 'type') and event.type == 'run_item_stream_event': - # Skip reasoning_item events - they're handled via raw_response_event above - if hasattr(event, 'item') and event.item.type == 'reasoning_item': - continue - - # Check for tool_call_item type (this is when a tool is being called) - elif hasattr(event, 'item') and event.item.type == 'tool_call_item': - # Extract tool call information using the helper method - call_id, tool_name, tool_arguments = _extract_tool_call_info(event.item.raw_item) - tool_map[call_id] = tool_name - tool_request_content = ToolRequestContent( - tool_call_id=call_id, - name=tool_name, - arguments=tool_arguments, - author="agent", - ) - message_index += 1 # Increment for new message - yield StreamTaskMessageFull( - index=message_index, - type="full", - content=tool_request_content, - ) - - # Check for 
tool_call_output_item type (this is when a tool returns output) - elif hasattr(event, 'item') and event.item.type == 'tool_call_output_item': - # Extract tool response information using the helper method - call_id, tool_name, content = _extract_tool_response_info(tool_map, event.item.raw_item) - tool_response_content = ToolResponseContent( - tool_call_id=call_id, - name=tool_name, - content=content, - author="agent", - ) - message_index += 1 # Increment for new message - seen_tool_output = True # Mark that we've seen tool output so next text gets new index - yield StreamTaskMessageFull( - type="full", - index=message_index, - content=tool_response_content, - ) - diff --git a/src/agentex/lib/adk/utils/__init__.py b/src/agentex/lib/adk/utils/__init__.py deleted file mode 100644 index c190cb6e..00000000 --- a/src/agentex/lib/adk/utils/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from agentex.lib.adk.utils._modules.templating import TemplatingModule - -__all__ = ["templating"] - -templating = TemplatingModule() diff --git a/src/agentex/lib/adk/utils/_modules/__init__.py b/src/agentex/lib/adk/utils/_modules/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/agentex/lib/adk/utils/_modules/client.py b/src/agentex/lib/adk/utils/_modules/client.py deleted file mode 100644 index 72528963..00000000 --- a/src/agentex/lib/adk/utils/_modules/client.py +++ /dev/null @@ -1,32 +0,0 @@ -from typing import override - -import httpx - -from agentex import AsyncAgentex -from agentex.lib.utils.logging import make_logger -from agentex.lib.environment_variables import EnvironmentVariables - -logger = make_logger(__name__) - - -class EnvAuth(httpx.Auth): - def __init__(self, header_name="x-agent-api-key"): - self.header_name = header_name - - @override - def auth_flow(self, request): - # This gets called for every request - env_vars = EnvironmentVariables.refresh() - if env_vars: - agent_api_key = env_vars.AGENT_API_KEY - if agent_api_key: - request.headers[self.header_name] = agent_api_key - masked_key = agent_api_key[-4:] if agent_api_key and len(agent_api_key) > 4 else "****" - logger.info(f"Adding header {self.header_name}:{masked_key}") - yield request - - -def create_async_agentex_client(**kwargs) -> AsyncAgentex: - client = AsyncAgentex(**kwargs) - client._client.auth = EnvAuth() - return client diff --git a/src/agentex/lib/adk/utils/_modules/templating.py b/src/agentex/lib/adk/utils/_modules/templating.py deleted file mode 100644 index 29e6b6b2..00000000 --- a/src/agentex/lib/adk/utils/_modules/templating.py +++ /dev/null @@ -1,96 +0,0 @@ -from __future__ import annotations - -from typing import Any -from datetime import timedelta - -from temporalio.common import RetryPolicy - -from agentex.lib.utils.logging import make_logger -from agentex.lib.utils.temporal import in_temporal_workflow -from agentex.lib.core.tracing.tracer import AsyncTracer -from agentex.lib.adk.utils._modules.client import create_async_agentex_client -from agentex.lib.core.services.adk.utils.templating import TemplatingService -from agentex.lib.core.temporal.activities.activity_helpers import ActivityHelpers -from agentex.lib.core.temporal.activities.adk.utils.templating_activities import ( - JinjaActivityName, - RenderJinjaParams, -) - -logger = make_logger(__name__) - -DEFAULT_RETRY_POLICY = RetryPolicy(maximum_attempts=1) - - -class TemplatingModule: - """ - Module for managing templating operations in Agentex. 
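    Example (a sketch using the module-level `templating` export shown above;
    the trace id is illustrative):

        rendered = await templating.render_jinja(
            trace_id="trace-123",
            template="Hello {{ name }}!",
            variables={"name": "world"},
        )
        # -> "Hello world!"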
- - This interface provides high-level methods for rendering Jinja templates, abstracting away - the underlying activity and workflow execution. It supports both synchronous and asynchronous - (Temporal workflow) contexts. - """ - - def __init__( - self, - templating_service: TemplatingService | None = None, - ): - """ - Initialize the templating interface. - - Args: - templating_service (Optional[TemplatingService]): Optional pre-configured templating service. If None, will be auto-initialized. - """ - if templating_service is None: - agentex_client = create_async_agentex_client() - tracer = AsyncTracer(agentex_client) - self._templating_service = TemplatingService(tracer=tracer) - else: - self._templating_service = templating_service - - async def render_jinja( - self, - trace_id: str, - template: str, - variables: dict[str, Any], - parent_span_id: str | None = None, - start_to_close_timeout: timedelta = timedelta(seconds=10), - heartbeat_timeout: timedelta = timedelta(seconds=10), - retry_policy: RetryPolicy = DEFAULT_RETRY_POLICY, - ) -> str: - """ - Render a Jinja template. - - Args: - trace_id (str): Unique identifier for tracing and correlation. - template (str): The Jinja template string to render. - variables (Dict[str, Any]): Variables to use in the template. - parent_span_id (Optional[str]): Optional parent span for tracing. - start_to_close_timeout (timedelta): Maximum time allowed for the operation. - heartbeat_timeout (timedelta): Maximum time between heartbeats. - retry_policy (RetryPolicy): Policy for retrying failed operations. - - Returns: - str: The rendered template as a string. - """ - render_jinja_params = RenderJinjaParams( - trace_id=trace_id, - parent_span_id=parent_span_id, - template=template, - variables=variables, - ) - if in_temporal_workflow(): - return await ActivityHelpers.execute_activity( - activity_name=JinjaActivityName.RENDER_JINJA, - request=render_jinja_params, - response_type=str, - start_to_close_timeout=start_to_close_timeout, - heartbeat_timeout=heartbeat_timeout, - retry_policy=retry_policy, - ) - else: - return await self._templating_service.render_jinja( - template=template, - variables=variables, - trace_id=trace_id, - parent_span_id=parent_span_id, - ) diff --git a/src/agentex/lib/cli/__init__.py b/src/agentex/lib/cli/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/agentex/lib/cli/commands/__init__.py b/src/agentex/lib/cli/commands/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/agentex/lib/cli/commands/agents.py b/src/agentex/lib/cli/commands/agents.py deleted file mode 100644 index 10dab0d0..00000000 --- a/src/agentex/lib/cli/commands/agents.py +++ /dev/null @@ -1,373 +0,0 @@ -from __future__ import annotations - -import builtins -from pathlib import Path - -import typer -import questionary -from rich import print_json -from rich.panel import Panel -from rich.console import Console - -from agentex import Agentex -from agentex.lib.cli.debug import DebugMode, DebugConfig -from agentex.lib.utils.logging import make_logger -from agentex.lib.cli.utils.cli_utils import handle_questionary_cancellation -from agentex.lib.sdk.config.validation import ( - EnvironmentsValidationError, - generate_helpful_error_message, - validate_manifest_and_environments, -) -from agentex.lib.cli.utils.kubectl_utils import ( - validate_namespace, - check_and_switch_cluster_context, -) -from agentex.lib.sdk.config.agent_manifest import AgentManifest -from agentex.lib.cli.handlers.agent_handlers import ( - 
run_agent, - build_agent, -) -from agentex.lib.cli.handlers.deploy_handlers import ( - HelmError, - DeploymentError, - InputDeployOverrides, - deploy_agent, -) -from agentex.lib.cli.handlers.cleanup_handlers import cleanup_agent_workflows - -logger = make_logger(__name__) -console = Console() - -agents = typer.Typer() - - -@agents.command() -def get( - agent_id: str = typer.Argument(..., help="ID of the agent to get"), -): - """ - Get the agent with the given ID. - """ - logger.info(f"Getting agent with ID: {agent_id}") - client = Agentex() - agent = client.agents.retrieve(agent_id=agent_id) - logger.info(f"Agent retrieved: {agent}") - print_json(data=agent.to_dict(), default=str) - - -@agents.command() -def list(): - """ - List all agents. - """ - logger.info("Listing all agents") - client = Agentex() - agents = client.agents.list() - logger.info(f"Agents retrieved: {agents}") - print_json(data=[agent.to_dict() for agent in agents], default=str) - - -@agents.command() -def delete( - agent_name: str = typer.Argument(..., help="Name of the agent to delete"), -): - """ - Delete the agent with the given name. - """ - logger.info(f"Deleting agent with name: {agent_name}") - client = Agentex() - client.agents.delete_by_name(agent_name=agent_name) - logger.info(f"Agent deleted: {agent_name}") - - -@agents.command() -def cleanup_workflows( - agent_name: str = typer.Argument(..., help="Name of the agent to clean up workflows for"), - force: bool = typer.Option(False, help="Force cleanup using direct Temporal termination (bypasses development check)"), -): - """ - Clean up all running workflows for an agent. - - By default, uses graceful cancellation via agent RPC. - With --force, directly terminates workflows via Temporal client. - This is a convenience command that does the same thing as 'agentex tasks cleanup'. - """ - try: - console.print(f"[blue]Cleaning up workflows for agent '{agent_name}'...[/blue]") - - cleanup_agent_workflows( - agent_name=agent_name, - force=force, - development_only=True - ) - - console.print(f"[green]✓ Workflow cleanup completed for agent '{agent_name}'[/green]") - - except Exception as e: - console.print(f"[red]Cleanup failed: {str(e)}[/red]") - logger.exception("Agent workflow cleanup failed") - raise typer.Exit(1) from e - - -@agents.command() -def build( - manifest: str = typer.Option(..., help="Path to the manifest you want to use"), - registry: str | None = typer.Option( - None, help="Registry URL for pushing the built image" - ), - repository_name: str | None = typer.Option( - None, help="Repository name to use for the built image" - ), - platforms: str | None = typer.Option( - None, help="Platform to build the image for. Please enter a comma separated list of platforms." - ), - push: bool = typer.Option(False, help="Whether to push the image to the registry"), - secret: str | None = typer.Option( - None, - help="Docker build secret in the format 'id=secret-id,src=path-to-secret-file'", - ), - tag: str | None = typer.Option( - None, help="Image tag to use (defaults to 'latest')" - ), - build_arg: builtins.list[str] | None = typer.Option( # noqa: B008 - None, - help="Docker build argument in the format 'KEY=VALUE' (can be used multiple times)", - ), -): - """ - Build an agent image locally from the given manifest.
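    Example invocation (a sketch; the registry and tag are illustrative):

        agentex agents build --manifest manifest.yaml --registry registry.example.com --tag v1 --push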
- """ - typer.echo(f"Building agent image from manifest: {manifest}") - - # Validate required parameters for building - if push and not registry: - typer.echo("Error: --registry is required when --push is enabled", err=True) - raise typer.Exit(1) - - # Only proceed with build if we have a registry (for now, to match existing behavior) - if not registry: - typer.echo("No registry provided, skipping image build") - return - - platform_list = platforms.split(",") if platforms else ["linux/amd64"] - - try: - image_url = build_agent( - manifest_path=manifest, - registry_url=registry, - repository_name=repository_name, - platforms=platform_list, - push=push, - secret=secret or "", # Provide default empty string - tag=tag or "latest", # Provide default - build_args=build_arg or [], # Provide default empty list - ) - if image_url: - typer.echo(f"Successfully built image: {image_url}") - else: - typer.echo("Image build completed but no URL returned") - except Exception as e: - typer.echo(f"Error building agent image: {str(e)}", err=True) - logger.exception("Error building agent image") - raise typer.Exit(1) from e - - -@agents.command() -def run( - manifest: str = typer.Option(..., help="Path to the manifest you want to use"), - cleanup_on_start: bool = typer.Option( - False, - help="Clean up existing workflows for this agent before starting" - ), - # Debug options - debug: bool = typer.Option(False, help="Enable debug mode for both worker and ACP (disables auto-reload)"), - debug_worker: bool = typer.Option(False, help="Enable debug mode for temporal worker only"), - debug_acp: bool = typer.Option(False, help="Enable debug mode for ACP server only"), - debug_port: int = typer.Option(5678, help="Port for remote debugging (worker uses this, ACP uses port+1)"), - wait_for_debugger: bool = typer.Option(False, help="Wait for debugger to attach before starting"), -) -> None: - """ - Run an agent locally from the given manifest. 
- """ - typer.echo(f"Running agent from manifest: {manifest}") - - # Optionally cleanup existing workflows before starting - if cleanup_on_start: - try: - # Parse manifest to get agent name - manifest_obj = AgentManifest.from_yaml(file_path=manifest) - agent_name = manifest_obj.agent.name - - console.print(f"[yellow]Cleaning up existing workflows for agent '{agent_name}'...[/yellow]") - cleanup_agent_workflows( - agent_name=agent_name, - force=False, - development_only=True - ) - console.print("[green]โœ“ Pre-run cleanup completed[/green]") - - except Exception as e: - console.print(f"[yellow]โš  Pre-run cleanup failed: {str(e)}[/yellow]") - logger.warning(f"Pre-run cleanup failed: {e}") - - # Create debug configuration based on CLI flags - debug_config = None - if debug or debug_worker or debug_acp: - # Determine debug mode - if debug: - mode = DebugMode.BOTH - elif debug_worker and debug_acp: - mode = DebugMode.BOTH - elif debug_worker: - mode = DebugMode.WORKER - elif debug_acp: - mode = DebugMode.ACP - else: - mode = DebugMode.NONE - - debug_config = DebugConfig( - enabled=True, - mode=mode, - port=debug_port, - wait_for_attach=wait_for_debugger, - auto_port=False # Use fixed port to match VS Code launch.json - ) - - console.print(f"[blue]๐Ÿ› Debug mode enabled: {mode.value}[/blue]") - if wait_for_debugger: - console.print("[yellow]โณ Processes will wait for debugger attachment[/yellow]") - - try: - run_agent(manifest_path=manifest, debug_config=debug_config) - except Exception as e: - typer.echo(f"Error running agent: {str(e)}", err=True) - logger.exception("Error running agent") - raise typer.Exit(1) from e - - -@agents.command() -def deploy( - cluster: str = typer.Option( - ..., help="Target cluster name (must match kubectl context)" - ), - manifest: str = typer.Option("manifest.yaml", help="Path to the manifest file"), - namespace: str | None = typer.Option( - None, - help="Override Kubernetes namespace (defaults to namespace from environments.yaml)", - ), - environment: str | None = typer.Option( - None, help="Environment name (dev, prod, etc.) - must be defined in environments.yaml. If not provided, the namespace must be set explicitly." 
-    ),
-    tag: str | None = typer.Option(None, help="Override the image tag for deployment"),
-    repository: str | None = typer.Option(
-        None, help="Override the repository for deployment"
-    ),
-    interactive: bool = typer.Option(
-        True, "--interactive/--no-interactive", help="Enable interactive prompts"
-    ),
-):
-    """Deploy an agent to a Kubernetes cluster using Helm"""
-
-    console.print(
-        Panel.fit("๐Ÿš€ [bold blue]Deploy Agent[/bold blue]", border_style="blue")
-    )
-
-    try:
-        # Validate manifest exists
-        manifest_path = Path(manifest)
-        if not manifest_path.exists():
-            console.print(f"[red]Error:[/red] Manifest file not found: {manifest}")
-            raise typer.Exit(1)
-
-        # Validate manifest and environments configuration
-        try:
-            if environment:
-                _, environments_config = validate_manifest_and_environments(
-                    str(manifest_path),
-                    required_environment=environment
-                )
-                agent_env_config = environments_config.get_config_for_env(environment)
-                console.print(f"[green]โœ“[/green] Environment config validated: {environment}")
-            else:
-                agent_env_config = None
-                console.print("[yellow]โš [/yellow] No environment provided, skipping environment-specific config")
-
-        except EnvironmentsValidationError as e:
-            error_msg = generate_helpful_error_message(e, "Environment validation failed")
-            console.print(f"[red]Configuration Error:[/red]\n{error_msg}")
-            raise typer.Exit(1) from e
-        except Exception as e:
-            console.print(f"[red]Error:[/red] Failed to validate configuration: {e}")
-            raise typer.Exit(1) from e
-
-        # Load manifest for credential validation
-        manifest_obj = AgentManifest.from_yaml(str(manifest_path))
-
-        # Use namespace from environment config if not overridden
-        if not namespace and agent_env_config:
-            namespace_from_config = agent_env_config.kubernetes.namespace if agent_env_config.kubernetes else None
-            if namespace_from_config:
-                console.print(f"[blue]โ„น[/blue] Using namespace from environments.yaml: {namespace_from_config}")
-                namespace = namespace_from_config
-            else:
-                raise DeploymentError(f"No namespace defined in environments.yaml for environment '{environment}', and none was provided via --namespace")
-        elif not namespace:
-            raise DeploymentError("No namespace provided: pass --namespace explicitly, or pass --environment so the namespace can be read from environments.yaml")
-
-        # Show deployment summary and confirm (prompt only in interactive mode)
-        console.print("\n[bold]Deployment Summary:[/bold]")
-        console.print(f" Manifest: {manifest}")
-        console.print(f" Environment: {environment}")
-        console.print(f" Cluster: {cluster}")
-        console.print(f" Namespace: {namespace}")
-        if tag:
-            console.print(f" Image Tag: {tag}")
-
-        if interactive:
-            proceed = questionary.confirm("Proceed with deployment?").ask()
-            proceed = handle_questionary_cancellation(
-                proceed, "deployment confirmation"
-            )
-
-            if not proceed:
-                console.print("Deployment cancelled")
-                raise typer.Exit(0)
-        else:
-            console.print("Proceeding with deployment (non-interactive mode)")
-
-        check_and_switch_cluster_context(cluster)
-        if not validate_namespace(namespace, cluster):
-            console.print(
-                f"[red]Error:[/red] Namespace '{namespace}' does not exist in cluster '{cluster}'"
-            )
-            raise typer.Exit(1)
-
-        deploy_overrides = InputDeployOverrides(repository=repository, image_tag=tag)
-
-        # Deploy agent
-        deploy_agent(
-            manifest_path=str(manifest_path),
-            cluster_name=cluster,
-            namespace=namespace,
-            deploy_overrides=deploy_overrides,
-            environment_name=environment,
-        )
-
-        # Use the already loaded manifest object
-        release_name = f"{manifest_obj.agent.name}-{cluster}"
f"{manifest_obj.agent.name}-{cluster}" - - console.print( - "\n[bold green]๐ŸŽ‰ Deployment completed successfully![/bold green]" - ) - console.print("\nTo check deployment status:") - console.print(f" kubectl get pods -n {namespace}") - console.print(f" helm status {release_name} -n {namespace}") - - except (DeploymentError, HelmError) as e: - console.print(f"[red]Deployment failed:[/red] {str(e)}") - logger.exception("Deployment failed") - raise typer.Exit(1) from e - except Exception as e: - console.print(f"[red]Unexpected error:[/red] {str(e)}") - logger.exception("Unexpected error during deployment") - raise typer.Exit(1) from e diff --git a/src/agentex/lib/cli/commands/init.py b/src/agentex/lib/cli/commands/init.py deleted file mode 100644 index d00149b5..00000000 --- a/src/agentex/lib/cli/commands/init.py +++ /dev/null @@ -1,385 +0,0 @@ -from __future__ import annotations - -from enum import Enum -from typing import Any, Dict -from pathlib import Path - -import questionary -from jinja2 import Environment, FileSystemLoader -from rich.rule import Rule -from rich.text import Text -from rich.panel import Panel -from rich.table import Table -from rich.console import Console - -from agentex.lib.utils.logging import make_logger - -logger = make_logger(__name__) -console = Console() - -# Get the templates directory relative to this file -TEMPLATES_DIR = Path(__file__).parent.parent / "templates" - - -class TemplateType(str, Enum): - TEMPORAL = "temporal" - TEMPORAL_OPENAI_AGENTS = "temporal-openai-agents" - DEFAULT = "default" - SYNC = "sync" - - -def render_template( - template_path: str, context: Dict[str, Any], template_type: TemplateType -) -> str: - """Render a template with the given context""" - env = Environment(loader=FileSystemLoader(TEMPLATES_DIR / template_type.value)) - template = env.get_template(template_path) - return template.render(**context) - - -def create_project_structure( - path: Path, context: Dict[str, Any], template_type: TemplateType, use_uv: bool -): - """Create the project structure from templates""" - # Create project directory - project_dir: Path = path / context["project_name"] - project_dir.mkdir(parents=True, exist_ok=True) - - # Create project/code directory - code_dir: Path = project_dir / "project" - code_dir.mkdir(parents=True, exist_ok=True) - - # Create __init__.py - (code_dir / "__init__.py").touch() - - # Define project files based on template type - project_files = { - TemplateType.TEMPORAL: ["acp.py", "workflow.py", "run_worker.py"], - TemplateType.TEMPORAL_OPENAI_AGENTS: ["acp.py", "workflow.py", "run_worker.py", "activities.py"], - TemplateType.DEFAULT: ["acp.py"], - TemplateType.SYNC: ["acp.py"], - }[template_type] - - # Create project/code files - for template in project_files: - template_path = f"project/{template}.j2" - output_path = code_dir / template - output_path.write_text(render_template(template_path, context, template_type)) - - # Create root files - root_templates = { - ".dockerignore.j2": ".dockerignore", - "manifest.yaml.j2": "manifest.yaml", - "README.md.j2": "README.md", - "environments.yaml.j2": "environments.yaml", - } - - # Add package management file based on uv choice - if use_uv: - root_templates["pyproject.toml.j2"] = "pyproject.toml" - root_templates["Dockerfile-uv.j2"] = "Dockerfile" - else: - root_templates["requirements.txt.j2"] = "requirements.txt" - root_templates["Dockerfile.j2"] = "Dockerfile" - - # Add development notebook for agents - root_templates["dev.ipynb.j2"] = "dev.ipynb" - - for template, output in 
-        output_path = project_dir / output
-        output_path.write_text(render_template(template, context, template_type))
-
-    console.print(f"\n[green]โœ“[/green] Created project structure at: {project_dir}")
-
-
-def get_project_context(answers: Dict[str, Any], project_path: Path, manifest_root: Path) -> Dict[str, Any]:  # noqa: ARG001
-    """Get the project context from user answers"""
-    # Use agent_directory_name as project_name
-    project_name = answers["agent_directory_name"].replace("-", "_")
-
-    # Identical to project_name because the build root is now the parent directory (../)
-    project_path_from_build_root = project_name
-
-    return {
-        **answers,
-        "project_name": project_name,
-        "workflow_class": "".join(
-            word.capitalize() for word in answers["agent_name"].split("-")
-        )
-        + "Workflow",
-        "workflow_name": answers["agent_name"],
-        "queue_name": project_name + "_queue",
-        "project_path_from_build_root": project_path_from_build_root,
-    }
-
-
-def init():
-    """Initialize a new agent project"""
-    console.print(
-        Panel.fit(
-            "๐Ÿค– [bold blue]Initialize New Agent Project[/bold blue]",
-            border_style="blue",
-        )
-    )
-
-    # Use a Rich table for template descriptions
-    table = Table(show_header=True, header_style="bold blue")
-    table.add_column("Template", style="cyan", no_wrap=True)
-    table.add_column("Description", style="white")
-    table.add_row(
-        "[bold cyan]Async - ACP Only[/bold cyan]",
-        "Asynchronous, non-blocking agent that can process multiple concurrent requests. Best for straightforward asynchronous agents that don't need durable execution. Good for stateful applications and multi-step analysis.",
-    )
-    table.add_row(
-        "[bold cyan]Async - Temporal[/bold cyan]",
-        "Asynchronous, non-blocking agent with durable execution for all steps. Best for production-grade agents that require complex multi-step tool calls, human-in-the-loop approvals, and long-running processes that require transactional reliability.",
-    )
-    table.add_row(
-        "[bold cyan]Sync ACP[/bold cyan]",
-        "Synchronous agent that processes one request per task with a simple request-response pattern. Best for low-latency use cases, FAQ bots, translation services, and data lookups.",
-    )
-    console.print()
-    console.print(table)
-    console.print()
-
-    def validate_agent_name(text: str) -> bool | str:
-        """Validate agent name follows required format"""
-        is_valid = len(text) >= 1 and text.replace("-", "").isalnum() and text.islower()
-        if not is_valid:
-            return "Invalid name. Use only lowercase letters, numbers, and hyphens. Examples: 'my-agent', 'newsbot'"
-        return True
-
-    # Gather project information
-    template_type = questionary.select(
-        "What type of template would you like to create?",
-        choices=[
-            {"name": "Async - ACP Only", "value": TemplateType.DEFAULT},
-            {"name": "Async - Temporal", "value": "temporal_submenu"},
-            {"name": "Sync ACP", "value": TemplateType.SYNC},
-        ],
-    ).ask()
-    if not template_type:
-        return
-
-    # If Temporal was selected, show sub-menu for Temporal variants
-    if template_type == "temporal_submenu":
-        console.print()
-        template_type = questionary.select(
-            "Which Temporal template would you like to use?",
-            choices=[
-                {"name": "Basic Temporal", "value": TemplateType.TEMPORAL},
-                {"name": "Temporal + OpenAI Agents SDK (Recommended)", "value": TemplateType.TEMPORAL_OPENAI_AGENTS},
-            ],
-        ).ask()
-        if not template_type:
-            return
-
-    project_path = questionary.path(
-        "Where would you like to create your project?", default="."
-    ).ask()
-    if not project_path:
-        return
-
-    agent_name = questionary.text(
-        "What's your agent name? (letters, numbers, and hyphens only)",
-        validate=validate_agent_name,
-    ).ask()
-    if not agent_name:
-        return
-
-    agent_directory_name = questionary.text(
-        "What do you want to name the project folder for your agent?",
-        default=agent_name,
-    ).ask()
-    if not agent_directory_name:
-        return
-
-    description = questionary.text(
-        "Provide a brief description of your agent:", default="An Agentex agent"
-    ).ask()
-    if not description:
-        return
-
-    use_uv = questionary.select(
-        "Would you like to use uv for package management?",
-        choices=[
-            {"name": "Yes (Recommended)", "value": True},
-            {"name": "No", "value": False},
-        ],
-    ).ask()
-    if use_uv is None:  # Distinguish cancellation (None) from an explicit "No" (False)
-        return
-
-    answers = {
-        "template_type": template_type,
-        "project_path": project_path,
-        "agent_name": agent_name,
-        "agent_directory_name": agent_directory_name,
-        "description": description,
-        "use_uv": use_uv,
-    }
-
-    # Derive all names from agent_directory_name and path
-    project_path = Path(answers["project_path"]).resolve()
-    manifest_root = Path("../../")
-
-    # Get project context
-    context = get_project_context(answers, project_path, manifest_root)
-    context["template_type"] = answers["template_type"].value
-    context["use_uv"] = answers["use_uv"]
-
-    # Create project structure
-    create_project_structure(
-        project_path, context, answers["template_type"], answers["use_uv"]
-    )
-
-    # Show success message
-    console.print()
-    success_text = Text("โœ… Project created successfully!", style="bold green")
-    success_panel = Panel(
-        success_text,
-        border_style="green",
-        padding=(0, 2),
-        title="[bold white]Status[/bold white]",
-        title_align="left"
-    )
-    console.print(success_panel)
-
-    # Main header
-    console.print()
-    console.print(Rule("[bold blue]Next Steps[/bold blue]", style="blue"))
-    console.print()
-
-    # Local Development Section
-    local_steps = Text()
-    local_steps.append("1. ", style="bold white")
-    local_steps.append("Navigate to your project directory:\n", style="white")
-    local_steps.append(f" cd {project_path}/{context['project_name']}\n\n", style="dim cyan")
-
-    local_steps.append("2. ", style="bold white")
-    local_steps.append("Review the generated files. ", style="white")
-    local_steps.append("project/acp.py", style="yellow")
-    local_steps.append(" is your agent's entrypoint.\n", style="white")
-    local_steps.append(" See ", style="dim white")
-    local_steps.append("https://agentex.sgp.scale.com/docs", style="blue underline")
-    local_steps.append(" for how to customize different agent types", style="dim white")
-    local_steps.append("\n\n", style="white")
-
-    local_steps.append("3. ", style="bold white")
-    local_steps.append("Set up your environment and test locally ", style="white")
-    local_steps.append("(no deployment needed)", style="dim white")
-    local_steps.append(":\n", style="white")
-    local_steps.append(" uv venv && uv sync && source .venv/bin/activate", style="dim cyan")
-    local_steps.append("\n agentex agents run --manifest manifest.yaml", style="dim cyan")
-
-    local_panel = Panel(
-        local_steps,
-        title="[bold blue]Development Setup[/bold blue]",
-        title_align="left",
-        border_style="blue",
-        padding=(1, 2)
-    )
-    console.print(local_panel)
-    console.print()
-
-    # Prerequisites Note
-    prereq_text = Text()
-    prereq_text.append("The above is all you need for local development. Once you're ready for production, read this box and below.\n\n", style="white")
-
-    prereq_text.append("โ€ข ", style="bold white")
-    prereq_text.append("Prerequisites for Production: ", style="bold yellow")
-    prereq_text.append("You need Agentex hosted on a Kubernetes cluster.\n", style="white")
-    prereq_text.append(" See ", style="dim white")
-    prereq_text.append("https://agentex.sgp.scale.com/docs", style="blue underline")
-    prereq_text.append(" for setup instructions. ", style="dim white")
-    prereq_text.append("Scale GenAI Platform (SGP) customers", style="dim cyan")
-    prereq_text.append(" already have this setup as part of their enterprise license.\n\n", style="dim white")
-
-    prereq_text.append("โ€ข ", style="bold white")
-    prereq_text.append("Best Practice: ", style="bold blue")
-    prereq_text.append("Use CI/CD pipelines for production deployments, not manual commands.\n", style="white")
-    prereq_text.append(" Commands below demonstrate Agentex's quick deployment capabilities.", style="dim white")
-
-    prereq_panel = Panel(
-        prereq_text,
-        border_style="yellow",
-        padding=(1, 2)
-    )
-    console.print(prereq_panel)
-    console.print()
-
-    # Production Setup Section (includes deployment)
-    prod_steps = Text()
-    prod_steps.append("4. ", style="bold white")
-    prod_steps.append("Configure where to push your container image", style="white")
-    prod_steps.append(":\n", style="white")
-    prod_steps.append(" Edit ", style="dim white")
-    prod_steps.append("manifest.yaml", style="dim yellow")
-    prod_steps.append(" โ†’ ", style="dim white")
-    prod_steps.append("deployment.image.repository", style="dim yellow")
-    prod_steps.append(" โ†’ replace ", style="dim white")
-    prod_steps.append('""', style="dim red")
-    prod_steps.append(" with your registry", style="dim white")
-    prod_steps.append("\n Examples: ", style="dim white")
-    prod_steps.append("123456789012.dkr.ecr.us-west-2.amazonaws.com/my-agent", style="dim blue")
-    prod_steps.append(", ", style="dim white")
-    prod_steps.append("gcr.io/my-project", style="dim blue")
-    prod_steps.append(", ", style="dim white")
-    prod_steps.append("myregistry.azurecr.io", style="dim blue")
-    prod_steps.append("\n\n", style="white")
-
-    prod_steps.append("5. ", style="bold white")
-    prod_steps.append("Build your agent as a container and push to registry", style="white")
-    prod_steps.append(":\n", style="white")
-    prod_steps.append(" agentex agents build --manifest manifest.yaml --registry <your-registry> --push", style="dim cyan")
-    prod_steps.append("\n\n", style="white")
-
-    prod_steps.append("6. ", style="bold white")
-    prod_steps.append("Upload secrets to cluster ", style="white")
-    prod_steps.append("(API keys, credentials your agent needs)", style="dim white")
-    prod_steps.append(":\n", style="white")
-    prod_steps.append(" agentex secrets sync --manifest manifest.yaml --cluster your-cluster", style="dim cyan")
-    prod_steps.append("\n ", style="white")
-    prod_steps.append("Note: ", style="dim yellow")
-    prod_steps.append("Secrets are ", style="dim white")
-    prod_steps.append("never stored in manifest.yaml", style="dim red")
-    prod_steps.append(". You provide them via ", style="dim white")
-    prod_steps.append("--values file", style="dim blue")
-    prod_steps.append(" or interactive prompts", style="dim white")
-    prod_steps.append("\n\n", style="white")
-
-    prod_steps.append("7. ", style="bold white")
", style="bold white") - prod_steps.append("Deploy your agent to run on the cluster", style="white") - prod_steps.append(":\n", style="white") - prod_steps.append(" agentex agents deploy --cluster your-cluster --namespace your-namespace", style="dim cyan") - prod_steps.append("\n\n", style="white") - prod_steps.append("Note: These commands use Helm charts hosted by Scale to deploy agents.", style="dim italic") - - prod_panel = Panel( - prod_steps, - title="[bold magenta]Production Setup & Deployment[/bold magenta]", - title_align="left", - border_style="magenta", - padding=(1, 2) - ) - console.print(prod_panel) - - # Professional footer with helpful context - console.print() - console.print(Rule(style="dim white")) - - # Add helpful context about the workflow - help_text = Text() - help_text.append("โ„น๏ธ ", style="blue") - help_text.append("Quick Start: ", style="bold white") - help_text.append("Steps 1-3 for local development. Steps 4-7 require Agentex cluster for production.", style="dim white") - console.print(" ", help_text) - - tip_text = Text() - tip_text.append("๐Ÿ’ก ", style="yellow") - tip_text.append("Need help? ", style="bold white") - tip_text.append("Use ", style="dim white") - tip_text.append("agentex --help", style="cyan") - tip_text.append(" or ", style="dim white") - tip_text.append("agentex [command] --help", style="cyan") - tip_text.append(" for detailed options", style="dim white") - console.print(" ", tip_text) - console.print() diff --git a/src/agentex/lib/cli/commands/main.py b/src/agentex/lib/cli/commands/main.py deleted file mode 100644 index fa3c098d..00000000 --- a/src/agentex/lib/cli/commands/main.py +++ /dev/null @@ -1,32 +0,0 @@ -import typer - -from agentex.lib.cli.commands.uv import uv -from agentex.lib.cli.commands.init import init -from agentex.lib.cli.commands.tasks import tasks -from agentex.lib.cli.commands.agents import agents -from agentex.lib.cli.commands.secrets import secrets - -# Create the main Typer application -app = typer.Typer( - context_settings={"help_option_names": ["-h", "--help"], "max_content_width": 800}, - pretty_exceptions_show_locals=False, - pretty_exceptions_enable=False, - add_completion=False, -) - -# Add the subcommands -app.add_typer(agents, name="agents", help="Get, list, run, build, and deploy agents") -app.add_typer(tasks, name="tasks", help="Get, list, and delete tasks") -app.add_typer(secrets, name="secrets", help="Sync, get, list, and delete secrets") -app.add_typer( - uv, name="uv", help="Wrapper for uv command with AgentEx-specific enhancements" -) - -# Add init command with documentation -app.command( - help="Initialize a new agent project with a template (interactive)", -)(init) - - -if __name__ == "__main__": - app() diff --git a/src/agentex/lib/cli/commands/secrets.py b/src/agentex/lib/cli/commands/secrets.py deleted file mode 100644 index ee5e5477..00000000 --- a/src/agentex/lib/cli/commands/secrets.py +++ /dev/null @@ -1,171 +0,0 @@ -from __future__ import annotations - -from pathlib import Path - -import typer -import questionary -from rich import print_json -from rich.panel import Panel -from rich.console import Console - -from agentex.lib.utils.logging import make_logger -from agentex.lib.cli.utils.cli_utils import handle_questionary_cancellation -from agentex.lib.cli.utils.kubectl_utils import ( - validate_namespace, - check_and_switch_cluster_context, -) -from agentex.lib.sdk.config.agent_manifest import AgentManifest -from agentex.lib.cli.handlers.secret_handlers import ( - get_secret, - sync_secrets, - 
-    delete_secret,
-    get_kubernetes_secrets_by_type,
-)
-
-logger = make_logger(__name__)
-console = Console()
-
-secrets = typer.Typer()
-
-
-@secrets.command()
-def list(
-    namespace: str = typer.Option(
-        "agentex-agents", help="Kubernetes namespace to list secrets from"
-    ),
-    cluster: str | None = typer.Option(
-        None, help="Cluster context to use (defaults to current context)"
-    ),
-):
-    """List names of available secrets"""
-    logger.info(f"Listing secrets in namespace: {namespace}")
-
-    if cluster:
-        check_and_switch_cluster_context(cluster)
-        if not validate_namespace(namespace, cluster):
-            console.print(
-                f"[red]Error:[/red] Namespace '{namespace}' does not exist in cluster '{cluster}'"
-            )
-            raise typer.Exit(1)
-
-    secrets_list = get_kubernetes_secrets_by_type(namespace=namespace, context=cluster)
-    print_json(data=secrets_list)
-
-
-@secrets.command()
-def get(
-    name: str = typer.Argument(..., help="Name of the secret to get"),
-    namespace: str = typer.Option(
-        "agentex-agents", help="Kubernetes namespace for the secret"
-    ),
-    cluster: str | None = typer.Option(
-        None, help="Cluster context to use (defaults to current context)"
-    ),
-):
-    """Get details about a secret"""
-    logger.info(f"Getting secret: {name} from namespace: {namespace}")
-
-    if cluster:
-        check_and_switch_cluster_context(cluster)
-        if not validate_namespace(namespace, cluster):
-            console.print(
-                f"[red]Error:[/red] Namespace '{namespace}' does not exist in cluster '{cluster}'"
-            )
-            raise typer.Exit(1)
-
-    secret = get_secret(name=name, namespace=namespace, context=cluster)
-    print_json(data=secret)
-
-
-@secrets.command()
-def delete(
-    name: str = typer.Argument(..., help="Name of the secret to delete"),
-    namespace: str = typer.Option(
-        "agentex-agents", help="Kubernetes namespace for the secret"
-    ),
-    cluster: str | None = typer.Option(
-        None, help="Cluster context to use (defaults to current context)"
-    ),
-):
-    """Delete a secret"""
-    logger.info(f"Deleting secret: {name} from namespace: {namespace}")
-
-    if cluster:
-        check_and_switch_cluster_context(cluster)
-        if not validate_namespace(namespace, cluster):
-            console.print(
-                f"[red]Error:[/red] Namespace '{namespace}' does not exist in cluster '{cluster}'"
-            )
-            raise typer.Exit(1)
-
-    delete_secret(name=name, namespace=namespace, context=cluster)
-
-
-@secrets.command()
-def sync(
-    manifest: str = typer.Option(..., help="Path to the manifest file"),
-    # TODO: should cluster be here or be in manifest as well?
-    cluster: str = typer.Option(..., "--cluster", help="Cluster to sync secrets to"),
-    interactive: bool = typer.Option(
-        True, "--interactive/--no-interactive", help="Enable interactive prompts"
-    ),
-    namespace: str | None = typer.Option(
-        None,
-        help="Kubernetes namespace to deploy to (required in non-interactive mode)",
-    ),
-    values: str | None = typer.Option(None, "--values", help="Path to the values file"),
-):
-    """Sync secrets from the local environment to the cluster"""
-    console.print(
-        Panel.fit("๐Ÿš€ [bold blue]Sync Secrets[/bold blue]", border_style="blue")
-    )
-
-    manifest_path = Path(manifest)
-    if not manifest_path.exists():
-        console.print(f"[red]Error:[/red] Manifest file not found: {manifest}")
-        raise typer.Exit(1)
-
-    # In non-interactive mode, require namespace
-    if not interactive and not namespace:
-        console.print(
-            "[red]Error:[/red] --namespace is required in non-interactive mode"
-        )
-        raise typer.Exit(1)
-
-    # Get namespace if not provided (only in interactive mode)
-    if not namespace:
-        namespace = questionary.text(
-            "Enter Kubernetes namespace:", default="default"
-        ).ask()
-        namespace = handle_questionary_cancellation(namespace, "namespace input")
-
-        if not namespace:
-            console.print("Secret sync cancelled")
-            raise typer.Exit(0)
-
-    if values:
-        values_path = Path(values)
-        if not values_path.exists():
-            console.print(f"[red]Error:[/red] Values file not found: {values_path}")
-            raise typer.Exit(1)
-
-    # Validate cluster and namespace
-    check_and_switch_cluster_context(cluster)
-    if not validate_namespace(namespace, cluster):
-        console.print(
-            f"[red]Error:[/red] Namespace '{namespace}' does not exist in cluster '{cluster}'"
-        )
-        raise typer.Exit(1)
-
-    agent_manifest = AgentManifest.from_yaml(file_path=manifest)
-
-    # Always call sync_secrets - it will handle the case of no credentials
-    sync_secrets(
-        manifest_obj=agent_manifest,
-        cluster=cluster,
-        namespace=namespace,
-        interactive=interactive,
-        values_path=str(values) if values else None,
-    )
-
-    console.print("[green]Successfully synced secrets[/green]")
diff --git a/src/agentex/lib/cli/commands/tasks.py b/src/agentex/lib/cli/commands/tasks.py
deleted file mode 100644
index 43d54894..00000000
--- a/src/agentex/lib/cli/commands/tasks.py
+++ /dev/null
@@ -1,119 +0,0 @@
-from typing import Any
-
-import typer
-from rich import print_json
-from rich.console import Console
-
-from agentex import Agentex
-from agentex.lib.utils.logging import make_logger
-from agentex.lib.cli.handlers.cleanup_handlers import cleanup_agent_workflows
-
-logger = make_logger(__name__)
-console = Console()
-
-tasks = typer.Typer()
-
-
-@tasks.command()
-def get(
-    task_id: str = typer.Argument(..., help="ID of the task to get"),
-):
-    """
-    Get the task with the given ID.
-    """
-    logger.info(f"Getting task: {task_id}")
-    client = Agentex()
-    task = client.tasks.retrieve(task_id=task_id)
-    logger.info(f"Full Task {task_id}:")
-    print_json(data=task.to_dict(), default=str)
-
-
-@tasks.command()
-def list():
-    """
-    List all tasks.
-    """
-    client = Agentex()
-    tasks = client.tasks.list()
-    print_json(data=[task.to_dict() for task in tasks], default=str)
-
-
-@tasks.command()
-def list_running(
-    agent_name: str = typer.Option(..., help="Name of the agent to list running tasks for"),
-):
-    """
-    List all currently running tasks for a specific agent.
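-
-    Example (illustrative; Typer renders the command name as list-running):
-        agentex tasks list-running --agent-name my-agent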
- """ - client = Agentex() - if agent_name: - all_tasks = client.tasks.list(agent_name=agent_name) - else: - all_tasks = client.tasks.list() - running_tasks = [task for task in all_tasks if hasattr(task, "status") and task.status == "RUNNING"] - - if not running_tasks: - console.print(f"[yellow]No running tasks found for agent '{agent_name}'[/yellow]") - return - - console.print(f"[green]Found {len(running_tasks)} running task(s) for agent '{agent_name}':[/green]") - - # Convert to dict with proper datetime serialization - serializable_tasks: list[dict[str, Any]] = [] # type: ignore[misc] - for task in running_tasks: - try: - # Use model_dump with mode='json' for proper datetime handling - if hasattr(task, "model_dump"): - serializable_tasks.append(task.model_dump(mode="json")) - else: - # Fallback for non-Pydantic objects - serializable_tasks.append( - {"id": getattr(task, "id", "unknown"), "status": getattr(task, "status", "unknown")} - ) - except Exception as e: - logger.warning(f"Failed to serialize task: {e}") - # Minimal fallback - serializable_tasks.append( - {"id": getattr(task, "id", "unknown"), "status": getattr(task, "status", "unknown")} - ) - - print_json(data=serializable_tasks, default=str) - - -@tasks.command() -def delete( - task_id: str = typer.Argument(..., help="ID of the task to delete"), -): - """ - Delete the task with the given ID. - """ - logger.info(f"Deleting task: {task_id}") - client = Agentex() - client.tasks.delete(task_id=task_id) - logger.info(f"Task deleted: {task_id}") - - -@tasks.command() -def cleanup( - agent_name: str = typer.Option(..., help="Name of the agent to cleanup tasks for"), - force: bool = typer.Option( - False, help="Force cleanup using direct Temporal termination (bypasses development check)" - ), -): - """ - Clean up all running tasks/workflows for an agent. - - By default, uses graceful cancellation via agent RPC. - With --force, directly terminates workflows via Temporal client. 
- """ - try: - console.print(f"[blue]Starting cleanup for agent '{agent_name}'...[/blue]") - - cleanup_agent_workflows(agent_name=agent_name, force=force, development_only=True) - - console.print(f"[green]โœ“ Cleanup completed for agent '{agent_name}'[/green]") - - except Exception as e: - console.print(f"[red]Cleanup failed: {str(e)}[/red]") - logger.exception("Task cleanup failed") - raise typer.Exit(1) from e diff --git a/src/agentex/lib/cli/commands/uv.py b/src/agentex/lib/cli/commands/uv.py deleted file mode 100644 index e192b0e5..00000000 --- a/src/agentex/lib/cli/commands/uv.py +++ /dev/null @@ -1,135 +0,0 @@ -from __future__ import annotations - -import os -import sys -import subprocess - -import typer - -from agentex.lib.utils.logging import make_logger - -logger = make_logger(__name__) - - -uv = typer.Typer( - help="Wrapper for uv command with AgentEx-specific enhancements", - context_settings={"help_option_names": ["-h", "--help"]}, -) - -sync_args = typer.Argument(None, help="Additional arguments to pass to uv sync") - - -@uv.command() -def sync( - ctx: typer.Context, - index: str | None = typer.Option( - None, "--index", "-i", help="UV index URL to use for sync" - ), - group: str | None = typer.Option( - None, - "--group", - "-g", - help="Include dependencies from the specified dependency group", - ), - args: list[str] = sync_args, -): - """Sync dependencies with optional UV_INDEX support""" - args = args or [] - - # Check if help was requested - if "--help" in args or "-h" in args: - # Show our custom help instead of passing to uv - typer.echo(ctx.get_help()) - return - - if index: - os.environ["UV_INDEX_URL"] = index - logger.info(f"Using provided UV_INDEX_URL: {index}") - - # Build the uv sync command - cmd = ["uv", "sync"] - - # Add group if specified - if group: - cmd.extend(["--group", group]) - logger.info(f"Using dependency group: {group}") - - # Add any additional arguments - cmd.extend(args) - - try: - result = subprocess.run(cmd, check=True) - sys.exit(result.returncode) - except subprocess.CalledProcessError as e: - logger.error(f"uv sync failed with exit code {e.returncode}") - sys.exit(e.returncode) - except FileNotFoundError: - logger.error("uv command not found. Please install uv first.") - sys.exit(1) - - -add_args = typer.Argument(None, help="Additional arguments to pass to uv add") - - -@uv.command() -def add( - ctx: typer.Context, - index: str | None = typer.Option( - None, "--index", "-i", help="UV index URL to use for add" - ), - args: list[str] = add_args, -): - """Add dependencies with optional UV_INDEX support""" - - args = args or [] - - # Check if help was requested - if "--help" in args or "-h" in args: - # Show our custom help instead of passing to uv - typer.echo(ctx.get_help()) - return - - if index: - os.environ["UV_INDEX_URL"] = index - logger.info(f"Using provided UV_INDEX_URL: {index}") - - # Build the uv add command - cmd = ["uv", "add"] + (args or []) - - try: - result = subprocess.run(cmd, check=True) - sys.exit(result.returncode) - except subprocess.CalledProcessError as e: - logger.error(f"uv add failed with exit code {e.returncode}") - sys.exit(e.returncode) - except FileNotFoundError: - logger.error("uv command not found. 
-        sys.exit(1)
-
-
-run_args = typer.Argument(None, help="Arguments to pass to uv")
-
-
-@uv.command()
-def run(
-    ctx: typer.Context,
-    args: list[str] = run_args,
-):
-    """Run any uv command with arguments"""
-    if not args:
-        # If no arguments provided, show help
-        typer.echo(ctx.get_help())
-        return
-
-    # Build the uv command
-    cmd = ["uv"] + args
-
-    try:
-        result = subprocess.run(cmd, check=True)
-        sys.exit(result.returncode)
-    except subprocess.CalledProcessError as e:
-        logger.error(f"uv command failed with exit code {e.returncode}")
-        sys.exit(e.returncode)
-    except FileNotFoundError:
-        logger.error("uv command not found. Please install uv first.")
-        sys.exit(1)
diff --git a/src/agentex/lib/cli/debug/__init__.py b/src/agentex/lib/cli/debug/__init__.py
deleted file mode 100644
index 764b3565..00000000
--- a/src/agentex/lib/cli/debug/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-"""
-Debug functionality for AgentEx CLI
-
-Provides debug support for temporal workers and ACP servers during local development.
-"""
-
-from .debug_config import DebugMode, DebugConfig
-from .debug_handlers import start_acp_server_debug, start_temporal_worker_debug
-
-__all__ = [
-    "DebugConfig",
-    "DebugMode",
-    "start_acp_server_debug",
-    "start_temporal_worker_debug",
-]
\ No newline at end of file
diff --git a/src/agentex/lib/cli/debug/debug_config.py b/src/agentex/lib/cli/debug/debug_config.py
deleted file mode 100644
index 3b30e68e..00000000
--- a/src/agentex/lib/cli/debug/debug_config.py
+++ /dev/null
@@ -1,115 +0,0 @@
-"""
-Debug configuration models for AgentEx CLI debugging.
-"""
-
-import socket
-from enum import Enum
-
-from agentex.lib.utils.model_utils import BaseModel
-
-
-class DebugMode(str, Enum):
-    """Debug mode options"""
-    WORKER = "worker"
-    ACP = "acp"
-    BOTH = "both"
-    NONE = "none"
-
-
-class DebugConfig(BaseModel):
-    """Configuration for debug mode"""
-
-    enabled: bool = False
-    mode: DebugMode = DebugMode.NONE
-    port: int = 5678
-    wait_for_attach: bool = False
-    auto_port: bool = True  # Automatically find available port if specified port is busy
-
-    @classmethod
-    def create_worker_debug(
-        cls,
-        port: int = 5678,
-        wait_for_attach: bool = False,
-        auto_port: bool = True
-    ) -> "DebugConfig":
-        """Create debug config for worker debugging"""
-        return cls(
-            enabled=True,
-            mode=DebugMode.WORKER,
-            port=port,
-            wait_for_attach=wait_for_attach,
-            auto_port=auto_port
-        )
-
-    @classmethod
-    def create_acp_debug(
-        cls,
-        port: int = 5679,
-        wait_for_attach: bool = False,
-        auto_port: bool = True
-    ) -> "DebugConfig":
-        """Create debug config for ACP debugging"""
-        return cls(
-            enabled=True,
-            mode=DebugMode.ACP,
-            port=port,
-            wait_for_attach=wait_for_attach,
-            auto_port=auto_port
-        )
-
-    @classmethod
-    def create_both_debug(
-        cls,
-        worker_port: int = 5678,
-        _acp_port: int = 5679,
-        wait_for_attach: bool = False,
-        auto_port: bool = True
-    ) -> "DebugConfig":
-        """Create debug config for both worker and ACP debugging"""
-        return cls(
-            enabled=True,
-            mode=DebugMode.BOTH,
-            port=worker_port,  # Primary port for worker
-            wait_for_attach=wait_for_attach,
-            auto_port=auto_port
-        )
-
-    def should_debug_worker(self) -> bool:
-        """Check if worker should be debugged"""
-        return self.enabled and self.mode in (DebugMode.WORKER, DebugMode.BOTH)
-
-    def should_debug_acp(self) -> bool:
-        """Check if ACP should be debugged"""
-        return self.enabled and self.mode in (DebugMode.ACP, DebugMode.BOTH)
-
-    def get_worker_port(self) -> int:
-        """Get port for worker debugging"""
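-        # The worker always debugs on the base port; when both processes are
-        # debugged at once, get_acp_port below shifts the ACP server to port + 1.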
-        return self.port
-
-    def get_acp_port(self) -> int:
-        """Get port for ACP debugging"""
-        if self.mode == DebugMode.BOTH:
-            return self.port + 1  # Use port + 1 for ACP when debugging both
-        return self.port
-
-
-def find_available_port(start_port: int = 5678, max_attempts: int = 10) -> int:
-    """Find an available port starting from start_port"""
-    for port in range(start_port, start_port + max_attempts):
-        try:
-            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
-                s.bind(('localhost', port))
-                return port
-        except OSError:
-            continue
-
-    # If we can't find an available port, just return the start port
-    # and let the debug server handle the error
-    return start_port
-
-
-def resolve_debug_port(config: DebugConfig, target_port: int) -> int:
-    """Resolve the actual port to use for debugging"""
-    if config.auto_port:
-        return find_available_port(target_port)
-    return target_port
\ No newline at end of file
diff --git a/src/agentex/lib/cli/debug/debug_handlers.py b/src/agentex/lib/cli/debug/debug_handlers.py
deleted file mode 100644
index 98746387..00000000
--- a/src/agentex/lib/cli/debug/debug_handlers.py
+++ /dev/null
@@ -1,176 +0,0 @@
-"""
-Debug process handlers for AgentEx CLI.
-
-Provides debug-enabled versions of ACP server and temporal worker startup.
-"""
-
-import sys
-import asyncio
-import asyncio.subprocess
-from typing import TYPE_CHECKING, Dict
-from pathlib import Path
-
-from rich.console import Console
-
-if TYPE_CHECKING:
-    pass
-
-from agentex.lib.utils.logging import make_logger
-
-from .debug_config import DebugConfig, resolve_debug_port
-
-logger = make_logger(__name__)
-console = Console()
-
-
-async def start_temporal_worker_debug(
-    worker_path: Path,
-    env: Dict[str, str],
-    debug_config: DebugConfig
-):
-    """Start temporal worker with debug support"""
-
-    if not debug_config.should_debug_worker():
-        raise ValueError("Debug config is not configured for worker debugging")
-
-    # Resolve the actual debug port
-    debug_port = resolve_debug_port(debug_config, debug_config.get_worker_port())
-
-    # Add debug environment variables
-    debug_env = env.copy()
-    debug_env.update({
-        "AGENTEX_DEBUG_ENABLED": "true",
-        "AGENTEX_DEBUG_PORT": str(debug_port),
-        "AGENTEX_DEBUG_WAIT_FOR_ATTACH": str(debug_config.wait_for_attach).lower(),
-        "AGENTEX_DEBUG_TYPE": "worker"
-    })
-
-    # Start the worker process
-    # For debugging, use absolute path to run_worker.py to run from workspace root
-    worker_script = worker_path.parent / "run_worker.py"
-    cmd = [sys.executable, str(worker_script)]
-
-    console.print(f"[blue]๐Ÿ› Starting Temporal worker in debug mode[/blue]")
-    console.print(f"[yellow]๐Ÿ“ก Debug server will listen on port {debug_port}[/yellow]")
-    console.print(f"[green]โœ“ VS Code should connect to: localhost:{debug_port}[/green]")
-
-    if debug_config.wait_for_attach:
-        console.print(f"[yellow]โณ Worker will wait for debugger to attach[/yellow]")
-
-    console.print(f"[dim]๐Ÿ’ก In your IDE: Attach to localhost:{debug_port}[/dim]")
-    console.print(f"[dim]๐Ÿ”ง If connection fails, check that VS Code launch.json uses port {debug_port}[/dim]")
-
-    return await asyncio.create_subprocess_exec(
-        *cmd,
-        cwd=Path.cwd(),  # Run from current working directory (workspace root)
-        env=debug_env,
-        stdout=asyncio.subprocess.PIPE,
-        stderr=asyncio.subprocess.STDOUT,
-    )
-
-
-async def start_acp_server_debug(
-    acp_path: Path,
-    port: int,
-    env: Dict[str, str],
-    debug_config: DebugConfig
-):
-    """Start ACP server with debug support"""
-
-    if not debug_config.should_debug_acp():
-        raise ValueError("Debug config is not configured for ACP debugging")
ValueError("Debug config is not configured for ACP debugging") - - # Resolve the actual debug port - debug_port = resolve_debug_port(debug_config, debug_config.get_acp_port()) - - # Add debug environment variables - debug_env = env.copy() - debug_env.update({ - "AGENTEX_DEBUG_ENABLED": "true", - "AGENTEX_DEBUG_PORT": str(debug_port), - "AGENTEX_DEBUG_WAIT_FOR_ATTACH": str(debug_config.wait_for_attach).lower(), - "AGENTEX_DEBUG_TYPE": "acp" - }) - - # Disable uvicorn auto-reload in debug mode to prevent conflicts - cmd = [ - sys.executable, - "-m", - "uvicorn", - f"{acp_path.parent.name}.acp:acp", - "--port", - str(port), - "--host", - "0.0.0.0", - # Note: No --reload flag when debugging - ] - - console.print(f"[blue]๐Ÿ› Starting ACP server in debug mode[/blue]") - console.print(f"[yellow]๐Ÿ“ก Debug server will listen on port {debug_port}[/yellow]") - - if debug_config.wait_for_attach: - console.print(f"[yellow]โณ ACP server will wait for debugger to attach[/yellow]") - - console.print(f"[dim]๐Ÿ’ก In your IDE: Attach to localhost:{debug_port}[/dim]") - - return await asyncio.create_subprocess_exec( - *cmd, - cwd=acp_path.parent.parent, - env=debug_env, - stdout=asyncio.subprocess.PIPE, - stderr=asyncio.subprocess.STDOUT, - ) - - -def create_debug_startup_script() -> str: - """Create a Python script snippet for debug initialization""" - return ''' -import os -import sys - -# Debug initialization for AgentEx -if os.getenv("AGENTEX_DEBUG_ENABLED") == "true": - try: - import debugpy - debug_port = int(os.getenv("AGENTEX_DEBUG_PORT", "5678")) - debug_type = os.getenv("AGENTEX_DEBUG_TYPE", "unknown") - wait_for_attach = os.getenv("AGENTEX_DEBUG_WAIT_FOR_ATTACH", "false").lower() == "true" - - # Configure debugpy - debugpy.configure(subProcess=False) - debugpy.listen(debug_port) - - print(f"๐Ÿ› [{debug_type.upper()}] Debug server listening on port {debug_port}") - - if wait_for_attach: - print(f"โณ [{debug_type.upper()}] Waiting for debugger to attach...") - debugpy.wait_for_client() - print(f"โœ… [{debug_type.upper()}] Debugger attached!") - else: - print(f"๐Ÿ“ก [{debug_type.upper()}] Ready for debugger attachment") - - except ImportError: - print("โŒ debugpy not available. 
-        sys.exit(1)
-    except Exception as e:
-        print(f"โŒ Debug setup failed: {e}")
-        sys.exit(1)
-'''
-
-
-def inject_debug_code_to_worker_template() -> str:
-    """Generate debug code to inject into worker template"""
-    return """
-# === DEBUG SETUP (Auto-generated by AgentEx CLI) ===
-""" + create_debug_startup_script() + """
-# === END DEBUG SETUP ===
-"""
-
-
-def inject_debug_code_to_acp_template() -> str:
-    """Generate debug code to inject into ACP template"""
-    return """
-# === DEBUG SETUP (Auto-generated by AgentEx CLI) ===
-""" + create_debug_startup_script() + """
-# === END DEBUG SETUP ===
-"""
\ No newline at end of file
diff --git a/src/agentex/lib/cli/handlers/__init__.py b/src/agentex/lib/cli/handlers/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/src/agentex/lib/cli/handlers/agent_handlers.py b/src/agentex/lib/cli/handlers/agent_handlers.py
deleted file mode 100644
index 81608018..00000000
--- a/src/agentex/lib/cli/handlers/agent_handlers.py
+++ /dev/null
@@ -1,160 +0,0 @@
-from __future__ import annotations
-
-from pathlib import Path
-
-from rich.console import Console
-from python_on_whales import DockerException, docker
-
-from agentex.lib.cli.debug import DebugConfig
-from agentex.lib.utils.logging import make_logger
-from agentex.lib.cli.handlers.run_handlers import RunError, run_agent as _run_agent
-from agentex.lib.sdk.config.agent_manifest import AgentManifest
-
-logger = make_logger(__name__)
-console = Console()
-
-
-class DockerBuildError(Exception):
-    """An error occurred during docker build"""
-
-
-def build_agent(
-    manifest_path: str,
-    registry_url: str,
-    repository_name: str | None,
-    platforms: list[str],
-    push: bool = False,
-    secret: str | None = None,
-    tag: str | None = None,
-    build_args: list[str] | None = None,
-) -> str:
-    """Build the agent locally and optionally push to registry
-
-    Args:
-        manifest_path: Path to the agent manifest file
-        registry_url: Registry URL for pushing the image
-        repository_name: Repository name for the image (defaults to the agent name)
-        platforms: Target platforms for the build (e.g. linux/amd64)
-        push: Whether to push the image to the registry
-        secret: Docker build secret in format 'id=secret-id,src=path-to-secret-file'
-        tag: Image tag to use (defaults to 'latest')
-        build_args: List of Docker build arguments in format 'KEY=VALUE'
-
-    Returns:
-        The image URL
-    """
-    agent_manifest = AgentManifest.from_yaml(file_path=manifest_path)
-    build_context_root = (
-        Path(manifest_path).parent / agent_manifest.build.context.root
-    ).resolve()
-
-    repository_name = repository_name or agent_manifest.agent.name
-
-    # Prepare image name
-    if registry_url:
-        image_name = f"{registry_url}/{repository_name}"
-    else:
-        image_name = repository_name
-
-    if tag:
-        image_name = f"{image_name}:{tag}"
-    else:
-        image_name = f"{image_name}:latest"
-
-    with agent_manifest.context_manager(build_context_root) as build_context:
-        logger.info(f"Building image {image_name} locally...")
-
-        # Log build context information for debugging
-        logger.info(f"Build context path: {build_context.path}")
-        logger.info(
-            f"Dockerfile path: {build_context.path / build_context.dockerfile_path}"  # type: ignore[operator]
-        )
-
-        try:
-            # Prepare build arguments
-            docker_build_kwargs = {
-                "context_path": str(build_context.path),
-                "file": str(build_context.path / build_context.dockerfile_path),  # type: ignore[operator]
-                "tags": [image_name],
-                "platforms": platforms,
-            }
-
-            # Add Docker build args if provided
-            if build_args:
-                docker_build_args = {}
-                for arg in build_args:
-                    if "=" in arg:
-                        key, value = arg.split("=", 1)
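-                        # e.g. "HTTP_PROXY=http://proxy:3128" -> {"HTTP_PROXY": "http://proxy:3128"} (hypothetical value)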
-                        docker_build_args[key] = value
-                    else:
-                        logger.warning(
-                            f"Invalid build arg format: {arg}. Expected KEY=VALUE"
-                        )
-
-                if docker_build_args:
-                    docker_build_kwargs["build_args"] = docker_build_args
-                    logger.info(f"Using build args: {list(docker_build_args.keys())}")
-
-            # Add secret if provided
-            if secret:
-                docker_build_kwargs["secrets"] = [secret]
-
-            if push:
-                # Build and push in one step for multi-platform builds
-                logger.info("Building and pushing image...")
-                docker_build_kwargs["push"] = (
-                    True  # Push directly after build for multi-platform
-                )
-                docker.buildx.build(**docker_build_kwargs)
-
-                logger.info(f"Successfully built and pushed {image_name}")
-            else:
-                # Build only
-                logger.info("Building image...")
-                docker.buildx.build(**docker_build_kwargs)
-
-                logger.info(f"Successfully built {image_name}")
-
-        except DockerException as error:
-            error_msg = error.stderr if error.stderr else str(error)
-            action = "build or push" if push else "build"
-            logger.error(f"{action.capitalize()} failed: {error_msg}", exc_info=True)
-            raise DockerBuildError(
-                f"Docker {action} failed: {error_msg}\n"
-                f"Build context: {build_context.path}\n"
-                f"Dockerfile path: {build_context.dockerfile_path}"
-            ) from error
-
-    return image_name
-
-
-def run_agent(manifest_path: str, debug_config: "DebugConfig | None" = None):
-    """Run an agent locally from the given manifest"""
-    import sys
-    import signal
-    import asyncio
-
-    # Flag to track if we're shutting down
-    shutting_down = False
-
-    def signal_handler(signum, _frame):
-        """Handle signals by raising KeyboardInterrupt"""
-        nonlocal shutting_down
-        if shutting_down:
-            # If we're already shutting down and get another signal, force exit
-            logger.info(f"Force exit on signal {signum}")
-            sys.exit(1)
-
-        shutting_down = True
-        logger.info(f"Received signal {signum}, shutting down...")
-        raise KeyboardInterrupt()
-
-    # Set up signal handling for the main thread
-    signal.signal(signal.SIGINT, signal_handler)
-    signal.signal(signal.SIGTERM, signal_handler)
-
-    try:
-        asyncio.run(_run_agent(manifest_path, debug_config))
-    except KeyboardInterrupt:
-        logger.info("Shutdown completed.")
-        sys.exit(0)
-    except RunError as e:
-        raise RuntimeError(str(e)) from e
diff --git a/src/agentex/lib/cli/handlers/cleanup_handlers.py b/src/agentex/lib/cli/handlers/cleanup_handlers.py
deleted file mode 100644
index 1d67b55e..00000000
--- a/src/agentex/lib/cli/handlers/cleanup_handlers.py
+++ /dev/null
@@ -1,183 +0,0 @@
-import os
-import asyncio
-
-from rich.console import Console
-
-from agentex import Agentex
-from agentex.lib.utils.logging import make_logger
-
-# Import Temporal client for direct workflow termination
-try:
-    from temporalio.client import Client as TemporalClient  # type: ignore
-except ImportError:
-    TemporalClient = None
-
-logger = make_logger(__name__)
-console = Console()
-
-
-def should_cleanup_on_restart() -> bool:
-    """
-    Check if cleanup should be performed on restart.
-
-    Returns True if:
-    - ENVIRONMENT=development, OR
-    - AUTO_CLEANUP_ON_RESTART=true (note: this is the default when the variable is unset)
-    """
-    env = os.getenv("ENVIRONMENT", "").lower()
-    auto_cleanup = os.getenv("AUTO_CLEANUP_ON_RESTART", "true").lower()
-
-    return env == "development" or auto_cleanup == "true"
-
-
-def cleanup_agent_workflows(
-    agent_name: str,
-    force: bool = False,
-    development_only: bool = True
-) -> None:
-    """
-    Clean up all running workflows for an agent during development.
-
-    This gracefully cancels all running tasks for the specified agent.
-    When force=True, directly terminates workflows via Temporal client.
-
-    Args:
-        agent_name: Name of the agent to cleanup workflows for
-        force: If True, directly terminate workflows via Temporal client
-        development_only: Only perform cleanup in development environment
-    """
-
-    # Safety check - only run in development mode by default
-    if development_only and not force and not should_cleanup_on_restart():
-        logger.warning("Cleanup skipped - not in development mode. Use --force to override.")
-        return
-
-    method = "terminate (direct)" if force else "cancel (via agent)"
-    console.print(f"[blue]Cleaning up workflows for agent '{agent_name}' using {method}...[/blue]")
-
-    try:
-        client = Agentex()
-
-        # Get all running tasks
-        if agent_name:
-            all_tasks = client.tasks.list(agent_name=agent_name)
-        else:
-            all_tasks = client.tasks.list()
-        running_tasks = [task for task in all_tasks if hasattr(task, 'status') and task.status == "RUNNING"]

-        if not running_tasks:
-            console.print("[yellow]No running tasks found[/yellow]")
-            return
-
-        console.print(f"[blue]Cleaning up {len(running_tasks)} running task(s) for agent '{agent_name}'...[/blue]")
-
-        successful_cleanups = 0
-        total_tasks = len(running_tasks)
-
-        for task in running_tasks:
-            task_cleanup_success = False
-
-            if force:
-                # Force mode: Do both graceful RPC cancellation AND direct Temporal termination
-                rpc_success = False
-                temporal_success = False
-
-                try:
-                    # First: Graceful cancellation via agent RPC (handles database/agent cleanup)
-                    cleanup_single_task(client, agent_name, task.id)
-                    logger.debug(f"Completed RPC cancellation for task {task.id}")
-                    rpc_success = True
-                except Exception as e:
-                    logger.warning(f"RPC cancellation failed for task {task.id}: {e}")
-
-                try:
-                    # Second: Direct Temporal termination (ensures workflow is forcefully stopped)
-                    asyncio.run(cleanup_single_task_direct(task.id))
-                    logger.debug(f"Completed Temporal termination for task {task.id}")
-                    temporal_success = True
-                except Exception as e:
-                    logger.warning(f"Temporal termination failed for task {task.id}: {e}")
-
-                # Count as success if either operation succeeded
-                task_cleanup_success = rpc_success or temporal_success
-
-            else:
-                # Normal mode: Only graceful cancellation via agent RPC
-                try:
-                    cleanup_single_task(client, agent_name, task.id)
-                    task_cleanup_success = True
-                except Exception as e:
-                    logger.error(f"Failed to cleanup task {task.id}: {e}")
-                    task_cleanup_success = False
-
-            if task_cleanup_success:
-                successful_cleanups += 1
-                logger.debug(f"Successfully cleaned up task {task.id}")
-            else:
-                logger.error(f"Failed to cleanup task {task.id}")
-                # Don't increment successful_cleanups for actual failures
-
-        if successful_cleanups == total_tasks:
-            console.print(f"[green]โœ“ Successfully cleaned up all {successful_cleanups} task(s) for agent '{agent_name}'[/green]")
-        elif successful_cleanups > 0:
-            console.print(f"[yellow]โš  Successfully cleaned up {successful_cleanups}/{total_tasks} task(s) for agent '{agent_name}'[/yellow]")
-        else:
-            console.print(f"[red]โœ— Failed to cleanup any tasks for agent '{agent_name}'[/red]")
-
-    except Exception as e:
-        console.print(f"[red]Agent workflow cleanup failed: {str(e)}[/red]")
-        logger.exception("Agent workflow cleanup failed")
-        raise
-
-
-async def cleanup_single_task_direct(task_id: str) -> None:
-    """
-    Directly terminate a workflow using Temporal client.
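-
-    Note: assumes a Temporal server reachable at localhost:7233 (the default).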
-
-    Args:
-        task_id: ID of the task (used as workflow_id)
-    """
-    if TemporalClient is None:
-        raise ImportError("temporalio package not available for direct workflow termination")
-
-    try:
-        # Connect to Temporal server (assumes default localhost:7233)
-        client = await TemporalClient.connect("localhost:7233")  # type: ignore
-
-        # Get workflow handle and terminate
-        handle = client.get_workflow_handle(workflow_id=task_id)  # type: ignore
-        await handle.terminate()  # type: ignore
-
-        logger.debug(f"Successfully terminated workflow {task_id} via Temporal client")
-
-    except Exception as e:
-        # Check if the workflow was already completed - this is actually a success case
-        if "workflow execution already completed" in str(e).lower():
-            logger.debug(f"Workflow {task_id} was already completed - no termination needed")
-            return  # Don't raise an exception for this case
-
-        logger.error(f"Failed to terminate workflow {task_id} via Temporal client: {e}")
-        raise
-
-
-def cleanup_single_task(client: Agentex, agent_name: str, task_id: str) -> None:
-    """
-    Clean up a single task/workflow using agent RPC cancel method.
-
-    Args:
-        client: Agentex client instance
-        agent_name: Name of the agent that owns the task
-        task_id: ID of the task to cleanup
-    """
-    try:
-        # Use the agent RPC method to cancel the task
-        client.agents.rpc_by_name(
-            agent_name=agent_name,
-            method="task/cancel",
-            params={"task_id": task_id}
-        )
-        logger.debug(f"Successfully cancelled task {task_id} via agent '{agent_name}'")
-
-    except Exception as e:
-        logger.warning(f"RPC task/cancel failed for task {task_id}: {e}")
-        raise
\ No newline at end of file
diff --git a/src/agentex/lib/cli/handlers/deploy_handlers.py b/src/agentex/lib/cli/handlers/deploy_handlers.py
deleted file mode 100644
index c5af88cd..00000000
--- a/src/agentex/lib/cli/handlers/deploy_handlers.py
+++ /dev/null
@@ -1,394 +0,0 @@
-from __future__ import annotations
-
-import os
-import tempfile
-import subprocess
-from typing import Any
-from pathlib import Path
-
-import yaml
-from pydantic import Field, BaseModel
-from rich.console import Console
-
-from agentex.lib.utils.logging import make_logger
-from agentex.lib.cli.utils.exceptions import HelmError, DeploymentError
-from agentex.lib.cli.utils.path_utils import PathResolutionError, calculate_docker_acp_module
-from agentex.lib.environment_variables import EnvVarKeys
-from agentex.lib.cli.utils.kubectl_utils import check_and_switch_cluster_context
-from agentex.lib.sdk.config.agent_config import AgentConfig
-from agentex.lib.sdk.config.agent_manifest import AgentManifest
-from agentex.lib.sdk.config.environment_config import AgentEnvironmentConfig
-
-logger = make_logger(__name__)
-console = Console()
-
-TEMPORAL_WORKER_KEY = "temporal-worker"
-AGENTEX_AGENTS_HELM_CHART_VERSION = "0.1.9"
-
-
-class InputDeployOverrides(BaseModel):
-    repository: str | None = Field(default=None, description="Override the repository for deployment")
-    image_tag: str | None = Field(default=None, description="Override the image tag for deployment")
-
-
-def check_helm_installed() -> bool:
-    """Check if helm is installed and available"""
-    try:
-        result = subprocess.run(["helm", "version", "--short"], capture_output=True, text=True, check=True)
-        logger.info(f"Helm version: {result.stdout.strip()}")
-        return True
-    except (subprocess.CalledProcessError, FileNotFoundError):
-        return False
-
-
-def add_helm_repo(helm_repository_name: str, helm_repository_url: str) -> None:
-    """Add the agentex helm repository if not already added"""
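-    # Illustrative call (arguments are supplied by the caller; the URL below is hypothetical):
-    #   add_helm_repo("agentex", "https://example.com/agentex-charts")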
-    try:
-        # Check if repo already exists
-        result = subprocess.run(["helm", "repo", "list"], capture_output=True, text=True, check=True)
-
-        if helm_repository_name not in result.stdout:
-            console.print("Adding agentex helm repository...")
-            subprocess.run(
-                [
-                    "helm",
-                    "repo",
-                    "add",
-                    helm_repository_name,
-                    helm_repository_url,
-                ],
-                check=True,
-            )
-        else:
-            logger.info("Helm repository already exists. Running update...")
-
-        subprocess.run(["helm", "repo", "update"], check=True)
-        console.print("[green]โœ“[/green] Helm repository updated successfully")
-
-    except subprocess.CalledProcessError as e:
-        raise HelmError(f"Failed to add helm repository: {e}") from e
-
-
-def convert_env_vars_dict_to_list(env_vars: dict[str, str]) -> list[dict[str, str]]:
-    """Convert a dictionary of environment variables to a list of dictionaries"""
-    return [{"name": key, "value": value} for key, value in env_vars.items()]
-
-
-def add_acp_command_to_helm_values(helm_values: dict[str, Any], manifest: AgentManifest, manifest_path: str) -> None:
-    """Add dynamic ACP command to helm values based on manifest configuration"""
-    try:
-        docker_acp_module = calculate_docker_acp_module(manifest, manifest_path)
-        # Create the uvicorn command with the correct module path
-        helm_values["command"] = ["uvicorn", f"{docker_acp_module}:acp", "--host", "0.0.0.0", "--port", "8000"]
-        logger.info(f"Using dynamic ACP command: uvicorn {docker_acp_module}:acp")
-    except (PathResolutionError, Exception) as e:  # PathResolutionError listed for clarity; Exception already covers it
-        # Fallback to default command structure
-        logger.warning(f"Could not calculate dynamic ACP module ({e}), using default: project.acp")
-        helm_values["command"] = ["uvicorn", "project.acp:acp", "--host", "0.0.0.0", "--port", "8000"]
-
-
-def merge_deployment_configs(
-    manifest: AgentManifest,
-    agent_env_config: AgentEnvironmentConfig | None,
-    deploy_overrides: InputDeployOverrides,
-    manifest_path: str,
-) -> dict[str, Any]:
-    """Merge global deployment config with environment-specific overrides into helm values"""
-    agent_config: AgentConfig = manifest.agent
-
-    if not manifest.deployment:
-        raise DeploymentError("No deployment configuration found in manifest")
-
-    repository = deploy_overrides.repository or manifest.deployment.image.repository
-    image_tag = deploy_overrides.image_tag or manifest.deployment.image.tag
-
-    if not repository or not image_tag:
-        raise DeploymentError("Repository and image tag are required")
-
-    # Start with global configuration
-    helm_values: dict[str, Any] = {
-        "global": {
-            "image": {
-                "repository": repository,
-                "tag": image_tag,
-                "pullPolicy": "IfNotPresent",
-            },
-            "agent": {
-                "name": manifest.agent.name,
-                "description": manifest.agent.description,
-                "acp_type": manifest.agent.acp_type,
-            },
-        },
-        "replicaCount": manifest.deployment.global_config.replicaCount,
-        "resources": {
-            "requests": {
-                "cpu": manifest.deployment.global_config.resources.requests.cpu,
-                "memory": manifest.deployment.global_config.resources.requests.memory,
-            },
-            "limits": {
-                "cpu": manifest.deployment.global_config.resources.limits.cpu,
-                "memory": manifest.deployment.global_config.resources.limits.memory,
-            },
-        },
-        # Enable autoscaling by default for production deployments
-        "autoscaling": {
-            "enabled": True,
-            "minReplicas": 1,
-            "maxReplicas": 10,
-            "targetCPUUtilizationPercentage": 50,
-        },
-    }
-
-    # Handle temporal configuration using new helper methods
-    if agent_config.is_temporal_agent():
-        temporal_config = agent_config.get_temporal_workflow_config()
-        if temporal_config:
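-            # Temporal agents get a second workload (the worker) next to the ACP
-            # server; both receive the same env/secret wiring assembled below.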
helm_values[TEMPORAL_WORKER_KEY] = { - "enabled": True, - # Enable autoscaling for temporal workers as well - "autoscaling": { - "enabled": True, - "minReplicas": 1, - "maxReplicas": 10, - "targetCPUUtilizationPercentage": 50, - }, - } - helm_values["global"]["workflow"] = { - "name": temporal_config.name, - "taskQueue": temporal_config.queue_name, - } - - # Collect all environment variables with proper precedence - # Priority: manifest -> environments.yaml -> secrets (highest) - all_env_vars: dict[str, str] = {} - secret_env_vars: list[dict[str, str]] = [] - - # Start with agent_config env vars from manifest - if agent_config.env: - all_env_vars.update(agent_config.env) - - # Override with environment config env vars if they exist - if agent_env_config and agent_env_config.helm_overrides and "env" in agent_env_config.helm_overrides: - env_overrides = agent_env_config.helm_overrides["env"] - if isinstance(env_overrides, list): - # Convert list format to dict for easier merging - env_override_dict: dict[str, str] = {} - for env_var in env_overrides: - if isinstance(env_var, dict) and "name" in env_var and "value" in env_var: - env_override_dict[str(env_var["name"])] = str(env_var["value"]) - all_env_vars.update(env_override_dict) - - # Handle credentials and check for conflicts - if agent_config.credentials: - for credential in agent_config.credentials: - # Handle both CredentialMapping objects and legacy dict format - if isinstance(credential, dict): - env_var_name = credential["env_var_name"] - secret_name = credential["secret_name"] - secret_key = credential["secret_key"] - else: - env_var_name = credential.env_var_name - secret_name = credential.secret_name - secret_key = credential.secret_key - - # Check if the environment variable name conflicts with existing env vars - if env_var_name in all_env_vars: - logger.warning( - f"Environment variable '{env_var_name}' is defined in both " - f"env and secretEnvVars. The secret value will take precedence." 
- ) - # Remove from regular env vars since secret takes precedence - del all_env_vars[env_var_name] - - secret_env_vars.append( - { - "name": env_var_name, - "secretName": secret_name, - "secretKey": secret_key, - } - ) - - # Apply agent environment configuration overrides - if agent_env_config: - # Add auth principal env var if environment config is set - if agent_env_config.auth: - from agentex.lib.cli.utils.auth_utils import _encode_principal_context_from_env_config - - encoded_principal = _encode_principal_context_from_env_config(agent_env_config.auth) - logger.info(f"Encoding auth principal from {agent_env_config.auth}") - if encoded_principal: - all_env_vars[EnvVarKeys.AUTH_PRINCIPAL_B64.value] = encoded_principal - else: - raise DeploymentError(f"Auth principal unable to be encoded for agent_env_config: {agent_env_config}") - - logger.info(f"Defined agent helm overrides: {agent_env_config.helm_overrides}") - logger.info(f"Before-merge helm values: {helm_values}") - if agent_env_config.helm_overrides: - _deep_merge(helm_values, agent_env_config.helm_overrides) - logger.info(f"After-merge helm values: {helm_values}") - - # Set final environment variables - # Environment variable precedence: manifest -> environments.yaml -> secrets (highest) - if all_env_vars: - helm_values["env"] = convert_env_vars_dict_to_list(all_env_vars) - - if secret_env_vars: - helm_values["secretEnvVars"] = secret_env_vars - - # Set environment variables for temporal worker if enabled - if TEMPORAL_WORKER_KEY in helm_values: - if all_env_vars: - helm_values[TEMPORAL_WORKER_KEY]["env"] = convert_env_vars_dict_to_list(all_env_vars) - if secret_env_vars: - helm_values[TEMPORAL_WORKER_KEY]["secretEnvVars"] = secret_env_vars - - # Handle image pull secrets - if manifest.deployment and manifest.deployment.imagePullSecrets: - pull_secrets = [pull_secret.model_dump() for pull_secret in manifest.deployment.imagePullSecrets] - helm_values["global"]["imagePullSecrets"] = pull_secrets - helm_values["imagePullSecrets"] = pull_secrets - - # Add dynamic ACP command based on manifest configuration if command is not set in helm overrides - helm_overrides_command = ( - agent_env_config and agent_env_config.helm_overrides and "command" in agent_env_config.helm_overrides - ) - if not helm_overrides_command: - add_acp_command_to_helm_values(helm_values, manifest, manifest_path) - - logger.info("Deploying with the following helm values: %s", helm_values) - return helm_values - - -def _deep_merge(base_dict: dict[str, Any], override_dict: dict[str, Any]) -> None: - """Deep merge override_dict into base_dict""" - for key, value in override_dict.items(): - if key in base_dict and isinstance(base_dict[key], dict) and isinstance(value, dict): - _deep_merge(base_dict[key], value) - else: - base_dict[key] = value - - -def create_helm_values_file(helm_values: dict[str, Any]) -> str: - """Create a temporary helm values file""" - with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as f: - yaml.dump(helm_values, f, default_flow_style=False) - return f.name - - -def deploy_agent( - manifest_path: str, - cluster_name: str, - namespace: str, - deploy_overrides: InputDeployOverrides, - environment_name: str | None = None, -) -> None: - """Deploy an agent using helm""" - - # Validate prerequisites - if not check_helm_installed(): - raise DeploymentError("Helm is not installed. 
Please install helm first.") - - # Switch to the specified cluster context - check_and_switch_cluster_context(cluster_name) - - manifest = AgentManifest.from_yaml(file_path=manifest_path) - - # Load agent environment configuration - agent_env_config = None - if environment_name: - manifest_dir = Path(manifest_path).parent - environments_config = manifest.load_environments_config(manifest_dir) - if environments_config: - agent_env_config = environments_config.get_config_for_env(environment_name) - console.print(f"[green]โœ“[/green] Using environment config: {environment_name}") - else: - console.print(f"[yellow]โš [/yellow] No environments.yaml found, skipping environment-specific config") - - if agent_env_config: - helm_repository_name = agent_env_config.helm_repository_name - helm_repository_url = agent_env_config.helm_repository_url - else: - helm_repository_name = "scale-egp" - helm_repository_url = "https://scale-egp-helm-charts-us-west-2.s3.amazonaws.com/charts" - # Add helm repository/update - add_helm_repo(helm_repository_name, helm_repository_url) - - # Merge configurations - helm_values = merge_deployment_configs(manifest, agent_env_config, deploy_overrides, manifest_path) - - # Create values file - values_file = create_helm_values_file(helm_values) - - try: - agent_name = manifest.agent.name - release_name = agent_name - - console.print( - f"Deploying agent [bold]{agent_name}[/bold] to cluster [bold]{cluster_name}[/bold] in namespace [bold]{namespace}[/bold]" - ) - - # Check if release exists - try: - subprocess.run( - ["helm", "status", release_name, "-n", namespace], - capture_output=True, - check=True, - ) - - # Release exists, do upgrade - console.print("Existing deployment found, upgrading...") - command = [ - "helm", - "upgrade", - release_name, - f"{helm_repository_name}/agentex-agent", - "--version", - AGENTEX_AGENTS_HELM_CHART_VERSION, - "-f", - values_file, - "-n", - namespace, - "--atomic", - "--timeout", - "10m", - ] - console.print(f"[blue]โ„น[/blue] Running command: {' '.join(command)}") - subprocess.run(command, check=True) - console.print("[green]โœ“[/green] Agent upgraded successfully") - - except subprocess.CalledProcessError: - # Release doesn't exist, do install - console.print("Installing new deployment...") - command = [ - "helm", - "install", - release_name, - f"{helm_repository_name}/agentex-agent", - "--version", - AGENTEX_AGENTS_HELM_CHART_VERSION, - "-f", - values_file, - "-n", - namespace, - "--create-namespace", - "--atomic", - "--timeout", - "10m", - ] - console.print(f"[blue]โ„น[/blue] Running command: {' '.join(command)}") - subprocess.run(command, check=True) - console.print("[green]โœ“[/green] Agent deployed successfully") - - # Show success message with helpful commands - console.print("\n[green]๐ŸŽ‰ Deployment completed successfully![/green]") - console.print(f"[blue]Check deployment status:[/blue] helm status {release_name} -n {namespace}") - console.print(f"[blue]View logs:[/blue] kubectl logs -l app.kubernetes.io/name=agentex-agent -n {namespace}") - - except subprocess.CalledProcessError as e: - raise HelmError( - f"Helm deployment failed: {e}\n" - f"Note: Due to --atomic flag, any partial deployment has been automatically rolled back." 
- ) from e - finally: - # Clean up values file - os.unlink(values_file) diff --git a/src/agentex/lib/cli/handlers/run_handlers.py b/src/agentex/lib/cli/handlers/run_handlers.py deleted file mode 100644 index adf44a19..00000000 --- a/src/agentex/lib/cli/handlers/run_handlers.py +++ /dev/null @@ -1,412 +0,0 @@ -from __future__ import annotations - -import os -import sys -import asyncio -from pathlib import Path - -from rich.panel import Panel -from rich.console import Console - -# Import debug functionality -from agentex.lib.cli.debug import DebugConfig, start_acp_server_debug, start_temporal_worker_debug -from agentex.lib.utils.logging import make_logger -from agentex.lib.cli.utils.path_utils import ( - get_file_paths, - calculate_uvicorn_target_for_local, -) -from agentex.lib.environment_variables import EnvVarKeys -from agentex.lib.sdk.config.agent_manifest import AgentManifest -from agentex.lib.cli.handlers.cleanup_handlers import cleanup_agent_workflows, should_cleanup_on_restart - -logger = make_logger(__name__) -console = Console() - - -class RunError(Exception): - """An error occurred during agent run""" - - -class ProcessManager: - """Manages multiple subprocesses with proper cleanup""" - - def __init__(self): - self.processes: list[asyncio.subprocess.Process] = [] - self.shutdown_event = asyncio.Event() - - def add_process(self, process: asyncio.subprocess.Process): - """Add a process to be managed""" - self.processes.append(process) - - async def wait_for_shutdown(self): - """Wait for shutdown signal""" - await self.shutdown_event.wait() - - def shutdown(self): - """Signal shutdown and terminate all processes""" - self.shutdown_event.set() - - async def cleanup_processes(self): - """Clean up all processes""" - if not self.processes: - return - - console.print("\n[yellow]Shutting down processes...[/yellow]") - - # Send SIGTERM to all processes - for process in self.processes: - if process.returncode is None: # Process is still running - try: - process.terminate() - except ProcessLookupError: - pass # Process already terminated - - # Wait for graceful shutdown with shorter timeout - try: - await asyncio.wait_for( - asyncio.gather(*[p.wait() for p in self.processes], return_exceptions=True), - timeout=2.0, # Reduced from 5.0 seconds - ) - except TimeoutError: - # Force kill if not terminated gracefully - console.print("[yellow]Force killing unresponsive processes...[/yellow]") - for process in self.processes: - if process.returncode is None: - try: - process.kill() - await asyncio.wait_for(process.wait(), timeout=1.0) - except (ProcessLookupError, TimeoutError): - pass # Process already dead or kill failed - - console.print("[green]All processes stopped[/green]") - - -async def start_temporal_worker_with_reload( - worker_path: Path, env: dict[str, str], process_manager: ProcessManager, manifest_dir: Path -) -> asyncio.Task[None]: - """Start temporal worker with auto-reload using watchfiles""" - try: - from watchfiles import awatch - except ImportError: - console.print("[yellow]watchfiles not installed, falling back to basic worker start[/yellow]") - console.print("[dim]Install with: pip install watchfiles[/dim]") - # Fallback to regular worker without reload - worker_process = await start_temporal_worker(worker_path, env, manifest_dir) - process_manager.add_process(worker_process) - return asyncio.create_task(stream_process_output(worker_process, "WORKER")) - - async def worker_runner() -> None: - current_process: asyncio.subprocess.Process | None = None - output_task: 
asyncio.Task[None] | None = None - - console.print(f"[blue]Starting Temporal worker with auto-reload from {worker_path}...[/blue]") - - async def start_worker() -> asyncio.subprocess.Process: - nonlocal current_process, output_task - - # Pre-restart cleanup: cancel stale workflows before the worker comes back up - if current_process is not None: - # Resolve the agent name for cleanup: prefer the environment, fall back to the worker path - agent_name = env.get("AGENT_NAME") - logger.debug(f"Agent name from environment: {agent_name}") - if agent_name is None: - agent_name = worker_path.parent.parent.name - - # Perform cleanup if configured - if should_cleanup_on_restart(): - console.print("[yellow]Cleaning up workflows before worker restart...[/yellow]") - try: - cleanup_agent_workflows(agent_name) - except Exception as e: - logger.warning(f"Cleanup failed: {e}") - console.print(f"[yellow]⚠ Cleanup failed: {str(e)}[/yellow]") - - # Clean up previous process - if current_process and current_process.returncode is None: - current_process.terminate() - try: - await asyncio.wait_for(current_process.wait(), timeout=2.0) - except asyncio.TimeoutError: - current_process.kill() - await current_process.wait() - - # Cancel previous output task - if output_task: - output_task.cancel() - try: - await output_task - except asyncio.CancelledError: - pass - - current_process = await start_temporal_worker(worker_path, env, manifest_dir) - process_manager.add_process(current_process) - console.print("[green]Temporal worker started[/green]") - return current_process - - try: - # Start initial worker - current_process = await start_worker() - if current_process: - output_task = asyncio.create_task(stream_process_output(current_process, "WORKER")) - - # Watch for file changes - async for changes in awatch(manifest_dir, recursive=True): - # Filter for Python files - py_changes = [(change, path) for change, path in changes if str(path).endswith('.py')] - - if py_changes: - changed_files = [str(Path(path).relative_to(worker_path.parent)) for _, path in py_changes] - console.print(f"[yellow]File changes detected: {changed_files}[/yellow]") - console.print("[yellow]Restarting Temporal worker...[/yellow]") - - # Restart worker (with cleanup handled in start_worker) - await start_worker() - if current_process: - output_task = asyncio.create_task(stream_process_output(current_process, "WORKER")) - - except asyncio.CancelledError: - # Clean shutdown - if output_task: - output_task.cancel() - try: - await output_task - except asyncio.CancelledError: - pass - - if current_process and current_process.returncode is None: - current_process.terminate() - try: - await asyncio.wait_for(current_process.wait(), timeout=2.0) - except asyncio.TimeoutError: - current_process.kill() - await current_process.wait() - raise - - return asyncio.create_task(worker_runner()) - - -async def start_acp_server( - acp_path: Path, port: int, env: dict[str, str], manifest_dir: Path -) -> asyncio.subprocess.Process: - """Start the ACP server process""" - # Use file path relative to manifest directory if possible - uvicorn_target = calculate_uvicorn_target_for_local(acp_path, manifest_dir) - - cmd = [ - sys.executable, - "-m", - "uvicorn", - f"{uvicorn_target}:acp", - "--reload", - "--reload-dir", - str(acp_path.parent), # Watch the project directory specifically - "--port", - str(port), - "--host", - "0.0.0.0", - ] - - console.print(f"[blue]Starting ACP server from {acp_path} on port {port}...[/blue]") - return await asyncio.create_subprocess_exec( - *cmd, - cwd=manifest_dir, # Always use manifest directory as
CWD for consistency - env=env, - stdout=asyncio.subprocess.PIPE, - stderr=asyncio.subprocess.STDOUT, - ) - - -async def start_temporal_worker( - worker_path: Path, env: dict[str, str], manifest_dir: Path -) -> asyncio.subprocess.Process: - """Start the temporal worker process""" - run_worker_target = calculate_uvicorn_target_for_local(worker_path, manifest_dir) - - cmd = [sys.executable, "-m", run_worker_target] - - console.print(f"[blue]Starting Temporal worker from {worker_path}...[/blue]") - - return await asyncio.create_subprocess_exec( - *cmd, - cwd=manifest_dir, # Use manifest directory as CWD so imports resolve - env=env, - stdout=asyncio.subprocess.PIPE, - stderr=asyncio.subprocess.STDOUT, - ) - - -async def stream_process_output(process: asyncio.subprocess.Process, prefix: str): - """Stream process output with prefix""" - try: - if process.stdout is None: - return - while True: - line = await process.stdout.readline() - if not line: - break - decoded_line = line.decode("utf-8").rstrip() - if decoded_line: # Only print non-empty lines - console.print(f"[dim]{prefix}:[/dim] {decoded_line}") - except Exception as e: - logger.debug(f"Output streaming ended for {prefix}: {e}") - - -async def run_agent(manifest_path: str, debug_config: "DebugConfig | None" = None): - """Run an agent locally from the given manifest""" - - # Validate manifest exists - manifest_file = Path(manifest_path) - - if not manifest_file.exists(): - raise RunError(f"Manifest file not found: {manifest_path}") - - # Parse manifest - try: - manifest = AgentManifest.from_yaml(file_path=manifest_path) - except Exception as e: - raise RunError(f"Failed to parse manifest: {str(e)}") from e - - # Get and validate file paths - try: - file_paths = get_file_paths(manifest, manifest_path) - except Exception as e: - raise RunError(str(e)) from e - - # Check if temporal agent and validate worker file - if is_temporal_agent(manifest): - if not file_paths["worker"]: - raise RunError("Temporal agent requires a worker file path to be configured") - - # Create environment for subprocesses - agent_env = create_agent_environment(manifest) - - # Setup process manager - process_manager = ProcessManager() - - try: - console.print( - Panel.fit( - f"🚀 [bold blue]Running Agent: {manifest.agent.name}[/bold blue]", - border_style="blue", - ) - ) - - # Start ACP server (with debug support if enabled) - manifest_dir = Path(manifest_path).parent - if debug_config and debug_config.should_debug_acp(): - acp_process = await start_acp_server_debug( - file_paths["acp"], manifest.local_development.agent.port, agent_env, debug_config # type: ignore[union-attr] - ) - else: - acp_process = await start_acp_server( - file_paths["acp"], manifest.local_development.agent.port, agent_env, manifest_dir # type: ignore[union-attr] - ) - process_manager.add_process(acp_process) - - # Start output streaming for ACP - acp_output_task = asyncio.create_task(stream_process_output(acp_process, "ACP")) - - tasks = [acp_output_task] - - # Start temporal worker if needed (with debug support if enabled) - if is_temporal_agent(manifest) and file_paths["worker"]: - if debug_config and debug_config.should_debug_worker(): - # In debug mode, start worker without auto-reload to prevent conflicts - worker_process = await start_temporal_worker_debug( - file_paths["worker"], agent_env, debug_config - ) - process_manager.add_process(worker_process) - worker_task = asyncio.create_task(stream_process_output(worker_process, "WORKER")) - else: - # Normal mode with auto-reload -
worker_task = await start_temporal_worker_with_reload(file_paths["worker"], agent_env, process_manager, manifest_dir) - tasks.append(worker_task) - - console.print( - f"\n[green]✓ Agent running at: http://localhost:{manifest.local_development.agent.port}[/green]" # type: ignore[union-attr] - ) - console.print("[dim]Press Ctrl+C to stop[/dim]\n") - - # Wait for shutdown signal or process failure - try: - await process_manager.wait_for_shutdown() - except KeyboardInterrupt: - console.print("\n[yellow]Received shutdown signal...[/yellow]") - - # Cancel output streaming tasks - for task in tasks: - task.cancel() - try: - await task - except asyncio.CancelledError: - pass - - except Exception as e: - logger.exception("Error running agent") - raise RunError(f"Failed to run agent: {str(e)}") from e - - finally: - # Ensure cleanup happens - await process_manager.cleanup_processes() - - -def create_agent_environment(manifest: AgentManifest) -> dict[str, str]: - """Create environment variables for agent processes without modifying os.environ""" - # Start with current environment - env = dict(os.environ) - - agent_config = manifest.agent - - # TODO: Combine this logic with the deploy_handlers so that we can reuse the env vars - env_vars = { - "ENVIRONMENT": "development", - "TEMPORAL_ADDRESS": "localhost:7233", - "REDIS_URL": "redis://localhost:6379", - "AGENT_NAME": manifest.agent.name, - "ACP_TYPE": manifest.agent.acp_type, - "ACP_URL": f"http://{manifest.local_development.agent.host_address}", # type: ignore[union-attr] - "ACP_PORT": str(manifest.local_development.agent.port), # type: ignore[union-attr] - } - - if manifest.agent.agent_input_type: - env_vars["AGENT_INPUT_TYPE"] = manifest.agent.agent_input_type - - # Add authorization principal if set - for local development, auth is optional - from agentex.lib.cli.utils.auth_utils import _encode_principal_context - encoded_principal = _encode_principal_context(manifest) - if encoded_principal: - env_vars[EnvVarKeys.AUTH_PRINCIPAL_B64.value] = encoded_principal - else: - logger.info("No auth principal configured - agent will run without authentication context") - - # Add description if available - if manifest.agent.description: - env_vars["AGENT_DESCRIPTION"] = manifest.agent.description - - # Add temporal-specific variables if this is a temporal agent - if manifest.agent.is_temporal_agent(): - temporal_config = manifest.agent.get_temporal_workflow_config() - if temporal_config: - env_vars["WORKFLOW_NAME"] = temporal_config.name - env_vars["WORKFLOW_TASK_QUEUE"] = temporal_config.queue_name - - # Set health check port from temporal config - if manifest.agent.temporal and manifest.agent.temporal.health_check_port is not None: - env_vars["HEALTH_CHECK_PORT"] = str(manifest.agent.temporal.health_check_port) - - if agent_config.env: - for key, value in agent_config.env.items(): - env_vars[key] = value - - env.update(env_vars) - - return env - - -def is_temporal_agent(manifest: AgentManifest) -> bool: - """Check if this is a temporal agent""" - return manifest.agent.is_temporal_agent() diff --git a/src/agentex/lib/cli/handlers/secret_handlers.py b/src/agentex/lib/cli/handlers/secret_handlers.py deleted file mode 100644 index c424de0e..00000000 --- a/src/agentex/lib/cli/handlers/secret_handlers.py +++ /dev/null @@ -1,672 +0,0 @@ -from __future__ import annotations - -import json -import base64 -from typing import Any -from pathlib import Path -from collections import defaultdict - -import yaml -import typer -import questionary -from rich.console import
Console -from kubernetes.client.rest import ApiException - -from agentex.lib.utils.logging import make_logger -from agentex.lib.types.credentials import CredentialMapping -from agentex.lib.cli.utils.cli_utils import handle_questionary_cancellation -from agentex.lib.cli.utils.kubectl_utils import get_k8s_client -from agentex.lib.sdk.config.agent_config import AgentConfig -from agentex.lib.sdk.config.agent_manifest import AgentManifest -from agentex.lib.sdk.config.deployment_config import ( - DeploymentConfig, - ImagePullSecretConfig, - InjectedSecretsValues, -) -from agentex.lib.cli.utils.kubernetes_secrets_utils import ( - VALID_SECRET_TYPES, - KUBERNETES_SECRET_TYPE_OPAQUE, - KUBERNETES_SECRET_TO_MANIFEST_KEY, - KUBERNETES_SECRET_TYPE_DOCKERCONFIGJSON, - get_secret_data, - create_secret_with_data, - update_secret_with_data, - create_image_pull_secret_with_data, - update_image_pull_secret_with_data, -) - -logger = make_logger(__name__) -console = Console() - - -# TODO: parse this into a Pydantic model. -def load_values_file(values_path: str) -> dict[str, dict[str, str]]: - """Load and parse the values file (YAML/JSON)""" - try: - path = Path(values_path) - content = path.read_text() - - if path.suffix.lower() in [".yaml", ".yml"]: - data = yaml.safe_load(content) - elif path.suffix.lower() == ".json": - data = json.loads(content) - else: - # Try YAML first, then JSON - try: - data = yaml.safe_load(content) - except yaml.YAMLError: - data = json.loads(content) - return InjectedSecretsValues.model_validate(data).model_dump() - - except Exception as e: - raise RuntimeError( - f"Failed to load values file '{values_path}': {str(e)}" - ) from e - - -def interactive_secret_input(secret_name: str, secret_key: str) -> str: - """Prompt user for secret value with appropriate input method""" - console.print( - f"\n[bold]Enter value for secret '[cyan]{secret_name}[/cyan]' key '[cyan]{secret_key}[/cyan]':[/bold]" - ) - - input_type = questionary.select( - "What type of value is this?", - choices=[ - "Simple text", - "Sensitive/password (hidden input)", - "Multi-line text", - "JSON/YAML content", - "Read from file", - ], - ).ask() - - input_type = handle_questionary_cancellation(input_type, "secret input") - - if input_type == "Sensitive/password (hidden input)": - result = questionary.password("Enter value (input will be hidden):").ask() - return handle_questionary_cancellation(result, "password input") - - elif input_type == "Multi-line text": - console.print( - "[yellow]Enter multi-line text (press Ctrl+D when finished):[/yellow]" - ) - lines = [] - try: - while True: - line = input() - lines.append(line) - except EOFError: - pass - except KeyboardInterrupt: - console.print("[yellow]Multi-line input cancelled by user[/yellow]") - raise typer.Exit(0) # noqa - return "\n".join(lines) - - elif input_type == "JSON/YAML content": - value = questionary.text("Enter JSON/YAML content:").ask() - value = handle_questionary_cancellation(value, "JSON/YAML input") - # Validate JSON/YAML format - try: - json.loads(value) - except json.JSONDecodeError: - try: - yaml.safe_load(value) - except yaml.YAMLError: - console.print( - "[yellow]Warning: Content doesn't appear to be valid JSON or YAML[/yellow]" - ) - return value - - elif input_type == "Read from file": - file_path = questionary.path("Enter file path:").ask() - file_path = handle_questionary_cancellation(file_path, "file path input") - try: - return Path(file_path).read_text().strip() - except Exception as e: - console.print(f"[red]Error reading file: 
{e}[/red]") - manual_value = questionary.text("Enter value manually:").ask() - return handle_questionary_cancellation(manual_value, "manual value input") - - else: # Simple text - result = questionary.text("Enter value:").ask() - return handle_questionary_cancellation(result, "text input") - - -def get_secret(name: str, namespace: str, context: str | None = None) -> dict[str, Any]: - """Get details about a secret""" - v1 = get_k8s_client(context) - - try: - secret = v1.read_namespaced_secret(name=name, namespace=namespace) - return { - "name": secret.metadata.name, # type: ignore[union-attr] - "namespace": namespace, - "created": secret.metadata.creation_timestamp.isoformat(), # type: ignore[union-attr] - "exists": True, - } - except ApiException as e: - if e.status == 404: - console.print( - f"[red]Error: Secret '{name}' not found in namespace '{namespace}'[/red]" - ) - return {"name": name, "namespace": namespace, "exists": False} - raise RuntimeError(f"Failed to get secret: {str(e)}") from e - - -def delete_secret(name: str, namespace: str, context: str | None = None) -> None: - """Delete a secret""" - v1 = get_k8s_client(context) - - try: - v1.delete_namespaced_secret(name=name, namespace=namespace) - console.print( - f"[green]Deleted secret '{name}' from namespace '{namespace}'[/green]" - ) - except ApiException as e: - if e.status == 404: - console.print( - f"[red]Error: Secret '{name}' not found in namespace '{namespace}'[/red]" - ) - else: - console.print(f"[red]Error deleting secret: {e.reason}[/red]") - raise RuntimeError(f"Failed to delete secret: {str(e)}") from e - - -def get_kubernetes_secrets_by_type( - namespace: str, context: str | None = None -) -> dict[str, list[dict[str, Any]]]: - """List metadata about secrets in the namespace""" - v1 = get_k8s_client(context) - - try: - secrets = v1.list_namespaced_secret(namespace=namespace) - secret_type_to_secret = defaultdict(list) - for secret in secrets.items: - if secret.type in VALID_SECRET_TYPES: - secret_type_to_secret[secret.type].append( - { - "name": secret.metadata.name, - "namespace": namespace, - "created": secret.metadata.creation_timestamp.isoformat(), - } - ) - - return secret_type_to_secret - except ApiException as e: - console.print( - f"[red]Error listing secrets in namespace '{namespace}': {e.reason}[/red]" - ) - raise RuntimeError(f"Failed to list secrets: {str(e)}") from e - - # NOTE: This corresponds with KUBERNETES_SECRET_TYPE_OPAQUE - - -def sync_user_defined_secrets( - manifest_obj: AgentManifest, - found_secrets: list[dict], - values_data: dict[str, Any], - cluster: str, - namespace: str, - interactive: bool, - changes: dict[str, list[str]], -) -> None: - """Sync user defined secrets between manifest, cluster, and values file""" - console.print( - f"[bold]Syncing user defined secrets to cluster: {cluster} namespace: {namespace}[/bold]" - ) - - # Get the secrets from the cluster using the specified namespace and cluster context - cluster_secret_names = {secret["name"] for secret in found_secrets} - # Get the secrets from the manifest - agent_config: AgentConfig = manifest_obj.agent - manifest_credentials: list[CredentialMapping] = agent_config.credentials or [] # type: ignore[assignment] - - if not manifest_credentials: - console.print("[yellow]No credentials found in manifest[/yellow]") - return - - # Build required secrets map from manifest - required_secrets = {} # {secret_name: {secret_key: env_var_name}} - for cred in manifest_credentials: - if cred.secret_name not in required_secrets: - 
required_secrets[cred.secret_name] = {} - required_secrets[cred.secret_name][cred.secret_key] = cred.env_var_name - - # Process each required secret - for secret_name, required_keys in required_secrets.items(): - current_secret_data = get_secret_data(secret_name, namespace, cluster) - new_secret_data = {} - secret_needs_update = False - - # Process each required key in this secret - for secret_key, _ in required_keys.items(): - current_value = current_secret_data.get(secret_key) - - # Get the new value - if ( - values_data - and secret_name in values_data - and secret_key in values_data[secret_name] - ): - new_value = values_data[secret_name][secret_key] - elif interactive: - if current_value: - console.print( - f"[blue]Secret '{secret_name}' key '{secret_key}' already exists[/blue]" - ) - update_choice = questionary.select( - "What would you like to do?", - choices=[ - "Keep current value", - "Update with new value", - "Show current value", - ], - ).ask() - update_choice = handle_questionary_cancellation( - update_choice, "secret update choice" - ) - - if update_choice == "Show current value": - console.print(f"Current value: [dim]{current_value}[/dim]") - update_choice = questionary.select( - "What would you like to do?", - choices=["Keep current value", "Update with new value"], - ).ask() - update_choice = handle_questionary_cancellation( - update_choice, "secret update choice" - ) - - if update_choice == "Update with new value": - new_value = interactive_secret_input(secret_name, secret_key) - else: - new_value = current_value - else: - console.print( - f"[yellow]Secret '{secret_name}' key '{secret_key}' does not exist[/yellow]" - ) - new_value = interactive_secret_input(secret_name, secret_key) - else: - raise RuntimeError( - f"No value provided for secret '{secret_name}' key '{secret_key}'. Provide values file or use interactive mode." 
- ) - - # Must be a string because Kubernetes expects secret values to be strings - new_value = str(new_value) - new_secret_data[secret_key] = new_value - - # Check if value changed - if current_value != new_value: - secret_needs_update = True - else: - changes["noop"].append( - f"Secret '{secret_name}' key '{secret_key}' is up to date" - ) - - # Determine action needed - if secret_name not in cluster_secret_names: - changes["create"].append( - f"Create secret '{secret_name}' with keys: {list(required_keys.keys())}" - ) - create_secret_with_data(secret_name, new_secret_data, namespace, cluster) - elif secret_needs_update: - changes["update"].append(f"Update secret '{secret_name}' (values changed)") - update_secret_with_data(secret_name, new_secret_data, namespace, cluster) - - # Handle orphaned secrets (in cluster but not in manifest) - orphaned_secrets = cluster_secret_names - set(required_secrets.keys()) - if orphaned_secrets: - console.print( - f"\n[yellow]Warning: Found {len(orphaned_secrets)} secrets in cluster not defined in manifest:[/yellow]" - ) - for secret in orphaned_secrets: - console.print(f" - {secret}") - - -def create_dockerconfigjson_string( - registry: str, username: str, password: str, email: str | None = None -) -> str: - """Create raw dockerconfigjson string data for use with Kubernetes string_data field""" - # Create the auth field (base64 encoded username:password) - auth_string = f"{username}:{password}" - auth_b64 = base64.b64encode(auth_string.encode("utf-8")).decode("utf-8") - - # Build the auth entry - auth_entry = {"username": username, "password": password, "auth": auth_b64} - - # Only include email if provided - if email: - auth_entry["email"] = email - - # Create the full dockerconfig structure - docker_config = {"auths": {registry: auth_entry}} - - # Return raw JSON string (Kubernetes will handle base64 encoding when using string_data) - return json.dumps(docker_config) - - -def parse_dockerconfigjson_data(input_data: str) -> dict[str, dict[str, str]]: - """Parse existing dockerconfigjson data to extract registry credentials""" - try: - # Parse the raw dockerconfig JSON - config = json.loads(input_data) - - # Extract auths section - auths = config.get("auths", {}) - - # Convert to comparable format: {registry: {username, password, email}} - parsed_auths = {} - for registry, auth_data in auths.items(): - # Try to decode the base64 auth field first - username = "" - password = "" - if "auth" in auth_data: - try: - auth_b64 = auth_data["auth"] - username_password = base64.b64decode(auth_b64).decode("utf-8") - if ":" in username_password: - username, password = username_password.split(":", 1) - except Exception: - pass - - # Fall back to direct username/password fields if auth decode failed - if not username: - username = auth_data.get("username", "") - if not password: - password = auth_data.get("password", "") - - parsed_auths[registry] = { - "username": username, - "password": password, - "email": auth_data.get("email", ""), - } - - return parsed_auths - except Exception: - return {} # If parsing fails, assume empty/invalid - - -def credentials_changed( - current_auths: dict[str, dict[str, str]], - new_registry: str, - new_username: str, - new_password: str, - new_email: str = "", -) -> bool: - """Check if credentials have actually changed""" - - # If registry doesn't exist in current, it's a change - if new_registry not in current_auths: - return True - - current_creds = current_auths[new_registry] - # Compare each field - if ( - current_creds.get("username", "") != new_username - or
current_creds.get("password", "") != new_password - or current_creds.get("email", "") != (new_email or "") - ): - return True - else: - return False # No changes detected - - -def interactive_image_pull_secret_input(secret_name: str) -> dict[str, str]: - """Prompt user for image pull secret values""" - console.print( - f"\n[bold]Configure image pull secret '[cyan]{secret_name}[/cyan]':[/bold]" - ) - - registry = questionary.text( - "Registry URL (e.g., docker.io, gcr.io, your-registry.com):", - default="docker.io", - ).ask() - registry = handle_questionary_cancellation(registry, "registry input") - - username = questionary.text("Username:").ask() - username = handle_questionary_cancellation(username, "username input") - - password = questionary.password("Password (input will be hidden):").ask() - password = handle_questionary_cancellation(password, "password input") - - email_choice = questionary.confirm( - "Do you want to include an email address? (optional)" - ).ask() - email_choice = handle_questionary_cancellation(email_choice, "email choice") - email = "" - if email_choice: - email = questionary.text("Email address:").ask() or "" - if email is None: # Handle None from questionary - email = "" - - return { - "registry": registry, - "username": username, - "password": password, - "email": email, - } - - -def sync_image_pull_secrets( - manifest_obj: AgentManifest, - found_dockerconfigjson_secrets: list[dict], - values_data: dict[str, Any], - cluster: str, - namespace: str, - interactive: bool, - changes: dict[str, list[str]], -) -> None: - """Sync image pull secrets between manifest, cluster, and values file""" - console.print( - f"[bold]Syncing image pull secrets to cluster: {cluster} namespace: {namespace}[/bold]" - ) - - # Get the secrets of type KUBERNETES_SECRET_TYPE_DOCKERCONFIGJSON - cluster_dockerconfigjson_secret_names = { - secret["name"] for secret in found_dockerconfigjson_secrets - } - - # Get the secrets from the manifest - deployment_config: DeploymentConfig = manifest_obj.deployment # type: ignore[assignment] - manifest_image_pull_secrets: list[ImagePullSecretConfig] = ( - deployment_config.imagePullSecrets or [] - ) - - if not manifest_image_pull_secrets: - logger.info("No image pull secrets found in manifest") - return - - # Get image pull secrets from values data - image_pull_values = values_data - - # Process each required image pull secret - for pull_secret in manifest_image_pull_secrets: - secret_name = pull_secret.name - current_secret_data = get_secret_data(secret_name, namespace, cluster) - - # Get new values - new_registry = "" - new_username = "" - new_password = "" - new_email = "" - - if secret_name in image_pull_values: - # Get values from values file - secret_config = image_pull_values[secret_name] - new_registry = secret_config.get("registry", "") - new_username = secret_config.get("username", "") - new_password = secret_config.get("password", "") - new_email = secret_config.get("email", "") - - if not new_registry or not new_username or not new_password: - raise RuntimeError( - f"Incomplete image pull secret configuration for '{secret_name}'. " - f"Required: registry, username, password. 
Optional: email" - ) - elif interactive: - # Get values interactively - if secret_name in cluster_dockerconfigjson_secret_names: - console.print( - f"[blue]Image pull secret '{secret_name}' already exists[/blue]" - ) - update_choice = questionary.select( - "What would you like to do?", - choices=["Keep current credentials", "Update with new credentials"], - ).ask() - update_choice = handle_questionary_cancellation( - update_choice, "image pull secret update choice" - ) - - if update_choice == "Keep current credentials": - continue # Skip this secret - - console.print( - f"[yellow]Image pull secret '{secret_name}' needs configuration[/yellow]" - ) - creds = interactive_image_pull_secret_input(secret_name) - new_registry = creds["registry"] - new_username = creds["username"] - new_password = creds["password"] - new_email = creds["email"] - else: - raise RuntimeError( - f"No configuration provided for image pull secret '{secret_name}'. " - f"Provide values file or use interactive mode." - ) - - # Check if update is needed - secret_needs_update = False - action = "" - - if secret_name not in cluster_dockerconfigjson_secret_names: - # Secret doesn't exist, needs creation - secret_needs_update = True - action = "create" - else: - # Secret exists, check if values changed - current_dockerconfig = current_secret_data.get(".dockerconfigjson", {}) - current_auths = parse_dockerconfigjson_data(current_dockerconfig) - if credentials_changed( - current_auths, new_registry, new_username, new_password, new_email - ): - secret_needs_update = True - action = "update" - else: - changes["noop"].append( - f"Secret '{secret_name}' key '{secret_name}' is up to date" - ) - - # Only perform action if update is needed - if secret_needs_update: - dockerconfig_string = create_dockerconfigjson_string( - new_registry, new_username, new_password, new_email - ) - secret_data = {".dockerconfigjson": dockerconfig_string} - - if action == "create": - changes[action].append( - f"Create image pull secret '{secret_name}' for registry '{new_registry}'" - ) - create_image_pull_secret_with_data( - secret_name, secret_data, namespace, cluster - ) - elif action == "update": - changes[action].append( - f"Update image pull secret '{secret_name}' (credentials changed)" - ) - update_image_pull_secret_with_data( - secret_name, secret_data, namespace, cluster - ) - - -def print_changes_summary(change_type: str, changes: dict[str, list[str]]) -> None: - # Show summary - console.print(f"\n[bold]Sync Summary for {change_type}:[/bold]") - if changes["create"]: - console.print("[green]Created:[/green]") - for change in changes["create"]: - console.print(f" โœ“ {change}") - - if changes["update"]: - console.print("[yellow]Updated:[/yellow]") - for change in changes["update"]: - console.print(f" โš  {change}") - - if changes["noop"]: - console.print("[yellow]No changes:[/yellow]") - for change in changes["noop"]: - console.print(f" โœ“ {change}") - del changes["noop"] - - if not any(changes.values()): - console.print( - f"[green]โœ“ All secrets are already in sync for {change_type}[/green]" - ) - - console.print("") - - -def sync_secrets( - manifest_obj: AgentManifest, - cluster: str, - namespace: str, - interactive: bool, - values_path: str | None, -) -> None: - """Sync secrets between manifest, cluster, and values file""" - logger.info(f"Syncing secrets to cluster: {cluster} namespace: {namespace}") - - # Load values from file if provided - values_data = {} - if values_path: - try: - # TODO: Convert this to a pydantic model to validate the 
values file - values_data = load_values_file(values_path) - console.print(f"[green]Loaded values from {values_path}[/green]") - except Exception as e: - console.print(f"[red]Error loading values file: {e}[/red]") - raise - - # Get the secrets from the cluster using the specified namespace and cluster context - cluster_secrets_by_type = get_kubernetes_secrets_by_type( - namespace=namespace, context=cluster - ) - - # Track changes for summary - changes = {"create": [], "update": [], "noop": []} - - sync_user_defined_secrets( - manifest_obj, - cluster_secrets_by_type[KUBERNETES_SECRET_TYPE_OPAQUE], - values_data.get( - KUBERNETES_SECRET_TO_MANIFEST_KEY[KUBERNETES_SECRET_TYPE_OPAQUE], {} - ), - cluster, - namespace, - interactive, - changes, - ) - - print_changes_summary("User Defined Secrets", changes) - - # Track changes for summary - changes = {"create": [], "update": [], "noop": []} - - sync_image_pull_secrets( - manifest_obj, - cluster_secrets_by_type[KUBERNETES_SECRET_TYPE_DOCKERCONFIGJSON], - values_data.get( - KUBERNETES_SECRET_TO_MANIFEST_KEY[KUBERNETES_SECRET_TYPE_DOCKERCONFIGJSON], - {}, - ), - cluster, - namespace, - interactive, - changes, - ) - - print_changes_summary("Image Pull Secrets", changes) - - console.print( - f"\n[green]Secret sync completed for cluster '{cluster}' namespace '{namespace}'[/green]" - ) diff --git a/src/agentex/lib/cli/templates/default/.dockerignore.j2 b/src/agentex/lib/cli/templates/default/.dockerignore.j2 deleted file mode 100644 index c2d7fca4..00000000 --- a/src/agentex/lib/cli/templates/default/.dockerignore.j2 +++ /dev/null @@ -1,43 +0,0 @@ -# Python -__pycache__/ -*.py[cod] -*$py.class -*.so -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -*.egg-info/ -.installed.cfg -*.egg - -# Environments -.env* -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# IDE -.idea/ -.vscode/ -*.swp -*.swo - -# Git -.git -.gitignore - -# Misc -.DS_Store diff --git a/src/agentex/lib/cli/templates/default/Dockerfile-uv.j2 b/src/agentex/lib/cli/templates/default/Dockerfile-uv.j2 deleted file mode 100644 index 2ac5be7d..00000000 --- a/src/agentex/lib/cli/templates/default/Dockerfile-uv.j2 +++ /dev/null @@ -1,42 +0,0 @@ -# syntax=docker/dockerfile:1.3 -FROM python:3.12-slim -COPY --from=ghcr.io/astral-sh/uv:0.6.4 /uv /uvx /bin/ - -# Install system dependencies -RUN apt-get update && apt-get install -y \ - htop \ - vim \ - curl \ - tar \ - python3-dev \ - postgresql-client \ - build-essential \ - libpq-dev \ - gcc \ - cmake \ - netcat-openbsd \ - nodejs \ - npm \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - -RUN uv pip install --system --upgrade pip setuptools wheel - -ENV UV_HTTP_TIMEOUT=1000 - -# Copy just the pyproject.toml file to optimize caching -COPY {{ project_path_from_build_root }}/pyproject.toml /app/{{ project_path_from_build_root }}/pyproject.toml - -WORKDIR /app/{{ project_path_from_build_root }} - -# Install the required Python packages using uv -RUN uv pip install --system .
- - # Copy the project code - COPY {{ project_path_from_build_root }}/project /app/{{ project_path_from_build_root }}/project - - # Set environment variables - ENV PYTHONPATH=/app - - # Run the agent using uvicorn - CMD ["uvicorn", "project.acp:acp", "--host", "0.0.0.0", "--port", "8000"] \ No newline at end of file diff --git a/src/agentex/lib/cli/templates/default/Dockerfile.j2 b/src/agentex/lib/cli/templates/default/Dockerfile.j2 deleted file mode 100644 index 0395caf7..00000000 --- a/src/agentex/lib/cli/templates/default/Dockerfile.j2 +++ /dev/null @@ -1,42 +0,0 @@ -# syntax=docker/dockerfile:1.3 -FROM python:3.12-slim -COPY --from=ghcr.io/astral-sh/uv:0.6.4 /uv /uvx /bin/ - -# Install system dependencies -RUN apt-get update && apt-get install -y \ - htop \ - vim \ - curl \ - tar \ - python3-dev \ - postgresql-client \ - build-essential \ - libpq-dev \ - gcc \ - cmake \ - netcat-openbsd \ - nodejs \ - npm \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - -RUN uv pip install --system --upgrade pip setuptools wheel - -ENV UV_HTTP_TIMEOUT=1000 - -# Copy just the requirements file to optimize caching -COPY {{ project_path_from_build_root }}/requirements.txt /app/{{ project_path_from_build_root }}/requirements.txt - -WORKDIR /app/{{ project_path_from_build_root }} - -# Install the required Python packages -RUN uv pip install --system -r requirements.txt - -# Copy the project code -COPY {{ project_path_from_build_root }}/project /app/{{ project_path_from_build_root }}/project - -# Set environment variables -ENV PYTHONPATH=/app - -# Run the agent using uvicorn -CMD ["uvicorn", "project.acp:acp", "--host", "0.0.0.0", "--port", "8000"] \ No newline at end of file diff --git a/src/agentex/lib/cli/templates/default/README.md.j2 b/src/agentex/lib/cli/templates/default/README.md.j2 deleted file mode 100644 index 0b5317de..00000000 --- a/src/agentex/lib/cli/templates/default/README.md.j2 +++ /dev/null @@ -1,214 +0,0 @@ -# {{ agent_name }} - AgentEx Starter Template - -This is a generic starter template for building agents with the AgentEx framework. It provides a basic implementation of the Agent 2 Client Protocol (ACP) to help you get started quickly. - -## What You'll Learn - -- **Tasks**: A task is a grouping mechanism for related messages. Think of it as a conversation thread or a session. -- **Messages**: Messages are communication objects within a task. They can contain text, data, or instructions. -- **ACP Events**: The agent responds to four main events: - - `task_received`: When a new task is created - - `task_message_received`: When a message is sent within a task - - `task_approved`: When a task is approved - - `task_canceled`: When a task is canceled - -## Running the Agent - -1. Run the agent locally: -```bash -agentex agents run --manifest manifest.yaml -``` - -The agent will start on port 8000 and print messages whenever it receives any of the ACP events.
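
Once it is running, you can also drive the agent from plain Python instead of the bundled notebook. A minimal sketch mirroring `dev.ipynb` (the base URL below is the local AgentEx backend default used by the notebook; adjust it if your setup differs):

```python
import uuid

from agentex import Agentex

# Local AgentEx backend (default from dev.ipynb)
client = Agentex(base_url="http://localhost:5003")

# Async agents require a task for messages to be associated with
task = client.agents.create_task(
    agent_name="{{ agent_name }}",
    params={"name": f"{str(uuid.uuid4())[:8]}-task", "params": {}},
).result

# Send a text event into the task; the handlers in project/acp.py will fire
client.agents.send_event(
    agent_name="{{ agent_name }}",
    params={
        "content": {"type": "text", "author": "user", "content": "Hello what can you do?"},
        "task_id": task.id,
    },
)
```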
- - ## What's Inside - - This template: - - Sets up a basic ACP server - - Handles each of the required ACP events with simple print statements - - Provides a foundation for building more complex agents - - ## Next Steps - - For more advanced agent development, check out the AgentEx tutorials: - - - **Tutorials 00-08**: Learn about building synchronous agents with ACP - - **Tutorials 09-10**: Learn how to use Temporal to power asynchronous agents - - Tutorial 09: Basic Temporal workflow setup - - Tutorial 10: Advanced Temporal patterns and best practices - - These tutorials will help you understand: - - How to handle long-running tasks - - Implementing state machines - - Managing complex workflows - - Best practices for async agent development - - ## The Manifest File - - The `manifest.yaml` file is your agent's configuration file. It defines: - - How your agent should be built and packaged - - What files are included in your agent's Docker image - - Your agent's name and description - - Local development settings (like the port your agent runs on) - - This file is essential for both local development and deployment of your agent. - - ## Project Structure - - ``` - {{ project_name }}/ - ├── project/ # Your agent's code - │   ├── __init__.py - │   └── acp.py # ACP server and event handlers - ├── Dockerfile # Container definition - ├── manifest.yaml # Deployment config - ├── dev.ipynb # Development notebook for testing - {% if use_uv %} - └── pyproject.toml # Dependencies (uv) - {% else %} - └── requirements.txt # Dependencies (pip) - {% endif %} - ``` - - ## Development - - ### 1. Customize Event Handlers - - Modify the handlers in `acp.py` to implement your agent's logic - - Add your own tools and capabilities - - Implement custom state management - - ### 2. Test Your Agent with the Development Notebook - Use the included `dev.ipynb` Jupyter notebook to test your agent interactively: - - ```bash - # Start Jupyter notebook (make sure you have jupyter installed) - jupyter notebook dev.ipynb - - # Or use VS Code to open the notebook directly - code dev.ipynb - ``` - - The notebook includes: - - **Setup**: Connect to your local AgentEx backend - - **Task creation**: Create a new task for the conversation - - **Event sending**: Send events to the agent and get responses - - **Async message subscription**: Subscribe to server-side events to receive agent responses - - **Rich message display**: Beautiful formatting with timestamps and author information - - The notebook automatically uses your agent name (`{{ agent_name }}`) and demonstrates the async ACP workflow: create task → send event → subscribe to responses. - - ### 3. Manage Dependencies - - {% if use_uv %} - You chose **uv** for package management. Here's how to work with dependencies: - - ```bash - # Add new dependencies - agentex uv add requests openai anthropic - - # Install/sync dependencies - agentex uv sync - - # Run commands with uv - uv run agentex agents run --manifest manifest.yaml - ``` - - **Benefits of uv:** - - Faster dependency resolution and installation - - Better dependency isolation - - Modern Python packaging standards - - {% else %} - You chose **pip** for package management.
Here's how to work with dependencies: - - ```bash - # Edit requirements.txt manually to add dependencies - echo "requests" >> requirements.txt - echo "openai" >> requirements.txt - - # Install dependencies - pip install -r requirements.txt - ``` - - **Benefits of pip:** - - Familiar workflow for most Python developers - - Simple requirements.txt management - - Wide compatibility - {% endif %} - - ### 4. Configure Credentials - Options: - 1. Add any required credentials to your manifest.yaml via the `env` section - 2. Export them in your shell: `export OPENAI_API_KEY=...` - 3. For local development, create a `.env.local` file in the project directory - - ```python - import os - from dotenv import load_dotenv - - if os.environ.get("ENVIRONMENT") == "development": - load_dotenv(".env.local") - ``` - - ## Local Development - - - ### 1. Start the Agentex Backend - ```bash - # Navigate to the backend directory - cd agentex - - # Start all services using Docker Compose - make dev - - # Optional: In a separate terminal, use lazydocker for a better UI (everything should say "healthy") - lzd - ``` - - ### 2. Set Up Your Agent's requirements/pyproject.toml - ```bash - agentex uv sync [--group editable-apy] - source .venv/bin/activate - - # OR - conda create -n {{ project_name }} python=3.12 - conda activate {{ project_name }} - pip install -r requirements.txt - ``` - ### 3. Run Your Agent - ```bash - # From this directory - export ENVIRONMENT=development && [uv run] agentex agents run --manifest manifest.yaml - ``` - - ### 4. Interact with Your Agent - - Option 0: CLI (deprecated - to be replaced once a new CLI is implemented - please use the web UI for now!) - ```bash - # Submit a task via CLI - agentex tasks submit --agent {{ agent_name }} --task "Your task here" - ``` - - Option 1: Web UI - ```bash - # Start the local web interface - cd agentex-web - make dev - - # Then open http://localhost:3000 in your browser to chat with your agent - ``` - - ## Development Tips - - ### Environment Variables - - Set environment variables in project/.env for any required credentials - - Or configure them in the manifest.yaml under the `env` section - - The `.env` file is automatically loaded in development mode - - ### To build the agent Docker image locally (normally not necessary): - - 1. Build the agent image: - ```bash - agentex agents build --manifest manifest.yaml - ``` - diff --git a/src/agentex/lib/cli/templates/default/dev.ipynb.j2 b/src/agentex/lib/cli/templates/default/dev.ipynb.j2 deleted file mode 100644 index d3a68303..00000000 --- a/src/agentex/lib/cli/templates/default/dev.ipynb.j2 +++ /dev/null @@ -1,126 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "36834357", - "metadata": {}, - "outputs": [], - "source": [ - "from agentex import Agentex\n", - "\n", - "client = Agentex(base_url=\"http://localhost:5003\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d1c309d6", - "metadata": {}, - "outputs": [], - "source": [ - "AGENT_NAME = \"{{ agent_name }}\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9f6e6ef0", - "metadata": {}, - "outputs": [], - "source": [ - "# (REQUIRED) Create a new task.
For Async agents, you must create a task for messages to be associated with.\n", - "import uuid\n", - "\n", - "rpc_response = client.agents.create_task(\n", - " agent_name=AGENT_NAME,\n", - " params={\n", - " \"name\": f\"{str(uuid.uuid4())[:8]}-task\",\n", - " \"params\": {}\n", - " }\n", - ")\n", - "\n", - "task = rpc_response.result\n", - "print(task)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b03b0d37", - "metadata": {}, - "outputs": [], - "source": [ - "# Send an event to the agent\n", - "\n", - "# The response is expected to be a list of TaskMessage objects, which is a union of the following types:\n", - "# - TextContent: A message with just text content \n", - "# - DataContent: A message with JSON-serializable data content\n", - "# - ToolRequestContent: A message with a tool request, which contains a JSON-serializable request to call a tool\n", - "# - ToolResponseContent: A message with a tool response, which contains response object from a tool call in its content\n", - "\n", - "# When processing the message/send response, if you are expecting more than TextContent, such as DataContent, ToolRequestContent, or ToolResponseContent, you can process them as well\n", - "\n", - "rpc_response = client.agents.send_event(\n", - " agent_name=AGENT_NAME,\n", - " params={\n", - " \"content\": {\"type\": \"text\", \"author\": \"user\", \"content\": \"Hello what can you do?\"},\n", - " \"task_id\": task.id,\n", - " }\n", - ")\n", - "\n", - "event = rpc_response.result\n", - "print(event)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a6927cc0", - "metadata": {}, - "outputs": [], - "source": [ - "# Subscribe to the async task messages produced by the agent\n", - "from agentex.lib.utils.dev_tools import subscribe_to_async_task_messages\n", - "\n", - "task_messages = subscribe_to_async_task_messages(\n", - " client=client,\n", - " task=task, \n", - " only_after_timestamp=event.created_at, \n", - " print_messages=True,\n", - " rich_print=True,\n", - " timeout=5,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4864e354", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.9" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/src/agentex/lib/cli/templates/default/environments.yaml.j2 b/src/agentex/lib/cli/templates/default/environments.yaml.j2 deleted file mode 100644 index f802776f..00000000 --- a/src/agentex/lib/cli/templates/default/environments.yaml.j2 +++ /dev/null @@ -1,57 +0,0 @@ -# Agent Environment Configuration -# ------------------------------ -# This file defines environment-specific settings for your agent. -# This DIFFERS from the manifest.yaml file in that it is used to program things that are ONLY per environment. 
- -# ********** EXAMPLE ********** -# schema_version: "v1" # This is used to validate the file structure and is not used by the agentex CLI -# environments: -# dev: -# auth: -# principal: -# user_id: "1234567890" -# user_name: "John Doe" -# user_email: "john.doe@example.com" -# user_role: "admin" -# user_permissions: "read, write, delete" -# helm_overrides: # This is used to override the global helm values.yaml file in the agentex-agent helm charts -# replicas: 3 -# resources: -# requests: -# cpu: "1000m" -# memory: "2Gi" -# limits: -# cpu: "2000m" -# memory: "4Gi" -# env: -# - name: LOG_LEVEL -# value: "DEBUG" -# - name: ENVIRONMENT -# value: "staging" -# -# kubernetes: -# # OPTIONAL - Otherwise it will be derived separately. However, this can be used to override the derived -# # namespace and deploy the agent within the same namespace that already exists for a separate agent. -# namespace: "team-{{agent_name}}" -# ********** END EXAMPLE ********** - -schema_version: "v1" # This is used to validate the file structure and is not used by the agentex CLI -environments: - dev: - auth: - principal: - user_id: # TODO: Fill in - account_id: # TODO: Fill in - helm_overrides: - replicaCount: 2 - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "1000m" - memory: "2Gi" - temporal: - enabled: false - - diff --git a/src/agentex/lib/cli/templates/default/manifest.yaml.j2 b/src/agentex/lib/cli/templates/default/manifest.yaml.j2 deleted file mode 100644 index 18406097..00000000 --- a/src/agentex/lib/cli/templates/default/manifest.yaml.j2 +++ /dev/null @@ -1,119 +0,0 @@ -# Agent Manifest Configuration -# --------------------------- -# This file defines how your agent should be built and deployed. - -# Build Configuration -# ------------------ -# The build config defines what gets packaged into your agent's Docker image. -# This same configuration is used whether building locally or remotely. -# -# When building: -# 1. All files from include_paths are collected into a build context -# 2. The context is filtered by dockerignore rules -# 3. The Dockerfile uses this context to build your agent's image -# 4.
The image is pushed to a registry and used to run your agent -build: - context: - # Root directory for the build context - root: ../ # Keep this as the default root - - # Paths to include in the Docker build context - # Must include: - # - Your agent's directory (your custom agent code) - # These paths are collected and sent to the Docker daemon for building - include_paths: - - {{ project_path_from_build_root }} - - # Path to your agent's Dockerfile - # This defines how your agent's image is built from the context - # Relative to the root directory - dockerfile: {{ project_path_from_build_root }}/Dockerfile - - # Path to your agent's .dockerignore - # Filters unnecessary files from the build context - # Helps keep build context small and builds fast - dockerignore: {{ project_path_from_build_root }}/.dockerignore - - -# Local Development Configuration -# ----------------------------- -# Only used when running the agent locally -local_development: - agent: - port: 8000 # Port where your local ACP server is running - host_address: host.docker.internal # Host address for Docker networking (host.docker.internal for Docker, localhost for direct) - - # File paths for local development (relative to this manifest.yaml) - paths: - # Path to ACP server file - # Examples: - # project/acp.py (standard) - # src/server.py (custom structure) - # ../shared/acp.py (shared across projects) - # /absolute/path/acp.py (absolute path) - acp: project/acp.py - - -# Agent Configuration -# ----------------- -agent: - acp_type: async - - # Unique name for your agent - # Used for task routing and monitoring - name: {{ agent_name }} - - # Description of what your agent does - # Helps with documentation and discovery - description: {{ description }} - - # Temporal workflow configuration - # Set enabled: true to use Temporal workflows for long-running tasks - temporal: - enabled: false - - # Optional: Credentials mapping - # Maps Kubernetes secrets to environment variables - # Common credentials include: - credentials: - - env_var_name: REDIS_URL - secret_name: redis-url-secret - secret_key: url - # credentials: - # - env_var_name: OPENAI_API_KEY - # secret_name: openai-api-key - # secret_key: api-key - - # Optional: Set Environment variables for running your agent locally as well - # as for deployment later on - env: {} - # OPENAI_API_KEY: "" - # OPENAI_BASE_URL: "" - # OPENAI_ORG_ID: "" - -# Deployment Configuration -# ----------------------- -# Configuration for deploying your agent to Kubernetes clusters -deployment: - # Container image configuration - image: - repository: "" # Update with your container registry - tag: "latest" # Default tag, should be versioned in production - - imagePullSecrets: [] # Update with your image pull secret names - # - name: my-registry-secret - - # Global deployment settings that apply to all clusters - # These can be overridden in cluster-specific environments (environments.yaml) - global: - # Default replica count - replicaCount: 1 - - # Default resource requirements - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "1000m" - memory: "2Gi" \ No newline at end of file diff --git a/src/agentex/lib/cli/templates/default/project/acp.py.j2 b/src/agentex/lib/cli/templates/default/project/acp.py.j2 deleted file mode 100644 index 5478b51b..00000000 --- a/src/agentex/lib/cli/templates/default/project/acp.py.j2 +++ /dev/null @@ -1,56 +0,0 @@ -from agentex.lib.sdk.fastacp.fastacp import FastACP -from agentex.lib.types.fastacp import AsyncACPConfig -from 
agentex.lib.types.acp import SendEventParams, CancelTaskParams, CreateTaskParams -from agentex.lib.utils.logging import make_logger -from agentex.types.text_content import TextContent -from agentex.lib import adk - - -logger = make_logger(__name__) - - -# Create an ACP server -# This sets up the core server that will handle task creation, events, and cancellation -# The `type="base"` configuration is the default configuration for the ACP server -acp = FastACP.create( - acp_type="async", - config=AsyncACPConfig( - type="base", - ), -) - - -# This handler is called whenever a client sends an event to an existing task. -# It's a good place to process incoming messages and decide how to respond. -@acp.on_task_event_send -async def handle_task_event_send(params: SendEventParams): - # For this tutorial, we log the parameters sent to the handler - # so you can see where and how messages within a long-running task are handled - logger.info(f"Received task event send rpc: {params}") - - # 1. Echo back the client's message to show it in the UI. This is not done by default so the agent developer has full control over what is shown to the user. - await adk.messages.create(task_id=params.task.id, content=params.event.content) - - # 2. Send a simple response message. - # In future tutorials, this is where we'll add more sophisticated response logic. - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content="Hello! I've received your message. I can't respond right now, but in future tutorials we'll see how you can get me to intelligently respond to your message.", - ), - ) - -@acp.on_task_cancel -async def handle_task_canceled(params: CancelTaskParams): - # For this tutorial, we log the parameters sent to the handler - # so you can see where and how task cancellation is handled - logger.info(f"Received task cancel rpc: {params}") - -@acp.on_task_create -async def handle_task_create(params: CreateTaskParams): - # This handler is called first whenever a new task is created. - # For this tutorial, we log the parameters sent to the handler - # so you can see where and how task creation is handled - - # Here is where you can initialize any state or resources needed for the task.
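-    # A minimal, hypothetical sketch of such initialization (the greeting text
-    # is illustrative; `adk.messages.create` and `TextContent` are the same
-    # calls used in `handle_task_event_send` above):
-    #
-    #     await adk.messages.create(
-    #         task_id=params.task.id,
-    #         content=TextContent(author="agent", content="Task created! Send me a message to get started."),
-    #     )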
- logger.info(f"Received task create rpc: {params}") diff --git a/src/agentex/lib/cli/templates/default/pyproject.toml.j2 b/src/agentex/lib/cli/templates/default/pyproject.toml.j2 deleted file mode 100644 index 34e04e6a..00000000 --- a/src/agentex/lib/cli/templates/default/pyproject.toml.j2 +++ /dev/null @@ -1,32 +0,0 @@ -[build-system] -requires = ["hatchling"] -build-backend = "hatchling.build" - -[project] -name = "{{ project_name }}" -version = "0.1.0" -description = "{{ description }}" -requires-python = ">=3.12" -dependencies = [ - "agentex-sdk", - "scale-gp", -] - -[project.optional-dependencies] -dev = [ - "pytest", - "black", - "isort", - "flake8", -] - -[tool.hatch.build.targets.wheel] -packages = ["project"] - -[tool.black] -line-length = 88 -target-version = ['py312'] - -[tool.isort] -profile = "black" -line_length = 88 diff --git a/src/agentex/lib/cli/templates/default/requirements.txt.j2 b/src/agentex/lib/cli/templates/default/requirements.txt.j2 deleted file mode 100644 index 0b8ae19b..00000000 --- a/src/agentex/lib/cli/templates/default/requirements.txt.j2 +++ /dev/null @@ -1,5 +0,0 @@ -# AgentEx SDK -agentex-sdk - -# Scale GenAI Platform Python SDK -scale-gp diff --git a/src/agentex/lib/cli/templates/default/test_agent.py.j2 b/src/agentex/lib/cli/templates/default/test_agent.py.j2 deleted file mode 100644 index ee71f177..00000000 --- a/src/agentex/lib/cli/templates/default/test_agent.py.j2 +++ /dev/null @@ -1,147 +0,0 @@ -""" -Sample tests for AgentEx ACP agent. - -This test suite demonstrates how to test the main AgentEx API functions: -- Non-streaming event sending and polling -- Streaming event sending - -To run these tests: -1. Make sure the agent is running (via docker-compose or `agentex agents run`) -2. Set the AGENTEX_API_BASE_URL environment variable if not using default -3.
Run: pytest test_agent.py -v - -Configuration: -- AGENTEX_API_BASE_URL: Base URL for the AgentEx server (default: http://localhost:5003) -- AGENT_NAME: Name of the agent to test (default: {{ agent_name }}) -""" - -import os -import uuid -import asyncio -import pytest -import pytest_asyncio -from agentex import AsyncAgentex -from agentex.types import TaskMessage -from agentex.types.agent_rpc_params import ParamsCreateTaskRequest -from agentex.types.text_content_param import TextContentParam -from test_utils.async_utils import ( - poll_for_agent_response, - send_event_and_poll_yielding, - stream_agent_response, - validate_text_in_response, - poll_messages, ) - - -# Configuration from environment variables -AGENTEX_API_BASE_URL = os.environ.get("AGENTEX_API_BASE_URL", "http://localhost:5003") -AGENT_NAME = os.environ.get("AGENT_NAME", "{{ agent_name }}") - - -@pytest_asyncio.fixture -async def client(): - """Create an AsyncAgentex client instance for testing.""" - client = AsyncAgentex(base_url=AGENTEX_API_BASE_URL) - yield client - await client.close() - - -@pytest.fixture -def agent_name(): - """Return the agent name for testing.""" - return AGENT_NAME - - -@pytest_asyncio.fixture -async def agent_id(client, agent_name): - """Retrieve the agent ID based on the agent name.""" - agents = await client.agents.list() - for agent in agents: - if agent.name == agent_name: - return agent.id - raise ValueError(f"Agent with name {agent_name} not found.") - - -class TestNonStreamingEvents: - """Test non-streaming event sending and polling.""" - - @pytest.mark.asyncio - async def test_send_event_and_poll(self, client: AsyncAgentex, agent_name: str, agent_id: str): - """Test sending an event and polling for the response.""" - # TODO: Create a task for this conversation - # task_response = await client.agents.create_task(agent_id, params=ParamsCreateTaskRequest(name=uuid.uuid1().hex)) - # task = task_response.result - # assert task is not None - - # TODO: Poll for the initial task creation message (if your agent sends one) - # async for message in poll_messages( - # client=client, - # task_id=task.id, - # timeout=30, - # sleep_interval=1.0, - # ): - # assert isinstance(message, TaskMessage) - # if message.content and message.content.type == "text" and message.content.author == "agent": - # # Check for your expected initial message - # assert "expected initial text" in message.content.content - # break - - # TODO: Send an event and poll for response using the yielding helper function - # user_message = "Your test message here" - # async for message in send_event_and_poll_yielding( - # client=client, - # agent_id=agent_id, - # task_id=task.id, - # user_message=user_message, - # timeout=30, - # sleep_interval=1.0, - # ): - # assert isinstance(message, TaskMessage) - # if message.content and message.content.type == "text" and message.content.author == "agent": - # # Check for your expected response - # assert "expected response text" in message.content.content - # break - pass - - -class TestStreamingEvents: - """Test streaming event sending.""" - - @pytest.mark.asyncio - async def test_send_event_and_stream(self, client: AsyncAgentex, agent_name: str, agent_id: str): - """Test sending an event and streaming the response.""" - # TODO: Create a task for this conversation - # task_response = await client.agents.create_task(agent_id, params=ParamsCreateTaskRequest(name=uuid.uuid1().hex)) - # task = task_response.result - # assert task is not None - - # user_message = "Your test message here" - - # # Collect 
events from stream - # all_events = [] - - # async def collect_stream_events(): - # async for event in stream_agent_response( - # client=client, - # task_id=task.id, - # timeout=30, - # ): - # all_events.append(event) - - # # Start streaming task - # stream_task = asyncio.create_task(collect_stream_events()) - - # # Send the event - # event_content = TextContentParam(type="text", author="user", content=user_message) - # await client.agents.send_event(agent_id=agent_id, params={"task_id": task.id, "content": event_content}) - - # # Wait for streaming to complete - # await stream_task - - # # TODO: Add your validation here - # assert len(all_events) > 0, "No events received in streaming response" - pass - - -if __name__ == "__main__": - pytest.main([__file__, "-v"]) diff --git a/src/agentex/lib/cli/templates/sync/.dockerignore.j2 b/src/agentex/lib/cli/templates/sync/.dockerignore.j2 deleted file mode 100644 index c2d7fca4..00000000 --- a/src/agentex/lib/cli/templates/sync/.dockerignore.j2 +++ /dev/null @@ -1,43 +0,0 @@ -# Python -__pycache__/ -*.py[cod] -*$py.class -*.so -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -*.egg-info/ -.installed.cfg -*.egg - -# Environments -.env** -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# IDE -.idea/ -.vscode/ -*.swp -*.swo - -# Git -.git -.gitignore - -# Misc -.DS_Store diff --git a/src/agentex/lib/cli/templates/sync/Dockerfile-uv.j2 b/src/agentex/lib/cli/templates/sync/Dockerfile-uv.j2 deleted file mode 100644 index 2ac5be7d..00000000 --- a/src/agentex/lib/cli/templates/sync/Dockerfile-uv.j2 +++ /dev/null @@ -1,42 +0,0 @@ -# syntax=docker/dockerfile:1.3 -FROM python:3.12-slim -COPY --from=ghcr.io/astral-sh/uv:0.6.4 /uv /uvx /bin/ - -# Install system dependencies -RUN apt-get update && apt-get install -y \ - htop \ - vim \ - curl \ - tar \ - python3-dev \ - postgresql-client \ - build-essential \ - libpq-dev \ - gcc \ - cmake \ - netcat-openbsd \ - nodejs \ - npm \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/** - -RUN uv pip install --system --upgrade pip setuptools wheel - -ENV UV_HTTP_TIMEOUT=1000 - -# Copy just the pyproject.toml file to optimize caching -COPY {{ project_path_from_build_root }}/pyproject.toml /app/{{ project_path_from_build_root }}/pyproject.toml - -WORKDIR /app/{{ project_path_from_build_root }} - -# Install the required Python packages using uv -RUN uv pip install --system . 
- -# Copy the project code -COPY {{ project_path_from_build_root }}/project /app/{{ project_path_from_build_root }}/project - -# Set environment variables -ENV PYTHONPATH=/app - -# Run the agent using uvicorn -CMD ["uvicorn", "project.acp:acp", "--host", "0.0.0.0", "--port", "8000"] \ No newline at end of file diff --git a/src/agentex/lib/cli/templates/sync/Dockerfile.j2 b/src/agentex/lib/cli/templates/sync/Dockerfile.j2 deleted file mode 100644 index 4d9f41d4..00000000 --- a/src/agentex/lib/cli/templates/sync/Dockerfile.j2 +++ /dev/null @@ -1,43 +0,0 @@ -# syntax=docker/dockerfile:1.3 -FROM python:3.12-slim -COPY --from=ghcr.io/astral-sh/uv:0.6.4 /uv /uvx /bin/ - -# Install system dependencies -RUN apt-get update && apt-get install -y \ - htop \ - vim \ - curl \ - tar \ - python3-dev \ - postgresql-client \ - build-essential \ - libpq-dev \ - gcc \ - cmake \ - netcat-openbsd \ - nodejs \ - npm \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - -RUN uv pip install --system --upgrade pip setuptools wheel - -ENV UV_HTTP_TIMEOUT=1000 - -# Copy just the requirements file to optimize caching -COPY {{ project_path_from_build_root }}/requirements.txt /app/{{ project_path_from_build_root }}/requirements.txt - -WORKDIR /app/{{ project_path_from_build_root }} - -# Install the required Python packages -RUN uv pip install --system -r requirements.txt - -# Copy the project code -COPY {{ project_path_from_build_root }}/project /app/{{ project_path_from_build_root }}/project - - -# Set environment variables -ENV PYTHONPATH=/app - -# Run the agent using uvicorn -CMD ["uvicorn", "project.acp:acp", "--host", "0.0.0.0", "--port", "8000"] \ No newline at end of file diff --git a/src/agentex/lib/cli/templates/sync/README.md.j2 b/src/agentex/lib/cli/templates/sync/README.md.j2 deleted file mode 100644 index b2105705..00000000 --- a/src/agentex/lib/cli/templates/sync/README.md.j2 +++ /dev/null @@ -1,313 +0,0 @@ -# {{ agent_name }} - AgentEx Sync ACP Template - -This is a starter template for building synchronous agents with the AgentEx framework. It provides a basic implementation of the Agent 2 Client Protocol (ACP) with immediate response capabilities to help you get started quickly. - -## What You'll Learn - -- **Tasks**: A task is a grouping mechanism for related messages. Think of it as a conversation thread or a session. -- **Messages**: Messages are communication objects within a task. They can contain text, data, or instructions. -- **Sync ACP**: The synchronous flavor of the Agent 2 Client Protocol, which requires immediate responses -- **Message Handling**: How to process and respond to messages in real-time - -## Running the Agent - -1. Run the agent locally: -```bash -agentex agents run --manifest manifest.yaml -``` - -The agent will start on port 8000 and respond immediately to any messages it receives.
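-
-Once the agent is running, you can send it a quick message from Python. The following is a minimal sketch based on this template's `dev.ipynb` notebook; it assumes the AgentEx backend is reachable at `http://localhost:5003`:
-
-```python
-from agentex import Agentex
-
-client = Agentex(base_url="http://localhost:5003")
-
-# Send a non-streaming message and print the text of the agent's reply
-rpc_response = client.agents.send_message(
-    agent_name="{{ agent_name }}",
-    params={
-        "content": {"type": "text", "author": "user", "content": "Hello!"},
-        "stream": False,
-    },
-)
-for task_message in rpc_response.result:
-    if task_message.content and task_message.content.type == "text":
-        print(task_message.content.content)
-```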
- -## What's Inside - -This template: -- Sets up a basic sync ACP server -- Handles incoming messages with immediate responses -- Provides a foundation for building real-time agents -- Can include streaming support for long responses - -## Next Steps - -For more advanced agent development, check out the AgentEx tutorials: - -- **Tutorials 00-08**: Learn about building synchronous agents with ACP -- **Tutorials 09-10**: Learn how to use Temporal to power asynchronous agents - - Tutorial 09: Basic Temporal workflow setup - - Tutorial 10: Advanced Temporal patterns and best practices - -These tutorials will help you understand: -- How to handle long-running tasks -- Implementing state machines -- Managing complex workflows -- Best practices for async agent development - -## The Manifest File - -The `manifest.yaml` file is your agent's configuration file. It defines: -- How your agent should be built and packaged -- What files are included in your agent's Docker image -- Your agent's name and description -- Local development settings (like the port your agent runs on) - -This file is essential for both local development and deployment of your agent. - -## Project Structure - -``` -{{ project_name }}/ -├── project/ # Your agent's code -│ ├── __init__.py -│ └── acp.py # ACP server and event handlers -├── Dockerfile # Container definition -├── manifest.yaml # Deployment config -├── dev.ipynb # Development notebook for testing -{% if use_uv %} -└── pyproject.toml # Dependencies (uv) -{% else %} -└── requirements.txt # Dependencies (pip) -{% endif %} -``` - -## Development - -### 1. Customize Message Handlers -- Modify the handlers in `acp.py` to implement your agent's logic -- Add your own tools and capabilities -- Implement custom response generation - -### 2. Test Your Agent with the Development Notebook -Use the included `dev.ipynb` Jupyter notebook to test your agent interactively: - -```bash -# Start Jupyter notebook (make sure you have jupyter installed) -jupyter notebook dev.ipynb - -# Or use VS Code to open the notebook directly -code dev.ipynb -``` - -The notebook includes: -- **Setup**: Connect to your local AgentEx backend -- **Non-streaming tests**: Send messages and get complete responses -- **Streaming tests**: Test real-time streaming responses -- **Task management**: Optional task creation and management - -The notebook automatically uses your agent name (`{{ agent_name }}`) and provides examples for both streaming and non-streaming message handling. - -### 3. Manage Dependencies - -{% if use_uv %} -You chose **uv** for package management. Here's how to work with dependencies: - -```bash -# Add new dependencies -agentex uv add requests openai anthropic - -# Install/sync dependencies -agentex uv sync - -# Run commands with uv -uv run agentex agents run --manifest manifest.yaml -``` - -**Benefits of uv:** -- Faster dependency resolution and installation -- Better dependency isolation -- Modern Python packaging standards - -{% else %} -You chose **pip** for package management. Here's how to work with dependencies: - -```bash -# Edit requirements.txt manually to add dependencies -echo "requests" >> requirements.txt -echo "openai" >> requirements.txt - -# Install dependencies -pip install -r requirements.txt -``` - -**Benefits of pip:** -- Familiar workflow for most Python developers -- Simple requirements.txt management -- Wide compatibility -{% endif %} - -### 4. Configure Credentials -Options: -1.
Add any required credentials to your manifest.yaml via the `env` section -2. Export them in your shell: `export OPENAI_API_KEY=...` -3. For local development, create a `.env` file in the project directory - -## Local Development - -### 1. Start the Agentex Backend -```bash -# Navigate to the backend directory -cd agentex - -# Start all services using Docker Compose -make dev - -# Optional: In a separate terminal, use lazydocker for a better UI (everything should say "healthy") -lzd -``` - -### 2. Run Your Agent -```bash -# From this directory -export ENVIRONMENT=development && agentex agents run --manifest manifest.yaml -``` - -### 3. Interact with Your Agent - -**Option 1: Web UI (Recommended)** -```bash -# Start the local web interface -cd agentex-web -make dev - -# Then open http://localhost:3000 in your browser to chat with your agent -``` - -**Option 2: CLI (Deprecated)** -```bash -# Submit a task via CLI -agentex tasks submit --agent {{ agent_name }} --task "Your task here" -``` - -## Development Tips - -### Environment Variables -- Set environment variables in project/.env for any required credentials -- Or configure them in the manifest.yaml under the `env` section -- The `.env` file is automatically loaded in development mode - -### Local Testing -- Use `export ENVIRONMENT=development` before running your agent -- This enables local service discovery and debugging features -- Your agent will automatically connect to locally running services - -### Sync ACP Considerations -- Responses must be immediate (no long-running operations) -- Use streaming for longer responses -- Keep processing lightweight and fast -- Consider caching for frequently accessed data - -### Debugging -- Check agent logs in the terminal where you ran the agent -- Use the web UI to inspect task history and responses -- Monitor backend services with `lzd` (LazyDocker) -- Test response times and optimize for speed - -### To build the agent Docker image locally (normally not necessary): - -1.
Build the agent image: -```bash -agentex agents build --manifest manifest.yaml -``` - -2. Optionally, build and push in one step: -```bash -agentex agents build --manifest manifest.yaml --push -``` - - -## Advanced Features - -### Streaming Responses -Handle long responses with streaming: - -```python -# In project/acp.py -@acp.on_message_send -async def handle_message_send(params: SendMessageParams): - # For streaming responses (illustrative sketch; see dev.ipynb for the - # concrete TaskMessageUpdate variants such as StreamTaskMessageDelta) - async def stream_response(): - for chunk in generate_response_chunks(): - yield TaskMessageUpdate( - content=chunk, - is_complete=False - ) - yield TaskMessageUpdate( - content="", - is_complete=True - ) - - return stream_response() -``` - -### Custom Response Logic -Add sophisticated response generation: - -```python -# In project/acp.py -@acp.on_message_send -async def handle_message_send(params: SendMessageParams): - # Analyze input - user_message = params.content.content - - # Generate response - response = await generate_intelligent_response(user_message) - - return TextContent( - author="agent", - content=response - ) -``` - -### Integration with External Services -{% if use_uv %} -```bash -# Add service clients -agentex uv add httpx requests-oauthlib - -# Add AI/ML libraries -agentex uv add openai anthropic transformers - -# Add fast processing libraries -agentex uv add numpy pandas -``` -{% else %} -```bash -# Add to requirements.txt -echo "httpx" >> requirements.txt -echo "openai" >> requirements.txt -echo "numpy" >> requirements.txt -pip install -r requirements.txt -``` -{% endif %} - -## Troubleshooting - -### Common Issues - -1. **Agent not appearing in web UI** - - Check if agent is running on port 8000 - - Verify `ENVIRONMENT=development` is set - - Check agent logs for errors - -2. **Slow response times** - - Profile your message handling code - - Consider caching expensive operations - - Optimize database queries and API calls - -3. **Dependency issues** -{% if use_uv %} - - Run `agentex uv sync` to ensure all dependencies are installed -{% else %} - - Run `pip install -r requirements.txt` - - Check if all dependencies are correctly listed in requirements.txt -{% endif %} - -4. **Port conflicts** - - Check if another service is using port 8000 - - Use `lsof -i :8000` to find conflicting processes - -Happy building with Sync ACP! 🚀⚡ \ No newline at end of file diff --git a/src/agentex/lib/cli/templates/sync/dev.ipynb.j2 b/src/agentex/lib/cli/templates/sync/dev.ipynb.j2 deleted file mode 100644 index d8c10a65..00000000 --- a/src/agentex/lib/cli/templates/sync/dev.ipynb.j2 +++ /dev/null @@ -1,167 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "36834357", - "metadata": {}, - "outputs": [], - "source": [ - "from agentex import Agentex\n", - "\n", - "client = Agentex(base_url=\"http://localhost:5003\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d1c309d6", - "metadata": {}, - "outputs": [], - "source": [ - "AGENT_NAME = \"{{ agent_name }}\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9f6e6ef0", - "metadata": {}, - "outputs": [], - "source": [ - "# # (Optional) Create a new task. If you don't create a new task, each message will be sent to a new task.
The server will create the task for you.\n", - "\n", - "# import uuid\n", - "\n", - "# TASK_ID = str(uuid.uuid4())[:8]\n", - "\n", - "# rpc_response = client.agents.rpc_by_name(\n", - "# agent_name=AGENT_NAME,\n", - "# method=\"task/create\",\n", - "# params={\n", - "# \"name\": f\"{TASK_ID}-task\",\n", - "# \"params\": {}\n", - "# }\n", - "# )\n", - "\n", - "# task = rpc_response.result\n", - "# print(task)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b03b0d37", - "metadata": {}, - "outputs": [], - "source": [ - "# Test non-streaming response\n", - "from agentex.types import TextContent\n", - "\n", - "# The response is expected to be a list of TaskMessage objects, which is a union of the following types:\n", - "# - TextContent: A message with just text content \n", - "# - DataContent: A message with JSON-serializable data content\n", - "# - ToolRequestContent: A message with a tool request, which contains a JSON-serializable request to call a tool\n", - "# - ToolResponseContent: A message with a tool response, which contains response object from a tool call in its content\n", - "\n", - "# When processing the message/send response, if you are expecting more than TextContent, such as DataContent, ToolRequestContent, or ToolResponseContent, you can process them as well\n", - "\n", - "rpc_response = client.agents.send_message(\n", - " agent_name=AGENT_NAME,\n", - " params={\n", - " \"content\": {\"type\": \"text\", \"author\": \"user\", \"content\": \"Hello what can you do?\"},\n", - " \"stream\": False\n", - " }\n", - ")\n", - "\n", - "if not rpc_response or not rpc_response.result:\n", - " raise ValueError(\"No result in response\")\n", - "\n", - "# Extract and print just the text content from the response\n", - "for task_message in rpc_response.result:\n", - " content = task_message.content\n", - " if isinstance(content, TextContent):\n", - " text = content.content\n", - " print(text)\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "79688331", - "metadata": {}, - "outputs": [], - "source": [ - "# Test streaming response\n", - "from agentex.types.task_message_update import StreamTaskMessageDelta, StreamTaskMessageFull\n", - "from agentex.types.text_delta import TextDelta\n", - "\n", - "\n", - "# The result object of message/send will be a TaskMessageUpdate which is a union of the following types:\n", - "# - StreamTaskMessageStart: \n", - "# - An indicator that a streaming message was started, doesn't contain any useful content\n", - "# - StreamTaskMessageDelta: \n", - "# - A delta of a streaming message, contains the text delta to aggregate\n", - "# - StreamTaskMessageDone: \n", - "# - An indicator that a streaming message was done, doesn't contain any useful content\n", - "# - StreamTaskMessageFull: \n", - "# - A non-streaming message, there is nothing to aggregate, since this contains the full message, not deltas\n", - "\n", - "# When processing StreamTaskMessageDelta, if you are expecting more than TextDeltas, such as DataDelta, ToolRequestDelta, or ToolResponseDelta, you can process them as well\n", - "# When processing StreamTaskMessageFull, if you are expecting more than TextContent, such as DataContent, ToolRequestContent, or ToolResponseContent, you can process them as well\n", - "\n", - "for agent_rpc_response_chunk in client.agents.send_message_stream(\n", - " agent_name=AGENT_NAME,\n", - " params={\n", - " \"content\": {\"type\": \"text\", \"author\": \"user\", \"content\": \"Hello what can you do?\"},\n", - " \"stream\": 
True\n", - " }\n", - "):\n", - " # We know that the result of the message/send when stream is set to True will be a TaskMessageUpdate\n", - " task_message_update = agent_rpc_response_chunk.result\n", - " # Print only the text deltas as they arrive or any full messages\n", - " if isinstance(task_message_update, StreamTaskMessageDelta):\n", - " delta = task_message_update.delta\n", - " if isinstance(delta, TextDelta):\n", - " print(delta.text_delta, end=\"\", flush=True)\n", - " else:\n", - " print(f\"Found non-text {type(delta)} object in streaming message.\")\n", - " elif isinstance(task_message_update, StreamTaskMessageFull):\n", - " content = task_message_update.content\n", - " if isinstance(content, TextContent):\n", - " print(content.content)\n", - " else:\n", - " print(f\"Found non-text {type(content)} object in full message.\")\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c5e7e042", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.9" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/src/agentex/lib/cli/templates/sync/environments.yaml.j2 b/src/agentex/lib/cli/templates/sync/environments.yaml.j2 deleted file mode 100644 index 73924abd..00000000 --- a/src/agentex/lib/cli/templates/sync/environments.yaml.j2 +++ /dev/null @@ -1,53 +0,0 @@ -# Agent Environment Configuration -# ------------------------------ -# This file defines environment-specific settings for your agent. -# This DIFFERS from the manifest.yaml file in that it is used to program things that are ONLY per environment. - -# ********** EXAMPLE ********** -# schema_version: "v1" # This is used to validate the file structure and is not used by the agentex CLI -# environments: -# dev: -# auth: -# principal: -# user_id: "1234567890" -# user_name: "John Doe" -# user_email: "john.doe@example.com" -# user_role: "admin" -# user_permissions: "read, write, delete" -# helm_overrides: # This is used to override the global helm values.yaml file in the agentex-agent helm charts -# replicas: 3 -# resources: -# requests: -# cpu: "1000m" -# memory: "2Gi" -# limits: -# cpu: "2000m" -# memory: "4Gi" -# env: -# - name: LOG_LEVEL -# value: "DEBUG" -# - name: ENVIRONMENT -# value: "staging" -# kubernetes: -# # OPTIONAL - Otherwise it will be derived separately. However, this can be used to override the derived -# # namespace and deploy the agent within the same namespace that already exists for a separate agent.
-# namespace: "team-{{agent_name}}" -# ********** END EXAMPLE ********** - -schema_version: "v1" # This is used to validate the file structure and is not used by the agentex CLI -environments: - dev: - auth: - principal: - user_id: # TODO: Fill in - account_id: # TODO: Fill in - helm_overrides: - replicaCount: 2 - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "1000m" - memory: "2Gi" - diff --git a/src/agentex/lib/cli/templates/sync/manifest.yaml.j2 b/src/agentex/lib/cli/templates/sync/manifest.yaml.j2 deleted file mode 100644 index b006c617..00000000 --- a/src/agentex/lib/cli/templates/sync/manifest.yaml.j2 +++ /dev/null @@ -1,115 +0,0 @@ -# Agent Manifest Configuration -# --------------------------- -# This file defines how your agent should be built and deployed. - -# Build Configuration -# ------------------ -# The build config defines what gets packaged into your agent's Docker image. -# This same configuration is used whether building locally or remotely. -# -# When building: -# 1. All files from include_paths are collected into a build context -# 2. The context is filtered by dockerignore rules -# 3. The Dockerfile uses this context to build your agent's image -# 4. The image is pushed to a registry and used to run your agent -build: - context: - # Root directory for the build context - root: ../ # Keep this as the default root - - # Paths to include in the Docker build context - # Must include: - # - Your agent's directory (your custom agent code) - # These paths are collected and sent to the Docker daemon for building - include_paths: - - {{ project_path_from_build_root }} - - # Path to your agent's Dockerfile - # This defines how your agent's image is built from the context - # Relative to the root directory - dockerfile: {{ project_path_from_build_root }}/Dockerfile - - # Path to your agent's .dockerignore - # Filters unnecessary files from the build context - # Helps keep build context small and builds fast - dockerignore: {{ project_path_from_build_root }}/.dockerignore - - -# Local Development Configuration -# ----------------------------- -# Only used when running the agent locally -local_development: - agent: - port: 8000 # Port where your local ACP server is running - host_address: host.docker.internal # Host address for Docker networking (host.docker.internal for Docker, localhost for direct) - - # File paths for local development (relative to this manifest.yaml) - paths: - # Path to ACP server file - # Examples: - # project/acp.py (standard) - # src/server.py (custom structure) - # ../shared/acp.py (shared across projects) - # /absolute/path/acp.py (absolute path) - acp: project/acp.py - - -# Agent Configuration -# ----------------- -agent: - acp_type: sync - # Unique name for your agent - # Used for task routing and monitoring - name: {{ agent_name }} - - # Description of what your agent does - # Helps with documentation and discovery - description: {{ description }} - - # Temporal workflow configuration - # Set enabled: true to use Temporal workflows for long-running tasks - temporal: - enabled: false - - # Optional: Credentials mapping - # Maps Kubernetes secrets to environment variables - # Common credentials include: - credentials: [] # Update with your credentials - # - env_var_name: OPENAI_API_KEY - # secret_name: openai-api-key - # secret_key: api-key - - # Optional: Set Environment variables for running your agent locally as well - # as for deployment later on - env: {} # Update with your environment variables - # OPENAI_API_KEY: "" - # 
OPENAI_BASE_URL: "" - # OPENAI_ORG_ID: "" - - -# Deployment Configuration -# ----------------------- -# Configuration for deploying your agent to Kubernetes clusters -deployment: - # Container image configuration - image: - repository: "" # Update with your container registry - tag: "latest" # Default tag, should be versioned in production - - imagePullSecrets: [] # Update with your image pull secret names - # - name: my-registry-secret - - # Global deployment settings that apply to all clusters - # These can be overridden in cluster-specific environments (environments.yaml) - global: - # Default replica count - replicaCount: 1 - - # Default resource requirements - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "1000m" - memory: "2Gi" \ No newline at end of file diff --git a/src/agentex/lib/cli/templates/sync/project/acp.py.j2 b/src/agentex/lib/cli/templates/sync/project/acp.py.j2 deleted file mode 100644 index 7184d26a..00000000 --- a/src/agentex/lib/cli/templates/sync/project/acp.py.j2 +++ /dev/null @@ -1,26 +0,0 @@ -from typing import AsyncGenerator -from agentex.lib.sdk.fastacp.fastacp import FastACP -from agentex.lib.types.acp import SendMessageParams - -from agentex.types.task_message_update import TaskMessageUpdate -from agentex.types.task_message_content import TaskMessageContent -from agentex.types.text_content import TextContent -from agentex.lib.utils.logging import make_logger - -logger = make_logger(__name__) - - -# Create an ACP server -acp = FastACP.create( - acp_type="sync", -) - -@acp.on_message_send -async def handle_message_send( - params: SendMessageParams -) -> TaskMessageContent | list[TaskMessageContent] | AsyncGenerator[TaskMessageUpdate, None]: - """Default message handler with streaming support""" - return TextContent( - author="agent", - content=f"Hello! I've received your message. Here's a generic response, but in future tutorials we'll see how you can get me to intelligently respond to your message. This is what I heard you say: {params.content.content}", - ) \ No newline at end of file diff --git a/src/agentex/lib/cli/templates/sync/pyproject.toml.j2 b/src/agentex/lib/cli/templates/sync/pyproject.toml.j2 deleted file mode 100644 index 34e04e6a..00000000 --- a/src/agentex/lib/cli/templates/sync/pyproject.toml.j2 +++ /dev/null @@ -1,32 +0,0 @@ -[build-system] -requires = ["hatchling"] -build-backend = "hatchling.build" - -[project] -name = "{{ project_name }}" -version = "0.1.0" -description = "{{ description }}" -requires-python = ">=3.12" -dependencies = [ - "agentex-sdk", - "scale-gp", -] - -[project.optional-dependencies] -dev = [ - "pytest", - "black", - "isort", - "flake8", -] - -[tool.hatch.build.targets.wheel] -packages = ["project"] - -[tool.black] -line-length = 88 -target-version = ['py312'] - -[tool.isort] -profile = "black" -line_length = 88 diff --git a/src/agentex/lib/cli/templates/sync/requirements.txt.j2 b/src/agentex/lib/cli/templates/sync/requirements.txt.j2 deleted file mode 100644 index 0b8ae19b..00000000 --- a/src/agentex/lib/cli/templates/sync/requirements.txt.j2 +++ /dev/null @@ -1,5 +0,0 @@ -# AgentEx SDK -agentex-sdk - -# Scale GenAI Platform Python SDK -scale-gp diff --git a/src/agentex/lib/cli/templates/sync/test_agent.py.j2 b/src/agentex/lib/cli/templates/sync/test_agent.py.j2 deleted file mode 100644 index 7de4684f..00000000 --- a/src/agentex/lib/cli/templates/sync/test_agent.py.j2 +++ /dev/null @@ -1,70 +0,0 @@ -""" -Sample tests for AgentEx ACP agent.
- -This test suite demonstrates how to test the main AgentEx API functions: -- Non-streaming message sending -- Streaming message sending -- Task creation via RPC - -To run these tests: -1. Make sure the agent is running (via docker-compose or `agentex agents run`) -2. Set the AGENTEX_API_BASE_URL environment variable if not using default -3. Run: pytest test_agent.py -v - -Configuration: -- AGENTEX_API_BASE_URL: Base URL for the AgentEx server (default: http://localhost:5003) -- AGENT_NAME: Name of the agent to test (default: {{ agent_name }}) -""" - -import os -import pytest -from agentex import Agentex - - -# Configuration from environment variables -AGENTEX_API_BASE_URL = os.environ.get("AGENTEX_API_BASE_URL", "http://localhost:5003") -AGENT_NAME = os.environ.get("AGENT_NAME", "{{ agent_name }}") - - -@pytest.fixture -def client(): - """Create an AgentEx client instance for testing.""" - return Agentex(base_url=AGENTEX_API_BASE_URL) - - -@pytest.fixture -def agent_name(): - """Return the agent name for testing.""" - return AGENT_NAME - - -@pytest.fixture -def agent_id(client, agent_name): - """Retrieve the agent ID based on the agent name.""" - agents = client.agents.list() - for agent in agents: - if agent.name == agent_name: - return agent.id - raise ValueError(f"Agent with name {agent_name} not found.") - - -class TestNonStreamingMessages: - """Test non-streaming message sending.""" - - def test_send_message(self, client: Agentex, agent_name: str): - """Test sending a message and receiving a response.""" - # TODO: Fill in the test based on what data your agent is expected to handle - ... - - -class TestStreamingMessages: - """Test streaming message sending.""" - - def test_send_stream_message(self, client: Agentex, agent_name: str): - """Test streaming a message and aggregating deltas.""" - # TODO: Fill in the test based on what data your agent is expected to handle - ...
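-
-# A hypothetical example of a filled-in non-streaming test, adapted from the
-# `send_message` usage in this template's dev.ipynb notebook:
-#
-# def test_send_message_returns_text(self, client: Agentex, agent_name: str):
-#     rpc_response = client.agents.send_message(
-#         agent_name=agent_name,
-#         params={
-#             "content": {"type": "text", "author": "user", "content": "Hello"},
-#             "stream": False,
-#         },
-#     )
-#     assert rpc_response is not None and rpc_response.result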
- - -if __name__ == "__main__": - pytest.main([__file__, "-v"]) diff --git a/src/agentex/lib/cli/templates/temporal-openai-agents/.dockerignore.j2 b/src/agentex/lib/cli/templates/temporal-openai-agents/.dockerignore.j2 deleted file mode 100644 index c2d7fca4..00000000 --- a/src/agentex/lib/cli/templates/temporal-openai-agents/.dockerignore.j2 +++ /dev/null @@ -1,43 +0,0 @@ -# Python -__pycache__/ -*.py[cod] -*$py.class -*.so -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -*.egg-info/ -.installed.cfg -*.egg - -# Environments -.env** -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# IDE -.idea/ -.vscode/ -*.swp -*.swo - -# Git -.git -.gitignore - -# Misc -.DS_Store diff --git a/src/agentex/lib/cli/templates/temporal-openai-agents/Dockerfile-uv.j2 b/src/agentex/lib/cli/templates/temporal-openai-agents/Dockerfile-uv.j2 deleted file mode 100644 index 81dd9c5b..00000000 --- a/src/agentex/lib/cli/templates/temporal-openai-agents/Dockerfile-uv.j2 +++ /dev/null @@ -1,48 +0,0 @@ -# syntax=docker/dockerfile:1.3 -FROM python:3.12-slim -COPY --from=ghcr.io/astral-sh/uv:0.6.4 /uv /uvx /bin/ - -# Install system dependencies -RUN apt-get update && apt-get install -y \ - htop \ - vim \ - curl \ - tar \ - python3-dev \ - postgresql-client \ - build-essential \ - libpq-dev \ - gcc \ - cmake \ - netcat-openbsd \ - nodejs \ - npm \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/** - -# Install tctl (Temporal CLI) -RUN curl -L https://github.com/temporalio/tctl/releases/download/v1.18.1/tctl_1.18.1_linux_arm64.tar.gz -o /tmp/tctl.tar.gz && \ - tar -xzf /tmp/tctl.tar.gz -C /usr/local/bin && \ - chmod +x /usr/local/bin/tctl && \ - rm /tmp/tctl.tar.gz - -RUN uv pip install --system --upgrade pip setuptools wheel - -ENV UV_HTTP_TIMEOUT=1000 - -# Copy just the pyproject.toml file to optimize caching -COPY {{ project_path_from_build_root }}/pyproject.toml /app/{{ project_path_from_build_root }}/pyproject.toml - -WORKDIR /app/{{ project_path_from_build_root }} - -# Install the required Python packages using uv -RUN uv pip install --system . 
- -# Copy the project code -COPY {{ project_path_from_build_root }}/project /app/{{ project_path_from_build_root }}/project - -# Run the ACP server using uvicorn -CMD ["uvicorn", "project.acp:acp", "--host", "0.0.0.0", "--port", "8000"] - -# When we deploy the worker, we will replace the CMD with the following -# CMD ["python", "-m", "run_worker"] \ No newline at end of file diff --git a/src/agentex/lib/cli/templates/temporal-openai-agents/Dockerfile.j2 b/src/agentex/lib/cli/templates/temporal-openai-agents/Dockerfile.j2 deleted file mode 100644 index 4c1798c4..00000000 --- a/src/agentex/lib/cli/templates/temporal-openai-agents/Dockerfile.j2 +++ /dev/null @@ -1,48 +0,0 @@ -# syntax=docker/dockerfile:1.3 -FROM python:3.12-slim -COPY --from=ghcr.io/astral-sh/uv:0.6.4 /uv /uvx /bin/ - -# Install system dependencies -RUN apt-get update && apt-get install -y \ - htop \ - vim \ - curl \ - tar \ - python3-dev \ - postgresql-client \ - build-essential \ - libpq-dev \ - gcc \ - cmake \ - netcat-openbsd \ - nodejs \ - npm \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - -# Install tctl (Temporal CLI) -RUN curl -L https://github.com/temporalio/tctl/releases/download/v1.18.1/tctl_1.18.1_linux_arm64.tar.gz -o /tmp/tctl.tar.gz && \ - tar -xzf /tmp/tctl.tar.gz -C /usr/local/bin && \ - chmod +x /usr/local/bin/tctl && \ - rm /tmp/tctl.tar.gz - -RUN uv pip install --system --upgrade pip setuptools wheel - -ENV UV_HTTP_TIMEOUT=1000 - -# Copy just the requirements file to optimize caching -COPY {{ project_path_from_build_root }}/requirements.txt /app/{{ project_path_from_build_root }}/requirements.txt - -WORKDIR /app/{{ project_path_from_build_root }} - -# Install the required Python packages -RUN uv pip install --system -r requirements.txt - -# Copy the project code -COPY {{ project_path_from_build_root }}/project /app/{{ project_path_from_build_root }}/project - -# Run the ACP server using uvicorn -CMD ["uvicorn", "project.acp:acp", "--host", "0.0.0.0", "--port", "8000"] - -# When we deploy the worker, we will replace the CMD with the following -# CMD ["python", "-m", "run_worker"] \ No newline at end of file diff --git a/src/agentex/lib/cli/templates/temporal-openai-agents/README.md.j2 b/src/agentex/lib/cli/templates/temporal-openai-agents/README.md.j2 deleted file mode 100644 index 071fc24b..00000000 --- a/src/agentex/lib/cli/templates/temporal-openai-agents/README.md.j2 +++ /dev/null @@ -1,224 +0,0 @@ -# {{ agent_name }} - AgentEx Temporal + OpenAI Agents SDK Template - -This is a starter template for building AI agents with the AgentEx framework, Temporal workflows, and OpenAI Agents SDK. It provides a production-ready foundation with: - -- **Durable execution** via Temporal workflows -- **AI agent capabilities** via OpenAI Agents SDK -- **Tool use** via Temporal activities -- **Streaming responses** for real-time feedback -- **Conversation state management** across turns -- **Tracing/observability** via SGP integration - -## What You'll Learn - -- **Tasks**: A task is a grouping mechanism for related messages (like a conversation thread) -- **Messages**: Communication objects within a task (text, data, instructions) -- **Temporal Workflows**: Long-running processes with state management and async operations -- **Activities**: Non-deterministic operations (API calls, I/O) that Temporal can retry and recover -- **OpenAI Agents SDK**: Building AI agents with tools, instructions, and streaming - -## Running the Agent - -1.
Run the agent locally: -```bash -agentex agents run --manifest manifest.yaml -``` - -The agent will start on port 8000 and be ready to handle conversations. - -## Project Structure - -``` -{{ project_name }}/ -├── project/ # Your agent's code -│ ├── __init__.py -│ ├── acp.py # ACP server with OpenAI plugin setup -│ ├── workflow.py # Temporal workflow with OpenAI agent -│ ├── activities.py # Temporal activities (tools for your agent) -│ └── run_worker.py # Temporal worker setup -├── Dockerfile # Container definition -├── manifest.yaml # Deployment config -├── dev.ipynb # Development notebook for testing -{% if use_uv %} -└── pyproject.toml # Dependencies (uv) -{% else %} -└── requirements.txt # Dependencies (pip) -{% endif %} -``` - -## Key Concepts - -### Activities as Tools - -Activities are Temporal's way of handling non-deterministic operations. In this template, activities also serve as tools for your OpenAI agent: - -```python -# In activities.py - define the activity -@activity.defn -async def get_weather() -> str: - return "Sunny, 72°F" - -# In workflow.py - use it as a tool for the agent -agent = Agent( - name="my-agent", - tools=[ - openai_agents.workflow.activity_as_tool( - get_weather, - start_to_close_timeout=timedelta(minutes=5), - ), - ], -) -``` - -### Conversation State - -The workflow maintains conversation history across turns using `StateModel`: - -```python -class StateModel(BaseModel): - input_list: List[Dict[str, Any]] # Conversation history - turn_number: int # Turn counter for tracing -``` - -### Tracing - -Each conversation turn creates a tracing span for observability: - -```python -async with adk.tracing.span( - trace_id=params.task.id, - name=f"Turn {self._state.turn_number}", - input=turn_input.model_dump(), -) as span: - # Agent execution happens here -``` - -## Adding New Tools/Activities - -See the detailed instructions in `project/activities.py`. The process is: - -1. **Define** the activity in `activities.py` -2. **Register** it in `run_worker.py` -3. **Add** it as a tool in `workflow.py` - -## Temporal Dashboard - -Monitor your workflows and activities at: - -``` -http://localhost:8080 -``` - -The dashboard shows: -- Running and completed workflows -- Activity execution history -- Retries and failures -- Workflow state and signals - -## Development - -### 1. Customize the Agent - -Edit `project/workflow.py` to change: -- Agent instructions -- Model (default: `gpt-4o-mini`) -- Tools available to the agent - -### 2. Add New Activities - -See `project/activities.py` for detailed instructions on adding new tools. - -### 3. Test with the Development Notebook - -```bash -jupyter notebook dev.ipynb -# Or in VS Code -code dev.ipynb -``` - -### 4. Manage Dependencies - -{% if use_uv %} -```bash -# Add new dependencies -agentex uv add requests anthropic - -# Install/sync dependencies -agentex uv sync -``` -{% else %} -```bash -# Add to requirements.txt -echo "requests" >> requirements.txt -pip install -r requirements.txt -``` -{% endif %} - -## Local Development - -### 1. Start the Agentex Backend -```bash -cd agentex -make dev -``` - -### 2. Set Up Your Agent's Environment -```bash -{% if use_uv %} -agentex uv sync -source .venv/bin/activate -{% else %} -pip install -r requirements.txt -{% endif %} -``` - -### 3. Run Your Agent -```bash -export ENVIRONMENT=development -agentex agents run --manifest manifest.yaml -``` - -### 4.
Interact with Your Agent - -Via Web UI: -```bash -cd agentex-web -make dev -# Open http://localhost:3000 -``` - -## Environment Variables - -For local development, create a `.env` file: - -```bash -OPENAI_API_KEY=your-api-key -SGP_API_KEY=your-sgp-key # Optional: for tracing -SGP_ACCOUNT_ID=your-account-id # Optional: for tracing -``` - -## Troubleshooting - -### Common Issues - -1. **Agent not responding** - - Check if agent is running on port 8000 - - Verify `ENVIRONMENT=development` is set - - Check logs for errors - -2. **Temporal workflow issues** - - Check Temporal Web UI at http://localhost:8080 - - Verify Temporal server is running - - Check workflow logs - -3. **OpenAI API errors** - - Verify `OPENAI_API_KEY` is set - - Check API rate limits - - Verify model name is correct - -4. **Activity failures** - - Check activity logs in console - - Verify activity is registered in `run_worker.py` - - Check timeout settings - -Happy building with Temporal + OpenAI Agents SDK! diff --git a/src/agentex/lib/cli/templates/temporal-openai-agents/dev.ipynb.j2 b/src/agentex/lib/cli/templates/temporal-openai-agents/dev.ipynb.j2 deleted file mode 100644 index d3a68303..00000000 --- a/src/agentex/lib/cli/templates/temporal-openai-agents/dev.ipynb.j2 +++ /dev/null @@ -1,126 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "36834357", - "metadata": {}, - "outputs": [], - "source": [ - "from agentex import Agentex\n", - "\n", - "client = Agentex(base_url=\"http://localhost:5003\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d1c309d6", - "metadata": {}, - "outputs": [], - "source": [ - "AGENT_NAME = \"{{ agent_name }}\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9f6e6ef0", - "metadata": {}, - "outputs": [], - "source": [ - "# (REQUIRED) Create a new task. 
For Async agents, you must create a task for messages to be associated with.\n", - "import uuid\n", - "\n", - "rpc_response = client.agents.create_task(\n", - " agent_name=AGENT_NAME,\n", - " params={\n", - " \"name\": f\"{str(uuid.uuid4())[:8]}-task\",\n", - " \"params\": {}\n", - " }\n", - ")\n", - "\n", - "task = rpc_response.result\n", - "print(task)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b03b0d37", - "metadata": {}, - "outputs": [], - "source": [ - "# Send an event to the agent\n", - "\n", - "# The response is expected to be a list of TaskMessage objects, which is a union of the following types:\n", - "# - TextContent: A message with just text content \n", - "# - DataContent: A message with JSON-serializable data content\n", - "# - ToolRequestContent: A message with a tool request, which contains a JSON-serializable request to call a tool\n", - "# - ToolResponseContent: A message with a tool response, which contains response object from a tool call in its content\n", - "\n", - "# When processing the message/send response, if you are expecting more than TextContent, such as DataContent, ToolRequestContent, or ToolResponseContent, you can process them as well\n", - "\n", - "rpc_response = client.agents.send_event(\n", - " agent_name=AGENT_NAME,\n", - " params={\n", - " \"content\": {\"type\": \"text\", \"author\": \"user\", \"content\": \"Hello what can you do?\"},\n", - " \"task_id\": task.id,\n", - " }\n", - ")\n", - "\n", - "event = rpc_response.result\n", - "print(event)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a6927cc0", - "metadata": {}, - "outputs": [], - "source": [ - "# Subscribe to the async task messages produced by the agent\n", - "from agentex.lib.utils.dev_tools import subscribe_to_async_task_messages\n", - "\n", - "task_messages = subscribe_to_async_task_messages(\n", - " client=client,\n", - " task=task, \n", - " only_after_timestamp=event.created_at, \n", - " print_messages=True,\n", - " rich_print=True,\n", - " timeout=5,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4864e354", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.9" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/src/agentex/lib/cli/templates/temporal-openai-agents/environments.yaml.j2 b/src/agentex/lib/cli/templates/temporal-openai-agents/environments.yaml.j2 deleted file mode 100644 index a3df5e22..00000000 --- a/src/agentex/lib/cli/templates/temporal-openai-agents/environments.yaml.j2 +++ /dev/null @@ -1,64 +0,0 @@ -# Agent Environment Configuration -# ------------------------------ -# This file defines environment-specific settings for your agent. -# This DIFFERS from the manifest.yaml file in that it is used to program things that are ONLY per environment. 
- -# ********** EXAMPLE ********** -# schema_version: "v1" # This is used to validate the file structure and is not used by the agentex CLI -# environments: -# dev: -# auth: -# principal: -# user_id: "1234567890" -# user_name: "John Doe" -# user_email: "john.doe@example.com" -# user_role: "admin" -# user_permissions: "read, write, delete" -# helm_overrides: # This is used to override the global helm values.yaml file in the agentex-agent helm charts -# replicas: 3 -# resources: -# requests: -# cpu: "1000m" -# memory: "2Gi" -# limits: -# cpu: "2000m" -# memory: "4Gi" -# env: -# - name: LOG_LEVEL -# value: "DEBUG" -# - name: ENVIRONMENT -# value: "staging" -# -# kubernetes: -# # OPTIONAL - Otherwise it will be derived from separately. However, this can be used to override the derived -# # namespace and deploy it with in the same namespace that already exists for a separate agent. -# namespace: "team-{{agent_name}}" -# ********** END EXAMPLE ********** - -schema_version: "v1" # This is used to validate the file structure and is not used by the agentex CLI -environments: - dev: - auth: - principal: - user_id: # TODO: Fill in - account_id: # TODO: Fill in - helm_overrides: - # This is used to override the global helm values.yaml file in the agentex-agent helm charts - replicaCount: 2 - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "1000m" - memory: "2Gi" - temporal-worker: - enabled: true - replicaCount: 2 - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "1000m" - memory: "2Gi" \ No newline at end of file diff --git a/src/agentex/lib/cli/templates/temporal-openai-agents/manifest.yaml.j2 b/src/agentex/lib/cli/templates/temporal-openai-agents/manifest.yaml.j2 deleted file mode 100644 index a6433ce7..00000000 --- a/src/agentex/lib/cli/templates/temporal-openai-agents/manifest.yaml.j2 +++ /dev/null @@ -1,140 +0,0 @@ -# Agent Manifest Configuration -# --------------------------- -# This file defines how your agent should be built and deployed. - -# Build Configuration -# ------------------ -# The build config defines what gets packaged into your agent's Docker image. -# This same configuration is used whether building locally or remotely. -# -# When building: -# 1. All files from include_paths are collected into a build context -# 2. The context is filtered by dockerignore rules -# 3. The Dockerfile uses this context to build your agent's image -# 4. 
The image is pushed to a registry and used to run your agent -build: - context: - # Root directory for the build context - root: ../ # Keep this as the default root - - # Paths to include in the Docker build context - # Must include: - # - Your agent's directory (your custom agent code) - # These paths are collected and sent to the Docker daemon for building - include_paths: - - {{ project_path_from_build_root }} - - # Path to your agent's Dockerfile - # This defines how your agent's image is built from the context - # Relative to the root directory - dockerfile: {{ project_path_from_build_root }}/Dockerfile - - # Path to your agent's .dockerignore - # Filters unnecessary files from the build context - # Helps keep build context small and builds fast - dockerignore: {{ project_path_from_build_root }}/.dockerignore - - -# Local Development Configuration -# ----------------------------- -# Only used when running the agent locally -local_development: - agent: - port: 8000 # Port where your local ACP server is running - host_address: host.docker.internal # Host address for Docker networking (host.docker.internal for Docker, localhost for direct) - - # File paths for local development (relative to this manifest.yaml) - paths: - # Path to ACP server file - # Examples: - # project/acp.py (standard) - # src/server.py (custom structure) - # ../shared/acp.py (shared across projects) - # /absolute/path/acp.py (absolute path) - acp: project/acp.py - - # Path to temporal worker file - # Examples: - # project/run_worker.py (standard) - # workers/temporal.py (custom structure) - # ../shared/worker.py (shared across projects) - worker: project/run_worker.py - - -# Agent Configuration -# ----------------- -agent: - # Type of agent - either sync or async - acp_type: async - - # Unique name for your agent - # Used for task routing and monitoring - name: {{ agent_name }} - - # Description of what your agent does - # Helps with documentation and discovery - description: {{ description }} - - # Temporal workflow configuration - # This enables your agent to run as a Temporal workflow for long-running tasks - temporal: - enabled: true - workflows: - # Name of the workflow class - # Must match the @workflow.defn name in your workflow.py - - name: {{ workflow_name }} - - # Queue name for task distribution - # Used by Temporal to route tasks to your agent - # Convention: _task_queue - queue_name: {{ queue_name }} - - # Optional: Health check port for temporal worker - # Defaults to 80 if not specified - # health_check_port: 80 - - # Optional: Credentials mapping - # Maps Kubernetes secrets to environment variables - # Common credentials include: - credentials: - - env_var_name: REDIS_URL - secret_name: redis-url-secret - secret_key: url - # - env_var_name: OPENAI_API_KEY - # secret_name: openai-api-key - # secret_key: api-key - - # Optional: Set Environment variables for running your agent locally as well - # as for deployment later on - env: {} - # OPENAI_API_KEY: "" - # OPENAI_BASE_URL: "" - # OPENAI_ORG_ID: "" - - -# Deployment Configuration -# ----------------------- -# Configuration for deploying your agent to Kubernetes clusters -deployment: - # Container image configuration - image: - repository: "" # Update with your container registry - tag: "latest" # Default tag, should be versioned in production - - imagePullSecrets: [] # Update with your image pull secret name - # - name: my-registry-secret - - # Global deployment settings that apply to all clusters - # These can be overridden in cluster-specific 
environments (environments.yaml) - global: - # Default replica count - replicaCount: 1 - - # Default resource requirements - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "1000m" - memory: "2Gi" \ No newline at end of file diff --git a/src/agentex/lib/cli/templates/temporal-openai-agents/project/acp.py.j2 b/src/agentex/lib/cli/templates/temporal-openai-agents/project/acp.py.j2 deleted file mode 100644 index 87a3fdb9..00000000 --- a/src/agentex/lib/cli/templates/temporal-openai-agents/project/acp.py.j2 +++ /dev/null @@ -1,80 +0,0 @@ -import os -import sys -from temporalio.contrib.openai_agents import OpenAIAgentsPlugin, ModelActivityParameters -from datetime import timedelta -from agentex.lib.core.temporal.plugins.openai_agents.interceptors.context_interceptor import ContextInterceptor -from agentex.lib.core.temporal.plugins.openai_agents.models.temporal_streaming_model import ( - TemporalStreamingModelProvider, -) - -# === DEBUG SETUP (AgentEx CLI Debug Support) === -if os.getenv("AGENTEX_DEBUG_ENABLED") == "true": - try: - import debugpy - from agentex.lib.utils.logging import make_logger - - logger = make_logger(__name__) - debug_port = int(os.getenv("AGENTEX_DEBUG_PORT", "5679")) - debug_type = os.getenv("AGENTEX_DEBUG_TYPE", "acp") - wait_for_attach = os.getenv("AGENTEX_DEBUG_WAIT_FOR_ATTACH", "false").lower() == "true" - - # Configure debugpy - debugpy.configure(subProcess=False) - debugpy.listen(debug_port) - - logger.info(f"[{debug_type.upper()}] Debug server listening on port {debug_port}") - - if wait_for_attach: - logger.info(f"[{debug_type.upper()}] Waiting for debugger to attach...") - debugpy.wait_for_client() - logger.info(f"[{debug_type.upper()}] Debugger attached!") - else: - logger.info(f"[{debug_type.upper()}] Ready for debugger attachment") - - except ImportError: - print("debugpy not available. Install with: pip install debugpy") - sys.exit(1) - except Exception as e: - print(f"Debug setup failed: {e}") - sys.exit(1) -# === END DEBUG SETUP === - -from agentex.lib.sdk.fastacp.fastacp import FastACP -from agentex.lib.types.fastacp import TemporalACPConfig - -context_interceptor = ContextInterceptor() -streaming_model_provider = TemporalStreamingModelProvider() - - -# Create the ACP server -acp = FastACP.create( - acp_type="async", - config=TemporalACPConfig( - # When deployed to the cluster, the Temporal address will automatically be set to the cluster address - # For local development, we set the address manually to talk to the local Temporal service set up via docker compose - type="temporal", - temporal_address=os.getenv("TEMPORAL_ADDRESS", "localhost:7233"), - plugins=[OpenAIAgentsPlugin( - model_params=ModelActivityParameters( - start_to_close_timeout=timedelta(days=1) - ), - model_provider=streaming_model_provider - )], - interceptors=[context_interceptor] - ) -) - - -# Notice that we don't need to register any handlers when we use type="temporal" -# If you look at the code in agentex.sdk.fastacp.impl.temporal_acp -# You can see that these handlers are automatically registered when the ACP is created - -# @acp.on_task_create -# This will be handled by the method in your workflow that is decorated with @workflow.run - -# @acp.on_task_event_send -# This will be handled by the method in your workflow that is decorated with @workflow.signal(name=SignalName.RECEIVE_MESSAGE) - -# @acp.on_task_cancel -# This does not need to be handled by your workflow. 
-# It is automatically handled by the temporal client which cancels the workflow directly diff --git a/src/agentex/lib/cli/templates/temporal-openai-agents/project/activities.py.j2 b/src/agentex/lib/cli/templates/temporal-openai-agents/project/activities.py.j2 deleted file mode 100644 index 907cb287..00000000 --- a/src/agentex/lib/cli/templates/temporal-openai-agents/project/activities.py.j2 +++ /dev/null @@ -1,116 +0,0 @@ -""" -Temporal Activities for OpenAI Agents SDK -========================================== - -WHAT ARE ACTIVITIES? --------------------- -Activities are functions that perform non-deterministic operations - things that -might have different results each time they run, such as: -- API calls (weather services, databases, external services) -- File I/O operations -- Current time/date lookups -- Random number generation -- Any operation with side effects - -Temporal workflows must be deterministic (same input = same output every time). -Activities let you safely perform non-deterministic work while Temporal handles -retries, timeouts, and failure recovery automatically. - - -HOW TO ADD NEW ACTIVITIES: --------------------------- -Adding a new activity requires 3 steps: - -1. DEFINE the activity in this file with the @activity.defn decorator: - - @activity.defn - async def my_new_activity(param: str) -> str: - # Your non-deterministic logic here - return result - -2. REGISTER it in run_worker.py by adding to the activities list: - - from project.activities import get_weather, my_new_activity - - all_activities = get_all_activities() + [ - stream_lifecycle_content, - get_weather, - my_new_activity, # Add your new activity here - ] - -3. ADD it as a tool to your OpenAI agent in workflow.py: - - from project.activities import get_weather, my_new_activity - - agent = Agent( - name="...", - tools=[ - openai_agents.workflow.activity_as_tool( - get_weather, - start_to_close_timeout=timedelta(minutes=5), - ), - openai_agents.workflow.activity_as_tool( - my_new_activity, # Add your new activity as a tool - start_to_close_timeout=timedelta(minutes=5), - ), - ], - ) - - -RUNNING ACTIVITIES OUTSIDE OPENAI AGENT SDK: --------------------------------------------- -You can also call activities directly from your workflow without going through -the OpenAI agent. This is useful for setup/teardown operations or when you need -to run an activity before the agent starts: - - from temporalio import workflow - from datetime import timedelta - - # Inside your workflow method: - result = await workflow.execute_activity( - get_weather, - start_to_close_timeout=timedelta(minutes=5), - ) - -For activities with parameters: - - result = await workflow.execute_activity( - my_activity_with_params, - "param_value", # positional args - start_to_close_timeout=timedelta(minutes=5), - ) - - -TEMPORAL DASHBOARD: -------------------- -Monitor your workflows and activities in real-time at: - - http://localhost:8080 - -The dashboard shows: -- Running and completed workflows -- Activity execution history -- Retries and failures -- Workflow state and signals -""" - -from temporalio import activity - -from agentex.lib.utils.logging import make_logger - -logger = make_logger(__name__) - - -@activity.defn -async def get_weather() -> str: - """ - Get the current weather. - - This is a dummy activity that returns a hardcoded string for demo purposes. - Replace this with a real weather API call in your implementation. - - Returns: - A string describing the current weather conditions. 
-    """
-    logger.info("get_weather activity called")
-    return "Sunny, 72°F"
diff --git a/src/agentex/lib/cli/templates/temporal-openai-agents/project/run_worker.py.j2 b/src/agentex/lib/cli/templates/temporal-openai-agents/project/run_worker.py.j2
deleted file mode 100644
index 2516d3e0..00000000
--- a/src/agentex/lib/cli/templates/temporal-openai-agents/project/run_worker.py.j2
+++ /dev/null
@@ -1,56 +0,0 @@
-import asyncio
-
-from agentex.lib.core.temporal.activities import get_all_activities
-from agentex.lib.core.temporal.workers.worker import AgentexWorker
-from agentex.lib.utils.logging import make_logger
-from agentex.lib.utils.debug import setup_debug_if_enabled
-from agentex.lib.environment_variables import EnvironmentVariables
-from temporalio.contrib.openai_agents import OpenAIAgentsPlugin, ModelActivityParameters
-from datetime import timedelta
-from agentex.lib.core.temporal.plugins.openai_agents.interceptors.context_interceptor import ContextInterceptor
-from agentex.lib.core.temporal.plugins.openai_agents.hooks.activities import stream_lifecycle_content
-from agentex.lib.core.temporal.plugins.openai_agents.models.temporal_streaming_model import (
-    TemporalStreamingModelProvider,
-)
-from project.workflow import {{ workflow_class }}
-from project.activities import get_weather
-
-environment_variables = EnvironmentVariables.refresh()
-
-logger = make_logger(__name__)
-
-
-async def main():
-    # Setup debug mode if enabled
-    setup_debug_if_enabled()
-
-    task_queue_name = environment_variables.WORKFLOW_TASK_QUEUE
-    if task_queue_name is None:
-        raise ValueError("WORKFLOW_TASK_QUEUE is not set")
-
-    # Register all activities here
-    # When you add new activities in activities.py, add them to this list
-    all_activities = get_all_activities() + [stream_lifecycle_content, get_weather]
-
-    context_interceptor = ContextInterceptor()
-    streaming_model_provider = TemporalStreamingModelProvider()
-
-    # Create a worker with automatic tracing
-    worker = AgentexWorker(
-        task_queue=task_queue_name,
-        plugins=[OpenAIAgentsPlugin(
-            model_params=ModelActivityParameters(
-                start_to_close_timeout=timedelta(days=1)
-            ),
-            model_provider=streaming_model_provider
-        )],
-        interceptors=[context_interceptor],
-    )
-
-    await worker.run(
-        activities=all_activities,
-        workflow={{ workflow_class }},
-    )
-
-if __name__ == "__main__":
-    asyncio.run(main())
diff --git a/src/agentex/lib/cli/templates/temporal-openai-agents/project/workflow.py.j2 b/src/agentex/lib/cli/templates/temporal-openai-agents/project/workflow.py.j2
deleted file mode 100644
index 94b5221f..00000000
--- a/src/agentex/lib/cli/templates/temporal-openai-agents/project/workflow.py.j2
+++ /dev/null
@@ -1,169 +0,0 @@
-import json
-import os
-
-from temporalio import workflow
-
-from agentex.lib import adk
-from agentex.lib.types.acp import CreateTaskParams, SendEventParams
-from agentex.lib.core.temporal.workflows.workflow import BaseWorkflow
-from agentex.lib.core.temporal.types.workflow import SignalName
-from agentex.lib.utils.logging import make_logger
-from agentex.types.text_content import TextContent
-from agentex.lib.environment_variables import EnvironmentVariables
-from agents import Agent, Runner
-from agentex.lib.core.temporal.plugins.openai_agents.hooks.hooks import TemporalStreamingHooks
-from pydantic import BaseModel
-from typing import List, Dict, Any
-from temporalio.contrib import openai_agents
-from project.activities import get_weather
-from agentex.lib.core.tracing.tracing_processor_manager import (
-
add_tracing_processor_config, -) -from agentex.lib.types.tracing import SGPTracingProcessorConfig -from datetime import timedelta - - -environment_variables = EnvironmentVariables.refresh() - -if environment_variables.WORKFLOW_NAME is None: - raise ValueError("Environment variable WORKFLOW_NAME is not set") - -if environment_variables.AGENT_NAME is None: - raise ValueError("Environment variable AGENT_NAME is not set") - -logger = make_logger(__name__) - -# Setup tracing for SGP (Scale GenAI Platform) -# This enables visibility into your agent's execution in the SGP dashboard -add_tracing_processor_config( - SGPTracingProcessorConfig( - sgp_api_key=os.environ.get("SGP_API_KEY", ""), - sgp_account_id=os.environ.get("SGP_ACCOUNT_ID", ""), - ) -) - - -class StateModel(BaseModel): - """ - State model for preserving conversation history across turns. - - This allows the agent to maintain context throughout the conversation, - making it possible to reference previous messages and build on the discussion. - - Attributes: - input_list: The conversation history in OpenAI message format. - turn_number: Counter for tracking conversation turns (useful for tracing). - """ - - input_list: List[Dict[str, Any]] - turn_number: int - - -class TurnInput(BaseModel): - """Input model for tracing spans.""" - input_list: List[Dict[str, Any]] - - -class TurnOutput(BaseModel): - """Output model for tracing spans.""" - final_output: Any - - -@workflow.defn(name=environment_variables.WORKFLOW_NAME) -class {{ workflow_class }}(BaseWorkflow): - """ - Workflow for {{ agent_name }} agent using OpenAI Agents SDK. - - This workflow: - - Maintains conversation state across turns - - Creates tracing spans for each turn - - Runs an OpenAI agent with tools (activities) - - Streams responses back to the client - """ - - def __init__(self): - super().__init__(display_name=environment_variables.AGENT_NAME) - self._complete_task = False - self._state: StateModel = StateModel(input_list=[], turn_number=0) - self._task_id = None - self._trace_id = None - self._parent_span_id = None - - @workflow.signal(name=SignalName.RECEIVE_EVENT) - async def on_task_event_send(self, params: SendEventParams) -> None: - logger.info(f"Received task message instruction: {params}") - - # Increment turn number for tracing - self._state.turn_number += 1 - - self._task_id = params.task.id - self._trace_id = params.task.id - self._parent_span_id = params.task.id - - # Add the user message to conversation history - self._state.input_list.append({"role": "user", "content": params.event.content.content}) - - # Echo back the client's message to show it in the UI - await adk.messages.create(task_id=params.task.id, content=params.event.content) - - temporal_streaming_hooks = TemporalStreamingHooks(task_id=params.task.id) - - # Create a span to track this turn of the conversation - turn_input = TurnInput( - input_list=self._state.input_list, - ) - async with adk.tracing.span( - trace_id=params.task.id, - name=f"Turn {self._state.turn_number}", - input=turn_input.model_dump(), - ) as span: - self._parent_span_id = span.id if span else None - - # Create the OpenAI agent with tools - # Add your activities as tools using activity_as_tool() - agent = Agent( - name="{{ agent_name }}", - instructions="You are a helpful assistant. 
Use your tools to help the user.", - model="gpt-4o-mini", - tools=[ - openai_agents.workflow.activity_as_tool( - get_weather, - start_to_close_timeout=timedelta(minutes=5), - ), - # Add more tools here as you create new activities: - # openai_agents.workflow.activity_as_tool( - # your_new_activity, - # start_to_close_timeout=timedelta(minutes=5), - # ), - ], - ) - - # Run the agent with hooks to enable streaming responses - result = await Runner.run(agent, self._state.input_list, hooks=temporal_streaming_hooks) - - # Update the state with the assistant's response for the next turn - self._state.input_list = result.to_input_list() # type: ignore[assignment] - - # Set span output for tracing - include full state - if span: - turn_output = TurnOutput(final_output=result.final_output) - span.output = turn_output.model_dump() - - @workflow.run - async def on_task_create(self, params: CreateTaskParams) -> str: - logger.info(f"Received task create params: {params}") - - # Acknowledge that the task has been created - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content=f"Hello! I'm {{ agent_name }}, your AI assistant. How can I help you today?\n\nParams received:\n{json.dumps(params.params, indent=2)}", - ), - ) - - await workflow.wait_condition( - lambda: self._complete_task, - timeout=None, - ) - return "Task completed" diff --git a/src/agentex/lib/cli/templates/temporal-openai-agents/pyproject.toml.j2 b/src/agentex/lib/cli/templates/temporal-openai-agents/pyproject.toml.j2 deleted file mode 100644 index a1ebab93..00000000 --- a/src/agentex/lib/cli/templates/temporal-openai-agents/pyproject.toml.j2 +++ /dev/null @@ -1,35 +0,0 @@ -[build-system] -requires = ["hatchling"] -build-backend = "hatchling.build" - -[project] -name = "{{ project_name }}" -version = "0.1.0" -description = "{{ description }}" -requires-python = ">=3.12" -dependencies = [ - "agentex-sdk", - "scale-gp", - "temporalio", - "openai-agents>=0.4.2", -] - -[project.optional-dependencies] -dev = [ - "pytest", - "black", - "isort", - "flake8", - "debugpy>=1.8.15", -] - -[tool.hatch.build.targets.wheel] -packages = ["project"] - -[tool.black] -line-length = 88 -target-version = ['py312'] - -[tool.isort] -profile = "black" -line_length = 88 diff --git a/src/agentex/lib/cli/templates/temporal-openai-agents/requirements.txt.j2 b/src/agentex/lib/cli/templates/temporal-openai-agents/requirements.txt.j2 deleted file mode 100644 index d4bd7a0f..00000000 --- a/src/agentex/lib/cli/templates/temporal-openai-agents/requirements.txt.j2 +++ /dev/null @@ -1,4 +0,0 @@ -agentex-sdk -scale-gp -temporalio -openai-agents>=0.4.2 diff --git a/src/agentex/lib/cli/templates/temporal-openai-agents/test_agent.py.j2 b/src/agentex/lib/cli/templates/temporal-openai-agents/test_agent.py.j2 deleted file mode 100644 index ee71f177..00000000 --- a/src/agentex/lib/cli/templates/temporal-openai-agents/test_agent.py.j2 +++ /dev/null @@ -1,147 +0,0 @@ -""" -Sample tests for AgentEx ACP agent. - -This test suite demonstrates how to test the main AgentEx API functions: -- Non-streaming event sending and polling -- Streaming event sending - -To run these tests: -1. Make sure the agent is running (via docker-compose or `agentex agents run`) -2. Set the AGENTEX_API_BASE_URL environment variable if not using default -3. 
Run: pytest test_agent.py -v
-
-Configuration:
-- AGENTEX_API_BASE_URL: Base URL for the AgentEx server (default: http://localhost:5003)
-- AGENT_NAME: Name of the agent to test (default: {{ agent_name }})
-"""
-
-import os
-import uuid
-import asyncio
-import pytest
-import pytest_asyncio
-from agentex import AsyncAgentex
-from agentex.types import TaskMessage
-from agentex.types.agent_rpc_params import ParamsCreateTaskRequest
-from agentex.types.text_content_param import TextContentParam
-from test_utils.async_utils import (
-    poll_for_agent_response,
-    send_event_and_poll_yielding,
-    stream_agent_response,
-    validate_text_in_response,
-    poll_messages,
-)
-
-
-# Configuration from environment variables
-AGENTEX_API_BASE_URL = os.environ.get("AGENTEX_API_BASE_URL", "http://localhost:5003")
-AGENT_NAME = os.environ.get("AGENT_NAME", "{{ agent_name }}")
-
-
-@pytest_asyncio.fixture
-async def client():
-    """Create an AsyncAgentex client instance for testing."""
-    client = AsyncAgentex(base_url=AGENTEX_API_BASE_URL)
-    yield client
-    await client.close()
-
-
-@pytest.fixture
-def agent_name():
-    """Return the agent name for testing."""
-    return AGENT_NAME
-
-
-@pytest_asyncio.fixture
-async def agent_id(client, agent_name):
-    """Retrieve the agent ID based on the agent name."""
-    agents = await client.agents.list()
-    for agent in agents:
-        if agent.name == agent_name:
-            return agent.id
-    raise ValueError(f"Agent with name {agent_name} not found.")
-
-
-class TestNonStreamingEvents:
-    """Test non-streaming event sending and polling."""
-
-    @pytest.mark.asyncio
-    async def test_send_event_and_poll(self, client: AsyncAgentex, agent_name: str, agent_id: str):
-        """Test sending an event and polling for the response."""
-        # TODO: Create a task for this conversation
-        # task_response = await client.agents.create_task(agent_id, params=ParamsCreateTaskRequest(name=uuid.uuid1().hex))
-        # task = task_response.result
-        # assert task is not None
-
-        # TODO: Poll for the initial task creation message (if your agent sends one)
-        # async for message in poll_messages(
-        #     client=client,
-        #     task_id=task.id,
-        #     timeout=30,
-        #     sleep_interval=1.0,
-        # ):
-        #     assert isinstance(message, TaskMessage)
-        #     if message.content and message.content.type == "text" and message.content.author == "agent":
-        #         # Check for your expected initial message
-        #         assert "expected initial text" in message.content.content
-        #         break
-
-        # TODO: Send an event and poll for response using the yielding helper function
-        # user_message = "Your test message here"
-        # async for message in send_event_and_poll_yielding(
-        #     client=client,
-        #     agent_id=agent_id,
-        #     task_id=task.id,
-        #     user_message=user_message,
-        #     timeout=30,
-        #     sleep_interval=1.0,
-        # ):
-        #     assert isinstance(message, TaskMessage)
-        #     if message.content and message.content.type == "text" and message.content.author == "agent":
-        #         # Check for your expected response
-        #         assert "expected response text" in message.content.content
-        #         break
-        pass
-
-
-class TestStreamingEvents:
-    """Test streaming event sending."""
-
-    @pytest.mark.asyncio
-    async def test_send_event_and_stream(self, client: AsyncAgentex, agent_name: str, agent_id: str):
-        """Test sending an event and streaming the response."""
-        # TODO: Create a task for this conversation
-        # task_response = await client.agents.create_task(agent_id, params=ParamsCreateTaskRequest(name=uuid.uuid1().hex))
-        # task = task_response.result
-        # assert task is not None
-
-        # user_message = "Your test message here"
-
-        # # Collect
events from stream - # all_events = [] - - # async def collect_stream_events(): - # async for event in stream_agent_response( - # client=client, - # task_id=task.id, - # timeout=30, - # ): - # all_events.append(event) - - # # Start streaming task - # stream_task = asyncio.create_task(collect_stream_events()) - - # # Send the event - # event_content = TextContentParam(type="text", author="user", content=user_message) - # await client.agents.send_event(agent_id=agent_id, params={"task_id": task.id, "content": event_content}) - - # # Wait for streaming to complete - # await stream_task - - # # TODO: Add your validation here - # assert len(all_events) > 0, "No events received in streaming response" - pass - - -if __name__ == "__main__": - pytest.main([__file__, "-v"]) diff --git a/src/agentex/lib/cli/templates/temporal/.dockerignore.j2 b/src/agentex/lib/cli/templates/temporal/.dockerignore.j2 deleted file mode 100644 index c2d7fca4..00000000 --- a/src/agentex/lib/cli/templates/temporal/.dockerignore.j2 +++ /dev/null @@ -1,43 +0,0 @@ -# Python -__pycache__/ -*.py[cod] -*$py.class -*.so -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -*.egg-info/ -.installed.cfg -*.egg - -# Environments -.env** -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# IDE -.idea/ -.vscode/ -*.swp -*.swo - -# Git -.git -.gitignore - -# Misc -.DS_Store diff --git a/src/agentex/lib/cli/templates/temporal/Dockerfile-uv.j2 b/src/agentex/lib/cli/templates/temporal/Dockerfile-uv.j2 deleted file mode 100644 index 81dd9c5b..00000000 --- a/src/agentex/lib/cli/templates/temporal/Dockerfile-uv.j2 +++ /dev/null @@ -1,48 +0,0 @@ -# syntax=docker/dockerfile:1.3 -FROM python:3.12-slim -COPY --from=ghcr.io/astral-sh/uv:0.6.4 /uv /uvx /bin/ - -# Install system dependencies -RUN apt-get update && apt-get install -y \ - htop \ - vim \ - curl \ - tar \ - python3-dev \ - postgresql-client \ - build-essential \ - libpq-dev \ - gcc \ - cmake \ - netcat-openbsd \ - nodejs \ - npm \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/** - -# Install tctl (Temporal CLI) -RUN curl -L https://github.com/temporalio/tctl/releases/download/v1.18.1/tctl_1.18.1_linux_arm64.tar.gz -o /tmp/tctl.tar.gz && \ - tar -xzf /tmp/tctl.tar.gz -C /usr/local/bin && \ - chmod +x /usr/local/bin/tctl && \ - rm /tmp/tctl.tar.gz - -RUN uv pip install --system --upgrade pip setuptools wheel - -ENV UV_HTTP_TIMEOUT=1000 - -# Copy just the pyproject.toml file to optimize caching -COPY {{ project_path_from_build_root }}/pyproject.toml /app/{{ project_path_from_build_root }}/pyproject.toml - -WORKDIR /app/{{ project_path_from_build_root }} - -# Install the required Python packages using uv -RUN uv pip install --system . 
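# Note (not part of the original template): dependencies are installed from
# pyproject.toml alone in the step above, so Docker caches that layer; the
# project code copied below can change without re-resolving dependencies.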
-
-# Copy the project code
-COPY {{ project_path_from_build_root }}/project /app/{{ project_path_from_build_root }}/project
-
-# Run the ACP server using uvicorn
-CMD ["uvicorn", "project.acp:acp", "--host", "0.0.0.0", "--port", "8000"]
-
-# When we deploy the worker, we will replace the CMD with the following
-# CMD ["python", "-m", "run_worker"]
\ No newline at end of file
diff --git a/src/agentex/lib/cli/templates/temporal/Dockerfile.j2 b/src/agentex/lib/cli/templates/temporal/Dockerfile.j2
deleted file mode 100644
index 4c1798c4..00000000
--- a/src/agentex/lib/cli/templates/temporal/Dockerfile.j2
+++ /dev/null
@@ -1,48 +0,0 @@
-# syntax=docker/dockerfile:1.3
-FROM python:3.12-slim
-COPY --from=ghcr.io/astral-sh/uv:0.6.4 /uv /uvx /bin/
-
-# Install system dependencies
-RUN apt-get update && apt-get install -y \
-    htop \
-    vim \
-    curl \
-    tar \
-    python3-dev \
-    postgresql-client \
-    build-essential \
-    libpq-dev \
-    gcc \
-    cmake \
-    netcat-openbsd \
-    nodejs \
-    npm \
-    && apt-get clean \
-    && rm -rf /var/lib/apt/lists/*
-
-# Install tctl (Temporal CLI)
-RUN curl -L https://github.com/temporalio/tctl/releases/download/v1.18.1/tctl_1.18.1_linux_arm64.tar.gz -o /tmp/tctl.tar.gz && \
-    tar -xzf /tmp/tctl.tar.gz -C /usr/local/bin && \
-    chmod +x /usr/local/bin/tctl && \
-    rm /tmp/tctl.tar.gz
-
-RUN uv pip install --system --upgrade pip setuptools wheel
-
-ENV UV_HTTP_TIMEOUT=1000
-
-# Copy just the requirements file to optimize caching
-COPY {{ project_path_from_build_root }}/requirements.txt /app/{{ project_path_from_build_root }}/requirements.txt
-
-WORKDIR /app/{{ project_path_from_build_root }}
-
-# Install the required Python packages
-RUN uv pip install --system -r requirements.txt
-
-# Copy the project code
-COPY {{ project_path_from_build_root }}/project /app/{{ project_path_from_build_root }}/project
-
-# Run the ACP server using uvicorn
-CMD ["uvicorn", "project.acp:acp", "--host", "0.0.0.0", "--port", "8000"]
-
-# When we deploy the worker, we will replace the CMD with the following
-# CMD ["python", "-m", "run_worker"]
\ No newline at end of file
diff --git a/src/agentex/lib/cli/templates/temporal/README.md.j2 b/src/agentex/lib/cli/templates/temporal/README.md.j2
deleted file mode 100644
index 7dc8a7dc..00000000
--- a/src/agentex/lib/cli/templates/temporal/README.md.j2
+++ /dev/null
@@ -1,353 +0,0 @@
-# {{ agent_name }} - AgentEx Temporal Agent Template
-
-This is a starter template for building asynchronous agents with the AgentEx framework and Temporal. It provides a basic implementation of the Agent 2 Client Protocol (ACP) with Temporal workflow support to help you get started quickly.
-
-## What You'll Learn
-
-- **Tasks**: A task is a grouping mechanism for related messages. Think of it as a conversation thread or a session.
-- **Messages**: Messages are communication objects within a task. They can contain text, data, or instructions.
-- **ACP Events**: The agent responds to four main events:
-  - `task_received`: When a new task is created
-  - `task_message_received`: When a message is sent within a task
-  - `task_approved`: When a task is approved
-  - `task_canceled`: When a task is canceled
-- **Temporal Workflows**: Long-running processes that can handle complex state management and async operations
-
-## Running the Agent
-
-1. Run the agent locally:
-```bash
-agentex agents run --manifest manifest.yaml
-```
-
-The agent will start on port 8000 and print messages whenever it receives any of the ACP events.
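Once it is up, you can confirm the agent registered with the local backend using the same client the development notebook uses. A minimal sketch (assumes the backend started with `make dev` is reachable on port 5003, as in `dev.ipynb`, and that the synchronous client exposes the same `agents.list()` call the fixtures in `test_agent.py` use):

```python
from agentex import Agentex

# Connect to the local AgentEx backend (same base URL as dev.ipynb)
client = Agentex(base_url="http://localhost:5003")

# List registered agents and check that "{{ agent_name }}" appears
for agent in client.agents.list():
    print(agent.name, agent.id)
```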
-
-## What's Inside
-
-This template:
-- Sets up a basic ACP server with Temporal integration
-- Handles each of the required ACP events
-- Provides a foundation for building complex async agents
-- Includes Temporal workflow and activity definitions
-
-## Next Steps
-
-For more advanced agent development, check out the AgentEx tutorials:
-
-- **Tutorials 00-08**: Learn about building synchronous agents with ACP
-- **Tutorials 09-10**: Learn how to use Temporal to power asynchronous agents
-  - Tutorial 09: Basic Temporal workflow setup
-  - Tutorial 10: Advanced Temporal patterns and best practices
-
-These tutorials will help you understand:
-- How to handle long-running tasks
-- Implementing state machines
-- Managing complex workflows
-- Best practices for async agent development
-
-## The Manifest File
-
-The `manifest.yaml` file is your agent's configuration file. It defines:
-- How your agent should be built and packaged
-- What files are included in your agent's Docker image
-- Your agent's name and description
-- Local development settings (like the port your agent runs on)
-- Temporal worker configuration
-
-This file is essential for both local development and deployment of your agent.
-
-## Project Structure
-
-```
-{{ project_name }}/
-├── project/            # Your agent's code
-│   ├── __init__.py
-│   ├── acp.py          # ACP server and event handlers
-│   ├── workflow.py     # Temporal workflow definitions
-│   ├── activities.py   # Temporal activity definitions
-│   └── run_worker.py   # Temporal worker setup
-├── Dockerfile          # Container definition
-├── manifest.yaml       # Deployment config
-├── dev.ipynb           # Development notebook for testing
-{% if use_uv %}
-└── pyproject.toml      # Dependencies (uv)
-{% else %}
-└── requirements.txt    # Dependencies (pip)
-{% endif %}
-```
-
-## Development
-
-### 1. Customize Event Handlers
-- Modify the handlers in `acp.py` to implement your agent's logic
-- Add your own tools and capabilities
-- Implement custom state management
-
-### 2. Test Your Agent with the Development Notebook
-Use the included `dev.ipynb` Jupyter notebook to test your agent interactively:
-
-```bash
-# Start Jupyter notebook (make sure you have jupyter installed)
-jupyter notebook dev.ipynb
-
-# Or use VS Code to open the notebook directly
-code dev.ipynb
-```
-
-The notebook includes:
-- **Setup**: Connect to your local AgentEx backend
-- **Task creation**: Create a new task for the conversation
-- **Event sending**: Send events to the agent and get responses
-- **Async message subscription**: Subscribe to server-side events to receive agent responses
-- **Rich message display**: Beautiful formatting with timestamps and author information
-
-The notebook automatically uses your agent name (`{{ agent_name }}`) and demonstrates the async ACP workflow: create task → send event → subscribe to responses.
-
-### 3. Develop Temporal Workflows
-- Edit `workflow.py` to define your agent's async workflow logic
-- Modify `activities.py` to add custom activities
-- Use `run_worker.py` to configure the Temporal worker
-
-### 4. Manage Dependencies
-
-{% if use_uv %}
-You chose **uv** for package management.
Here's how to work with dependencies:
-
-```bash
-# Add new dependencies
-agentex uv add requests openai anthropic
-
-# Add Temporal-specific dependencies (already included)
-agentex uv add temporalio
-
-# Install/sync dependencies
-agentex uv sync
-
-# Run commands with uv
-uv run agentex agents run --manifest manifest.yaml
-```
-
-**Benefits of uv:**
-- Faster dependency resolution and installation
-- Better dependency isolation
-- Modern Python packaging standards
-
-{% else %}
-You chose **pip** for package management. Here's how to work with dependencies:
-
-```bash
-# Optionally create a conda env for your agent first.
-# Optionally install agentex-sdk as an editable package.
-
-# Edit requirements.txt manually to add dependencies
-echo "requests" >> requirements.txt
-echo "openai" >> requirements.txt
-
-# Temporal dependencies are already included
-# temporalio is already in requirements.txt
-
-# Install dependencies
-pip install -r requirements.txt
-```
-
-**Benefits of pip:**
-- Familiar workflow for most Python developers
-- Simple requirements.txt management
-- Wide compatibility
-{% endif %}
-
-### 5. Configure Credentials
-- Add any required credentials to your manifest.yaml
-- For local development, create a `.env` file in the project directory
-- Use `load_dotenv()` only in development mode:
-
-```python
-import os
-from dotenv import load_dotenv
-
-if os.environ.get("ENVIRONMENT") == "development":
-    load_dotenv()
-```
-
-## Local Development
-
-### 1. Start the Agentex Backend
-```bash
-# Navigate to the backend directory
-cd agentex
-
-# Start all services using Docker Compose
-make dev
-
-# Optional: In a separate terminal, use lazydocker for a better UI (everything should say "healthy")
-lzd
-```
-
-### 2. Set Up Your Agent's requirements/pyproject.toml
-```bash
-agentex uv sync [--group editable-apy]
-source .venv/bin/activate

-# OR
-conda create -n {{ project_name }} python=3.12
-conda activate {{ project_name }}
-pip install -r requirements.txt
-```
-
-### 3. Run Your Agent
-```bash
-# From this directory
-export ENVIRONMENT=development && [uv run] agentex agents run --manifest manifest.yaml
-```
-
-### 4. Interact with Your Agent
-
-Option 0: CLI (deprecated - to be replaced once a new CLI is implemented - please use the web UI for now!)
-```bash -# Submit a task via CLI -agentex tasks submit --agent {{ agent_name }} --task "Your task here" -``` - -Option 1: Web UI -```bash -# Start the local web interface -cd agentex-web -make dev - -# Then open http://localhost:3000 in your browser to chat with your agent -``` - -## Development Tips - -### Environment Variables -- Set environment variables in project/.env for any required credentials -- Or configure them in the manifest.yaml under the `env` section -- The `.env` file is automatically loaded in development mode - -### Local Testing -- Use `export ENVIRONMENT=development` before running your agent -- This enables local service discovery and debugging features -- Your agent will automatically connect to locally running services - -### Temporal-Specific Tips -- Monitor workflows in the Temporal Web UI at http://localhost:8080 -- Use the Temporal CLI for advanced workflow management -- Check workflow logs for debugging async operations - -### Debugging -- Check agent logs in the terminal where you ran the agent -- Use the web UI to inspect task history and responses -- Monitor backend services with `lzd` (LazyDocker) -- Use Temporal Web UI for workflow debugging - -### To build the agent Docker image locally (normally not necessary): - -1. Build the agent image: -```bash -agentex agents build --manifest manifest.yaml -``` - -## Advanced Features - -### Temporal Workflows -Extend your agent with sophisticated async workflows: - -```python -# In project/workflow.py -@workflow.defn -class MyWorkflow(BaseWorkflow): - async def complex_operation(self): - # Multi-step async operations - # Error handling and retries - # State management - pass -``` - -### Custom Activities -Add custom activities for external operations. **Important**: Always specify appropriate timeouts (recommended: 10 minutes): - -```python -# In project/activities.py -from datetime import timedelta -from temporalio import activity -from temporalio.common import RetryPolicy - -@activity.defn(name="call_external_api") -async def call_external_api(data): - # HTTP requests, database operations, etc. - pass - -# In your workflow, call it with a timeout: -result = await workflow.execute_activity( - "call_external_api", - data, - start_to_close_timeout=timedelta(minutes=10), # Recommended: 10 minute timeout - heartbeat_timeout=timedelta(minutes=1), # Optional: heartbeat monitoring - retry_policy=RetryPolicy(maximum_attempts=3) # Optional: retry policy -) - -# Don't forget to register your custom activities in run_worker.py: -# all_activities = get_all_activities() + [your_custom_activity_function] -``` - -### Integration with External Services -{% if use_uv %} -```bash -# Add service clients -agentex uv add httpx requests-oauthlib - -# Add AI/ML libraries -agentex uv add openai anthropic transformers - -# Add database clients -agentex uv add asyncpg redis -``` -{% else %} -```bash -# Add to requirements.txt -echo "httpx" >> requirements.txt -echo "openai" >> requirements.txt -echo "asyncpg" >> requirements.txt -pip install -r requirements.txt -``` -{% endif %} - -## Troubleshooting - -### Common Issues - -1. **Agent not appearing in web UI** - - Check if agent is running on port 8000 - - Verify `ENVIRONMENT=development` is set - - Check agent logs for errors - -2. **Temporal workflow issues** - - Check Temporal Web UI at http://localhost:8080 - - Verify Temporal server is running in backend services - - Check workflow logs for specific errors - -3. 
**Dependency issues**
-{% if use_uv %}
-   - Run `agentex uv sync` to ensure all dependencies are installed
-   - Verify temporalio is properly installed
-{% else %}
-   - Run `pip install -r requirements.txt`
-   - Check if all dependencies are correctly listed in requirements.txt
-   - Verify temporalio is installed correctly
-{% endif %}
-
-4. **Port conflicts**
-   - Check if another service is using port 8000
-   - Use `lsof -i :8000` to find conflicting processes
-
-### Temporal-Specific Troubleshooting
-
-1. **Workflow not starting**
-   - Check if Temporal server is running (`docker ps`)
-   - Verify task queue configuration in `run_worker.py`
-   - Check workflow registration in the worker
-
-2. **Activity failures**
-   - Check activity logs in the console
-   - Verify activity registration
-   - Check for timeout issues
-
-Happy building with Temporal! 🚀⚡
\ No newline at end of file
diff --git a/src/agentex/lib/cli/templates/temporal/dev.ipynb.j2 b/src/agentex/lib/cli/templates/temporal/dev.ipynb.j2
deleted file mode 100644
index d3a68303..00000000
--- a/src/agentex/lib/cli/templates/temporal/dev.ipynb.j2
+++ /dev/null
@@ -1,126 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "36834357",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from agentex import Agentex\n",
-    "\n",
-    "client = Agentex(base_url=\"http://localhost:5003\")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "d1c309d6",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "AGENT_NAME = \"{{ agent_name }}\""
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "9f6e6ef0",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# (REQUIRED) Create a new task. For Async agents, you must create a task for messages to be associated with.\n",
-    "import uuid\n",
-    "\n",
-    "rpc_response = client.agents.create_task(\n",
-    "    agent_name=AGENT_NAME,\n",
-    "    params={\n",
-    "        \"name\": f\"{str(uuid.uuid4())[:8]}-task\",\n",
-    "        \"params\": {}\n",
-    "    }\n",
-    ")\n",
-    "\n",
-    "task = rpc_response.result\n",
-    "print(task)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "b03b0d37",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Send an event to the agent\n",
-    "\n",
-    "# The response is expected to be a list of TaskMessage objects, which is a union of the following types:\n",
-    "# - TextContent: A message with just text content \n",
-    "# - DataContent: A message with JSON-serializable data content\n",
-    "# - ToolRequestContent: A message with a tool request, which contains a JSON-serializable request to call a tool\n",
-    "# - ToolResponseContent: A message with a tool response, which contains response object from a tool call in its content\n",
-    "\n",
-    "# When processing the message/send response, if you are expecting more than TextContent, such as DataContent, ToolRequestContent, or ToolResponseContent, you can process them as well\n",
-    "\n",
-    "rpc_response = client.agents.send_event(\n",
-    "    agent_name=AGENT_NAME,\n",
-    "    params={\n",
-    "        \"content\": {\"type\": \"text\", \"author\": \"user\", \"content\": \"Hello what can you do?\"},\n",
-    "        \"task_id\": task.id,\n",
-    "    }\n",
-    ")\n",
-    "\n",
-    "event = rpc_response.result\n",
-    "print(event)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "a6927cc0",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Subscribe to the async task messages produced by the agent\n",
-    "from agentex.lib.utils.dev_tools import subscribe_to_async_task_messages\n",
-    "\n",
-
"task_messages = subscribe_to_async_task_messages(\n", - " client=client,\n", - " task=task, \n", - " only_after_timestamp=event.created_at, \n", - " print_messages=True,\n", - " rich_print=True,\n", - " timeout=5,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4864e354", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.9" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/src/agentex/lib/cli/templates/temporal/environments.yaml.j2 b/src/agentex/lib/cli/templates/temporal/environments.yaml.j2 deleted file mode 100644 index a3df5e22..00000000 --- a/src/agentex/lib/cli/templates/temporal/environments.yaml.j2 +++ /dev/null @@ -1,64 +0,0 @@ -# Agent Environment Configuration -# ------------------------------ -# This file defines environment-specific settings for your agent. -# This DIFFERS from the manifest.yaml file in that it is used to program things that are ONLY per environment. - -# ********** EXAMPLE ********** -# schema_version: "v1" # This is used to validate the file structure and is not used by the agentex CLI -# environments: -# dev: -# auth: -# principal: -# user_id: "1234567890" -# user_name: "John Doe" -# user_email: "john.doe@example.com" -# user_role: "admin" -# user_permissions: "read, write, delete" -# helm_overrides: # This is used to override the global helm values.yaml file in the agentex-agent helm charts -# replicas: 3 -# resources: -# requests: -# cpu: "1000m" -# memory: "2Gi" -# limits: -# cpu: "2000m" -# memory: "4Gi" -# env: -# - name: LOG_LEVEL -# value: "DEBUG" -# - name: ENVIRONMENT -# value: "staging" -# -# kubernetes: -# # OPTIONAL - Otherwise it will be derived from separately. However, this can be used to override the derived -# # namespace and deploy it with in the same namespace that already exists for a separate agent. -# namespace: "team-{{agent_name}}" -# ********** END EXAMPLE ********** - -schema_version: "v1" # This is used to validate the file structure and is not used by the agentex CLI -environments: - dev: - auth: - principal: - user_id: # TODO: Fill in - account_id: # TODO: Fill in - helm_overrides: - # This is used to override the global helm values.yaml file in the agentex-agent helm charts - replicaCount: 2 - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "1000m" - memory: "2Gi" - temporal-worker: - enabled: true - replicaCount: 2 - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "1000m" - memory: "2Gi" \ No newline at end of file diff --git a/src/agentex/lib/cli/templates/temporal/manifest.yaml.j2 b/src/agentex/lib/cli/templates/temporal/manifest.yaml.j2 deleted file mode 100644 index a6433ce7..00000000 --- a/src/agentex/lib/cli/templates/temporal/manifest.yaml.j2 +++ /dev/null @@ -1,140 +0,0 @@ -# Agent Manifest Configuration -# --------------------------- -# This file defines how your agent should be built and deployed. - -# Build Configuration -# ------------------ -# The build config defines what gets packaged into your agent's Docker image. -# This same configuration is used whether building locally or remotely. -# -# When building: -# 1. 
All files from include_paths are collected into a build context -# 2. The context is filtered by dockerignore rules -# 3. The Dockerfile uses this context to build your agent's image -# 4. The image is pushed to a registry and used to run your agent -build: - context: - # Root directory for the build context - root: ../ # Keep this as the default root - - # Paths to include in the Docker build context - # Must include: - # - Your agent's directory (your custom agent code) - # These paths are collected and sent to the Docker daemon for building - include_paths: - - {{ project_path_from_build_root }} - - # Path to your agent's Dockerfile - # This defines how your agent's image is built from the context - # Relative to the root directory - dockerfile: {{ project_path_from_build_root }}/Dockerfile - - # Path to your agent's .dockerignore - # Filters unnecessary files from the build context - # Helps keep build context small and builds fast - dockerignore: {{ project_path_from_build_root }}/.dockerignore - - -# Local Development Configuration -# ----------------------------- -# Only used when running the agent locally -local_development: - agent: - port: 8000 # Port where your local ACP server is running - host_address: host.docker.internal # Host address for Docker networking (host.docker.internal for Docker, localhost for direct) - - # File paths for local development (relative to this manifest.yaml) - paths: - # Path to ACP server file - # Examples: - # project/acp.py (standard) - # src/server.py (custom structure) - # ../shared/acp.py (shared across projects) - # /absolute/path/acp.py (absolute path) - acp: project/acp.py - - # Path to temporal worker file - # Examples: - # project/run_worker.py (standard) - # workers/temporal.py (custom structure) - # ../shared/worker.py (shared across projects) - worker: project/run_worker.py - - -# Agent Configuration -# ----------------- -agent: - # Type of agent - either sync or async - acp_type: async - - # Unique name for your agent - # Used for task routing and monitoring - name: {{ agent_name }} - - # Description of what your agent does - # Helps with documentation and discovery - description: {{ description }} - - # Temporal workflow configuration - # This enables your agent to run as a Temporal workflow for long-running tasks - temporal: - enabled: true - workflows: - # Name of the workflow class - # Must match the @workflow.defn name in your workflow.py - - name: {{ workflow_name }} - - # Queue name for task distribution - # Used by Temporal to route tasks to your agent - # Convention: _task_queue - queue_name: {{ queue_name }} - - # Optional: Health check port for temporal worker - # Defaults to 80 if not specified - # health_check_port: 80 - - # Optional: Credentials mapping - # Maps Kubernetes secrets to environment variables - # Common credentials include: - credentials: - - env_var_name: REDIS_URL - secret_name: redis-url-secret - secret_key: url - # - env_var_name: OPENAI_API_KEY - # secret_name: openai-api-key - # secret_key: api-key - - # Optional: Set Environment variables for running your agent locally as well - # as for deployment later on - env: {} - # OPENAI_API_KEY: "" - # OPENAI_BASE_URL: "" - # OPENAI_ORG_ID: "" - - -# Deployment Configuration -# ----------------------- -# Configuration for deploying your agent to Kubernetes clusters -deployment: - # Container image configuration - image: - repository: "" # Update with your container registry - tag: "latest" # Default tag, should be versioned in production - - imagePullSecrets: 
[] # Update with your image pull secret name
-  # - name: my-registry-secret
-
-  # Global deployment settings that apply to all clusters
-  # These can be overridden in cluster-specific environments (environments.yaml)
-  global:
-    # Default replica count
-    replicaCount: 1
-
-    # Default resource requirements
-    resources:
-      requests:
-        cpu: "500m"
-        memory: "1Gi"
-      limits:
-        cpu: "1000m"
-        memory: "2Gi"
\ No newline at end of file
diff --git a/src/agentex/lib/cli/templates/temporal/project/acp.py.j2 b/src/agentex/lib/cli/templates/temporal/project/acp.py.j2
deleted file mode 100644
index ec06135c..00000000
--- a/src/agentex/lib/cli/templates/temporal/project/acp.py.j2
+++ /dev/null
@@ -1,64 +0,0 @@
-import os
-import sys
-
-# === DEBUG SETUP (AgentEx CLI Debug Support) ===
-if os.getenv("AGENTEX_DEBUG_ENABLED") == "true":
-    try:
-        import debugpy
-        from agentex.lib.utils.logging import make_logger
-
-        logger = make_logger(__name__)
-        debug_port = int(os.getenv("AGENTEX_DEBUG_PORT", "5679"))
-        debug_type = os.getenv("AGENTEX_DEBUG_TYPE", "acp")
-        wait_for_attach = os.getenv("AGENTEX_DEBUG_WAIT_FOR_ATTACH", "false").lower() == "true"
-
-        # Configure debugpy
-        debugpy.configure(subProcess=False)
-        debugpy.listen(debug_port)
-
-        logger.info(f"🐛 [{debug_type.upper()}] Debug server listening on port {debug_port}")
-
-        if wait_for_attach:
-            logger.info(f"⏳ [{debug_type.upper()}] Waiting for debugger to attach...")
-            debugpy.wait_for_client()
-            logger.info(f"✅ [{debug_type.upper()}] Debugger attached!")
-        else:
-            logger.info(f"📡 [{debug_type.upper()}] Ready for debugger attachment")
-
-    except ImportError:
-        print("❌ debugpy not available. Install with: pip install debugpy")
-        sys.exit(1)
-    except Exception as e:
-        print(f"❌ Debug setup failed: {e}")
-        sys.exit(1)
-# === END DEBUG SETUP ===
-
-from agentex.lib.sdk.fastacp.fastacp import FastACP
-from agentex.lib.types.fastacp import TemporalACPConfig
-
-
-# Create the ACP server
-acp = FastACP.create(
-    acp_type="async",
-    config=TemporalACPConfig(
-        # When deployed to the cluster, the Temporal address will automatically be set to the cluster address
-        # For local development, we set the address manually to talk to the local Temporal service set up via docker compose
-        type="temporal",
-        temporal_address=os.getenv("TEMPORAL_ADDRESS", "localhost:7233")
-    )
-)
-
-
-# Notice that we don't need to register any handlers when we use type="temporal"
-# If you look at the code in agentex.sdk.fastacp.impl.temporal_acp
-# You can see that these handlers are automatically registered when the ACP is created
-
-# @acp.on_task_create
-# This will be handled by the method in your workflow that is decorated with @workflow.run
-
-# @acp.on_task_event_send
-# This will be handled by the method in your workflow that is decorated with @workflow.signal(name=SignalName.RECEIVE_MESSAGE)
-
-# @acp.on_task_cancel
-# This does not need to be handled by your workflow.
-# It is automatically handled by the temporal client which cancels the workflow directly
\ No newline at end of file
diff --git a/src/agentex/lib/cli/templates/temporal/project/activities.py.j2 b/src/agentex/lib/cli/templates/temporal/project/activities.py.j2
deleted file mode 100644
index 6144b234..00000000
--- a/src/agentex/lib/cli/templates/temporal/project/activities.py.j2
+++ /dev/null
@@ -1,77 +0,0 @@
-"""
-Custom Temporal Activities Template
-====================================
-This file is for defining custom Temporal activities that can be executed
-by your workflow.
Activities are used for: -- External API calls -- Database operations -- File I/O operations -- Heavy computations -- Any non-deterministic operations - -IMPORTANT: All activities should have appropriate timeouts! -Default recommendation: start_to_close_timeout=timedelta(minutes=10) -""" - -from datetime import timedelta -from typing import Any, Dict - -from pydantic import BaseModel -from temporalio import activity -from temporalio.common import RetryPolicy - -from agentex.lib.utils.logging import make_logger - -logger = make_logger(__name__) - - -# Example activity parameter models -class ExampleActivityParams(BaseModel): - """Parameters for the example activity""" - data: Dict[str, Any] - task_id: str - - -# Example custom activity -@activity.defn(name="example_custom_activity") -async def example_custom_activity(params: ExampleActivityParams) -> Dict[str, Any]: - """ - Example custom activity that demonstrates best practices. - - When calling this activity from your workflow, use: - ```python - result = await workflow.execute_activity( - "example_custom_activity", - ExampleActivityParams(data={"key": "value"}, task_id=task_id), - start_to_close_timeout=timedelta(minutes=10), # Recommended: 10 minute timeout - heartbeat_timeout=timedelta(minutes=1), # Optional: heartbeat every minute - retry_policy=RetryPolicy(maximum_attempts=3) # Optional: retry up to 3 times - ) - ``` - """ - logger.info(f"Processing activity for task {params.task_id} with data: {params.data}") - - # Your activity logic here - # This could be: - # - API calls - # - Database operations - # - File processing - # - ML model inference - # - etc. - - result = { - "status": "success", - "processed_data": params.data, - "task_id": params.task_id - } - - return result - - -# Add more custom activities below as needed -# Remember to: -# 1. Use appropriate timeouts (default: 10 minutes) -# 2. Define clear parameter models with Pydantic -# 3. Handle errors appropriately -# 4. Use logging for debugging -# 5. 
Keep activities focused on a single responsibility diff --git a/src/agentex/lib/cli/templates/temporal/project/run_worker.py.j2 b/src/agentex/lib/cli/templates/temporal/project/run_worker.py.j2 deleted file mode 100644 index 1721abac..00000000 --- a/src/agentex/lib/cli/templates/temporal/project/run_worker.py.j2 +++ /dev/null @@ -1,38 +0,0 @@ -import asyncio - -from agentex.lib.core.temporal.activities import get_all_activities -from agentex.lib.core.temporal.workers.worker import AgentexWorker -from agentex.lib.utils.logging import make_logger -from agentex.lib.utils.debug import setup_debug_if_enabled -from agentex.lib.environment_variables import EnvironmentVariables - -from project.workflow import {{ workflow_class }} - - -environment_variables = EnvironmentVariables.refresh() - -logger = make_logger(__name__) - - -async def main(): - # Setup debug mode if enabled - setup_debug_if_enabled() - - task_queue_name = environment_variables.WORKFLOW_TASK_QUEUE - if task_queue_name is None: - raise ValueError("WORKFLOW_TASK_QUEUE is not set") - - all_activities = get_all_activities() + [] # add your own activities here - - # Create a worker with automatic tracing - worker = AgentexWorker( - task_queue=task_queue_name, - ) - - await worker.run( - activities=all_activities, - workflow={{ workflow_class }}, - ) - -if __name__ == "__main__": - asyncio.run(main()) \ No newline at end of file diff --git a/src/agentex/lib/cli/templates/temporal/project/workflow.py.j2 b/src/agentex/lib/cli/templates/temporal/project/workflow.py.j2 deleted file mode 100644 index ad756eb1..00000000 --- a/src/agentex/lib/cli/templates/temporal/project/workflow.py.j2 +++ /dev/null @@ -1,66 +0,0 @@ -import json - -from temporalio import workflow - -from agentex.lib import adk -from agentex.lib.types.acp import CreateTaskParams, SendEventParams -from agentex.lib.core.temporal.workflows.workflow import BaseWorkflow -from agentex.lib.core.temporal.types.workflow import SignalName -from agentex.lib.utils.logging import make_logger -from agentex.types.text_content import TextContent -from agentex.lib.environment_variables import EnvironmentVariables - -environment_variables = EnvironmentVariables.refresh() - -if environment_variables.WORKFLOW_NAME is None: - raise ValueError("Environment variable WORKFLOW_NAME is not set") - -if environment_variables.AGENT_NAME is None: - raise ValueError("Environment variable AGENT_NAME is not set") - -logger = make_logger(__name__) - -@workflow.defn(name=environment_variables.WORKFLOW_NAME) -class {{ workflow_class }}(BaseWorkflow): - """ - Minimal async workflow template for AgentEx Temporal agents. - """ - def __init__(self): - super().__init__(display_name=environment_variables.AGENT_NAME) - self._complete_task = False - - @workflow.signal(name=SignalName.RECEIVE_EVENT) - async def on_task_event_send(self, params: SendEventParams) -> None: - logger.info(f"Received task message instruction: {params}") - - # 2. Echo back the client's message to show it in the UI. This is not done by default so the agent developer has full control over what is shown to the user. - await adk.messages.create(task_id=params.task.id, content=params.event.content) - - # 3. Send a simple response message. - # In future tutorials, this is where we'll add more sophisticated response logic. - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content=f"Hello! I've received your message. 
I can't respond right now, but in future tutorials we'll see how you can get me to intelligently respond to your message.", - ), - ) - - @workflow.run - async def on_task_create(self, params: CreateTaskParams) -> str: - logger.info(f"Received task create params: {params}") - - # 1. Acknowledge that the task has been created. - await adk.messages.create( - task_id=params.task.id, - content=TextContent( - author="agent", - content=f"Hello! I've received your task. Normally you can do some state initialization here, or just pass and do nothing until you get your first event. For now I'm just acknowledging that I've received a task with the following params:\n\n{json.dumps(params.params, indent=2)}.\n\nYou should only see this message once, when the task is created. All subsequent events will be handled by the `on_task_event_send` handler.", - ), - ) - - await workflow.wait_condition( - lambda: self._complete_task, - timeout=None, # Set a timeout if you want to prevent the task from running indefinitely. Generally this is not needed. Temporal can run hundreds of millions of workflows in parallel and more. Only do this if you have a specific reason to do so. - ) - return "Task completed" diff --git a/src/agentex/lib/cli/templates/temporal/pyproject.toml.j2 b/src/agentex/lib/cli/templates/temporal/pyproject.toml.j2 deleted file mode 100644 index 9e157aa4..00000000 --- a/src/agentex/lib/cli/templates/temporal/pyproject.toml.j2 +++ /dev/null @@ -1,34 +0,0 @@ -[build-system] -requires = ["hatchling"] -build-backend = "hatchling.build" - -[project] -name = "{{ project_name }}" -version = "0.1.0" -description = "{{ description }}" -requires-python = ">=3.12" -dependencies = [ - "agentex-sdk", - "scale-gp", - "temporalio", -] - -[project.optional-dependencies] -dev = [ - "pytest", - "black", - "isort", - "flake8", - "debugpy>=1.8.15", -] - -[tool.hatch.build.targets.wheel] -packages = ["project"] - -[tool.black] -line-length = 88 -target-version = ['py312'] - -[tool.isort] -profile = "black" -line_length = 88 diff --git a/src/agentex/lib/cli/templates/temporal/requirements.txt.j2 b/src/agentex/lib/cli/templates/temporal/requirements.txt.j2 deleted file mode 100644 index 0b8ae19b..00000000 --- a/src/agentex/lib/cli/templates/temporal/requirements.txt.j2 +++ /dev/null @@ -1,5 +0,0 @@ -# Install agentex-sdk from local path -agentex-sdk - -# Scale GenAI Platform Python SDK -scale-gp diff --git a/src/agentex/lib/cli/templates/temporal/test_agent.py.j2 b/src/agentex/lib/cli/templates/temporal/test_agent.py.j2 deleted file mode 100644 index ee71f177..00000000 --- a/src/agentex/lib/cli/templates/temporal/test_agent.py.j2 +++ /dev/null @@ -1,147 +0,0 @@ -""" -Sample tests for AgentEx ACP agent. - -This test suite demonstrates how to test the main AgentEx API functions: -- Non-streaming event sending and polling -- Streaming event sending - -To run these tests: -1. Make sure the agent is running (via docker-compose or `agentex agents run`) -2. Set the AGENTEX_API_BASE_URL environment variable if not using default -3. 
Run: pytest test_agent.py -v - -Configuration: -- AGENTEX_API_BASE_URL: Base URL for the AgentEx server (default: http://localhost:5003) -- AGENT_NAME: Name of the agent to test (default: {{ agent_name }}) -""" - -import os -import uuid -import asyncio -import pytest -import pytest_asyncio -from agentex import AsyncAgentex -from agentex.types import TaskMessage -from agentex.types.agent_rpc_params import ParamsCreateTaskRequest -from agentex.types.text_content_param import TextContentParam -from test_utils.async_utils import ( - poll_for_agent_response, - send_event_and_poll_yielding, - stream_agent_response, - validate_text_in_response, - poll_messages, ) - - -# Configuration from environment variables -AGENTEX_API_BASE_URL = os.environ.get("AGENTEX_API_BASE_URL", "http://localhost:5003") -AGENT_NAME = os.environ.get("AGENT_NAME", "{{ agent_name }}") - - -@pytest_asyncio.fixture -async def client(): - """Create an AsyncAgentex client instance for testing.""" - client = AsyncAgentex(base_url=AGENTEX_API_BASE_URL) - yield client - await client.close() - - -@pytest.fixture -def agent_name(): - """Return the agent name for testing.""" - return AGENT_NAME - - -@pytest_asyncio.fixture -async def agent_id(client, agent_name): - """Retrieve the agent ID based on the agent name.""" - agents = await client.agents.list() - for agent in agents: - if agent.name == agent_name: - return agent.id - raise ValueError(f"Agent with name {agent_name} not found.") - - -class TestNonStreamingEvents: - """Test non-streaming event sending and polling.""" - - @pytest.mark.asyncio - async def test_send_event_and_poll(self, client: AsyncAgentex, agent_name: str, agent_id: str): - """Test sending an event and polling for the response.""" - # TODO: Create a task for this conversation - # task_response = await client.agents.create_task(agent_id, params=ParamsCreateTaskRequest(name=uuid.uuid1().hex)) - # task = task_response.result - # assert task is not None - - # TODO: Poll for the initial task creation message (if your agent sends one) - # async for message in poll_messages( - # client=client, - # task_id=task.id, - # timeout=30, - # sleep_interval=1.0, - # ): - # assert isinstance(message, TaskMessage) - # if message.content and message.content.type == "text" and message.content.author == "agent": - # # Check for your expected initial message - # assert "expected initial text" in message.content.content - # break - - # TODO: Send an event and poll for response using the yielding helper function - # user_message = "Your test message here" - # async for message in send_event_and_poll_yielding( - # client=client, - # agent_id=agent_id, - # task_id=task.id, - # user_message=user_message, - # timeout=30, - # sleep_interval=1.0, - # ): - # assert isinstance(message, TaskMessage) - # if message.content and message.content.type == "text" and message.content.author == "agent": - # # Check for your expected response - # assert "expected response text" in message.content.content - # break - pass - - -class TestStreamingEvents: - """Test streaming event sending.""" - - @pytest.mark.asyncio - async def test_send_event_and_stream(self, client: AsyncAgentex, agent_name: str, agent_id: str): - """Test sending an event and streaming the response.""" - # TODO: Create a task for this conversation - # task_response = await client.agents.create_task(agent_id, params=ParamsCreateTaskRequest(name=uuid.uuid1().hex)) - # task = task_response.result - # assert task is not None - - # user_message = "Your test message here" - - # # Collect
events from stream - # all_events = [] - - # async def collect_stream_events(): - # async for event in stream_agent_response( - # client=client, - # task_id=task.id, - # timeout=30, - # ): - # all_events.append(event) - - # # Start streaming task - # stream_task = asyncio.create_task(collect_stream_events()) - - # # Send the event - # event_content = TextContentParam(type="text", author="user", content=user_message) - # await client.agents.send_event(agent_id=agent_id, params={"task_id": task.id, "content": event_content}) - - # # Wait for streaming to complete - # await stream_task - - # # TODO: Add your validation here - # assert len(all_events) > 0, "No events received in streaming response" - pass - - -if __name__ == "__main__": - pytest.main([__file__, "-v"]) diff --git a/src/agentex/lib/cli/utils/__init__.py b/src/agentex/lib/cli/utils/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/agentex/lib/cli/utils/auth_utils.py b/src/agentex/lib/cli/utils/auth_utils.py deleted file mode 100644 index a323d1e2..00000000 --- a/src/agentex/lib/cli/utils/auth_utils.py +++ /dev/null @@ -1,61 +0,0 @@ -from __future__ import annotations - -import json -import base64 -from typing import Any, Dict - -from agentex.lib.sdk.config.agent_manifest import AgentManifest -from agentex.lib.sdk.config.environment_config import AgentAuthConfig - - -# DEPRECATED: Old function for backward compatibility -# Will be removed in future version -def _encode_principal_context(manifest: AgentManifest) -> str | None: # noqa: ARG001 - """ - DEPRECATED: This function is deprecated as AgentManifest no longer contains auth. - Use _encode_principal_context_from_env_config instead. - - This function is kept temporarily for backward compatibility during migration. - """ - # AgentManifest no longer has auth field - this will always return None - return None - - -def _encode_principal_context_from_env_config(auth_config: "AgentAuthConfig | None") -> str | None: - """ - Encode principal context from environment configuration. - - Args: - auth_config: AgentAuthConfig containing principal configuration - - Returns: - Base64-encoded JSON string of the principal, or None if no principal - """ - if auth_config is None: - return None - - principal = auth_config.principal - if not principal: - return None - - json_str = json.dumps(principal, separators=(',', ':')) - encoded_bytes = base64.b64encode(json_str.encode('utf-8')) - return encoded_bytes.decode('utf-8') - - -def _encode_principal_dict(principal: Dict[str, Any]) -> str | None: - """ - Encode principal dictionary directly. 
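(The helpers above all share one encoding: the principal dict is serialized as compact JSON, then base64-encoded. A round-trip sketch with a hypothetical decoder and an illustrative payload, to show the format concretely:)

```python
import base64
import json
from typing import Any


def decode_principal_context(encoded: str) -> dict[str, Any]:
    # Hypothetical inverse of the encoders above: base64 -> compact JSON -> dict.
    return json.loads(base64.b64decode(encoded.encode("utf-8")).decode("utf-8"))


# Round trip with an illustrative principal payload:
principal = {"user_id": "u-123", "roles": ["admin"]}
encoded = base64.b64encode(
    json.dumps(principal, separators=(",", ":")).encode("utf-8")
).decode("utf-8")
assert decode_principal_context(encoded) == principal
```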
- - Args: - principal: Dictionary containing principal configuration - - Returns: - Base64-encoded JSON string of the principal, or None if principal is empty - """ - if not principal: - return None - - json_str = json.dumps(principal, separators=(',', ':')) - encoded_bytes = base64.b64encode(json_str.encode('utf-8')) - return encoded_bytes.decode('utf-8') diff --git a/src/agentex/lib/cli/utils/cli_utils.py b/src/agentex/lib/cli/utils/cli_utils.py deleted file mode 100644 index 43b3fba6..00000000 --- a/src/agentex/lib/cli/utils/cli_utils.py +++ /dev/null @@ -1,16 +0,0 @@ -from __future__ import annotations - -import typer -from rich.console import Console - -console = Console() - - -def handle_questionary_cancellation( - result: str | None, operation: str = "operation" -) -> str: - """Handle questionary cancellation by checking for None and exiting gracefully""" - if result is None: - console.print(f"[yellow]{operation.capitalize()} cancelled by user[/yellow]") - raise typer.Exit(0) - return result diff --git a/src/agentex/lib/cli/utils/credential_utils.py b/src/agentex/lib/cli/utils/credential_utils.py deleted file mode 100644 index 5ad2471f..00000000 --- a/src/agentex/lib/cli/utils/credential_utils.py +++ /dev/null @@ -1,103 +0,0 @@ -import subprocess - -from rich.prompt import Prompt, Confirm -from rich.console import Console - -from agentex.lib.types.credentials import CredentialMapping - -console = Console() - - -def check_secret_exists(secret_name: str, namespace: str) -> bool: - """Check if a Kubernetes secret exists in the given namespace.""" - try: - result = subprocess.run( - ["kubectl", "get", "secret", secret_name, "-n", namespace], - capture_output=True, - text=True, - check=False, - ) - return result.returncode == 0 - except Exception: - return False - - -def create_env_var_secret(credential: CredentialMapping, namespace: str) -> bool: - """Create a generic secret for environment variable credentials.""" - console.print( - f"[yellow]Secret '{credential.secret_name}' not found in namespace '{namespace}'[/yellow]" - ) - - if not Confirm.ask( - f"Would you like to create the secret '{credential.secret_name}'?" 
- ): - return False - - # Prompt for the secret value - secret_value = Prompt.ask( - f"Enter the value for '{credential.secret_key}'", password=True - ) - - try: - # Create the secret using kubectl - subprocess.run( - [ - "kubectl", - "create", - "secret", - "generic", - credential.secret_name, - f"--from-literal={credential.secret_key}={secret_value}", - "-n", - namespace, - ], - capture_output=True, - text=True, - check=True, - ) - - console.print( - f"[green]✓ Created secret '{credential.secret_name}' in namespace '{namespace}'[/green]" - ) - return True - - except subprocess.CalledProcessError as e: - console.print(f"[red]✗ Failed to create secret: {e.stderr}[/red]") - return False - - -# def create_image_pull_secret(credential: ImagePullCredential, namespace: str) -> bool: -# """Create an image pull secret with interactive prompts.""" -# console.print(f"[yellow]Image pull secret '{credential.secret_name}' not found in namespace '{namespace}'[/yellow]") - -# if not Confirm.ask(f"Would you like to create the image pull secret '{credential.secret_name}'?"): -# return False - -# # Prompt for registry details -# registry_server = Prompt.ask("Docker registry server (e.g., docker.io, gcr.io)") -# username = Prompt.ask("Username") -# password = Prompt.ask("Password", password=True) -# email = Prompt.ask("Email (optional)", default="") - -# try: -# # Create the image pull secret using kubectl -# cmd = [ -# "kubectl", "create", "secret", "docker-registry", -# credential.secret_name, -# f"--docker-server={registry_server}", -# f"--docker-username={username}", -# f"--docker-password={password}", -# "-n", namespace -# ] - -# if email: -# cmd.append(f"--docker-email={email}") - -# result = subprocess.run(cmd, capture_output=True, text=True, check=True) - -# console.print(f"[green]✓ Created image pull secret '{credential.secret_name}' in namespace '{namespace}'[/green]") -# return True - -# except subprocess.CalledProcessError as e: -# console.print(f"[red]✗ Failed to create image pull secret: {e.stderr}[/red]") -# return False diff --git a/src/agentex/lib/cli/utils/exceptions.py b/src/agentex/lib/cli/utils/exceptions.py deleted file mode 100644 index efd41b6c..00000000 --- a/src/agentex/lib/cli/utils/exceptions.py +++ /dev/null @@ -1,6 +0,0 @@ -class HelmError(Exception): - """An error occurred during helm operations""" - - -class DeploymentError(Exception): - """An error occurred during deployment""" diff --git a/src/agentex/lib/cli/utils/kubectl_utils.py b/src/agentex/lib/cli/utils/kubectl_utils.py deleted file mode 100644 index 4213233c..00000000 --- a/src/agentex/lib/cli/utils/kubectl_utils.py +++ /dev/null @@ -1,137 +0,0 @@ -from __future__ import annotations - -import subprocess - -from kubernetes import client, config -from rich.console import Console -from kubernetes.client.rest import ApiException - -from agentex.lib.utils.logging import make_logger -from agentex.lib.cli.utils.exceptions import DeploymentError - -logger = make_logger(__name__) -console = Console() - - -class KubernetesClientManager: - """Manages Kubernetes clients for different contexts""" - - def __init__(self): - self._clients: dict[str, client.CoreV1Api] = {} - - def get_client(self, context: str | None = None) -> client.CoreV1Api: - """Get a Kubernetes client for the specified context""" - if context is None: - context = get_current_context() - - if context not in self._clients: - try: - # Load config for specific context - config.load_kube_config(context=context) - self._clients[context] = client.CoreV1Api() -
logger.info(f"Created Kubernetes client for context: {context}") - except Exception as e: - raise DeploymentError( - f"Failed to create Kubernetes client for context '{context}': {e}" - ) from e - - return self._clients[context] - - def clear_cache(self): - """Clear cached clients (useful when contexts change)""" - self._clients.clear() - - -def get_current_context() -> str: - """Get the current kubectl context""" - try: - contexts, active_context = config.list_kube_config_contexts() - if active_context is None: - raise DeploymentError("No active kubectl context found") - return active_context["name"] - except Exception as e: - raise DeploymentError(f"Failed to get current kubectl context: {e}") from e - - -# Global client manager instance -_client_manager = KubernetesClientManager() - - -def list_available_contexts() -> list[str]: - """List all available kubectl contexts""" - try: - contexts, _ = config.list_kube_config_contexts() - return [ctx["name"] for ctx in contexts] # type: ignore[index] - except Exception as e: - raise DeploymentError(f"Failed to list kubectl contexts: {e}") from e - - -def validate_cluster_context(cluster_name: str) -> bool: - """Check if a cluster name corresponds to an available kubectl context""" - try: - available_contexts = list_available_contexts() - return cluster_name in available_contexts - except DeploymentError: - return False - - -def switch_kubectl_context(cluster_name: str) -> None: - """Switch to the specified kubectl context""" - try: - # Use subprocess for context switching as it's a local kubeconfig operation - subprocess.run( - ["kubectl", "config", "use-context", cluster_name], - capture_output=True, - text=True, - check=True, - ) - # Clear client cache since context changed - _client_manager.clear_cache() - logger.info(f"Switched to kubectl context: {cluster_name}") - except (subprocess.CalledProcessError, FileNotFoundError) as e: - raise DeploymentError( - f"Failed to switch to kubectl context '{cluster_name}': {e}" - ) from e - - -def validate_namespace(namespace: str, context: str | None = None) -> bool: - """Check if a namespace exists in the specified cluster context""" - try: - k8s_client = _client_manager.get_client(context) - k8s_client.read_namespace(name=namespace) - return True - except ApiException as e: - if e.status == 404: - return False - raise DeploymentError(f"Failed to validate namespace '{namespace}': {e}") from e - except Exception as e: - raise DeploymentError(f"Failed to validate namespace '{namespace}': {e}") from e - - -def check_and_switch_cluster_context(cluster_name: str) -> None: - """Check and switch to the specified kubectl context""" - # Validate cluster context - if not validate_cluster_context(cluster_name): - available_contexts = list_available_contexts() - raise DeploymentError( - f"Cluster '{cluster_name}' not found in kubectl contexts.\n" - f"Available contexts: {', '.join(available_contexts)}\n" - f"Please ensure you have a valid kubeconfig for this cluster." 
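(A usage sketch for this file's context helpers; the cluster and namespace names are illustrative, not SDK defaults. The flow mirrors what the deploy path does: validate the context, switch to it, then reuse the cached client:)

```python
# Sketch: deploy-time guard built from the helpers above (context and
# namespace names are hypothetical).
from agentex.lib.cli.utils.kubectl_utils import (
    check_and_switch_cluster_context,
    get_k8s_client,
    validate_namespace,
)

check_and_switch_cluster_context("staging-cluster")  # raises DeploymentError if unknown
if not validate_namespace("agents", context="staging-cluster"):
    raise SystemExit("namespace 'agents' does not exist in staging-cluster")

# The client manager caches one CoreV1Api per context, so repeated calls are cheap.
pods = get_k8s_client("staging-cluster").list_namespaced_pod(namespace="agents")
print([p.metadata.name for p in pods.items])
```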
- ) - - # Switch to the specified cluster context - current_context = get_current_context() - if current_context != cluster_name: - console.print( - f"[blue]ℹ[/blue] Switching from context '{current_context}' to '{cluster_name}'" - ) - switch_kubectl_context(cluster_name) - else: - console.print( - f"[blue]ℹ[/blue] Using current kubectl context: [bold]{cluster_name}[/bold]" - ) - - -def get_k8s_client(context: str | None = None) -> client.CoreV1Api: - """Get a Kubernetes client for the specified context (or current context if None)""" - return _client_manager.get_client(context) diff --git a/src/agentex/lib/cli/utils/kubernetes_secrets_utils.py b/src/agentex/lib/cli/utils/kubernetes_secrets_utils.py deleted file mode 100644 index 0a67a31e..00000000 --- a/src/agentex/lib/cli/utils/kubernetes_secrets_utils.py +++ /dev/null @@ -1,187 +0,0 @@ -from __future__ import annotations - -import base64 - -from kubernetes import client -from rich.console import Console -from kubernetes.client.rest import ApiException - -from agentex.lib.utils.logging import make_logger -from agentex.lib.cli.utils.kubectl_utils import get_k8s_client - -logger = make_logger(__name__) -console = Console() - -KUBERNETES_SECRET_TYPE_OPAQUE = "Opaque" -KUBERNETES_SECRET_TYPE_DOCKERCONFIGJSON = "kubernetes.io/dockerconfigjson" -KUBERNETES_SECRET_TYPE_BASIC_AUTH = "kubernetes.io/basic-auth" -KUBERNETES_SECRET_TYPE_TLS = "kubernetes.io/tls" - -VALID_SECRET_TYPES = [ - KUBERNETES_SECRET_TYPE_OPAQUE, - KUBERNETES_SECRET_TYPE_DOCKERCONFIGJSON, - KUBERNETES_SECRET_TYPE_BASIC_AUTH, - KUBERNETES_SECRET_TYPE_TLS, -] - -KUBERNETES_SECRET_TO_MANIFEST_KEY = { - KUBERNETES_SECRET_TYPE_OPAQUE: "credentials", - KUBERNETES_SECRET_TYPE_DOCKERCONFIGJSON: "imagePullSecrets", -} - - -def _create_secret_object( - name: str, data: dict[str, str], secret_type: str = KUBERNETES_SECRET_TYPE_OPAQUE -) -> client.V1Secret: - """Helper to create a V1Secret object with multiple key-value pairs""" - return client.V1Secret( - metadata=client.V1ObjectMeta(name=name), - type=secret_type, - string_data=data, # Use string_data for automatic base64 encoding - ) - - -def create_secret_with_data( - name: str, data: dict[str, str], namespace: str, context: str | None = None -) -> None: - """Create a new Kubernetes secret with multiple key-value pairs""" - v1 = get_k8s_client(context) - - try: - # Check if secret exists - v1.read_namespaced_secret(name=name, namespace=namespace) - console.print( - f"[red]Error: Secret '{name}' already exists in namespace '{namespace}'[/red]" - ) - return - except ApiException as e: - if e.status != 404: # If error is not "Not Found" - raise - - # Create the secret - secret = _create_secret_object(name, data) - - try: - v1.create_namespaced_secret(namespace=namespace, body=secret) - console.print( - f"[green]Created secret '{name}' in namespace '{namespace}' with {len(data)} keys[/green]" - ) - except ApiException as e: - console.print(f"[red]Error creating secret: {e.reason}[/red]") - raise RuntimeError(f"Failed to create secret: {str(e)}") from e - - -def update_secret_with_data( - name: str, data: dict[str, str], namespace: str, context: str | None = None -) -> None: - """Create or update a Kubernetes secret with multiple key-value pairs""" - v1 = get_k8s_client(context) - secret = _create_secret_object(name, data) - - try: - # Try to update first - v1.replace_namespaced_secret(name=name, namespace=namespace, body=secret) - console.print( - f"[green]Updated secret '{name}' in namespace '{namespace}' with {len(data)}
keys[/green]" - ) - except ApiException as e: - if e.status == 404: - # Secret doesn't exist, create it - try: - v1.create_namespaced_secret(namespace=namespace, body=secret) - console.print( - f"[green]Created secret '{name}' in namespace '{namespace}' with {len(data)} keys[/green]" - ) - except ApiException as create_error: - console.print( - f"[red]Error creating secret: {create_error.reason}[/red]" - ) - raise RuntimeError( - f"Failed to create secret: {str(create_error)}" - ) from create_error - else: - console.print(f"[red]Error updating secret: {e.reason}[/red]") - raise RuntimeError(f"Failed to update secret: {str(e)}") from e - - -def create_image_pull_secret_with_data( - name: str, data: dict[str, str], namespace: str, context: str | None = None -) -> None: - """Create a new Kubernetes image pull secret with dockerconfigjson type""" - v1 = get_k8s_client(context) - - try: - # Check if secret exists - v1.read_namespaced_secret(name=name, namespace=namespace) - console.print( - f"[red]Error: Secret '{name}' already exists in namespace '{namespace}'[/red]" - ) - return - except ApiException as e: - if e.status != 404: # If error is not "Not Found" - raise - - # Create the secret with dockerconfigjson type - secret = _create_secret_object(name, data, KUBERNETES_SECRET_TYPE_DOCKERCONFIGJSON) - - try: - v1.create_namespaced_secret(namespace=namespace, body=secret) - console.print( - f"[green]Created image pull secret '{name}' in namespace '{namespace}' with {len(data)} keys[/green]" - ) - except ApiException as e: - console.print(f"[red]Error creating image pull secret: {e.reason}[/red]") - raise RuntimeError(f"Failed to create image pull secret: {str(e)}") from e - - -def update_image_pull_secret_with_data( - name: str, data: dict[str, str], namespace: str, context: str | None = None -) -> None: - """Create or update a Kubernetes image pull secret with dockerconfigjson type""" - v1 = get_k8s_client(context) - secret = _create_secret_object(name, data, KUBERNETES_SECRET_TYPE_DOCKERCONFIGJSON) - - try: - # Try to update first - v1.replace_namespaced_secret(name=name, namespace=namespace, body=secret) - console.print( - f"[green]Updated image pull secret '{name}' in namespace '{namespace}' with {len(data)} keys[/green]" - ) - except ApiException as e: - if e.status == 404: - # Secret doesn't exist, create it - try: - v1.create_namespaced_secret(namespace=namespace, body=secret) - console.print( - f"[green]Created image pull secret '{name}' in namespace '{namespace}' with {len(data)} keys[/green]" - ) - except ApiException as create_error: - console.print( - f"[red]Error creating image pull secret: {create_error.reason}[/red]" - ) - raise RuntimeError( - f"Failed to create image pull secret: {str(create_error)}" - ) from create_error - else: - console.print(f"[red]Error updating image pull secret: {e.reason}[/red]") - raise RuntimeError(f"Failed to update image pull secret: {str(e)}") from e - - -def get_secret_data( - name: str, namespace: str, context: str | None = None -) -> dict[str, str]: - """Get the actual data from a secret""" - v1 = get_k8s_client(context) - try: - secret = v1.read_namespaced_secret(name=name, namespace=namespace) - if secret.data: # type: ignore[union-attr] - # Decode base64 data - return { - key: base64.b64decode(value).decode("utf-8") - for key, value in secret.data.items() # type: ignore[union-attr] - } - return {} - except ApiException as e: - if e.status == 404: - return {} - raise RuntimeError(f"Failed to get secret data: {str(e)}") from e diff --git 
a/src/agentex/lib/cli/utils/path_utils.py b/src/agentex/lib/cli/utils/path_utils.py deleted file mode 100644 index 7aee3a65..00000000 --- a/src/agentex/lib/cli/utils/path_utils.py +++ /dev/null @@ -1,145 +0,0 @@ -from __future__ import annotations - -from typing import Dict -from pathlib import Path - -from agentex.lib.utils.logging import make_logger -from agentex.lib.sdk.config.agent_manifest import AgentManifest - -logger = make_logger(__name__) - - -class PathResolutionError(Exception): - """An error occurred during path resolution""" - - -def resolve_and_validate_path(base_path: Path, configured_path: str, file_type: str) -> Path: - """Resolve and validate a configured path""" - path_obj = Path(configured_path) - - if path_obj.is_absolute(): - # Absolute path - resolve to canonical form - resolved_path = path_obj.resolve() - else: - # Relative path - resolve relative to manifest directory - resolved_path = (base_path / configured_path).resolve() - - # Validate the file exists - if not resolved_path.exists(): - raise PathResolutionError( - f"{file_type} file not found: {resolved_path}\n" - f" Configured path: {configured_path}\n" - f" Resolved from manifest: {base_path}" - ) - - # Validate it's actually a file - if not resolved_path.is_file(): - raise PathResolutionError(f"{file_type} path is not a file: {resolved_path}") - - return resolved_path - - -def validate_path_security(resolved_path: Path, manifest_dir: Path) -> None: - """Basic security validation for resolved paths""" - try: - # Ensure the resolved path is accessible - resolved_path.resolve() - - # Optional: Add warnings for paths that go too far up - try: - # Check if path goes more than 3 levels up from manifest - relative_to_manifest = resolved_path.relative_to(manifest_dir.parent.parent.parent) - if str(relative_to_manifest).startswith(".."): - logger.warning( - f"Path goes significantly outside project structure: {resolved_path}" - ) - except ValueError: - # Path is outside the tree - that's okay, just log it - logger.info(f"Using path outside manifest directory tree: {resolved_path}") - - except Exception as e: - raise PathResolutionError(f"Path resolution failed: {resolved_path} - {str(e)}") from e - - -def get_file_paths(manifest: AgentManifest, manifest_path: str) -> Dict[str, Path | None]: - """Get resolved file paths from manifest configuration""" - manifest_dir = Path(manifest_path).parent.resolve() - - # Use configured paths or fall back to defaults for backward compatibility - if manifest.local_development and manifest.local_development.paths: - paths_config = manifest.local_development.paths - - # Resolve ACP path - acp_path = resolve_and_validate_path(manifest_dir, paths_config.acp, "ACP server") - validate_path_security(acp_path, manifest_dir) - - # Resolve worker path if specified - worker_path = None - if paths_config.worker: - worker_path = resolve_and_validate_path( - manifest_dir, paths_config.worker, "Temporal worker" - ) - validate_path_security(worker_path, manifest_dir) - else: - # Backward compatibility: use old hardcoded structure - project_dir = manifest_dir / "project" - acp_path = (project_dir / "acp.py").resolve() - worker_path = (project_dir / "run_worker.py").resolve() if manifest.agent.is_temporal_agent() else None - - # Validate backward compatibility paths - if not acp_path.exists(): - raise PathResolutionError(f"ACP file not found: {acp_path}") - - if worker_path and not worker_path.exists(): - raise PathResolutionError(f"Worker file not found: {worker_path}") - - return { - "acp": 
acp_path, - "worker": worker_path, - "acp_dir": acp_path.parent, - "worker_dir": worker_path.parent if worker_path else None, - } - - -def calculate_uvicorn_target_for_local(acp_path: Path, manifest_dir: Path) -> str: - """Calculate the uvicorn target path for local development""" - # Ensure both paths are resolved to canonical form for accurate comparison - acp_resolved = acp_path.resolve() - manifest_resolved = manifest_dir.resolve() - - try: - # Try to use path relative to manifest directory - acp_relative = acp_resolved.relative_to(manifest_resolved) - # Convert to module notation: project/acp.py -> project.acp - module_path = str(acp_relative.with_suffix('')) # Remove .py extension - module_path = module_path.replace('/', '.') # Convert slashes to dots - module_path = module_path.replace('\\', '.') # Handle Windows paths - return module_path - except ValueError: - # Path cannot be made relative - use absolute file path - logger.warning(f"ACP file {acp_resolved} cannot be made relative to manifest directory {manifest_resolved}, using absolute file path") - return str(acp_resolved) - - -def calculate_docker_acp_module(manifest: AgentManifest, manifest_path: str) -> str: - """Calculate the Python module path for the ACP file in the Docker container - - This should return the same module notation as local development for consistency. - """ - # Use the same logic as local development - manifest_dir = Path(manifest_path).parent - - # Get the configured ACP path (could be relative or absolute) - if manifest.local_development and manifest.local_development.paths: - acp_config_path = manifest.local_development.paths.acp - else: - acp_config_path = "project/acp.py" # Default - - # Resolve to actual file path - acp_path = resolve_and_validate_path(manifest_dir, acp_config_path, "ACP") - - # Use the same module calculation as local development - return calculate_uvicorn_target_for_local(acp_path, manifest_dir) - - - \ No newline at end of file diff --git a/src/agentex/lib/core/__init__.py b/src/agentex/lib/core/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/agentex/lib/core/adapters/__init__.py b/src/agentex/lib/core/adapters/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/agentex/lib/core/adapters/llm/__init__.py b/src/agentex/lib/core/adapters/llm/__init__.py deleted file mode 100644 index 8b137891..00000000 --- a/src/agentex/lib/core/adapters/llm/__init__.py +++ /dev/null @@ -1 +0,0 @@ - diff --git a/src/agentex/lib/core/adapters/llm/adapter_litellm.py b/src/agentex/lib/core/adapters/llm/adapter_litellm.py deleted file mode 100644 index 7935f5f4..00000000 --- a/src/agentex/lib/core/adapters/llm/adapter_litellm.py +++ /dev/null @@ -1,51 +0,0 @@ -from typing import override -from collections.abc import Generator, AsyncGenerator - -import litellm as llm - -from agentex.lib.utils.logging import make_logger -from agentex.lib.types.llm_messages import Completion -from agentex.lib.core.adapters.llm.port import LLMGateway - -logger = make_logger(__name__) - - -class LiteLLMGateway(LLMGateway): - @override - def completion(self, *args, **kwargs) -> Completion: - if kwargs.get("stream", True): - raise ValueError( - "Please use self.completion_stream instead of self.completion to stream responses" - ) - - response = llm.completion(*args, **kwargs) - return Completion.model_validate(response) - - @override - def completion_stream(self, *args, **kwargs) -> Generator[Completion, None, None]: - if not kwargs.get("stream"): - raise ValueError("To 
use streaming, please set stream=True in the kwargs") - - for chunk in llm.completion(*args, **kwargs): - yield Completion.model_validate(chunk) - - @override - async def acompletion(self, *args, **kwargs) -> Completion: - if kwargs.get("stream", True): - raise ValueError( - "Please use self.acompletion_stream instead of self.acompletion to stream responses" - ) - - # Return a single completion for non-streaming - response = await llm.acompletion(*args, **kwargs) - return Completion.model_validate(response) - - @override - async def acompletion_stream( - self, *args, **kwargs - ) -> AsyncGenerator[Completion, None]: - if not kwargs.get("stream"): - raise ValueError("To use streaming, please set stream=True in the kwargs") - - async for chunk in await llm.acompletion(*args, **kwargs): # type: ignore[misc] - yield Completion.model_validate(chunk) diff --git a/src/agentex/lib/core/adapters/llm/adapter_sgp.py b/src/agentex/lib/core/adapters/llm/adapter_sgp.py deleted file mode 100644 index 31098246..00000000 --- a/src/agentex/lib/core/adapters/llm/adapter_sgp.py +++ /dev/null @@ -1,60 +0,0 @@ -from __future__ import annotations - -import os -from typing import override -from collections.abc import Generator, AsyncGenerator - -from scale_gp import SGPClient, AsyncSGPClient - -from agentex.lib.utils.logging import make_logger -from agentex.lib.types.llm_messages import Completion -from agentex.lib.core.adapters.llm.port import LLMGateway - -logger = make_logger(__name__) - - -class SGPLLMGateway(LLMGateway): - def __init__(self, sgp_api_key: str | None = None): - self.sync_client = SGPClient(api_key=os.environ.get("SGP_API_KEY", sgp_api_key)) - self.async_client = AsyncSGPClient( - api_key=os.environ.get("SGP_API_KEY", sgp_api_key) - ) - - @override - def completion(self, *args, **kwargs) -> Completion: - if kwargs.get("stream", True): - raise ValueError( - "Please use self.completion_stream instead of self.completion to stream responses" - ) - - response = self.sync_client.beta.chat.completions.create(*args, **kwargs) - return Completion.model_validate(response) - - @override - def completion_stream(self, *args, **kwargs) -> Generator[Completion, None, None]: - if not kwargs.get("stream"): - raise ValueError("To use streaming, please set stream=True in the kwargs") - - for chunk in self.sync_client.beta.chat.completions.create(*args, **kwargs): - yield Completion.model_validate(chunk) - - @override - async def acompletion(self, *args, **kwargs) -> Completion: - if kwargs.get("stream", True): - raise ValueError( - "Please use self.acompletion_stream instead of self.acompletion to stream responses" - ) - - # Return a single completion for non-streaming - response = await self.async_client.beta.chat.completions.create(*args, **kwargs) - return Completion.model_validate(response) - - @override - async def acompletion_stream( - self, *args, **kwargs - ) -> AsyncGenerator[Completion, None]: - if not kwargs.get("stream"): - raise ValueError("To use streaming, please set stream=True in the kwargs") - - async for chunk in self.async_client.beta.chat.completions.create(*args, **kwargs): # type: ignore[misc] - yield Completion.model_validate(chunk) diff --git a/src/agentex/lib/core/adapters/llm/port.py b/src/agentex/lib/core/adapters/llm/port.py deleted file mode 100644 index 4daaade4..00000000 --- a/src/agentex/lib/core/adapters/llm/port.py +++ /dev/null @@ -1,24 +0,0 @@ -from abc import ABC, abstractmethod -from collections.abc import Generator, AsyncGenerator - -from agentex.lib.types.llm_messages 
import Completion - - -class LLMGateway(ABC): - @abstractmethod - def completion(self, *args, **kwargs) -> Completion: - raise NotImplementedError - - @abstractmethod - def completion_stream(self, *args, **kwargs) -> Generator[Completion, None, None]: - raise NotImplementedError - - @abstractmethod - async def acompletion(self, *args, **kwargs) -> Completion: - raise NotImplementedError - - @abstractmethod - async def acompletion_stream( - self, *args, **kwargs - ) -> AsyncGenerator[Completion, None]: - raise NotImplementedError diff --git a/src/agentex/lib/core/adapters/streams/adapter_redis.py b/src/agentex/lib/core/adapters/streams/adapter_redis.py deleted file mode 100644 index 7b355ee9..00000000 --- a/src/agentex/lib/core/adapters/streams/adapter_redis.py +++ /dev/null @@ -1,133 +0,0 @@ -from __future__ import annotations - -import os -import json -import asyncio -from typing import Any, Annotated, override -from collections.abc import AsyncIterator - -import redis.asyncio as redis -from fastapi import Depends - -from agentex.lib.utils.logging import make_logger -from agentex.lib.core.adapters.streams.port import StreamRepository - -logger = make_logger(__name__) - - -class RedisStreamRepository(StreamRepository): - """ - A simplified Redis implementation of the EventStreamRepository interface. - Optimized for text/JSON streaming with SSE. - """ - - def __init__(self, redis_url: str | None = None): - # Get Redis URL from environment if not provided - self.redis_url = redis_url or os.environ.get( - "REDIS_URL", "redis://localhost:6379" - ) - self.redis = redis.from_url(self.redis_url) - - @override - async def send_event(self, topic: str, event: dict[str, Any]) -> str: - """ - Send an event to a Redis stream. - - Args: - topic: The stream topic/name - event: The event data (will be JSON serialized) - - Returns: - The message ID from Redis - """ - try: - # Simple JSON serialization - event_json = json.dumps(event) - - # # Uncomment to debug - # logger.info(f"Sending event to Redis stream {topic}: {event_json}") - - # Add to Redis stream with a reasonable max length - message_id = await self.redis.xadd( - name=topic, - fields={"data": event_json}, - ) - - return message_id - except Exception as e: - logger.error(f"Error publishing to Redis stream {topic}: {e}") - raise - - @override - async def subscribe( - self, topic: str, last_id: str = "$" - ) -> AsyncIterator[dict[str, Any]]: - """ - Subscribe to a Redis stream and yield events as they come in. 
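(Taken together, `send_event`/`xadd` and `subscribe`/`xread` form a minimal pub/sub loop. A consumer sketch for the repository above; the topic name is illustrative and a reachable Redis is assumed:)

```python
# Sketch: producing and consuming one event with RedisStreamRepository
# (assumes Redis is reachable via REDIS_URL or localhost:6379).
import asyncio

from agentex.lib.core.adapters.streams.adapter_redis import RedisStreamRepository


async def main() -> None:
    streams = RedisStreamRepository()
    await streams.send_event("task/123/stream", {"type": "text", "delta": "hello"})  # illustrative topic
    async for event in streams.subscribe("task/123/stream", last_id="0"):  # "0" = replay from start
        print(event)  # {'type': 'text', 'delta': 'hello'}
        break  # stop after the first event; real consumers keep iterating


asyncio.run(main())
```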
- - Args: - topic: The stream topic to subscribe to - last_id: Where to start reading from: - "$" = only new messages (default) - "0" = all messages from the beginning - "<message-id>" = messages after that specific message ID - - Yields: - Parsed event data - """ - - current_id = last_id - - while True: - try: - # Read new messages with a reasonable block time - streams = {topic: current_id} - response = await self.redis.xread( - streams=streams, - count=10, # Get up to 10 messages at a time (reduces overprocessing) - block=2000, # Wait up to 2 seconds for new messages - ) - - if response: - for _, messages in response: - for message_id, fields in messages: - # Update the last_id for next iteration - current_id = message_id - - # Extract and parse the JSON data - if b"data" in fields: - try: - data_str = fields[b"data"].decode("utf-8") - event = json.loads(data_str) - yield event - except Exception as e: - logger.warning( - f"Failed to parse event from Redis stream: {e}" - ) - - # Small sleep to prevent tight loops - await asyncio.sleep(0.01) - - except Exception as e: - logger.error(f"Error reading from Redis stream: {e}") - await asyncio.sleep(1) # Back off on errors - - @override - async def cleanup_stream(self, topic: str) -> None: - """ - Clean up a Redis stream. - - Args: - topic: The stream topic to clean up - """ - try: - await self.redis.delete(topic) - logger.info(f"Cleaned up Redis stream: {topic}") - except Exception as e: - logger.error(f"Error cleaning up Redis stream {topic}: {e}") - raise - - -DRedisStreamRepository = Annotated[ - RedisStreamRepository | None, Depends(RedisStreamRepository) -] diff --git a/src/agentex/lib/core/adapters/streams/port.py b/src/agentex/lib/core/adapters/streams/port.py deleted file mode 100644 index 31b5eda6..00000000 --- a/src/agentex/lib/core/adapters/streams/port.py +++ /dev/null @@ -1,52 +0,0 @@ -from __future__ import annotations - -from abc import ABC, abstractmethod -from typing import Any -from collections.abc import AsyncIterator - - -class StreamRepository(ABC): - """ - Interface for event streaming repositories. - Used to publish and subscribe to event streams. - """ - - @abstractmethod - async def send_event(self, topic: str, event: dict[str, Any]) -> str: - """ - Send an event to a stream. - - Args: - topic: The stream topic/name - event: The event data - - Returns: - The message ID or other identifier - """ - raise NotImplementedError - - @abstractmethod - async def subscribe( - self, topic: str, last_id: str = "$" - ) -> AsyncIterator[dict[str, Any]]: - """ - Subscribe to a stream and yield events as they come in. - - Args: - topic: The stream topic to subscribe to - last_id: Where to start reading from - - Yields: - Event data - """ - raise NotImplementedError - - @abstractmethod - async def cleanup_stream(self, topic: str) -> None: - """ - Clean up a stream.
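(The port is small enough to fake in tests; a minimal in-memory implementation sketch that satisfies the same interface — illustrative only, not shipped with the SDK:)

```python
# Sketch: an in-process StreamRepository useful as a unit-test double.
# last_id is ignored here; each queue only delivers events sent after
# subscription, unlike the Redis implementation.
import asyncio
from typing import Any
from collections.abc import AsyncIterator

from agentex.lib.core.adapters.streams.port import StreamRepository


class InMemoryStreamRepository(StreamRepository):
    def __init__(self) -> None:
        self._queues: dict[str, asyncio.Queue] = {}

    async def send_event(self, topic: str, event: dict[str, Any]) -> str:
        self._queues.setdefault(topic, asyncio.Queue()).put_nowait(event)
        return topic  # no real message id in memory

    async def subscribe(self, topic: str, last_id: str = "$") -> AsyncIterator[dict[str, Any]]:
        queue = self._queues.setdefault(topic, asyncio.Queue())
        while True:
            yield await queue.get()

    async def cleanup_stream(self, topic: str) -> None:
        self._queues.pop(topic, None)
```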
- - Args: - topic: The stream topic to clean up - """ - raise NotImplementedError diff --git a/src/agentex/lib/core/clients/__init__.py b/src/agentex/lib/core/clients/__init__.py deleted file mode 100644 index 8b137891..00000000 --- a/src/agentex/lib/core/clients/__init__.py +++ /dev/null @@ -1 +0,0 @@ - diff --git a/src/agentex/lib/core/clients/temporal/__init__.py b/src/agentex/lib/core/clients/temporal/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/agentex/lib/core/clients/temporal/temporal_client.py b/src/agentex/lib/core/clients/temporal/temporal_client.py deleted file mode 100644 index 76b419b2..00000000 --- a/src/agentex/lib/core/clients/temporal/temporal_client.py +++ /dev/null @@ -1,190 +0,0 @@ -from __future__ import annotations - -from typing import Any -from datetime import timedelta -from collections.abc import Callable - -from temporalio.client import Client, WorkflowExecutionStatus -from temporalio.common import RetryPolicy as TemporalRetryPolicy, WorkflowIDReusePolicy -from temporalio.service import RPCError, RPCStatusCode - -from agentex.lib.utils.logging import make_logger -from agentex.lib.utils.model_utils import BaseModel -from agentex.lib.core.clients.temporal.types import ( - TaskStatus, - RetryPolicy, - WorkflowState, - DuplicateWorkflowPolicy, -) -from agentex.lib.core.clients.temporal.utils import get_temporal_client - -logger = make_logger(__name__) - -DEFAULT_RETRY_POLICY = RetryPolicy( - maximum_attempts=1, - initial_interval=timedelta(seconds=1), - backoff_coefficient=2.0, - maximum_interval=timedelta(minutes=10), -) - - -TEMPORAL_STATUS_TO_UPLOAD_STATUS_AND_REASON = { - # TODO: Support canceled status - WorkflowExecutionStatus.CANCELED: WorkflowState( - status=TaskStatus.CANCELED, - reason="Task canceled by the user.", - is_terminal=True, - ), - WorkflowExecutionStatus.COMPLETED: WorkflowState( - status=TaskStatus.COMPLETED, - reason="Task completed successfully.", - is_terminal=True, - ), - WorkflowExecutionStatus.FAILED: WorkflowState( - status=TaskStatus.FAILED, - reason="Task encountered terminal failure. Please contact support if retrying does not resolve the issue.", - is_terminal=True, - ), - WorkflowExecutionStatus.RUNNING: WorkflowState( - status=TaskStatus.RUNNING, - reason="Task is running.", - is_terminal=False, - ), - WorkflowExecutionStatus.TERMINATED: WorkflowState( - status=TaskStatus.CANCELED, - reason="Task canceled by the user.", - is_terminal=True, - ), - WorkflowExecutionStatus.TIMED_OUT: WorkflowState( - status=TaskStatus.FAILED, - reason="Task timed out. 
Please contact support if retrying does not resolve the issue", - is_terminal=True, - ), - WorkflowExecutionStatus.CONTINUED_AS_NEW: WorkflowState( - status=TaskStatus.RUNNING, - reason="Task is running.", - is_terminal=False, - ), -} - -DUPLICATE_POLICY_TO_ID_REUSE_POLICY = { - DuplicateWorkflowPolicy.ALLOW_DUPLICATE: WorkflowIDReusePolicy.ALLOW_DUPLICATE, - DuplicateWorkflowPolicy.ALLOW_DUPLICATE_FAILED_ONLY: WorkflowIDReusePolicy.ALLOW_DUPLICATE_FAILED_ONLY, - DuplicateWorkflowPolicy.REJECT_DUPLICATE: WorkflowIDReusePolicy.REJECT_DUPLICATE, - DuplicateWorkflowPolicy.TERMINATE_IF_RUNNING: WorkflowIDReusePolicy.TERMINATE_IF_RUNNING, -} - - -class TemporalClient: - def __init__(self, temporal_client: Client | None = None, plugins: list[Any] = []): - self._client: Client | None = temporal_client - self._plugins = plugins - - @property - def client(self) -> Client: - """Get the temporal client, raising an error if not initialized.""" - if self._client is None: - raise RuntimeError("Temporal client not initialized - ensure temporal_address is properly configured") - return self._client - - @classmethod - async def create(cls, temporal_address: str, plugins: list[Any] = []): - if temporal_address in [ - "false", - "False", - "null", - "None", - "", - "undefined", - False, - None, - ]: - _client = None - else: - _client = await get_temporal_client(temporal_address, plugins=plugins) - return cls(_client, plugins) - - async def setup(self, temporal_address: str): - self._client = await self._get_temporal_client(temporal_address=temporal_address) - - async def _get_temporal_client(self, temporal_address: str) -> Client | None: - if temporal_address in [ - "false", - "False", - "null", - "None", - "", - "undefined", - False, - None, - ]: - return None - else: - return await get_temporal_client(temporal_address, plugins=self._plugins) - - async def start_workflow( - self, - *args: Any, - duplicate_policy: DuplicateWorkflowPolicy = DuplicateWorkflowPolicy.ALLOW_DUPLICATE, - retry_policy: RetryPolicy = DEFAULT_RETRY_POLICY, - task_timeout: timedelta = timedelta(seconds=10), - execution_timeout: timedelta = timedelta(seconds=86400), - **kwargs: Any, - ) -> str: - temporal_retry_policy = TemporalRetryPolicy(**retry_policy.model_dump(exclude_unset=True)) - workflow_handle = await self.client.start_workflow( - *args, - retry_policy=temporal_retry_policy, - task_timeout=task_timeout, - execution_timeout=execution_timeout, - id_reuse_policy=DUPLICATE_POLICY_TO_ID_REUSE_POLICY[duplicate_policy], - **kwargs, - ) - return workflow_handle.id - - async def send_signal( - self, - workflow_id: str, - signal: str | Callable[[dict[str, Any] | list[Any] | str | int | float | bool | BaseModel], Any], - payload: dict[str, Any] | list[Any] | str | int | float | bool | BaseModel, - ) -> None: - handle = self.client.get_workflow_handle(workflow_id=workflow_id) - await handle.signal(signal, payload) # type: ignore[misc] - - async def query_workflow( - self, - workflow_id: str, - query: str | Callable[[dict[str, Any] | list[Any] | str | int | float | bool | BaseModel], Any], - ) -> Any: - """ - Submit a query to a workflow by name and return the results. 
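(An end-to-end sketch of driving TemporalClient directly; the workflow type name, workflow id, and task queue are illustrative. Note that `create` returns a client with no underlying connection when the address is falsy, so real callers should guard for that:)

```python
# Sketch: start a workflow and read back its mapped status (names are
# hypothetical; assumes Temporal at localhost:7233).
import asyncio
from datetime import timedelta

from agentex.lib.core.clients.temporal.temporal_client import TemporalClient
from agentex.lib.core.clients.temporal.types import DuplicateWorkflowPolicy


async def main() -> None:
    tc = await TemporalClient.create("localhost:7233")
    workflow_id = await tc.start_workflow(
        "ExampleWorkflow",            # workflow type name (illustrative)
        {"input": "hello"},           # single workflow argument
        id="task-123",
        task_queue="example-queue",
        duplicate_policy=DuplicateWorkflowPolicy.REJECT_DUPLICATE,
        execution_timeout=timedelta(hours=1),
    )
    state = await tc.get_workflow_status(workflow_id)
    print(state.status, state.is_terminal)


asyncio.run(main())
```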
- - Args: - workflow_id: The ID of the workflow to query - query: The name of the query or a callable query function - - Returns: - The result of the query - """ - handle = self.client.get_workflow_handle(workflow_id=workflow_id) - return await handle.query(query) - - async def get_workflow_status(self, workflow_id: str) -> WorkflowState: - try: - handle = self.client.get_workflow_handle(workflow_id=workflow_id) - description = await handle.describe() - return TEMPORAL_STATUS_TO_UPLOAD_STATUS_AND_REASON[description.status] - except RPCError as e: - if e.status == RPCStatusCode.NOT_FOUND: - return WorkflowState( - status="NOT_FOUND", - reason="Workflow not found", - is_terminal=True, - ) - raise - - async def terminate_workflow(self, workflow_id: str) -> None: - return await self.client.get_workflow_handle(workflow_id).terminate() - - async def cancel_workflow(self, workflow_id: str) -> None: - return await self.client.get_workflow_handle(workflow_id).cancel() diff --git a/src/agentex/lib/core/clients/temporal/types.py b/src/agentex/lib/core/clients/temporal/types.py deleted file mode 100644 index 8ce596d7..00000000 --- a/src/agentex/lib/core/clients/temporal/types.py +++ /dev/null @@ -1,49 +0,0 @@ -from __future__ import annotations - -from enum import Enum -from datetime import timedelta - -from pydantic import Field - -from agentex.lib.utils.model_utils import BaseModel - - -class WorkflowState(BaseModel): - status: str - is_terminal: bool - reason: str | None = None - - -class RetryPolicy(BaseModel): - initial_interval: timedelta = Field( - timedelta(seconds=1), - description="Backoff interval for the first retry. Default 1s.", - ) - backoff_coefficient: float = Field( - 2.0, - description="Coefficient to multiply previous backoff interval by to get new interval. Default 2.0.", - ) - maximum_interval: timedelta | None = Field( - None, - description="Maximum backoff interval between retries. Default 100x :py:attr:`initial_interval`.", - ) - maximum_attempts: int = Field( - 0, - description="Maximum number of attempts. 
If 0, the default, there is no maximum.", - ) - - -class DuplicateWorkflowPolicy(str, Enum): - ALLOW_DUPLICATE = "ALLOW_DUPLICATE" - ALLOW_DUPLICATE_FAILED_ONLY = "ALLOW_DUPLICATE_FAILED_ONLY" - REJECT_DUPLICATE = "REJECT_DUPLICATE" - TERMINATE_IF_RUNNING = "TERMINATE_IF_RUNNING" - - -class TaskStatus(str, Enum): - CANCELED = "CANCELED" - COMPLETED = "COMPLETED" - FAILED = "FAILED" - RUNNING = "RUNNING" - TERMINATED = "TERMINATED" - TIMED_OUT = "TIMED_OUT" diff --git a/src/agentex/lib/core/clients/temporal/utils.py b/src/agentex/lib/core/clients/temporal/utils.py deleted file mode 100644 index 9be7cf5c..00000000 --- a/src/agentex/lib/core/clients/temporal/utils.py +++ /dev/null @@ -1,119 +0,0 @@ -from __future__ import annotations - -from typing import Any - -from temporalio.client import Client, Plugin as ClientPlugin -from temporalio.worker import Interceptor -from temporalio.runtime import Runtime, TelemetryConfig, OpenTelemetryConfig -from temporalio.contrib.pydantic import pydantic_data_converter -from temporalio.contrib.openai_agents import OpenAIAgentsPlugin - -# class DateTimeJSONEncoder(AdvancedJSONEncoder): -# def default(self, o: Any) -> Any: -# if isinstance(o, datetime.datetime): -# return o.isoformat() -# return super().default(o) - - -# class DateTimeJSONTypeConverter(JSONTypeConverter): -# def to_typed_value( -# self, hint: Type, value: Any -# ) -> Union[Optional[Any], _JSONTypeConverterUnhandled]: -# if hint == datetime.datetime: -# return datetime.datetime.fromisoformat(value) -# return JSONTypeConverter.Unhandled - - -# class DateTimePayloadConverter(CompositePayloadConverter): -# def __init__(self) -> None: -# json_converter = JSONPlainPayloadConverter( -# encoder=DateTimeJSONEncoder, -# custom_type_converters=[DateTimeJSONTypeConverter()], -# ) -# super().__init__( -# *[ -# c if not isinstance(c, JSONPlainPayloadConverter) else json_converter -# for c in DefaultPayloadConverter.default_encoding_payload_converters -# ] -# ) - - -# custom_data_converter = dataclasses.replace( -# DataConverter.default, -# payload_converter_class=DateTimePayloadConverter, -# ) - - -def validate_client_plugins(plugins: list[Any]) -> None: - """ - Validate that all items in the plugins list are valid Temporal client plugins. - - Args: - plugins: List of plugins to validate - - Raises: - TypeError: If any plugin is not a valid ClientPlugin instance - """ - for i, plugin in enumerate(plugins): - if not isinstance(plugin, ClientPlugin): - raise TypeError( - f"Plugin at index {i} must be an instance of temporalio.client.Plugin, " - f"got {type(plugin).__name__}. Note: WorkerPlugin is not valid for workflow clients." - ) - - -def validate_worker_interceptors(interceptors: list[Any]) -> None: - """ - Validate that all items in the interceptors list are valid Temporal worker interceptors. - - Args: - interceptors: List of interceptors to validate - - Raises: - TypeError: If any interceptor is not a valid Interceptor instance - """ - for i, interceptor in enumerate(interceptors): - if not isinstance(interceptor, Interceptor): - raise TypeError( - f"Interceptor at index {i} must be an instance of temporalio.worker.Interceptor, " - f"got {type(interceptor).__name__}" - ) - - -async def get_temporal_client(temporal_address: str, metrics_url: str | None = None, plugins: list[Any] = []) -> Client: - """ - Create a Temporal client with plugin integration. 
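(A sketch of the converter-selection behavior this helper implements: with no plugins the pydantic data converter is applied; with an OpenAIAgentsPlugin the plugin manages its own converter, so none is passed. The workflow id below is illustrative and a local Temporal server is assumed:)

```python
# Sketch: connecting without plugins, then inspecting a workflow handle.
import asyncio

from agentex.lib.core.clients.temporal.utils import get_temporal_client


async def main() -> None:
    client = await get_temporal_client("localhost:7233")  # pydantic converter applied
    handle = client.get_workflow_handle("task-123")       # illustrative workflow id
    print((await handle.describe()).status)               # raises if the id does not exist


asyncio.run(main())
```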
- - Args: - temporal_address: Temporal server address - metrics_url: Optional metrics endpoint URL - plugins: List of Temporal plugins to include - - Returns: - Configured Temporal client - """ - # Validate plugins if any are provided - if plugins: - validate_client_plugins(plugins) - - # Check if OpenAI plugin is present - it needs to configure its own data converter - has_openai_plugin = any( - isinstance(p, OpenAIAgentsPlugin) for p in (plugins or []) - ) - - # Only set data_converter if OpenAI plugin is not present - connect_kwargs = { - "target_host": temporal_address, - "plugins": plugins, - } - - if not has_openai_plugin: - connect_kwargs["data_converter"] = pydantic_data_converter - - if not metrics_url: - client = await Client.connect(**connect_kwargs) - else: - runtime = Runtime(telemetry=TelemetryConfig(metrics=OpenTelemetryConfig(url=metrics_url))) - connect_kwargs["runtime"] = runtime - client = await Client.connect(**connect_kwargs) - return client diff --git a/src/agentex/lib/core/services/__init__.py b/src/agentex/lib/core/services/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/agentex/lib/core/services/adk/__init__.py b/src/agentex/lib/core/services/adk/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/agentex/lib/core/services/adk/acp/__init__.py b/src/agentex/lib/core/services/adk/acp/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/agentex/lib/core/services/adk/acp/acp.py b/src/agentex/lib/core/services/adk/acp/acp.py deleted file mode 100644 index 956e1b5d..00000000 --- a/src/agentex/lib/core/services/adk/acp/acp.py +++ /dev/null @@ -1,285 +0,0 @@ -from __future__ import annotations - -from typing import Any, List, cast - -from agentex import AsyncAgentex -from agentex.types.task import Task -from agentex.types.event import Event -from agentex.lib.utils.logging import make_logger -from agentex.lib.utils.temporal import heartbeat_if_in_workflow -from agentex.types.task_message import TaskMessage -from agentex.types.agent_rpc_params import ( - ParamsSendEventRequest as RpcParamsSendEventRequest, - ParamsCancelTaskRequest as RpcParamsCancelTaskRequest, -) -from agentex.lib.core.tracing.tracer import AsyncTracer -from agentex.types.task_message_content import TaskMessageContent -from agentex.types.task_message_content_param import TaskMessageContentParam - -logger = make_logger(__name__) - - -class ACPService: - def __init__( - self, - agentex_client: AsyncAgentex, - tracer: AsyncTracer, - ): - self._agentex_client = agentex_client - self._tracer = tracer - - async def task_create( - self, - name: str | None = None, - agent_id: str | None = None, - agent_name: str | None = None, - params: dict[str, Any] | None = None, - trace_id: str | None = None, - parent_span_id: str | None = None, - request: dict[str, Any] | None = None, - ) -> Task: - trace = self._tracer.trace(trace_id=trace_id) - async with trace.span( - parent_id=parent_span_id, - name="task_create", - input={ - "name": name, - "agent_id": agent_id, - "agent_name": agent_name, - "params": params, - }, - ) as span: - heartbeat_if_in_workflow("task create") - - # Extract headers from request; pass-through to agent - extra_headers = request.get("headers") if request else None - - if agent_name: - json_rpc_response = await self._agentex_client.agents.rpc_by_name( - agent_name=agent_name, - method="task/create", - params={ - "name": name, - "params": params, - }, - extra_headers=extra_headers, - ) - elif agent_id: - 
json_rpc_response = await self._agentex_client.agents.rpc( - agent_id=agent_id, - method="task/create", - params={ - "name": name, - "params": params, - }, - extra_headers=extra_headers, - ) - else: - raise ValueError("Either agent_name or agent_id must be provided") - - task_entry = Task.model_validate(json_rpc_response.result) - if span: - span.output = task_entry.model_dump() - return task_entry - - async def message_send( - self, - content: TaskMessageContent, - agent_id: str | None = None, - agent_name: str | None = None, - task_id: str | None = None, - task_name: str | None = None, - trace_id: str | None = None, - parent_span_id: str | None = None, - request: dict[str, Any] | None = None, - ) -> List[TaskMessage]: - trace = self._tracer.trace(trace_id=trace_id) - async with trace.span( - parent_id=parent_span_id, - name="message_send", - input={ - "agent_id": agent_id, - "agent_name": agent_name, - "task_id": task_id, - "task_name": task_name, - "message": content, - }, - ) as span: - heartbeat_if_in_workflow("message send") - - # Extract headers from request; pass-through to agent - extra_headers = request.get("headers") if request else None - - if agent_name: - json_rpc_response = await self._agentex_client.agents.rpc_by_name( - agent_name=agent_name, - method="message/send", - params={ - "task_id": task_id, - "content": cast(TaskMessageContentParam, content.model_dump()), - "stream": False, - }, - extra_headers=extra_headers, - ) - elif agent_id: - json_rpc_response = await self._agentex_client.agents.rpc( - agent_id=agent_id, - method="message/send", - params={ - "task_id": task_id, - "content": cast(TaskMessageContentParam, content.model_dump()), - "stream": False, - }, - extra_headers=extra_headers, - ) - else: - raise ValueError("Either agent_name or agent_id must be provided") - - task_messages: List[TaskMessage] = [] - logger.info("json_rpc_response: %s", json_rpc_response) - if isinstance(json_rpc_response.result, list): - for message in json_rpc_response.result: - task_message = TaskMessage.model_validate(message) - task_messages.append(task_message) - else: - task_messages = [TaskMessage.model_validate(json_rpc_response.result)] - - if span: - span.output = [task_message.model_dump() for task_message in task_messages] - return task_messages - - async def event_send( - self, - content: TaskMessageContent, - agent_id: str | None = None, - agent_name: str | None = None, - task_id: str | None = None, - task_name: str | None = None, - trace_id: str | None = None, - parent_span_id: str | None = None, - request: dict[str, Any] | None = None, - ) -> Event: - trace = self._tracer.trace(trace_id=trace_id) - async with trace.span( - parent_id=parent_span_id, - name="event_send", - input={ - "agent_id": agent_id, - "agent_name": agent_name, - "task_id": task_id, - "task_name": task_name, - "content": content, - }, - ) as span: - heartbeat_if_in_workflow("event send") - - # Extract headers from request; pass-through to agent - extra_headers = request.get("headers") if request else None - - rpc_event_params: RpcParamsSendEventRequest = { - "task_id": task_id, - "task_name": task_name, - "content": cast(TaskMessageContentParam, content.model_dump()), - } - if agent_name: - json_rpc_response = await self._agentex_client.agents.rpc_by_name( - agent_name=agent_name, - method="event/send", - params=rpc_event_params, - extra_headers=extra_headers, - ) - elif agent_id: - json_rpc_response = await self._agentex_client.agents.rpc( - agent_id=agent_id, - method="event/send", - 
params=rpc_event_params, - extra_headers=extra_headers, - ) - else: - raise ValueError("Either agent_name or agent_id must be provided") - - event_entry = Event.model_validate(json_rpc_response.result) - if span: - span.output = event_entry.model_dump() - return event_entry - - async def task_cancel( - self, - task_id: str | None = None, - task_name: str | None = None, - agent_id: str | None = None, - agent_name: str | None = None, - trace_id: str | None = None, - parent_span_id: str | None = None, - request: dict[str, Any] | None = None, - ) -> Task: - """ - Cancel a task by sending cancel request to the agent that owns the task. - - Args: - task_id: ID of the task to cancel (passed to agent in params) - task_name: Name of the task to cancel (passed to agent in params) - agent_id: ID of the agent that owns the task - agent_name: Name of the agent that owns the task - trace_id: Trace ID for tracing - parent_span_id: Parent span ID for tracing - request: Additional request context including headers to forward to the agent - - Returns: - Task entry representing the cancelled task - - Raises: - ValueError: If neither agent_name nor agent_id is provided, - or if neither task_name nor task_id is provided - """ - # Require agent identification - if not agent_name and not agent_id: - raise ValueError("Either agent_name or agent_id must be provided to identify the agent that owns the task") - - # Require task identification - if not task_name and not task_id: - raise ValueError("Either task_name or task_id must be provided to identify the task to cancel") - trace = self._tracer.trace(trace_id=trace_id) - async with trace.span( - parent_id=parent_span_id, - name="task_cancel", - input={ - "task_id": task_id, - "task_name": task_name, - "agent_id": agent_id, - "agent_name": agent_name, - }, - ) as span: - heartbeat_if_in_workflow("task cancel") - - # Extract headers from request; pass-through to agent - extra_headers = request.get("headers") if request else None - - # Build params for the agent (task identification) - params: RpcParamsCancelTaskRequest = {} - if task_id: - params["task_id"] = task_id - if task_name: - params["task_name"] = task_name - - # Send cancel request to the correct agent - if agent_name: - json_rpc_response = await self._agentex_client.agents.rpc_by_name( - agent_name=agent_name, - method="task/cancel", - params=params, - extra_headers=extra_headers, - ) - else: # agent_id is provided (validated above) - assert agent_id is not None - json_rpc_response = await self._agentex_client.agents.rpc( - agent_id=agent_id, - method="task/cancel", - params=params, - extra_headers=extra_headers, - ) - - task_entry = Task.model_validate(json_rpc_response.result) - if span: - span.output = task_entry.model_dump() - return task_entry diff --git a/src/agentex/lib/core/services/adk/agent_task_tracker.py b/src/agentex/lib/core/services/adk/agent_task_tracker.py deleted file mode 100644 index 54ee4f72..00000000 --- a/src/agentex/lib/core/services/adk/agent_task_tracker.py +++ /dev/null @@ -1,87 +0,0 @@ -from __future__ import annotations - -from agentex import AsyncAgentex -from agentex.lib.utils.logging import make_logger -from agentex.lib.core.tracing.tracer import AsyncTracer -from agentex.types.agent_task_tracker import AgentTaskTracker - -logger = make_logger(__name__) - - -class AgentTaskTrackerService: - def __init__( - self, agentex_client: AsyncAgentex, tracer: AsyncTracer, - ): - self._agentex_client = agentex_client - self._tracer = tracer - - async def get_agent_task_tracker( - 
self, - tracker_id: str, - trace_id: str | None = None, - parent_span_id: str | None = None, - ) -> AgentTaskTracker: - trace = self._tracer.trace(trace_id) - async with trace.span( - parent_id=parent_span_id, - name="get_agent_task_tracker", - input={"tracker_id": tracker_id}, - ) as span: - tracker = await self._agentex_client.tracker.retrieve( - tracker_id - ) - if span: - span.output = tracker.model_dump() - return tracker - - async def get_by_task_and_agent( - self, - task_id: str, - agent_id: str, - trace_id: str | None = None, - parent_span_id: str | None = None, - ) -> AgentTaskTracker | None: - trace = self._tracer.trace(trace_id) - async with trace.span( - parent_id=parent_span_id, - name="get_by_task_and_agent", - input={"task_id": task_id, "agent_id": agent_id}, - ) as span: - trackers = await self._agentex_client.tracker.list( - task_id=task_id, - agent_id=agent_id, - ) - tracker = trackers[0] if trackers else None - if span: - span.output = tracker.model_dump() if tracker else None - return tracker - - async def update_agent_task_tracker( - self, - tracker_id: str, - last_processed_event_id: str | None = None, - status: str | None = None, - status_reason: str | None = None, - trace_id: str | None = None, - parent_span_id: str | None = None, - ) -> AgentTaskTracker: - trace = self._tracer.trace(trace_id) - async with trace.span( - parent_id=parent_span_id, - name="update_agent_task_tracker", - input={ - "tracker_id": tracker_id, - "last_processed_event_id": last_processed_event_id, - "status": status, - "status_reason": status_reason, - }, - ) as span: - tracker = await self._agentex_client.tracker.update( - tracker_id=tracker_id, - last_processed_event_id=last_processed_event_id, - status=status, - status_reason=status_reason, - ) - if span: - span.output = tracker.model_dump() - return tracker diff --git a/src/agentex/lib/core/services/adk/agents.py b/src/agentex/lib/core/services/adk/agents.py deleted file mode 100644 index 1d26b9d5..00000000 --- a/src/agentex/lib/core/services/adk/agents.py +++ /dev/null @@ -1,43 +0,0 @@ -from typing import Optional - -from agentex import AsyncAgentex -from agentex.types.agent import Agent -from agentex.lib.utils.logging import make_logger -from agentex.lib.utils.temporal import heartbeat_if_in_workflow -from agentex.lib.core.tracing.tracer import AsyncTracer - -logger = make_logger(__name__) - - -class AgentsService: - def __init__( - self, - agentex_client: AsyncAgentex, - tracer: AsyncTracer, - ): - self._agentex_client = agentex_client - self._tracer = tracer - - async def get_agent( - self, - agent_id: Optional[str] = None, - agent_name: Optional[str] = None, - trace_id: Optional[str] = None, - parent_span_id: Optional[str] = None, - ) -> Agent: - trace = self._tracer.trace(trace_id) - async with trace.span( - parent_id=parent_span_id, - name="get_agent", - input={"agent_id": agent_id, "agent_name": agent_name}, - ) as span: - heartbeat_if_in_workflow("get agent") - if agent_id: - agent = await self._agentex_client.agents.retrieve(agent_id=agent_id) - elif agent_name: - agent = await self._agentex_client.agents.retrieve_by_name(agent_name=agent_name) - else: - raise ValueError("Either agent_id or agent_name must be provided") - if span: - span.output = agent.model_dump() - return agent diff --git a/src/agentex/lib/core/services/adk/events.py b/src/agentex/lib/core/services/adk/events.py deleted file mode 100644 index fbed9e5a..00000000 --- a/src/agentex/lib/core/services/adk/events.py +++ /dev/null @@ -1,63 +0,0 @@ -from __future__ 
import annotations - -from agentex import AsyncAgentex -from agentex.types.event import Event -from agentex.lib.utils.logging import make_logger -from agentex.lib.core.tracing.tracer import AsyncTracer - -logger = make_logger(__name__) - - -class EventsService: - def __init__( - self, agentex_client: AsyncAgentex, tracer: AsyncTracer - ): - self._agentex_client = agentex_client - self._tracer = tracer - - async def get_event( - self, - event_id: str, - trace_id: str | None = None, - parent_span_id: str | None = None, - ) -> Event | None: - trace = self._tracer.trace(trace_id) - async with trace.span( - parent_id=parent_span_id, - name="get_event", - input={"event_id": event_id}, - ) as span: - event = await self._agentex_client.events.retrieve(event_id=event_id) - if span: - span.output = event.model_dump() - return event - - async def list_events( - self, - task_id: str, - agent_id: str, - last_processed_event_id: str | None = None, - limit: int | None = None, - trace_id: str | None = None, - parent_span_id: str | None = None, - ) -> list[Event]: - trace = self._tracer.trace(trace_id) - async with trace.span( - parent_id=parent_span_id, - name="list_events", - input={ - "task_id": task_id, - "agent_id": agent_id, - "last_processed_event_id": last_processed_event_id, - "limit": limit, - }, - ) as span: - events = await self._agentex_client.events.list( - task_id=task_id, - agent_id=agent_id, - last_processed_event_id=last_processed_event_id, - limit=limit, - ) - if span: - span.output = [event.model_dump() for event in events] - return events diff --git a/src/agentex/lib/core/services/adk/messages.py b/src/agentex/lib/core/services/adk/messages.py deleted file mode 100644 index ef1a1344..00000000 --- a/src/agentex/lib/core/services/adk/messages.py +++ /dev/null @@ -1,165 +0,0 @@ -from __future__ import annotations - -import asyncio -from typing import Any, Coroutine - -from agentex import AsyncAgentex -from agentex.lib.utils.logging import make_logger -from agentex.lib.utils.temporal import heartbeat_if_in_workflow -from agentex.types.task_message import TaskMessage, TaskMessageContent -from agentex.lib.core.tracing.tracer import AsyncTracer -from agentex.types.task_message_update import TaskMessageUpdate, StreamTaskMessageFull -from agentex.lib.core.services.adk.streaming import StreamingService - -logger = make_logger(__name__) - - -class MessagesService: - def __init__( - self, - agentex_client: AsyncAgentex, - streaming_service: StreamingService, - tracer: AsyncTracer, - ): - self._agentex_client = agentex_client - self._streaming_service = streaming_service - self._tracer = tracer - - async def create_message( - self, - task_id: str, - content: TaskMessageContent, - emit_updates: bool = True, - trace_id: str | None = None, - parent_span_id: str | None = None, - ) -> TaskMessage: - trace = self._tracer.trace(trace_id) - async with trace.span( - parent_id=parent_span_id, - name="create_message", - input={"task_id": task_id, "message": content}, - ) as span: - heartbeat_if_in_workflow("create message") - task_message = await self._agentex_client.messages.create( - task_id=task_id, - content=content.model_dump(), - ) - if emit_updates: - await self._emit_updates([task_message]) - if span: - span.output = task_message.model_dump() - return task_message - - async def update_message( - self, - task_id: str, - message_id: str, - content: TaskMessageContent, - trace_id: str | None = None, - parent_span_id: str | None = None, - ) -> TaskMessage: - trace = self._tracer.trace(trace_id) - async 
with trace.span( - parent_id=parent_span_id, - name="update_message", - input={ - "task_id": task_id, - "message_id": message_id, - "message": content, - }, - ) as span: - heartbeat_if_in_workflow("update message") - task_message = await self._agentex_client.messages.update( - task_id=task_id, - message_id=message_id, - content=content.model_dump(), - ) - if span: - span.output = task_message.model_dump() - return task_message - - async def create_messages_batch( - self, - task_id: str, - contents: list[TaskMessageContent], - emit_updates: bool = True, - trace_id: str | None = None, - parent_span_id: str | None = None, - ) -> list[TaskMessage]: - trace = self._tracer.trace(trace_id) - async with trace.span( - parent_id=parent_span_id, - name="create_messages_batch", - input={"task_id": task_id, "messages": contents}, - ) as span: - heartbeat_if_in_workflow("create messages batch") - task_messages = await self._agentex_client.messages.batch.create( - task_id=task_id, - contents=[content.model_dump() for content in contents], - ) - if emit_updates: - await self._emit_updates(task_messages) - if span: - span.output = [task_message.model_dump() for task_message in task_messages] - return task_messages - - async def update_messages_batch( - self, - task_id: str, - updates: dict[str, TaskMessageContent], - trace_id: str | None = None, - parent_span_id: str | None = None, - ) -> list[TaskMessage]: - trace = self._tracer.trace(trace_id) - async with trace.span( - parent_id=parent_span_id, - name="update_messages_batch", - input={"task_id": task_id, "updates": updates}, - ) as span: - heartbeat_if_in_workflow("update messages batch") - task_messages = await self._agentex_client.messages.batch.update( - task_id=task_id, - updates={ - message_id: content.model_dump() - for message_id, content in updates.items() - }, - ) - if span: - span.output = [task_message.model_dump() for task_message in task_messages] - return task_messages - - async def list_messages( - self, - task_id: str, - limit: int | None = None, - trace_id: str | None = None, - parent_span_id: str | None = None, - ) -> list[TaskMessage]: - trace = self._tracer.trace(trace_id) - async with trace.span( - parent_id=parent_span_id, - name="list_messages", - input={"task_id": task_id, "limit": limit}, - ) as span: - heartbeat_if_in_workflow("list messages") - task_messages = await self._agentex_client.messages.list( - task_id=task_id, - limit=limit, - ) - if span: - span.output = [task_message.model_dump() for task_message in task_messages] - return task_messages - - async def _emit_updates(self, task_messages: list[TaskMessage]) -> None: - stream_update_handlers: list[Coroutine[Any, Any, TaskMessageUpdate | None]] = [] - for task_message in task_messages: - stream_update_handler = self._streaming_service.stream_update( - update=StreamTaskMessageFull( - type="full", - parent_task_message=task_message, - content=task_message.content, - ) - ) - stream_update_handlers.append(stream_update_handler) - - await asyncio.gather(*stream_update_handlers) diff --git a/src/agentex/lib/core/services/adk/providers/__init__.py b/src/agentex/lib/core/services/adk/providers/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/agentex/lib/core/services/adk/providers/litellm.py b/src/agentex/lib/core/services/adk/providers/litellm.py deleted file mode 100644 index 5df03f4f..00000000 --- a/src/agentex/lib/core/services/adk/providers/litellm.py +++ /dev/null @@ -1,262 +0,0 @@ -from __future__ import annotations - -from collections.abc 
import AsyncGenerator - -from agentex import AsyncAgentex -from agentex.lib.utils import logging -from agentex.lib.utils.temporal import heartbeat_if_in_workflow -from agentex.types.task_message import TaskMessage -from agentex.lib.utils.completions import concat_completion_chunks -from agentex.lib.types.llm_messages import ( - LLMConfig, - Completion, -) -from agentex.lib.core.tracing.tracer import AsyncTracer -from agentex.types.task_message_delta import TextDelta -from agentex.types.task_message_update import ( - StreamTaskMessageFull, - StreamTaskMessageDelta, -) -from agentex.types.task_message_content import TextContent -from agentex.lib.core.services.adk.streaming import StreamingService -from agentex.lib.core.adapters.llm.adapter_litellm import LiteLLMGateway - -logger = logging.make_logger(__name__) - - -class LiteLLMService: - def __init__( - self, - agentex_client: AsyncAgentex, - streaming_service: StreamingService, - tracer: AsyncTracer, - llm_gateway: LiteLLMGateway | None = None, - ): - self.agentex_client = agentex_client - self.llm_gateway = llm_gateway - self.streaming_service = streaming_service - self.tracer = tracer - - async def chat_completion( - self, - llm_config: LLMConfig, - trace_id: str | None = None, - parent_span_id: str | None = None, - ) -> Completion: - trace = self.tracer.trace(trace_id) - async with trace.span( - parent_id=parent_span_id, - name="chat_completion", - input=llm_config.model_dump(), - ) as span: - heartbeat_if_in_workflow("chat completion") - if self.llm_gateway is None: - raise ValueError("LLM Gateway is not set") - completion = await self.llm_gateway.acompletion(**llm_config.model_dump()) - if span: - span.output = completion.model_dump() - return completion - - async def chat_completion_auto_send( - self, - task_id: str, - llm_config: LLMConfig, - trace_id: str | None = None, - parent_span_id: str | None = None, - ) -> TaskMessage | None: - """ - Chat completion with automatic TaskMessage creation. This does not stream the completion; to stream, use `chat_completion_stream_auto_send`. - - Args: - task_id (str): The ID of the task to run the agent for. - llm_config (LLMConfig): The configuration for the LLM (must not set stream=True). - - Returns: - TaskMessage | None: The created TaskMessage, or None if no message was produced. - """ - - if llm_config.stream: - raise ValueError( - "LLM config must not have stream=True. To stream use `chat_completion_stream` or `chat_completion_stream_auto_send`." 
- ) - - if self.llm_gateway is None: - raise ValueError("LLM Gateway is not set") - - trace = self.tracer.trace(trace_id) - async with trace.span( - parent_id=parent_span_id, - name="chat_completion_auto_send", - input=llm_config.model_dump(), - ) as span: - heartbeat_if_in_workflow("chat completion auto send") - - async with self.streaming_service.streaming_task_message_context( - task_id=task_id, - initial_content=TextContent( - author="agent", - content="", - format="markdown", - ), - ) as streaming_context: - completion = await self.llm_gateway.acompletion(**llm_config.model_dump()) - if ( - completion.choices - and len(completion.choices) > 0 - and completion.choices[0].message - ): - final_content = TextContent( - author="agent", - content=completion.choices[0].message.content or "", - format="markdown", - ) - await streaming_context.stream_update( - update=StreamTaskMessageFull( - parent_task_message=streaming_context.task_message, - content=final_content, - type="full", - ), - ) - else: - raise ValueError("No completion message returned from LLM") - - if span: - if streaming_context.task_message: - span.output = streaming_context.task_message.model_dump() - return streaming_context.task_message if streaming_context.task_message else None - - async def chat_completion_stream( - self, - llm_config: LLMConfig, - trace_id: str | None = None, - parent_span_id: str | None = None, - ) -> AsyncGenerator[Completion, None]: - """ - Stream chat completion chunks using LiteLLM. - - Args: - llm_config (LLMConfig): The configuration for the LLM (must have stream=True). - trace_id (Optional[str]): The trace ID for tracing. - parent_span_id (Optional[str]): The parent span ID for tracing. - - Returns: - AsyncGenerator[Completion, None]: Generator yielding completion chunks - - Raises: - ValueError: If called from within a Temporal workflow or if stream=False - """ - if not llm_config.stream: - raise ValueError("LLM config must have stream=True for streaming") - - if self.llm_gateway is None: - raise ValueError("LLM Gateway is not set") - - trace = self.tracer.trace(trace_id) - async with trace.span( - parent_id=parent_span_id, - name="chat_completion_stream", - input=llm_config.model_dump(), - ) as span: - # Direct streaming outside temporal - yield each chunk as it comes - chunks: list[Completion] = [] - async for chunk in self.llm_gateway.acompletion_stream( - **llm_config.model_dump() - ): - chunks.append(chunk) - yield chunk - if span: - span.output = concat_completion_chunks(chunks).model_dump() - - async def chat_completion_stream_auto_send( - self, - task_id: str, - llm_config: LLMConfig, - trace_id: str | None = None, - parent_span_id: str | None = None, - ) -> TaskMessage | None: - """ - Stream chat completion with automatic TaskMessage creation and streaming. - - Args: - task_id (str): The ID of the task to run the agent for. - llm_config (LLMConfig): The configuration for the LLM (must have stream=True). 
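- Note: if llm_config.stream is False it is switched to True automatically before the call, so streaming is always used here.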
- - Returns: - TaskMessage: A TaskMessage object - """ - heartbeat_if_in_workflow("chat completion stream") - - if self.llm_gateway is None: - raise ValueError("LLM Gateway is not set") - - if not llm_config.stream: - llm_config.stream = True - - trace = self.tracer.trace(trace_id) - async with trace.span( - parent_id=parent_span_id, - name="chat_completion_stream_auto_send", - input=llm_config.model_dump(), - ) as span: - # Use streaming context manager - async with self.streaming_service.streaming_task_message_context( - task_id=task_id, - initial_content=TextContent( - author="agent", - content="", - format="markdown", - ), - ) as streaming_context: - # Get the streaming response - chunks = [] - async for response in self.llm_gateway.acompletion_stream( - **llm_config.model_dump() - ): - heartbeat_if_in_workflow("chat completion streaming") - if ( - response.choices - and len(response.choices) > 0 - and response.choices[0].delta - ): - delta = response.choices[0].delta.content - if delta: - # Stream the chunk via the context manager - await streaming_context.stream_update( - update=StreamTaskMessageDelta( - parent_task_message=streaming_context.task_message, - delta=TextDelta(text_delta=delta, type="text"), - type="delta", - ), - ) - heartbeat_if_in_workflow("content chunk streamed") - - # Store the chunk for final message assembly - chunks.append(response) - - # Update the final message content - complete_message = concat_completion_chunks(chunks) - if ( - complete_message - and complete_message.choices - and complete_message.choices[0].message - ): - final_content = TextContent( - author="agent", - content=complete_message.choices[0].message.content or "", - format="markdown", - ) - await streaming_context.stream_update( - update=StreamTaskMessageFull( - parent_task_message=streaming_context.task_message, - content=final_content, - type="full", - ), - ) - - heartbeat_if_in_workflow("chat completion stream complete") - - if span: - if streaming_context.task_message: - span.output = streaming_context.task_message.model_dump() - - return streaming_context.task_message if streaming_context.task_message else None diff --git a/src/agentex/lib/core/services/adk/providers/openai.py b/src/agentex/lib/core/services/adk/providers/openai.py deleted file mode 100644 index 82d420a8..00000000 --- a/src/agentex/lib/core/services/adk/providers/openai.py +++ /dev/null @@ -1,1044 +0,0 @@ -# Standard library imports -from __future__ import annotations - -from typing import Any, Literal -from contextlib import AsyncExitStack, asynccontextmanager - -from mcp import StdioServerParameters -from agents import Agent, Runner, RunResult, RunResultStreaming -from pydantic import BaseModel -from agents.mcp import MCPServerStdio -from agents.agent import StopAtTools, ToolsToFinalOutputFunction -from agents.guardrail import InputGuardrail, OutputGuardrail -from agents.exceptions import InputGuardrailTripwireTriggered, OutputGuardrailTripwireTriggered -from openai.types.responses import ( - ResponseCompletedEvent, - ResponseTextDeltaEvent, - ResponseFunctionToolCall, - ResponseFunctionWebSearch, - ResponseOutputItemDoneEvent, - ResponseCodeInterpreterToolCall, - ResponseReasoningSummaryPartDoneEvent, - ResponseReasoningSummaryPartAddedEvent, - ResponseReasoningSummaryTextDeltaEvent, -) - -# Local imports -from agentex import AsyncAgentex -from agentex.lib.utils import logging -from agentex.lib.utils.mcp import redact_mcp_server_params -from agentex.lib.utils.temporal import heartbeat_if_in_workflow -from 
agentex.lib.core.tracing.tracer import AsyncTracer -from agentex.types.task_message_delta import ( - TextDelta, - ReasoningSummaryDelta, -) -from agentex.types.task_message_update import ( - StreamTaskMessageFull, - StreamTaskMessageDelta, -) -from agentex.types.task_message_content import ( - TextContent, - ReasoningContent, - ToolRequestContent, - ToolResponseContent, -) -from agentex.lib.core.services.adk.streaming import ( - StreamingService, - StreamingTaskMessageContext, -) - -logger = logging.make_logger(__name__) - - -@asynccontextmanager -async def mcp_server_context( - mcp_server_params: list[StdioServerParameters], - mcp_timeout_seconds: int | None = None, -): - """Context manager for MCP servers.""" - servers = [] - for params in mcp_server_params: - server = MCPServerStdio( - name=f"Server: {params.command}", - params=params.model_dump(), - cache_tools_list=True, - client_session_timeout_seconds=mcp_timeout_seconds, - ) - servers.append(server) - - async with AsyncExitStack() as stack: - for server in servers: - await stack.enter_async_context(server) - yield servers - - -class OpenAIService: - """Service for OpenAI agent operations using the agents library.""" - - def __init__( - self, - agentex_client: AsyncAgentex | None = None, - streaming_service: StreamingService | None = None, - tracer: AsyncTracer | None = None, - ): - self.agentex_client = agentex_client - self.streaming_service = streaming_service - self.tracer = tracer - - def _extract_tool_call_info(self, tool_call_item: Any) -> tuple[str, str, dict[str, Any]]: - """ - Extract call_id, tool_name, and tool_arguments from a tool call item. - - Args: - tool_call_item: The tool call item to process - - Returns: - A tuple of (call_id, tool_name, tool_arguments) - """ - # Generic handling for different tool call types - # Try 'call_id' first, then 'id', then generate placeholder - if hasattr(tool_call_item, "call_id"): - call_id = tool_call_item.call_id - elif hasattr(tool_call_item, "id"): - call_id = tool_call_item.id - else: - call_id = f"unknown_call_{id(tool_call_item)}" - logger.warning( - f"Warning: Tool call item {type(tool_call_item)} has " - f"neither 'call_id' nor 'id' attribute, using placeholder: " - f"{call_id}" - ) - - if isinstance(tool_call_item, ResponseFunctionWebSearch): - tool_name = "web_search" - tool_arguments = {"action": tool_call_item.action.model_dump(), "status": tool_call_item.status} - elif isinstance(tool_call_item, ResponseCodeInterpreterToolCall): - tool_name = "code_interpreter" - tool_arguments = {"code": tool_call_item.code, "status": tool_call_item.status} - else: - # Generic handling for any tool call type - tool_name = getattr(tool_call_item, "name", type(tool_call_item).__name__) - tool_arguments = tool_call_item.model_dump() - - return call_id, tool_name, tool_arguments - - def _extract_tool_response_info(self, tool_call_map: dict[str, Any], tool_output_item: Any) -> tuple[str, str, str]: - """ - Extract call_id, tool_name, and content from a tool output item. 
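- Supports both mapping-style items (subscript access) and structured objects (attribute access with empty-string fallbacks).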
- - Args: - tool_call_map: Map of call_ids to tool_call items - tool_output_item: The tool output item to process - - Returns: - A tuple of (call_id, tool_name, content) - """ - # Extract call_id and content from the tool_output_item - # Handle both dictionary access and attribute access - if hasattr(tool_output_item, "get") and callable(tool_output_item.get): - # Dictionary-like access - call_id = tool_output_item["call_id"] - content = tool_output_item["output"] - else: - # Attribute access for structured objects - call_id = getattr(tool_output_item, "call_id", "") - content = getattr(tool_output_item, "output", "") - - # Get the name from the tool call map using generic approach - tool_call = tool_call_map[call_id] - if hasattr(tool_call, "name"): - tool_name = tool_call.name - elif hasattr(tool_call, "type"): - tool_name = tool_call.type - else: - tool_name = type(tool_call).__name__ - - return call_id, tool_name, content - - async def run_agent( - self, - input_list: list[dict[str, Any]], - mcp_server_params: list[StdioServerParameters], - agent_name: str, - agent_instructions: str, - trace_id: str | None = None, - parent_span_id: str | None = None, - handoff_description: str | None = None, - handoffs: list[BaseModel] | None = None, - model: str | None = None, - model_settings: BaseModel | None = None, - tools: list[BaseModel] | None = None, - output_type: type[Any] | None = None, - tool_use_behavior: ( - Literal["run_llm_again", "stop_on_first_tool"] | StopAtTools | ToolsToFinalOutputFunction - ) = "run_llm_again", - mcp_timeout_seconds: int | None = None, - input_guardrails: list[InputGuardrail] | None = None, - output_guardrails: list[OutputGuardrail] | None = None, - max_turns: int | None = None, - previous_response_id: str | None = None, # noqa: ARG002 - ) -> RunResult: - """ - Run an agent without streaming or TaskMessage creation. - - Args: - input_list: List of input data for the agent. - mcp_server_params: MCP server parameters for the agent. - agent_name: The name of the agent to run. - agent_instructions: Instructions for the agent. - trace_id: Optional trace ID for tracing. - parent_span_id: Optional parent span ID for tracing. - handoff_description: Optional description of the handoff. - handoffs: Optional list of handoffs. - model: Optional model to use. - model_settings: Optional model settings. - tools: Optional list of tools. - output_type: Optional output type. - tool_use_behavior: Optional tool use behavior. - mcp_timeout_seconds: Optional param to set the timeout threshold - for the MCP servers. Defaults to 5 seconds. - input_guardrails: Optional list of input guardrails to run on - initial user input. - output_guardrails: Optional list of output guardrails to run on - final agent output. - max_turns: Maximum number of turns the agent can take. Uses Runner's default if None. - Returns: - RunResult: The result of the agent run. 
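- - Example (illustrative sketch; the service instance and surrounding async context are assumed, and the identifiers shown are not part of this module): - result = await openai_service.run_agent( - input_list=[{"role": "user", "content": "Summarize the release notes"}], - mcp_server_params=[], - agent_name="summarizer", - agent_instructions="Answer in one paragraph.", - ) - print(result.final_output)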
- """ - redacted_params = redact_mcp_server_params(mcp_server_params) - - if self.tracer is None: - raise RuntimeError("Tracer not initialized - ensure tracer is provided to OpenAIService") - trace = self.tracer.trace(trace_id) - async with trace.span( - parent_id=parent_span_id, - name="run_agent", - input={ - "input_list": input_list, - "mcp_server_params": redacted_params, - "agent_name": agent_name, - "agent_instructions": agent_instructions, - "handoff_description": handoff_description, - "handoffs": handoffs, - "model": model, - "model_settings": model_settings, - "tools": tools, - "output_type": output_type, - "tool_use_behavior": tool_use_behavior, - "max_turns": max_turns, - }, - ) as span: - heartbeat_if_in_workflow("run agent") - - async with mcp_server_context(mcp_server_params, mcp_timeout_seconds) as servers: - tools = [ - tool.to_oai_function_tool() if hasattr(tool, 'to_oai_function_tool') else tool # type: ignore[attr-defined] - for tool in tools - ] if tools else [] - handoffs = [Agent(**handoff.model_dump()) for handoff in handoffs] if handoffs else [] # type: ignore[misc] - - agent_kwargs = { - "name": agent_name, - "instructions": agent_instructions, - "mcp_servers": servers, - "handoff_description": handoff_description, - "handoffs": handoffs, - "model": model, - "tools": tools, - "output_type": output_type, - "tool_use_behavior": tool_use_behavior, - } - if model_settings is not None: - agent_kwargs["model_settings"] = ( - model_settings.to_oai_model_settings() if hasattr(model_settings, 'to_oai_model_settings') # type: ignore[attr-defined] - else model_settings - ) - if input_guardrails is not None: - agent_kwargs["input_guardrails"] = input_guardrails - if output_guardrails is not None: - agent_kwargs["output_guardrails"] = output_guardrails - - agent = Agent(**agent_kwargs) - - # Run without streaming - if max_turns is not None and previous_response_id is not None: - result = await Runner.run( - starting_agent=agent, - input=input_list, - max_turns=max_turns, - previous_response_id=previous_response_id, - ) - elif max_turns is not None: - result = await Runner.run(starting_agent=agent, input=input_list, max_turns=max_turns) - elif previous_response_id is not None: - result = await Runner.run( - starting_agent=agent, input=input_list, previous_response_id=previous_response_id - ) - else: - result = await Runner.run(starting_agent=agent, input=input_list) - - if span: - span.output = { - "new_items": [ - item.raw_item.model_dump() if isinstance(item.raw_item, BaseModel) else item.raw_item - for item in result.new_items - ], - "final_output": result.final_output, - } - - return result - - async def run_agent_auto_send( - self, - task_id: str, - input_list: list[dict[str, Any]], - mcp_server_params: list[StdioServerParameters], - agent_name: str, - agent_instructions: str, - trace_id: str | None = None, - parent_span_id: str | None = None, - handoff_description: str | None = None, - handoffs: list[BaseModel] | None = None, - model: str | None = None, - model_settings: BaseModel | None = None, - tools: list[BaseModel] | None = None, - output_type: type[Any] | None = None, - tool_use_behavior: ( - Literal["run_llm_again", "stop_on_first_tool"] | StopAtTools | ToolsToFinalOutputFunction - ) = "run_llm_again", - mcp_timeout_seconds: int | None = None, - input_guardrails: list[InputGuardrail] | None = None, - output_guardrails: list[OutputGuardrail] | None = None, - max_turns: int | None = None, - previous_response_id: str | None = None, # noqa: ARG002 - ) -> RunResult: - 
""" - Run an agent with automatic TaskMessage creation. - - Args: - task_id: The ID of the task to run the agent for. - input_list: List of input data for the agent. - mcp_server_params: MCP server parameters for the agent. - agent_name: The name of the agent to run. - agent_instructions: Instructions for the agent. - trace_id: Optional trace ID for tracing. - parent_span_id: Optional parent span ID for tracing. - handoff_description: Optional description of the handoff. - handoffs: Optional list of handoffs. - model: Optional model to use. - model_settings: Optional model settings. - tools: Optional list of tools. - output_type: Optional output type. - tool_use_behavior: Optional tool use behavior. - mcp_timeout_seconds: Optional param to set the timeout threshold for the MCP servers. Defaults to 5 seconds. - input_guardrails: Optional list of input guardrails to run on initial user input. - output_guardrails: Optional list of output guardrails to run on final agent output. - max_turns: Maximum number of turns the agent can take. Uses Runner's default if None. - Returns: - SerializableRunResult: The result of the agent run. - """ - if self.streaming_service is None: - raise ValueError("StreamingService must be available for auto_send methods") - if self.agentex_client is None: - raise ValueError("Agentex client must be provided for auto_send methods") - - redacted_params = redact_mcp_server_params(mcp_server_params) - - if self.tracer is None: - raise RuntimeError("Tracer not initialized - ensure tracer is provided to OpenAIService") - trace = self.tracer.trace(trace_id) - async with trace.span( - parent_id=parent_span_id, - name="run_agent_auto_send", - input={ - "task_id": task_id, - "input_list": input_list, - "mcp_server_params": redacted_params, - "agent_name": agent_name, - "agent_instructions": agent_instructions, - "handoff_description": handoff_description, - "handoffs": handoffs, - "model": model, - "model_settings": model_settings, - "tools": tools, - "output_type": output_type, - "tool_use_behavior": tool_use_behavior, - "max_turns": max_turns, - }, - ) as span: - heartbeat_if_in_workflow("run agent auto send") - - async with mcp_server_context(mcp_server_params, mcp_timeout_seconds) as servers: - tools = [ - tool.to_oai_function_tool() if hasattr(tool, 'to_oai_function_tool') else tool # type: ignore[attr-defined] - for tool in tools - ] if tools else [] - handoffs = [Agent(**handoff.model_dump()) for handoff in handoffs] if handoffs else [] # type: ignore[misc] - agent_kwargs = { - "name": agent_name, - "instructions": agent_instructions, - "mcp_servers": servers, - "handoff_description": handoff_description, - "handoffs": handoffs, - "model": model, - "tools": tools, - "output_type": output_type, - "tool_use_behavior": tool_use_behavior, - } - if model_settings is not None: - agent_kwargs["model_settings"] = ( - model_settings.to_oai_model_settings() if hasattr(model_settings, 'to_oai_model_settings') # type: ignore[attr-defined] - else model_settings - ) - if input_guardrails is not None: - agent_kwargs["input_guardrails"] = input_guardrails - if output_guardrails is not None: - agent_kwargs["output_guardrails"] = output_guardrails - - agent = Agent(**agent_kwargs) - - # Run without streaming - if max_turns is not None and previous_response_id is not None: - result = await Runner.run( - starting_agent=agent, - input=input_list, - max_turns=max_turns, - previous_response_id=previous_response_id, - ) - elif max_turns is not None: - result = await 
Runner.run(starting_agent=agent, input=input_list, max_turns=max_turns) - elif previous_response_id is not None: - result = await Runner.run( - starting_agent=agent, input=input_list, previous_response_id=previous_response_id - ) - else: - result = await Runner.run(starting_agent=agent, input=input_list) - - if span: - span.output = { - "new_items": [ - item.raw_item.model_dump() if isinstance(item.raw_item, BaseModel) else item.raw_item - for item in result.new_items - ], - "final_output": result.final_output, - } - - tool_call_map: dict[str, Any] = {} - - for item in result.new_items: - if item.type == "message_output_item": - text_content = TextContent( - author="agent", - content=item.raw_item.content[0].text, # type: ignore[union-attr] - ) - # Create message for the final result using streaming context - async with self.streaming_service.streaming_task_message_context( - task_id=task_id, - initial_content=text_content, - ) as streaming_context: - await streaming_context.stream_update( - update=StreamTaskMessageFull( - parent_task_message=streaming_context.task_message, - content=text_content, - type="full", - ), - ) - - elif item.type == "tool_call_item": - tool_call_item = item.raw_item - - # Extract tool call information using the helper method - call_id, tool_name, tool_arguments = self._extract_tool_call_info(tool_call_item) - tool_call_map[call_id] = tool_call_item - - tool_request_content = ToolRequestContent( - author="agent", - tool_call_id=call_id, - name=tool_name, - arguments=tool_arguments, - ) - - # Create tool request using streaming context - async with self.streaming_service.streaming_task_message_context( - task_id=task_id, - initial_content=tool_request_content, - ) as streaming_context: - await streaming_context.stream_update( - update=StreamTaskMessageFull( - parent_task_message=streaming_context.task_message, - content=tool_request_content, - type="full", - ), - ) - - elif item.type == "tool_call_output_item": - tool_output_item = item.raw_item - - # Extract tool response information using the helper method - call_id, tool_name, content = self._extract_tool_response_info(tool_call_map, tool_output_item) - - tool_response_content = ToolResponseContent( - author="agent", - tool_call_id=call_id, - name=tool_name, - content=content, - ) - # Create tool response using streaming context - async with self.streaming_service.streaming_task_message_context( - task_id=task_id, initial_content=tool_response_content - ) as streaming_context: - await streaming_context.stream_update( - update=StreamTaskMessageFull( - parent_task_message=streaming_context.task_message, - content=tool_response_content, - type="full", - ), - ) - - # Convert to serializable result - return result - - async def run_agent_streamed( - self, - input_list: list[dict[str, Any]], - mcp_server_params: list[StdioServerParameters], - agent_name: str, - agent_instructions: str, - trace_id: str | None = None, - parent_span_id: str | None = None, - handoff_description: str | None = None, - handoffs: list[BaseModel] | None = None, - model: str | None = None, - model_settings: BaseModel | None = None, - tools: list[BaseModel] | None = None, - output_type: type[Any] | None = None, - tool_use_behavior: ( - Literal["run_llm_again", "stop_on_first_tool"] | StopAtTools | ToolsToFinalOutputFunction - ) = "run_llm_again", - mcp_timeout_seconds: int | None = None, - input_guardrails: list[InputGuardrail] | None = None, - output_guardrails: list[OutputGuardrail] | None = None, - max_turns: int | None = None, - 
previous_response_id: str | None = None, # noqa: ARG002 - ) -> RunResultStreaming: - """ - Run an agent with streaming enabled but no TaskMessage creation. - - Args: - input_list: List of input data for the agent. - mcp_server_params: MCP server parameters for the agent. - agent_name: The name of the agent to run. - agent_instructions: Instructions for the agent. - trace_id: Optional trace ID for tracing. - parent_span_id: Optional parent span ID for tracing. - handoff_description: Optional description of the handoff. - handoffs: Optional list of handoffs. - model: Optional model to use. - model_settings: Optional model settings. - tools: Optional list of tools. - output_type: Optional output type. - tool_use_behavior: Optional tool use behavior. - mcp_timeout_seconds: Optional param to set the timeout threshold - for the MCP servers. Defaults to 5 seconds. - input_guardrails: Optional list of input guardrails to run on - initial user input. - output_guardrails: Optional list of output guardrails to run on - final agent output. - max_turns: Maximum number of turns the agent can take. Uses Runner's default if None. - Returns: - RunResultStreaming: The result of the agent run with streaming. - """ - if self.tracer is None: - raise RuntimeError("Tracer not initialized - ensure tracer is provided to OpenAIService") - trace = self.tracer.trace(trace_id) - redacted_params = redact_mcp_server_params(mcp_server_params) - - async with trace.span( - parent_id=parent_span_id, - name="run_agent_streamed", - input={ - "input_list": input_list, - "mcp_server_params": redacted_params, - "agent_name": agent_name, - "agent_instructions": agent_instructions, - "handoff_description": handoff_description, - "handoffs": handoffs, - "model": model, - "model_settings": model_settings, - "tools": tools, - "output_type": output_type, - "tool_use_behavior": tool_use_behavior, - "max_turns": max_turns, - }, - ) as span: - heartbeat_if_in_workflow("run agent streamed") - - async with mcp_server_context(mcp_server_params, mcp_timeout_seconds) as servers: - tools = [ - tool.to_oai_function_tool() if hasattr(tool, 'to_oai_function_tool') else tool # type: ignore[attr-defined] - for tool in tools - ] if tools else [] - handoffs = [Agent(**handoff.model_dump()) for handoff in handoffs] if handoffs else [] # type: ignore[misc] - agent_kwargs = { - "name": agent_name, - "instructions": agent_instructions, - "mcp_servers": servers, - "handoff_description": handoff_description, - "handoffs": handoffs, - "model": model, - "tools": tools, - "output_type": output_type, - "tool_use_behavior": tool_use_behavior, - } - if model_settings is not None: - agent_kwargs["model_settings"] = ( - model_settings.to_oai_model_settings() if hasattr(model_settings, 'to_oai_model_settings') # type: ignore[attr-defined] - else model_settings - ) - if input_guardrails is not None: - agent_kwargs["input_guardrails"] = input_guardrails - if output_guardrails is not None: - agent_kwargs["output_guardrails"] = output_guardrails - - agent = Agent(**agent_kwargs) - - # Run with streaming (but no TaskMessage creation) - if max_turns is not None and previous_response_id is not None: - result = Runner.run_streamed( - starting_agent=agent, - input=input_list, - max_turns=max_turns, - previous_response_id=previous_response_id, - ) - elif max_turns is not None: - result = Runner.run_streamed(starting_agent=agent, input=input_list, 
max_turns=max_turns) - elif previous_response_id is not None: - result = Runner.run_streamed( - starting_agent=agent, input=input_list, previous_response_id=previous_response_id - ) - else: - result = Runner.run_streamed(starting_agent=agent, input=input_list) - - if span: - span.output = { - "new_items": [ - item.raw_item.model_dump() if isinstance(item.raw_item, BaseModel) else item.raw_item - for item in result.new_items - ], - "final_output": result.final_output, - } - - return result - - async def run_agent_streamed_auto_send( - self, - task_id: str, - input_list: list[dict[str, Any]], - mcp_server_params: list[StdioServerParameters], - agent_name: str, - agent_instructions: str, - trace_id: str | None = None, - parent_span_id: str | None = None, - handoff_description: str | None = None, - handoffs: list[BaseModel] | None = None, - model: str | None = None, - model_settings: BaseModel | None = None, - tools: list[BaseModel] | None = None, - output_type: type[Any] | None = None, - tool_use_behavior: ( - Literal["run_llm_again", "stop_on_first_tool"] | StopAtTools | ToolsToFinalOutputFunction - ) = "run_llm_again", - mcp_timeout_seconds: int | None = None, - input_guardrails: list[InputGuardrail] | None = None, - output_guardrails: list[OutputGuardrail] | None = None, - max_turns: int | None = None, - previous_response_id: str | None = None, # noqa: ARG002 - ) -> RunResultStreaming: - """ - Run an agent with streaming enabled and automatic TaskMessage creation. - - Args: - task_id: The ID of the task to run the agent for. - input_list: List of input data for the agent. - mcp_server_params: MCP server parameters for the agent. - agent_name: The name of the agent to run. - agent_instructions: Instructions for the agent. - trace_id: Optional trace ID for tracing. - parent_span_id: Optional parent span ID for tracing. - handoff_description: Optional description of the handoff. - handoffs: Optional list of handoffs. - model: Optional model to use. - model_settings: Optional model settings. - tools: Optional list of tools. - output_type: Optional output type. - tool_use_behavior: Optional tool use behavior. - mcp_timeout_seconds: Optional param to set the timeout threshold - for the MCP servers. Defaults to 5 seconds. - input_guardrails: Optional list of input guardrails to run on - initial user input. - output_guardrails: Optional list of output guardrails to run on - final agent output. - max_turns: Maximum number of turns the agent can take. Uses Runner's default if None. - - Returns: - RunResultStreaming: The result of the agent run with streaming. 
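- - Example (illustrative sketch; service construction, an existing task, and tracing setup are assumed): - result = await openai_service.run_agent_streamed_auto_send( - task_id=task.id, - input_list=[{"role": "user", "content": "What changed in this diff?"}], - mcp_server_params=[], - agent_name="assistant", - agent_instructions="Be concise.", - ) - # TaskMessages for text, tool calls, and reasoning are created and streamed as the run progresses. -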
- """ - if self.streaming_service is None: - raise ValueError("StreamingService must be available for auto_send methods") - if self.agentex_client is None: - raise ValueError("Agentex client must be provided for auto_send methods") - - tool_call_map: dict[str, ResponseFunctionToolCall] = {} - - if self.tracer is None: - raise RuntimeError("Tracer not initialized - ensure tracer is provided to OpenAIService") - trace = self.tracer.trace(trace_id) - redacted_params = redact_mcp_server_params(mcp_server_params) - - async with trace.span( - parent_id=parent_span_id, - name="run_agent_streamed_auto_send", - input={ - "task_id": task_id, - "input_list": input_list, - "mcp_server_params": redacted_params, - "agent_name": agent_name, - "agent_instructions": agent_instructions, - "handoff_description": handoff_description, - "handoffs": handoffs, - "model": model, - "model_settings": model_settings, - "tools": tools, - "output_type": output_type, - "tool_use_behavior": tool_use_behavior, - "max_turns": max_turns, - }, - ) as span: - heartbeat_if_in_workflow("run agent streamed auto send") - - async with mcp_server_context(mcp_server_params, mcp_timeout_seconds) as servers: - tools = [ - tool.to_oai_function_tool() if hasattr(tool, 'to_oai_function_tool') else tool # type: ignore[attr-defined] - for tool in tools - ] if tools else [] - handoffs = [Agent(**handoff.model_dump()) for handoff in handoffs] if handoffs else [] # type: ignore[misc] - agent_kwargs = { - "name": agent_name, - "instructions": agent_instructions, - "mcp_servers": servers, - "handoff_description": handoff_description, - "handoffs": handoffs, - "model": model, - "tools": tools, - "output_type": output_type, - "tool_use_behavior": tool_use_behavior, - } - if model_settings is not None: - agent_kwargs["model_settings"] = ( - model_settings.to_oai_model_settings() if hasattr(model_settings, 'to_oai_model_settings') # type: ignore[attr-defined] - else model_settings - ) - if input_guardrails is not None: - agent_kwargs["input_guardrails"] = input_guardrails - if output_guardrails is not None: - agent_kwargs["output_guardrails"] = output_guardrails - - agent = Agent(**agent_kwargs) - - # Run with streaming - if max_turns is not None: - result = Runner.run_streamed(starting_agent=agent, input=input_list, max_turns=max_turns) - else: - result = Runner.run_streamed(starting_agent=agent, input=input_list) - - item_id_to_streaming_context: dict[str, StreamingTaskMessageContext] = {} - unclosed_item_ids: set[str] = set() - # Simple string to accumulate reasoning summary - current_reasoning_summary: str = "" - - try: - # Process streaming events with TaskMessage creation - async for event in result.stream_events(): - heartbeat_if_in_workflow("processing stream event with auto send") - - if event.type == "run_item_stream_event": - if event.item.type == "tool_call_item": - tool_call_item = event.item.raw_item - - # Extract tool call information using the helper method - call_id, tool_name, tool_arguments = self._extract_tool_call_info(tool_call_item) - tool_call_map[call_id] = tool_call_item - - tool_request_content = ToolRequestContent( - author="agent", - tool_call_id=call_id, - name=tool_name, - arguments=tool_arguments, - ) - - # Create tool request using streaming context (immediate completion) - async with self.streaming_service.streaming_task_message_context( - task_id=task_id, - initial_content=tool_request_content, - ) as streaming_context: - # The message has already been persisted, but we still need to send an upda - await 
streaming_context.stream_update( - update=StreamTaskMessageFull( - parent_task_message=streaming_context.task_message, - content=tool_request_content, - type="full", - ), - ) - - elif event.item.type == "tool_call_output_item": - tool_output_item = event.item.raw_item - - # Extract tool response information using the helper method - call_id, tool_name, content = self._extract_tool_response_info( - tool_call_map, tool_output_item - ) - - tool_response_content = ToolResponseContent( - author="agent", - tool_call_id=call_id, - name=tool_name, - content=content, - ) - - # Create tool response using streaming context (immediate completion) - async with self.streaming_service.streaming_task_message_context( - task_id=task_id, initial_content=tool_response_content - ) as streaming_context: - # The message has already been persisted, but we still need to send an update - await streaming_context.stream_update( - update=StreamTaskMessageFull( - parent_task_message=streaming_context.task_message, - content=tool_response_content, - type="full", - ), - ) - - elif event.type == "raw_response_event": - if isinstance(event.data, ResponseTextDeltaEvent): - # Handle text delta - item_id = event.data.item_id - - # Check if we already have a streaming context for this item - if item_id not in item_id_to_streaming_context: - # Create a new streaming context for this item - streaming_context = self.streaming_service.streaming_task_message_context( - task_id=task_id, - initial_content=TextContent( - author="agent", - content="", - ), - ) - # Open the streaming context - item_id_to_streaming_context[item_id] = await streaming_context.open() - unclosed_item_ids.add(item_id) - else: - streaming_context = item_id_to_streaming_context[item_id] - - # Stream the delta through the streaming service - await streaming_context.stream_update( - update=StreamTaskMessageDelta( - parent_task_message=streaming_context.task_message, - delta=TextDelta(text_delta=event.data.delta, type="text"), - type="delta", - ), - ) - # Reasoning step one: new summary part added - elif isinstance(event.data, ResponseReasoningSummaryPartAddedEvent): - # We need to create a new streaming context for this reasoning item - item_id = event.data.item_id - - # Reset the reasoning summary string - current_reasoning_summary = "" - - streaming_context = self.streaming_service.streaming_task_message_context( - task_id=task_id, - initial_content=ReasoningContent( - author="agent", - summary=[], - content=[], - type="reasoning", - style="active", - ), - ) - - # Replace the existing streaming context (if it exists) - # Why do we replace? Cause all the reasoning parts use the same item_id! 
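- # Lifecycle for reasoning output (steps below): a PartAdded event opens a - # fresh context, TextDelta events append to the accumulated summary, and - # PartDone emits the final static content and closes the context. -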
item_id_to_streaming_context[item_id] = await streaming_context.open() - unclosed_item_ids.add(item_id) - - # Reasoning step two: handling summary text delta - elif isinstance(event.data, ResponseReasoningSummaryTextDeltaEvent): - item_id = event.data.item_id - # Accumulate the delta into the string - current_reasoning_summary += event.data.delta - streaming_context = item_id_to_streaming_context[item_id] - - # Stream the summary delta through the streaming service - await streaming_context.stream_update( - update=StreamTaskMessageDelta( - parent_task_message=streaming_context.task_message, - delta=ReasoningSummaryDelta( - summary_index=event.data.summary_index, - summary_delta=event.data.delta, - type="reasoning_summary", - ), - type="delta", - ), - ) - - # Reasoning step three: handling summary text done, closing the streaming context - elif isinstance(event.data, ResponseReasoningSummaryPartDoneEvent): - # Handle reasoning summary text completion - item_id = event.data.item_id - streaming_context = item_id_to_streaming_context[item_id] - - # Create the complete reasoning content with the accumulated summary - complete_reasoning_content = ReasoningContent( - author="agent", - summary=[current_reasoning_summary], - content=[], - type="reasoning", - style="static", - ) - - # Send a full message update with the complete reasoning content - await streaming_context.stream_update( - update=StreamTaskMessageFull( - parent_task_message=streaming_context.task_message, - content=complete_reasoning_content, - type="full", - ), - ) - - await streaming_context.close() - unclosed_item_ids.discard(item_id) - - - elif isinstance(event.data, ResponseOutputItemDoneEvent): - # Handle item completion - item_id = event.data.item.id - - # Finish the streaming context (sends DONE event and updates message) - if item_id in item_id_to_streaming_context: - streaming_context = item_id_to_streaming_context[item_id] - await streaming_context.close() - if item_id in unclosed_item_ids: - unclosed_item_ids.remove(item_id) - - elif isinstance(event.data, ResponseCompletedEvent): - # All items complete, finish all remaining streaming contexts for this session - # Create a copy to avoid modifying set during iteration - remaining_items = list(unclosed_item_ids) - for item_id in remaining_items: - if ( - item_id in unclosed_item_ids and item_id in item_id_to_streaming_context - ): # Check if still unclosed - streaming_context = item_id_to_streaming_context[item_id] - await streaming_context.close() - unclosed_item_ids.discard(item_id) - - except InputGuardrailTripwireTriggered as e: - # Handle guardrail trigger by sending a rejection message - rejection_message = "I'm sorry, but I cannot process this request due to a guardrail. Please try a different question." - - # Try to extract rejection message from the guardrail result - if hasattr(e, "guardrail_result") and hasattr(e.guardrail_result, "output"): - output_info = getattr(e.guardrail_result.output, "output_info", {}) - if isinstance(output_info, dict) and "rejection_message" in output_info: - rejection_message = output_info["rejection_message"] - elif hasattr(e.guardrail_result, "guardrail"): - # Fall back to using guardrail name if no custom message - triggered_guardrail_name = getattr(e.guardrail_result.guardrail, "name", None) - if triggered_guardrail_name: - rejection_message = f"I'm sorry, but I cannot process this request. The '{triggered_guardrail_name}' guardrail was triggered." 
- - # Create and send the rejection message as a TaskMessage - async with self.streaming_service.streaming_task_message_context( - task_id=task_id, - initial_content=TextContent( - author="agent", - content=rejection_message, - ), - ) as streaming_context: - # Send the full message - await streaming_context.stream_update( - update=StreamTaskMessageFull( - parent_task_message=streaming_context.task_message, - content=TextContent( - author="agent", - content=rejection_message, - ), - type="full", - ), - ) - - # Re-raise to let the activity handle it - raise - - except OutputGuardrailTripwireTriggered as e: - # Handle output guardrail trigger by sending a rejection message - rejection_message = "I'm sorry, but I cannot provide this response due to a guardrail. Please try a different question." - - # Try to extract rejection message from the guardrail result - if hasattr(e, "guardrail_result") and hasattr(e.guardrail_result, "output"): - output_info = getattr(e.guardrail_result.output, "output_info", {}) - if isinstance(output_info, dict) and "rejection_message" in output_info: - rejection_message = output_info["rejection_message"] - elif hasattr(e.guardrail_result, "guardrail"): - # Fall back to using guardrail name if no custom message - triggered_guardrail_name = getattr(e.guardrail_result.guardrail, "name", None) - if triggered_guardrail_name: - rejection_message = f"I'm sorry, but I cannot provide this response. The '{triggered_guardrail_name}' guardrail was triggered." - - # Create and send the rejection message as a TaskMessage - async with self.streaming_service.streaming_task_message_context( - task_id=task_id, - initial_content=TextContent( - author="agent", - content=rejection_message, - ), - ) as streaming_context: - # Send the full message - await streaming_context.stream_update( - update=StreamTaskMessageFull( - parent_task_message=streaming_context.task_message, - content=TextContent( - author="agent", - content=rejection_message, - ), - type="full", - ), - ) - - # Re-raise to let the activity handle it - raise - - finally: - # Cleanup: ensure all streaming contexts for this session are properly finished - # Create a copy to avoid modifying set during iteration - remaining_items = list(unclosed_item_ids) - for item_id in remaining_items: - if ( - item_id in unclosed_item_ids and item_id in item_id_to_streaming_context - ): # Check if still unclosed - streaming_context = item_id_to_streaming_context[item_id] - await streaming_context.close() - unclosed_item_ids.discard(item_id) - - if span: - span.output = { - "new_items": [ - item.raw_item.model_dump() if isinstance(item.raw_item, BaseModel) else item.raw_item - for item in result.new_items - ], - "final_output": result.final_output, - } - - return result diff --git a/src/agentex/lib/core/services/adk/providers/sgp.py b/src/agentex/lib/core/services/adk/providers/sgp.py deleted file mode 100644 index 69f765aa..00000000 --- a/src/agentex/lib/core/services/adk/providers/sgp.py +++ /dev/null @@ -1,101 +0,0 @@ -from __future__ import annotations - -import os -import base64 -import tempfile - -from scale_gp import SGPClient - -from agentex.lib.types.files import FileContentResponse -from agentex.lib.utils.logging import make_logger -from agentex.lib.utils.temporal import heartbeat_if_in_workflow -from agentex.lib.core.tracing.tracer import AsyncTracer - -logger = make_logger(__name__) - - -class SGPService: - def __init__(self, sgp_client: SGPClient, tracer: AsyncTracer): - self.sgp_client = sgp_client - self.tracer = tracer - - 
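For orientation, this is roughly how a caller might consume the `FileContentResponse` that the method below returns when assembling multimodal model input. A sketch only: the `input_file` payload shape follows the OpenAI Responses convention, and `sgp_service` plus the IDs are illustrative.

```python
# Sketch: turn the downloaded file into a Responses-style input part.
file_content = await sgp_service.download_file_content(
    file_id="file_123",        # illustrative ID
    filename="contract.pdf",
)

input_part = {
    "type": "input_file",
    "filename": "contract.pdf",
    # Data-URL form: data:<mime_type>;base64,<payload>
    "file_data": f"data:{file_content.mime_type};base64,{file_content.base64_content}",
}
```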
async def download_file_content( - self, - file_id: str, - filename: str, - trace_id: str | None = None, - parent_span_id: str | None = None, - ) -> FileContentResponse: - """ - Download file content from SGP. - - Args: - file_id: The ID of the file to download. - filename: The filename of the file to download. - trace_id: The trace ID for tracing. - parent_span_id: The parent span ID for tracing. - - Returns: - FileContentResponse with mime_type and base64_content for constructing LLM input. - """ - trace = self.tracer.trace(trace_id) - async with trace.span( - parent_id=parent_span_id, - name="download_file_content", - input={"file_id": file_id, "filename": filename}, - ) as span: - logger.info(f"Downloading file content for file_id: {file_id}") - heartbeat_if_in_workflow("downloading file content") - - # Get the SGP response - response = self.sgp_client.beta.files.content(file_id) - heartbeat_if_in_workflow("file content downloaded") - - # Determine mime type based on file extension - mime_type = "application/pdf" # Default - file_extension = os.path.splitext(filename)[1].lower() - if file_extension: - if file_extension == ".pdf": - mime_type = "application/pdf" - elif file_extension in [".doc", ".docx"]: - mime_type = "application/msword" - elif file_extension in [".txt", ".text"]: - mime_type = "text/plain" - elif file_extension in [".png"]: - mime_type = "image/png" - elif file_extension in [".jpg", ".jpeg"]: - mime_type = "image/jpeg" - - # Buffer the download in a named temporary file - with tempfile.NamedTemporaryFile(suffix=file_extension) as temp_file: - heartbeat_if_in_workflow(f"saving to temp file: {temp_file.name}") - - # Use write_to_file method if available - if hasattr(response, "write_to_file"): - response.write_to_file(temp_file.name) - else: - # Fallback to direct writing - content_bytes = response.read() - temp_file.write(content_bytes) - temp_file.flush() - - # Seek to beginning of file for reading - temp_file.seek(0) - - # Read the whole file back in binary mode - data = temp_file.read() - - # Encode to base64 - base64_content = base64.b64encode(data).decode("utf-8") - - result = FileContentResponse( - mime_type=mime_type, base64_content=base64_content - ) - - # Record metadata for tracing - span.output = { # type: ignore[union-attr] - "file_id": file_id, - "mime_type": result.mime_type, - "content_size": len(result.base64_content), - } - return result diff --git a/src/agentex/lib/core/services/adk/state.py b/src/agentex/lib/core/services/adk/state.py deleted file mode 100644 index 6b836453..00000000 --- a/src/agentex/lib/core/services/adk/state.py +++ /dev/null @@ -1,126 +0,0 @@ -from __future__ import annotations - -from typing import Any, Dict - -from agentex import AsyncAgentex -from agentex.types.state import State -from agentex.lib.utils.logging import make_logger -from agentex.lib.core.tracing.tracer import AsyncTracer - -logger = make_logger(__name__) - - -class StateService: - def __init__( - self, agentex_client: AsyncAgentex, tracer: AsyncTracer - ): - self._agentex_client = agentex_client - self._tracer = tracer - - async def create_state( - self, - task_id: str, - agent_id: str, - state: dict[str, Any], - trace_id: str | None = None, - parent_span_id: str | None = None, - ) -> State: - trace = self._tracer.trace(trace_id) - async with trace.span( - parent_id=parent_span_id, - name="create_state", - input={"task_id": task_id, "agent_id": agent_id, "state": state}, - ) as span: - state_model = await self._agentex_client.states.create( -
task_id=task_id, - agent_id=agent_id, - state=state, - ) - if span: - span.output = state_model.model_dump() - return state_model - - async def get_state( - self, - state_id: str | None = None, - task_id: str | None = None, - agent_id: str | None = None, - trace_id: str | None = None, - parent_span_id: str | None = None, - ) -> State | None: - trace = self._tracer.trace(trace_id) if self._tracer else None - if trace is None: - # No tracer configured; fall back to a direct lookup with the same semantics as the traced path - if state_id: - return await self._agentex_client.states.retrieve(state_id=state_id) - if task_id and agent_id: - states = await self._agentex_client.states.list(task_id=task_id, agent_id=agent_id) - return states[0] if states else None - raise ValueError("Must provide either state_id or both task_id and agent_id") - - async with trace.span( - parent_id=parent_span_id, - name="get_state", - input={ - "state_id": state_id, - "task_id": task_id, - "agent_id": agent_id, - }, - ) as span: - if state_id: - state = await self._agentex_client.states.retrieve(state_id=state_id) - elif task_id and agent_id: - states = await self._agentex_client.states.list( - task_id=task_id, - agent_id=agent_id, - ) - state = states[0] if states else None - else: - raise ValueError( - "Must provide either state_id or both task_id and agent_id" - ) - if span: - span.output = state.model_dump() if state else None - return state - - async def update_state( - self, - state_id: str, - task_id: str, - agent_id: str, - state: Dict[str, object], - trace_id: str | None = None, - parent_span_id: str | None = None, - ) -> State: - trace = self._tracer.trace(trace_id) - async with trace.span( - parent_id=parent_span_id, - name="update_state", - input={ - "state_id": state_id, - "task_id": task_id, - "agent_id": agent_id, - "state": state, - }, - ) as span: - state_model = await self._agentex_client.states.update( - state_id=state_id, - task_id=task_id, - agent_id=agent_id, - state=state, - ) - if span: - span.output = state_model.model_dump() - return state_model - - async def delete_state( - self, - state_id: str, - trace_id: str | None = None, - parent_span_id: str | None = None, - ) -> State: - trace = self._tracer.trace(trace_id) - async with trace.span( - parent_id=parent_span_id, - name="delete_state", - input={"state_id": state_id}, - ) as span: - state = await self._agentex_client.states.delete(state_id) - if span: - span.output = state.model_dump() - return state diff --git a/src/agentex/lib/core/services/adk/streaming.py b/src/agentex/lib/core/services/adk/streaming.py deleted file mode 100644 index 9fc3fc95..00000000 --- a/src/agentex/lib/core/services/adk/streaming.py +++ /dev/null @@ -1,320 +0,0 @@ -from __future__ import annotations - -import json -from typing import Literal - -from agentex import AsyncAgentex -from agentex.lib.utils.logging import make_logger -from agentex.types.data_content import DataContent -from agentex.types.task_message import ( - TaskMessage, - TaskMessageContent, -) -from agentex.types.text_content import TextContent -from agentex.types.reasoning_content import ReasoningContent -from agentex.types.task_message_delta import ( - DataDelta, - TextDelta, - ToolRequestDelta, - ToolResponseDelta, - ReasoningContentDelta, - ReasoningSummaryDelta, -) -from agentex.types.task_message_update import ( - TaskMessageDelta, - TaskMessageUpdate, - StreamTaskMessageDone, - StreamTaskMessageFull, - StreamTaskMessageDelta, - StreamTaskMessageStart, -) -from agentex.types.tool_request_content import ToolRequestContent -from agentex.types.tool_response_content import ToolResponseContent -from agentex.lib.core.adapters.streams.port import StreamRepository - -logger = make_logger(__name__) - - -def _get_stream_topic(task_id: str) -> str: - return f"task:{task_id}" - - -class
DeltaAccumulator: - def __init__(self): - self._accumulated_deltas: list[TaskMessageDelta] = [] - self._delta_type: Literal["text", "data", "tool_request", "tool_response", "reasoning"] | None = None - # For reasoning, we need to track both summary and content deltas - self._reasoning_summaries: dict[int, str] = {} - self._reasoning_contents: dict[int, str] = {} - - def add_delta(self, delta: TaskMessageDelta): - if self._delta_type is None: - if delta.type == "text": - self._delta_type = "text" - elif delta.type == "data": - self._delta_type = "data" - elif delta.type == "tool_request": - self._delta_type = "tool_request" - elif delta.type == "tool_response": - self._delta_type = "tool_response" - elif delta.type in ["reasoning_summary", "reasoning_content"]: - self._delta_type = "reasoning" - else: - raise ValueError(f"Unknown delta type: {delta.type}") - else: - # For reasoning, we allow both summary and content deltas - if self._delta_type == "reasoning": - if delta.type not in ["reasoning_summary", "reasoning_content"]: - raise ValueError( - f"Expected reasoning delta but got: {delta.type}" - ) - elif self._delta_type != delta.type: - raise ValueError( - f"Delta type mismatch: {self._delta_type} != {delta.type}" - ) - - # Handle reasoning deltas specially - if delta.type == "reasoning_summary": - if isinstance(delta, ReasoningSummaryDelta): - if delta.summary_index not in self._reasoning_summaries: - self._reasoning_summaries[delta.summary_index] = "" - self._reasoning_summaries[delta.summary_index] += delta.summary_delta or "" - elif delta.type == "reasoning_content": - if isinstance(delta, ReasoningContentDelta): - if delta.content_index not in self._reasoning_contents: - self._reasoning_contents[delta.content_index] = "" - self._reasoning_contents[delta.content_index] += delta.content_delta or "" - else: - self._accumulated_deltas.append(delta) - - def convert_to_content(self) -> TaskMessageContent: - if self._delta_type == "text": - # Type assertion: we know all deltas are TextDelta when _delta_type is TEXT - text_deltas = [delta for delta in self._accumulated_deltas if isinstance(delta, TextDelta)] - text_content_str = "".join( - [delta.text_delta or "" for delta in text_deltas] - ) - return TextContent( - author="agent", - content=text_content_str, - ) - elif self._delta_type == "data": - # Type assertion: we know all deltas are DataDelta when _delta_type is DATA - data_deltas = [delta for delta in self._accumulated_deltas if isinstance(delta, DataDelta)] - data_content_str = "".join( - [delta.data_delta or "" for delta in data_deltas] - ) - try: - data = json.loads(data_content_str) - except json.JSONDecodeError as e: - raise ValueError( - f"Accumulated data content is not valid JSON: {data_content_str}" - ) from e - return DataContent( - author="agent", - data=data, - ) - elif self._delta_type == "tool_request": - # Type assertion: we know all deltas are ToolRequestDelta when _delta_type is TOOL_REQUEST - tool_request_deltas = [delta for delta in self._accumulated_deltas if isinstance(delta, ToolRequestDelta)] - arguments_content_str = "".join( - [delta.arguments_delta or "" for delta in tool_request_deltas] - ) - try: - arguments = json.loads(arguments_content_str) - except json.JSONDecodeError as e: - raise ValueError( - f"Accumulated tool request arguments are not valid JSON: {arguments_content_str}" - ) from e - return ToolRequestContent( - author="agent", - tool_call_id=tool_request_deltas[0].tool_call_id, - name=tool_request_deltas[0].name, - arguments=arguments, - ) -
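# Note: tool-request arguments stream in as raw JSON text fragments, so the
# accumulator concatenates them verbatim and parses once the full argument
# string is available; parsing per delta would fail on incomplete JSON.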
elif self._delta_type == "tool_response": - # Type assertion: we know all deltas are ToolResponseDelta when _delta_type is TOOL_RESPONSE - tool_response_deltas = [delta for delta in self._accumulated_deltas if isinstance(delta, ToolResponseDelta)] - tool_response_content_str = "".join( - [delta.content_delta or "" for delta in tool_response_deltas] - ) - return ToolResponseContent( - author="agent", - tool_call_id=tool_response_deltas[0].tool_call_id, - name=tool_response_deltas[0].name, - content=tool_response_content_str, - ) - elif self._delta_type == "reasoning": - # Convert accumulated reasoning deltas to ReasoningContent - # Sort by index to maintain order - summary_list = [self._reasoning_summaries[i] for i in sorted(self._reasoning_summaries.keys()) if self._reasoning_summaries[i]] - content_list = [self._reasoning_contents[i] for i in sorted(self._reasoning_contents.keys()) if self._reasoning_contents[i]] - - # Only return reasoning content if we have non-empty summaries or content - if summary_list or content_list: - return ReasoningContent( - author="agent", - summary=summary_list, - content=content_list if content_list else None, - type="reasoning", - style="static", - ) - else: - # Return empty text content instead of empty reasoning - return TextContent( - author="agent", - content="", - ) - else: - raise ValueError(f"Unknown delta type: {self._delta_type}") - - -class StreamingTaskMessageContext: - def __init__( - self, - task_id: str, - initial_content: TaskMessageContent, - agentex_client: AsyncAgentex, - streaming_service: "StreamingService", - ): - self.task_id = task_id - self.initial_content = initial_content - self.task_message: TaskMessage | None = None - self._agentex_client = agentex_client - self._streaming_service = streaming_service - self._is_closed = False - self._delta_accumulator = DeltaAccumulator() - - async def __aenter__(self) -> "StreamingTaskMessageContext": - return await self.open() - - async def __aexit__(self, exc_type, exc_val, exc_tb): - # Do not return the TaskMessage here: a truthy return from __aexit__ would suppress in-flight exceptions - await self.close() - return False - - async def open(self) -> "StreamingTaskMessageContext": - self._is_closed = False - - self.task_message = await self._agentex_client.messages.create( - task_id=self.task_id, - content=self.initial_content.model_dump(), - streaming_status="IN_PROGRESS", - ) - - # Send the START event - start_event = StreamTaskMessageStart( - parent_task_message=self.task_message, - content=self.initial_content, - type="start", - ) - await self._streaming_service.stream_update(start_event) - - return self - - async def close(self) -> TaskMessage: - """Close the streaming context.""" - if not self.task_message: - raise ValueError("Context not properly initialized - no task message") - - if self._is_closed: - return self.task_message # Already done - - # Send the DONE event - done_event = StreamTaskMessageDone( - parent_task_message=self.task_message, - type="done", - ) - await self._streaming_service.stream_update(done_event) - - # Update the task message with the final content - has_deltas = ( - self._delta_accumulator._accumulated_deltas or - self._delta_accumulator._reasoning_summaries or - self._delta_accumulator._reasoning_contents - ) - if has_deltas: - self.task_message.content = self._delta_accumulator.convert_to_content() - - await self._agentex_client.messages.update( - task_id=self.task_id, - message_id=self.task_message.id, - content=self.task_message.content.model_dump(), - streaming_status="DONE", - ) - - # Mark the context as done - self._is_closed = True - return self.task_message - -
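To make the accumulate-then-persist flow concrete, here is the DeltaAccumulator contract in isolation (a sketch using the types defined in this file; the text fragments are illustrative):

```python
# Sketch: deltas are buffered as they stream in, then collapsed into
# final message content when the context closes.
acc = DeltaAccumulator()
acc.add_delta(TextDelta(text_delta="Hel", type="text"))
acc.add_delta(TextDelta(text_delta="lo!", type="text"))

content = acc.convert_to_content()
assert isinstance(content, TextContent)
assert content.content == "Hello!"

# The first delta pins the accumulator's type; mixing kinds raises:
# acc.add_delta(DataDelta(data_delta="{}", type="data"))  # ValueError
```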
async def stream_update( - self, update: TaskMessageUpdate - ) -> TaskMessageUpdate | None: - """Stream an update to the repository.""" - if self._is_closed: - raise ValueError("Context is already done") - - if not self.task_message: - raise ValueError("Context not properly initialized - no task message") - - if isinstance(update, StreamTaskMessageDelta): - if update.delta is not None: - self._delta_accumulator.add_delta(update.delta) - - result = await self._streaming_service.stream_update(update) - - if isinstance(update, StreamTaskMessageDone): - await self.close() - return update - elif isinstance(update, StreamTaskMessageFull): - await self._agentex_client.messages.update( - task_id=self.task_id, - message_id=update.parent_task_message.id, # type: ignore[union-attr] - content=update.content.model_dump(), - streaming_status="DONE", - ) - self._is_closed = True - return result - - -class StreamingService: - def __init__( - self, - agentex_client: AsyncAgentex, - stream_repository: StreamRepository, - ): - self._agentex_client = agentex_client - self._stream_repository = stream_repository - - def streaming_task_message_context( - self, - task_id: str, - initial_content: TaskMessageContent, - ) -> StreamingTaskMessageContext: - return StreamingTaskMessageContext( - task_id=task_id, - initial_content=initial_content, - agentex_client=self._agentex_client, - streaming_service=self, - ) - - async def stream_update( - self, update: TaskMessageUpdate - ) -> TaskMessageUpdate | None: - """ - Stream an update to the repository. - - Args: - update: The update to stream - - Returns: - The update if it was streamed successfully, None otherwise. - """ - stream_topic = _get_stream_topic(update.parent_task_message.task_id) # type: ignore[union-attr] - - try: - await self._stream_repository.send_event( - topic=stream_topic, event=update.model_dump(mode="json") # type: ignore - ) - return update - except Exception as e: - logger.exception(f"Failed to stream event: {e}") - return None diff --git a/src/agentex/lib/core/services/adk/tasks.py b/src/agentex/lib/core/services/adk/tasks.py deleted file mode 100644 index 3f87f46f..00000000 --- a/src/agentex/lib/core/services/adk/tasks.py +++ /dev/null @@ -1,79 +0,0 @@ -from __future__ import annotations - -from agentex import AsyncAgentex -from agentex.types.task import Task -from agentex.types.shared import DeleteResponse -from agentex.lib.utils.logging import make_logger -from agentex.lib.utils.temporal import heartbeat_if_in_workflow -from agentex.lib.core.tracing.tracer import AsyncTracer -from agentex.types.task_retrieve_response import TaskRetrieveResponse -from agentex.types.task_retrieve_by_name_response import TaskRetrieveByNameResponse - -logger = make_logger(__name__) - - -class TasksService: - def __init__( - self, - agentex_client: AsyncAgentex, - tracer: AsyncTracer, - ): - self._agentex_client = agentex_client - self._tracer = tracer - - async def get_task( - self, - task_id: str | None = None, - task_name: str | None = None, - trace_id: str | None = None, - parent_span_id: str | None = None, - ) -> TaskRetrieveResponse | TaskRetrieveByNameResponse: - trace = self._tracer.trace(trace_id) - async with trace.span( - parent_id=parent_span_id, - name="get_task", - input={"task_id": task_id, "task_name": task_name}, - ) as span: - heartbeat_if_in_workflow("get task") - if not task_id and not task_name: - raise ValueError("Either task_id or task_name must be provided.") - if task_id: - task_model = await
self._agentex_client.tasks.retrieve(task_id=task_id) - elif task_name: - task_model = await self._agentex_client.tasks.retrieve_by_name(task_name=task_name) - else: - raise ValueError("Either task_id or task_name must be provided.") - if span: - span.output = task_model.model_dump() - return task_model - - async def delete_task( - self, - task_id: str | None = None, - task_name: str | None = None, - trace_id: str | None = None, - parent_span_id: str | None = None, - ) -> Task | DeleteResponse: - trace = self._tracer.trace(trace_id) if self._tracer else None - if trace is None: - # No tracer configured; fall back to direct calls with the same semantics as the traced path - if task_id: - return await self._agentex_client.tasks.delete(task_id=task_id) - if task_name: - return await self._agentex_client.tasks.delete_by_name(task_name=task_name) - raise ValueError("Either task_id or task_name must be provided.") - - async with trace.span( - parent_id=parent_span_id, - name="delete_task", - input={"task_id": task_id, "task_name": task_name}, - ) as span: - heartbeat_if_in_workflow("delete task") - if not task_id and not task_name: - raise ValueError("Either task_id or task_name must be provided.") - if task_id: - task_model = await self._agentex_client.tasks.delete(task_id=task_id) - elif task_name: - task_model = await self._agentex_client.tasks.delete_by_name(task_name=task_name) - else: - raise ValueError("Either task_id or task_name must be provided.") - if span: - span.output = task_model.model_dump() - return task_model diff --git a/src/agentex/lib/core/services/adk/tracing.py b/src/agentex/lib/core/services/adk/tracing.py deleted file mode 100644 index 7e55c750..00000000 --- a/src/agentex/lib/core/services/adk/tracing.py +++ /dev/null @@ -1,39 +0,0 @@ -from __future__ import annotations - -from typing import Any - -from agentex.types.span import Span -from agentex.lib.utils.logging import make_logger -from agentex.lib.utils.temporal import heartbeat_if_in_workflow -from agentex.lib.utils.model_utils import BaseModel -from agentex.lib.core.tracing.tracer import AsyncTracer - -logger = make_logger(__name__) - - -class TracingService: - def __init__(self, tracer: AsyncTracer): - self._tracer = tracer - - async def start_span( - self, - trace_id: str, - name: str, - parent_id: str | None = None, - input: list[Any] | dict[str, Any] | BaseModel | None = None, - data: list[Any] | dict[str, Any] | BaseModel | None = None, - ) -> Span | None: - trace = self._tracer.trace(trace_id) - async with trace.span( - parent_id=parent_id, - name=name, - input=input or {}, - data=data, - ) as span: - heartbeat_if_in_workflow("start span") - return span if span else None - - async def end_span(self, trace_id: str, span: Span) -> Span: - trace = self._tracer.trace(trace_id) - await trace.end_span(span) - return span diff --git a/src/agentex/lib/core/services/adk/utils/__init__.py b/src/agentex/lib/core/services/adk/utils/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/agentex/lib/core/services/adk/utils/templating.py b/src/agentex/lib/core/services/adk/utils/templating.py deleted file mode 100644 index 1cd0ebbf..00000000 --- a/src/agentex/lib/core/services/adk/utils/templating.py +++ /dev/null @@ -1,62 +0,0 @@ -from __future__ import annotations - -from typing import Any -from datetime import datetime - -from jinja2 import BaseLoader, Environment - -from agentex.lib.utils.temporal import heartbeat_if_in_workflow -from agentex.lib.core.tracing.tracer import AsyncTracer - -# Create a Jinja environment -JINJA_ENV = Environment( - loader=BaseLoader(), - trim_blocks=True, - lstrip_blocks=True, - extensions=["jinja2.ext.do"], -) - - -class TemplatingService: - def __init__(self, tracer:
AsyncTracer | None = None): - self.tracer = tracer - - async def render_jinja( - self, - template: str, - variables: dict[str, Any], - trace_id: str | None = None, - parent_span_id: str | None = None, - ) -> str: - """ - Activity that renders a Jinja template with the provided data. - - Args: - template: The template string to render. - variables: The variables to render the template with. - trace_id: The trace ID for tracing. - parent_span_id: The parent span ID for tracing. - - Returns: - The rendered template as a string - """ - if self.tracer is None: - raise RuntimeError("Tracer not initialized - ensure tracer is provided to TemplatingService") - trace = self.tracer.trace(trace_id) - async with trace.span( - parent_id=parent_span_id, - name="render_jinja", - input={"template": template, "variables": variables}, - ) as span: - heartbeat_if_in_workflow("render jinja") - global_variables = { - "datetime": datetime, - } - jinja_template = JINJA_ENV.from_string(template, globals=global_variables) - try: - rendered_template = jinja_template.render(variables) - if span: - span.output = {"jinja_output": rendered_template} - return rendered_template - except Exception as e: - raise ValueError(f"Error rendering Jinja template: {str(e)}") from e diff --git a/src/agentex/lib/core/temporal/__init__.py b/src/agentex/lib/core/temporal/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/agentex/lib/core/temporal/activities/__init__.py b/src/agentex/lib/core/temporal/activities/__init__.py deleted file mode 100644 index 17792227..00000000 --- a/src/agentex/lib/core/temporal/activities/__init__.py +++ /dev/null @@ -1,211 +0,0 @@ -import httpx -from scale_gp import SGPClient, SGPClientError - -from agentex import AsyncAgentex # noqa: F401 -from agentex.lib.core.tracing import AsyncTracer -from agentex.lib.core.services.adk.state import StateService -from agentex.lib.core.services.adk.tasks import TasksService -from agentex.lib.core.services.adk.events import EventsService -from agentex.lib.adk.utils._modules.client import create_async_agentex_client -from agentex.lib.core.services.adk.acp.acp import ACPService -from agentex.lib.core.services.adk.tracing import TracingService -from agentex.lib.core.services.adk.messages import MessagesService -from agentex.lib.core.services.adk.streaming import StreamingService -from agentex.lib.core.services.adk.providers.sgp import SGPService -from agentex.lib.core.adapters.llm.adapter_litellm import LiteLLMGateway -from agentex.lib.core.services.adk.providers.openai import OpenAIService -from agentex.lib.core.services.adk.utils.templating import TemplatingService -from agentex.lib.core.adapters.streams.adapter_redis import RedisStreamRepository -from agentex.lib.core.services.adk.providers.litellm import LiteLLMService -from agentex.lib.core.services.adk.agent_task_tracker import AgentTaskTrackerService -from agentex.lib.core.temporal.activities.adk.state_activities import StateActivities -from agentex.lib.core.temporal.activities.adk.tasks_activities import TasksActivities -from agentex.lib.core.temporal.activities.adk.events_activities import EventsActivities -from agentex.lib.core.temporal.activities.adk.acp.acp_activities import ACPActivities -from agentex.lib.core.temporal.activities.adk.tracing_activities import TracingActivities -from agentex.lib.core.temporal.activities.adk.messages_activities import MessagesActivities -from agentex.lib.core.temporal.activities.adk.streaming_activities import ( - StreamingActivities, -) -from 
agentex.lib.core.temporal.activities.adk.providers.sgp_activities import SGPActivities -from agentex.lib.core.temporal.activities.adk.providers.openai_activities import ( - OpenAIActivities, -) -from agentex.lib.core.temporal.activities.adk.utils.templating_activities import ( - TemplatingActivities, -) -from agentex.lib.core.temporal.activities.adk.providers.litellm_activities import ( - LiteLLMActivities, -) -from agentex.lib.core.temporal.activities.adk.agent_task_tracker_activities import ( - AgentTaskTrackerActivities, -) - - -def get_all_activities(sgp_client=None): - """ - Returns a list of all standard activity functions that can be directly passed to worker.run(). - - Args: - sgp_client: Optional SGP client instance. If not provided, a default client is constructed; if construction fails, SGP activities are omitted. - - Returns: - list: A list of activity functions ready to be passed to worker.run() - """ - # Initialize common dependencies, honoring a caller-supplied SGP client - if sgp_client is None: - try: - sgp_client = SGPClient() - except SGPClientError: - sgp_client = None - - llm_gateway = LiteLLMGateway() - stream_repository = RedisStreamRepository() - agentex_client = create_async_agentex_client( - timeout=httpx.Timeout(timeout=1000), - ) - tracer = AsyncTracer(agentex_client) - - # Services - - ## ADK - streaming_service = StreamingService( - agentex_client=agentex_client, - stream_repository=stream_repository, - ) - messages_service = MessagesService( - agentex_client=agentex_client, - streaming_service=streaming_service, - tracer=tracer, - ) - events_service = EventsService( - agentex_client=agentex_client, - tracer=tracer, - ) - agent_task_tracker_service = AgentTaskTrackerService( - agentex_client=agentex_client, - tracer=tracer, - ) - state_service = StateService( - agentex_client=agentex_client, - tracer=tracer, - ) - tasks_service = TasksService( - agentex_client=agentex_client, - tracer=tracer, - ) - tracing_service = TracingService( - tracer=tracer, - ) - - ## ACP - acp_service = ACPService( - agentex_client=agentex_client, - tracer=tracer, - ) - - ## Providers - litellm_service = LiteLLMService( - agentex_client=agentex_client, - llm_gateway=llm_gateway, - streaming_service=streaming_service, - tracer=tracer, - ) - openai_service = OpenAIService( - agentex_client=agentex_client, - streaming_service=streaming_service, - tracer=tracer, - ) - sgp_service = None - if sgp_client is not None: - sgp_service = SGPService( - sgp_client=sgp_client, - tracer=tracer, - ) - - ## Utils - templating_service = TemplatingService( - tracer=tracer, - ) - - # ADK - - ## Core activities - messages_activities = MessagesActivities(messages_service=messages_service) - events_activities = EventsActivities(events_service=events_service) - agent_task_tracker_activities = AgentTaskTrackerActivities( - agent_task_tracker_service=agent_task_tracker_service - ) - state_activities = StateActivities(state_service=state_service) - streaming_activities = StreamingActivities(streaming_service=streaming_service) - tasks_activities = TasksActivities(tasks_service=tasks_service) - tracing_activities = TracingActivities(tracing_service=tracing_service) - - ## ACP - acp_activities = ACPActivities(acp_service=acp_service) - - ## Providers - litellm_activities = LiteLLMActivities(litellm_service=litellm_service) - openai_activities = OpenAIActivities(openai_service=openai_service) - if sgp_client is not None: - sgp_activities = SGPActivities(sgp_service=sgp_service) - else: - sgp_activities = None - - ## Utils - templating_activities = TemplatingActivities(templating_service=templating_service) - - # Build list
of standard activities - activities = [ - # Core activities - ## Messages activities - messages_activities.create_message, - messages_activities.update_message, - messages_activities.create_messages_batch, - messages_activities.update_messages_batch, - messages_activities.list_messages, - ## Events activities - events_activities.get_event, - events_activities.list_events, - ## Agent Task Tracker activities - agent_task_tracker_activities.get_agent_task_tracker, - agent_task_tracker_activities.get_agent_task_tracker_by_task_and_agent, - agent_task_tracker_activities.update_agent_task_tracker, - ## State activities - state_activities.create_state, - state_activities.get_state, - state_activities.update_state, - state_activities.delete_state, - ## Streaming activities - streaming_activities.stream_update, - ## Tasks activities - tasks_activities.get_task, - tasks_activities.delete_task, - ## Tracing activities - tracing_activities.start_span, - tracing_activities.end_span, - # ACP activities - acp_activities.task_create, - acp_activities.message_send, - acp_activities.event_send, - acp_activities.task_cancel, - # Providers - ## LiteLLM activities - litellm_activities.chat_completion, - litellm_activities.chat_completion_auto_send, - litellm_activities.chat_completion_stream_auto_send, - ## OpenAI activities - openai_activities.run_agent, - openai_activities.run_agent_auto_send, - openai_activities.run_agent_streamed_auto_send, - # Utils - templating_activities.render_jinja, - ] - - # SGP activities - if sgp_client is not None: - sgp_all_activities = [ - sgp_activities.download_file_content, # type: ignore[union-attr] - ] - activities.extend(sgp_all_activities) - - return activities diff --git a/src/agentex/lib/core/temporal/activities/activity_helpers.py b/src/agentex/lib/core/temporal/activities/activity_helpers.py deleted file mode 100644 index 53ec3a45..00000000 --- a/src/agentex/lib/core/temporal/activities/activity_helpers.py +++ /dev/null @@ -1,35 +0,0 @@ -from __future__ import annotations - -from typing import Any, TypeVar -from datetime import timedelta - -from pydantic import TypeAdapter -from temporalio import workflow -from temporalio.common import RetryPolicy - -from agentex.lib.utils.model_utils import BaseModel - -T = TypeVar("T", bound="BaseModel") - - -class ActivityHelpers: - @staticmethod - async def execute_activity( - activity_name: str, - request: BaseModel | str | int | float | bool | dict[str, Any] | list[Any], - response_type: Any, - start_to_close_timeout: timedelta | None = None, - heartbeat_timeout: timedelta | None = None, - retry_policy: RetryPolicy | None = None, - ) -> Any: - - response = await workflow.execute_activity( - activity=activity_name, - arg=request, - start_to_close_timeout=start_to_close_timeout, - retry_policy=retry_policy, - heartbeat_timeout=heartbeat_timeout, - ) - - adapter = TypeAdapter(response_type) - return adapter.validate_python(response) diff --git a/src/agentex/lib/core/temporal/activities/adk/__init__.py b/src/agentex/lib/core/temporal/activities/adk/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/agentex/lib/core/temporal/activities/adk/acp/__init__.py b/src/agentex/lib/core/temporal/activities/adk/acp/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/agentex/lib/core/temporal/activities/adk/acp/acp_activities.py b/src/agentex/lib/core/temporal/activities/adk/acp/acp_activities.py deleted file mode 100644 index 634892ec..00000000 --- 
a/src/agentex/lib/core/temporal/activities/adk/acp/acp_activities.py +++ /dev/null @@ -1,108 +0,0 @@ -from __future__ import annotations - -from enum import Enum -from typing import Any, List - -from temporalio import activity - -from agentex.types.task import Task -from agentex.types.event import Event -from agentex.lib.types.tracing import BaseModelWithTraceParams -from agentex.lib.utils.logging import make_logger -from agentex.types.task_message import TaskMessage -from agentex.types.task_message_content import TaskMessageContent -from agentex.lib.core.services.adk.acp.acp import ACPService - -logger = make_logger(__name__) - - -class ACPActivityName(str, Enum): - TASK_CREATE = "task-create" - MESSAGE_SEND = "message-send" - EVENT_SEND = "event-send" - TASK_CANCEL = "task-cancel" - - -class TaskCreateParams(BaseModelWithTraceParams): - name: str | None = None - agent_id: str | None = None - agent_name: str | None = None - params: dict[str, Any] | None = None - request: dict[str, Any] | None = None - - -class MessageSendParams(BaseModelWithTraceParams): - agent_id: str | None = None - agent_name: str | None = None - task_id: str | None = None - content: TaskMessageContent - request: dict[str, Any] | None = None - - -class EventSendParams(BaseModelWithTraceParams): - agent_id: str | None = None - agent_name: str | None = None - task_id: str | None = None - content: TaskMessageContent - request: dict[str, Any] | None = None - - -class TaskCancelParams(BaseModelWithTraceParams): - task_id: str | None = None - task_name: str | None = None - agent_id: str | None = None - agent_name: str | None = None - request: dict[str, Any] | None = None - - -class ACPActivities: - def __init__(self, acp_service: ACPService): - self._acp_service = acp_service - - @activity.defn(name=ACPActivityName.TASK_CREATE) - async def task_create(self, params: TaskCreateParams) -> Task: - return await self._acp_service.task_create( - name=params.name, - agent_id=params.agent_id, - agent_name=params.agent_name, - params=params.params, - trace_id=params.trace_id, - parent_span_id=params.parent_span_id, - request=params.request, - ) - - @activity.defn(name=ACPActivityName.MESSAGE_SEND) - async def message_send(self, params: MessageSendParams) -> List[TaskMessage]: - return await self._acp_service.message_send( - agent_id=params.agent_id, - agent_name=params.agent_name, - task_id=params.task_id, - content=params.content, - trace_id=params.trace_id, - parent_span_id=params.parent_span_id, - request=params.request, - ) - - @activity.defn(name=ACPActivityName.EVENT_SEND) - async def event_send(self, params: EventSendParams) -> Event: - return await self._acp_service.event_send( - agent_id=params.agent_id, - agent_name=params.agent_name, - task_id=params.task_id, - content=params.content, - trace_id=params.trace_id, - parent_span_id=params.parent_span_id, - request=params.request, - ) - - @activity.defn(name=ACPActivityName.TASK_CANCEL) - async def task_cancel(self, params: TaskCancelParams) -> Task: - return await self._acp_service.task_cancel( - task_id=params.task_id, - task_name=params.task_name, - agent_id=params.agent_id, - agent_name=params.agent_name, - trace_id=params.trace_id, - parent_span_id=params.parent_span_id, - request=params.request, - ) diff --git a/src/agentex/lib/core/temporal/activities/adk/agent_task_tracker_activities.py b/src/agentex/lib/core/temporal/activities/adk/agent_task_tracker_activities.py deleted file mode 100644 index e20e4dd1..00000000 --- 
a/src/agentex/lib/core/temporal/activities/adk/agent_task_tracker_activities.py +++ /dev/null @@ -1,78 +0,0 @@ -from __future__ import annotations - -from enum import Enum - -from temporalio import activity - -from agentex.lib.types.tracing import BaseModelWithTraceParams -from agentex.lib.utils.logging import make_logger -from agentex.types.agent_task_tracker import AgentTaskTracker -from agentex.lib.core.services.adk.agent_task_tracker import AgentTaskTrackerService - -logger = make_logger(__name__) - - -class AgentTaskTrackerActivityName(str, Enum): - GET_AGENT_TASK_TRACKER = "get-agent-task-tracker" - GET_AGENT_TASK_TRACKER_BY_TASK_AND_AGENT = ( - "get-agent-task-tracker-by-task-and-agent" - ) - UPDATE_AGENT_TASK_TRACKER = "update-agent-task-tracker" - - -class GetAgentTaskTrackerParams(BaseModelWithTraceParams): - tracker_id: str - - -class GetAgentTaskTrackerByTaskAndAgentParams(BaseModelWithTraceParams): - task_id: str - agent_id: str - - -class UpdateAgentTaskTrackerParams(BaseModelWithTraceParams): - tracker_id: str - last_processed_event_id: str | None - status: str | None - status_reason: str | None - - -class AgentTaskTrackerActivities: - def __init__(self, agent_task_tracker_service: AgentTaskTrackerService): - self._agent_task_tracker_service = agent_task_tracker_service - - @activity.defn(name=AgentTaskTrackerActivityName.GET_AGENT_TASK_TRACKER) - async def get_agent_task_tracker( - self, params: GetAgentTaskTrackerParams - ) -> AgentTaskTracker: - return await self._agent_task_tracker_service.get_agent_task_tracker( - tracker_id=params.tracker_id, - trace_id=params.trace_id, - parent_span_id=params.parent_span_id, - ) - - @activity.defn( - name=AgentTaskTrackerActivityName.GET_AGENT_TASK_TRACKER_BY_TASK_AND_AGENT - ) - async def get_agent_task_tracker_by_task_and_agent( - self, - params: GetAgentTaskTrackerByTaskAndAgentParams, - ) -> AgentTaskTracker | None: - return await self._agent_task_tracker_service.get_by_task_and_agent( - task_id=params.task_id, - agent_id=params.agent_id, - trace_id=params.trace_id, - parent_span_id=params.parent_span_id, - ) - - @activity.defn(name=AgentTaskTrackerActivityName.UPDATE_AGENT_TASK_TRACKER) - async def update_agent_task_tracker( - self, params: UpdateAgentTaskTrackerParams - ) -> AgentTaskTracker: - return await self._agent_task_tracker_service.update_agent_task_tracker( - tracker_id=params.tracker_id, - last_processed_event_id=params.last_processed_event_id, - status=params.status, - status_reason=params.status_reason, - trace_id=params.trace_id, - parent_span_id=params.parent_span_id, - ) diff --git a/src/agentex/lib/core/temporal/activities/adk/agents_activities.py b/src/agentex/lib/core/temporal/activities/adk/agents_activities.py deleted file mode 100644 index 7b7e2b7a..00000000 --- a/src/agentex/lib/core/temporal/activities/adk/agents_activities.py +++ /dev/null @@ -1,37 +0,0 @@ -from __future__ import annotations - -from enum import Enum -from typing import Optional - -from temporalio import activity - -from agentex.types.agent import Agent -from agentex.lib.types.tracing import BaseModelWithTraceParams -from agentex.lib.utils.logging import make_logger -from agentex.lib.core.services.adk.agents import AgentsService - -logger = make_logger(__name__) - - -class AgentsActivityName(str, Enum): - GET_AGENT = "get-agent" - - -class GetAgentParams(BaseModelWithTraceParams): - agent_id: Optional[str] = None - agent_name: Optional[str] = None - - -class AgentsActivities: - def __init__(self, agents_service: AgentsService): - 
self._agents_service = agents_service - - @activity.defn(name=AgentsActivityName.GET_AGENT) - async def get_agent(self, params: GetAgentParams) -> Agent | None: - return await self._agents_service.get_agent( - agent_id=params.agent_id, - agent_name=params.agent_name, - trace_id=params.trace_id, - parent_span_id=params.parent_span_id, - ) - diff --git a/src/agentex/lib/core/temporal/activities/adk/events_activities.py b/src/agentex/lib/core/temporal/activities/adk/events_activities.py deleted file mode 100644 index 59d5b360..00000000 --- a/src/agentex/lib/core/temporal/activities/adk/events_activities.py +++ /dev/null @@ -1,52 +0,0 @@ -from __future__ import annotations - -from enum import Enum - -from temporalio import activity - -from agentex.types.event import Event -from agentex.lib.types.tracing import BaseModelWithTraceParams -from agentex.lib.utils.logging import make_logger -from agentex.lib.core.services.adk.events import EventsService - -logger = make_logger(__name__) - - -class EventsActivityName(str, Enum): - GET_EVENT = "get-event" - LIST_EVENTS = "list-events" - - -class GetEventParams(BaseModelWithTraceParams): - event_id: str - - -class ListEventsParams(BaseModelWithTraceParams): - task_id: str - agent_id: str - last_processed_event_id: str | None = None - limit: int | None = None - - -class EventsActivities: - def __init__(self, events_service: EventsService): - self._events_service = events_service - - @activity.defn(name=EventsActivityName.GET_EVENT) - async def get_event(self, params: GetEventParams) -> Event | None: - return await self._events_service.get_event( - event_id=params.event_id, - trace_id=params.trace_id, - parent_span_id=params.parent_span_id, - ) - - @activity.defn(name=EventsActivityName.LIST_EVENTS) - async def list_events(self, params: ListEventsParams) -> list[Event]: - return await self._events_service.list_events( - task_id=params.task_id, - agent_id=params.agent_id, - last_processed_event_id=params.last_processed_event_id, - limit=params.limit, - trace_id=params.trace_id, - parent_span_id=params.parent_span_id, - ) diff --git a/src/agentex/lib/core/temporal/activities/adk/messages_activities.py b/src/agentex/lib/core/temporal/activities/adk/messages_activities.py deleted file mode 100644 index 69e2d7ab..00000000 --- a/src/agentex/lib/core/temporal/activities/adk/messages_activities.py +++ /dev/null @@ -1,96 +0,0 @@ -from __future__ import annotations - -from enum import Enum - -from temporalio import activity - -from agentex.lib.types.tracing import BaseModelWithTraceParams -from agentex.lib.utils.logging import make_logger -from agentex.types.task_message import TaskMessage -from agentex.types.task_message_content import TaskMessageContent -from agentex.lib.core.services.adk.messages import MessagesService - -logger = make_logger(__name__) - - -class MessagesActivityName(str, Enum): - CREATE_MESSAGE = "create-message" - UPDATE_MESSAGE = "update-message" - CREATE_MESSAGES_BATCH = "create-messages-batch" - UPDATE_MESSAGES_BATCH = "update-messages-batch" - LIST_MESSAGES = "list-messages" - - -class CreateMessageParams(BaseModelWithTraceParams): - task_id: str - content: TaskMessageContent - emit_updates: bool = True - - -class UpdateMessageParams(BaseModelWithTraceParams): - task_id: str - message_id: str - content: TaskMessageContent - - -class CreateMessagesBatchParams(BaseModelWithTraceParams): - task_id: str - contents: list[TaskMessageContent] - emit_updates: bool = True - - -class UpdateMessagesBatchParams(BaseModelWithTraceParams): - task_id: 
str - updates: dict[str, TaskMessageContent] - - -class ListMessagesParams(BaseModelWithTraceParams): - task_id: str - limit: int | None = None - - -class MessagesActivities: - def __init__(self, messages_service: MessagesService): - self._messages_service = messages_service - - @activity.defn(name=MessagesActivityName.CREATE_MESSAGE) - async def create_message(self, params: CreateMessageParams) -> TaskMessage: - return await self._messages_service.create_message( - task_id=params.task_id, - content=params.content, - emit_updates=params.emit_updates, - ) - - @activity.defn(name=MessagesActivityName.UPDATE_MESSAGE) - async def update_message(self, params: UpdateMessageParams) -> TaskMessage: - return await self._messages_service.update_message( - task_id=params.task_id, - message_id=params.message_id, - content=params.content, - ) - - @activity.defn(name=MessagesActivityName.CREATE_MESSAGES_BATCH) - async def create_messages_batch( - self, params: CreateMessagesBatchParams - ) -> list[TaskMessage]: - return await self._messages_service.create_messages_batch( - task_id=params.task_id, - contents=params.contents, - emit_updates=params.emit_updates, - ) - - @activity.defn(name=MessagesActivityName.UPDATE_MESSAGES_BATCH) - async def update_messages_batch( - self, params: UpdateMessagesBatchParams - ) -> list[TaskMessage]: - return await self._messages_service.update_messages_batch( - task_id=params.task_id, - updates=params.updates, - ) - - @activity.defn(name=MessagesActivityName.LIST_MESSAGES) - async def list_messages(self, params: ListMessagesParams) -> list[TaskMessage]: - return await self._messages_service.list_messages( - task_id=params.task_id, - limit=params.limit, - ) diff --git a/src/agentex/lib/core/temporal/activities/adk/providers/__init__.py b/src/agentex/lib/core/temporal/activities/adk/providers/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/agentex/lib/core/temporal/activities/adk/providers/litellm_activities.py b/src/agentex/lib/core/temporal/activities/adk/providers/litellm_activities.py deleted file mode 100644 index 3252769b..00000000 --- a/src/agentex/lib/core/temporal/activities/adk/providers/litellm_activities.py +++ /dev/null @@ -1,73 +0,0 @@ -from __future__ import annotations - -from enum import Enum - -from temporalio import activity - -from agentex.lib.utils import logging -from agentex.lib.types.tracing import BaseModelWithTraceParams -from agentex.types.task_message import TaskMessage -from agentex.lib.types.llm_messages import LLMConfig, Completion -from agentex.lib.core.services.adk.providers.litellm import LiteLLMService - -logger = logging.make_logger(__name__) - - -class LiteLLMActivityName(str, Enum): - CHAT_COMPLETION = "chat-completion" - CHAT_COMPLETION_AUTO_SEND = "chat-completion-auto-send" - # Note: CHAT_COMPLETION_STREAM is not supported in Temporal due to generator limitations - CHAT_COMPLETION_STREAM_AUTO_SEND = "chat-completion-stream-auto-send" - - -class ChatCompletionParams(BaseModelWithTraceParams): - llm_config: LLMConfig - - -class ChatCompletionAutoSendParams(BaseModelWithTraceParams): - task_id: str - llm_config: LLMConfig - - -class ChatCompletionStreamAutoSendParams(BaseModelWithTraceParams): - task_id: str - llm_config: LLMConfig - - -class LiteLLMActivities: - def __init__(self, litellm_service: LiteLLMService): - self._litellm_service = litellm_service - - @activity.defn(name=LiteLLMActivityName.CHAT_COMPLETION) - async def chat_completion(self, params: ChatCompletionParams) -> Completion: - return 
await self._litellm_service.chat_completion( - llm_config=params.llm_config, - trace_id=params.trace_id, - parent_span_id=params.parent_span_id, - ) - - @activity.defn(name=LiteLLMActivityName.CHAT_COMPLETION_AUTO_SEND) - async def chat_completion_auto_send(self, params: ChatCompletionAutoSendParams) -> TaskMessage | None: - """ - Activity for non-streaming chat completion with automatic TaskMessage creation. - """ - return await self._litellm_service.chat_completion_auto_send( - task_id=params.task_id, - llm_config=params.llm_config, - trace_id=params.trace_id, - parent_span_id=params.parent_span_id, - ) - - @activity.defn(name=LiteLLMActivityName.CHAT_COMPLETION_STREAM_AUTO_SEND) - async def chat_completion_stream_auto_send( - self, params: ChatCompletionStreamAutoSendParams - ) -> TaskMessage | None: - """ - Activity for streaming chat completion with automatic TaskMessage creation. - """ - return await self._litellm_service.chat_completion_stream_auto_send( - task_id=params.task_id, - llm_config=params.llm_config, - trace_id=params.trace_id, - parent_span_id=params.parent_span_id, - ) diff --git a/src/agentex/lib/core/temporal/activities/adk/providers/openai_activities.py b/src/agentex/lib/core/temporal/activities/adk/providers/openai_activities.py deleted file mode 100644 index be20c99c..00000000 --- a/src/agentex/lib/core/temporal/activities/adk/providers/openai_activities.py +++ /dev/null @@ -1,684 +0,0 @@ -# Standard library imports -from __future__ import annotations - -import base64 -from enum import Enum -from typing import Any, Literal, Optional -from contextlib import AsyncExitStack, asynccontextmanager -from collections.abc import Callable - -import cloudpickle -from mcp import StdioServerParameters -from agents import RunResult, RunContextWrapper, RunResultStreaming -from pydantic import Field, PrivateAttr -from agents.mcp import MCPServerStdio, MCPServerStdioParams -from temporalio import activity -from agents.tool import ( - ComputerTool as OAIComputerTool, - FunctionTool as OAIFunctionTool, - WebSearchTool as OAIWebSearchTool, - FileSearchTool as OAIFileSearchTool, - LocalShellTool as OAILocalShellTool, - CodeInterpreterTool as OAICodeInterpreterTool, - ImageGenerationTool as OAIImageGenerationTool, -) -from agents.guardrail import InputGuardrail, OutputGuardrail -from agents.exceptions import InputGuardrailTripwireTriggered, OutputGuardrailTripwireTriggered -from agents.model_settings import ModelSettings as OAIModelSettings -from openai.types.shared.reasoning import Reasoning -from openai.types.responses.response_includable import ResponseIncludable - -from agentex.lib.utils import logging - -# Third-party imports -from agentex.lib.types.tracing import BaseModelWithTraceParams - -# Local imports -from agentex.lib.types.agent_results import ( - SerializableRunResult, - SerializableRunResultStreaming, -) -from agentex.lib.core.services.adk.providers.openai import OpenAIService - -logger = logging.make_logger(__name__) - - -class OpenAIActivityName(str, Enum): - """Names of OpenAI agent activities.""" - - RUN_AGENT = "run_agent" - RUN_AGENT_AUTO_SEND = "run_agent_auto_send" - # Note: RUN_AGENT_STREAMED is not supported in Temporal due to generator limitations - RUN_AGENT_STREAMED_AUTO_SEND = "run_agent_streamed_auto_send" - - -class WebSearchTool(BaseModelWithTraceParams): - """Temporal-compatible wrapper for WebSearchTool.""" - - user_location: Optional[dict[str, Any]] = None # UserLocation object - search_context_size: Optional[Literal["low", "medium", "high"]] = 
"medium" - - def to_oai_function_tool(self) -> OAIWebSearchTool: - kwargs = {} - if self.user_location is not None: - kwargs["user_location"] = self.user_location - if self.search_context_size is not None: - kwargs["search_context_size"] = self.search_context_size - return OAIWebSearchTool(**kwargs) - - -class FileSearchTool(BaseModelWithTraceParams): - """Temporal-compatible wrapper for FileSearchTool.""" - - vector_store_ids: list[str] - max_num_results: Optional[int] = None - include_search_results: bool = False - ranking_options: Optional[dict[str, Any]] = None - filters: Optional[dict[str, Any]] = None - - def to_oai_function_tool(self): - return OAIFileSearchTool( - vector_store_ids=self.vector_store_ids, - max_num_results=self.max_num_results, - include_search_results=self.include_search_results, - ranking_options=self.ranking_options, - filters=self.filters, - ) - - -class ComputerTool(BaseModelWithTraceParams): - """Temporal-compatible wrapper for ComputerTool.""" - - # We need to serialize the computer object and safety check function - computer_serialized: str = Field(default="", description="Serialized computer object") - on_safety_check_serialized: str = Field(default="", description="Serialized safety check function") - - _computer: Any = PrivateAttr() - _on_safety_check: Optional[Callable] = PrivateAttr() - - def __init__( - self, - *, - computer: Any = None, - on_safety_check: Optional[Callable] = None, - **data, - ): - super().__init__(**data) - if computer is not None: - self.computer_serialized = self._serialize_callable(computer) - self._computer = computer - elif self.computer_serialized: - self._computer = self._deserialize_callable(self.computer_serialized) - - if on_safety_check is not None: - self.on_safety_check_serialized = self._serialize_callable(on_safety_check) - self._on_safety_check = on_safety_check - elif self.on_safety_check_serialized: - self._on_safety_check = self._deserialize_callable(self.on_safety_check_serialized) - - @classmethod - def _deserialize_callable(cls, serialized: str) -> Any: - encoded = serialized.encode() - serialized_bytes = base64.b64decode(encoded) - return cloudpickle.loads(serialized_bytes) - - @classmethod - def _serialize_callable(cls, func: Any) -> str: - serialized_bytes = cloudpickle.dumps(func) - encoded = base64.b64encode(serialized_bytes) - return encoded.decode() - - def to_oai_function_tool(self): - return OAIComputerTool( - computer=self._computer, - on_safety_check=self._on_safety_check, - ) - - -class CodeInterpreterTool(BaseModelWithTraceParams): - """Temporal-compatible wrapper for CodeInterpreterTool.""" - - tool_config: dict[str, Any] = Field( - default_factory=lambda: {"type": "code_interpreter"}, description="Tool configuration dict" - ) - - def to_oai_function_tool(self): - return OAICodeInterpreterTool(tool_config=self.tool_config) - - -class ImageGenerationTool(BaseModelWithTraceParams): - """Temporal-compatible wrapper for ImageGenerationTool.""" - - tool_config: dict[str, Any] = Field( - default_factory=lambda: {"type": "image_generation"}, description="Tool configuration dict" - ) - - def to_oai_function_tool(self): - return OAIImageGenerationTool(tool_config=self.tool_config) - - -class LocalShellTool(BaseModelWithTraceParams): - """Temporal-compatible wrapper for LocalShellTool.""" - - executor_serialized: str = Field(default="", description="Serialized LocalShellExecutor object") - - _executor: Any = PrivateAttr() - - def __init__( - self, - *, - executor: Any = None, - **data, - ): - 
super().__init__(**data) - if executor is not None: - self.executor_serialized = self._serialize_callable(executor) - self._executor = executor - elif self.executor_serialized: - self._executor = self._deserialize_callable(self.executor_serialized) - - @classmethod - def _deserialize_callable(cls, serialized: str) -> Any: - encoded = serialized.encode() - serialized_bytes = base64.b64decode(encoded) - return cloudpickle.loads(serialized_bytes) - - @classmethod - def _serialize_callable(cls, func: Any) -> str: - serialized_bytes = cloudpickle.dumps(func) - encoded = base64.b64encode(serialized_bytes) - return encoded.decode() - - def to_oai_function_tool(self): - return OAILocalShellTool(executor=self._executor) - - -class FunctionTool(BaseModelWithTraceParams): - name: str - description: str - params_json_schema: dict[str, Any] - - strict_json_schema: bool = True - is_enabled: bool = True - - _on_invoke_tool: Callable[[RunContextWrapper, str], Any] = PrivateAttr() - on_invoke_tool_serialized: str = Field( - default="", - description=( - "Normally will be set automatically during initialization and" - " doesn't need to be passed. " - "Instead, pass `on_invoke_tool` to the constructor. " - "See the __init__ method for details." - ), - ) - - def __init__( - self, - *, - on_invoke_tool: Optional[Callable[[RunContextWrapper, str], Any]] = None, - **data, - ): - """ - Initialize a FunctionTool with hacks to support serialization of the - on_invoke_tool callable arg. This is required to facilitate over-the-wire - communication of this object to/from temporal services/workers. - - Args: - on_invoke_tool: The callable to invoke when the tool is called. - **data: Additional data to initialize the FunctionTool. - """ - super().__init__(**data) - if not on_invoke_tool: - if not self.on_invoke_tool_serialized: - raise ValueError("One of `on_invoke_tool` or `on_invoke_tool_serialized` should be set") - else: - on_invoke_tool = self._deserialize_callable(self.on_invoke_tool_serialized) - else: - self.on_invoke_tool_serialized = self._serialize_callable(on_invoke_tool) - - self._on_invoke_tool = on_invoke_tool - - @classmethod - def _deserialize_callable(cls, serialized: str) -> Callable[[RunContextWrapper, str], Any]: - encoded = serialized.encode() - serialized_bytes = base64.b64decode(encoded) - return cloudpickle.loads(serialized_bytes) - - @classmethod - def _serialize_callable(cls, func: Callable) -> str: - serialized_bytes = cloudpickle.dumps(func) - encoded = base64.b64encode(serialized_bytes) - return encoded.decode() - - @property - def on_invoke_tool(self) -> Callable[[RunContextWrapper, str], Any]: - if self._on_invoke_tool is None and self.on_invoke_tool_serialized: - self._on_invoke_tool = self._deserialize_callable(self.on_invoke_tool_serialized) - return self._on_invoke_tool - - @on_invoke_tool.setter - def on_invoke_tool(self, value: Callable[[RunContextWrapper, str], Any]): - self.on_invoke_tool_serialized = self._serialize_callable(value) - self._on_invoke_tool = value - - def to_oai_function_tool(self) -> OAIFunctionTool: - """Convert to OpenAI function tool, excluding serialization fields.""" - # Create a dictionary with only the fields OAIFunctionTool expects - data = self.model_dump( - exclude={ - "trace_id", - "parent_span_id", - "_on_invoke_tool", - "on_invoke_tool_serialized", - } - ) - # Add the callable for OAI tool since properties are not serialized - data["on_invoke_tool"] = self.on_invoke_tool - return OAIFunctionTool(**data) - - -class 
TemporalInputGuardrail(BaseModelWithTraceParams): - """Temporal-compatible wrapper for InputGuardrail with function - serialization.""" - - name: str - _guardrail_function: Callable = PrivateAttr() - guardrail_function_serialized: str = Field( - default="", - description=( - "Serialized guardrail function. Set automatically during initialization. " - "Pass `guardrail_function` to the constructor instead." - ), - ) - - def __init__( - self, - *, - guardrail_function: Optional[Callable] = None, - **data, - ): - """Initialize with function serialization support for Temporal.""" - super().__init__(**data) - if not guardrail_function: - if not self.guardrail_function_serialized: - raise ValueError("One of `guardrail_function` or `guardrail_function_serialized` should be set") - else: - guardrail_function = self._deserialize_callable(self.guardrail_function_serialized) - else: - self.guardrail_function_serialized = self._serialize_callable(guardrail_function) - - self._guardrail_function = guardrail_function - - @classmethod - def _deserialize_callable(cls, serialized: str) -> Callable: - encoded = serialized.encode() - serialized_bytes = base64.b64decode(encoded) - return cloudpickle.loads(serialized_bytes) - - @classmethod - def _serialize_callable(cls, func: Callable) -> str: - serialized_bytes = cloudpickle.dumps(func) - encoded = base64.b64encode(serialized_bytes) - return encoded.decode() - - @property - def guardrail_function(self) -> Callable: - if self._guardrail_function is None and self.guardrail_function_serialized: - self._guardrail_function = self._deserialize_callable(self.guardrail_function_serialized) - return self._guardrail_function - - @guardrail_function.setter - def guardrail_function(self, value: Callable): - self.guardrail_function_serialized = self._serialize_callable(value) - self._guardrail_function = value - - def to_oai_input_guardrail(self) -> InputGuardrail: - """Convert to OpenAI InputGuardrail.""" - return InputGuardrail(guardrail_function=self.guardrail_function, name=self.name) - - -class TemporalOutputGuardrail(BaseModelWithTraceParams): - """Temporal-compatible wrapper for OutputGuardrail with function - serialization.""" - - name: str - _guardrail_function: Callable = PrivateAttr() - guardrail_function_serialized: str = Field( - default="", - description=( - "Serialized guardrail function. Set automatically during initialization. " - "Pass `guardrail_function` to the constructor instead." 
- ), - ) - - def __init__( - self, - *, - guardrail_function: Optional[Callable] = None, - **data, - ): - """Initialize with function serialization support for Temporal.""" - super().__init__(**data) - if not guardrail_function: - if not self.guardrail_function_serialized: - raise ValueError("One of `guardrail_function` or `guardrail_function_serialized` should be set") - else: - guardrail_function = self._deserialize_callable(self.guardrail_function_serialized) - else: - self.guardrail_function_serialized = self._serialize_callable(guardrail_function) - - self._guardrail_function = guardrail_function - - @classmethod - def _deserialize_callable(cls, serialized: str) -> Callable: - encoded = serialized.encode() - serialized_bytes = base64.b64decode(encoded) - return cloudpickle.loads(serialized_bytes) - - @classmethod - def _serialize_callable(cls, func: Callable) -> str: - serialized_bytes = cloudpickle.dumps(func) - encoded = base64.b64encode(serialized_bytes) - return encoded.decode() - - @property - def guardrail_function(self) -> Callable: - if self._guardrail_function is None and self.guardrail_function_serialized: - self._guardrail_function = self._deserialize_callable(self.guardrail_function_serialized) - return self._guardrail_function - - @guardrail_function.setter - def guardrail_function(self, value: Callable): - self.guardrail_function_serialized = self._serialize_callable(value) - self._guardrail_function = value - - def to_oai_output_guardrail(self) -> OutputGuardrail: - """Convert to OpenAI OutputGuardrail.""" - return OutputGuardrail(guardrail_function=self.guardrail_function, name=self.name) - - -class ModelSettings(BaseModelWithTraceParams): - temperature: float | None = None - top_p: float | None = None - frequency_penalty: float | None = None - presence_penalty: float | None = None - tool_choice: Literal["auto", "required", "none"] | str | None = None - parallel_tool_calls: bool | None = None - truncation: Literal["auto", "disabled"] | None = None - max_tokens: int | None = None - reasoning: Reasoning | None = None - metadata: dict[str, str] | None = None - store: bool | None = None - include_usage: bool | None = None - response_include: list[ResponseIncludable] | None = None - extra_body: dict[str, str] | None = None - extra_headers: dict[str, str] | None = None - extra_args: dict[str, Any] | None = None - - def to_oai_model_settings(self) -> OAIModelSettings: - return OAIModelSettings(**self.model_dump(exclude=["trace_id", "parent_span_id"])) - - -class RunAgentParams(BaseModelWithTraceParams): - """Parameters for running an agent without streaming.""" - - input_list: list[dict] - mcp_server_params: list[StdioServerParameters] - agent_name: str - agent_instructions: str - handoff_description: str | None = None - handoffs: list["RunAgentParams"] | None = None - model: str | None = None - model_settings: ModelSettings | None = None - tools: ( - list[ - FunctionTool - | WebSearchTool - | FileSearchTool - | ComputerTool - | CodeInterpreterTool - | ImageGenerationTool - | LocalShellTool - ] - | None - ) = None - output_type: Any = None - tool_use_behavior: Literal["run_llm_again", "stop_on_first_tool"] = "run_llm_again" - mcp_timeout_seconds: int | None = None - input_guardrails: list[TemporalInputGuardrail] | None = None - output_guardrails: list[TemporalOutputGuardrail] | None = None - max_turns: int | None = None - previous_response_id: str | None = None - - -class RunAgentAutoSendParams(RunAgentParams): - """Parameters for running an agent with automatic TaskMessage 
creation.""" - - task_id: str - - -class RunAgentStreamedAutoSendParams(RunAgentParams): - """Parameters for running an agent with streaming and automatic TaskMessage creation.""" - - task_id: str - - -@asynccontextmanager -async def mcp_server_context(mcp_server_params: list[StdioServerParameters]): - """Context manager for MCP servers.""" - servers: list[MCPServerStdio] = [] - for params in mcp_server_params: - server = MCPServerStdio( - name=f"Server: {params.command}", - params=MCPServerStdioParams(**params.model_dump()), - cache_tools_list=True, - client_session_timeout_seconds=60, - ) - servers.append(server) - - async with AsyncExitStack() as stack: - for server in servers: - await stack.enter_async_context(server) - yield servers - - -class OpenAIActivities: - """Activities for OpenAI agent operations.""" - - def __init__(self, openai_service: OpenAIService): - self._openai_service = openai_service - - @activity.defn(name=OpenAIActivityName.RUN_AGENT) - async def run_agent(self, params: RunAgentParams) -> SerializableRunResult: - """Run an agent without streaming or TaskMessage creation.""" - # Convert Temporal guardrails to OpenAI guardrails - input_guardrails = None - if params.input_guardrails: - input_guardrails = [g.to_oai_input_guardrail() for g in params.input_guardrails] - - output_guardrails = None - if params.output_guardrails: - output_guardrails = [g.to_oai_output_guardrail() for g in params.output_guardrails] - - result = await self._openai_service.run_agent( - input_list=params.input_list, - mcp_server_params=params.mcp_server_params, - agent_name=params.agent_name, - agent_instructions=params.agent_instructions, - trace_id=params.trace_id, - parent_span_id=params.parent_span_id, - handoff_description=params.handoff_description, - handoffs=params.handoffs, - model=params.model, - model_settings=params.model_settings, - tools=params.tools, - output_type=params.output_type, - tool_use_behavior=params.tool_use_behavior, - input_guardrails=input_guardrails, - output_guardrails=output_guardrails, - mcp_timeout_seconds=params.mcp_timeout_seconds, - max_turns=params.max_turns, - previous_response_id=params.previous_response_id, - ) - return self._to_serializable_run_result(result) - - @activity.defn(name=OpenAIActivityName.RUN_AGENT_AUTO_SEND) - async def run_agent_auto_send(self, params: RunAgentAutoSendParams) -> SerializableRunResult: - """Run an agent with automatic TaskMessage creation.""" - # Convert Temporal guardrails to OpenAI guardrails - input_guardrails = None - if params.input_guardrails: - input_guardrails = [g.to_oai_input_guardrail() for g in params.input_guardrails] - - output_guardrails = None - if params.output_guardrails: - output_guardrails = [g.to_oai_output_guardrail() for g in params.output_guardrails] - - try: - result = await self._openai_service.run_agent_auto_send( - task_id=params.task_id, - input_list=params.input_list, - mcp_server_params=params.mcp_server_params, - agent_name=params.agent_name, - agent_instructions=params.agent_instructions, - trace_id=params.trace_id, - parent_span_id=params.parent_span_id, - handoff_description=params.handoff_description, - handoffs=params.handoffs, - model=params.model, - model_settings=params.model_settings, - tools=params.tools, - output_type=params.output_type, - tool_use_behavior=params.tool_use_behavior, - input_guardrails=input_guardrails, - output_guardrails=output_guardrails, - mcp_timeout_seconds=params.mcp_timeout_seconds, - max_turns=params.max_turns, - 
previous_response_id=params.previous_response_id, - ) - return self._to_serializable_run_result(result) - except InputGuardrailTripwireTriggered as e: - # Handle guardrail trigger gracefully - rejection_message = ( - "I'm sorry, but I cannot process this request due to a guardrail. Please try a different question." - ) - - # Try to extract rejection message from the guardrail result - if hasattr(e, "guardrail_result") and hasattr(e.guardrail_result, "output"): - output_info = getattr(e.guardrail_result.output, "output_info", {}) - if isinstance(output_info, dict) and "rejection_message" in output_info: - rejection_message = output_info["rejection_message"] - - # Build the final input list with the rejection message - final_input_list = list(params.input_list or []) - final_input_list.append({"role": "assistant", "content": rejection_message}) - - return SerializableRunResult(final_output=rejection_message, final_input_list=final_input_list) - except OutputGuardrailTripwireTriggered as e: - # Handle output guardrail trigger gracefully - rejection_message = ( - "I'm sorry, but I cannot provide this response due to a guardrail. Please try a different question." - ) - - # Try to extract rejection message from the guardrail result - if hasattr(e, "guardrail_result") and hasattr(e.guardrail_result, "output"): - output_info = getattr(e.guardrail_result.output, "output_info", {}) - if isinstance(output_info, dict) and "rejection_message" in output_info: - rejection_message = output_info["rejection_message"] - - # Build the final input list with the rejection message - final_input_list = list(params.input_list or []) - final_input_list.append({"role": "assistant", "content": rejection_message}) - - return SerializableRunResult(final_output=rejection_message, final_input_list=final_input_list) - - @activity.defn(name=OpenAIActivityName.RUN_AGENT_STREAMED_AUTO_SEND) - async def run_agent_streamed_auto_send( - self, params: RunAgentStreamedAutoSendParams - ) -> SerializableRunResultStreaming: - """Run an agent with streaming and automatic TaskMessage creation.""" - - # Convert Temporal guardrails to OpenAI guardrails - input_guardrails = None - if params.input_guardrails: - input_guardrails = [g.to_oai_input_guardrail() for g in params.input_guardrails] - - output_guardrails = None - if params.output_guardrails: - output_guardrails = [g.to_oai_output_guardrail() for g in params.output_guardrails] - - try: - result = await self._openai_service.run_agent_streamed_auto_send( - task_id=params.task_id, - input_list=params.input_list, - mcp_server_params=params.mcp_server_params, - agent_name=params.agent_name, - agent_instructions=params.agent_instructions, - trace_id=params.trace_id, - parent_span_id=params.parent_span_id, - handoff_description=params.handoff_description, - handoffs=params.handoffs, - model=params.model, - model_settings=params.model_settings, - tools=params.tools, - output_type=params.output_type, - tool_use_behavior=params.tool_use_behavior, - input_guardrails=input_guardrails, - output_guardrails=output_guardrails, - mcp_timeout_seconds=params.mcp_timeout_seconds, - max_turns=params.max_turns, - previous_response_id=params.previous_response_id, - ) - return self._to_serializable_run_result_streaming(result) - except InputGuardrailTripwireTriggered as e: - # Handle guardrail trigger gracefully - rejection_message = ( - "I'm sorry, but I cannot process this request due to a guardrail. Please try a different question." 
- ) - - # Try to extract rejection message from the guardrail result - if hasattr(e, "guardrail_result") and hasattr(e.guardrail_result, "output"): - output_info = getattr(e.guardrail_result.output, "output_info", {}) - if isinstance(output_info, dict) and "rejection_message" in output_info: - rejection_message = output_info["rejection_message"] - - # Build the final input list with the rejection message - final_input_list = list(params.input_list or []) - final_input_list.append({"role": "assistant", "content": rejection_message}) - - return SerializableRunResultStreaming(final_output=rejection_message, final_input_list=final_input_list) - except OutputGuardrailTripwireTriggered as e: - # Handle output guardrail trigger gracefully - rejection_message = ( - "I'm sorry, but I cannot provide this response due to a guardrail. Please try a different question." - ) - - # Try to extract rejection message from the guardrail result - if hasattr(e, "guardrail_result") and hasattr(e.guardrail_result, "output"): - output_info = getattr(e.guardrail_result.output, "output_info", {}) - if isinstance(output_info, dict) and "rejection_message" in output_info: - rejection_message = output_info["rejection_message"] - - # Build the final input list with the rejection message - final_input_list = list(params.input_list or []) - final_input_list.append({"role": "assistant", "content": rejection_message}) - - return SerializableRunResultStreaming(final_output=rejection_message, final_input_list=final_input_list) - - @staticmethod - def _to_serializable_run_result(result: RunResult) -> SerializableRunResult: - """Convert RunResult to SerializableRunResult.""" - return SerializableRunResult( - final_output=result.final_output, - final_input_list=result.to_input_list(), - ) - - @staticmethod - def _to_serializable_run_result_streaming( - result: RunResultStreaming, - ) -> SerializableRunResultStreaming: - """Convert RunResultStreaming to SerializableRunResultStreaming.""" - return SerializableRunResultStreaming( - final_output=result.final_output, - final_input_list=result.to_input_list(), - ) diff --git a/src/agentex/lib/core/temporal/activities/adk/providers/sgp_activities.py b/src/agentex/lib/core/temporal/activities/adk/providers/sgp_activities.py deleted file mode 100644 index 3905eb16..00000000 --- a/src/agentex/lib/core/temporal/activities/adk/providers/sgp_activities.py +++ /dev/null @@ -1,42 +0,0 @@ -from enum import Enum - -from temporalio import activity - -from agentex.lib.types.files import FileContentResponse -from agentex.lib.types.tracing import BaseModelWithTraceParams -from agentex.lib.utils.logging import make_logger -from agentex.lib.core.services.adk.providers.sgp import SGPService - -logger = make_logger(__name__) - - -class SGPActivityName(str, Enum): - DOWNLOAD_FILE_CONTENT = "download-file-content" - - -class DownloadFileParams(BaseModelWithTraceParams): - file_id: str - filename: str - - -class SGPActivities: - def __init__(self, sgp_service: SGPService): - self.sgp_service = sgp_service - - @activity.defn(name=SGPActivityName.DOWNLOAD_FILE_CONTENT) - async def download_file_content(self, params: DownloadFileParams) -> FileContentResponse: - """ - Download file content from SGP. - - Args: - params: DownloadFileParams containing file_id and filename. - - Returns: - FileContentResponse with mime_type and base64_content for constructing LLM input. 
- """ - return await self.sgp_service.download_file_content( - file_id=params.file_id, - filename=params.filename, - trace_id=params.trace_id, - parent_span_id=params.parent_span_id, - ) diff --git a/src/agentex/lib/core/temporal/activities/adk/state_activities.py b/src/agentex/lib/core/temporal/activities/adk/state_activities.py deleted file mode 100644 index 4eaf83fb..00000000 --- a/src/agentex/lib/core/temporal/activities/adk/state_activities.py +++ /dev/null @@ -1,87 +0,0 @@ -from __future__ import annotations - -from enum import Enum -from typing import Any - -from temporalio import activity - -from agentex.types.state import State -from agentex.lib.types.tracing import BaseModelWithTraceParams -from agentex.lib.utils.logging import make_logger -from agentex.lib.core.services.adk.state import StateService - -logger = make_logger(__name__) - - -class StateActivityName(str, Enum): - CREATE_STATE = "create-state" - GET_STATE = "get-state" - UPDATE_STATE = "update-state" - DELETE_STATE = "delete-state" - - -class CreateStateParams(BaseModelWithTraceParams): - task_id: str - agent_id: str - state: dict[str, Any] - - -class GetStateParams(BaseModelWithTraceParams): - state_id: str | None = None - task_id: str | None = None - agent_id: str | None = None - - -class UpdateStateParams(BaseModelWithTraceParams): - state_id: str - task_id: str - agent_id: str - state: dict[str, Any] - - -class DeleteStateParams(BaseModelWithTraceParams): - state_id: str - - -class StateActivities: - def __init__(self, state_service: StateService): - self._state_service = state_service - - @activity.defn(name=StateActivityName.CREATE_STATE) - async def create_state(self, params: CreateStateParams) -> State: - return await self._state_service.create_state( - task_id=params.task_id, - agent_id=params.agent_id, - state=params.state, - trace_id=params.trace_id, - parent_span_id=params.parent_span_id, - ) - - @activity.defn(name=StateActivityName.GET_STATE) - async def get_state(self, params: GetStateParams) -> State | None: - return await self._state_service.get_state( - state_id=params.state_id, - task_id=params.task_id, - agent_id=params.agent_id, - trace_id=params.trace_id, - parent_span_id=params.parent_span_id, - ) - - @activity.defn(name=StateActivityName.UPDATE_STATE) - async def update_state(self, params: UpdateStateParams) -> State: - return await self._state_service.update_state( - state_id=params.state_id, - task_id=params.task_id, - agent_id=params.agent_id, - state=params.state, - trace_id=params.trace_id, - parent_span_id=params.parent_span_id, - ) - - @activity.defn(name=StateActivityName.DELETE_STATE) - async def delete_state(self, params: DeleteStateParams) -> State: - return await self._state_service.delete_state( - state_id=params.state_id, - trace_id=params.trace_id, - parent_span_id=params.parent_span_id, - ) diff --git a/src/agentex/lib/core/temporal/activities/adk/streaming_activities.py b/src/agentex/lib/core/temporal/activities/adk/streaming_activities.py deleted file mode 100644 index 2d9faf35..00000000 --- a/src/agentex/lib/core/temporal/activities/adk/streaming_activities.py +++ /dev/null @@ -1,35 +0,0 @@ -from __future__ import annotations - -from enum import Enum - -from temporalio import activity - -from agentex.lib.utils.logging import make_logger -from agentex.lib.utils.temporal import heartbeat_if_in_workflow -from agentex.lib.utils.model_utils import BaseModel -from agentex.types.task_message_update import TaskMessageUpdate -from agentex.lib.core.services.adk.streaming import 
StreamingService - -logger = make_logger(__name__) - - -class StreamingActivityName(str, Enum): - STREAM_UPDATE = "stream-update" - - -class StreamUpdateParams(BaseModel): - update: TaskMessageUpdate - - -class StreamingActivities: - """ - Temporal activities for streaming events to clients (ADK pattern). - """ - - def __init__(self, streaming_service: StreamingService): - self._streaming_service = streaming_service - - @activity.defn(name=StreamingActivityName.STREAM_UPDATE) - async def stream_update(self, params: StreamUpdateParams) -> TaskMessageUpdate | None: - heartbeat_if_in_workflow("stream update") - return await self._streaming_service.stream_update(update=params.update) diff --git a/src/agentex/lib/core/temporal/activities/adk/tasks_activities.py b/src/agentex/lib/core/temporal/activities/adk/tasks_activities.py deleted file mode 100644 index f3f59f8c..00000000 --- a/src/agentex/lib/core/temporal/activities/adk/tasks_activities.py +++ /dev/null @@ -1,52 +0,0 @@ -from __future__ import annotations - -from enum import Enum - -from temporalio import activity - -from agentex.types.task import Task -from agentex.lib.types.tracing import BaseModelWithTraceParams -from agentex.lib.utils.logging import make_logger -from agentex.lib.core.services.adk.tasks import TasksService -from agentex.types.task_retrieve_response import TaskRetrieveResponse -from agentex.types.task_retrieve_by_name_response import TaskRetrieveByNameResponse - -logger = make_logger(__name__) - - -class TasksActivityName(str, Enum): - GET_TASK = "get-task" - DELETE_TASK = "delete-task" - - -class GetTaskParams(BaseModelWithTraceParams): - task_id: str | None = None - task_name: str | None = None - - -class DeleteTaskParams(BaseModelWithTraceParams): - task_id: str | None = None - task_name: str | None = None - - -class TasksActivities: - def __init__(self, tasks_service: TasksService): - self._tasks_service = tasks_service - - @activity.defn(name=TasksActivityName.GET_TASK) - async def get_task(self, params: GetTaskParams) -> TaskRetrieveResponse | TaskRetrieveByNameResponse: - return await self._tasks_service.get_task( - task_id=params.task_id, - task_name=params.task_name, - trace_id=params.trace_id, - parent_span_id=params.parent_span_id, - ) - - @activity.defn(name=TasksActivityName.DELETE_TASK) - async def delete_task(self, params: DeleteTaskParams) -> Task: - return await self._tasks_service.delete_task( # type: ignore[return-value] - task_id=params.task_id, - task_name=params.task_name, - trace_id=params.trace_id, - parent_span_id=params.parent_span_id, - ) diff --git a/src/agentex/lib/core/temporal/activities/adk/tracing_activities.py b/src/agentex/lib/core/temporal/activities/adk/tracing_activities.py deleted file mode 100644 index 65afcded..00000000 --- a/src/agentex/lib/core/temporal/activities/adk/tracing_activities.py +++ /dev/null @@ -1,57 +0,0 @@ -from __future__ import annotations - -from enum import Enum -from typing import Any - -from temporalio import activity - -from agentex.types.span import Span -from agentex.lib.utils.logging import make_logger -from agentex.lib.utils.model_utils import BaseModel -from agentex.lib.core.services.adk.tracing import TracingService - -logger = make_logger(__name__) - - -class TracingActivityName(str, Enum): - START_SPAN = "start-span" - END_SPAN = "end-span" - - -class StartSpanParams(BaseModel): - trace_id: str - parent_id: str | None = None - name: str - input: list[Any] | dict[str, Any] | BaseModel | None = None - data: list[Any] | dict[str, Any] | BaseModel | 
None = None - - -class EndSpanParams(BaseModel): - trace_id: str - span: Span - - -class TracingActivities: - """ - Temporal activities for tracing (spans), ADK pattern. - """ - - def __init__(self, tracing_service: TracingService): - self._tracing_service = tracing_service - - @activity.defn(name=TracingActivityName.START_SPAN) - async def start_span(self, params: StartSpanParams) -> Span | None: - return await self._tracing_service.start_span( - trace_id=params.trace_id, - parent_id=params.parent_id, - name=params.name, - input=params.input, - data=params.data, - ) - - @activity.defn(name=TracingActivityName.END_SPAN) - async def end_span(self, params: EndSpanParams) -> Span: - return await self._tracing_service.end_span( - trace_id=params.trace_id, - span=params.span, - ) diff --git a/src/agentex/lib/core/temporal/activities/adk/utils/__init__.py b/src/agentex/lib/core/temporal/activities/adk/utils/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/agentex/lib/core/temporal/activities/adk/utils/templating_activities.py b/src/agentex/lib/core/temporal/activities/adk/utils/templating_activities.py deleted file mode 100644 index a2cc4ff1..00000000 --- a/src/agentex/lib/core/temporal/activities/adk/utils/templating_activities.py +++ /dev/null @@ -1,43 +0,0 @@ -from __future__ import annotations - -from enum import Enum -from typing import Any - -from temporalio import activity - -from agentex.lib.types.tracing import BaseModelWithTraceParams -from agentex.lib.core.services.adk.utils.templating import TemplatingService - - -class JinjaActivityName(str, Enum): - RENDER_JINJA = "render-jinja" - - -class RenderJinjaParams(BaseModelWithTraceParams): - """Parameters for the Jinja activity""" - - template: str - variables: dict[str, Any] - - -class TemplatingActivities: - def __init__(self, templating_service: TemplatingService): - self.templating_service = templating_service - - @activity.defn(name=JinjaActivityName.RENDER_JINJA) - async def render_jinja(self, params: RenderJinjaParams) -> str: - """ - Activity that renders a Jinja template with the provided data. - - Args: - params: JinjaParams containing the data and template string - - Returns: - The rendered template as a string - """ - return await self.templating_service.render_jinja( - template=params.template, - variables=params.variables, - trace_id=params.trace_id, - parent_span_id=params.parent_span_id, - ) diff --git a/src/agentex/lib/core/temporal/plugins/__init__.py b/src/agentex/lib/core/temporal/plugins/__init__.py deleted file mode 100644 index 52ab6eac..00000000 --- a/src/agentex/lib/core/temporal/plugins/__init__.py +++ /dev/null @@ -1,58 +0,0 @@ -"""OpenAI Agents SDK Temporal Plugin with Streaming Support. - -This module provides streaming capabilities for the OpenAI Agents SDK in Temporal -using interceptors to thread task_id through workflows to activities. - -The streaming implementation works by: -1. Using Temporal interceptors to thread task_id through the execution -2. Streaming LLM responses to Redis in real-time from activities -3. Returning complete responses to maintain Temporal determinism - -Example: - >>> from agentex.lib.core.temporal.plugins.openai_agents import ( - ... TemporalStreamingModelProvider, - ... TemporalTracingModelProvider, - ... ContextInterceptor, - ... 
) - >>> from temporalio.contrib.openai_agents import OpenAIAgentsPlugin, ModelActivityParameters - >>> from datetime import timedelta - >>> - >>> # Create streaming model provider - >>> model_provider = TemporalStreamingModelProvider() - >>> - >>> # Create STANDARD plugin with streaming model provider - >>> plugin = OpenAIAgentsPlugin( - ... model_params=ModelActivityParameters( - ... start_to_close_timeout=timedelta(seconds=120), - ... ), - ... model_provider=model_provider, - ... ) - >>> - >>> # Register interceptor with worker - >>> interceptor = ContextInterceptor() - >>> # Add interceptor to worker configuration -""" - -from agentex.lib.core.temporal.plugins.openai_agents import ( - ContextInterceptor, - TemporalStreamingHooks, - TemporalStreamingModel, - TemporalTracingModelProvider, - TemporalStreamingModelProvider, - streaming_task_id, - streaming_trace_id, - stream_lifecycle_content, - streaming_parent_span_id, -) - -__all__ = [ - "TemporalStreamingModel", - "TemporalStreamingModelProvider", - "TemporalTracingModelProvider", - "ContextInterceptor", - "streaming_task_id", - "streaming_trace_id", - "streaming_parent_span_id", - "TemporalStreamingHooks", - "stream_lifecycle_content", -] \ No newline at end of file diff --git a/src/agentex/lib/core/temporal/plugins/claude_agents/__init__.py b/src/agentex/lib/core/temporal/plugins/claude_agents/__init__.py deleted file mode 100644 index 6f8a7c41..00000000 --- a/src/agentex/lib/core/temporal/plugins/claude_agents/__init__.py +++ /dev/null @@ -1,72 +0,0 @@ -"""Claude Agents SDK integration with Temporal. - -This plugin provides integration between Claude Agents SDK and AgentEx's -Temporal-based orchestration platform. - -Features: -- Temporal activity wrapper for Claude SDK calls -- Real-time streaming to Redis/UI -- Session resume for conversation context -- Tool call visibility (Read, Write, Bash, etc.) -- Subagent support with nested tracing -- Workspace isolation per task - -Architecture: -- activities.py: Temporal activity definitions -- message_handler.py: Message parsing and streaming logic -- Reuses OpenAI's ContextInterceptor for context threading - -Usage: - from agentex.lib.core.temporal.plugins.claude_agents import ( - run_claude_agent_activity, - create_workspace_directory, - ContextInterceptor, - ) - - # In worker - worker = AgentexWorker( - task_queue=queue_name, - interceptors=[ContextInterceptor()], - ) - - activities = get_all_activities() - activities.extend([run_claude_agent_activity, create_workspace_directory]) - - await worker.run(activities=activities, workflow=YourWorkflow) -""" - -from agentex.lib.core.temporal.plugins.claude_agents.hooks import ( - TemporalStreamingHooks, - create_streaming_hooks, -) -from agentex.lib.core.temporal.plugins.claude_agents.activities import ( - run_claude_agent_activity, - create_workspace_directory, -) -from agentex.lib.core.temporal.plugins.claude_agents.message_handler import ( - ClaudeMessageHandler, -) - -# Reuse OpenAI's context threading - this is the key to streaming! 
-from agentex.lib.core.temporal.plugins.openai_agents.interceptors.context_interceptor import ( - ContextInterceptor, - streaming_task_id, - streaming_trace_id, - streaming_parent_span_id, -) - -__all__ = [ - # Activities - "run_claude_agent_activity", - "create_workspace_directory", - # Message handling - "ClaudeMessageHandler", - # Hooks - "create_streaming_hooks", - "TemporalStreamingHooks", - # Context threading (reused from OpenAI) - "ContextInterceptor", - "streaming_task_id", - "streaming_trace_id", - "streaming_parent_span_id", -] diff --git a/src/agentex/lib/core/temporal/plugins/claude_agents/activities.py b/src/agentex/lib/core/temporal/plugins/claude_agents/activities.py deleted file mode 100644 index ccd6a9f9..00000000 --- a/src/agentex/lib/core/temporal/plugins/claude_agents/activities.py +++ /dev/null @@ -1,154 +0,0 @@ -"""Temporal activities for Claude Agents SDK integration.""" - -from __future__ import annotations - -import os -from typing import Any - -from temporalio import activity -from claude_agent_sdk import AgentDefinition, ClaudeSDKClient, ClaudeAgentOptions - -from agentex.lib.utils.logging import make_logger -from agentex.lib.core.temporal.plugins.claude_agents.hooks import create_streaming_hooks -from agentex.lib.core.temporal.plugins.claude_agents.message_handler import ClaudeMessageHandler -from agentex.lib.core.temporal.plugins.openai_agents.interceptors.context_interceptor import ( - streaming_task_id, - streaming_trace_id, - streaming_parent_span_id, -) - -logger = make_logger(__name__) - - -@activity.defn -async def create_workspace_directory(task_id: str, workspace_root: str | None = None) -> str: - """Create workspace directory for task - runs as Temporal activity - - Args: - task_id: Task ID for workspace directory name - workspace_root: Root directory for workspaces (defaults to .claude-workspace/ in cwd) - - Returns: - Absolute path to created workspace - """ - if workspace_root is None: - # Default to .claude-workspace in current directory - # Follows Claude SDK's .claude/ convention - workspace_root = os.path.join(os.getcwd(), ".claude-workspace") - - workspace_path = os.path.join(workspace_root, task_id) - os.makedirs(workspace_path, exist_ok=True) - logger.info(f"Created workspace: {workspace_path}") - return workspace_path - - -@activity.defn(name="run_claude_agent_activity") -async def run_claude_agent_activity( - prompt: str, - workspace_path: str, - allowed_tools: list[str], - permission_mode: str = "acceptEdits", - system_prompt: str | None = None, - resume_session_id: str | None = None, - agents: dict[str, Any] | None = None, -) -> dict[str, Any]: - """Execute Claude SDK - wrapped in Temporal activity - - This activity: - 1. Gets task_id from ContextVar (set by ContextInterceptor) - 2. Configures Claude with workspace isolation and session resume - 3. Runs Claude SDK and processes messages via ClaudeMessageHandler - 4. Streams messages to UI in real-time - 5. 
Returns session_id, usage, and cost for next turn - - Args: - prompt: User message to send to Claude - workspace_path: Directory for file operations (cwd) - allowed_tools: List of tools Claude can use (include "Task" for subagents) - permission_mode: Permission mode (default: acceptEdits) - system_prompt: Optional system prompt override - resume_session_id: Optional session ID to resume conversation context - agents: Optional dict of subagent definitions for Task tool - - Returns: - dict with "messages", "session_id", "usage", and "cost_usd" keys - """ - - # Get streaming context from ContextVars (set by interceptor) - task_id = streaming_task_id.get() - trace_id = streaming_trace_id.get() - parent_span_id = streaming_parent_span_id.get() - - logger.info( - f"[run_claude_agent_activity] Starting - " - f"task_id={task_id}, workspace={workspace_path}, tools={allowed_tools}, " - f"resume={'YES' if resume_session_id else 'NO (new session)'}, " - f"subagents={list(agents.keys()) if agents else 'NONE'}" - ) - - # Reconstruct AgentDefinition objects from serialized dicts - # Temporal serializes dataclasses to dicts, need to recreate them - agent_defs = None - if agents: - agent_defs = {} - for name, agent_data in agents.items(): - if isinstance(agent_data, AgentDefinition): - agent_defs[name] = agent_data - else: - # Reconstruct from dict - agent_defs[name] = AgentDefinition( - description=agent_data.get('description', ''), - prompt=agent_data.get('prompt', ''), - tools=agent_data.get('tools'), - model=agent_data.get('model'), - ) - - # Create hooks for streaming tool calls and subagent execution - hooks = create_streaming_hooks( - task_id=task_id, - trace_id=trace_id, - parent_span_id=parent_span_id, - ) - - # Configure Claude with workspace isolation, session resume, subagents, and hooks - options = ClaudeAgentOptions( - cwd=workspace_path, - allowed_tools=allowed_tools, - permission_mode=permission_mode, # type: ignore - system_prompt=system_prompt, - resume=resume_session_id, - agents=agent_defs, - hooks=hooks, # Tool lifecycle hooks for streaming! - ) - - # Create message handler for streaming - handler = ClaudeMessageHandler( - task_id=task_id, - trace_id=trace_id, - parent_span_id=parent_span_id, - ) - - # Run Claude and process messages - try: - await handler.initialize() - - async with ClaudeSDKClient(options=options) as client: - await client.query(prompt) - - # Use receive_response() instead of receive_messages() - # receive_response() yields messages until ResultMessage, then stops - # receive_messages() is infinite and never completes! 
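- # Note: if the activity fails mid-stream, Temporal retries it from the beginning,
- # so the response is re-streamed from scratch (the accepted tradeoff for streaming).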
- async for message in client.receive_response(): - await handler.handle_message(message) - - logger.debug(f"Message loop completed, cleaning up...") - await handler.cleanup() - - results = handler.get_results() - logger.debug(f"Returning results with keys: {results.keys()}") - return results - - except Exception as e: - logger.error(f"[run_claude_agent_activity] Error: {e}", exc_info=True) - await handler.cleanup() - raise diff --git a/src/agentex/lib/core/temporal/plugins/claude_agents/hooks/__init__.py b/src/agentex/lib/core/temporal/plugins/claude_agents/hooks/__init__.py deleted file mode 100644 index 39c08651..00000000 --- a/src/agentex/lib/core/temporal/plugins/claude_agents/hooks/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -"""Claude SDK hooks for streaming lifecycle events to AgentEx UI.""" - -from agentex.lib.core.temporal.plugins.claude_agents.hooks.hooks import ( - TemporalStreamingHooks, - create_streaming_hooks, -) - -__all__ = [ - "create_streaming_hooks", - "TemporalStreamingHooks", -] diff --git a/src/agentex/lib/core/temporal/plugins/claude_agents/hooks/hooks.py b/src/agentex/lib/core/temporal/plugins/claude_agents/hooks/hooks.py deleted file mode 100644 index 5f629fc1..00000000 --- a/src/agentex/lib/core/temporal/plugins/claude_agents/hooks/hooks.py +++ /dev/null @@ -1,212 +0,0 @@ -"""Claude SDK hooks for streaming tool calls and subagent execution to AgentEx UI. - -This module provides hook callbacks that integrate with Claude SDK's hooks system -to stream tool execution lifecycle events in real-time. -""" - -from __future__ import annotations - -from typing import Any - -from claude_agent_sdk import HookMatcher - -from agentex.lib import adk -from agentex.lib.utils.logging import make_logger -from agentex.types.task_message_update import StreamTaskMessageFull -from agentex.types.tool_request_content import ToolRequestContent -from agentex.types.tool_response_content import ToolResponseContent - -logger = make_logger(__name__) - - -class TemporalStreamingHooks: - """Hooks for streaming Claude SDK lifecycle events to AgentEx UI. - - Implements Claude SDK hook callbacks: - - PreToolUse: Called before tool execution โ†’ stream tool request - - PostToolUse: Called after tool execution โ†’ stream tool result - - Also handles subagent detection and nested tracing. - """ - - def __init__( - self, - task_id: str | None, - trace_id: str | None = None, - parent_span_id: str | None = None, - ): - """Initialize streaming hooks. - - Args: - task_id: AgentEx task ID for routing streams - trace_id: Trace ID for nested spans - parent_span_id: Parent span ID for subagent spans - """ - self.task_id = task_id - self.trace_id = trace_id - self.parent_span_id = parent_span_id - - # Track active subagent spans - self.subagent_spans: dict[str, Any] = {} # tool_call_id โ†’ (ctx, span) - - async def pre_tool_use( - self, - input_data: dict[str, Any], - tool_use_id: str | None, - _context: Any, - ) -> dict[str, Any]: - """Hook called before tool execution. 
- - Args: - input_data: Contains tool_name, tool_input from Claude SDK - tool_use_id: Unique ID for this tool call - context: Hook context from Claude SDK - - Returns: - Empty dict (allow execution to proceed) - """ - if not self.task_id or not tool_use_id: - return {} - - tool_name = input_data.get("tool_name", "unknown") - tool_input = input_data.get("tool_input", {}) - - logger.info(f"๐Ÿ”ง Tool request: {tool_name}") - - # Special handling for Task tool (subagents) - create nested span - if tool_name == "Task" and self.trace_id and self.parent_span_id: - subagent_type = tool_input.get("subagent_type", "unknown") - logger.info(f"๐Ÿค– Subagent started: {subagent_type}") - - # Create nested trace span for subagent - subagent_ctx = adk.tracing.span( - trace_id=self.trace_id, - parent_id=self.parent_span_id, - name=f"Subagent: {subagent_type}", - input=tool_input, - ) - subagent_span = await subagent_ctx.__aenter__() - self.subagent_spans[tool_use_id] = (subagent_ctx, subagent_span) - - # Stream tool request to UI - try: - async with adk.streaming.streaming_task_message_context( - task_id=self.task_id, - initial_content=ToolRequestContent( - author="agent", - name=tool_name, - arguments=tool_input, - tool_call_id=tool_use_id, - ) - ) as tool_ctx: - await tool_ctx.stream_update( - StreamTaskMessageFull( - parent_task_message=tool_ctx.task_message, - content=ToolRequestContent( - author="agent", - name=tool_name, - arguments=tool_input, - tool_call_id=tool_use_id, - ), - type="full" - ) - ) - except Exception as e: - logger.warning(f"Failed to stream tool request: {e}") - - return {} # Allow execution - - async def post_tool_use( - self, - input_data: dict[str, Any], - tool_use_id: str | None, - _context: Any, - ) -> dict[str, Any]: - """Hook called after tool execution. - - Args: - input_data: Contains tool_name, tool_output from Claude SDK - tool_use_id: Unique ID for this tool call - context: Hook context from Claude SDK - - Returns: - Empty dict - """ - if not self.task_id or not tool_use_id: - return {} - - tool_name = input_data.get("tool_name", "unknown") - tool_output = input_data.get("tool_output", "") - - logger.info(f"โœ… Tool result: {tool_name}") - - # If this was a subagent, close the nested span - if tool_use_id in self.subagent_spans: - subagent_ctx, subagent_span = self.subagent_spans[tool_use_id] - subagent_span.output = {"result": tool_output} - await subagent_ctx.__aexit__(None, None, None) - logger.info(f"๐Ÿค– Subagent completed: {tool_name}") - del self.subagent_spans[tool_use_id] - - # Stream tool response to UI - try: - async with adk.streaming.streaming_task_message_context( - task_id=self.task_id, - initial_content=ToolResponseContent( - author="agent", - name=tool_name, - content=tool_output, - tool_call_id=tool_use_id, - ) - ) as tool_ctx: - await tool_ctx.stream_update( - StreamTaskMessageFull( - parent_task_message=tool_ctx.task_message, - content=ToolResponseContent( - author="agent", - name=tool_name, - content=tool_output, - tool_call_id=tool_use_id, - ), - type="full" - ) - ) - except Exception as e: - logger.warning(f"Failed to stream tool response: {e}") - - return {} - - -def create_streaming_hooks( - task_id: str | None, - trace_id: str | None = None, - parent_span_id: str | None = None, -) -> dict[str, list[HookMatcher]]: - """Create Claude SDK hooks configuration for streaming. - - Returns hooks dict suitable for ClaudeAgentOptions(hooks=...). 
- - Args: - task_id: AgentEx task ID for streaming - trace_id: Trace ID for nested spans - parent_span_id: Parent span ID for subagent spans - - Returns: - Dict with PreToolUse and PostToolUse hook configurations - """ - hooks_instance = TemporalStreamingHooks(task_id, trace_id, parent_span_id) - - return { - "PreToolUse": [ - HookMatcher( - matcher=None, # Match all tools - hooks=[hooks_instance.pre_tool_use] - ) - ], - "PostToolUse": [ - HookMatcher( - matcher=None, # Match all tools - hooks=[hooks_instance.post_tool_use] - ) - ], - } diff --git a/src/agentex/lib/core/temporal/plugins/claude_agents/message_handler.py b/src/agentex/lib/core/temporal/plugins/claude_agents/message_handler.py deleted file mode 100644 index c0d414a2..00000000 --- a/src/agentex/lib/core/temporal/plugins/claude_agents/message_handler.py +++ /dev/null @@ -1,178 +0,0 @@ -"""Message handling and streaming for Claude Agents SDK. - -Simplified message handler that focuses on: -- Streaming text content to UI -- Extracting session_id for conversation continuity -- Extracting usage and cost information - -Tool requests/responses are handled by Claude SDK hooks (see hooks/hooks.py). -""" - -from __future__ import annotations - -from typing import Any - -from claude_agent_sdk import ( - TextBlock, - ResultMessage, - SystemMessage, - AssistantMessage, -) - -from agentex.lib import adk -from agentex.lib.utils.logging import make_logger -from agentex.types.text_content import TextContent -from agentex.types.task_message_delta import TextDelta -from agentex.types.task_message_update import StreamTaskMessageDelta - -logger = make_logger(__name__) - - -class ClaudeMessageHandler: - """Handles Claude SDK messages and streams them to AgentEx UI. - - Simplified handler focused on: - - Streaming text blocks to UI - - Extracting session_id from SystemMessage/ResultMessage - - Extracting usage and cost from ResultMessage - - Serializing responses for Temporal - - Note: Tool lifecycle events (requests/responses) are handled by - TemporalStreamingHooks, not this class. - """ - - def __init__( - self, - task_id: str | None, - trace_id: str | None, - parent_span_id: str | None, - ): - self.task_id = task_id - self.trace_id = trace_id - self.parent_span_id = parent_span_id - - # Message tracking - self.messages: list[Any] = [] - self.serialized_messages: list[dict] = [] - - # Streaming context for text - self.streaming_ctx = None - - # Result data - self.session_id: str | None = None - self.usage_info: dict | None = None - self.cost_info: float | None = None - - async def initialize(self): - """Initialize streaming context if task_id is available.""" - if self.task_id: - logger.debug(f"Creating streaming context for task: {self.task_id}") - self.streaming_ctx = await adk.streaming.streaming_task_message_context( - task_id=self.task_id, - initial_content=TextContent( - author="agent", - content="", - format="markdown" - ) - ).__aenter__() - - async def handle_message(self, message: Any): - """Process a single message from Claude SDK.""" - self.messages.append(message) - msg_num = len(self.messages) - - # Debug logging (verbose - only for troubleshooting) - logger.debug(f"๐Ÿ“จ [{msg_num}] Message type: {type(message).__name__}") - if isinstance(message, AssistantMessage): - block_types = [type(b).__name__ for b in message.content] - logger.debug(f" [{msg_num}] Content blocks: {block_types}") - - # Route to specific handlers - # Note: Tool requests/responses are handled by hooks, not here! 
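- # Message types without a dedicated handler are still captured in self.messages
- # above; they simply don't produce a streaming update.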
- if isinstance(message, AssistantMessage): - await self._handle_assistant_message(message, msg_num) - elif isinstance(message, SystemMessage): - await self._handle_system_message(message) - elif isinstance(message, ResultMessage): - await self._handle_result_message(message) - - async def _handle_assistant_message(self, message: AssistantMessage, _msg_num: int): - """Handle AssistantMessage - contains text blocks. - - Note: Tool calls (ToolUseBlock/ToolResultBlock) are handled by hooks, not here. - We only process TextBlock for streaming text to UI. - """ - # Stream text blocks to UI - for block in message.content: - if isinstance(block, TextBlock): - await self._handle_text_block(block) - - # Collect text for final response - text_content = [] - for block in message.content: - if isinstance(block, TextBlock): - text_content.append(block.text) - - if text_content: - self.serialized_messages.append({ - "role": "assistant", - "content": "\n".join(text_content) - }) - - async def _handle_text_block(self, block: TextBlock): - """Handle text content block.""" - if not block.text or not self.streaming_ctx: - return - - logger.debug(f"๐Ÿ’ฌ Text block: {block.text[:50]}...") - - delta = TextDelta(type="text", text_delta=block.text) - - try: - await self.streaming_ctx.stream_update( - StreamTaskMessageDelta( - parent_task_message=self.streaming_ctx.task_message, - delta=delta, - type="delta" - ) - ) - except Exception as e: - logger.warning(f"Failed to stream text delta: {e}") - - async def _handle_system_message(self, message: SystemMessage): - """Handle system message - extract session_id.""" - if message.subtype == "init": - self.session_id = message.data.get("session_id") - logger.debug(f"Session initialized: {self.session_id[:16] if self.session_id else 'unknown'}...") - else: - logger.debug(f"SystemMessage: {message.subtype}") - - async def _handle_result_message(self, message: ResultMessage): - """Handle result message - extract usage and cost.""" - self.usage_info = message.usage - self.cost_info = message.total_cost_usd - - # Update session_id if available - if message.session_id: - self.session_id = message.session_id - - logger.info(f"๐Ÿ’ฐ Cost: ${self.cost_info:.4f}, Duration: {message.duration_ms}ms, Turns: {message.num_turns}") - - async def cleanup(self): - """Clean up open streaming contexts.""" - if self.streaming_ctx: - try: - await self.streaming_ctx.close() - logger.debug(f"Closed streaming context") - except Exception as e: - logger.warning(f"Failed to close streaming context: {e}") - - def get_results(self) -> dict[str, Any]: - """Get final results for Temporal.""" - return { - "messages": self.serialized_messages, - "task_id": self.task_id, - "session_id": self.session_id, - "usage": self.usage_info, - "cost_usd": self.cost_info, - } diff --git a/src/agentex/lib/core/temporal/plugins/openai_agents/README.md b/src/agentex/lib/core/temporal/plugins/openai_agents/README.md deleted file mode 100644 index 5497c466..00000000 --- a/src/agentex/lib/core/temporal/plugins/openai_agents/README.md +++ /dev/null @@ -1,750 +0,0 @@ -# Temporal + OpenAI Agents SDK Streaming Implementation - -## TL;DR - -We use Temporal interceptors to add real-time streaming to Redis/UI while maintaining workflow determinism with the STANDARD OpenAI Agents plugin. The key challenge was threading `task_id` (only known at runtime) through a plugin system initialized at startup. 
We solved this using Temporal's interceptor pattern to inject task_id into activity headers, making it available via context variables in the model. - -**What we built**: Real-time streaming of LLM responses to users while preserving Temporal's durability guarantees. - -**How**: Interceptors thread task_id โ†’ Model reads from context โ†’ stream to Redis during activity โ†’ return complete response for determinism. - -**The win**: NO forked plugin needed - uses standard `temporalio.contrib.openai_agents.OpenAIAgentsPlugin`! - -## Table of Contents -1. [Background: How OpenAI Agents SDK Works](#background-how-openai-agents-sdk-works) -2. [How Temporal's OpenAI Plugin Works](#how-temporals-openai-plugin-works) -3. [The Streaming Challenge](#the-streaming-challenge) -4. [Our Streaming Solution](#our-streaming-solution) -5. [Implementation Details](#implementation-details) -6. [Usage](#usage) -7. [Drawbacks and Maintenance](#drawbacks-and-maintenance) - ---- - -## Background: How OpenAI Agents SDK Works - -Before diving into Temporal integration, let's understand the basic OpenAI Agents SDK flow: - -```python -# Standard OpenAI Agents SDK usage -agent = Agent( - name="Assistant", - model="gpt-4", - instructions="You are a helpful assistant" -) - -# Under the hood, this happens: -runner = AgentRunner() -result = await runner.run(agent, "Hello") -# โ†“ -# runner.run() calls agent.model.get_response() -# โ†“ -# model.get_response() makes the actual LLM API call to OpenAI -``` - -The key insight: **`model.get_response()`** is where the actual LLM call happens. - ---- - -## How Temporal's OpenAI Plugin Works - -The Temporal plugin intercepts this flow to make LLM calls durable by converting them into Temporal activities. Here's how: - -### 1. Plugin Setup and Runner Override - -When you create the Temporal plugin and pass it to the worker: - -```python -# In _temporal_openai_agents.py (lines ~72-112) -@contextmanager -def set_open_ai_agent_temporal_overrides(model_params): - # This is the critical line - replaces the default runner! - set_default_agent_runner(TemporalOpenAIRunner(model_params)) -``` - -### 2. 
Model Interception Chain - -Here's the clever interception that happens: - -``` -Original OpenAI SDK Flow: -โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” -โ”‚ Agent โ”‚ --> โ”‚ Runner.run() โ”‚ --> โ”‚ Model.get_responseโ”‚ --> โ”‚ OpenAI API โ”‚ -โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ - -Temporal Plugin Flow: -โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” -โ”‚ Agent โ”‚ --> โ”‚ TemporalRunner.run โ”‚ --> โ”‚ _TemporalModelStub โ”‚ -โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ”‚ .get_response() โ”‚ - โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ - โ†“ - โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” - โ”‚ Temporal Activity โ”‚ - โ”‚ "invoke_model_activity"โ”‚ - โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ - โ†“ - โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” - โ”‚ Model.get_response() โ”‚ --> โ”‚ OpenAI API โ”‚ - โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ -``` - -### 3. The Model Stub Trick - -The `TemporalOpenAIRunner` replaces the agent's model with `_TemporalModelStub`: - -```python -# In _openai_runner.py -def _convert_agent(agent): - # Replace the model with a stub - new_agent.model = _TemporalModelStub( - model_name=agent.model, - model_params=model_params - ) - return new_agent -``` - -### 4. Activity Creation - -The `_TemporalModelStub` doesn't call the LLM directly. Instead, it creates a Temporal activity: - -```python -# In _temporal_model_stub.py -class _TemporalModelStub: - async def get_response(self, ...): - # Instead of calling the LLM, create an activity! - return await workflow.execute_activity_method( - ModelActivity.invoke_model_activity, # โ† This becomes visible in Temporal UI - activity_input, - ... - ) -``` - -### 5. Actual LLM Call in Activity - -Finally, inside the activity, the real LLM call happens: - -```python -# In _invoke_model_activity.py -class ModelActivity: - async def invoke_model_activity(self, input): - model = self._model_provider.get_model(input["model_name"]) - # NOW we actually call the LLM - return await model.get_response(...) # โ† Real OpenAI API call -``` - -**Summary**: The plugin intercepts at TWO levels: -1. **Runner level**: Replaces default runner with TemporalRunner -2. **Model level**: Replaces agent.model with _TemporalModelStub that creates activities - ---- - -## The Streaming Challenge - -### Why Temporal Doesn't Support Streaming by Default - -Temporal's philosophy is that activities should be: -- **Idempotent**: Same input โ†’ same output -- **Retriable**: Can restart from beginning on failure -- **Deterministic**: Replays produce identical results - -Streaming breaks these guarantees: -- If streaming fails halfway, where do you restart? -- How do you replay a stream deterministically? 
-- Partial responses violate idempotency - -### Why We Need Streaming Anyway - -For Scale/AgentEx customers, **latency is critical**: -- Time to first token matters more than total generation time -- Users expect to see responses as they're generated -- 10-30 second waits for long responses are unacceptable - -Our pragmatic decision: **Accept the tradeoff**. If streaming fails midway, we restart from the beginning. This may cause a brief UX hiccup but enables the streaming experience users expect. - ---- - -## Our Streaming Solution - -### The Key Insight: Where We Can Hook In - -When we instantiate the OpenAI plugin for Temporal, we can pass in a **model provider**: - -```python -plugin = OpenAIAgentsPlugin( - model_provider=StreamingModelProvider() # โ† This is our hook! -) -``` - -**IMPORTANT**: This model provider returns the ACTUAL model that makes the LLM call - this is the final layer, NOT the stub. This is where `model.get_response()` actually calls OpenAI's API. By providing our own model here, we can: - -1. Make the same OpenAI chat completion call with `stream=True` -2. Capture chunks as they arrive -3. Stream them to Redis -4. Still return the complete response for Temporal - -Our `StreamingModel` implementation: -1. **Streams to Redis** using XADD commands -2. **Returns complete response** to maintain Temporal determinism - -### The Task ID Problem - -Here's the critical issue we had to solve: - -``` -Timeline of Execution: -โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ• -Time T0: Application Startup - plugin = CustomStreamingOpenAIAgentsPlugin( - model_provider=StreamingModelProvider() โ† No task_id exists yet! - ) - -Time T1: Worker Creation - worker = Worker(plugins=[plugin]) โ† Still no task_id! - -Time T2: Worker Starts - await worker.run() โ† Still no task_id! - -Time T3: Workflow Receives Request - @workflow.defn - async def on_task_create(params): - task_id = params.task.id โ† task_id CREATED HERE! ๐ŸŽฏ - -Time T4: Model Needs to Stream - StreamingModel.get_response(...?) โ† Need task_id but how?! -โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ• -``` - -**The problem**: The model provider is configured before we know the task_id, but streaming requires task_id to route to the correct Redis channel. - -### Our Solution: Temporal Interceptors + Context Variables - -Instead of forking the plugin, we use Temporal's interceptor pattern to thread task_id through the system. This elegant solution uses standard Temporal features and requires NO custom plugin components! 
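-
-At its core, the handoff is plain Python `contextvars`: the inbound activity interceptor sets a ContextVar, and the model reads it back later in the same async context. Below is a minimal sketch of just that mechanism (illustrative only - the ContextVar name mirrors this module, but the two helper functions are hypothetical stand-ins for the interceptor and model code):
-
-```python
-from contextvars import ContextVar
-
-# Module-level ContextVar; defaults to None when no task is active.
-streaming_task_id: ContextVar[str | None] = ContextVar("streaming_task_id", default=None)
-
-async def on_activity_start(task_id: str) -> None:
-    # What the inbound activity interceptor does after decoding the header.
-    streaming_task_id.set(task_id)
-
-async def inside_model() -> str | None:
-    # What the streaming model does: read the task_id back from the context.
-    return streaming_task_id.get()
-```
-
-Because ContextVars are scoped per async context, concurrent activities on the same worker each see their own task_id, which is what makes this handoff safe without any locking.
-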
- -Here's exactly how task_id flows through the interceptor chain: - -``` -โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” -โ”‚ WORKFLOW EXECUTION โ”‚ -โ”‚ self._task_id = params.task.id <-- Store in instance variable โ”‚ -โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ - โ†“ workflow.instance() -โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” -โ”‚ StreamingWorkflowOutboundInterceptor โ”‚ -โ”‚ โ€ข Reads _task_id from workflow.instance() โ”‚ -โ”‚ โ€ข Injects into activity headers โ”‚ -โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ - โ†“ headers["streaming-task-id"]="abc123" -โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” -โ”‚ STANDARD Temporal Plugin โ”‚ -โ”‚ โ€ข Uses standard TemporalRunner (no fork!) โ”‚ -โ”‚ โ€ข Uses standard TemporalModelStub (no fork!) โ”‚ -โ”‚ โ€ข Creates standard invoke_model_activity โ”‚ -โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ - โ†“ activity with headers -โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” -โ”‚ StreamingActivityInboundInterceptor โ”‚ -โ”‚ โ€ข Extracts task_id from headers โ”‚ -โ”‚ โ€ข Sets streaming_task_id ContextVar โ”‚ -โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ - โ†“ streaming_task_id.set("abc123") -โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” -โ”‚ StreamingModel.get_response() โ”‚ -โ”‚ โ€ข Reads task_id from streaming_task_id.get() โ”‚ -โ”‚ โ€ข Streams chunks to Redis channel: "stream:abc123" โ”‚ -โ”‚ โ€ข Returns complete response for Temporal โ”‚ -โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ - โ†“ -โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” -โ”‚ REDIS โ”‚ -โ”‚ XADD stream:abc123 chunk1, chunk2, chunk3... 
โ”‚ -โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ฌโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ - โ†“ -โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” -โ”‚ UI SUBSCRIBER โ”‚ -โ”‚ Reads from stream:abc123 and displays real-time updates โ”‚ -โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ -``` - ---- - -## Implementation Details - -### The Interceptor Approach - Clean and Maintainable - -Instead of forking components, we use Temporal's interceptor system. Here's what we built: - -### 1. StreamingInterceptor - The Main Component - -```python -# streaming_interceptor.py -class StreamingInterceptor(Interceptor): - """Main interceptor that enables task_id threading.""" - - def intercept_activity(self, next): - # Create activity interceptor to extract headers - return StreamingActivityInboundInterceptor(next, self._payload_converter) - - def workflow_interceptor_class(self, input): - # Return workflow interceptor class - return StreamingWorkflowInboundInterceptor -``` - -### 2. Task ID Flow - Using Standard Components - -Here's EXACTLY how task_id flows through the system without any forked components: - -#### Step 1: Workflow stores task_id in instance variable -```python -# workflow.py -self._task_id = params.task.id # Store in instance variable -result = await Runner.run(agent, input) # No context needed! -``` - -#### Step 2: Outbound Interceptor injects task_id into headers -```python -# StreamingWorkflowOutboundInterceptor -def start_activity(self, input): - workflow_instance = workflow.instance() - task_id = getattr(workflow_instance, '_task_id', None) - if task_id and "invoke_model_activity" in str(input.activity): - input.headers["streaming-task-id"] = self._payload_converter.to_payload(task_id) -``` - -#### Step 3: Inbound Interceptor extracts from headers and sets context -```python -# StreamingActivityInboundInterceptor -async def execute_activity(self, input): - if input.headers and "streaming-task-id" in input.headers: - task_id = self._payload_converter.from_payload(input.headers["streaming-task-id"], str) - streaming_task_id.set(task_id) # Set ContextVar! -``` - -#### Step 4: StreamingModel reads from context variable -```python -# StreamingModel.get_response() -from agentex.lib.core.temporal.plugins.openai_agents.streaming_interceptor import ( - streaming_task_id, - streaming_trace_id, - streaming_parent_span_id -) - -async def get_response(self, ...): - # Read from ContextVar - set by interceptor! - task_id = streaming_task_id.get() - trace_id = streaming_trace_id.get() - parent_span_id = streaming_parent_span_id.get() - - if task_id: - # Open streaming context to Redis - async with adk.streaming.streaming_task_message_context( - task_id=task_id, - ... - ) as streaming_context: - # Stream tokens as they arrive - ... -``` - -### 3. Worker Configuration - Simply Add the Interceptor - -```python -# run_worker.py -from temporalio.contrib.openai_agents import OpenAIAgentsPlugin # STANDARD! 
-from agentex.lib.core.temporal.plugins.openai_agents import ( - StreamingInterceptor, - StreamingModelProvider, -) - -# Create the interceptor -interceptor = StreamingInterceptor() - -# Use STANDARD plugin with streaming model provider -plugin = OpenAIAgentsPlugin( - model_provider=StreamingModelProvider(), - model_params=ModelActivityParameters(...) -) - -# Create worker with interceptor -worker = Worker( - client, - task_queue="example_tutorial_queue", - workflows=[ExampleTutorialWorkflow], - activities=[...], - interceptors=[interceptor], # Just add interceptor! -) -``` - -### 4. The Streaming Model - Where Magic Happens - -This is where the actual streaming happens. Our `StreamingModel` is what gets called inside the activity: - -```python -# streaming_model.py -class StreamingModel(Model): - async def get_response(self, ..., task_id=None): - # 1. Open Redis streaming context with task_id - async with adk.streaming.streaming_task_message_context( - task_id=task_id, # โ† This creates Redis channel stream:abc123 - initial_content=TextContent(author="agent", content="") - ) as streaming_context: - - # 2. Make OpenAI call WITH STREAMING - stream = await self.client.chat.completions.create( - model=self.model_name, - messages=messages, - stream=True, # โ† Enable streaming! - # ... other params ... - ) - - # 3. Process chunks as they arrive - full_content = "" - async for chunk in stream: - if chunk.choices and chunk.choices[0].delta.content: - content = chunk.choices[0].delta.content - full_content += content - - # 4. Stream to Redis (UI sees this immediately!) - delta = TextDelta(type="text", text_delta=content) - update = StreamTaskMessageDelta( - parent_task_message=streaming_context.task_message, - delta=delta, - type="delta" - ) - await streaming_context.stream_update(update) - - # 5. Handle tool calls (sent as complete messages, not streamed) - if tool_calls: - for tool_call_data in tool_calls.values(): - tool_request = ToolRequestContent( - author="agent", - tool_call_id=tool_call_data["id"], - name=tool_call_data["function"]["name"], - arguments=json.loads(tool_call_data["function"]["arguments"]) - ) - - # Tool calls use StreamTaskMessageFull (complete message) - async with adk.streaming.streaming_task_message_context( - task_id=task_id, - initial_content=tool_request - ) as tool_context: - await tool_context.stream_update( - StreamTaskMessageFull( - parent_task_message=tool_context.task_message, - content=tool_request, - type="full" - ) - ) - - # 6. Handle reasoning tokens (o1 models) - if reasoning_content: # For o1 models - reasoning = ReasoningContent( - author="agent", - summary=[reasoning_content], - type="reasoning" - ) - # Stream reasoning as complete message - await stream_reasoning_update(reasoning) - - # 7. Context auto-closes and saves to DB - # The streaming_task_message_context: - # - Accumulates all chunks - # - Saves complete message to database - # - Sends DONE signal to Redis - - # 8. Return complete response for Temporal determinism - return ModelResponse( - output=output_items, # Complete response - usage=usage, - response_id=completion_id - ) -``` - -### 5. Redis and AgentEx Streaming Infrastructure - -Here's what happens under the hood with AgentEx's streaming system: - -#### Redis Implementation Details - -1. **Channel Creation**: `stream:{task_id}` - Each task gets its own Redis stream -2. **XADD Commands**: Each chunk is appended using Redis XADD -3. 
**Message Types**:
-   - `StreamTaskMessageDelta`: For text chunks (token by token)
-   - `StreamTaskMessageFull`: For complete messages (tool calls, reasoning)
-4. **Auto-accumulation**: The streaming context accumulates all chunks
-5. **Database Persistence**: The complete message is saved to the DB when the context closes
-6. **DONE Signal**: Sent to Redis when streaming completes
-
-#### What Gets Streamed
-
-```python
-# Text content - streamed token by token
-await streaming_context.stream_update(
-    StreamTaskMessageDelta(delta=TextDelta(text_delta=chunk))
-)
-
-# Tool calls - sent as complete messages
-await streaming_context.stream_update(
-    StreamTaskMessageFull(content=ToolRequestContent(...))
-)
-
-# Reasoning (o1 models) - sent as complete
-await streaming_context.stream_update(
-    StreamTaskMessageFull(content=ReasoningContent(...))
-)
-
-# Guardrails - sent as complete
-await streaming_context.stream_update(
-    StreamTaskMessageFull(content=GuardrailContent(...))
-)
-```
-
-#### UI Subscription
-
-The frontend subscribes to `stream:{task_id}` and receives:
-1. Real-time text chunks as they're generated
-2. Complete tool calls when they're ready
-3. Reasoning summaries for o1 models
-4. A DONE signal when the stream completes
-
-This decoupling means we can stream anything we want through Redis! (A runnable subscriber sketch appears at the end of the Usage section below.)
-
-### 6. Workflow Integration
-
-```python
-# workflow.py
-@workflow.defn
-class ExampleWorkflow:
-    async def on_task_event_send(self, params):
-        # Store task_id in an instance variable; the outbound
-        # interceptor reads it and injects it into activity headers
-        self._task_id = params.task.id  # ← Critical line!
-
-        result = await Runner.run(agent, input)  # No context plumbing needed!
-```
-
----
-
-## Usage
-
-### Installation
-
-This plugin is included in the agentex-python package. No additional installation is needed.
-
-### Basic Setup
-
-```python
-from datetime import timedelta
-
-from temporalio.client import Client
-from temporalio.contrib.openai_agents import ModelActivityParameters, OpenAIAgentsPlugin
-from temporalio.worker import Worker
-
-from agentex.lib.core.temporal.plugins.openai_agents import (
-    StreamingInterceptor,
-    StreamingModelProvider,
-)
-
-# Create the STANDARD plugin with the streaming model provider
-plugin = OpenAIAgentsPlugin(
-    model_params=ModelActivityParameters(
-        start_to_close_timeout=timedelta(seconds=120),
-    ),
-    model_provider=StreamingModelProvider(),
-)
-
-# Use with the Temporal client
-client = await Client.connect(
-    "localhost:7233",
-    plugins=[plugin]
-)
-
-# Create the worker with the interceptor registered
-worker = Worker(
-    client,
-    task_queue="my-task-queue",
-    workflows=[MyWorkflow],
-    interceptors=[StreamingInterceptor()],
-)
-```
-
-### In Your Workflow
-
-```python
-from agents import Agent, Runner
-
-@workflow.defn
-class MyWorkflow:
-    @workflow.run
-    async def run(self, params):
-        # Store task_id in an instance variable for the interceptor
-        self._task_id = params.task.id
-
-        # Create an agent
-        agent = Agent(
-            name="Assistant",
-            instructions="You are a helpful assistant",
-            model="gpt-4o",
-        )
-
-        # Run the agent - streaming happens automatically!
-        result = await Runner.run(agent, params.event.content)
-
-        return result.final_output
-```
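-For reference, here is a minimal subscriber sketch showing what "the UI reads from `stream:{task_id}`" means in practice. It assumes `redis-py`'s asyncio client and the `stream:{task_id}` key naming described above; the payload fields and the DONE marker shown are illustrative, since the AgentEx UI uses its own client and message schema:
-
-```python
-import asyncio
-
-import redis.asyncio as redis
-
-async def follow_stream(task_id: str) -> None:
-    r = redis.Redis()
-    stream_key = f"stream:{task_id}"
-    last_id = "0-0"  # start from the beginning of the stream
-    while True:
-        # Block up to 5s waiting for entries newer than last_id
-        entries = await r.xread({stream_key: last_id}, count=100, block=5000)
-        for _key, messages in entries:
-            for message_id, fields in messages:
-                last_id = message_id
-                print(fields)  # e.g. a serialized StreamTaskMessageDelta
-                if fields.get(b"type") == b"done":  # hypothetical DONE marker
-                    return
-
-asyncio.run(follow_stream("abc123"))
-```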
-### Comparison with the Original Temporal Plugin
-
-| Feature | Original Plugin | Streaming Setup |
-|---------|----------------|-----------------|
-| **Response Time** | Complete response only (10-30s wait) | Real-time streaming (immediate feedback) |
-| **User Experience** | No feedback during generation | See the response as it's generated |
-| **Task ID Support** | Not supported | Threaded via interceptor headers + ContextVar |
-| **Activity Name** | `invoke_model_activity` | `invoke_model_activity` (standard, unchanged) |
-| **Model Stub** | `_TemporalModelStub` | `_TemporalModelStub` (standard, unchanged) |
-| **Runner** | `TemporalOpenAIRunner` | `TemporalOpenAIRunner` (standard, unchanged) |
-| **Model** | Default model provider | `StreamingModel` via `StreamingModelProvider` |
-| **Redis Integration** | None | Full streaming via the AgentEx ADK |
-| **Temporal Determinism** | ✅ Yes | ✅ Yes (returns the complete response) |
-| **Replay Safety** | ✅ Yes | ✅ Yes (streaming is a side effect only) |
-
----
-
-## Benefits of the Interceptor Approach
-
-### Major Advantages Over Forking
-
-1. **No Code Duplication**: Uses the standard `temporalio.contrib.openai_agents` plugin
-   - Automatic compatibility with Temporal updates
-   - No risk of divergence from upstream features
-   - Zero maintenance of forked code
-
-2. **Clean Architecture**:
-   - Interceptors are Temporal's official extension mechanism
-   - Clear separation between streaming logic and the core plugin
-   - Easy to enable/disable streaming by adding/removing the interceptor
-
-3. **Simplicity**:
-   - A single interceptor handles all task_id threading
-   - Uses Python's ContextVar for async-safe state
-   - No need to understand Temporal plugin internals
-
-### Minimal Limitations
-
-1. **Streaming Semantics** (unchanged):
-   - On failure, streaming restarts from the beginning (which may show duplicate partial content)
-   - This is an acceptable tradeoff for the user experience
-
-2. **Worker Configuration**:
-   - The interceptor must be registered with the worker
-   - The workflow must store task_id in an instance variable
-
-### Future Improvements
-
-1. **Contribute Back**:
-   - This pattern could be contributed to Temporal as an example
-   - It shows how to extend plugins without forking
-
-2. **Enhanced Features**:
-   - Request/response interceptors could be added for other use cases
-   - The pattern works for any runtime context threading need
-
-### Alternative Approaches Considered
-
-1. **Workflow-level streaming**: Stream directly from the workflow (violates determinism)
-2. **Separate streaming service**: Additional infrastructure complexity
-3. **Polling pattern**: Poor latency characteristics
-4. **WebSockets**: Doesn't integrate with the existing AgentEx infrastructure
-
----
-
-## Key Innovation
-
-The most important innovation is **using interceptors for runtime context threading**. Instead of forking the plugin to pass task_id through custom components, we use Temporal's interceptor system together with Python's ContextVar. This allows:
-
-- One plugin instance for all workflows (the standard plugin!)
-- Dynamic streaming channels per execution
-- Clean separation of concerns
-- No forked components to maintain
-- Async-safe context propagation
-- Compatibility with all Temporal updates
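-To make the "side effect only" guarantee concrete, here is a compact sketch of the contract the streaming model satisfies (illustrative names, not the actual class): chunks are published as they arrive, but Temporal only ever records the complete return value, so replays stay deterministic.
-
-```python
-from typing import AsyncIterator, Awaitable, Callable
-
-async def respond_with_streaming(
-    publish_chunk: Callable[[str], Awaitable[None]],
-    llm_stream: AsyncIterator[str],
-) -> str:
-    """Stream chunks as a side effect, but return the accumulated result."""
-    parts: list[str] = []
-    async for chunk in llm_stream:
-        await publish_chunk(chunk)  # side effect: Redis XADD, UI update, etc.
-        parts.append(chunk)
-    return "".join(parts)  # Temporal persists only this activity result
-```
-
-If the activity is retried, the stream is re-published from the start, but the recorded workflow history still contains a single complete response.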
----
-
-## Troubleshooting
-
-**No streaming visible in the UI:**
-- Ensure the workflow stores the task ID before running the agent: `self._task_id = params.task.id`
-- Verify Redis is running and accessible
-- Check that the UI is subscribed to the correct task channel
-
-**Import errors:**
-- Make sure agentex-python/src is on your Python path
-- Install the required dependencies: `uv add agentex-sdk openai-agents temporalio`
-
-**Activity not found:**
-- Ensure the plugin is registered with both the client and the worker
-- Check that the standard `invoke_model_activity` is registered (the plugin does this automatically) and that the `StreamingInterceptor` is in the worker's `interceptors` list
-
----
-
-## Testing
-
-### Running Tests
-
-The streaming model implementation has comprehensive tests in `tests/test_streaming_model.py` that verify all configurations, tool types, and edge cases.
-
-#### From Repository Root
-
-```bash
-# Run all tests
-rye run pytest src/agentex/lib/core/temporal/plugins/openai_agents/tests/test_streaming_model.py -v
-
-# Run without parallel execution (more stable)
-rye run pytest src/agentex/lib/core/temporal/plugins/openai_agents/tests/test_streaming_model.py -v -n0
-
-# Run a specific test
-rye run pytest src/agentex/lib/core/temporal/plugins/openai_agents/tests/test_streaming_model.py::TestStreamingModelSettings::test_temperature_setting -v
-```
-
-#### From the Test Directory
-
-```bash
-cd src/agentex/lib/core/temporal/plugins/openai_agents/tests
-
-# Run all tests
-rye run pytest test_streaming_model.py -v
-
-# Run without parallel execution (recommended)
-rye run pytest test_streaming_model.py -v -n0
-
-# Run a specific test class
-rye run pytest test_streaming_model.py::TestStreamingModelSettings -v
-```
-
-#### Test Coverage
-
-The test suite covers:
-- **ModelSettings**: All configuration parameters (temperature, reasoning, truncation, etc.)
-- **Tool Types**: Function tools, web search, file search, computer tools, MCP tools, etc.
-- **Streaming**: Redis context creation, task ID threading, error handling
-- **Edge Cases**: Missing task IDs, multiple computer tools, handoffs
-
-**Note**: Run the suite with `-n0` to disable parallel execution; it is faster here and avoids state pollution between test workers. All 29 tests pass individually; parallel execution may show 4-6 intermittent failures due to shared mock state.
-
----
-
-## Conclusion
-
-This implementation uses Temporal interceptors to thread task_id through the standard OpenAI plugin, enabling real-time streaming while maintaining workflow determinism. The key innovation is using interceptors with Python's ContextVar to propagate runtime context without forking any Temporal components.
-
-This approach provides the optimal user experience with:
-- **Zero code duplication** - uses the standard Temporal plugin
-- **Minimal maintenance** - only the interceptor and streaming model to maintain
-- **Clean architecture** - leverages Temporal's official extension mechanism
-- **Full compatibility** - works with all Temporal and OpenAI SDK updates
-
-The interceptor pattern demonstrates how to extend Temporal plugins without forking, setting a precedent for future enhancements.
\ No newline at end of file
diff --git a/src/agentex/lib/core/temporal/plugins/openai_agents/__init__.py b/src/agentex/lib/core/temporal/plugins/openai_agents/__init__.py
deleted file mode 100644
index def67c9a..00000000
--- a/src/agentex/lib/core/temporal/plugins/openai_agents/__init__.py
+++ /dev/null
@@ -1,84 +0,0 @@
-"""OpenAI Agents SDK Temporal Plugin with Streaming Support.
-
-This module provides streaming capabilities for the OpenAI Agents SDK in Temporal
-using interceptors to thread task_id through workflows to activities.
-
-The streaming implementation works by:
-1. Using Temporal interceptors to thread task_id through the execution
-2. Streaming LLM responses to Redis in real-time from activities
-3. Streaming lifecycle events (tool calls, handoffs) via hooks and activities
-4. Returning complete responses to maintain Temporal determinism
-
-Example - Complete Setup:
-    >>> from agentex.lib.core.temporal.plugins.openai_agents import (
-    ...     TemporalStreamingModelProvider,
-    ...     TemporalStreamingHooks,
-    ...     ContextInterceptor,
-    ... )
-    >>> from temporalio.contrib.openai_agents import OpenAIAgentsPlugin, ModelActivityParameters
-    >>> from datetime import timedelta
-    >>> from agents import Agent, Runner
-    >>>
-    >>> # 1. Create streaming model provider
-    >>> model_provider = TemporalStreamingModelProvider()
-    >>>
-    >>> # 2. Create STANDARD plugin with streaming model provider
-    >>> plugin = OpenAIAgentsPlugin(
-    ...     model_params=ModelActivityParameters(
-    ...         start_to_close_timeout=timedelta(seconds=120),
-    ...     ),
-    ...     model_provider=model_provider,
-    ... )
-    >>>
-    >>> # 3. Register interceptor with worker
-    >>> interceptor = ContextInterceptor()
-    >>> # Add interceptor to worker configuration
-    >>>
-    >>> # 4. In workflow, store task_id in instance variable
-    >>> self._task_id = params.task.id
-    >>>
-    >>> # 5. Create hooks for streaming lifecycle events
-    >>> hooks = TemporalStreamingHooks(task_id="your-task-id")
-    >>>
-    >>> # 6.
Run agent - interceptor handles task_id threading automatically - >>> result = await Runner.run(agent, input, hooks=hooks) - -This gives you: -- Real-time streaming of LLM responses (via StreamingModel + interceptors) -- Real-time streaming of tool calls (via TemporalStreamingHooks) -- Real-time streaming of agent handoffs (via TemporalStreamingHooks) -- Full Temporal durability and observability -- No forked plugin required - uses standard OpenAIAgentsPlugin -""" - -from agentex.lib.core.temporal.plugins.openai_agents.hooks.hooks import ( - TemporalStreamingHooks, -) -from agentex.lib.core.temporal.plugins.openai_agents.hooks.activities import ( - stream_lifecycle_content, -) -from agentex.lib.core.temporal.plugins.openai_agents.models.temporal_tracing_model import ( - TemporalTracingModelProvider, -) -from agentex.lib.core.temporal.plugins.openai_agents.models.temporal_streaming_model import ( - TemporalStreamingModel, - TemporalStreamingModelProvider, -) -from agentex.lib.core.temporal.plugins.openai_agents.interceptors.context_interceptor import ( - ContextInterceptor, - streaming_task_id, - streaming_trace_id, - streaming_parent_span_id, -) - -__all__ = [ - "TemporalStreamingModel", - "TemporalStreamingModelProvider", - "TemporalTracingModelProvider", - "ContextInterceptor", - "streaming_task_id", - "streaming_trace_id", - "streaming_parent_span_id", - "TemporalStreamingHooks", - "stream_lifecycle_content", -] \ No newline at end of file diff --git a/src/agentex/lib/core/temporal/plugins/openai_agents/hooks/__init__.py b/src/agentex/lib/core/temporal/plugins/openai_agents/hooks/__init__.py deleted file mode 100644 index 7a01e3f5..00000000 --- a/src/agentex/lib/core/temporal/plugins/openai_agents/hooks/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -"""Temporal streaming hooks and activities for OpenAI Agents SDK. - -This module provides hooks for streaming agent lifecycle events and -activities for streaming content to the AgentEx UI. -""" - -from agentex.lib.core.temporal.plugins.openai_agents.hooks.hooks import ( - TemporalStreamingHooks, -) -from agentex.lib.core.temporal.plugins.openai_agents.hooks.activities import ( - stream_lifecycle_content, -) - -__all__ = [ - "TemporalStreamingHooks", - "stream_lifecycle_content", -] \ No newline at end of file diff --git a/src/agentex/lib/core/temporal/plugins/openai_agents/hooks/activities.py b/src/agentex/lib/core/temporal/plugins/openai_agents/hooks/activities.py deleted file mode 100644 index bcd82385..00000000 --- a/src/agentex/lib/core/temporal/plugins/openai_agents/hooks/activities.py +++ /dev/null @@ -1,78 +0,0 @@ -"""Temporal activities for streaming agent lifecycle events. - -This module provides reusable Temporal activities for streaming content -to the AgentEx UI, designed to work with TemporalStreamingHooks. -""" - -from typing import Union - -from temporalio import activity - -from agentex.lib import adk -from agentex.types.text_content import TextContent -from agentex.types.task_message_update import StreamTaskMessageFull -from agentex.types.task_message_content import ( - TaskMessageContent, - ToolRequestContent, - ToolResponseContent, -) - - -@activity.defn(name="stream_lifecycle_content") -async def stream_lifecycle_content( - task_id: str, - content: Union[TextContent, ToolRequestContent, ToolResponseContent, TaskMessageContent], -) -> None: - """Stream agent lifecycle content to the AgentEx UI. 
- - This is a universal streaming activity that can handle any type of agent - lifecycle content (text messages, tool requests, tool responses, etc.). - It uses the AgentEx streaming context to send updates to the UI in real-time. - - Designed to work seamlessly with TemporalStreamingHooks. The hooks class - will call this activity automatically when lifecycle events occur. - - Args: - task_id: The AgentEx task ID for routing the content to the correct UI session - content: The content to stream - can be any of: - - TextContent: Plain text messages (e.g., handoff notifications) - - ToolRequestContent: Tool invocation requests with call_id and name - - ToolResponseContent: Tool execution results with call_id and output - - TaskMessageContent: Generic task message content - - Example: - Register this activity with your Temporal worker:: - - from agentex.lib.core.temporal.plugins.openai_agents import ( - TemporalStreamingHooks, - stream_lifecycle_content, - ) - - # In your workflow - hooks = TemporalStreamingHooks( - task_id=params.task.id, - stream_activity=stream_lifecycle_content - ) - result = await Runner.run(agent, input, hooks=hooks) - - Note: - This activity is non-blocking and will not throw exceptions to the workflow. - Any streaming errors are logged but do not fail the activity. This ensures - that streaming failures don't break the agent execution. - """ - try: - async with adk.streaming.streaming_task_message_context( - task_id=task_id, - initial_content=content, - ) as streaming_context: - # Send the content as a full message update - await streaming_context.stream_update( - StreamTaskMessageFull( - parent_task_message=streaming_context.task_message, - content=content, - type="full", - ) - ) - except Exception as e: - # Log error but don't fail the activity - streaming failures shouldn't break execution - activity.logger.warning(f"Failed to stream content to task {task_id}: {e}") diff --git a/src/agentex/lib/core/temporal/plugins/openai_agents/hooks/hooks.py b/src/agentex/lib/core/temporal/plugins/openai_agents/hooks/hooks.py deleted file mode 100644 index 795d44a0..00000000 --- a/src/agentex/lib/core/temporal/plugins/openai_agents/hooks/hooks.py +++ /dev/null @@ -1,209 +0,0 @@ -"""Temporal streaming hooks for OpenAI Agents SDK lifecycle events. - -This module provides a convenience class for streaming agent lifecycle events -to the AgentEx UI via Temporal activities. -""" - -import logging -from typing import Any, override -from datetime import timedelta - -from agents import Tool, Agent, RunHooks, RunContextWrapper -from temporalio import workflow -from agents.tool_context import ToolContext - -from agentex.types.text_content import TextContent -from agentex.types.task_message_content import ToolRequestContent, ToolResponseContent -from agentex.lib.core.temporal.plugins.openai_agents.hooks.activities import stream_lifecycle_content - -logger = logging.getLogger(__name__) - - -class TemporalStreamingHooks(RunHooks): - """Convenience hooks class for streaming OpenAI Agent lifecycle events to the AgentEx UI. - - This class automatically streams agent lifecycle events (tool calls, handoffs) to the - AgentEx UI via Temporal activities. It subclasses the OpenAI Agents SDK's RunHooks - to intercept lifecycle events and forward them for real-time UI updates. 
- - Lifecycle events streamed: - - Tool requests (on_tool_start): Streams when a tool is about to be invoked - - Tool responses (on_tool_end): Streams the tool's execution result - - Agent handoffs (on_handoff): Streams when control transfers between agents - - Usage: - Basic usage - streams all lifecycle events:: - - from agentex.lib.core.temporal.plugins.openai_agents import TemporalStreamingHooks - - hooks = TemporalStreamingHooks(task_id="abc123") - result = await Runner.run(agent, input, hooks=hooks) - - Advanced - subclass for custom behavior:: - - class MyCustomHooks(TemporalStreamingHooks): - async def on_tool_start(self, context, agent, tool): - # Add custom logic before streaming - await self.my_custom_logging(tool) - # Call parent to stream to UI - await super().on_tool_start(context, agent, tool) - - async def on_agent_start(self, context, agent): - # Override empty methods for additional tracking - print(f"Agent {agent.name} started") - - Power users can ignore this class and subclass agents.RunHooks directly for full control. - - Note: - Tool arguments are extracted from the ToolContext's tool_arguments field, - which contains a JSON string of the arguments passed to the tool. - - Attributes: - task_id: The AgentEx task ID for routing streamed events - timeout: Timeout for streaming activity calls (default: 10 seconds) - """ - - def __init__( - self, - task_id: str, - timeout: timedelta = timedelta(seconds=10), - ): - """Initialize the streaming hooks. - - Args: - task_id: AgentEx task ID for routing streamed events to the correct UI session - timeout: Timeout for streaming activity invocations (default: 10 seconds) - """ - super().__init__() - self.task_id = task_id - self.timeout = timeout - - @override - async def on_agent_start(self, context: RunContextWrapper, agent: Agent) -> None: # noqa: ARG002 - """Called when an agent starts execution. - - Default implementation logs the event. Override to add custom behavior. - - Args: - context: The run context wrapper - agent: The agent that is starting - """ - logger.debug(f"[TemporalStreamingHooks] Agent '{agent.name}' started execution") - - @override - async def on_agent_end(self, context: RunContextWrapper, agent: Agent, output: Any) -> None: # noqa: ARG002 - """Called when an agent completes execution. - - Default implementation logs the event. Override to add custom behavior. - - Args: - context: The run context wrapper - agent: The agent that completed - output: The agent's output - """ - logger.debug(f"[TemporalStreamingHooks] Agent '{agent.name}' completed execution with output type: {type(output).__name__}") - - @override - async def on_tool_start(self, context: RunContextWrapper, agent: Agent, tool: Tool) -> None: # noqa: ARG002 - """Stream tool request when a tool starts execution. - - Extracts the tool_call_id and tool_arguments from the context and streams a - ToolRequestContent message to the UI showing that the tool is about to execute. 
- - Args: - context: The run context wrapper (will be a ToolContext with tool_call_id and tool_arguments) - agent: The agent executing the tool - tool: The tool being executed - """ - import json - - tool_context = context if isinstance(context, ToolContext) else None - tool_call_id = tool_context.tool_call_id if tool_context else f"call_{id(tool)}" - - # Extract tool arguments from context - tool_arguments = {} - if tool_context and hasattr(tool_context, 'tool_arguments'): - try: - # tool_arguments is a JSON string, parse it - tool_arguments = json.loads(tool_context.tool_arguments) - except (json.JSONDecodeError, TypeError): - # If parsing fails, log and use empty dict - logger.warning(f"Failed to parse tool arguments: {tool_context.tool_arguments}") - tool_arguments = {} - - await workflow.execute_activity_method( - stream_lifecycle_content, - args=[ - self.task_id, - ToolRequestContent( - author="agent", - tool_call_id=tool_call_id, - name=tool.name, - arguments=tool_arguments, # Now properly extracted from context - ), - ], - start_to_close_timeout=self.timeout, - ) - - @override - async def on_tool_end( - self, context: RunContextWrapper, agent: Agent, tool: Tool, result: str # noqa: ARG002 - ) -> None: - """Stream tool response when a tool completes execution. - - Extracts the tool_call_id and streams a ToolResponseContent message to the UI - showing the tool's execution result. - - Args: - context: The run context wrapper (will be a ToolContext with tool_call_id) - agent: The agent that executed the tool - tool: The tool that was executed - result: The tool's execution result - """ - tool_context = context if isinstance(context, ToolContext) else None - tool_call_id = ( - getattr(tool_context, "tool_call_id", f"call_{id(tool)}") - if tool_context - else f"call_{id(tool)}" - ) - - await workflow.execute_activity_method( - stream_lifecycle_content, - args=[ - self.task_id, - ToolResponseContent( - author="agent", - tool_call_id=tool_call_id, - name=tool.name, - content=result, - ), - ], - start_to_close_timeout=self.timeout, - ) - - @override - async def on_handoff( - self, context: RunContextWrapper, from_agent: Agent, to_agent: Agent # noqa: ARG002 - ) -> None: - """Stream handoff message when control transfers between agents. - - Sends a text message to the UI indicating that one agent is handing off - to another agent. - - Args: - context: The run context wrapper - from_agent: The agent transferring control - to_agent: The agent receiving control - """ - await workflow.execute_activity_method( - stream_lifecycle_content, - args=[ - self.task_id, - TextContent( - author="agent", - content=f"Handoff from {from_agent.name} to {to_agent.name}", - type="text", - ), - ], - start_to_close_timeout=self.timeout, - ) diff --git a/src/agentex/lib/core/temporal/plugins/openai_agents/interceptors/__init__.py b/src/agentex/lib/core/temporal/plugins/openai_agents/interceptors/__init__.py deleted file mode 100644 index 47290ea4..00000000 --- a/src/agentex/lib/core/temporal/plugins/openai_agents/interceptors/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -"""Temporal interceptors for OpenAI Agents SDK integration. - -This module provides interceptors for threading context (task_id, trace_id, parent_span_id) -from workflows to activities in Temporal. 
-""" - -from agentex.lib.core.temporal.plugins.openai_agents.interceptors.context_interceptor import ( - ContextInterceptor, - streaming_task_id, - streaming_trace_id, - streaming_parent_span_id, -) - -__all__ = [ - "ContextInterceptor", - "streaming_task_id", - "streaming_trace_id", - "streaming_parent_span_id", -] \ No newline at end of file diff --git a/src/agentex/lib/core/temporal/plugins/openai_agents/interceptors/context_interceptor.py b/src/agentex/lib/core/temporal/plugins/openai_agents/interceptors/context_interceptor.py deleted file mode 100644 index 1111249f..00000000 --- a/src/agentex/lib/core/temporal/plugins/openai_agents/interceptors/context_interceptor.py +++ /dev/null @@ -1,162 +0,0 @@ -""" -Temporal context interceptors for threading runtime context through workflows and activities. - -This module provides interceptors that pass task_id, trace_id, and parent_span_id from -workflows to activities via headers, making them available via ContextVars for models -to use for streaming, tracing, or other purposes. -""" - -import logging -from typing import Any, Type, Optional, override -from contextvars import ContextVar - -from temporalio import workflow -from temporalio.worker import ( - Interceptor, - StartActivityInput, - ExecuteActivityInput, - ExecuteWorkflowInput, - ActivityInboundInterceptor, - WorkflowInboundInterceptor, - WorkflowOutboundInterceptor, -) -from temporalio.converter import default - -# Set up logging -logger = logging.getLogger("context.interceptor") - -# Global context variables that models can read -# These are thread-safe and work across async boundaries -streaming_task_id: ContextVar[Optional[str]] = ContextVar('streaming_task_id', default=None) -streaming_trace_id: ContextVar[Optional[str]] = ContextVar('streaming_trace_id', default=None) -streaming_parent_span_id: ContextVar[Optional[str]] = ContextVar('streaming_parent_span_id', default=None) - -# Header keys for passing context -TASK_ID_HEADER = "context-task-id" -TRACE_ID_HEADER = "context-trace-id" -PARENT_SPAN_ID_HEADER = "context-parent-span-id" - -class ContextInterceptor(Interceptor): - """Main interceptor that enables context threading through Temporal.""" - - def __init__(self): - self._payload_converter = default().payload_converter - logger.info("[ContextInterceptor] Initialized") - - @override - def intercept_activity(self, next: ActivityInboundInterceptor) -> ActivityInboundInterceptor: - """Create activity interceptor to read context from headers.""" - return ContextActivityInboundInterceptor(next, self._payload_converter) - - @override - def workflow_interceptor_class(self, _input: Any) -> Optional[Type[WorkflowInboundInterceptor]]: - """Return workflow interceptor class.""" - return ContextWorkflowInboundInterceptor - - -class ContextWorkflowInboundInterceptor(WorkflowInboundInterceptor): - """Workflow interceptor that creates the outbound interceptor.""" - - def __init__(self, next: WorkflowInboundInterceptor): - super().__init__(next) - self._payload_converter = default().payload_converter - - @override - async def execute_workflow(self, input: ExecuteWorkflowInput) -> Any: - """Execute workflow - just pass through.""" - return await self.next.execute_workflow(input) - - @override - def init(self, outbound: WorkflowOutboundInterceptor) -> None: - """Initialize with our custom outbound interceptor.""" - self.next.init(ContextWorkflowOutboundInterceptor( - outbound, self._payload_converter - )) - - -class ContextWorkflowOutboundInterceptor(WorkflowOutboundInterceptor): - """Outbound 
interceptor that adds task_id to activity headers.""" - - def __init__(self, next, payload_converter): - super().__init__(next) - self._payload_converter = payload_converter - - @override - def start_activity(self, input: StartActivityInput) -> workflow.ActivityHandle: - """Add task_id, trace_id, and parent_span_id to headers when starting model activities.""" - - # Only add headers for model activity calls (OpenAI and Claude) - activity_name = str(input.activity) if hasattr(input, 'activity') else "" - - if ("invoke_model_activity" in activity_name or - "invoke-model-activity" in activity_name or - "run_claude_agent_activity" in activity_name): - # Get task_id, trace_id, and parent_span_id from workflow instance instead of inbound interceptor - try: - workflow_instance = workflow.instance() - task_id = getattr(workflow_instance, '_task_id', None) - trace_id = getattr(workflow_instance, '_trace_id', None) - parent_span_id = getattr(workflow_instance, '_parent_span_id', None) - - if task_id and trace_id and parent_span_id: - # Initialize headers if needed - if not input.headers: - input.headers = {} - - # Add task_id to headers - input.headers[TASK_ID_HEADER] = self._payload_converter.to_payload(task_id) # type: ignore[index] - input.headers[TRACE_ID_HEADER] = self._payload_converter.to_payload(trace_id) # type: ignore[index] - input.headers[PARENT_SPAN_ID_HEADER] = self._payload_converter.to_payload(parent_span_id) # type: ignore[index] - logger.debug(f"[OutboundInterceptor] Added task_id, trace_id, and parent_span_id to activity headers: {task_id}, {trace_id}, {parent_span_id}") - else: - logger.warning("[OutboundInterceptor] No _task_id, _trace_id, or _parent_span_id found in workflow instance") - except Exception as e: - logger.error(f"[OutboundInterceptor] Failed to get task_id, trace_id, or parent_span_id from workflow instance: {e}") - - return self.next.start_activity(input) - - -class ContextActivityInboundInterceptor(ActivityInboundInterceptor): - """Activity interceptor that extracts task_id, trace_id, and parent_span_id from headers and sets context variables.""" - - def __init__(self, next, payload_converter): - super().__init__(next) - self._payload_converter = payload_converter - - @override - async def execute_activity(self, input: ExecuteActivityInput) -> Any: - """Extract task_id, trace_id, and parent_span_id from headers and set context variables.""" - - # Extract task_id from headers if present - if input.headers and TASK_ID_HEADER in input.headers: - task_id_value = self._payload_converter.from_payload( - input.headers[TASK_ID_HEADER], str - ) - trace_id_value = self._payload_converter.from_payload( - input.headers[TRACE_ID_HEADER], str - ) - parent_span_id_value = self._payload_converter.from_payload( - input.headers[PARENT_SPAN_ID_HEADER], str - ) - - # P THIS IS THE KEY PART - Set the context variable! 
- # This makes task_id available to TemporalStreamingModel.get_response() - streaming_task_id.set(task_id_value) - streaming_trace_id.set(trace_id_value) - streaming_parent_span_id.set(parent_span_id_value) - logger.info(f"[ActivityInterceptor] Set task_id, trace_id, and parent_span_id in context: {task_id_value}, {trace_id_value}, {parent_span_id_value}") - else: - logger.debug("[ActivityInterceptor] No task_id, trace_id, or parent_span_id in headers") - - try: - # Execute the activity - # The TemporalStreamingModel can now read streaming_task_id.get() - result = await self.next.execute_activity(input) - return result - finally: - # Clean up context after activity - streaming_task_id.set(None) - streaming_trace_id.set(None) - streaming_parent_span_id.set(None) - logger.debug("[ActivityInterceptor] Cleared task_id, trace_id, and parent_span_id from context") - diff --git a/src/agentex/lib/core/temporal/plugins/openai_agents/models/__init__.py b/src/agentex/lib/core/temporal/plugins/openai_agents/models/__init__.py deleted file mode 100644 index bb5dc97e..00000000 --- a/src/agentex/lib/core/temporal/plugins/openai_agents/models/__init__.py +++ /dev/null @@ -1,23 +0,0 @@ -"""Model providers for Temporal OpenAI Agents SDK integration. - -This module provides model implementations that add streaming and tracing -capabilities to standard OpenAI models when running in Temporal workflows/activities. -""" - -from agentex.lib.core.temporal.plugins.openai_agents.models.temporal_tracing_model import ( - TemporalTracingModelProvider, - TemporalTracingResponsesModel, - TemporalTracingChatCompletionsModel, -) -from agentex.lib.core.temporal.plugins.openai_agents.models.temporal_streaming_model import ( - TemporalStreamingModel, - TemporalStreamingModelProvider, -) - -__all__ = [ - "TemporalStreamingModel", - "TemporalStreamingModelProvider", - "TemporalTracingModelProvider", - "TemporalTracingResponsesModel", - "TemporalTracingChatCompletionsModel", -] \ No newline at end of file diff --git a/src/agentex/lib/core/temporal/plugins/openai_agents/models/temporal_streaming_model.py b/src/agentex/lib/core/temporal/plugins/openai_agents/models/temporal_streaming_model.py deleted file mode 100644 index 0a85c9c6..00000000 --- a/src/agentex/lib/core/temporal/plugins/openai_agents/models/temporal_streaming_model.py +++ /dev/null @@ -1,933 +0,0 @@ -"""Custom Temporal Model Provider with streaming support for OpenAI agents.""" -from __future__ import annotations - -import uuid -import logging -from typing import Any, List, Union, Optional, override - -from agents import ( - Tool, - Model, - Handoff, - FunctionTool, - ModelTracing, - ModelProvider, - ModelResponse, - ModelSettings, - TResponseInputItem, - AgentOutputSchemaBase, -) -from openai import NOT_GIVEN, AsyncOpenAI -from agents.tool import ( - ComputerTool, - HostedMCPTool, - WebSearchTool, - FileSearchTool, - LocalShellTool, - CodeInterpreterTool, - ImageGenerationTool, -) -from agents.usage import Usage, InputTokensDetails, OutputTokensDetails # type: ignore[attr-defined] -from agents.model_settings import MCPToolChoice -from openai.types.responses import ( - ResponseOutputText, - ResponseOutputMessage, - ResponseCompletedEvent, - ResponseTextDeltaEvent, - ResponseFunctionToolCall, - ResponseOutputItemDoneEvent, - # Event types for proper type checking - ResponseOutputItemAddedEvent, - ResponseReasoningTextDeltaEvent, - ResponseReasoningSummaryPartDoneEvent, - ResponseFunctionCallArgumentsDoneEvent, - ResponseReasoningSummaryPartAddedEvent, - 
ResponseReasoningSummaryTextDeltaEvent, - ResponseFunctionCallArgumentsDeltaEvent, -) - -# AgentEx SDK imports -from agentex.lib import adk -from agentex.lib.core.tracing.tracer import AsyncTracer -from agentex.types.task_message_delta import TextDelta, ReasoningContentDelta, ReasoningSummaryDelta -from agentex.types.task_message_update import StreamTaskMessageFull, StreamTaskMessageDelta -from agentex.types.task_message_content import TextContent, ReasoningContent -from agentex.lib.adk.utils._modules.client import create_async_agentex_client -from agentex.lib.core.temporal.plugins.openai_agents.interceptors.context_interceptor import ( - streaming_task_id, - streaming_trace_id, - streaming_parent_span_id, -) - -# Create logger for this module -logger = logging.getLogger("agentex.temporal.streaming") - - -def _serialize_item(item: Any) -> dict[str, Any]: - """ - Universal serializer for any item type from OpenAI Agents SDK. - - Uses model_dump() for Pydantic models, otherwise extracts attributes manually. - Filters out internal Pydantic fields that can't be serialized. - """ - if hasattr(item, 'model_dump'): - # Pydantic model - use model_dump for proper serialization - try: - return item.model_dump(mode='json', exclude_unset=True) - except Exception: - # Fallback to dict conversion - return dict(item) if hasattr(item, '__iter__') else {} - else: - # Not a Pydantic model - extract attributes manually - item_dict = {} - for attr_name in dir(item): - if not attr_name.startswith('_') and attr_name not in ('model_fields', 'model_config', 'model_computed_fields'): - try: - attr_value = getattr(item, attr_name, None) - # Skip methods and None values - if attr_value is not None and not callable(attr_value): - # Convert to JSON-serializable format - if hasattr(attr_value, 'model_dump'): - item_dict[attr_name] = attr_value.model_dump() - elif isinstance(attr_value, (str, int, float, bool, list, dict)): - item_dict[attr_name] = attr_value - else: - item_dict[attr_name] = str(attr_value) - except Exception: - # Skip attributes that can't be accessed - pass - return item_dict - - -class TemporalStreamingModel(Model): - """Custom model implementation with streaming support.""" - - def __init__( - self, - model_name: str = "gpt-4o", - _use_responses_api: bool = True, - openai_client: Optional[AsyncOpenAI] = None, - ): - """Initialize the streaming model with OpenAI client and model name. - - Args: - model_name: The name of the OpenAI model to use (default: "gpt-4o") - _use_responses_api: Internal flag for responses API (deprecated, always True) - openai_client: Optional custom AsyncOpenAI client. 
If not provided, a default - client with max_retries=0 will be created (since Temporal handles retries) - """ - # Use provided client or create default (Temporal handles retries) - self.client = openai_client if openai_client is not None else AsyncOpenAI(max_retries=0) - self.model_name = model_name - # Always use Responses API for all models - self.use_responses_api = True - - # Initialize tracer as a class variable - agentex_client = create_async_agentex_client() - self.tracer = AsyncTracer(agentex_client) - - logger.info(f"[TemporalStreamingModel] Initialized model={self.model_name}, use_responses_api={self.use_responses_api}, custom_client={openai_client is not None}, tracer=initialized") - - def _non_null_or_not_given(self, value: Any) -> Any: - """Convert None to NOT_GIVEN sentinel, matching OpenAI SDK pattern.""" - return value if value is not None else NOT_GIVEN - - def _prepare_response_input(self, input: Union[str, list[TResponseInputItem]]) -> List[dict]: - """Convert input to Responses API format. - - Args: - input: Either a string prompt or list of ResponseInputItem messages - - Returns: - List of input items in Responses API format - """ - response_input = [] - - if isinstance(input, list): - # Process list of ResponseInputItem objects - for _idx, item in enumerate(input): - # Convert to dict if needed - if isinstance(item, dict): - item_dict = item - else: - item_dict = item.model_dump() if hasattr(item, 'model_dump') else item - - item_type = item_dict.get("type") - - if item_type == "message": - # ResponseOutputMessage format - role = item_dict.get("role", "assistant") - content_list = item_dict.get("content", []) - - # Build content array - content_array = [] - for content_item in content_list: - if isinstance(content_item, dict): - if content_item.get("type") == "output_text": - # For assistant messages, keep as output_text - # For user messages, convert to input_text - if role == "user": - content_array.append({ - "type": "input_text", - "text": content_item.get("text", "") - }) - else: - content_array.append({ - "type": "output_text", - "text": content_item.get("text", "") - }) - else: - content_array.append(content_item) - - response_input.append({ - "type": "message", - "role": role, - "content": content_array - }) - - elif item_type == "function_call": - # Function call from previous response - logger.debug(f"[Responses API] function_call item keys: {list(item_dict.keys())}") - call_id = item_dict.get("call_id") or item_dict.get("id") - if not call_id: - logger.debug(f"[Responses API] WARNING: No call_id found in function_call item!") - logger.debug(f"[Responses API] Full item: {item_dict}") - # Generate a fallback ID if missing - call_id = f"call_{uuid.uuid4().hex[:8]}" - logger.debug(f"[Responses API] Generated fallback call_id: {call_id}") - logger.debug(f"[Responses API] Adding function_call with call_id={call_id}, name={item_dict.get('name')}") - response_input.append({ - "type": "function_call", - "call_id": call_id, # API expects 'call_id' not 'id' - "name": item_dict.get("name", ""), - "arguments": item_dict.get("arguments", "{}"), - }) - - elif item_type == "function_call_output": - # Function output/response - call_id = item_dict.get("call_id") - if not call_id: - logger.debug(f"[Responses API] WARNING: No call_id in function_call_output!") - # Try to find it from id field - call_id = item_dict.get("id") - response_input.append({ - "type": "function_call_output", - "call_id": call_id or "", - "output": item_dict.get("output", "") - }) - - elif 
item_dict.get("role") == "user": - # Simple user message - response_input.append({ - "type": "message", - "role": "user", - "content": [{"type": "input_text", "text": item_dict.get("content", "")}] - }) - - elif item_dict.get("role") == "tool": - # Tool message - response_input.append({ - "type": "function_call_output", - "call_id": item_dict.get("tool_call_id"), - "output": item_dict.get("content") - }) - else: - logger.debug(f"[Responses API] Skipping unhandled item type: {item_type}, role: {item_dict.get('role')}") - - elif isinstance(input, str): - # Simple string input - response_input.append({ - "type": "message", - "role": "user", - "content": [{"type": "input_text", "text": input}] - }) - - return response_input - - def _convert_tools(self, tools: list[Tool], handoffs: list[Handoff]) -> tuple[List[dict], List[str]]: - """Convert tools and handoffs to Responses API format. - - Args: - tools: List of Tool objects - handoffs: List of Handoff objects - - Returns: - Tuple of (converted_tools, include_list) where include_list contains - additional response data to request - """ - response_tools = [] - tool_includes = [] - - # Check for multiple computer tools (only one allowed) - computer_tools = [tool for tool in tools if isinstance(tool, ComputerTool)] - if len(computer_tools) > 1: - raise ValueError(f"You can only provide one computer tool. Got {len(computer_tools)}") - - # Convert each tool based on its type - for tool in tools: - if isinstance(tool, FunctionTool): - response_tools.append({ - "type": "function", - "name": tool.name, - "description": tool.description or "", - "parameters": tool.params_json_schema if tool.params_json_schema else {}, - "strict": tool.strict_json_schema, - }) - - elif isinstance(tool, WebSearchTool): - tool_config = { - "type": "web_search", - } - # filters attribute was removed from WebSearchTool API - if hasattr(tool, 'user_location') and tool.user_location is not None: - tool_config["user_location"] = tool.user_location - if hasattr(tool, 'search_context_size') and tool.search_context_size is not None: - tool_config["search_context_size"] = tool.search_context_size - response_tools.append(tool_config) - - elif isinstance(tool, FileSearchTool): - tool_config = { - "type": "file_search", - "vector_store_ids": tool.vector_store_ids, - } - if tool.max_num_results: - tool_config["max_num_results"] = tool.max_num_results - if tool.ranking_options: - tool_config["ranking_options"] = tool.ranking_options - if tool.filters: - tool_config["filters"] = tool.filters - response_tools.append(tool_config) - - # Add include for file search results if needed - if tool.include_search_results: - tool_includes.append("file_search_call.results") - - elif isinstance(tool, ComputerTool): - response_tools.append({ - "type": "computer_use_preview", - "environment": tool.computer.environment, - "display_width": tool.computer.dimensions[0], - "display_height": tool.computer.dimensions[1], - }) - - elif isinstance(tool, HostedMCPTool): - response_tools.append(tool.tool_config) - - elif isinstance(tool, ImageGenerationTool): - response_tools.append(tool.tool_config) - - elif isinstance(tool, CodeInterpreterTool): - response_tools.append(tool.tool_config) - - elif isinstance(tool, LocalShellTool): - # LocalShellTool API changed - no longer has working_directory - # The executor handles execution details internally - response_tools.append({ - "type": "local_shell", - }) - - else: - logger.warning(f"Unknown tool type: {type(tool).__name__}, skipping") - - # Convert handoffs (always 
function tools) - for handoff in handoffs: - response_tools.append({ - "type": "function", - "name": handoff.tool_name, - "description": handoff.tool_description or f"Transfer to {handoff.agent_name}", - "parameters": handoff.input_json_schema if handoff.input_json_schema else {}, - }) - - return response_tools, tool_includes - - def _build_reasoning_param(self, model_settings: ModelSettings) -> Any: - """Build reasoning parameter from model settings. - - Args: - model_settings: Model configuration settings - - Returns: - Reasoning parameter dict or NOT_GIVEN - """ - if not model_settings.reasoning: - return NOT_GIVEN - - if hasattr(model_settings.reasoning, 'effort') and model_settings.reasoning.effort: - # For Responses API, reasoning is an object - reasoning_param = { - "effort": model_settings.reasoning.effort, - } - # Add summary if specified (check both 'summary' and 'generate_summary' for compatibility) - summary_value = None - if hasattr(model_settings.reasoning, 'summary') and model_settings.reasoning.summary is not None: - summary_value = model_settings.reasoning.summary - elif ( - hasattr(model_settings.reasoning, 'generate_summary') - and model_settings.reasoning.generate_summary is not None - ): - summary_value = model_settings.reasoning.generate_summary - - if summary_value is not None: - reasoning_param["summary"] = summary_value - - logger.debug(f"[TemporalStreamingModel] Using reasoning param: {reasoning_param}") - return reasoning_param - - return NOT_GIVEN - - def _convert_tool_choice(self, tool_choice: Any) -> Any: - """Convert tool_choice to Responses API format. - - Args: - tool_choice: Tool choice from model settings - - Returns: - Converted tool choice or NOT_GIVEN - """ - if tool_choice is None: - return NOT_GIVEN - - if isinstance(tool_choice, MCPToolChoice): - # MCP tool choice with server label - return { - "server_label": tool_choice.server_label, - "type": "mcp", - "name": tool_choice.name, - } - elif tool_choice == "required": - return "required" - elif tool_choice == "auto": - return "auto" - elif tool_choice == "none": - return "none" - elif tool_choice == "file_search": - return {"type": "file_search"} - elif tool_choice == "web_search": - return {"type": "web_search"} - elif tool_choice == "web_search_preview": - return {"type": "web_search_preview"} - elif tool_choice == "computer_use_preview": - return {"type": "computer_use_preview"} - elif tool_choice == "image_generation": - return {"type": "image_generation"} - elif tool_choice == "code_interpreter": - return {"type": "code_interpreter"} - elif tool_choice == "mcp": - # Generic MCP without specific tool - return {"type": "mcp"} - elif isinstance(tool_choice, str): - # Specific function tool by name - return { - "type": "function", - "name": tool_choice, - } - else: - # Pass through as-is for other types - return tool_choice - - @override - async def get_response( - self, - system_instructions: Optional[str], - input: Union[str, list[TResponseInputItem]], - model_settings: ModelSettings, - tools: list[Tool], - output_schema: Optional[AgentOutputSchemaBase], - handoffs: list[Handoff], - tracing: ModelTracing, # noqa: ARG002 - **kwargs, # noqa: ARG002 - ) -> ModelResponse: - """Get a non-streaming response from the model with streaming to Redis. - - This method is used by Temporal activities and needs to return a complete - response, but we stream the response to Redis while generating it. 
- """ - - task_id = streaming_task_id.get() - trace_id = streaming_trace_id.get() - parent_span_id = streaming_parent_span_id.get() - - if not task_id or not trace_id or not parent_span_id: - raise ValueError("task_id, trace_id, and parent_span_id are required for streaming with Responses API") - - trace = self.tracer.trace(trace_id) - - async with trace.span( - parent_id=parent_span_id, - name="streaming_model_get_response", - input={ - "model": self.model_name, - "has_system_instructions": system_instructions is not None, - "input_type": type(input).__name__, - "tools_count": len(tools) if tools else 0, - "handoffs_count": len(handoffs) if handoffs else 0, - }, - ) as span: - # Always use Responses API for streaming - if not task_id: - # If no task_id, we can't use streaming - this shouldn't happen normally - raise ValueError("task_id is required for streaming with Responses API") - - logger.info(f"[TemporalStreamingModel] Using Responses API for {self.model_name}") - - try: - # Prepare input using helper method - response_input = self._prepare_response_input(input) - - # Convert tools and handoffs using helper method - response_tools, tool_includes = self._convert_tools(tools, handoffs) - openai_tools = response_tools if response_tools else None - - # Build reasoning parameter using helper method - reasoning_param = self._build_reasoning_param(model_settings) - - # Convert tool_choice using helper method - tool_choice = self._convert_tool_choice(model_settings.tool_choice) - - # Build include list for response data - include_list = [] - # Add tool-specific includes - if tool_includes: - include_list.extend(tool_includes) - # Add user-specified includes - if model_settings.response_include: - include_list.extend(model_settings.response_include) - # Add logprobs include if top_logprobs is set - if model_settings.top_logprobs is not None: - include_list.append("message.output_text.logprobs") - # Build response format for verbosity and structured output - response_format = NOT_GIVEN - - if output_schema is not None: - # Handle structured output schema for Responses API - # The Responses API expects the schema in the 'text' parameter with a 'format' key - logger.debug(f"[TemporalStreamingModel] Converting output_schema to Responses API format") - try: - # Get the JSON schema from the output schema - schema_dict = output_schema.json_schema() - response_format = { - "format": { - "type": "json_schema", - "name": "final_output", - "schema": schema_dict, - "strict": output_schema.is_strict_json_schema() if hasattr(output_schema, 'is_strict_json_schema') else True, - } - } - logger.debug(f"[TemporalStreamingModel] Built response_format with json_schema: {response_format}") - except Exception as e: - logger.warning(f"Failed to convert output_schema: {e}") - response_format = NOT_GIVEN - - if model_settings.verbosity is not None: - if response_format is not NOT_GIVEN and isinstance(response_format, dict): - response_format["verbosity"] = model_settings.verbosity - else: - response_format = {"verbosity": model_settings.verbosity} - - # Build extra_args dict for additional parameters - extra_args = dict(model_settings.extra_args or {}) - if model_settings.top_logprobs is not None: - extra_args["top_logprobs"] = model_settings.top_logprobs - - # Create the response stream using Responses API - logger.debug(f"[TemporalStreamingModel] Creating response stream with Responses API") - stream = await self.client.responses.create( # type: ignore[call-overload] - - model=self.model_name, - 
input=response_input, - instructions=system_instructions, - tools=openai_tools or NOT_GIVEN, - stream=True, - # Temperature and sampling parameters - temperature=self._non_null_or_not_given(model_settings.temperature), - max_output_tokens=self._non_null_or_not_given(model_settings.max_tokens), - top_p=self._non_null_or_not_given(model_settings.top_p), - # Note: frequency_penalty and presence_penalty are not supported by Responses API - # Tool and reasoning parameters - reasoning=reasoning_param, - tool_choice=tool_choice, - parallel_tool_calls=self._non_null_or_not_given(model_settings.parallel_tool_calls), - # Context and truncation - truncation=self._non_null_or_not_given(model_settings.truncation), - # Response configuration (includes structured output schema) - text=response_format, - include=include_list if include_list else NOT_GIVEN, - # Metadata and storage - metadata=self._non_null_or_not_given(model_settings.metadata), - store=self._non_null_or_not_given(model_settings.store), - # Extra customization - extra_headers=model_settings.extra_headers, - extra_query=model_settings.extra_query, - extra_body=model_settings.extra_body, - # Any additional parameters from extra_args - **extra_args, - ) - - # Process the stream of events from Responses API - output_items = [] - current_text = "" - streaming_context = None - reasoning_context = None - reasoning_summaries = [] - reasoning_contents = [] - event_count = 0 - - # We expect task_id to always be provided for streaming - if not task_id: - raise ValueError("[TemporalStreamingModel] task_id is required for streaming model") - - # Process events from the Responses API stream - function_calls_in_progress = {} # Track function calls being streamed - - async for event in stream: - event_count += 1 - - # Log event type - logger.debug(f"[TemporalStreamingModel] Event {event_count}: {type(event).__name__}") - - # Handle different event types using isinstance for type safety - if isinstance(event, ResponseOutputItemAddedEvent): - # New output item (reasoning, function call, or message) - item = getattr(event, 'item', None) - output_index = getattr(event, 'output_index', 0) - - if item and getattr(item, 'type', None) == 'reasoning': - logger.debug(f"[TemporalStreamingModel] Starting reasoning item") - if not reasoning_context: - # Start a reasoning context for streaming reasoning to UI - reasoning_context = await adk.streaming.streaming_task_message_context( - task_id=task_id, - initial_content=ReasoningContent( - author="agent", - summary=[], - content=[], - type="reasoning", - style="active", - ), - ).__aenter__() - elif item and getattr(item, 'type', None) == 'function_call': - # Track the function call being streamed - function_calls_in_progress[output_index] = { - 'id': getattr(item, 'id', ''), - 'call_id': getattr(item, 'call_id', ''), - 'name': getattr(item, 'name', ''), - 'arguments': getattr(item, 'arguments', ''), - } - logger.debug(f"[TemporalStreamingModel] Starting function call: {item.name}") - - elif item and getattr(item, 'type', None) == 'message': - # Track the message being streamed - streaming_context = await adk.streaming.streaming_task_message_context( - task_id=task_id, - initial_content=TextContent( - author="agent", - content="", - format="markdown", - ), - ).__aenter__() - - elif isinstance(event, ResponseFunctionCallArgumentsDeltaEvent): - # Stream function call arguments - output_index = getattr(event, 'output_index', 0) - delta = getattr(event, 'delta', '') - - if output_index in function_calls_in_progress: - 
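The loop above tracks in-flight function calls keyed by `output_index`. A stripped-down sketch of that accumulation protocol (the handler names are illustrative):

```python
calls_in_progress: dict[int, dict[str, str]] = {}


def on_item_added(output_index: int, name: str) -> None:
    # A new function_call output item opens an accumulator slot.
    calls_in_progress[output_index] = {"name": name, "arguments": ""}


def on_arguments_delta(output_index: int, delta: str) -> None:
    # Argument fragments are concatenated as they stream in.
    if output_index in calls_in_progress:
        calls_in_progress[output_index]["arguments"] += delta


def on_arguments_done(output_index: int, arguments: str) -> None:
    # The done event carries the complete string and wins outright.
    if output_index in calls_in_progress:
        calls_in_progress[output_index]["arguments"] = arguments


on_item_added(0, "get_weather")
on_arguments_delta(0, '{"location"')
on_arguments_delta(0, ': "SF"}')
assert calls_in_progress[0]["arguments"] == '{"location": "SF"}'
```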
function_calls_in_progress[output_index]['arguments'] += delta - logger.debug(f"[TemporalStreamingModel] Function call args delta: {delta[:50]}...") - - elif isinstance(event, ResponseFunctionCallArgumentsDoneEvent): - # Function call arguments complete - output_index = getattr(event, 'output_index', 0) - arguments = getattr(event, 'arguments', '') - - if output_index in function_calls_in_progress: - function_calls_in_progress[output_index]['arguments'] = arguments - logger.debug("[TemporalStreamingModel] Function call args done") - - elif isinstance(event, (ResponseReasoningTextDeltaEvent, ResponseReasoningSummaryTextDeltaEvent, ResponseTextDeltaEvent)): - # Handle text streaming - delta = getattr(event, 'delta', '') - - if isinstance(event, ResponseReasoningSummaryTextDeltaEvent) and reasoning_context: - # Stream reasoning summary deltas - these are the actual reasoning tokens! - try: - # Use ReasoningSummaryDelta for reasoning summaries - summary_index = getattr(event, 'summary_index', 0) - delta_obj = ReasoningSummaryDelta( - summary_index=summary_index, - summary_delta=delta, - type="reasoning_summary", - ) - update = StreamTaskMessageDelta( - parent_task_message=reasoning_context.task_message, - delta=delta_obj, - type="delta", - ) - await reasoning_context.stream_update(update) - # Accumulate the reasoning summary - if len(reasoning_summaries) <= summary_index: - logger.debug(f"[TemporalStreamingModel] Extending reasoning summaries: {summary_index}") - reasoning_summaries.extend([""] * (summary_index + 1 - len(reasoning_summaries))) - reasoning_summaries[summary_index] += delta - logger.debug(f"[TemporalStreamingModel] Streamed reasoning summary: {delta[:30]}..." if len(delta) > 30 else f"[TemporalStreamingModel] Streamed reasoning summary: {delta}") - except Exception as e: - logger.warning(f"Failed to send reasoning delta: {e}") - elif isinstance(event, ResponseReasoningTextDeltaEvent) and reasoning_context: - # Regular reasoning delta (if these ever appear) - try: - delta_obj = ReasoningContentDelta( - content_index=0, - content_delta=delta, - type="reasoning_content", - ) - update = StreamTaskMessageDelta( - parent_task_message=reasoning_context.task_message, - delta=delta_obj, - type="delta", - ) - await reasoning_context.stream_update(update) - reasoning_contents.append(delta) - except Exception as e: - logger.warning(f"Failed to send reasoning delta: {e}") - elif isinstance(event, ResponseTextDeltaEvent): - # Stream regular text output - current_text += delta - if streaming_context: - try: - delta_obj = TextDelta( - type="text", - text_delta=delta, - ) - update = StreamTaskMessageDelta( - parent_task_message=streaming_context.task_message, - delta=delta_obj, - type="delta", - ) - await streaming_context.stream_update(update) - except Exception as e: - logger.warning(f"Failed to send text delta: {e}") - - elif isinstance(event, ResponseOutputItemDoneEvent): - # Output item completed - item = getattr(event, 'item', None) - output_index = getattr(event, 'output_index', 0) - - if item and getattr(item, 'type', None) == 'reasoning': - if reasoning_context and reasoning_summaries: - logger.debug("[TemporalStreamingModel] Reasoning item completed, sending final update")
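A self-contained sketch of the indexed summary accumulation used above, where the list is grown on demand before each delta is appended at its `summary_index`:

```python
def accumulate_summary(summaries: list[str], summary_index: int, delta: str) -> None:
    # Grow the list on demand so the index is always addressable.
    if len(summaries) <= summary_index:
        summaries.extend([""] * (summary_index + 1 - len(summaries)))
    summaries[summary_index] += delta


summaries: list[str] = []
accumulate_summary(summaries, 0, "Thinking about ")
accumulate_summary(summaries, 0, "the problem...")
accumulate_summary(summaries, 1, "Second summary part.")
assert summaries == ["Thinking about the problem...", "Second summary part."]
```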
type="reasoning", - style="static", - ) - - await reasoning_context.stream_update( - update=StreamTaskMessageFull( - parent_task_message=reasoning_context.task_message, - content=complete_reasoning_content, - type="full", - ), - ) - - # Close the reasoning context after sending the final update - # This matches the reference implementation pattern - await reasoning_context.close() - reasoning_context = None - logger.debug(f"[TemporalStreamingModel] Closed reasoning context after final update") - except Exception as e: - logger.warning(f"Failed to send reasoning part done update: {e}") - - elif item and getattr(item, 'type', None) == 'function_call': - # Function call completed - add to output - if output_index in function_calls_in_progress: - call_data = function_calls_in_progress[output_index] - logger.debug(f"[TemporalStreamingModel] Function call completed: {call_data['name']}") - - # Create proper function call object - tool_call = ResponseFunctionToolCall( - id=call_data['id'], - call_id=call_data['call_id'], - type="function_call", - name=call_data['name'], - arguments=call_data['arguments'], - ) - output_items.append(tool_call) - - elif isinstance(event, ResponseReasoningSummaryPartAddedEvent): - # New reasoning part/summary started - reset accumulator - part = getattr(event, 'part', None) - if part: - part_type = getattr(part, 'type', 'unknown') - logger.debug(f"[TemporalStreamingModel] New reasoning part: type={part_type}") - # Reset the current reasoning summary for this new part - - elif isinstance(event, ResponseReasoningSummaryPartDoneEvent): - # Reasoning part completed - ResponseOutputItemDoneEvent will handle the final update - logger.debug(f"[TemporalStreamingModel] Reasoning part completed") - - elif isinstance(event, ResponseCompletedEvent): - # Response completed - logger.debug(f"[TemporalStreamingModel] Response completed") - response = getattr(event, 'response', None) - if response and hasattr(response, 'output'): - # Use the final output from the response - output_items = response.output - logger.debug(f"[TemporalStreamingModel] Found {len(output_items)} output items in final response") - - # End of event processing loop - close any open contexts - if reasoning_context: - await reasoning_context.close() - reasoning_context = None - - if streaming_context: - await streaming_context.close() - streaming_context = None - - # Build the response from output items collected during streaming - # Create output from the items we collected - response_output = [] - - # Process output items from the response - if output_items: - for item in output_items: - if isinstance(item, ResponseFunctionToolCall): - response_output.append(item) - elif isinstance(item, ResponseOutputMessage): - response_output.append(item) - else: - response_output.append(item) - else: - # No output items - create empty message - message = ResponseOutputMessage( - id=f"msg_{uuid.uuid4().hex[:8]}", - type="message", - status="completed", - role="assistant", - content=[ResponseOutputText( - type="output_text", - text=current_text if current_text else "", - annotations=[] - )] - ) - response_output.append(message) - - # Create usage object - usage = Usage( - input_tokens=0, - output_tokens=0, - total_tokens=0, - input_tokens_details=InputTokensDetails(cached_tokens=0), - output_tokens_details=OutputTokensDetails(reasoning_tokens=len(''.join(reasoning_contents)) // 4), # Approximate - ) - - # Serialize response output items for span tracing - new_items = [] - final_output = None - tool_calls = [] - tool_outputs = 
[] - - for item in response_output: - try: - item_dict = _serialize_item(item) - if item_dict: - new_items.append(item_dict) - - # Extract final_output from message type if available - if item_dict.get('type') == 'message' and not final_output: - content = item_dict.get('content', []) - if content and isinstance(content, list): - for content_part in content: - if isinstance(content_part, dict) and 'text' in content_part: - final_output = content_part['text'] - break - except Exception as e: - logger.warning(f"Failed to serialize item in temporal_streaming_model: {e}") - continue - - # Extract tool calls and outputs from input - try: - if isinstance(input, list): - for item in input: - try: - item_dict = _serialize_item(item) if not isinstance(item, dict) else item - if item_dict: - # Capture function calls - if item_dict.get('type') == 'function_call': - tool_calls.append(item_dict) - # Capture function outputs - elif item_dict.get('type') == 'function_call_output': - tool_outputs.append(item_dict) - except Exception: - pass - except Exception as e: - logger.warning(f"Failed to extract tool calls and outputs: {e}") - - # Set span output with structured data - if span: - output_data = { - "new_items": new_items, - "final_output": final_output, - } - # Include tool calls if any were in the input - if tool_calls: - output_data["tool_calls"] = tool_calls - # Include tool outputs if any were processed - if tool_outputs: - output_data["tool_outputs"] = tool_outputs - - span.output = output_data - - # Return the response - return ModelResponse( - output=response_output, - usage=usage, - response_id=f"resp_{uuid.uuid4().hex[:8]}", - ) - - except Exception as e: - logger.error(f"Error using Responses API: {e}") - raise - - # The _get_response_with_responses_api method has been merged into get_response above - # All Responses API logic is now integrated directly in get_response() method - - @override - def stream_response(self, *args, **kwargs): - """Streaming is not implemented as we use the async get_response method. - This method is included for compatibility with the Model interface but should not be used. - All streaming is handled through the async get_response method with the Responses API.""" - raise NotImplementedError("stream_response is not used in Temporal activities - use get_response instead") - - -class TemporalStreamingModelProvider(ModelProvider): - """Custom model provider that returns a streaming-capable model.""" - - def __init__(self, openai_client: Optional[AsyncOpenAI] = None): - """Initialize the provider. - - Args: - openai_client: Optional custom AsyncOpenAI client to use for all models. - If not provided, each model will create its own default client. - """ - super().__init__() - self.openai_client = openai_client - logger.info(f"[TemporalStreamingModelProvider] Initialized, custom_client={openai_client is not None}") - - @override - def get_model(self, model_name: Union[str, None]) -> Model: - """Get a model instance with streaming capabilities. - - Args: - model_name: The name of the model to retrieve - - Returns: - A Model instance with streaming support. 
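Two tiny sketches of the response-assembly details above: the reasoning-token estimate (no real token counts arrive on the stream, so roughly four characters per token is assumed) and the `final_output` extraction from serialized message items:

```python
from typing import Any, Optional


def approx_reasoning_tokens(reasoning_contents: list[str]) -> int:
    # Mirrors the Usage construction above: ~4 characters per token.
    return len("".join(reasoning_contents)) // 4


def extract_final_output(items: list[dict[str, Any]]) -> Optional[str]:
    # Mirrors the span-serialization loop above: the first text part of
    # the first message item becomes final_output.
    for item in items:
        if item.get("type") == "message":
            for part in item.get("content", []) or []:
                if isinstance(part, dict) and "text" in part:
                    return part["text"]
    return None


assert approx_reasoning_tokens(["abcd" * 10]) == 10
assert extract_final_output(
    [{"type": "message", "content": [{"type": "output_text", "text": "hi"}]}]
) == "hi"
```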
- """ - # Use the provided model_name or default to gpt-4o - actual_model = model_name if model_name else "gpt-4o" - logger.info(f"[TemporalStreamingModelProvider] Creating TemporalStreamingModel for model_name: {actual_model}") - model = TemporalStreamingModel(model_name=actual_model, openai_client=self.openai_client) - return model diff --git a/src/agentex/lib/core/temporal/plugins/openai_agents/models/temporal_tracing_model.py b/src/agentex/lib/core/temporal/plugins/openai_agents/models/temporal_tracing_model.py deleted file mode 100644 index e3d1b380..00000000 --- a/src/agentex/lib/core/temporal/plugins/openai_agents/models/temporal_tracing_model.py +++ /dev/null @@ -1,402 +0,0 @@ -"""Temporal-aware tracing model provider. - -This module provides model implementations that add AgentEx tracing to standard OpenAI models -when running in Temporal workflows/activities. It uses context variables set by the Temporal -context interceptor to access task_id, trace_id, and parent_span_id. - -The key innovation is that these are thin wrappers around the standard OpenAI models, -avoiding code duplication while adding tracing capabilities. -""" -from __future__ import annotations - -import logging -from typing import Any, List, Union, Optional, override - -from agents import ( - Tool, - Model, - Handoff, - ModelTracing, - ModelResponse, - ModelSettings, - OpenAIProvider, - TResponseInputItem, - AgentOutputSchemaBase, -) -from openai import AsyncOpenAI -from openai.types.responses import ResponsePromptParam -from agents.models.openai_responses import OpenAIResponsesModel -from agents.models.openai_chatcompletions import OpenAIChatCompletionsModel - -from agentex.lib.core.tracing.tracer import AsyncTracer - -# Import AgentEx components -from agentex.lib.adk.utils._modules.client import create_async_agentex_client - -# Import context variables from the interceptor -from agentex.lib.core.temporal.plugins.openai_agents.interceptors.context_interceptor import ( - streaming_task_id, - streaming_trace_id, - streaming_parent_span_id, -) - -logger = logging.getLogger("agentex.temporal.tracing") - - -def _serialize_item(item: Any) -> dict[str, Any]: - """ - Universal serializer for any item type from OpenAI Agents SDK. - - Uses model_dump() for Pydantic models, otherwise extracts attributes manually. - Filters out internal Pydantic fields that can't be serialized. - """ - if hasattr(item, 'model_dump'): - # Pydantic model - use model_dump for proper serialization - try: - return item.model_dump(mode='json', exclude_unset=True) - except Exception: - # Fallback to dict conversion - return dict(item) if hasattr(item, '__iter__') else {} - else: - # Not a Pydantic model - extract attributes manually - item_dict = {} - for attr_name in dir(item): - if not attr_name.startswith('_') and attr_name not in ('model_fields', 'model_config', 'model_computed_fields'): - try: - attr_value = getattr(item, attr_name, None) - # Skip methods and None values - if attr_value is not None and not callable(attr_value): - # Convert to JSON-serializable format - if hasattr(attr_value, 'model_dump'): - item_dict[attr_name] = attr_value.model_dump() - elif isinstance(attr_value, (str, int, float, bool, list, dict)): - item_dict[attr_name] = attr_value - else: - item_dict[attr_name] = str(attr_value) - except Exception: - # Skip attributes that can't be accessed - pass - return item_dict - - -class TemporalTracingModelProvider(OpenAIProvider): - """Model provider that returns OpenAI models wrapped with AgentEx tracing. 
- - This provider extends the standard OpenAIProvider to return models that add - tracing spans around model calls when running in Temporal activities with - the context interceptor enabled. - """ - - def __init__(self, openai_client: Optional[AsyncOpenAI] = None, **kwargs): - """Initialize the tracing model provider. - - Args: - openai_client: Optional custom AsyncOpenAI client. If provided, this client - will be used for all model calls. If not provided, OpenAIProvider - will create a default client. - **kwargs: All other arguments are passed to OpenAIProvider. - """ - # Pass openai_client to parent if provided - if openai_client is not None: - super().__init__(openai_client=openai_client, **kwargs) - else: - super().__init__(**kwargs) - - # Initialize tracer for all models - agentex_client = create_async_agentex_client() - self._tracer = AsyncTracer(agentex_client) - logger.info(f"[TemporalTracingModelProvider] Initialized with AgentEx tracer, custom_client={openai_client is not None}") - - @override - def get_model(self, model_name: Optional[str]) -> Model: - """Get a model wrapped with tracing capabilities. - - Args: - model_name: The name of the model to use - - Returns: - A model instance wrapped with tracing - """ - # Get the base model from the parent provider - base_model = super().get_model(model_name) - - # Wrap with appropriate tracing wrapper based on model type - if isinstance(base_model, OpenAIResponsesModel): - logger.info(f"[TemporalTracingModelProvider] Wrapping OpenAIResponsesModel '{model_name}' with tracing") - return TemporalTracingResponsesModel(base_model, self._tracer) # type: ignore[abstract] - elif isinstance(base_model, OpenAIChatCompletionsModel): - logger.info(f"[TemporalTracingModelProvider] Wrapping OpenAIChatCompletionsModel '{model_name}' with tracing") - return TemporalTracingChatCompletionsModel(base_model, self._tracer) # type: ignore[abstract] - else: - logger.warning(f"[TemporalTracingModelProvider] Unknown model type, returning without tracing: {type(base_model)}") - return base_model - - -class TemporalTracingResponsesModel(Model): - """Wrapper for OpenAIResponsesModel that adds AgentEx tracing. - - This is a thin wrapper that adds tracing spans around the base model's - get_response() method. It reads tracing context from ContextVars set by - the Temporal context interceptor. - """ - - def __init__(self, base_model: OpenAIResponsesModel, tracer: AsyncTracer): - """Initialize the tracing wrapper. - - Args: - base_model: The OpenAI Responses model to wrap - tracer: The AgentEx tracer to use - """ - self._base_model = base_model - self._tracer = tracer - # Expose the model name for compatibility - self.model = base_model.model - - @override - async def get_response( - self, - system_instructions: Optional[str], - input: Union[str, List[TResponseInputItem]], - model_settings: ModelSettings, - tools: List[Tool], - output_schema: Optional[AgentOutputSchemaBase], - handoffs: List[Handoff], - tracing: ModelTracing, - previous_response_id: Optional[str] = None, - conversation_id: Optional[str] = None, - prompt: Optional[ResponsePromptParam] = None, - **kwargs, - ) -> ModelResponse: - """Get a response from the model with optional tracing. - - If tracing context is available from the interceptor, this wraps the - model call in a tracing span. Otherwise, it passes through to the - base model without tracing. 
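The provider's `get_model` wraps known model types and passes unknown ones through. A minimal stand-in illustration (the classes here are placeholders, not the OpenAI SDK types):

```python
class ResponsesLike: ...
class ChatCompletionsLike: ...


class TracingWrapper:
    """Thin delegator standing in for the tracing model wrappers above."""

    def __init__(self, base: object) -> None:
        self.base = base


def get_model(base: object) -> object:
    # Known model types are wrapped with tracing; anything else passes
    # through untraced, matching the fallback branch above.
    if isinstance(base, (ResponsesLike, ChatCompletionsLike)):
        return TracingWrapper(base)
    return base


assert isinstance(get_model(ResponsesLike()), TracingWrapper)
assert not isinstance(get_model(object()), TracingWrapper)
```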
- """ - # Try to get tracing context from ContextVars - task_id = streaming_task_id.get() - trace_id = streaming_trace_id.get() - parent_span_id = streaming_parent_span_id.get() - - # If we have tracing context, wrap with span - if trace_id and parent_span_id: - logger.debug(f"[TemporalTracingResponsesModel] Adding tracing span for task_id={task_id}, trace_id={trace_id}") - - trace = self._tracer.trace(trace_id) - - async with trace.span( - parent_id=parent_span_id, - name="model_get_response", - input={ - "model": str(self.model), - "has_system_instructions": system_instructions is not None, - "input_type": type(input).__name__, - "tools_count": len(tools) if tools else 0, - "handoffs_count": len(handoffs) if handoffs else 0, - "has_output_schema": output_schema is not None, - "model_settings": { - "temperature": model_settings.temperature, - "max_tokens": model_settings.max_tokens, - "reasoning": model_settings.reasoning, - } if model_settings else None, - }, - ) as span: - try: - # Call the base model - response = await self._base_model.get_response( - system_instructions=system_instructions, - input=input, - model_settings=model_settings, - tools=tools, - output_schema=output_schema, - handoffs=handoffs, - tracing=tracing, - previous_response_id=previous_response_id, - conversation_id=conversation_id, # type: ignore[call-arg] - prompt=prompt, - **kwargs, - ) - - # Serialize response output items for span tracing - new_items = [] - final_output = None - - if hasattr(response, 'output') and response.output: - response_output = response.output if isinstance(response.output, list) else [response.output] - - for item in response_output: - try: - item_dict = _serialize_item(item) - if item_dict: - new_items.append(item_dict) - - # Extract final_output from message type if available - if item_dict.get('type') == 'message' and not final_output: - content = item_dict.get('content', []) - if content and isinstance(content, list): - for content_part in content: - if isinstance(content_part, dict) and 'text' in content_part: - final_output = content_part['text'] - break - except Exception as e: - logger.warning(f"Failed to serialize item in temporal tracing model: {e}") - continue - - # Set span output with structured data - span.output = { # type: ignore[attr-defined] - "new_items": new_items, - "final_output": final_output, - } - - return response - - except Exception as e: - # Record error in span - span.error = str(e) # type: ignore[attr-defined] - raise - else: - # No tracing context, just pass through - logger.debug("[TemporalTracingResponsesModel] No tracing context available, calling base model directly") - return await self._base_model.get_response( - system_instructions=system_instructions, - input=input, - model_settings=model_settings, - tools=tools, - output_schema=output_schema, - handoffs=handoffs, - tracing=tracing, - previous_response_id=previous_response_id, - conversation_id=conversation_id, # type: ignore[call-arg] - prompt=prompt, - **kwargs, - ) - - -class TemporalTracingChatCompletionsModel(Model): - """Wrapper for OpenAIChatCompletionsModel that adds AgentEx tracing. - - This is a thin wrapper that adds tracing spans around the base model's - get_response() method. It reads tracing context from ContextVars set by - the Temporal context interceptor. - """ - - def __init__(self, base_model: OpenAIChatCompletionsModel, tracer: AsyncTracer): - """Initialize the tracing wrapper. 
- - Args: - base_model: The OpenAI ChatCompletions model to wrap - tracer: The AgentEx tracer to use - """ - self._base_model = base_model - self._tracer = tracer - # Expose the model name for compatibility - self.model = base_model.model - - @override - async def get_response( - self, - system_instructions: Optional[str], - input: Union[str, List[TResponseInputItem]], - model_settings: ModelSettings, - tools: List[Tool], - output_schema: Optional[AgentOutputSchemaBase], - handoffs: List[Handoff], - tracing: ModelTracing, - **kwargs, - ) -> ModelResponse: - """Get a response from the model with optional tracing. - - If tracing context is available from the interceptor, this wraps the - model call in a tracing span. Otherwise, it passes through to the - base model without tracing. - """ - # Try to get tracing context from ContextVars - task_id = streaming_task_id.get() - trace_id = streaming_trace_id.get() - parent_span_id = streaming_parent_span_id.get() - - # If we have tracing context, wrap with span - if trace_id and parent_span_id: - logger.debug(f"[TemporalTracingChatCompletionsModel] Adding tracing span for task_id={task_id}, trace_id={trace_id}") - - trace = self._tracer.trace(trace_id) - - async with trace.span( - parent_id=parent_span_id, - name="model_get_response", - input={ - "model": str(self.model), - "has_system_instructions": system_instructions is not None, - "input_type": type(input).__name__, - "tools_count": len(tools) if tools else 0, - "handoffs_count": len(handoffs) if handoffs else 0, - "has_output_schema": output_schema is not None, - "model_settings": { - "temperature": model_settings.temperature, - "max_tokens": model_settings.max_tokens, - } if model_settings else None, - }, - ) as span: - try: - # Call the base model - response = await self._base_model.get_response( - system_instructions=system_instructions, - input=input, - model_settings=model_settings, - tools=tools, - output_schema=output_schema, - handoffs=handoffs, - tracing=tracing, - **kwargs, - ) - - # Serialize response output items for span tracing - new_items = [] - final_output = None - - if hasattr(response, 'output') and response.output: - response_output = response.output if isinstance(response.output, list) else [response.output] - - for item in response_output: - try: - item_dict = _serialize_item(item) - if item_dict: - new_items.append(item_dict) - - # Extract final_output from message type if available - if item_dict.get('type') == 'message' and not final_output: - content = item_dict.get('content', []) - if content and isinstance(content, list): - for content_part in content: - if isinstance(content_part, dict) and 'text' in content_part: - final_output = content_part['text'] - break - except Exception as e: - logger.warning(f"Failed to serialize item in temporal tracing model: {e}") - continue - - # Set span output with structured data - span.output = { # type: ignore[attr-defined] - "new_items": new_items, - "final_output": final_output, - } - - return response - - except Exception as e: - # Record error in span - span.error = str(e) # type: ignore[attr-defined] - raise - else: - # No tracing context, just pass through - logger.debug("[TemporalTracingChatCompletionsModel] No tracing context available, calling base model directly") - return await self._base_model.get_response( - system_instructions=system_instructions, - input=input, - model_settings=model_settings, - tools=tools, - output_schema=output_schema, - handoffs=handoffs, - tracing=tracing, - **kwargs, - ) \ No newline at end of 
file diff --git a/src/agentex/lib/core/temporal/plugins/openai_agents/tests/__init__.py b/src/agentex/lib/core/temporal/plugins/openai_agents/tests/__init__.py deleted file mode 100644 index 0c635833..00000000 --- a/src/agentex/lib/core/temporal/plugins/openai_agents/tests/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -""" -Tests for the StreamingModel implementation in the OpenAI Agents plugin. -""" \ No newline at end of file diff --git a/src/agentex/lib/core/temporal/plugins/openai_agents/tests/conftest.py b/src/agentex/lib/core/temporal/plugins/openai_agents/tests/conftest.py deleted file mode 100644 index 599cb1e3..00000000 --- a/src/agentex/lib/core/temporal/plugins/openai_agents/tests/conftest.py +++ /dev/null @@ -1,297 +0,0 @@ -""" -Pytest configuration and fixtures for StreamingModel tests. -""" - -import uuid -from unittest.mock import AsyncMock, MagicMock, patch - -import pytest -import pytest_asyncio -from agents import ( - Handoff, - FunctionTool, - ModelSettings, -) -from agents.tool import ( - ComputerTool, - HostedMCPTool, - WebSearchTool, - FileSearchTool, - LocalShellTool, - CodeInterpreterTool, - ImageGenerationTool, -) -from agents.model_settings import Reasoning # type: ignore[attr-defined] -from openai.types.responses import ( - ResponseCompletedEvent, - ResponseTextDeltaEvent, - ResponseOutputItemAddedEvent, - ResponseReasoningSummaryTextDeltaEvent, -) - -# Configure pytest-asyncio -pytest_plugins = ("pytest_asyncio",) - - -@pytest.fixture -def mock_openai_client(): - """Mock AsyncOpenAI client""" - client = MagicMock() - client.responses = MagicMock() - return client - - -@pytest.fixture -def sample_task_id(): - """Generate a sample task ID""" - return f"task_{uuid.uuid4().hex[:8]}" - - -@pytest.fixture -def mock_streaming_context(): - """Mock streaming context for testing""" - context = AsyncMock() - context.task_message = MagicMock() - context.stream_update = AsyncMock() - context.close = AsyncMock() - context.__aenter__ = AsyncMock(return_value=context) - context.__aexit__ = AsyncMock() - return context - - -@pytest.fixture(autouse=True) -def mock_adk_streaming(): - """Mock the ADK streaming module""" - with patch('agentex.lib.adk.streaming') as mock_streaming: - mock_context = AsyncMock() - mock_context.task_message = MagicMock() - mock_context.stream_update = AsyncMock() - mock_context.close = AsyncMock() - mock_context.__aenter__ = AsyncMock(return_value=mock_context) - mock_context.__aexit__ = AsyncMock() - - mock_streaming.streaming_task_message_context.return_value = mock_context - yield mock_streaming - - -@pytest.fixture -def sample_function_tool(): - """Sample FunctionTool for testing""" - async def mock_tool_handler(_context, _args): - return {"temperature": "72F", "condition": "sunny"} - - return FunctionTool( - name="get_weather", - description="Get the current weather", - params_json_schema={ - "type": "object", - "properties": { - "location": {"type": "string"} - } - }, - on_invoke_tool=mock_tool_handler, - strict_json_schema=False - ) - - -@pytest.fixture -def sample_web_search_tool(): - """Sample WebSearchTool for testing""" - return WebSearchTool( - user_location=None, - search_context_size="medium" - ) - - -@pytest.fixture -def sample_file_search_tool(): - """Sample FileSearchTool for testing""" - return FileSearchTool( - vector_store_ids=["vs_123"], - max_num_results=10, - include_search_results=True - ) - - -@pytest.fixture -def sample_computer_tool(): - """Sample ComputerTool for testing""" - computer = MagicMock() - computer.environment = 
"desktop" - computer.dimensions = [1920, 1080] - return ComputerTool(computer=computer) - - -@pytest.fixture -def sample_hosted_mcp_tool(): - """Sample HostedMCPTool for testing""" - tool = MagicMock(spec=HostedMCPTool) - tool.tool_config = { - "type": "mcp", - "server_label": "test_server", - "name": "test_tool" - } - return tool - - -@pytest.fixture -def sample_image_generation_tool(): - """Sample ImageGenerationTool for testing""" - tool = MagicMock(spec=ImageGenerationTool) - tool.tool_config = { - "type": "image_generation", - "model": "dall-e-3" - } - return tool - - -@pytest.fixture -def sample_code_interpreter_tool(): - """Sample CodeInterpreterTool for testing""" - tool = MagicMock(spec=CodeInterpreterTool) - tool.tool_config = { - "type": "code_interpreter" - } - return tool - - -@pytest.fixture -def sample_local_shell_tool(): - """Sample LocalShellTool for testing""" - from agents import LocalShellExecutor - executor = MagicMock(spec=LocalShellExecutor) - return LocalShellTool(executor=executor) - - -@pytest.fixture -def sample_handoff(): - """Sample Handoff for testing""" - from agents import Agent - - async def mock_handoff_handler(_context, _args): - # Return a mock agent - return MagicMock(spec=Agent) - - return Handoff( - agent_name="support_agent", - tool_name="transfer_to_support", - tool_description="Transfer to support agent", - input_json_schema={"type": "object"}, - on_invoke_handoff=mock_handoff_handler - ) - - -@pytest.fixture -def basic_model_settings(): - """Basic ModelSettings for testing""" - return ModelSettings( - temperature=0.7, - max_tokens=1000, - top_p=0.9 - ) - - -@pytest.fixture -def reasoning_model_settings(): - """ModelSettings with reasoning enabled""" - return ModelSettings( - reasoning=Reasoning( - effort="medium", - generate_summary="auto" - ) - ) - - -@pytest.fixture -def mock_response_stream(): - """Mock a response stream with basic events""" - async def stream_generator(): - # Yield some basic events - yield ResponseOutputItemAddedEvent( # type: ignore[call-arg] - type="response.output_item.added", - output_index=0, - item=MagicMock(type="message") - ) - - yield ResponseTextDeltaEvent( # type: ignore[call-arg] - type="response.text.delta", - delta="Hello ", - output_index=0 - ) - - yield ResponseTextDeltaEvent( # type: ignore[call-arg] - type="response.text.delta", - delta="world!", - output_index=0 - ) - - yield ResponseCompletedEvent( # type: ignore[call-arg] - type="response.completed", - response=MagicMock( - output=[], - usage=MagicMock() - ) - ) - - return stream_generator() - - -@pytest.fixture -def mock_reasoning_stream(): - """Mock a response stream with reasoning events""" - async def stream_generator(): - # Start reasoning - yield ResponseOutputItemAddedEvent( # type: ignore[call-arg] - type="response.output_item.added", - output_index=0, - item=MagicMock(type="reasoning") - ) - - # Reasoning deltas - yield ResponseReasoningSummaryTextDeltaEvent( # type: ignore[call-arg] - type="response.reasoning_summary_text.delta", - delta="Let me think about this...", - summary_index=0 - ) - - # Complete - yield ResponseCompletedEvent( # type: ignore[call-arg] - type="response.completed", - response=MagicMock( - output=[], - usage=MagicMock() - ) - ) - - return stream_generator() - - -@pytest_asyncio.fixture(scope="function") -async def streaming_model(): - """Create a TemporalStreamingModel instance for testing""" - from ..models.temporal_streaming_model import TemporalStreamingModel - - model = TemporalStreamingModel(model_name="gpt-4o") - # 
Mock the OpenAI client with fresh mocks for each test - model.client = AsyncMock() - model.client.responses = AsyncMock() - - yield model - - # Cleanup after each test - if hasattr(model.client, 'close'): - await model.client.close() - - -# Mock environment variables for testing -@pytest.fixture(autouse=True) -def mock_env_vars(): - """Mock environment variables""" - env_vars = { - "OPENAI_API_KEY": "test-key-123", - "AGENT_NAME": "test-agent", - "ACP_URL": "http://localhost:8000", - } - - with patch.dict("os.environ", env_vars): - yield env_vars \ No newline at end of file diff --git a/src/agentex/lib/core/temporal/plugins/openai_agents/tests/test_streaming_model.py b/src/agentex/lib/core/temporal/plugins/openai_agents/tests/test_streaming_model.py deleted file mode 100644 index 457ec954..00000000 --- a/src/agentex/lib/core/temporal/plugins/openai_agents/tests/test_streaming_model.py +++ /dev/null @@ -1,848 +0,0 @@ -""" -Comprehensive tests for StreamingModel with all configurations and tool types. -""" - -from unittest.mock import AsyncMock, MagicMock - -import pytest -from agents import ModelSettings -from openai import NOT_GIVEN -from agents.model_settings import Reasoning, MCPToolChoice # type: ignore[attr-defined] - - -class TestStreamingModelSettings: - """Test that all ModelSettings parameters work with Responses API""" - - @pytest.mark.asyncio - async def test_temperature_setting(self, streaming_model, _mock_adk_streaming, sample_task_id): - """Test that temperature parameter is properly passed to Responses API""" - streaming_model.client.responses.create = AsyncMock() - - # Mock the response stream - mock_stream = AsyncMock() - mock_stream.__aiter__.return_value = iter([ - MagicMock(type="response.completed", response=MagicMock(output=[])) - ]) - streaming_model.client.responses.create.return_value = mock_stream - - # Test with various temperature values - for temp in [0.0, 0.7, 1.5, 2.0]: - settings = ModelSettings(temperature=temp) - - await streaming_model.get_response( - system_instructions="Test", - input="Hello", - model_settings=settings, - tools=[], - output_schema=None, - handoffs=[], - tracing=None, - task_id=sample_task_id - ) - - # Verify temperature was passed correctly - create_call = streaming_model.client.responses.create.call_args - assert create_call.kwargs['temperature'] == temp - - @pytest.mark.asyncio - async def test_top_p_setting(self, streaming_model, _mock_adk_streaming, sample_task_id): - """Test that top_p parameter is properly passed to Responses API""" - streaming_model.client.responses.create = AsyncMock() - - mock_stream = AsyncMock() - mock_stream.__aiter__.return_value = iter([ - MagicMock(type="response.completed", response=MagicMock(output=[])) - ]) - streaming_model.client.responses.create.return_value = mock_stream - - # Test with various top_p values - for top_p in [0.1, 0.5, 0.9, None]: - settings = ModelSettings(top_p=top_p) - - await streaming_model.get_response( - system_instructions="Test", - input="Hello", - model_settings=settings, - tools=[], - output_schema=None, - handoffs=[], - tracing=None, - task_id=sample_task_id - ) - - create_call = streaming_model.client.responses.create.call_args - expected = top_p if top_p is not None else NOT_GIVEN - assert create_call.kwargs['top_p'] == expected - - @pytest.mark.asyncio - async def test_max_tokens_setting(self, streaming_model, _mock_adk_streaming, sample_task_id): - """Test that max_tokens is properly mapped to max_output_tokens""" - streaming_model.client.responses.create = AsyncMock() 
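The tests fake the Responses API stream by giving the mocked `client.responses.create` a return value whose `__aiter__` yields pre-built events, as in this condensed sketch:

```python
from unittest.mock import AsyncMock, MagicMock


def make_mock_stream(events):
    # The object returned by the mocked create(); `async for` consumes
    # __aiter__, which is primed with the pre-built events.
    stream = AsyncMock()
    stream.__aiter__.return_value = iter(events)
    return stream


client = AsyncMock()
client.responses.create = AsyncMock(
    return_value=make_mock_stream(
        [MagicMock(type="response.completed", response=MagicMock(output=[]))]
    )
)
```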
- - mock_stream = AsyncMock() - mock_stream.__aiter__.return_value = iter([ - MagicMock(type="response.completed", response=MagicMock(output=[])) - ]) - streaming_model.client.responses.create.return_value = mock_stream - - settings = ModelSettings(max_tokens=2000) - - await streaming_model.get_response( - system_instructions="Test", - input="Hello", - model_settings=settings, - tools=[], - output_schema=None, - handoffs=[], - tracing=None, - task_id=sample_task_id - ) - - create_call = streaming_model.client.responses.create.call_args - assert create_call.kwargs['max_output_tokens'] == 2000 - - @pytest.mark.asyncio - async def test_reasoning_effort_settings(self, streaming_model, _mock_adk_streaming, sample_task_id): - """Test reasoning effort levels (low/medium/high)""" - streaming_model.client.responses.create = AsyncMock() - - mock_stream = AsyncMock() - mock_stream.__aiter__.return_value = iter([ - MagicMock(type="response.completed", response=MagicMock(output=[])) - ]) - streaming_model.client.responses.create.return_value = mock_stream - - for effort in ["low", "medium", "high"]: - settings = ModelSettings( - reasoning=Reasoning(effort=effort) - ) - - await streaming_model.get_response( - system_instructions="Test", - input="Hello", - model_settings=settings, - tools=[], - output_schema=None, - handoffs=[], - tracing=None, - task_id=sample_task_id - ) - - create_call = streaming_model.client.responses.create.call_args - assert create_call.kwargs['reasoning'] == {"effort": effort} - - @pytest.mark.asyncio - async def test_reasoning_summary_settings(self, streaming_model, _mock_adk_streaming, sample_task_id): - """Test reasoning summary settings (auto/none)""" - streaming_model.client.responses.create = AsyncMock() - - mock_stream = AsyncMock() - mock_stream.__aiter__.return_value = iter([ - MagicMock(type="response.completed", response=MagicMock(output=[])) - ]) - streaming_model.client.responses.create.return_value = mock_stream - - for summary in ["auto", "concise", "detailed"]: - settings = ModelSettings( - reasoning=Reasoning(effort="medium", generate_summary=summary) - ) - - await streaming_model.get_response( - system_instructions="Test", - input="Hello", - model_settings=settings, - tools=[], - output_schema=None, - handoffs=[], - tracing=None, - task_id=sample_task_id - ) - - create_call = streaming_model.client.responses.create.call_args - assert create_call.kwargs['reasoning'] == {"effort": "medium", "summary": summary} - - @pytest.mark.asyncio - async def test_tool_choice_variations(self, streaming_model, _mock_adk_streaming, sample_task_id, sample_function_tool): - """Test various tool_choice settings""" - streaming_model.client.responses.create = AsyncMock() - - mock_stream = AsyncMock() - mock_stream.__aiter__.return_value = iter([ - MagicMock(type="response.completed", response=MagicMock(output=[])) - ]) - streaming_model.client.responses.create.return_value = mock_stream - - # Test different tool_choice options - test_cases = [ - ("auto", "auto"), - ("required", "required"), - ("none", "none"), - ("get_weather", {"type": "function", "name": "get_weather"}), - ("web_search", {"type": "web_search"}), - (MCPToolChoice(server_label="test", name="tool"), {"server_label": "test", "type": "mcp", "name": "tool"}) - ] - - for tool_choice, expected in test_cases: - settings = ModelSettings(tool_choice=tool_choice) - - await streaming_model.get_response( - system_instructions="Test", - input="Hello", - model_settings=settings, - tools=[sample_function_tool], - 
output_schema=None, - handoffs=[], - tracing=None, - task_id=sample_task_id - ) - - create_call = streaming_model.client.responses.create.call_args - assert create_call.kwargs['tool_choice'] == expected - - @pytest.mark.asyncio - async def test_parallel_tool_calls(self, streaming_model, _mock_adk_streaming, sample_task_id, sample_function_tool): - """Test parallel tool calls setting""" - streaming_model.client.responses.create = AsyncMock() - - mock_stream = AsyncMock() - mock_stream.__aiter__.return_value = iter([ - MagicMock(type="response.completed", response=MagicMock(output=[])) - ]) - streaming_model.client.responses.create.return_value = mock_stream - - for parallel in [True, False]: - settings = ModelSettings(parallel_tool_calls=parallel) - - await streaming_model.get_response( - system_instructions="Test", - input="Hello", - model_settings=settings, - tools=[sample_function_tool], - output_schema=None, - handoffs=[], - tracing=None, - task_id=sample_task_id - ) - - create_call = streaming_model.client.responses.create.call_args - assert create_call.kwargs['parallel_tool_calls'] == parallel - - @pytest.mark.asyncio - async def test_truncation_strategy(self, streaming_model, _mock_adk_streaming, sample_task_id): - """Test truncation parameter""" - streaming_model.client.responses.create = AsyncMock() - - mock_stream = AsyncMock() - mock_stream.__aiter__.return_value = iter([ - MagicMock(type="response.completed", response=MagicMock(output=[])) - ]) - streaming_model.client.responses.create.return_value = mock_stream - - # truncation now accepts 'auto' or 'disabled' string literals - settings = ModelSettings(truncation="auto") - - await streaming_model.get_response( - system_instructions="Test", - input="Hello", - model_settings=settings, - tools=[], - output_schema=None, - handoffs=[], - tracing=None, - task_id=sample_task_id - ) - - create_call = streaming_model.client.responses.create.call_args - assert create_call.kwargs['truncation'] == "auto" - - @pytest.mark.asyncio - async def test_response_include(self, streaming_model, _mock_adk_streaming, sample_task_id, sample_file_search_tool): - """Test response include parameter""" - streaming_model.client.responses.create = AsyncMock() - - mock_stream = AsyncMock() - mock_stream.__aiter__.return_value = iter([ - MagicMock(type="response.completed", response=MagicMock(output=[])) - ]) - streaming_model.client.responses.create.return_value = mock_stream - - settings = ModelSettings( - response_include=["reasoning.encrypted_content", "message.output_text.logprobs"] - ) - - await streaming_model.get_response( - system_instructions="Test", - input="Hello", - model_settings=settings, - tools=[sample_file_search_tool], # This adds file_search_call.results - output_schema=None, - handoffs=[], - tracing=None, - task_id=sample_task_id - ) - - create_call = streaming_model.client.responses.create.call_args - include_list = create_call.kwargs['include'] - assert "reasoning.encrypted_content" in include_list - assert "message.output_text.logprobs" in include_list - assert "file_search_call.results" in include_list # Added by file search tool - - @pytest.mark.asyncio - async def test_verbosity(self, streaming_model, _mock_adk_streaming, sample_task_id): - """Test verbosity settings""" - streaming_model.client.responses.create = AsyncMock() - - mock_stream = AsyncMock() - mock_stream.__aiter__.return_value = iter([ - MagicMock(type="response.completed", response=MagicMock(output=[])) - ]) - streaming_model.client.responses.create.return_value = 
mock_stream - - settings = ModelSettings(verbosity="high") - - await streaming_model.get_response( - system_instructions="Test", - input="Hello", - model_settings=settings, - tools=[], - output_schema=None, - handoffs=[], - tracing=None, - task_id=sample_task_id - ) - - create_call = streaming_model.client.responses.create.call_args - assert create_call.kwargs['text'] == {"verbosity": "high"} - - @pytest.mark.asyncio - async def test_metadata_and_store(self, streaming_model, _mock_adk_streaming, sample_task_id): - """Test metadata and store parameters""" - streaming_model.client.responses.create = AsyncMock() - - mock_stream = AsyncMock() - mock_stream.__aiter__.return_value = iter([ - MagicMock(type="response.completed", response=MagicMock(output=[])) - ]) - streaming_model.client.responses.create.return_value = mock_stream - - metadata = {"user_id": "123", "session": "abc"} - store = True - - settings = ModelSettings( - metadata=metadata, - store=store - ) - - await streaming_model.get_response( - system_instructions="Test", - input="Hello", - model_settings=settings, - tools=[], - output_schema=None, - handoffs=[], - tracing=None, - task_id=sample_task_id - ) - - create_call = streaming_model.client.responses.create.call_args - assert create_call.kwargs['metadata'] == metadata - assert create_call.kwargs['store'] == store - - @pytest.mark.asyncio - async def test_extra_headers_and_body(self, streaming_model, _mock_adk_streaming, sample_task_id): - """Test extra customization parameters""" - streaming_model.client.responses.create = AsyncMock() - - mock_stream = AsyncMock() - mock_stream.__aiter__.return_value = iter([ - MagicMock(type="response.completed", response=MagicMock(output=[])) - ]) - streaming_model.client.responses.create.return_value = mock_stream - - extra_headers = {"X-Custom": "header"} - extra_body = {"custom_field": "value"} - extra_query = {"param": "value"} - - settings = ModelSettings( - extra_headers=extra_headers, - extra_body=extra_body, - extra_query=extra_query - ) - - await streaming_model.get_response( - system_instructions="Test", - input="Hello", - model_settings=settings, - tools=[], - output_schema=None, - handoffs=[], - tracing=None, - task_id=sample_task_id - ) - - create_call = streaming_model.client.responses.create.call_args - assert create_call.kwargs['extra_headers'] == extra_headers - assert create_call.kwargs['extra_body'] == extra_body - assert create_call.kwargs['extra_query'] == extra_query - - @pytest.mark.asyncio - async def test_top_logprobs(self, streaming_model, _mock_adk_streaming, sample_task_id): - """Test top_logprobs parameter""" - streaming_model.client.responses.create = AsyncMock() - - mock_stream = AsyncMock() - mock_stream.__aiter__.return_value = iter([ - MagicMock(type="response.completed", response=MagicMock(output=[])) - ]) - streaming_model.client.responses.create.return_value = mock_stream - - settings = ModelSettings(top_logprobs=5) - - await streaming_model.get_response( - system_instructions="Test", - input="Hello", - model_settings=settings, - tools=[], - output_schema=None, - handoffs=[], - tracing=None, - task_id=sample_task_id - ) - - create_call = streaming_model.client.responses.create.call_args - # top_logprobs goes into extra_args - assert "top_logprobs" in create_call.kwargs - assert create_call.kwargs['top_logprobs'] == 5 - # Also should add to include list - assert "message.output_text.logprobs" in create_call.kwargs['include'] - - -class TestStreamingModelTools: - """Test that all tool types work with 
streaming""" - - @pytest.mark.asyncio - async def test_function_tool(self, streaming_model, _mock_adk_streaming, sample_task_id, sample_function_tool): - """Test FunctionTool conversion and streaming""" - streaming_model.client.responses.create = AsyncMock() - - mock_stream = AsyncMock() - mock_stream.__aiter__.return_value = iter([ - MagicMock(type="response.completed", response=MagicMock(output=[])) - ]) - streaming_model.client.responses.create.return_value = mock_stream - - await streaming_model.get_response( - system_instructions="Test", - input="Hello", - model_settings=ModelSettings(), - tools=[sample_function_tool], - output_schema=None, - handoffs=[], - tracing=None, - task_id=sample_task_id - ) - - create_call = streaming_model.client.responses.create.call_args - tools = create_call.kwargs['tools'] - assert len(tools) == 1 - assert tools[0]['type'] == 'function' - assert tools[0]['name'] == 'get_weather' - assert tools[0]['description'] == 'Get the current weather' - assert 'parameters' in tools[0] - - @pytest.mark.asyncio - async def test_web_search_tool(self, streaming_model, _mock_adk_streaming, sample_task_id, sample_web_search_tool): - """Test WebSearchTool conversion""" - streaming_model.client.responses.create = AsyncMock() - - mock_stream = AsyncMock() - mock_stream.__aiter__.return_value = iter([ - MagicMock(type="response.completed", response=MagicMock(output=[])) - ]) - streaming_model.client.responses.create.return_value = mock_stream - - await streaming_model.get_response( - system_instructions="Test", - input="Hello", - model_settings=ModelSettings(), - tools=[sample_web_search_tool], - output_schema=None, - handoffs=[], - tracing=None, - task_id=sample_task_id - ) - - create_call = streaming_model.client.responses.create.call_args - tools = create_call.kwargs['tools'] - assert len(tools) == 1 - assert tools[0]['type'] == 'web_search' - - @pytest.mark.asyncio - async def test_file_search_tool(self, streaming_model, _mock_adk_streaming, sample_task_id, sample_file_search_tool): - """Test FileSearchTool conversion""" - streaming_model.client.responses.create = AsyncMock() - - mock_stream = AsyncMock() - mock_stream.__aiter__.return_value = iter([ - MagicMock(type="response.completed", response=MagicMock(output=[])) - ]) - streaming_model.client.responses.create.return_value = mock_stream - - await streaming_model.get_response( - system_instructions="Test", - input="Hello", - model_settings=ModelSettings(), - tools=[sample_file_search_tool], - output_schema=None, - handoffs=[], - tracing=None, - task_id=sample_task_id - ) - - create_call = streaming_model.client.responses.create.call_args - tools = create_call.kwargs['tools'] - assert len(tools) == 1 - assert tools[0]['type'] == 'file_search' - assert tools[0]['vector_store_ids'] == ['vs_123'] - assert tools[0]['max_num_results'] == 10 - - @pytest.mark.asyncio - async def test_computer_tool(self, streaming_model, _mock_adk_streaming, sample_task_id, sample_computer_tool): - """Test ComputerTool conversion""" - streaming_model.client.responses.create = AsyncMock() - - mock_stream = AsyncMock() - mock_stream.__aiter__.return_value = iter([ - MagicMock(type="response.completed", response=MagicMock(output=[])) - ]) - streaming_model.client.responses.create.return_value = mock_stream - - await streaming_model.get_response( - system_instructions="Test", - input="Hello", - model_settings=ModelSettings(), - tools=[sample_computer_tool], - output_schema=None, - handoffs=[], - tracing=None, - task_id=sample_task_id - ) - - 
create_call = streaming_model.client.responses.create.call_args - tools = create_call.kwargs['tools'] - assert len(tools) == 1 - assert tools[0]['type'] == 'computer_use_preview' - assert tools[0]['environment'] == 'desktop' - assert tools[0]['display_width'] == 1920 - assert tools[0]['display_height'] == 1080 - - @pytest.mark.asyncio - async def test_multiple_computer_tools_error(self, streaming_model, _mock_adk_streaming, sample_task_id, sample_computer_tool): - """Test that multiple computer tools raise an error""" - streaming_model.client.responses.create = AsyncMock() - - # Create two computer tools - computer2 = MagicMock() - computer2.environment = "mobile" - computer2.dimensions = [375, 812] - from agents.tool import ComputerTool - second_computer_tool = ComputerTool(computer=computer2) - - with pytest.raises(ValueError, match="You can only provide one computer tool"): - await streaming_model.get_response( - system_instructions="Test", - input="Hello", - model_settings=ModelSettings(), - tools=[sample_computer_tool, second_computer_tool], - output_schema=None, - handoffs=[], - tracing=None, - task_id=sample_task_id - ) - - @pytest.mark.asyncio - async def test_hosted_mcp_tool(self, streaming_model, _mock_adk_streaming, sample_task_id, sample_hosted_mcp_tool): - """Test HostedMCPTool conversion""" - streaming_model.client.responses.create = AsyncMock() - - mock_stream = AsyncMock() - mock_stream.__aiter__.return_value = iter([ - MagicMock(type="response.completed", response=MagicMock(output=[])) - ]) - streaming_model.client.responses.create.return_value = mock_stream - - await streaming_model.get_response( - system_instructions="Test", - input="Hello", - model_settings=ModelSettings(), - tools=[sample_hosted_mcp_tool], - output_schema=None, - handoffs=[], - tracing=None, - task_id=sample_task_id - ) - - create_call = streaming_model.client.responses.create.call_args - tools = create_call.kwargs['tools'] - assert len(tools) == 1 - assert tools[0]['type'] == 'mcp' - assert tools[0]['server_label'] == 'test_server' - - @pytest.mark.asyncio - async def test_image_generation_tool(self, streaming_model, _mock_adk_streaming, sample_task_id, sample_image_generation_tool): - """Test ImageGenerationTool conversion""" - streaming_model.client.responses.create = AsyncMock() - - mock_stream = AsyncMock() - mock_stream.__aiter__.return_value = iter([ - MagicMock(type="response.completed", response=MagicMock(output=[])) - ]) - streaming_model.client.responses.create.return_value = mock_stream - - await streaming_model.get_response( - system_instructions="Test", - input="Hello", - model_settings=ModelSettings(), - tools=[sample_image_generation_tool], - output_schema=None, - handoffs=[], - tracing=None, - task_id=sample_task_id - ) - - create_call = streaming_model.client.responses.create.call_args - tools = create_call.kwargs['tools'] - assert len(tools) == 1 - assert tools[0]['type'] == 'image_generation' - - @pytest.mark.asyncio - async def test_code_interpreter_tool(self, streaming_model, _mock_adk_streaming, sample_task_id, sample_code_interpreter_tool): - """Test CodeInterpreterTool conversion""" - streaming_model.client.responses.create = AsyncMock() - - mock_stream = AsyncMock() - mock_stream.__aiter__.return_value = iter([ - MagicMock(type="response.completed", response=MagicMock(output=[])) - ]) - streaming_model.client.responses.create.return_value = mock_stream - - await streaming_model.get_response( - system_instructions="Test", - input="Hello", - model_settings=ModelSettings(), - 
tools=[sample_code_interpreter_tool], - output_schema=None, - handoffs=[], - tracing=None, - task_id=sample_task_id - ) - - create_call = streaming_model.client.responses.create.call_args - tools = create_call.kwargs['tools'] - assert len(tools) == 1 - assert tools[0]['type'] == 'code_interpreter' - - @pytest.mark.asyncio - async def test_local_shell_tool(self, streaming_model, _mock_adk_streaming, sample_task_id, sample_local_shell_tool): - """Test LocalShellTool conversion""" - streaming_model.client.responses.create = AsyncMock() - - mock_stream = AsyncMock() - mock_stream.__aiter__.return_value = iter([ - MagicMock(type="response.completed", response=MagicMock(output=[])) - ]) - streaming_model.client.responses.create.return_value = mock_stream - - await streaming_model.get_response( - system_instructions="Test", - input="Hello", - model_settings=ModelSettings(), - tools=[sample_local_shell_tool], - output_schema=None, - handoffs=[], - tracing=None, - task_id=sample_task_id - ) - - create_call = streaming_model.client.responses.create.call_args - tools = create_call.kwargs['tools'] - assert len(tools) == 1 - assert tools[0]['type'] == 'local_shell' - # working_directory no longer in API - LocalShellTool uses executor internally - - @pytest.mark.asyncio - async def test_handoffs(self, streaming_model, _mock_adk_streaming, sample_task_id, sample_handoff): - """Test Handoff conversion to function tools""" - streaming_model.client.responses.create = AsyncMock() - - mock_stream = AsyncMock() - mock_stream.__aiter__.return_value = iter([ - MagicMock(type="response.completed", response=MagicMock(output=[])) - ]) - streaming_model.client.responses.create.return_value = mock_stream - - await streaming_model.get_response( - system_instructions="Test", - input="Hello", - model_settings=ModelSettings(), - tools=[], - output_schema=None, - handoffs=[sample_handoff], - tracing=None, - task_id=sample_task_id - ) - - create_call = streaming_model.client.responses.create.call_args - tools = create_call.kwargs['tools'] - assert len(tools) == 1 - assert tools[0]['type'] == 'function' - assert tools[0]['name'] == 'transfer_to_support' - assert tools[0]['description'] == 'Transfer to support agent' - - @pytest.mark.asyncio - async def test_mixed_tools(self, streaming_model, _mock_adk_streaming, sample_task_id, - sample_function_tool, sample_web_search_tool, sample_handoff): - """Test multiple tools together""" - streaming_model.client.responses.create = AsyncMock() - - mock_stream = AsyncMock() - mock_stream.__aiter__.return_value = iter([ - MagicMock(type="response.completed", response=MagicMock(output=[])) - ]) - streaming_model.client.responses.create.return_value = mock_stream - - await streaming_model.get_response( - system_instructions="Test", - input="Hello", - model_settings=ModelSettings(), - tools=[sample_function_tool, sample_web_search_tool], - output_schema=None, - handoffs=[sample_handoff], - tracing=None, - task_id=sample_task_id - ) - - create_call = streaming_model.client.responses.create.call_args - tools = create_call.kwargs['tools'] - assert len(tools) == 3 # 2 tools + 1 handoff - - # Check each tool type is present - tool_types = [t['type'] for t in tools] - assert 'function' in tool_types # function tool and handoff - assert 'web_search' in tool_types - - -class TestStreamingModelBasics: - """Test core streaming functionality""" - - @pytest.mark.asyncio - async def test_responses_api_streaming(self, streaming_model, mock_adk_streaming, sample_task_id): - """Test basic Responses API 
streaming flow""" - streaming_model.client.responses.create = AsyncMock() - - # Create a mock stream with text deltas - mock_stream = AsyncMock() - events = [ - MagicMock(type="response.output_item.added", item=MagicMock(type="message")), - MagicMock(type="response.text.delta", delta="Hello "), - MagicMock(type="response.text.delta", delta="world!"), - MagicMock(type="response.completed", response=MagicMock(output=[])) - ] - mock_stream.__aiter__.return_value = iter(events) - streaming_model.client.responses.create.return_value = mock_stream - - result = await streaming_model.get_response( - system_instructions="Test", - input="Hello", - model_settings=ModelSettings(), - tools=[], - output_schema=None, - handoffs=[], - tracing=None, - task_id=sample_task_id - ) - - # Verify streaming context was created - mock_adk_streaming.streaming_task_message_context.assert_called_with( - task_id=sample_task_id, - initial_content=mock_adk_streaming.streaming_task_message_context.call_args.kwargs['initial_content'] - ) - - # Verify result is returned as ModelResponse - from agents import ModelResponse - assert isinstance(result, ModelResponse) - - @pytest.mark.asyncio - async def test_task_id_threading(self, streaming_model, mock_adk_streaming): - """Test that task_id is properly threaded through to streaming context""" - streaming_model.client.responses.create = AsyncMock() - - mock_stream = AsyncMock() - mock_stream.__aiter__.return_value = iter([ - MagicMock(type="response.completed", response=MagicMock(output=[])) - ]) - streaming_model.client.responses.create.return_value = mock_stream - - task_id = "test_task_12345" - - await streaming_model.get_response( - system_instructions="Test", - input="Hello", - model_settings=ModelSettings(), - tools=[], - output_schema=None, - handoffs=[], - tracing=None, - task_id=task_id - ) - - # Verify task_id was passed to streaming context - mock_adk_streaming.streaming_task_message_context.assert_called() - call_args = mock_adk_streaming.streaming_task_message_context.call_args - assert call_args.kwargs['task_id'] == task_id - - @pytest.mark.asyncio - async def test_redis_context_creation(self, streaming_model, mock_adk_streaming, sample_task_id): - """Test that Redis streaming contexts are created properly""" - streaming_model.client.responses.create = AsyncMock() - - # Mock stream with reasoning - mock_stream = AsyncMock() - events = [ - MagicMock(type="response.output_item.added", item=MagicMock(type="reasoning")), - MagicMock(type="response.reasoning_summary_text.delta", delta="Thinking...", summary_index=0), - MagicMock(type="response.completed", response=MagicMock(output=[])) - ] - mock_stream.__aiter__.return_value = iter(events) - streaming_model.client.responses.create.return_value = mock_stream - - await streaming_model.get_response( - system_instructions="Test", - input="Hello", - model_settings=ModelSettings(reasoning=Reasoning(effort="medium")), - tools=[], - output_schema=None, - handoffs=[], - tracing=None, - task_id=sample_task_id - ) - - # Should create at least one context for reasoning - assert mock_adk_streaming.streaming_task_message_context.call_count >= 1 - - @pytest.mark.asyncio - async def test_missing_task_id_error(self, streaming_model): - """Test that missing task_id raises appropriate error""" - streaming_model.client.responses.create = AsyncMock() - - with pytest.raises(ValueError, match="task_id is required"): - await streaming_model.get_response( - system_instructions="Test", - input="Hello", - model_settings=ModelSettings(), - 
tools=[], - output_schema=None, - handoffs=[], - tracing=None, - task_id=None # Missing task_id - ) \ No newline at end of file diff --git a/src/agentex/lib/core/temporal/services/__init__.py b/src/agentex/lib/core/temporal/services/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/agentex/lib/core/temporal/services/temporal_task_service.py b/src/agentex/lib/core/temporal/services/temporal_task_service.py deleted file mode 100644 index 5551ebcb..00000000 --- a/src/agentex/lib/core/temporal/services/temporal_task_service.py +++ /dev/null @@ -1,74 +0,0 @@ -from __future__ import annotations - -from typing import Any - -from agentex.types.task import Task -from agentex.types.agent import Agent -from agentex.types.event import Event -from agentex.lib.types.acp import SendEventParams, CreateTaskParams -from agentex.lib.environment_variables import EnvironmentVariables -from agentex.lib.core.clients.temporal.types import WorkflowState -from agentex.lib.core.temporal.types.workflow import SignalName -from agentex.lib.core.clients.temporal.temporal_client import TemporalClient - - -class TemporalTaskService: - """ - Submits Agent agent_tasks to the async runtime for execution. - """ - - def __init__( - self, - temporal_client: TemporalClient, - env_vars: EnvironmentVariables, - ): - self._temporal_client = temporal_client - self._env_vars = env_vars - - - async def submit_task(self, agent: Agent, task: Task, params: dict[str, Any] | None) -> str: - """ - Submit a task to the async runtime for execution. - - returns the workflow ID of the temporal workflow - """ - return await self._temporal_client.start_workflow( - workflow=self._env_vars.WORKFLOW_NAME, - arg=CreateTaskParams( - agent=agent, - task=task, - params=params, - ), - id=task.id, - task_queue=self._env_vars.WORKFLOW_TASK_QUEUE, - ) - - async def get_state(self, task_id: str) -> WorkflowState: - """ - Get the task state from the async runtime. 
- """ - return await self._temporal_client.get_workflow_status( - workflow_id=task_id, - ) - - async def send_event(self, agent: Agent, task: Task, event: Event, request: dict | None = None) -> None: - return await self._temporal_client.send_signal( - workflow_id=task.id, - signal=SignalName.RECEIVE_EVENT.value, - payload=SendEventParams( - agent=agent, - task=task, - event=event, - request=request, - ).model_dump(), - ) - - async def cancel(self, task_id: str) -> None: - return await self._temporal_client.cancel_workflow( - workflow_id=task_id, - ) - - async def terminate(self, task_id: str) -> None: - return await self._temporal_client.terminate_workflow( - workflow_id=task_id, - ) diff --git a/src/agentex/lib/core/temporal/types/__init__.py b/src/agentex/lib/core/temporal/types/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/agentex/lib/core/temporal/types/workflow.py b/src/agentex/lib/core/temporal/types/workflow.py deleted file mode 100644 index 973bb52c..00000000 --- a/src/agentex/lib/core/temporal/types/workflow.py +++ /dev/null @@ -1,5 +0,0 @@ -from enum import Enum - - -class SignalName(str, Enum): - RECEIVE_EVENT = "receive_event" diff --git a/src/agentex/lib/core/temporal/workers/__init__.py b/src/agentex/lib/core/temporal/workers/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/agentex/lib/core/temporal/workers/worker.py b/src/agentex/lib/core/temporal/workers/worker.py deleted file mode 100644 index 28cab2e1..00000000 --- a/src/agentex/lib/core/temporal/workers/worker.py +++ /dev/null @@ -1,247 +0,0 @@ -from __future__ import annotations - -import os -import uuid -import datetime -import dataclasses -from typing import Any, overload, override -from collections.abc import Callable -from concurrent.futures import ThreadPoolExecutor - -from aiohttp import web -from temporalio.client import Client, Plugin as ClientPlugin -from temporalio.worker import ( - Plugin as WorkerPlugin, - Worker, - Interceptor, - UnsandboxedWorkflowRunner, -) -from temporalio.runtime import Runtime, TelemetryConfig, OpenTelemetryConfig -from temporalio.converter import ( - DataConverter, - JSONTypeConverter, - AdvancedJSONEncoder, - DefaultPayloadConverter, - CompositePayloadConverter, - JSONPlainPayloadConverter, - _JSONTypeConverterUnhandled, -) -from temporalio.contrib.openai_agents import OpenAIAgentsPlugin - -from agentex.lib.utils.logging import make_logger -from agentex.lib.utils.registration import register_agent -from agentex.lib.environment_variables import EnvironmentVariables - -logger = make_logger(__name__) - - -class DateTimeJSONEncoder(AdvancedJSONEncoder): - @override - def default(self, o: Any) -> Any: - if isinstance(o, datetime.datetime): - return o.isoformat() - return super().default(o) - - -class DateTimeJSONTypeConverter(JSONTypeConverter): - @override - def to_typed_value(self, hint: type, value: Any) -> Any | None | _JSONTypeConverterUnhandled: - if hint == datetime.datetime: - return datetime.datetime.fromisoformat(value) - return JSONTypeConverter.Unhandled - - -class DateTimePayloadConverter(CompositePayloadConverter): - def __init__(self) -> None: - json_converter = JSONPlainPayloadConverter( - encoder=DateTimeJSONEncoder, - custom_type_converters=[DateTimeJSONTypeConverter()], - ) - super().__init__( - *[ - c if not isinstance(c, JSONPlainPayloadConverter) else json_converter - for c in DefaultPayloadConverter.default_encoding_payload_converters - ] - ) - - -custom_data_converter = dataclasses.replace( - 
DataConverter.default, - payload_converter_class=DateTimePayloadConverter, -) - - -def _validate_plugins(plugins: list) -> None: - """Validate that all items in the plugins list are valid Temporal plugins.""" - for i, plugin in enumerate(plugins): - if not isinstance(plugin, (ClientPlugin, WorkerPlugin)): - raise TypeError( - f"Plugin at index {i} must be an instance of temporalio.client.Plugin " - f"or temporalio.worker.Plugin, got {type(plugin).__name__}" - ) - - -def _validate_interceptors(interceptors: list) -> None: - """Validate that all items in the interceptors list are valid Temporal interceptors.""" - for i, interceptor in enumerate(interceptors): - if not isinstance(interceptor, Interceptor): - raise TypeError( - f"Interceptor at index {i} must be an instance of temporalio.worker.Interceptor, " - f"got {type(interceptor).__name__}" - ) - - -async def get_temporal_client(temporal_address: str, metrics_url: str | None = None, plugins: list = []) -> Client: - if plugins != []: # We don't need to validate the plugins if they are empty - _validate_plugins(plugins) - - # Check if OpenAI plugin is present - it needs to configure its own data converter - has_openai_plugin = any( - isinstance(p, OpenAIAgentsPlugin) for p in (plugins or []) - ) - - # Build connection kwargs - connect_kwargs = { - "target_host": temporal_address, - "plugins": plugins, - } - - # Only set data_converter if OpenAI plugin is not present - if not has_openai_plugin: - connect_kwargs["data_converter"] = custom_data_converter - - if not metrics_url: - client = await Client.connect(**connect_kwargs) - else: - runtime = Runtime(telemetry=TelemetryConfig(metrics=OpenTelemetryConfig(url=metrics_url))) - connect_kwargs["runtime"] = runtime - client = await Client.connect(**connect_kwargs) - return client - - -class AgentexWorker: - def __init__( - self, - task_queue, - max_workers: int = 10, - max_concurrent_activities: int = 10, - health_check_port: int | None = None, - plugins: list = [], - interceptors: list = [], - ): - self.task_queue = task_queue - self.activity_handles = [] - self.max_workers = max_workers - self.max_concurrent_activities = max_concurrent_activities - self.health_check_server_running = False - self.healthy = False - self.health_check_port = health_check_port if health_check_port is not None else EnvironmentVariables.refresh().HEALTH_CHECK_PORT - self.plugins = plugins - self.interceptors = interceptors - - @overload - async def run( - self, - activities: list[Callable], - *, - workflow: type, - ) -> None: ... - - @overload - async def run( - self, - activities: list[Callable], - *, - workflows: list[type], - ) -> None: ... 
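# The paired @overload signatures above force callers to pass exactly one
# of `workflow` or `workflows`; the implementation below accepts both as
# optional keywords and raises ValueError if neither is given. Hypothetical
# usage (MyWorkflow, OtherWorkflow, and my_activity are placeholder names,
# not defined in this module):
#
#     worker = AgentexWorker(task_queue="my-task-queue")
#     await worker.run(activities=[my_activity], workflow=MyWorkflow)
#     # or register several workflow classes on a single worker:
#     await worker.run(activities=[my_activity], workflows=[MyWorkflow, OtherWorkflow])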
- - async def run( - self, - activities: list[Callable], - *, - workflow: type | None = None, - workflows: list[type] | None = None, - ): - await self.start_health_check_server() - await self._register_agent() - - # Validate interceptors if any are provided - if self.interceptors: - _validate_interceptors(self.interceptors) - - temporal_client = await get_temporal_client( - temporal_address=os.environ.get("TEMPORAL_ADDRESS", "localhost:7233"), - plugins=self.plugins, - ) - - # Enable debug mode if AgentEx debug is enabled (disables deadlock detection) - debug_enabled = os.environ.get("AGENTEX_DEBUG_ENABLED", "false").lower() == "true" - if debug_enabled: - logger.info("๐Ÿ› [WORKER] Temporal debug mode enabled - deadlock detection disabled") - - if workflow is None and workflows is None: - raise ValueError("Either workflow or workflows must be provided") - - worker = Worker( - client=temporal_client, - task_queue=self.task_queue, - activity_executor=ThreadPoolExecutor(max_workers=self.max_workers), - workflows=[workflow] if workflows is None else workflows, - activities=activities, - workflow_runner=UnsandboxedWorkflowRunner(), - max_concurrent_activities=self.max_concurrent_activities, - build_id=str(uuid.uuid4()), - debug_mode=debug_enabled, # Disable deadlock detection in debug mode - interceptors=self.interceptors, # Pass interceptors to Worker - ) - - logger.info(f"Starting workers for task queue: {self.task_queue}") - # Eagerly set the worker status to healthy - self.healthy = True - logger.info(f"Running workers for task queue: {self.task_queue}") - await worker.run() - - async def _health_check(self): - return web.json_response(self.healthy) - - async def start_health_check_server(self): - if not self.health_check_server_running: - app = web.Application() - app.router.add_get("/readyz", lambda request: self._health_check()) # noqa: ARG005 - - # Disable access logging - runner = web.AppRunner(app, access_log=None) - await runner.setup() - - try: - site = web.TCPSite(runner, "0.0.0.0", self.health_check_port) - await site.start() - logger.info(f"Health check server running on http://0.0.0.0:{self.health_check_port}/readyz") - self.health_check_server_running = True - except OSError as e: - logger.error(f"Failed to start health check server on port {self.health_check_port}: {e}") - # Try alternative port if default fails - try: - alt_port = self.health_check_port + 1 - site = web.TCPSite(runner, "0.0.0.0", alt_port) - await site.start() - logger.info(f"Health check server running on alternative port http://0.0.0.0:{alt_port}/readyz") - self.health_check_server_running = True - except OSError as e: - logger.error(f"Failed to start health check server on alternative port {alt_port}: {e}") - raise - - """ - Register the worker with the Agentex server. - - Even though the Temporal server will also register the agent with the server, - doing this on the worker side is required to make sure that both share the API key - which is returned on registration and used to authenticate the worker with the Agentex server. 
- """ - - async def _register_agent(self): - env_vars = EnvironmentVariables.refresh() - if env_vars and env_vars.AGENTEX_BASE_URL: - await register_agent(env_vars) - else: - logger.warning("AGENTEX_BASE_URL not set, skipping worker registration") diff --git a/src/agentex/lib/core/temporal/workflows/workflow.py b/src/agentex/lib/core/temporal/workflows/workflow.py deleted file mode 100644 index 727f3ac8..00000000 --- a/src/agentex/lib/core/temporal/workflows/workflow.py +++ /dev/null @@ -1,26 +0,0 @@ -from abc import ABC, abstractmethod - -from temporalio import workflow - -from agentex.lib.types.acp import SendEventParams, CreateTaskParams -from agentex.lib.utils.logging import make_logger -from agentex.lib.core.temporal.types.workflow import SignalName - -logger = make_logger(__name__) - - -class BaseWorkflow(ABC): - def __init__( - self, - display_name: str, - ): - self.display_name = display_name - - @abstractmethod - @workflow.signal(name=SignalName.RECEIVE_EVENT) - async def on_task_event_send(self, params: SendEventParams) -> None: - raise NotImplementedError - - @abstractmethod - async def on_task_create(self, params: CreateTaskParams) -> None: - raise NotImplementedError diff --git a/src/agentex/lib/core/tracing/__init__.py b/src/agentex/lib/core/tracing/__init__.py deleted file mode 100644 index 9f91f9ce..00000000 --- a/src/agentex/lib/core/tracing/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from agentex.types.span import Span -from agentex.lib.core.tracing.trace import Trace, AsyncTrace -from agentex.lib.core.tracing.tracer import Tracer, AsyncTracer - -__all__ = ["Trace", "AsyncTrace", "Span", "Tracer", "AsyncTracer"] diff --git a/src/agentex/lib/core/tracing/processors/agentex_tracing_processor.py b/src/agentex/lib/core/tracing/processors/agentex_tracing_processor.py deleted file mode 100644 index 56e4fa34..00000000 --- a/src/agentex/lib/core/tracing/processors/agentex_tracing_processor.py +++ /dev/null @@ -1,118 +0,0 @@ -from typing import Any, Dict, override - -from agentex import Agentex -from agentex.types.span import Span -from agentex.lib.types.tracing import AgentexTracingProcessorConfig -from agentex.lib.adk.utils._modules.client import create_async_agentex_client -from agentex.lib.core.tracing.processors.tracing_processor_interface import ( - SyncTracingProcessor, - AsyncTracingProcessor, -) - - -class AgentexSyncTracingProcessor(SyncTracingProcessor): - def __init__(self, config: AgentexTracingProcessorConfig): # noqa: ARG002 - self.client = Agentex() - - @override - def on_span_start(self, span: Span) -> None: - self.client.spans.create( - name=span.name, - start_time=span.start_time, - end_time=span.end_time, - trace_id=span.trace_id, - id=span.id, - data=span.data, - input=span.input, - output=span.output, - parent_id=span.parent_id, - ) - - @override - def on_span_end(self, span: Span) -> None: - update: Dict[str, Any] = {} - if span.trace_id: - update["trace_id"] = span.trace_id - if span.name: - update["name"] = span.name - if span.parent_id: - update["parent_id"] = span.parent_id - if span.start_time: - update["start_time"] = span.start_time.isoformat() - if span.end_time is not None: - update["end_time"] = span.end_time.isoformat() - if span.input is not None: - update["input"] = span.input - if span.output is not None: - update["output"] = span.output - if span.data is not None: - update["data"] = span.data - - self.client.spans.update( - span.id, - **span.model_dump( - mode="json", - exclude={"id"}, - exclude_defaults=True, - exclude_none=True, - 
exclude_unset=True, - ), - ) - - @override - def shutdown(self) -> None: - pass - - -class AgentexAsyncTracingProcessor(AsyncTracingProcessor): - def __init__(self, config: AgentexTracingProcessorConfig): # noqa: ARG002 - self.client = create_async_agentex_client() - - @override - async def on_span_start(self, span: Span) -> None: - await self.client.spans.create( - name=span.name, - start_time=span.start_time, - end_time=span.end_time, - id=span.id, - trace_id=span.trace_id, - parent_id=span.parent_id, - input=span.input, - output=span.output, - data=span.data, - ) - - @override - async def on_span_end(self, span: Span) -> None: - update: Dict[str, Any] = {} - if span.trace_id: - update["trace_id"] = span.trace_id - if span.name: - update["name"] = span.name - if span.parent_id: - update["parent_id"] = span.parent_id - if span.start_time: - update["start_time"] = span.start_time.isoformat() - if span.end_time: - update["end_time"] = span.end_time.isoformat() - if span.input: - update["input"] = span.input - if span.output: - update["output"] = span.output - if span.data: - update["data"] = span.data - - await self.client.spans.update( - span.id, - **span.model_dump( - mode="json", - exclude={"id"}, - exclude_defaults=True, - exclude_none=True, - exclude_unset=True, - ), - ) - - @override - async def shutdown(self) -> None: - pass diff --git a/src/agentex/lib/core/tracing/processors/sgp_tracing_processor.py b/src/agentex/lib/core/tracing/processors/sgp_tracing_processor.py deleted file mode 100644 index 8a298121..00000000 --- a/src/agentex/lib/core/tracing/processors/sgp_tracing_processor.py +++ /dev/null @@ -1,160 +0,0 @@ -from typing import override - -import scale_gp_beta.lib.tracing as tracing -from scale_gp_beta import SGPClient, AsyncSGPClient -from scale_gp_beta.lib.tracing import create_span, flush_queue -from scale_gp_beta.lib.tracing.span import Span as SGPSpan - -from agentex.types.span import Span -from agentex.lib.types.tracing import SGPTracingProcessorConfig -from agentex.lib.utils.logging import make_logger -from agentex.lib.environment_variables import EnvironmentVariables -from agentex.lib.core.tracing.processors.tracing_processor_interface import ( - SyncTracingProcessor, - AsyncTracingProcessor, -) - -logger = make_logger(__name__) - - -class SGPSyncTracingProcessor(SyncTracingProcessor): - def __init__(self, config: SGPTracingProcessorConfig): - disabled = config.sgp_api_key == "" or config.sgp_account_id == "" - tracing.init( - SGPClient( - api_key=config.sgp_api_key, - account_id=config.sgp_account_id, - base_url=config.sgp_base_url, - ), - disabled=disabled, - ) - self._spans: dict[str, SGPSpan] = {} - self.env_vars = EnvironmentVariables.refresh() - - def _add_source_to_span(self, span: Span) -> None: - if span.data is None: - span.data = {} - if isinstance(span.data, dict): - span.data["__source__"] = "agentex" - if self.env_vars.ACP_TYPE is not None: - span.data["__acp_type__"] = self.env_vars.ACP_TYPE - if self.env_vars.AGENT_NAME is not None: - span.data["__agent_name__"] = self.env_vars.AGENT_NAME - if self.env_vars.AGENT_ID is not None: - span.data["__agent_id__"] = self.env_vars.AGENT_ID - - @override - def on_span_start(self, span: Span) -> None: - self._add_source_to_span(span) - - sgp_span = create_span( - name=span.name, - span_id=span.id, - parent_id=span.parent_id, - trace_id=span.trace_id, - input=span.input, - output=span.output, - metadata=span.data, - ) - sgp_span.start_time = span.start_time.isoformat() # type: ignore[union-attr] - 
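# flush(blocking=False) presumably enqueues the span on the SGP client's
# background queue rather than uploading inline, so starting a span adds
# no upload latency to the traced code path; shutdown() drains whatever
# is still pending via flush_queue().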
sgp_span.flush(blocking=False) - - self._spans[span.id] = sgp_span - - @override - def on_span_end(self, span: Span) -> None: - sgp_span = self._spans.get(span.id) - if sgp_span is None: - logger.warning( - f"Span {span.id} not found in stored spans, skipping span end" - ) - return - - self._add_source_to_span(span) - sgp_span.output = span.output # type: ignore[assignment] - sgp_span.metadata = span.data # type: ignore[assignment] - sgp_span.end_time = span.end_time.isoformat() # type: ignore[union-attr] - sgp_span.flush(blocking=False) - - @override - def shutdown(self) -> None: - self._spans.clear() - flush_queue() - - -class SGPAsyncTracingProcessor(AsyncTracingProcessor): - def __init__(self, config: SGPTracingProcessorConfig): - self.disabled = config.sgp_api_key == "" or config.sgp_account_id == "" - self._spans: dict[str, SGPSpan] = {} - self.sgp_async_client = ( - AsyncSGPClient( - api_key=config.sgp_api_key, - account_id=config.sgp_account_id, - base_url=config.sgp_base_url, - ) - if not self.disabled - else None - ) - self.env_vars = EnvironmentVariables.refresh() - - def _add_source_to_span(self, span: Span) -> None: - if span.data is None: - span.data = {} - if isinstance(span.data, dict): - span.data["__source__"] = "agentex" - if self.env_vars.ACP_TYPE is not None: - span.data["__acp_type__"] = self.env_vars.ACP_TYPE - if self.env_vars.AGENT_NAME is not None: - span.data["__agent_name__"] = self.env_vars.AGENT_NAME - if self.env_vars.AGENT_ID is not None: - span.data["__agent_id__"] = self.env_vars.AGENT_ID - - @override - async def on_span_start(self, span: Span) -> None: - self._add_source_to_span(span) - sgp_span = create_span( - name=span.name, - span_id=span.id, - parent_id=span.parent_id, - trace_id=span.trace_id, - input=span.input, - output=span.output, - metadata=span.data, - ) - sgp_span.start_time = span.start_time.isoformat() # type: ignore[union-attr] - - if self.disabled: - logger.warning("SGP is disabled, skipping span upsert") - return - await self.sgp_async_client.spans.upsert_batch( # type: ignore[union-attr] - items=[sgp_span.to_request_params()] - ) - - self._spans[span.id] = sgp_span - - @override - async def on_span_end(self, span: Span) -> None: - sgp_span = self._spans.get(span.id) - if sgp_span is None: - logger.warning( - f"Span {span.id} not found in stored spans, skipping span end" - ) - return - - self._add_source_to_span(span) - sgp_span.output = span.output # type: ignore[assignment] - sgp_span.metadata = span.data # type: ignore[assignment] - sgp_span.end_time = span.end_time.isoformat() # type: ignore[union-attr] - - if self.disabled: - return - await self.sgp_async_client.spans.upsert_batch( # type: ignore[union-attr] - items=[sgp_span.to_request_params()] - ) - - @override - async def shutdown(self) -> None: - await self.sgp_async_client.spans.upsert_batch( # type: ignore[union-attr] - items=[sgp_span.to_request_params() for sgp_span in self._spans.values()] - ) - self._spans.clear() diff --git a/src/agentex/lib/core/tracing/processors/tracing_processor_interface.py b/src/agentex/lib/core/tracing/processors/tracing_processor_interface.py deleted file mode 100644 index 4ab85dcf..00000000 --- a/src/agentex/lib/core/tracing/processors/tracing_processor_interface.py +++ /dev/null @@ -1,40 +0,0 @@ -from abc import ABC, abstractmethod - -from agentex.types.span import Span -from agentex.lib.types.tracing import TracingProcessorConfig - - -class SyncTracingProcessor(ABC): - @abstractmethod - def __init__(self, config: TracingProcessorConfig): - 
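# The interface pins down only the constructor shape: each concrete
# processor is built from a single TracingProcessorConfig, which is what
# lets TracingProcessorManager instantiate its registry entries
# generically (the manager calls the registered class with the config).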
pass - - @abstractmethod - def on_span_start(self, span: Span) -> None: - pass - - @abstractmethod - def on_span_end(self, span: Span) -> None: - pass - - @abstractmethod - def shutdown(self) -> None: - pass - - -class AsyncTracingProcessor(ABC): - @abstractmethod - def __init__(self, config: TracingProcessorConfig): - pass - - @abstractmethod - async def on_span_start(self, span: Span) -> None: - pass - - @abstractmethod - async def on_span_end(self, span: Span) -> None: - pass - - @abstractmethod - async def shutdown(self) -> None: - pass diff --git a/src/agentex/lib/core/tracing/trace.py b/src/agentex/lib/core/tracing/trace.py deleted file mode 100644 index 2ba1d489..00000000 --- a/src/agentex/lib/core/tracing/trace.py +++ /dev/null @@ -1,313 +0,0 @@ -from __future__ import annotations - -import uuid -import asyncio -from typing import Any, AsyncGenerator -from datetime import UTC, datetime -from contextlib import contextmanager, asynccontextmanager - -from pydantic import BaseModel - -from agentex import Agentex, AsyncAgentex -from agentex.types.span import Span -from agentex.lib.utils.logging import make_logger -from agentex.lib.utils.model_utils import recursive_model_dump -from agentex.lib.core.tracing.processors.tracing_processor_interface import ( - SyncTracingProcessor, - AsyncTracingProcessor, -) - -logger = make_logger(__name__) - - -class Trace: - """ - Trace is a wrapper around the Agentex API for tracing. - It provides a context manager for spans and a way to start and end spans. - It also provides a way to get spans by ID and list all spans in a trace. - """ - - def __init__( - self, - processors: list[SyncTracingProcessor], - client: Agentex, - trace_id: str | None = None, - ): - """ - Initialize a new trace with the specified trace ID. - - Args: - trace_id: Required trace ID to use for this trace. - processors: Optional list of tracing processors to use for this trace. - """ - self.processors = processors - self.client = client - self.trace_id = trace_id - - def start_span( - self, - name: str, - parent_id: str | None = None, - input: dict[str, Any] | list[dict[str, Any]] | BaseModel | None = None, - data: dict[str, Any] | list[dict[str, Any]] | BaseModel | None = None, - ) -> Span: - """ - Start a new span and register it with the API. - - Args: - name: Name of the span. - parent_id: Optional parent span ID. - input: Optional input data for the span. - data: Optional additional data for the span. - - Returns: - The newly created span. - """ - - if not self.trace_id: - raise ValueError("Trace ID is required to start a span") - - # Create a span using the client's spans resource - start_time = datetime.now(UTC) - - serialized_input = recursive_model_dump(input) if input else None - serialized_data = recursive_model_dump(data) if data else None - id = str(uuid.uuid4()) - - span = Span( - id=id, - trace_id=self.trace_id, - name=name, - parent_id=parent_id, - start_time=start_time, - input=serialized_input, - data=serialized_data, - ) - - for processor in self.processors: - processor.on_span_start(span) - - return span - - def end_span( - self, - span: Span, - ) -> Span: - """ - End a span by updating it with any changes made to the span object. - - Args: - span: The span object to update. - - Returns: - The updated span. 
- """ - if span.end_time is None: - span.end_time = datetime.now(UTC) - - span.input = recursive_model_dump(span.input) if span.input else None - span.output = recursive_model_dump(span.output) if span.output else None - span.data = recursive_model_dump(span.data) if span.data else None - - for processor in self.processors: - processor.on_span_end(span) - - return span - - def get_span(self, span_id: str) -> Span: - """ - Get a span by ID. - - Args: - span_id: The ID of the span to get. - - Returns: - The requested span. - """ - # Query from Agentex API - span = self.client.spans.retrieve(span_id) - return span - - def list_spans(self) -> list[Span]: - """ - List all spans in this trace. - - Returns: - List of spans in this trace. - """ - # Query from Agentex API - spans = self.client.spans.list(trace_id=self.trace_id) - return spans - - @contextmanager - def span( - self, - name: str, - parent_id: str | None = None, - input: dict[str, Any] | list[dict[str, Any]] | BaseModel | None = None, - data: dict[str, Any] | list[dict[str, Any]] | BaseModel | None = None, - ): - """ - Context manager for spans. - If trace_id is falsy, acts as a no-op context manager. - """ - if not self.trace_id: - yield None - return - span = self.start_span(name, parent_id, input, data) - try: - yield span - finally: - self.end_span(span) - - -class AsyncTrace: - """ - AsyncTrace is a wrapper around the Agentex API for tracing. - It provides a context manager for spans and a way to start and end spans. - It also provides a way to get spans by ID and list all spans in a trace. - """ - - def __init__( - self, - processors: list[AsyncTracingProcessor], - client: AsyncAgentex, - trace_id: str | None = None, - ): - """ - Initialize a new trace with the specified trace ID. - - Args: - trace_id: Required trace ID to use for this trace. - processors: Optional list of tracing processors to use for this trace. - """ - self.processors = processors - self.client = client - self.trace_id = trace_id - - async def start_span( - self, - name: str, - parent_id: str | None = None, - input: dict[str, Any] | list[dict[str, Any]] | BaseModel | None = None, - data: dict[str, Any] | list[dict[str, Any]] | BaseModel | None = None, - ) -> Span: - """ - Start a new span and register it with the API. - - Args: - name: Name of the span. - parent_id: Optional parent span ID. - input: Optional input data for the span. - data: Optional additional data for the span. - - Returns: - The newly created span. - """ - if not self.trace_id: - raise ValueError("Trace ID is required to start a span") - - # Create a span using the client's spans resource - start_time = datetime.now(UTC) - - serialized_input = recursive_model_dump(input) if input else None - serialized_data = recursive_model_dump(data) if data else None - id = str(uuid.uuid4()) - - span = Span( - id=id, - trace_id=self.trace_id, - name=name, - parent_id=parent_id, - start_time=start_time, - input=serialized_input, - data=serialized_data, - ) - - if self.processors: - await asyncio.gather( - *[processor.on_span_start(span) for processor in self.processors] - ) - - return span - - async def end_span( - self, - span: Span, - ) -> Span: - """ - End a span by updating it with any changes made to the span object. - - Args: - span: The span object to update. - - Returns: - The updated span. 
- """ - if span.end_time is None: - span.end_time = datetime.now(UTC) - - span.input = recursive_model_dump(span.input) if span.input else None - span.output = recursive_model_dump(span.output) if span.output else None - span.data = recursive_model_dump(span.data) if span.data else None - - if self.processors: - await asyncio.gather( - *[processor.on_span_end(span) for processor in self.processors] - ) - - return span - - async def get_span(self, span_id: str) -> Span: - """ - Get a span by ID. - - Args: - span_id: The ID of the span to get. - - Returns: - The requested span. - """ - # Query from Agentex API - span = await self.client.spans.retrieve(span_id) - return span - - async def list_spans(self) -> list[Span]: - """ - List all spans in this trace. - - Returns: - List of spans in this trace. - """ - # Query from Agentex API - spans = await self.client.spans.list(trace_id=self.trace_id) - return spans - - @asynccontextmanager - async def span( - self, - name: str, - parent_id: str | None = None, - input: dict[str, Any] | list[dict[str, Any]] | BaseModel | None = None, - data: dict[str, Any] | list[dict[str, Any]] | BaseModel | None = None, - ) -> AsyncGenerator[Span | None, None]: - """ - Context manager for spans. - - Args: - name: Name of the span. - parent_id: Optional parent span ID. - input: Optional input data for the span. - data: Optional additional data for the span. - - Yields: - The span object. - """ - if not self.trace_id: - yield None - return - span = await self.start_span(name, parent_id, input, data) - try: - yield span - finally: - await self.end_span(span) diff --git a/src/agentex/lib/core/tracing/tracer.py b/src/agentex/lib/core/tracing/tracer.py deleted file mode 100644 index da77bec9..00000000 --- a/src/agentex/lib/core/tracing/tracer.py +++ /dev/null @@ -1,72 +0,0 @@ -from __future__ import annotations - -from agentex import Agentex, AsyncAgentex -from agentex.lib.core.tracing.trace import Trace, AsyncTrace -from agentex.lib.core.tracing.tracing_processor_manager import ( - get_sync_tracing_processors, - get_async_tracing_processors, -) - - -class Tracer: - """ - Tracer is the main entry point for tracing in Agentex. - It manages the client connection and creates traces. - """ - - def __init__(self, client: Agentex): - """ - Initialize a new sync tracer with the provided client. - - Args: - client: Agentex client instance used for API communication. - """ - self.client = client - - def trace(self, trace_id: str | None = None) -> Trace: - """ - Create a new trace with the given trace ID. - - Args: - trace_id: The trace ID to use. - - Returns: - A new Trace instance. - """ - return Trace( - processors=get_sync_tracing_processors(), - client=self.client, - trace_id=trace_id, - ) - - -class AsyncTracer: - """ - AsyncTracer is the async version of Tracer. - It manages the async client connection and creates async traces. - """ - - def __init__(self, client: AsyncAgentex): - """ - Initialize a new async tracer with the provided client. - - Args: - client: AsyncAgentex client instance used for API communication. - """ - self.client = client - - def trace(self, trace_id: str | None = None) -> AsyncTrace: - """ - Create a new trace with the given trace ID. - - Args: - trace_id: The trace ID to use. - - Returns: - A new AsyncTrace instance. 
- """ - return AsyncTrace( - processors=get_async_tracing_processors(), - client=self.client, - trace_id=trace_id, - ) diff --git a/src/agentex/lib/core/tracing/tracing_processor_manager.py b/src/agentex/lib/core/tracing/tracing_processor_manager.py deleted file mode 100644 index 14b0ce39..00000000 --- a/src/agentex/lib/core/tracing/tracing_processor_manager.py +++ /dev/null @@ -1,94 +0,0 @@ -from __future__ import annotations - -from typing import TYPE_CHECKING -from threading import Lock - -from agentex.lib.types.tracing import TracingProcessorConfig, AgentexTracingProcessorConfig -from agentex.lib.core.tracing.processors.sgp_tracing_processor import ( - SGPSyncTracingProcessor, - SGPAsyncTracingProcessor, -) -from agentex.lib.core.tracing.processors.tracing_processor_interface import ( - SyncTracingProcessor, - AsyncTracingProcessor, -) - -if TYPE_CHECKING: - from agentex.lib.core.tracing.processors.agentex_tracing_processor import ( # noqa: F401 - AgentexSyncTracingProcessor, - AgentexAsyncTracingProcessor, - ) - - -class TracingProcessorManager: - def __init__(self): - # Mapping of processor config type to processor class - # Use lazy loading for agentex processors to avoid circular imports - self.sync_config_registry: dict[str, type[SyncTracingProcessor]] = { - "sgp": SGPSyncTracingProcessor, - } - self.async_config_registry: dict[str, type[AsyncTracingProcessor]] = { - "sgp": SGPAsyncTracingProcessor, - } - # Cache for processors - self.sync_processors: list[SyncTracingProcessor] = [] - self.async_processors: list[AsyncTracingProcessor] = [] - self.lock = Lock() - self._agentex_registered = False - - def _ensure_agentex_registered(self): - """Lazily register agentex processors to avoid circular imports.""" - if not self._agentex_registered: - from agentex.lib.core.tracing.processors.agentex_tracing_processor import ( - AgentexSyncTracingProcessor, - AgentexAsyncTracingProcessor, - ) - self.sync_config_registry["agentex"] = AgentexSyncTracingProcessor - self.async_config_registry["agentex"] = AgentexAsyncTracingProcessor - self._agentex_registered = True - - def add_processor_config(self, processor_config: TracingProcessorConfig) -> None: - with self.lock: - self._ensure_agentex_registered() - sync_processor = self.sync_config_registry[processor_config.type] - async_processor = self.async_config_registry[processor_config.type] - self.sync_processors.append(sync_processor(processor_config)) - self.async_processors.append(async_processor(processor_config)) - - def set_processor_configs(self, processor_configs: list[TracingProcessorConfig]): - with self.lock: - for processor_config in processor_configs: - self.add_processor_config(processor_config) - - def get_sync_processors(self) -> list[SyncTracingProcessor]: - return self.sync_processors - - def get_async_processors(self) -> list[AsyncTracingProcessor]: - return self.async_processors - - -# Global instance -GLOBAL_TRACING_PROCESSOR_MANAGER = TracingProcessorManager() - -add_tracing_processor_config = GLOBAL_TRACING_PROCESSOR_MANAGER.add_processor_config -set_tracing_processor_configs = GLOBAL_TRACING_PROCESSOR_MANAGER.set_processor_configs - -# Lazy initialization to avoid circular imports -_default_initialized = False - -def _ensure_default_initialized(): - """Ensure default processor is initialized (lazy to avoid circular imports).""" - global _default_initialized - if not _default_initialized: - add_tracing_processor_config(AgentexTracingProcessorConfig()) - _default_initialized = True - -def get_sync_tracing_processors(): - 
"""Get sync processors, initializing defaults if needed.""" - _ensure_default_initialized() - return GLOBAL_TRACING_PROCESSOR_MANAGER.get_sync_processors() - -def get_async_tracing_processors(): - """Get async processors, initializing defaults if needed.""" - _ensure_default_initialized() - return GLOBAL_TRACING_PROCESSOR_MANAGER.get_async_processors() diff --git a/src/agentex/lib/environment_variables.py b/src/agentex/lib/environment_variables.py deleted file mode 100644 index cb534e5b..00000000 --- a/src/agentex/lib/environment_variables.py +++ /dev/null @@ -1,115 +0,0 @@ - -from __future__ import annotations - -import os -from enum import Enum -from pathlib import Path - -from dotenv import load_dotenv - -from agentex.lib.utils.logging import make_logger -from agentex.lib.utils.model_utils import BaseModel - -PROJECT_ROOT = Path(__file__).resolve().parents[2] - -logger = make_logger(__name__) - - -class EnvVarKeys(str, Enum): - ENVIRONMENT = "ENVIRONMENT" - TEMPORAL_ADDRESS = "TEMPORAL_ADDRESS" - REDIS_URL = "REDIS_URL" - AGENTEX_BASE_URL = "AGENTEX_BASE_URL" - # Agent Identifiers - AGENT_NAME = "AGENT_NAME" - AGENT_DESCRIPTION = "AGENT_DESCRIPTION" - AGENT_ID = "AGENT_ID" - AGENT_API_KEY = "AGENT_API_KEY" - # ACP Configuration - ACP_URL = "ACP_URL" - ACP_PORT = "ACP_PORT" - ACP_TYPE = "ACP_TYPE" - # Workflow Configuration - WORKFLOW_NAME = "WORKFLOW_NAME" - WORKFLOW_TASK_QUEUE = "WORKFLOW_TASK_QUEUE" - # Temporal Worker Configuration - HEALTH_CHECK_PORT = "HEALTH_CHECK_PORT" - # Auth Configuration - AUTH_PRINCIPAL_B64 = "AUTH_PRINCIPAL_B64" - # Build Information - BUILD_INFO_PATH = "BUILD_INFO_PATH" - AGENT_INPUT_TYPE = "AGENT_INPUT_TYPE" - # Claude Agents SDK Configuration - ANTHROPIC_API_KEY = "ANTHROPIC_API_KEY" - CLAUDE_WORKSPACE_ROOT = "CLAUDE_WORKSPACE_ROOT" - - -class Environment(str, Enum): - LOCAL = "local" - DEV = "development" - STAGING = "staging" - PROD = "production" - - -refreshed_environment_variables: EnvironmentVariables | None = None - - -class EnvironmentVariables(BaseModel): - ENVIRONMENT: str = Environment.DEV - TEMPORAL_ADDRESS: str | None = "localhost:7233" - REDIS_URL: str | None = None - AGENTEX_BASE_URL: str | None = "http://localhost:5003" - # Agent Identifiers - AGENT_NAME: str - AGENT_DESCRIPTION: str | None = None - AGENT_ID: str | None = None - AGENT_API_KEY: str | None = None - ACP_TYPE: str | None = "async" - AGENT_INPUT_TYPE: str | None = None - # ACP Configuration - ACP_URL: str - ACP_PORT: int = 8000 - # Workflow Configuration - WORKFLOW_TASK_QUEUE: str | None = None - WORKFLOW_NAME: str | None = None - # Temporal Worker Configuration - HEALTH_CHECK_PORT: int = 80 - # Auth Configuration - AUTH_PRINCIPAL_B64: str | None = None - # Build Information - BUILD_INFO_PATH: str | None = None - # Claude Agents SDK Configuration - ANTHROPIC_API_KEY: str | None = None - CLAUDE_WORKSPACE_ROOT: str | None = None # Defaults to project/workspace if not set - - @classmethod - def refresh(cls) -> EnvironmentVariables: - global refreshed_environment_variables - if refreshed_environment_variables is not None: - return refreshed_environment_variables - - logger.info("Refreshing environment variables") - if os.environ.get(EnvVarKeys.ENVIRONMENT) == Environment.DEV: - # Load global .env file first - global_env_path = PROJECT_ROOT / ".env" - if global_env_path.exists(): - logger.debug(f"Loading global environment variables FROM: {global_env_path}") - load_dotenv(dotenv_path=global_env_path, override=False) - - # Load local project .env.local file (takes precedence) - 
local_env_path = Path.cwd().parent / ".env.local" - if local_env_path.exists(): - logger.debug(f"Loading local environment variables FROM: {local_env_path}") - load_dotenv(dotenv_path=local_env_path, override=True) - - # Create kwargs dict with environment variables, using None for missing values - # Pydantic will use the default values when None is passed for optional fields - kwargs = {} - for key in EnvVarKeys: - env_value = os.environ.get(key.value) - if env_value is not None: - kwargs[key.value] = env_value - - environment_variables = EnvironmentVariables(**kwargs) - refreshed_environment_variables = environment_variables - return refreshed_environment_variables diff --git a/src/agentex/lib/py.typed b/src/agentex/lib/py.typed deleted file mode 100644 index e69de29b..00000000 diff --git a/src/agentex/lib/sdk/__init__.py b/src/agentex/lib/sdk/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/agentex/lib/sdk/config/__init__.py b/src/agentex/lib/sdk/config/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/agentex/lib/sdk/config/agent_config.py b/src/agentex/lib/sdk/config/agent_config.py deleted file mode 100644 index c5f31994..00000000 --- a/src/agentex/lib/sdk/config/agent_config.py +++ /dev/null @@ -1,67 +0,0 @@ -from __future__ import annotations - -from typing import Any, Literal - -from pydantic import Field - -from agentex.lib.utils.logging import make_logger -from agentex.lib.types.credentials import CredentialMapping -from agentex.lib.utils.model_utils import BaseModel -from agentex.lib.types.agent_configs import TemporalConfig, TemporalWorkflowConfig - -logger = make_logger(__name__) - - -class AgentConfig(BaseModel): - name: str = Field( - ..., - description="The name of the agent.", - pattern=r"^[a-z0-9-]+$", - ) - acp_type: Literal["sync", "async", "agentic"] = Field(..., description="The type of agent.") - agent_input_type: Literal["text", "json"] | None = Field( - default=None, - description="The type of input the agent accepts." - ) - description: str = Field(..., description="The description of the agent.") - env: dict[str, str] | None = Field( - default=None, description="Environment variables to set directly in the agent deployment" - ) - credentials: list[CredentialMapping | dict[str, Any]] | None = Field( - default=None, - description="List of credential mappings to mount to the agent deployment. 
Supports both legacy format and new typed credentials.", - ) - temporal: TemporalConfig | None = Field( - default=None, description="Temporal workflow configuration for this agent" - ) - - def is_temporal_agent(self) -> bool: - """Check if this agent uses Temporal workflows""" - # Check temporal config with enabled flag - if self.temporal and self.temporal.enabled: - return True - return False - - def get_temporal_workflow_config(self) -> TemporalWorkflowConfig | None: - """Get temporal workflow configuration, checking both new and legacy formats""" - # Check new workflows list first - if self.temporal and self.temporal.enabled and self.temporal.workflows: - return self.temporal.workflows[0] # Return first workflow for backward compatibility - - # Check legacy single workflow - if self.temporal and self.temporal.enabled and self.temporal.workflow: - return self.temporal.workflow - - return None - - def get_temporal_workflows(self) -> list[TemporalWorkflowConfig]: - """Get all temporal workflow configurations""" - # Check new workflows list first - if self.temporal and self.temporal.enabled and self.temporal.workflows: - return self.temporal.workflows - - # Check legacy single workflow - if self.temporal and self.temporal.enabled and self.temporal.workflow: - return [self.temporal.workflow] - - return [] diff --git a/src/agentex/lib/sdk/config/agent_manifest.py b/src/agentex/lib/sdk/config/agent_manifest.py deleted file mode 100644 index 671a4b61..00000000 --- a/src/agentex/lib/sdk/config/agent_manifest.py +++ /dev/null @@ -1,238 +0,0 @@ -from __future__ import annotations - -import io -import time -import shutil -import tarfile -import tempfile -import subprocess -from typing import IO, Any -from pathlib import Path -from contextlib import contextmanager -from collections.abc import Iterator - -from pydantic import Field - -from agentex.lib.utils.logging import make_logger -from agentex.lib.utils.model_utils import BaseModel -from agentex.lib.sdk.config.agent_config import AgentConfig -from agentex.lib.sdk.config.build_config import BuildConfig -from agentex.lib.sdk.config.deployment_config import DeploymentConfig -from agentex.lib.sdk.config.environment_config import AgentEnvironmentsConfig -from agentex.lib.sdk.config.local_development_config import LocalDevelopmentConfig - -logger = make_logger(__name__) - - -class AgentManifest(BaseModel): - """ - Represents a manifest file that describes how to build and deploy an agent. - """ - - build: BuildConfig - agent: AgentConfig - local_development: LocalDevelopmentConfig | None = Field( - default=None, description="Configuration for local development" - ) - deployment: DeploymentConfig | None = Field( - default=None, description="Deployment configuration for the agent" - ) - - - def context_manager(self, build_context_root: Path) -> BuildContextManager: - """ - Creates a build context manager - """ - return BuildContextManager( - agent_manifest=self, build_context_root=build_context_root - ) - - def load_environments_config(self, manifest_dir: Path) -> "AgentEnvironmentsConfig | None": - """Load environments.yaml from same directory as manifest.yaml. 
- - Args: - manifest_dir: Directory containing manifest.yaml - - Returns: - AgentEnvironmentsConfig if environments.yaml exists, None otherwise - - Raises: - ValueError: If environments.yaml exists but is invalid - """ - # Import here to avoid circular imports - from agentex.lib.sdk.config.environment_config import load_environments_config_from_manifest_dir - - return load_environments_config_from_manifest_dir(manifest_dir) - - -class BuildContextManager: - """ - A gateway used to manage the build context for a docker image - """ - - def __init__(self, agent_manifest: AgentManifest, build_context_root: Path): - self.agent_manifest = agent_manifest - self.build_context_root = build_context_root - self._temp_dir: tempfile.TemporaryDirectory | None = None - - self.path: Path | None = None - self.dockerfile_path = "Dockerfile" - self.dockerignore_path = ".dockerignore" - self.directory_paths: list[Path] = [] - - def __enter__(self) -> BuildContextManager: - self._temp_dir = tempfile.TemporaryDirectory() - self.path = Path(self._temp_dir.name) - - dockerfile_path = ( - self.build_context_root / self.agent_manifest.build.context.dockerfile - ) - self.add_dockerfile(root_path=self.path, dockerfile_path=dockerfile_path) - - ignore_patterns = [] - if self.agent_manifest.build.context.dockerignore: - dockerignore_path = ( - self.build_context_root / self.agent_manifest.build.context.dockerignore - ) - self.add_dockerignore( - root_path=self.path, dockerignore_path=dockerignore_path - ) - ignore_patterns = _extract_dockerignore_patterns(dockerignore_path) - - for directory in self.agent_manifest.build.context.include_paths: - directory_path = self.build_context_root / directory - self.add_directory( - root_path=self.path, - directory_path=directory_path, - context_root=self.build_context_root, - ignore_patterns=ignore_patterns, - ) - - return self - - def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: - if self._temp_dir: - self._temp_dir.cleanup() - - def add_dockerfile(self, root_path: Path, dockerfile_path: Path) -> None: - """ - Copies a dockerfile to the temporary context directory root - """ - shutil.copy2(dockerfile_path, root_path / self.dockerfile_path) - - def add_dockerignore(self, root_path: Path, dockerignore_path: Path) -> None: - """ - Copies a dockerignore to the temporary context directory root - """ - shutil.copy2(str(dockerignore_path), root_path / self.dockerignore_path) - - def add_directory( - self, - root_path: Path, - directory_path: Path, - context_root: Path, - ignore_patterns: list[str] | None = None, - ) -> None: - """ - Copies a directory to the temporary context directory root while maintaining its relative - path to the context root. - """ - directory_copy_start_time = time.time() - last_log_time = directory_copy_start_time - - def copy_function_with_progress(src, dst): - nonlocal directory_copy_start_time - nonlocal last_log_time - logger.info(f"Adding {src} to build context...") - shutil.copy2(src, dst) - current_time = time.time() - time_elapsed = current_time - directory_copy_start_time - - if time_elapsed > 1 and current_time - last_log_time >= 1: - logger.info( - f"Time elapsed copying ({directory_path}): {time_elapsed} " - f"seconds" - ) - last_log_time = current_time - if time_elapsed > 5: - logger.warning( - f"This may take a while... " - f"Consider adding {directory_path} or {src} to your .dockerignore file." 
- ) - - directory_path_relative_to_root = directory_path.relative_to(context_root) - all_ignore_patterns = [f"{root_path}*"] - if ignore_patterns: - all_ignore_patterns += ignore_patterns - shutil.copytree( - src=directory_path, - dst=root_path / directory_path_relative_to_root, - ignore=shutil.ignore_patterns(*all_ignore_patterns), - dirs_exist_ok=True, - copy_function=copy_function_with_progress, - ) - self.directory_paths.append(directory_path_relative_to_root) - - @contextmanager - def zip_stream(self, root_path: Path | None = None) -> Iterator[IO[bytes]]: - """ - Creates a tar archive of the temporary context directory - and returns a stream of the archive. - """ - if not root_path: - raise ValueError("root_path must be provided") - context = str(root_path.absolute()) - folders_to_include = "." - tar_command = ["tar", "-C", context, "-cf", "-"] - tar_command.extend(folders_to_include) - - logger.info(f"Creating archive: {' '.join(tar_command)}") - - with subprocess.Popen( - tar_command, - stdout=subprocess.PIPE, - stderr=subprocess.DEVNULL, - ) as proc: - assert proc.stdout is not None - try: - yield proc.stdout - finally: - pass - - @staticmethod - @contextmanager - def zipped(root_path: Path | None = None) -> Iterator[IO[bytes]]: - """ - Creates a tar.gz archive of the temporary context directory - and returns a stream of the archive. - """ - if not root_path: - raise ValueError("root_path must be provided") - - tar_buffer = io.BytesIO() - - with tarfile.open(fileobj=tar_buffer, mode="w:gz") as tar_file: - for path in Path(root_path).rglob( - "*" - ): # Recursively add files to the tar.gz - if path.is_file(): # Ensure that we're only adding files - tar_file.add(path, arcname=path.relative_to(root_path)) - - tar_buffer.seek(0) # Reset the buffer position to the beginning - yield tar_buffer - - -def _extract_dockerignore_patterns(dockerignore_path: Path) -> list[str]: - """ - Extracts glob patterns to ignore from the dockerignore into a list of patterns - :param dockerignore_path: Path to the dockerignore to extract patterns from - :return: List of glob patterns to ignore - :rtype: List[str] - """ - ignore_patterns = [] - with open(dockerignore_path) as file: - for line in file: - ignored_filepath = line.split("#", 1)[0].strip() - if ignored_filepath: - ignore_patterns.append(ignored_filepath) - return ignore_patterns diff --git a/src/agentex/lib/sdk/config/build_config.py b/src/agentex/lib/sdk/config/build_config.py deleted file mode 100644 index 96a7f92e..00000000 --- a/src/agentex/lib/sdk/config/build_config.py +++ /dev/null @@ -1,37 +0,0 @@ -from __future__ import annotations - -from pydantic import Field - -from agentex.lib.utils.model_utils import BaseModel - - -class BuildContext(BaseModel): - """ - Represents the context in which the Docker image should be built. - """ - - root: str = Field( - ..., - description="The root directory of the build context. Should be specified relative to the location of the " - "build config file.", - ) - include_paths: list[str] = Field( - default_factory=list, - description="The paths to include in the build context. Should be specified relative to the root directory.", - ) - dockerfile: str = Field( - ..., - description="The path to the Dockerfile. Should be specified relative to the root directory.", - ) - dockerignore: str | None = Field( - None, - description="The path to the .dockerignore file. 
Should be specified relative to the root directory.", - ) - - -class BuildConfig(BaseModel): - """ - Represents a configuration for building the action as a Docker image. - """ - - context: BuildContext diff --git a/src/agentex/lib/sdk/config/deployment_config.py b/src/agentex/lib/sdk/config/deployment_config.py deleted file mode 100644 index 1ba5b348..00000000 --- a/src/agentex/lib/sdk/config/deployment_config.py +++ /dev/null @@ -1,123 +0,0 @@ -from __future__ import annotations - -from typing import Any, Dict - -from pydantic import Field - -from agentex.lib.utils.model_utils import BaseModel - - -class ImageConfig(BaseModel): - """Configuration for container images""" - - repository: str = Field(..., description="Container image repository URL") - tag: str = Field(default="latest", description="Container image tag") - - -class ImagePullSecretConfig(BaseModel): - """Configuration for image pull secrets""" - - name: str = Field(..., description="Name of the image pull secret") - - -class ResourceRequirements(BaseModel): - """Resource requirements for containers""" - - cpu: str = Field( - default="500m", description="CPU request/limit (e.g., '500m', '1')" - ) - memory: str = Field( - default="1Gi", description="Memory request/limit (e.g., '1Gi', '512Mi')" - ) - - -class ResourceConfig(BaseModel): - """Resource configuration for containers""" - - requests: ResourceRequirements = Field( - default_factory=ResourceRequirements, description="Resource requests" - ) - limits: ResourceRequirements = Field( - default_factory=ResourceRequirements, description="Resource limits" - ) - - -class GlobalDeploymentConfig(BaseModel): - """Global deployment configuration that applies to all clusters""" - - agent: dict[str, str] = Field( - default_factory=dict, description="Agent metadata (name, description)" - ) - replicaCount: int = Field(default=1, description="Number of replicas to deploy") - resources: ResourceConfig = Field( - default_factory=ResourceConfig, description="Resource requirements" - ) - - -class DeploymentConfig(BaseModel): - """Main deployment configuration in the manifest""" - - image: ImageConfig = Field(..., description="Container image configuration") - imagePullSecrets: list[ImagePullSecretConfig] | None = Field( - default=None, description="Image pull secrets to use for the deployment" - ) - global_config: GlobalDeploymentConfig = Field( - default_factory=GlobalDeploymentConfig, - description="Global deployment settings", - alias="global", - ) - - class Config: - validate_by_name = True - - -class ClusterConfig(BaseModel): - """Per-cluster deployment overrides""" - - image: ImageConfig | None = Field( - default=None, description="Cluster-specific image overrides" - ) - replicaCount: int | None = Field( - default=None, description="Cluster-specific replica count" - ) - resources: ResourceConfig | None = Field( - default=None, description="Cluster-specific resource overrides" - ) - env: list[dict[str, str]] | None = Field( - default=None, description="Additional environment variables for this cluster" - ) - # Allow additional arbitrary overrides for advanced users - additional_overrides: dict[str, Any] | None = Field( - default=None, description="Additional helm chart value overrides" - ) - - -class AuthenticationConfig(BaseModel): - principal: Dict[str, Any] = Field(description="Principal used for authorization on registration") - - -class InjectedImagePullSecretValues(BaseModel): - """Values for image pull secrets""" - - registry: str = Field(..., description="Registry of the image 
pull secret") - username: str = Field(..., description="Username of the image pull secret") - password: str = Field(..., description="Password of the image pull secret") - email: str | None = Field( - default=None, description="Email of the image pull secret" - ) - - -class InjectedSecretsValues(BaseModel): - """Values for injected secrets""" - - # Defined as a dictionary because the names need to be unique - credentials: dict[str, Any] = Field( - default_factory=dict, description="Secrets to inject into the deployment" - ) - imagePullSecrets: dict[str, InjectedImagePullSecretValues] = Field( - default_factory=dict, - description="Image pull secrets to inject into the deployment", - ) - - class Config: - validate_by_name = True diff --git a/src/agentex/lib/sdk/config/environment_config.py b/src/agentex/lib/sdk/config/environment_config.py deleted file mode 100644 index 959e2683..00000000 --- a/src/agentex/lib/sdk/config/environment_config.py +++ /dev/null @@ -1,196 +0,0 @@ -""" -Environment-specific configuration models for agent deployments. - -This module provides Pydantic models for managing environment-specific -configurations that are separate from the main manifest.yaml file. -""" - -from __future__ import annotations - -from typing import Any, Dict, override -from pathlib import Path - -import yaml -from pydantic import Field, BaseModel, field_validator - -from agentex.lib.utils.model_utils import BaseModel as UtilsBaseModel - - -class AgentAuthConfig(BaseModel): - """Authentication configuration for an agent in a specific environment.""" - - principal: Dict[str, Any] = Field( - ..., - description="Principal configuration for agent authorization and registration" - ) - - @field_validator('principal') - @classmethod - def validate_principal_required_fields(cls, v: Any) -> Dict[str, Any]: - """Ensure principal has required fields for agent registration.""" - if not isinstance(v, dict): - raise ValueError("Principal must be a dictionary") - return v - - -class AgentKubernetesConfig(BaseModel): - """Kubernetes configuration for an agent in a specific environment.""" - - namespace: str = Field( - ..., - description="Kubernetes namespace where the agent will be deployed" - ) - - @field_validator('namespace') - @classmethod - def validate_namespace_format(cls, v: str) -> str: - """Ensure namespace follows Kubernetes naming conventions.""" - if not v or not v.strip(): - raise ValueError("Namespace cannot be empty") - - # Basic Kubernetes namespace validation - namespace = v.strip().lower() - if not namespace.replace('-', '').replace('.', '').isalnum(): - raise ValueError( - f"Namespace '{v}' must contain only lowercase letters, numbers, " - "hyphens, and periods" - ) - - if len(namespace) > 63: - raise ValueError(f"Namespace '{v}' cannot exceed 63 characters") - - return namespace - - -class AgentEnvironmentConfig(BaseModel): - """Complete configuration for an agent in a specific environment.""" - - kubernetes: AgentKubernetesConfig | None = Field( - default=None, - description="Kubernetes deployment configuration" - ) - auth: AgentAuthConfig = Field( - ..., - description="Authentication and authorization configuration" - ) - helm_repository_name: str = Field( - default="scale-egp", - description="Helm repository name for the environment" - ) - helm_repository_url: str = Field( - default="https://scale-egp-helm-charts-us-west-2.s3.amazonaws.com/charts", - description="Helm repository url for the environment" - ) - helm_overrides: Dict[str, Any] = Field( - default_factory=dict, - 
description="Helm chart value overrides for environment-specific tuning" - ) - - -class AgentEnvironmentsConfig(UtilsBaseModel): - """All environment configurations for an agent.""" - - schema_version: str = Field( - default="v1", - description="Schema version for validation and compatibility" - ) - environments: Dict[str, AgentEnvironmentConfig] = Field( - ..., - description="Environment-specific configurations (dev, prod, etc.)" - ) - - @field_validator('schema_version') - @classmethod - def validate_schema_version(cls, v: str) -> str: - """Ensure schema version is supported.""" - supported_versions = ['v1'] - if v not in supported_versions: - raise ValueError( - f"Schema version '{v}' not supported. " - f"Supported versions: {', '.join(supported_versions)}" - ) - return v - - @field_validator('environments') - @classmethod - def validate_environments_not_empty(cls, v: Dict[str, AgentEnvironmentConfig]) -> Dict[str, AgentEnvironmentConfig]: - """Ensure at least one environment is defined.""" - if not v: - raise ValueError("At least one environment must be defined") - return v - - def get_config_for_env(self, env_name: str) -> AgentEnvironmentConfig: - """Get configuration for a specific environment. - - Args: - env_name: Name of the environment (e.g., 'dev', 'prod') - - Returns: - AgentEnvironmentConfig for the specified environment - - Raises: - ValueError: If environment is not found - """ - if env_name not in self.environments: - available_envs = ', '.join(self.environments.keys()) - raise ValueError( - f"Environment '{env_name}' not found in environments.yaml. " - f"Available environments: {available_envs}" - ) - return self.environments[env_name] - - def list_environments(self) -> list[str]: - """Get list of all configured environment names.""" - return list(self.environments.keys()) - - @classmethod - @override - def from_yaml(cls, file_path: str) -> "AgentEnvironmentsConfig": - """Load configuration from environments.yaml file. - - Args: - file_path: Path to environments.yaml file - - Returns: - Parsed and validated AgentEnvironmentsConfig - - Raises: - FileNotFoundError: If file doesn't exist - ValueError: If file is invalid or doesn't validate - """ - path = Path(file_path) - if not path.exists(): - raise FileNotFoundError(f"environments.yaml not found: {file_path}") - - try: - with open(path, 'r') as f: - data = yaml.safe_load(f) - - if not data: - raise ValueError("environments.yaml file is empty") - - return cls.model_validate(data) - - except yaml.YAMLError as e: - raise ValueError(f"Invalid YAML format in {file_path}: {e}") from e - except Exception as e: - raise ValueError(f"Failed to load environments.yaml from {file_path}: {e}") from e - - -def load_environments_config_from_manifest_dir(manifest_dir: Path) -> AgentEnvironmentsConfig | None: - """Helper function to load environments.yaml from same directory as manifest.yaml. 
- - Args: - manifest_dir: Directory containing manifest.yaml - - Returns: - AgentEnvironmentsConfig if environments.yaml exists, None otherwise - - Raises: - ValueError: If environments.yaml exists but is invalid - """ - environments_file = manifest_dir / "environments.yaml" - if not environments_file.exists(): - return None - - return AgentEnvironmentsConfig.from_yaml(str(environments_file)) diff --git a/src/agentex/lib/sdk/config/local_development_config.py b/src/agentex/lib/sdk/config/local_development_config.py deleted file mode 100644 index 061500ab..00000000 --- a/src/agentex/lib/sdk/config/local_development_config.py +++ /dev/null @@ -1,58 +0,0 @@ -from __future__ import annotations - -from pathlib import Path - -from pydantic import Field, validator - -from agentex.lib.utils.model_utils import BaseModel - - -class LocalAgentConfig(BaseModel): - """Configuration for local agent development""" - - port: int = Field( - ..., - description="The port where the agent's ACP server is running locally", - gt=0, - lt=65536, - ) - host_address: str = Field( - default="host.docker.internal", - description="The host address where the agent's ACP server can be reached (e.g., host.docker.internal for Docker, localhost for direct)", - ) - - -class LocalPathsConfig(BaseModel): - """Configuration for local file paths""" - - acp: str = Field( - default="project/acp.py", - description="Path to the ACP server file. Can be relative to manifest directory or absolute.", - ) - worker: str | None = Field( - default=None, - description="Path to the temporal worker file. Can be relative to manifest directory or absolute. (only for temporal agents)", - ) - - @validator("acp", "worker") - def validate_path_format(cls, v): - """Validate that the path is a reasonable format""" - if v is None: - return v - - # Convert to Path to validate format - try: - Path(v) - except Exception as e: - raise ValueError(f"Invalid path format: {v}") from e - - return v - - -class LocalDevelopmentConfig(BaseModel): - """Configuration for local development environment""" - - agent: LocalAgentConfig = Field(..., description="Local agent configuration") - paths: LocalPathsConfig | None = Field( - default=None, description="File paths for local development" - ) diff --git a/src/agentex/lib/sdk/config/project_config.py b/src/agentex/lib/sdk/config/project_config.py deleted file mode 100644 index 0621ae37..00000000 --- a/src/agentex/lib/sdk/config/project_config.py +++ /dev/null @@ -1,105 +0,0 @@ -from __future__ import annotations - -import os -import re -from typing import Any, TypeVar -from pathlib import Path - -import yaml -from jinja2 import BaseLoader, Environment, TemplateError, StrictUndefined - -T = TypeVar("T") - - -class ConfigResolutionError(Exception): - def __init__(self, message: str) -> None: - super().__init__(message) - self.status_code = 400 - - -def _preprocess_template(template_str: str) -> str: - # Replace $env. and $variables. with unique internal names - return template_str.replace("{{ $env.", "{{ __special_env__.").replace( - "{{ $variables.", "{{ __special_variables__." - ) - - -def _extract_variables_section(raw_config_str: str) -> str: - # Use regex to extract the variables: ... 
block (YAML top-level) - match = re.search( - r"(^variables:.*?)(^config:|\Z)", raw_config_str, re.DOTALL | re.MULTILINE - ) - if not match: - return "" - return match.group(1) - - -def ProjectConfigLoader( - config_path: str, model: type[T] | None = None, env_path: str | None = None -) -> dict[str, Any] | T: - config_path_obj = Path(config_path) - env_path_obj = Path(env_path) if env_path else config_path_obj.parent / ".env" - env = _load_env(env_path_obj) - raw_config_str = _load_file_as_str(config_path_obj) - raw_config_str = _preprocess_template(raw_config_str) - - # Extract and render only the variables section - variables_section_str = _extract_variables_section(raw_config_str) - env_context = {"__special_env__": env, "__special_variables__": {}} - try: - env_only_template = Environment( - loader=BaseLoader(), - undefined=StrictUndefined, - keep_trailing_newline=True, - autoescape=False, - ).from_string(variables_section_str) - rendered_variables_yaml = env_only_template.render(**env_context) - variables_dict = yaml.safe_load(rendered_variables_yaml).get("variables", {}) - except Exception as e: - raise ConfigResolutionError(f"Error rendering variables with $env: {e}") from e - # Second pass: render the whole config with both __special_env__ and resolved __special_variables__ - full_context = {"__special_env__": env, "__special_variables__": variables_dict} - rendered_config_str = _jinja_render(raw_config_str, full_context) - try: - rendered_config = yaml.safe_load(rendered_config_str) - except Exception as e: - raise ConfigResolutionError(f"Error loading rendered YAML: {e}") from e - if "config" not in rendered_config: - raise ConfigResolutionError("Missing 'config' section in config file.") - config_section = rendered_config["config"] - if model is not None: - return model(**config_section) - return config_section - - -def _load_env(env_path: Path) -> dict[str, str]: - env = dict(os.environ) - if env_path.exists(): - with open(env_path) as f: - for line in f: - line = line.strip() - if not line or line.startswith("#"): - continue - if "=" in line: - k, v = line.split("=", 1) - env[k.strip()] = v.strip() - return env - - -def _load_file_as_str(path: Path) -> str: - with open(path) as f: - return f.read() - - -def _jinja_render(template_str: str, context: dict) -> str: - try: - env = Environment( - loader=BaseLoader(), - undefined=StrictUndefined, - keep_trailing_newline=True, - autoescape=False, - ) - template = env.from_string(template_str) - return template.render(**context) - except TemplateError as e: - raise ConfigResolutionError(f"Jinja template error: {e}") from e diff --git a/src/agentex/lib/sdk/config/validation.py b/src/agentex/lib/sdk/config/validation.py deleted file mode 100644 index 4b00a682..00000000 --- a/src/agentex/lib/sdk/config/validation.py +++ /dev/null @@ -1,250 +0,0 @@ -""" -Validation framework for agent configuration files. - -This module provides validation functions for agent configurations, -with clear error messages and best practices enforcement. 
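-
-Typical usage (an illustrative sketch; the file path is a placeholder):
-
-    from agentex.lib.sdk.config.validation import validate_environments_yaml_file
-
-    config = validate_environments_yaml_file("agents/my_agent/environments.yaml")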
-""" -from __future__ import annotations - -from typing import Any, Dict, List, Optional -from pathlib import Path - -from agentex.lib.utils.logging import make_logger -from agentex.lib.sdk.config.environment_config import AgentEnvironmentConfig, AgentEnvironmentsConfig - -logger = make_logger(__name__) - - -class ConfigValidationError(Exception): - """Exception raised when configuration validation fails.""" - - def __init__(self, message: str, file_path: Optional[str] = None): - self.file_path = file_path - super().__init__(message) - - -class EnvironmentsValidationError(ConfigValidationError): - """Exception raised when environments.yaml validation fails.""" - pass - - -def validate_environments_config( - environments_config: AgentEnvironmentsConfig, - required_environments: Optional[List[str]] = None -) -> None: - """ - Validate environments configuration with comprehensive checks. - - Args: - environments_config: The loaded environments configuration - required_environments: List of environment names that must be present - - Raises: - EnvironmentsValidationError: If validation fails - """ - # Check for required environments - if required_environments: - missing_envs: List[str] = [] - for env_name in required_environments: - if env_name not in environments_config.environments: - missing_envs.append(env_name) - - if missing_envs: - available_envs = list(environments_config.environments.keys()) - raise EnvironmentsValidationError( - f"Missing required environments: {', '.join(missing_envs)}. " - f"Available environments: {', '.join(available_envs)}" - ) - - # Validate each environment configuration - for env_name, env_config in environments_config.environments.items(): - try: - _validate_single_environment_config(env_name, env_config) - except Exception as e: - raise EnvironmentsValidationError( - f"Environment '{env_name}' configuration error: {str(e)}" - ) from e - - -def _validate_single_environment_config(env_name: str, env_config: AgentEnvironmentConfig) -> None: - """ - Validate a single environment configuration. - - Args: - env_name: Name of the environment - env_config: AgentEnvironmentConfig instance - - Raises: - ValueError: If validation fails - """ - # Validate namespace naming conventions if kubernetes config exists - if env_config.kubernetes and env_config.kubernetes.namespace: - namespace = env_config.kubernetes.namespace - - # Check for common namespace naming issues - if namespace != namespace.lower(): - logger.warning( - f"Namespace '{namespace}' contains uppercase letters. " - "Kubernetes namespaces should be lowercase." - ) - - if namespace.startswith('-') or namespace.endswith('-'): - raise ValueError( - f"Namespace '{namespace}' cannot start or end with hyphens" - ) - - # Validate auth principal - principal = env_config.auth.principal - if not principal.get('user_id'): - raise ValueError("Auth principal must contain non-empty 'user_id'") - - # Check for environment-specific user_id patterns - user_id = principal['user_id'] - if isinstance(user_id, str): - if not any(env_name.lower() in user_id.lower() for env_name in ['dev', 'prod', 'staging', env_name]): - logger.warning( - f"User ID '{user_id}' doesn't contain environment indicator. " - f"Consider including '{env_name}' in the user_id for clarity." - ) - - # Validate helm overrides if present - if env_config.helm_overrides: - _validate_helm_overrides(env_config.helm_overrides) - - -def _validate_helm_overrides(helm_overrides: Dict[str, Any]) -> None: - """ - Validate helm override configuration. 
-
-    Args:
-        helm_overrides: Dictionary of helm overrides
-
-    Raises:
-        ValueError: If validation fails
-    """
-    # Check for common helm override issues
-    if 'resources' in helm_overrides:
-        resources = helm_overrides['resources']
-        if isinstance(resources, dict):
-            # Validate resource format
-            if 'requests' in resources or 'limits' in resources:
-                for resource_type in ['requests', 'limits']:
-                    if resource_type in resources:
-                        resource_config: Any = resources[resource_type]
-                        if isinstance(resource_config, dict):
-                            # Check for valid resource specifications
-                            for key, value in resource_config.items():
-                                if key in ['cpu', 'memory'] and not isinstance(value, str):
-                                    logger.warning(
-                                        f"Resource {key} should be a string (e.g., '500m', '1Gi'), "
-                                        f"got {type(value).__name__}: {value}"
-                                    )
-
-
-def validate_environments_yaml_file(file_path: str) -> AgentEnvironmentsConfig:
-    """
-    Load and validate environments.yaml file.
-
-    Args:
-        file_path: Path to environments.yaml file
-
-    Returns:
-        Validated AgentEnvironmentsConfig
-
-    Raises:
-        EnvironmentsValidationError: If file is invalid
-    """
-    try:
-        environments_config = AgentEnvironmentsConfig.from_yaml(file_path)
-        validate_environments_config(environments_config)
-        return environments_config
-    except FileNotFoundError:
-        raise EnvironmentsValidationError(
-            f"environments.yaml not found: {file_path}\n\n"
-            "📋 Why required:\n"
-            "   Environment-specific settings (auth, namespace, resources)\n"
-            "   must be separated from global manifest for proper isolation.",
-            file_path=file_path
-        ) from None
-    except Exception as e:
-        raise EnvironmentsValidationError(
-            f"Invalid environments.yaml file: {str(e)}",
-            file_path=file_path
-        ) from e
-
-
-def validate_manifest_and_environments(
-    manifest_path: str,
-    required_environment: Optional[str] = None
-) -> tuple[str, AgentEnvironmentsConfig]:
-    """
-    Validate both manifest.yaml and environments.yaml files together.
-
-    Args:
-        manifest_path: Path to manifest.yaml file
-        required_environment: Specific environment that must be present
-
-    Returns:
-        Tuple of (manifest_path, environments_config)
-
-    Raises:
-        ConfigValidationError: If validation fails
-    """
-    manifest_file = Path(manifest_path)
-    if not manifest_file.exists():
-        raise ConfigValidationError(f"Manifest file not found: {manifest_path}")
-
-    # Look for environments.yaml in same directory
-    environments_file = manifest_file.parent / "environments.yaml"
-    environments_config = validate_environments_yaml_file(str(environments_file))
-
-    # Validate specific environment if requested
-    if required_environment:
-        validate_environments_config(
-            environments_config,
-            required_environments=[required_environment]
-        )
-
-    return manifest_path, environments_config
-
-
-def generate_helpful_error_message(error: Exception, context: str = "") -> str:
-    """
-    Generate helpful error message with troubleshooting tips.
-
-    Args:
-        error: The original exception
-        context: Additional context about where the error occurred
-
-    Returns:
-        Formatted error message with troubleshooting tips
-    """
-    base_msg = str(error)
-
-    if context:
-        base_msg = f"{context}: {base_msg}"
-
-    # Add troubleshooting tips based on error type
-    if isinstance(error, FileNotFoundError):
-        if "environments.yaml" in base_msg:
-            base_msg += (
-                "\n\n🔧 Troubleshooting:\n"
-                "1. Check file location: should be next to manifest.yaml\n"
-                "2. Verify file permissions"
Verify file permissions" - ) - elif "user_id" in base_msg.lower(): - base_msg += ( - "\n\n๐Ÿ’ก Auth Principal Tips:\n" - "- user_id should be unique per environment\n" - "- Include environment name (e.g., 'dev_my_agent')\n" - "- Use consistent naming convention across agents" - ) - elif "namespace" in base_msg.lower(): - base_msg += ( - "\n\n๐Ÿท๏ธ Namespace Tips:\n" - "- Use lowercase letters, numbers, and hyphens only\n" - "- Include team and environment (e.g., 'team-dev-agent')\n" - "- Keep under 63 characters" - ) - - return base_msg diff --git a/src/agentex/lib/sdk/fastacp/__init__.py b/src/agentex/lib/sdk/fastacp/__init__.py deleted file mode 100644 index b6986379..00000000 --- a/src/agentex/lib/sdk/fastacp/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from agentex.lib.sdk.fastacp.fastacp import FastACP - -__all__ = ["FastACP"] diff --git a/src/agentex/lib/sdk/fastacp/base/base_acp_server.py b/src/agentex/lib/sdk/fastacp/base/base_acp_server.py deleted file mode 100644 index b625eaa1..00000000 --- a/src/agentex/lib/sdk/fastacp/base/base_acp_server.py +++ /dev/null @@ -1,408 +0,0 @@ -from __future__ import annotations - -import uuid -import asyncio -import inspect -from typing import Any -from datetime import datetime -from contextlib import asynccontextmanager -from collections.abc import Callable, Awaitable, AsyncGenerator - -import uvicorn -from fastapi import FastAPI, Request -from pydantic import TypeAdapter, ValidationError -from fastapi.responses import StreamingResponse -from starlette.middleware.base import BaseHTTPMiddleware - -from agentex.lib.types.acp import ( - RPC_SYNC_METHODS, - PARAMS_MODEL_BY_METHOD, - RPCMethod, - SendEventParams, - CancelTaskParams, - CreateTaskParams, - SendMessageParams, -) -from agentex.lib.utils.logging import make_logger, ctx_var_request_id -from agentex.lib.types.json_rpc import JSONRPCError, JSONRPCRequest, JSONRPCResponse -from agentex.lib.utils.model_utils import BaseModel -from agentex.lib.utils.registration import register_agent - -# from agentex.lib.sdk.fastacp.types import BaseACPConfig -from agentex.lib.environment_variables import EnvironmentVariables, refreshed_environment_variables -from agentex.types.task_message_update import TaskMessageUpdate, StreamTaskMessageFull -from agentex.types.task_message_content import TaskMessageContent -from agentex.lib.sdk.fastacp.base.constants import ( - FASTACP_HEADER_SKIP_EXACT, - FASTACP_HEADER_SKIP_PREFIXES, -) - -logger = make_logger(__name__) - -# Create a TypeAdapter for TaskMessageUpdate validation -task_message_update_adapter = TypeAdapter(TaskMessageUpdate) - - -class RequestIDMiddleware(BaseHTTPMiddleware): - """Middleware to extract or generate request IDs and add them to logs and response headers""" - - async def dispatch(self, request: Request, call_next): # type: ignore[override] - # Extract request ID from header or generate a new one if there isn't one - request_id = request.headers.get("x-request-id") or uuid.uuid4().hex - # Store request ID in request state for access in handlers - ctx_var_request_id.set(request_id) - # Process request - response = await call_next(request) - return response - - -class BaseACPServer(FastAPI): - """ - AsyncAgentACP provides RPC-style hooks for agent events and commands asynchronously. - All methods follow JSON-RPC 2.0 format. 
-
-    Available methods:
-    - task/create  → Create a new task
-    - event/send   → Send an event to a task
-    - task/cancel  → Cancel a task
-    - message/send → Send a message and receive a response (optionally streaming)
-    """
-
-    def __init__(self):
-        super().__init__(lifespan=self.get_lifespan_function())
-
-        self.get("/healthz")(self._healthz)
-        self.post("/api")(self._handle_jsonrpc)
-
-        # Middleware that adds a request ID to the request and response headers
-        self.add_middleware(RequestIDMiddleware)
-        # Method handlers
-        self._handlers: dict[RPCMethod, Callable] = {}
-
-        # Agent info to return in healthz
-        self.agent_id: str | None = None
-
-    @classmethod
-    def create(cls):
-        """Create and initialize BaseACPServer instance"""
-        instance = cls()
-        instance._setup_handlers()
-        return instance
-
-    def _setup_handlers(self):
-        """Set up default handlers - override in subclasses"""
-        # Base class has no default handlers
-        pass
-
-    def get_lifespan_function(self):
-        @asynccontextmanager
-        async def lifespan_context(app: FastAPI):  # noqa: ARG001
-            env_vars = EnvironmentVariables.refresh()
-            if env_vars.AGENTEX_BASE_URL:
-                await register_agent(env_vars)
-                self.agent_id = env_vars.AGENT_ID
-            else:
-                logger.warning("AGENTEX_BASE_URL not set, skipping agent registration")
-
-            yield
-
-        return lifespan_context
-
-    async def _healthz(self):
-        """Health check endpoint"""
-        result = {"status": "healthy"}
-        if self.agent_id:
-            result["agent_id"] = self.agent_id
-        return result
-
-    def _wrap_handler(self, fn: Callable[..., Awaitable[Any]]):
-        """Wraps handler functions to provide JSON-RPC 2.0 response format"""
-
-        async def wrapper(*args, **kwargs) -> Any:
-            return await fn(*args, **kwargs)
-
-        return wrapper
-
-    async def _handle_jsonrpc(self, request: Request):
-        """Main JSON-RPC endpoint handler"""
-        rpc_request = None
-        logger.info(f"[base_acp_server] received request: {datetime.now()}")
-        try:
-            data = await request.json()
-            rpc_request = JSONRPCRequest(**data)
-
-            # Check if the request is authenticated
-            if refreshed_environment_variables and getattr(refreshed_environment_variables, "AGENT_API_KEY", None):
-                authorization_header = request.headers.get("x-agent-api-key")
-                if authorization_header != refreshed_environment_variables.AGENT_API_KEY:
-                    return JSONRPCResponse(
-                        id=rpc_request.id,
-                        error=JSONRPCError(code=-32601, message="Unauthorized"),
-                    )
-
-            # Check if the method is valid first
-            try:
-                method = RPCMethod(rpc_request.method)
-            except ValueError:
-                logger.error(f"Method {rpc_request.method} was invalid")
-                return JSONRPCResponse(
-                    id=rpc_request.id,
-                    error=JSONRPCError(
-                        code=-32601, message=f"Method {rpc_request.method} not found"
-                    ),
-                )
-
-            if method not in self._handlers or self._handlers[method] is None:
-                logger.error(f"Method {method} not found on existing ACP server")
-                return JSONRPCResponse(
-                    id=rpc_request.id,
-                    error=JSONRPCError(
-                        code=-32601, message=f"Method {method} not found"
-                    ),
-                )
-
-            # Extract application headers using allowlist approach (only x-* headers)
-            # Matches gateway's security filtering rules
-            # Forward filtered headers via params.request.headers to agent handlers
-            custom_headers = {
-                key: value
-                for key, value in request.headers.items()
-                if key.lower().startswith("x-")
-                and key.lower() not in FASTACP_HEADER_SKIP_EXACT
-                and not any(key.lower().startswith(p) for p in FASTACP_HEADER_SKIP_PREFIXES)
-            }
-
-            # Parse params into the appropriate model based on method and include headers
-            params_model = PARAMS_MODEL_BY_METHOD[method]
-            params_data = dict(rpc_request.params) if rpc_request.params else {}
-
-            # Add custom headers to the
request structure if any headers were provided - # Gateway sends filtered headers via HTTP, SDK extracts and populates params.request - if custom_headers: - params_data["request"] = {"headers": custom_headers} - params = params_model.model_validate(params_data) - - if method in RPC_SYNC_METHODS: - handler = self._handlers[method] - result = await handler(params) - - if rpc_request.id is None: - # Seems like you should return None for notifications - return None - else: - # Handle streaming vs non-streaming for MESSAGE_SEND - if method == RPCMethod.MESSAGE_SEND and isinstance( - result, AsyncGenerator - ): - return await self._handle_streaming_response( - rpc_request.id, result - ) - else: - if isinstance(result, BaseModel): - result = result.model_dump() - return JSONRPCResponse(id=rpc_request.id, result=result) - else: - # If this is a notification (no request ID), process in background and return immediately - if rpc_request.id is None: - asyncio.create_task(self._process_notification(method, params)) - return JSONRPCResponse(id=None) - - # For regular requests, start processing in background but return immediately - asyncio.create_task( - self._process_request(rpc_request.id, method, params) - ) - - # Return immediate acknowledgment - return JSONRPCResponse( - id=rpc_request.id, result={"status": "processing"} - ) - - except Exception as e: - logger.error(f"Error handling JSON-RPC request: {e}", exc_info=True) - request_id = None - if rpc_request is not None: - request_id = rpc_request.id - return JSONRPCResponse( - id=request_id, - error=JSONRPCError(code=-32603, message=str(e)).model_dump(), - ) - - async def _handle_streaming_response( - self, request_id: int | str, async_gen: AsyncGenerator - ): - """Handle streaming response by formatting TaskMessageUpdate objects as JSON-RPC stream""" - - async def generate_json_rpc_stream(): - try: - async for chunk in async_gen: - # Each chunk should be a TaskMessageUpdate object - # Validate using Pydantic's TypeAdapter to ensure it's a proper TaskMessageUpdate - try: - # This will validate that chunk conforms to the TaskMessageUpdate union type - validated_chunk = task_message_update_adapter.validate_python( - chunk - ) - # Use mode="json" to properly serialize datetime objects - chunk_data = validated_chunk.model_dump(mode="json") - except ValidationError as e: - raise TypeError( - f"Streaming chunks must be TaskMessageUpdate objects. 
Validation error: {e}" - ) from e - except Exception as e: - raise TypeError( - f"Streaming chunks must be TaskMessageUpdate objects, got {type(chunk)}: {e}" - ) from e - - # Wrap in JSON-RPC response format - response = JSONRPCResponse(id=request_id, result=chunk_data) - # Use model_dump_json() which handles datetime serialization automatically - yield f"{response.model_dump_json()}\n" - - except Exception as e: - logger.error(f"Error in streaming response: {e}", exc_info=True) - error_response = JSONRPCResponse( - id=request_id, - error=JSONRPCError(code=-32603, message=str(e)).model_dump(), - ) - yield f"{error_response.model_dump_json()}\n" - - return StreamingResponse( - generate_json_rpc_stream(), - media_type="application/x-ndjson", # Newline Delimited JSON - headers={ - "Cache-Control": "no-cache", - "Connection": "keep-alive", - "X-Accel-Buffering": "no", # Disable nginx buffering - }, - ) - - async def _process_notification(self, method: RPCMethod, params: Any): - """Process a notification (request with no ID) in the background""" - try: - handler = self._handlers[method] - await handler(params) - except Exception as e: - logger.error(f"Error processing notification {method}: {e}", exc_info=True) - - async def _process_request( - self, request_id: int | str, method: RPCMethod, params: Any - ): - """Process a request in the background""" - try: - handler = self._handlers[method] - await handler(params) - # Note: In a real implementation, you might want to store the result somewhere - # or notify the client through a different mechanism - logger.info( - f"Successfully processed request {request_id} for method {method}" - ) - except Exception as e: - logger.error( - f"Error processing request {request_id} for method {method}: {e}", - exc_info=True, - ) - - """ - Define all possible decorators to be overriden and implemented by each ACP implementation - Then the users can override the default handlers by implementing their own handlers - - ACP Type: Async - Decorators: - - on_task_create - - on_task_event_send - - on_task_cancel - - ACP Type: Sync - Decorators: - - on_message_send - """ - - # Type: Async - def on_task_create(self, fn: Callable[[CreateTaskParams], Awaitable[Any]]): - """Handle task/init method""" - wrapped = self._wrap_handler(fn) - self._handlers[RPCMethod.TASK_CREATE] = wrapped - return fn - - # Type: Async - def on_task_event_send(self, fn: Callable[[SendEventParams], Awaitable[Any]]): - """Handle event/send method""" - - async def wrapped_handler(params: SendEventParams): - # # # Send message to client first most of the time - # ## But, sometimes you may want to process the message first - # ## and then send a message to the client - # await agentex.interactions.send_messages_to_client( - # task_id=params.task_id, - # messages=[params.message] - # ) - return await fn(params) - - wrapped = self._wrap_handler(wrapped_handler) - self._handlers[RPCMethod.EVENT_SEND] = wrapped - return fn - - # Type: Async - def on_task_cancel(self, fn: Callable[[CancelTaskParams], Awaitable[Any]]): - """Handle task/cancel method""" - wrapped = self._wrap_handler(fn) - self._handlers[RPCMethod.TASK_CANCEL] = wrapped - return fn - - # Type: Sync - def on_message_send( - self, - fn: Callable[ - [SendMessageParams], - Awaitable[TaskMessageContent | list[TaskMessageContent] | AsyncGenerator[TaskMessageUpdate, None]], - ], - ): - """Handle message/send method - supports both single and streaming responses - - For non-streaming: return a single TaskMessage - For streaming: return an 
-        """
-
-        async def message_send_wrapper(params: SendMessageParams):
-            """Special wrapper for message_send that handles both regular async functions and async generators"""
-            # Check if the function is an async generator function.
-            #
-            # Regardless of whether the agent developer implemented an async generator
-            # or not, we always turn the function into an async generator and yield SSE
-            # events back to the Agentex server, so there is only one way for it to
-            # process the response. Based on the client's desire to stream or not, the
-            # Agentex server will then either yield the async generator objects directly
-            # (if streaming) or aggregate the content into a list of TaskMessageContents
-            # to dispatch to the client. This gives the Agentex server the flexibility
-            # to handle both cases itself.
-
-            if inspect.isasyncgenfunction(fn):
-                # An async generator already streams its content, so just return it
-                return fn(params)
-            else:
-                # The function is not an async generator, so we turn it into one and
-                # yield each TaskMessageContent as a StreamTaskMessageFull, which the
-                # Agentex server will stream to the client.
-                task_message_content_response = await fn(params)
-                # Handle None returns gracefully - treat as empty list
-                if task_message_content_response is None:
-                    task_message_content_list = []
-                elif isinstance(task_message_content_response, list):
-                    # Filter out None values from lists
-                    task_message_content_list = [content for content in task_message_content_response if content is not None]
-                else:
-                    task_message_content_list = [task_message_content_response]
-
-                async def async_generator(task_message_content_list: list[TaskMessageContent]):
-                    for i, task_message_content in enumerate(task_message_content_list):
-                        yield StreamTaskMessageFull(type="full", index=i, content=task_message_content)
-
-                return async_generator(task_message_content_list)
-
-        self._handlers[RPCMethod.MESSAGE_SEND] = message_send_wrapper
-        return fn
-
-    """
-    End of Decorators
-    """
-
-    """
-    ACP Server Lifecycle Methods
-    """
-
-    def run(self, host: str = "0.0.0.0", port: int = 8000, **kwargs):
-        """Start the Uvicorn server for async handlers."""
-        uvicorn.run(self, host=host, port=port, **kwargs)
-
diff --git a/src/agentex/lib/sdk/fastacp/base/constants.py b/src/agentex/lib/sdk/fastacp/base/constants.py
deleted file mode 100644
index c04287e0..00000000
--- a/src/agentex/lib/sdk/fastacp/base/constants.py
+++ /dev/null
@@ -1,36 +0,0 @@
-from __future__ import annotations
-
-# Header filtering rules for FastACP server
-# These rules match the gateway's security filtering
-
-# Hop-by-hop headers that should not be forwarded
-HOP_BY_HOP_HEADERS: set[str] = {
-    "connection",
-    "keep-alive",
-    "proxy-authenticate",
-    "proxy-authorization",
-    "te",
-    "trailer",
-    "transfer-encoding",
-    "upgrade",
-    "content-length",
-    "content-encoding",
-    "host",
-}
-
-# Sensitive headers that should never be forwarded
-BLOCKED_HEADERS: set[str] = {
-    "authorization",
-    "cookie",
-    "x-agent-api-key",
-}
-
-# Legacy constants for backward compatibility
-FASTACP_HEADER_SKIP_EXACT: set[str] = HOP_BY_HOP_HEADERS | BLOCKED_HEADERS
-
-FASTACP_HEADER_SKIP_PREFIXES: tuple[str, ...] 
= ( - "x-forwarded-", # proxy headers - "sec-", # security headers added by browsers -) - - diff --git a/src/agentex/lib/sdk/fastacp/fastacp.py b/src/agentex/lib/sdk/fastacp/fastacp.py deleted file mode 100644 index 0e32c346..00000000 --- a/src/agentex/lib/sdk/fastacp/fastacp.py +++ /dev/null @@ -1,111 +0,0 @@ -from __future__ import annotations - -import os -import inspect -from typing import Literal -from pathlib import Path -from typing_extensions import deprecated - -from agentex.lib.types.fastacp import ( - BaseACPConfig, - SyncACPConfig, - AsyncACPConfig, - AgenticACPConfig, -) -from agentex.lib.utils.logging import make_logger -from agentex.lib.sdk.fastacp.impl.sync_acp import SyncACP -from agentex.lib.sdk.fastacp.impl.temporal_acp import TemporalACP -from agentex.lib.sdk.fastacp.impl.async_base_acp import AsyncBaseACP -from agentex.lib.sdk.fastacp.base.base_acp_server import BaseACPServer - -# Add new mappings between ACP types and configs here -# Add new mappings between ACP types and implementations here -AGENTIC_ACP_IMPLEMENTATIONS: dict[Literal["temporal", "base"], type[BaseACPServer]] = { - "temporal": TemporalACP, - "base": AsyncBaseACP, -} - -logger = make_logger(__name__) - - -class FastACP: - """Factory for creating FastACP instances - - Supports three main ACP types: - - "sync": Simple synchronous ACP implementation - - "async": Advanced ACP with sub-types "base" or "temporal" (requires config) - - "agentic": (Deprecated, use "async") Identical to "async" - """ - - @staticmethod - # Note: the config is optional and not used right now but is there to be extended in the future - def create_sync_acp(config: SyncACPConfig | None = None, **kwargs) -> SyncACP: # noqa: ARG004 - """Create a SyncACP instance""" - return SyncACP.create(**kwargs) - - @staticmethod - def create_async_acp(config: AsyncACPConfig, **kwargs) -> BaseACPServer: - """Create an async ACP instance (base or temporal) - - Args: - config: AsyncACPConfig with type="base" or type="temporal" - **kwargs: Additional configuration parameters - """ - # Get implementation class - implementation_class = AGENTIC_ACP_IMPLEMENTATIONS[config.type] - # Handle temporal-specific configuration - if config.type == "temporal": - # Extract temporal_address, plugins, and interceptors from config if it's a TemporalACPConfig - temporal_config = kwargs.copy() - if hasattr(config, "temporal_address"): - temporal_config["temporal_address"] = config.temporal_address # type: ignore[attr-defined] - if hasattr(config, "plugins"): - temporal_config["plugins"] = config.plugins # type: ignore[attr-defined] - if hasattr(config, "interceptors"): - temporal_config["interceptors"] = config.interceptors # type: ignore[attr-defined] - return implementation_class.create(**temporal_config) - else: - return implementation_class.create(**kwargs) - - @staticmethod - @deprecated("Use create_async_acp instead") - def create_agentic_acp(config: AgenticACPConfig, **kwargs) -> BaseACPServer: - """Create an async ACP instance (base or temporal) - - Args: - config: AsyncACPConfig with type="base" or type="temporal" - **kwargs: Additional configuration parameters - """ - return FastACP.create_async_acp(config, **kwargs) - - @staticmethod - def locate_build_info_path() -> None: - """If a build-info.json file is present, set the BUILD_INFO_PATH environment variable""" - acp_root = Path(inspect.stack()[2].filename).resolve().parents[0] - build_info_path = acp_root / "build-info.json" - if build_info_path.exists(): - os.environ["BUILD_INFO_PATH"] = 
str(build_info_path)
-
-    @staticmethod
-    def create(
-        acp_type: Literal["sync", "async", "agentic"], config: BaseACPConfig | None = None, **kwargs
-    ) -> BaseACPServer | SyncACP | AsyncBaseACP | TemporalACP:
-        """Main factory method to create any ACP type
-
-        Args:
-            acp_type: Type of ACP to create ("sync", "async", or "agentic")
-            config: Configuration object. Required for async/agentic type.
-            **kwargs: Additional configuration parameters
-        """
-
-        FastACP.locate_build_info_path()
-
-        if acp_type == "sync":
-            sync_config = config if isinstance(config, SyncACPConfig) else None
-            return FastACP.create_sync_acp(sync_config, **kwargs)
-        elif acp_type == "async" or acp_type == "agentic":
-            if config is None:
-                config = AsyncACPConfig(type="base")
-            if not isinstance(config, AsyncACPConfig):
-                raise ValueError("AsyncACPConfig is required for async/agentic ACP type")
-            return FastACP.create_async_acp(config, **kwargs)
diff --git a/src/agentex/lib/sdk/fastacp/impl/async_base_acp.py b/src/agentex/lib/sdk/fastacp/impl/async_base_acp.py
deleted file mode 100644
index d2b8bac9..00000000
--- a/src/agentex/lib/sdk/fastacp/impl/async_base_acp.py
+++ /dev/null
@@ -1,75 +0,0 @@
-from typing import Any
-from typing_extensions import override
-
-from agentex.lib.types.acp import (
-    SendEventParams,
-    CancelTaskParams,
-    CreateTaskParams,
-)
-from agentex.lib.utils.logging import make_logger
-from agentex.lib.adk.utils._modules.client import create_async_agentex_client
-from agentex.lib.sdk.fastacp.base.base_acp_server import BaseACPServer
-
-logger = make_logger(__name__)
-
-
-class AsyncBaseACP(BaseACPServer):
-    """
-    AsyncBaseACP implementation - an async ACP that provides basic functionality
-    without any special orchestration like Temporal.
-
-    This implementation provides simple in-process handling of tasks
-    and is suitable for basic agent implementations.
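-
-    Minimal usage sketch (illustrative; the handler body is a placeholder):
-
-        acp = AsyncBaseACP.create()
-
-        @acp.on_task_event_send
-        async def handle_event(params: SendEventParams) -> None:
-            ...
-
-        acp.run()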
- """ - - def __init__(self): - super().__init__() - self._setup_handlers() - self._agentex_client = create_async_agentex_client() - - @classmethod - @override - def create(cls, **kwargs: Any) -> "AsyncBaseACP": - """Create and initialize SyncACP instance - - Args: - **kwargs: Configuration parameters (unused in sync implementation) - - Returns: - Initialized SyncACP instance - """ - logger.info("Initializing AsyncBaseACP instance") - instance = cls() - logger.info("AsyncBaseACP instance initialized with default handlers") - return instance - - @override - def _setup_handlers(self): - """Set up default handlers for sync operations""" - - @self.on_task_create - async def handle_create_task(params: CreateTaskParams) -> None: # type: ignore[unused-function] - """Default create task handler - logs the task""" - logger.info(f"AsyncBaseACP creating task {params.task.id}") - - @self.on_task_event_send - async def handle_event_send(params: SendEventParams) -> None: # type: ignore[unused-function] - """Default event handler - logs the event""" - logger.info( - f"AsyncBaseACP received event for task {params.task.id}: {params.event.id}," - f"content: {params.event.content}" - ) - # TODO: Implement event handling logic here - - # Implement cursor commit logic here - await self._agentex_client.tracker.update( - tracker_id=params.task.id, - last_processed_event_id=params.event.id, - ) - - @self.on_task_cancel - async def handle_cancel(params: CancelTaskParams) -> None: # type: ignore[unused-function] - """Default cancel handler - logs the cancellation""" - logger.info(f"AsyncBaseACP canceling task {params.task.id}") - -AgenticBaseACP = AsyncBaseACP \ No newline at end of file diff --git a/src/agentex/lib/sdk/fastacp/impl/sync_acp.py b/src/agentex/lib/sdk/fastacp/impl/sync_acp.py deleted file mode 100644 index 4898a963..00000000 --- a/src/agentex/lib/sdk/fastacp/impl/sync_acp.py +++ /dev/null @@ -1,111 +0,0 @@ -from __future__ import annotations - -from typing import Any, override -from collections.abc import AsyncGenerator - -from agentex.lib.types.acp import SendMessageParams -from agentex.lib.utils.logging import make_logger -from agentex.types.task_message_delta import TextDelta -from agentex.types.task_message_update import ( - TaskMessageUpdate, - StreamTaskMessageFull, - StreamTaskMessageDelta, -) -from agentex.types.task_message_content import TextContent, TaskMessageContent -from agentex.lib.sdk.fastacp.base.base_acp_server import BaseACPServer - -logger = make_logger(__name__) - - -class SyncACP(BaseACPServer): - """ - SyncACP provides synchronous request-response style communication. - Handlers execute and return responses immediately. - - The SyncACP automatically creates input and output messages, so handlers - don't need to manually create TaskMessage objects via the Agentex API. All that needs - to be done is return the output message via TaskMessageContent objects. 
- - Usage: - acp = SyncACP() - - @acp.on_message_send - async def handle_message(params: SendMessageParams) -> TaskMessageContent: - # Process message and return response - pass - - acp.run() - """ - - def __init__(self): - super().__init__() - self._setup_handlers() - - @classmethod - @override - def create(cls, **kwargs: Any) -> "SyncACP": - """Create and initialize SyncACP instance - - Args: - **kwargs: Configuration parameters (unused in sync implementation) - - Returns: - Initialized SyncACP instance - """ - logger.info("Creating SyncACP instance") - instance = cls() - logger.info("SyncACP instance created with default handlers") - return instance - - @override - def _setup_handlers(self): - """Set up default handlers for sync operations""" - - @self.on_message_send - async def handle_message_send( # type: ignore[unused-function] - params: SendMessageParams - ) -> TaskMessageContent | AsyncGenerator[TaskMessageUpdate, None]: - """Default message handler with TaskMessageUpdate streaming support - - For streaming, the SyncACP server automatically creates the input and output - messages, so we just return TaskMessageUpdate objects with parent_task_message=None - """ - logger.info( - f"SyncACP received message for task {params.task.id}: {params.content}" - ) - - if params.stream: - # Return streaming response - async def stream_response(): - # Example: Stream 3 chunks - full_message = "" - for i in range(3): - data = f"Streaming chunk {i+1}: Processing your request...\n" - full_message += data - yield StreamTaskMessageDelta( - type="delta", - index=0, - delta=TextDelta( - text_delta=f"Streaming chunk {i+1}: Processing your request...\n" - ), - ) - - # Final response - yield StreamTaskMessageFull( - type="full", - index=0, - content=TextContent( - author="agent", - content=full_message, - format="markdown", - ), - ) - - return stream_response() - else: - # Return single response for non-streaming - return TextContent( - author="agent", - content=f"Processed message for task {params.task.id}", - format="markdown", - ) diff --git a/src/agentex/lib/sdk/fastacp/impl/temporal_acp.py b/src/agentex/lib/sdk/fastacp/impl/temporal_acp.py deleted file mode 100644 index 750707c4..00000000 --- a/src/agentex/lib/sdk/fastacp/impl/temporal_acp.py +++ /dev/null @@ -1,113 +0,0 @@ -from __future__ import annotations - -from typing import Any, Callable, AsyncGenerator, override -from contextlib import asynccontextmanager - -from fastapi import FastAPI - -from agentex.lib.types.acp import ( - SendEventParams, - CancelTaskParams, - CreateTaskParams, -) -from agentex.lib.utils.logging import make_logger -from agentex.lib.environment_variables import EnvironmentVariables -from agentex.lib.sdk.fastacp.base.base_acp_server import BaseACPServer -from agentex.lib.core.clients.temporal.temporal_client import TemporalClient -from agentex.lib.core.temporal.services.temporal_task_service import TemporalTaskService - -logger = make_logger(__name__) - - -class TemporalACP(BaseACPServer): - """ - Temporal-specific implementation of AsyncAgentACP. - Uses TaskService to forward operations to temporal workflows. 
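-
-    Typically constructed via the create classmethod (an illustrative sketch;
-    the address is a placeholder for a reachable Temporal frontend):
-
-        acp = TemporalACP.create(temporal_address="localhost:7233")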
- """ - - def __init__( - self, - temporal_address: str, - temporal_task_service: TemporalTaskService | None = None, - plugins: list[Any] | None = None, - interceptors: list[Any] | None = None, - ): - super().__init__() - self._temporal_task_service = temporal_task_service - self._temporal_address = temporal_address - self._plugins = plugins or [] - self._interceptors = interceptors or [] - - @classmethod - @override - def create(cls, temporal_address: str, plugins: list[Any] | None = None, interceptors: list[Any] | None = None) -> "TemporalACP": - logger.info("Initializing TemporalACP instance") - - # Create instance without temporal client initially - temporal_acp = cls(temporal_address=temporal_address, plugins=plugins, interceptors=interceptors) - temporal_acp._setup_handlers() - logger.info("TemporalACP instance initialized now") - return temporal_acp - - @override - def get_lifespan_function(self) -> Callable[[FastAPI], AsyncGenerator[None, None]]: - @asynccontextmanager - async def lifespan(app: FastAPI): - # Create temporal client during startup - if self._temporal_address is None: - raise ValueError("Temporal address is not set") - - if self._temporal_task_service is None: - env_vars = EnvironmentVariables.refresh() - temporal_client = await TemporalClient.create( - temporal_address=self._temporal_address, plugins=self._plugins - ) - self._temporal_task_service = TemporalTaskService( - temporal_client=temporal_client, - env_vars=env_vars, - ) - - # Call parent lifespan for agent registration - async with super().get_lifespan_function()(app): # type: ignore[misc] - yield - - return lifespan # type: ignore[return-value] - - @override - def _setup_handlers(self): - """Set up the handlers for temporal workflow operations""" - - @self.on_task_create - async def handle_task_create(params: CreateTaskParams) -> None: - """Default create task handler - logs the task""" - logger.info(f"TemporalACP received task create rpc call for task {params.task.id}") - if self._temporal_task_service is not None: - await self._temporal_task_service.submit_task( - agent=params.agent, task=params.task, params=params.params - ) - - @self.on_task_event_send - async def handle_event_send(params: SendEventParams) -> None: - """Forward messages to running workflows via TaskService""" - try: - if self._temporal_task_service is not None: - await self._temporal_task_service.send_event( - agent=params.agent, - task=params.task, - event=params.event, - request=params.request, - ) - - except Exception as e: - logger.error(f"Failed to send message: {e}") - raise - - @self.on_task_cancel - async def handle_cancel(params: CancelTaskParams) -> None: - """Cancel running workflows via TaskService""" - try: - if self._temporal_task_service is not None: - await self._temporal_task_service.cancel(task_id=params.task.id) - except Exception as e: - logger.error(f"Failed to cancel task: {e}") - raise diff --git a/src/agentex/lib/sdk/fastacp/tests/README.md b/src/agentex/lib/sdk/fastacp/tests/README.md deleted file mode 100644 index fa958012..00000000 --- a/src/agentex/lib/sdk/fastacp/tests/README.md +++ /dev/null @@ -1,297 +0,0 @@ -# BaseACPServer Test Suite - -This directory contains comprehensive tests for the `BaseACPServer` and its implementations (`SyncACP`, `AsyncBaseACP`, and `TemporalACP`). - -## Test Structure - -The test suite is organized into several categories: - -### 1. 
Core Unit Tests (`test_base_acp_server.py`) -- **TestBaseACPServerInitialization**: Server initialization and setup -- **TestHealthCheckEndpoint**: Health check endpoint functionality -- **TestJSONRPCEndpointCore**: Basic JSON-RPC endpoint functionality -- **TestHandlerRegistration**: Handler registration and management -- **TestBackgroundProcessing**: Background task processing -- **TestErrorHandling**: Basic error handling scenarios - -### 2. JSON-RPC Endpoint Tests (`test_json_rpc_endpoints.py`) -- **TestJSONRPCMethodHandling**: Method routing and execution -- **TestJSONRPCParameterValidation**: Parameter parsing and validation -- **TestJSONRPCResponseFormat**: Response formatting compliance -- **TestJSONRPCErrorCodes**: JSON-RPC 2.0 error code compliance -- **TestJSONRPCConcurrency**: Concurrent request handling - -### 3. Integration Tests (`test_server_integration.py`) -- **TestServerLifecycle**: Server startup, running, and shutdown -- **TestHTTPClientIntegration**: Real HTTP client interactions -- **TestHandlerExecutionIntegration**: Handler execution in server environment -- **TestServerPerformance**: Performance characteristics - -### 4. Implementation Tests (`test_implementations.py`) -- **TestSyncACP**: SyncACP-specific functionality -- **TestAsyncBaseACP**: AsyncBaseACP-specific functionality -- **TestTemporalACP**: TemporalACP-specific functionality -- **TestImplementationComparison**: Differences between implementations -- **TestImplementationErrorHandling**: Implementation-specific error handling - -### 5. Error Handling Tests (`test_error_handling.py`) -- **TestMalformedRequestHandling**: Invalid and malformed requests -- **TestHandlerErrorHandling**: Handler-level error scenarios -- **TestServerErrorHandling**: Server-level error handling -- **TestEdgeCases**: Edge cases and boundary conditions - -## Running Tests - -### Prerequisites - -Install test dependencies: -```bash -pip install pytest pytest-asyncio httpx pytest-cov pytest-xdist -``` - -### Basic Usage - -Run all tests: -```bash -python run_tests.py -``` - -Run specific test categories: -```bash -python run_tests.py --category unit -python run_tests.py --category integration -python run_tests.py --category implementations -python run_tests.py --category error -``` - -### Advanced Options - -Run with coverage: -```bash -python run_tests.py --coverage -``` - -Run in parallel: -```bash -python run_tests.py --parallel 4 -``` - -Run with increased verbosity: -```bash -python run_tests.py -vv -``` - -Stop on first failure: -```bash -python run_tests.py --failfast -``` - -Run only failed tests from last run: -```bash -python run_tests.py --lf -``` - -### Quick Test Options - -For development, use these quick test commands: - -```bash -# Quick smoke tests -python run_tests.py smoke - -# Quick development tests -python run_tests.py quick - -# Performance tests only -python run_tests.py perf -``` - -### Direct pytest Usage - -You can also run tests directly with pytest: - -```bash -# Run all tests -pytest - -# Run specific test file -pytest test_base_acp_server.py - -# Run specific test class -pytest test_base_acp_server.py::TestBaseACPServerInitialization - -# Run specific test method -pytest test_base_acp_server.py::TestBaseACPServerInitialization::test_base_acp_server_init - -# Run with markers -pytest -m "not slow" -``` - -## Test Configuration - -### Fixtures (`conftest.py`) - -The test suite uses several fixtures: - -- **`free_port`**: Provides a free port for testing -- **`sample_task`**, **`sample_message`**: Sample 
data objects -- **`base_acp_server`**, **`sync_acp_server`**, **`agentic_base_acp_server`**, **`mock_temporal_acp_server`**: Server instances -- **`test_server_runner`**: Manages server lifecycle for integration tests -- **`jsonrpc_client_factory`**: Creates JSON-RPC test clients -- **`mock_env_vars`**: Mocked environment variables - -### Test Utilities - -- **`TestServerRunner`**: Manages server startup/shutdown for integration tests -- **`JSONRPCTestClient`**: Simplified JSON-RPC client for testing -- **`find_free_port()`**: Utility to find available ports - -## Test Categories Explained - -### Unit Tests -Focus on individual components in isolation: -- Server initialization -- Handler registration -- Basic endpoint functionality -- Parameter validation - -### Integration Tests -Test components working together: -- Full server lifecycle -- Real HTTP requests -- Handler execution in server context -- Performance characteristics - -### Implementation Tests -Test specific ACP implementations: -- SyncACP behavior -- AsyncBaseACP send_event functionality -- TemporalACP workflow integration -- Implementation differences - -### Error Handling Tests -Comprehensive error scenarios: -- Malformed JSON-RPC requests -- Handler exceptions -- Server error recovery -- Edge cases and boundary conditions - -## Writing New Tests - -### Test Naming Convention -- Test files: `test_*.py` -- Test classes: `Test*` -- Test methods: `test_*` - -### Async Test Example -```python -@pytest.mark.asyncio -async def test_my_async_functionality(self, base_acp_server): - # Your async test code here - result = await some_async_operation() - assert result is not None -``` - -### Integration Test Example -```python -@pytest.mark.asyncio -async def test_server_integration(self, base_acp_server, free_port, test_server_runner): - runner = test_server_runner(base_acp_server, free_port) - await runner.start() - - try: - # Test server functionality - async with httpx.AsyncClient() as client: - response = await client.get(f"http://127.0.0.1:{free_port}/healthz") - assert response.status_code == 200 - finally: - await runner.stop() -``` - -### Handler Test Example -```python -@pytest.mark.asyncio -async def test_custom_handler(self, base_acp_server): - handler_called = False - - @base_acp_server.on_task_event_send - async def test_handler(params: SendEventParams): - nonlocal handler_called - handler_called = True - return {"handled": True} - - # Test handler execution - params = SendEventParams(...) - result = await base_acp_server._handlers[RPCMethod.EVENT_SEND](params) - - assert handler_called is True - assert result["handled"] is True -``` - -## Continuous Integration - -The test suite is designed to work well in CI environments: - -- Tests are isolated and don't interfere with each other -- Ports are dynamically allocated to avoid conflicts -- Background tasks are properly cleaned up -- Timeouts are reasonable for CI environments - -### CI Configuration Example - -```yaml -# .github/workflows/test.yml -name: Tests -on: [push, pull_request] -jobs: - test: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - uses: actions/setup-python@v5 - with: - python-version: '3.12' - - run: pip install -r requirements.txt - - run: pip install pytest pytest-asyncio httpx pytest-cov - - run: cd src/agentex/lib/sdk/fastacp/tests && python run_tests.py --coverage ```  - -## Troubleshooting - -### Common Issues - -1.
**Port conflicts**: Tests use dynamic port allocation, but if you see port conflicts, try running tests sequentially: - ```bash - python run_tests.py --parallel 1 - ``` - -2. **Async test failures**: Make sure all async tests are marked with `@pytest.mark.asyncio` - -3. **Handler not found errors**: Ensure handlers are properly registered before testing - -4. **Timeout issues**: Some tests have built-in delays for background processing. If tests are flaky, increase sleep times in test code. - -### Debug Mode - -Run tests with maximum verbosity and no capture: -```bash -pytest -vvv -s --tb=long -``` - -### Memory Issues - -If you encounter memory issues with large tests: -```bash -python run_tests.py --markers "not memory_intensive" -``` - -## Contributing - -When adding new tests: - -1. Follow the existing test structure and naming conventions -2. Add appropriate docstrings explaining what the test does -3. Use fixtures for common setup -4. Clean up resources properly (especially in integration tests) -5. Add tests to the appropriate category in `run_tests.py` -6. Update this README if adding new test categories or significant functionality \ No newline at end of file diff --git a/src/agentex/lib/sdk/fastacp/tests/conftest.py b/src/agentex/lib/sdk/fastacp/tests/conftest.py deleted file mode 100644 index 8941f16e..00000000 --- a/src/agentex/lib/sdk/fastacp/tests/conftest.py +++ /dev/null @@ -1,311 +0,0 @@ -from __future__ import annotations - -import time -import socket -import asyncio -from typing import Any -from unittest.mock import AsyncMock, patch - -import httpx -import pytest -import uvicorn -import pytest_asyncio - -from agentex.types.task import Task -from agentex.types.agent import Agent -from agentex.lib.types.acp import ( - CancelTaskParams, - CreateTaskParams, - SendMessageParams, -) -from agentex.lib.types.json_rpc import JSONRPCRequest -from agentex.types.task_message import TaskMessageContent -from agentex.types.task_message_content import TextContent -from agentex.lib.sdk.fastacp.impl.sync_acp import SyncACP -from agentex.lib.sdk.fastacp.impl.temporal_acp import TemporalACP -from agentex.lib.sdk.fastacp.impl.async_base_acp import AsyncBaseACP -from agentex.lib.sdk.fastacp.base.base_acp_server import BaseACPServer - -# Configure pytest-asyncio -pytest_plugins = ("pytest_asyncio",) - - -def find_free_port() -> int: - """Find a free port for testing""" - with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: - s.bind(("", 0)) - s.listen(1) - port = s.getsockname()[1] - return port - - -@pytest.fixture -def free_port() -> int: - """Fixture that provides a free port for testing""" - return find_free_port() - - -@pytest.fixture -def sample_task() -> Task: - """Fixture that provides a sample Task object""" - return Task( - id="test-task-123", status="RUNNING" - ) - - -@pytest.fixture -def sample_message_content() -> TaskMessageContent: - """Fixture that provides a sample TaskMessage object""" - return TextContent( - type="text", - author="user", - content="Hello, this is a test message", - ) - - -@pytest.fixture -def sample_send_message_params( - sample_task: Task, sample_message_content: TaskMessageContent -) -> SendMessageParams: - """Fixture that provides sample SendMessageParams""" - return SendMessageParams( - agent=Agent( - id="test-agent-456", - name="test-agent", - description="test-agent", - acp_type="sync", - created_at="2023-01-01T00:00:00Z", - updated_at="2023-01-01T00:00:00Z", - ), - task=sample_task, - content=sample_message_content, - stream=False, - ) - - 
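-# Illustrative sketch of how the sample fixtures above compose in a test, assuming -# the handler-map dispatch used by the unit tests (handlers keyed by RPCMethod, -# importable from agentex.lib.types.acp): -# -# @pytest.mark.asyncio -# async def test_sample_params_reach_handler(base_acp_server, sample_send_message_params): -# async def echo_handler(params): -# # Echo back the fields populated by the fixtures above -# return {"task_id": params.task.id, "content": params.content.content} -# -# base_acp_server._handlers[RPCMethod.MESSAGE_SEND] = echo_handler -# result = await base_acp_server._handlers[RPCMethod.MESSAGE_SEND](sample_send_message_params) -# assert result == {"task_id": "test-task-123", "content": "Hello, this is a test message"} -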
-@pytest.fixture -def sample_cancel_task_params() -> CancelTaskParams: - """Fixture that provides sample CancelTaskParams""" - return CancelTaskParams( - agent=Agent(id="test-agent-456", name="test-agent", description="test-agent", acp_type="sync", created_at="2023-01-01T00:00:00Z", updated_at="2023-01-01T00:00:00Z"), - task=Task(id="test-task-123", status="RUNNING"), - ) - - -@pytest.fixture -def sample_create_task_params(sample_task: Task) -> CreateTaskParams: - """Fixture that provides sample CreateTaskParams""" - return CreateTaskParams( - agent=Agent(id="test-agent-456", name="test-agent", description="test-agent", acp_type="sync", created_at="2023-01-01T00:00:00Z", updated_at="2023-01-01T00:00:00Z"), - task=sample_task, - params={}, - ) - - -class TestServerRunner: - """Utility class for running test servers""" - - def __init__(self, app: BaseACPServer, port: int): - self.app = app - self.port = port - self.server = None - self.server_task = None - - async def start(self): - """Start the server in a background task""" - config = uvicorn.Config( - app=self.app, - host="127.0.0.1", - port=self.port, - log_level="error", # Reduce noise in tests - ) - self.server = uvicorn.Server(config) - self.server_task = asyncio.create_task(self.server.serve()) - - # Wait for server to be ready - await self._wait_for_server() - - async def stop(self): - """Stop the server""" - if self.server: - self.server.should_exit = True - if self.server_task: - try: - await asyncio.wait_for(self.server_task, timeout=5.0) - except TimeoutError: - self.server_task.cancel() - try: - await self.server_task - except asyncio.CancelledError: - pass - - async def _wait_for_server(self, timeout: float = 10.0): - """Wait for server to be ready to accept connections""" - start_time = time.time() - while time.time() - start_time < timeout: - try: - async with httpx.AsyncClient() as client: - response = await client.get(f"http://127.0.0.1:{self.port}/healthz") - if response.status_code == 200: - return - except (httpx.ConnectError, httpx.ConnectTimeout): - await asyncio.sleep(0.1) - raise TimeoutError(f"Server did not start within {timeout} seconds") - - -@pytest_asyncio.fixture -async def test_server_runner(): - """Fixture that provides a TestServerRunner factory""" - runners = [] - - def create_runner(app: BaseACPServer, port: int) -> TestServerRunner: - runner = TestServerRunner(app, port) - runners.append(runner) - return runner - - yield create_runner - - # Cleanup all runners - for runner in runners: - await runner.stop() - - -@pytest.fixture -def base_acp_server(): - """Fixture that provides a BaseACPServer instance for sync tests""" - with patch.dict( - "os.environ", {"AGENTEX_BASE_URL": ""} - ): # Disable agent registration - server = BaseACPServer() - return server - - -@pytest_asyncio.fixture -async def async_base_acp_server(): - """Fixture that provides a BaseACPServer instance for async tests""" - with patch.dict( - "os.environ", {"AGENTEX_BASE_URL": ""} - ): # Disable agent registration - server = BaseACPServer.create() - return server - - -@pytest.fixture -def sync_acp_server(): - """Fixture that provides a SyncACP instance for sync tests""" - with patch.dict( - "os.environ", {"AGENTEX_BASE_URL": ""} - ): # Disable agent registration - server = SyncACP() - return server - - -@pytest_asyncio.fixture -async def async_sync_acp_server(): - """Fixture that provides a SyncACP instance for async tests""" - with patch.dict( - "os.environ", {"AGENTEX_BASE_URL": ""} - ): # Disable agent registration - server = 
SyncACP.create() - return server - - -@pytest.fixture -def agentic_base_acp_server(): - """Fixture that provides an AsyncBaseACP instance for sync tests""" - with patch.dict( - "os.environ", {"AGENTEX_BASE_URL": ""} - ): # Disable agent registration - server = AsyncBaseACP() - return server - - -@pytest_asyncio.fixture -async def async_agentic_base_acp_server(): - """Fixture that provides an AsyncBaseACP instance for async tests""" - with patch.dict( - "os.environ", {"AGENTEX_BASE_URL": ""} - ): # Disable agent registration - server = AsyncBaseACP.create() - return server - - -@pytest_asyncio.fixture -async def mock_temporal_acp_server(): - """Fixture that provides a mocked TemporalACP instance""" - with patch.dict( - "os.environ", {"AGENTEX_BASE_URL": ""} - ): # Disable agent registration - with patch( - "agentex.lib.sdk.fastacp.impl.temporal_acp.TemporalClient" - ) as mock_temporal_client: - with patch( - "agentex.lib.sdk.fastacp.impl.temporal_acp.AsyncAgentexClient" - ) as mock_agentex_client: - # Mock the temporal client creation - mock_temporal_client.create.return_value = AsyncMock() - mock_agentex_client.return_value = AsyncMock() - - server = TemporalACP.create(temporal_address="localhost:7233") - return server - - -class JSONRPCTestClient: - """Test client for making JSON-RPC requests""" - - def __init__(self, base_url: str): - self.base_url = base_url - - async def call_method( - self, method: str, params: dict[str, Any], request_id: str | None = "test-1" - ) -> dict[str, Any]: - """Make a JSON-RPC method call""" - request = JSONRPCRequest(method=method, params=params, id=request_id) - - async with httpx.AsyncClient() as client: - response = await client.post( - f"{self.base_url}/api", - json=request.model_dump(), - headers={"Content-Type": "application/json"}, - ) - return response.json() - - async def send_notification( - self, method: str, params: dict[str, Any] - ) -> dict[str, Any]: - """Send a JSON-RPC notification (no ID)""" - return await self.call_method(method, params, request_id=None) - - async def health_check(self) -> dict[str, Any]: - """Check server health""" - async with httpx.AsyncClient() as client: - response = await client.get(f"{self.base_url}/healthz") - return response.json() - - -@pytest.fixture -def jsonrpc_client_factory(): - """Fixture that provides a JSONRPCTestClient factory""" - - def create_client(base_url: str) -> JSONRPCTestClient: - return JSONRPCTestClient(base_url) - - return create_client - - -# Mock environment variables for testing -@pytest.fixture -def mock_env_vars(): - """Fixture that mocks environment variables""" - env_vars = { - "AGENTEX_BASE_URL": "", # Disable agent registration by default - "AGENT_NAME": "test-agent", - "AGENT_DESCRIPTION": "Test agent description", - "ACP_URL": "http://localhost", - "ACP_PORT": "8000", - "WORKFLOW_NAME": "test-workflow", - "WORKFLOW_TASK_QUEUE": "test-queue", - } - - with patch.dict("os.environ", env_vars): - yield env_vars diff --git a/src/agentex/lib/sdk/fastacp/tests/pytest.ini b/src/agentex/lib/sdk/fastacp/tests/pytest.ini deleted file mode 100644 index c36f46f2..00000000 --- a/src/agentex/lib/sdk/fastacp/tests/pytest.ini +++ /dev/null @@ -1,10 +0,0 @@ -[pytest] -asyncio_mode = auto -addopts = -v --tb=short -testpaths = .
-python_files = test_*.py -python_classes = Test* -python_functions = test_* -filterwarnings = - ignore::DeprecationWarning - ignore::PytestDeprecationWarning \ No newline at end of file diff --git a/src/agentex/lib/sdk/fastacp/tests/run_tests.py b/src/agentex/lib/sdk/fastacp/tests/run_tests.py deleted file mode 100644 index 8b23be16..00000000 --- a/src/agentex/lib/sdk/fastacp/tests/run_tests.py +++ /dev/null @@ -1,227 +0,0 @@ -#!/usr/bin/env python3 -""" -Test runner for BaseACPServer and implementations. - -This script provides various options for running the test suite: -- Run all tests -- Run specific test categories -- Run with different verbosity levels -- Generate coverage reports -- Run performance tests -""" - -import sys -import argparse -import subprocess -from pathlib import Path - - -def run_command(cmd, description=""): - """Run a command and return the result""" - if description: - print(f"\n{'='*60}") - print(f"Running: {description}") - print(f"Command: {' '.join(cmd)}") - print(f"{'='*60}") - - result = subprocess.run(cmd, capture_output=True, text=True, check=False) - - if result.stdout: - print(result.stdout) - if result.stderr: - print(result.stderr, file=sys.stderr) - - return result.returncode == 0 - - -def main(): - parser = argparse.ArgumentParser(description="Run BaseACPServer tests") - parser.add_argument( - "--category", - choices=["unit", "integration", "implementations", "error", "all"], - default="all", - help="Test category to run", - ) - parser.add_argument( - "--verbose", - "-v", - action="count", - default=0, - help="Increase verbosity (use -v, -vv, or -vvv)", - ) - parser.add_argument("--coverage", action="store_true", help="Run with coverage reporting") - parser.add_argument( - "--parallel", "-n", type=int, help="Run tests in parallel (number of workers)" - ) - parser.add_argument( - "--markers", "-m", help="Run tests with specific markers (e.g., 'not slow')" - ) - parser.add_argument("--failfast", "-x", action="store_true", help="Stop on first failure") - parser.add_argument( - "--lf", - "--last-failed", - action="store_true", - help="Run only tests that failed in the last run", - ) - parser.add_argument( - "--collect-only", action="store_true", help="Only collect tests, don't run them" - ) - - args = parser.parse_args() - - # Base pytest command - cmd = ["python", "-m", "pytest"] - - # Add test files based on category - test_files = { - "unit": ["test_base_acp_server.py", "test_json_rpc_endpoints.py"], - "integration": ["test_server_integration.py"], - "implementations": ["test_implementations.py"], - "error": ["test_error_handling.py"], - "all": [ - "test_base_acp_server.py", - "test_json_rpc_endpoints.py", - "test_server_integration.py", - "test_implementations.py", - "test_error_handling.py", - ], - } - - # Add test files to command - for test_file in test_files[args.category]: - cmd.append(test_file) - - # Add verbosity - if args.verbose: - cmd.append("-" + "v" * min(args.verbose, 3)) - - # Add coverage - if args.coverage: - cmd.extend( - [ - "--cov=agentex.sdk.fastacp", - "--cov-report=html", - "--cov-report=term-missing", - "--cov-branch", - ] - ) - - # Add parallel execution - if args.parallel: - cmd.extend(["-n", str(args.parallel)]) - - # Add markers - if args.markers: - cmd.extend(["-m", args.markers]) - - # Add fail fast - if args.failfast: - cmd.append("-x") - - # Add last failed - if args.lf: - cmd.append("--lf") - - # Add collect only - if args.collect_only: - cmd.append("--collect-only") - - # Add other useful options - cmd.extend( - [ 
- "--tb=short", # Shorter traceback format - "--strict-markers", # Strict marker checking - "--disable-warnings", # Disable warnings for cleaner output - ] - ) - - # Change to test directory - test_dir = Path(__file__).parent - original_cwd = Path.cwd() - - try: - import os - - os.chdir(test_dir) - - # Run the tests - success = run_command(cmd, f"Running {args.category} tests") - - if success: - print(f"\nโœ… All {args.category} tests passed!") - if args.coverage: - print("๐Ÿ“Š Coverage report generated in htmlcov/") - else: - print(f"\nโŒ Some {args.category} tests failed!") - return 1 - - finally: - os.chdir(original_cwd) - - return 0 - - -def run_quick_tests(): - """Run a quick subset of tests for development""" - cmd = [ - "python", - "-m", - "pytest", - "test_base_acp_server.py::TestBaseACPServerInitialization", - "test_json_rpc_endpoints.py::TestJSONRPCMethodHandling", - "-v", - "--tb=short", - ] - - return run_command(cmd, "Running quick development tests") - - -def run_smoke_tests(): - """Run smoke tests to verify basic functionality""" - cmd = [ - "python", - "-m", - "pytest", - "-m", - "not slow", - "-x", # Stop on first failure - "--tb=line", - "test_base_acp_server.py::TestBaseACPServerInitialization::test_base_acp_server_init", - "test_base_acp_server.py::TestHealthCheckEndpoint::test_health_check_endpoint", - "test_json_rpc_endpoints.py::TestJSONRPCMethodHandling::test_message_received_method_routing", - ] - - return run_command(cmd, "Running smoke tests") - - -def run_performance_tests(): - """Run performance-focused tests""" - cmd = [ - "python", - "-m", - "pytest", - "test_server_integration.py::TestServerPerformance", - "test_error_handling.py::TestServerErrorHandling::test_server_handles_concurrent_errors", - "-v", - "--tb=short", - ] - - return run_command(cmd, "Running performance tests") - - -if __name__ == "__main__": - # Check if specific test type is requested via environment - test_type = ( - sys.argv[1] if len(sys.argv) > 1 and sys.argv[1] in ["quick", "smoke", "perf"] else None - ) - - if test_type == "quick": - success = run_quick_tests() - elif test_type == "smoke": - success = run_smoke_tests() - elif test_type == "perf": - success = run_performance_tests() - else: - success = main() - - sys.exit(0 if success else 1) diff --git a/src/agentex/lib/sdk/fastacp/tests/test_base_acp_server.py b/src/agentex/lib/sdk/fastacp/tests/test_base_acp_server.py deleted file mode 100644 index 0816ac43..00000000 --- a/src/agentex/lib/sdk/fastacp/tests/test_base_acp_server.py +++ /dev/null @@ -1,450 +0,0 @@ -# ruff: noqa: ARG001 -import asyncio -from unittest.mock import patch - -import pytest -from fastapi.testclient import TestClient - -from agentex.lib.types.acp import ( - RPCMethod, - SendEventParams, - CancelTaskParams, -) -from agentex.lib.sdk.fastacp.base.base_acp_server import BaseACPServer - - -class TestBaseACPServerInitialization: - """Test BaseACPServer initialization and setup""" - - def test_base_acp_server_init(self): - """Test BaseACPServer initialization sets up routes correctly""" - with patch.dict("os.environ", {"AGENTEX_BASE_URL": ""}): - server = BaseACPServer() - - # Check that FastAPI routes are set up - routes = [route.path for route in server.routes] # type: ignore[attr-defined] - assert "/healthz" in routes - assert "/api" in routes - - # Check that handlers dict is initialized - assert hasattr(server, "_handlers") - assert isinstance(server._handlers, dict) - - def test_base_acp_server_create_classmethod(self): - """Test BaseACPServer.create() class 
method""" - with patch.dict("os.environ", {"AGENTEX_BASE_URL": ""}): - server = BaseACPServer.create() - - assert isinstance(server, BaseACPServer) - assert hasattr(server, "_handlers") - - def test_lifespan_function_setup(self): - """Test that lifespan function is properly configured""" - with patch.dict("os.environ", {"AGENTEX_BASE_URL": ""}): - server = BaseACPServer() - - # Check that lifespan is configured - assert server.router.lifespan_context is not None - - -class TestHealthCheckEndpoint: - """Test health check endpoint functionality""" - - def test_health_check_endpoint(self, base_acp_server): - """Test GET /healthz endpoint returns correct response""" - client = TestClient(base_acp_server) - - response = client.get("/healthz") - - assert response.status_code == 200 - assert response.json() == {"status": "healthy"} - - def test_health_check_content_type(self, base_acp_server): - """Test health check returns JSON content type""" - client = TestClient(base_acp_server) - - response = client.get("/healthz") - - assert response.headers["content-type"] == "application/json" - - -class TestJSONRPCEndpointCore: - """Test core JSON-RPC endpoint functionality""" - - def test_jsonrpc_endpoint_exists(self, base_acp_server): - """Test POST /api endpoint exists""" - client = TestClient(base_acp_server) - - # Send a basic request to check endpoint exists - response = client.post("/api", json={}) - - # Should not return 404 (endpoint exists) - assert response.status_code != 404 - - def test_jsonrpc_malformed_request(self, base_acp_server): - """Test JSON-RPC endpoint handles malformed requests""" - client = TestClient(base_acp_server) - - # Send malformed JSON - response = client.post("/api", json={"invalid": "request"}) - - assert response.status_code == 200 - data = response.json() - assert "error" in data - assert data["jsonrpc"] == "2.0" - - def test_jsonrpc_method_not_found(self, base_acp_server): - """Test JSON-RPC method not found error""" - client = TestClient(base_acp_server) - - request = { - "jsonrpc": "2.0", - "method": "nonexistent/method", - "params": {}, - "id": "test-1", - } - - response = client.post("/api", json=request) - - assert response.status_code == 200 - data = response.json() - assert "error" in data - assert data["error"]["code"] == -32601 # Method not found - assert data["id"] == "test-1" - - def test_jsonrpc_valid_request_structure(self, base_acp_server): - """Test JSON-RPC request parsing with valid structure""" - client = TestClient(base_acp_server) - - # Add a mock handler for testing - async def mock_handler(params): - return {"status": "success"} - - base_acp_server._handlers[RPCMethod.EVENT_SEND] = mock_handler - - request = { - "jsonrpc": "2.0", - "method": "event/send", - "params": { - "task": {"id": "test-task", "agent_id": "test-agent", "status": "RUNNING"}, - "message": { - "type": "text", - "author": "user", - "content": "test message", - }, - }, - "id": "test-1", - } - - response = client.post("/api", json=request) - - assert response.status_code == 200 - data = response.json() - assert data["jsonrpc"] == "2.0" - assert data["id"] == "test-1" - # Should return immediate acknowledgment - assert data["result"]["status"] == "processing" - - -class TestHandlerRegistration: - """Test handler registration and management""" - - def test_on_task_event_send_decorator(self): - """Test on_task_event_send decorator registration""" - with patch.dict("os.environ", {"AGENTEX_BASE_URL": ""}): - server = BaseACPServer() - - @server.on_task_event_send - async def 
test_handler(params: SendEventParams): - return {"test": "response"} - - # Check handler is registered - assert RPCMethod.EVENT_SEND in server._handlers - assert server._handlers[RPCMethod.EVENT_SEND] is not None - - def test_cancel_task_decorator(self): - """Test cancel_task decorator registration""" - with patch.dict("os.environ", {"AGENTEX_BASE_URL": ""}): - server = BaseACPServer() - - @server.on_task_cancel - async def test_handler(params: CancelTaskParams): - return {"test": "response"} - - # Check handler is registered - assert RPCMethod.TASK_CANCEL in server._handlers - assert server._handlers[RPCMethod.TASK_CANCEL] is not None - - @pytest.mark.asyncio - async def test_handler_wrapper_functionality(self): - """Test that handler wrapper works correctly""" - with patch.dict("os.environ", {"AGENTEX_BASE_URL": ""}): - server = BaseACPServer() - - # Create a test handler - async def test_handler(params): - return {"handler_called": True, "params_received": True} - - # Wrap the handler - wrapped = server._wrap_handler(test_handler) - - # Test the wrapped handler - result = await wrapped({"test": "params"}) - assert result["handler_called"] is True - assert result["params_received"] is True - - -class TestBackgroundProcessing: - """Test background processing functionality""" - - @pytest.mark.asyncio - async def test_notification_processing(self, async_base_acp_server): - """Test notification processing (requests with no ID)""" - # Add a mock handler - handler_called = False - received_params = None - - async def mock_handler(params): - nonlocal handler_called, received_params - handler_called = True - received_params = params - return {"status": "processed"} - - async_base_acp_server._handlers[RPCMethod.EVENT_SEND] = mock_handler - - client = TestClient(async_base_acp_server) - - request = { - "jsonrpc": "2.0", - "method": "event/send", - "params": { - "task": {"id": "test-task", "agent_id": "test-agent", "status": "RUNNING"}, - "message": { - "type": "text", - "author": "user", - "content": "test message", - }, - }, - # No ID = notification - } - - response = client.post("/api", json=request) - - assert response.status_code == 200 - data = response.json() - assert data["id"] is None # Notification response - - # Give background task time to execute - await asyncio.sleep(0.1) - - # Handler should have been called - assert handler_called is True - assert received_params is not None - - @pytest.mark.asyncio - async def test_request_processing_with_id(self, async_base_acp_server): - """Test request processing with ID returns immediate acknowledgment""" - - # Add a mock handler - async def mock_handler(params): - return {"status": "processed"} - - async_base_acp_server._handlers[RPCMethod.TASK_CANCEL] = mock_handler - - client = TestClient(async_base_acp_server) - - request = { - "jsonrpc": "2.0", - "method": "task/cancel", - "params": {"task_id": "test-task-123"}, - "id": "test-request-1", - } - - response = client.post("/api", json=request) - - assert response.status_code == 200 - data = response.json() - assert data["jsonrpc"] == "2.0" - assert data["id"] == "test-request-1" - assert data["result"]["status"] == "processing" # Immediate acknowledgment - - -class TestSynchronousRPCMethods: - """Test synchronous RPC methods that return results immediately""" - - def test_send_message_synchronous_response(self, base_acp_server): - """Test that MESSAGE_SEND method returns handler result synchronously""" - client = TestClient(base_acp_server) - - # Add a mock handler that returns a specific 
result - async def mock_execute_handler(params): - return { - "task_id": params.task.id, - "message_content": params.message.content, - "status": "executed_synchronously", - "custom_data": {"processed": True, "timestamp": "2024-01-01T12:00:00Z"}, - } - - base_acp_server._handlers[RPCMethod.MESSAGE_SEND] = mock_execute_handler - - request = { - "jsonrpc": "2.0", - "method": "message/send", - "params": { - "task": {"id": "test-task-123", "agent_id": "test-agent", "status": "RUNNING"}, - "message": { - "type": "text", - "author": "user", - "content": "Execute this task please", - }, - }, - "id": "test-execute-1", - } - - response = client.post("/api", json=request) - - assert response.status_code == 200 - data = response.json() - - # Verify JSON-RPC structure - assert data["jsonrpc"] == "2.0" - assert data["id"] == "test-execute-1" - assert "result" in data - assert data.get("error") is None - - # Verify the handler's result is returned directly (not "processing" status) - result = data["result"] - assert result["task_id"] == "test-task-123" - assert result["message_content"] == "Execute this task please" - assert result["status"] == "executed_synchronously" - assert result["custom_data"]["processed"] is True - assert result["custom_data"]["timestamp"] == "2024-01-01T12:00:00Z" - - # Verify it's NOT the async "processing" response - assert result.get("status") != "processing" - - def test_create_task_async_response(self, base_acp_server): - """Test that TASK_CREATE method returns processing status (async behavior)""" - client = TestClient(base_acp_server) - - # Add a mock handler for init task - async def mock_init_handler(params): - return { - "task_id": params.task.id, - "status": "initialized", - } - - base_acp_server._handlers[RPCMethod.TASK_CREATE] = mock_init_handler - - request = { - "jsonrpc": "2.0", - "method": "task/create", - "params": { - "task": {"id": "test-task-456", "agent_id": "test-agent", "status": "RUNNING"} - }, - "id": "test-init-1", - } - - response = client.post("/api", json=request) - - assert response.status_code == 200 - data = response.json() - - # Verify JSON-RPC structure - assert data["jsonrpc"] == "2.0" - assert data["id"] == "test-init-1" - assert "result" in data - assert data.get("error") is None - - # Verify it returns async "processing" status (not the handler's result) - result = data["result"] - assert result["status"] == "processing" - - # Verify it's NOT the handler's actual result - assert result.get("status") != "initialized" - - -class TestErrorHandling: - """Test error handling scenarios""" - - def test_invalid_json_request(self, base_acp_server): - """Test handling of invalid JSON in request body""" - client = TestClient(base_acp_server) - - # Send invalid JSON - response = client.post( - "/api", content="invalid json", headers={"Content-Type": "application/json"} - ) - - assert response.status_code == 200 - data = response.json() - assert "error" in data - assert data["jsonrpc"] == "2.0" - - def test_missing_required_fields(self, base_acp_server): - """Test handling of requests missing required JSON-RPC fields""" - client = TestClient(base_acp_server) - - # Missing method field - request = {"jsonrpc": "2.0", "params": {}, "id": "test-1"} - - response = client.post("/api", json=request) - - assert response.status_code == 200 - data = response.json() - assert "error" in data - - def test_invalid_method_enum(self, base_acp_server): - """Test handling of invalid method names""" - client = TestClient(base_acp_server) - - request = { - "jsonrpc": 
"2.0", - "method": "invalid/method/name", - "params": {}, - "id": "test-1", - } - - response = client.post("/api", json=request) - - assert response.status_code == 200 - data = response.json() - assert "error" in data - assert data["error"]["code"] == -32601 # Method not found - - @pytest.mark.asyncio - async def test_handler_exception_handling(self, async_base_acp_server): - """Test that handler exceptions are properly handled""" - - # Add a handler that raises an exception - async def failing_handler(params): - raise ValueError("Test exception") - - async_base_acp_server._handlers[RPCMethod.EVENT_SEND] = failing_handler - - client = TestClient(async_base_acp_server) - - request = { - "jsonrpc": "2.0", - "method": "event/send", - "params": { - "task": {"id": "test-task", "agent_id": "test-agent", "status": "RUNNING"}, - "message": { - "type": "text", - "author": "user", - "content": "test message", - }, - }, - "id": "test-1", - } - - response = client.post("/api", json=request) - - # Should still return immediate acknowledgment - assert response.status_code == 200 - data = response.json() - assert data["result"]["status"] == "processing" - - # Give background task time to fail - await asyncio.sleep(0.1) - # Exception should be logged but not crash the server diff --git a/src/agentex/lib/sdk/fastacp/tests/test_fastacp_factory.py b/src/agentex/lib/sdk/fastacp/tests/test_fastacp_factory.py deleted file mode 100644 index 8c62efa0..00000000 --- a/src/agentex/lib/sdk/fastacp/tests/test_fastacp_factory.py +++ /dev/null @@ -1,371 +0,0 @@ -import asyncio -from unittest.mock import AsyncMock, MagicMock, patch - -import pytest - -from agentex.lib.types.fastacp import ( - SyncACPConfig, - AsyncACPConfig, - TemporalACPConfig, - AsyncBaseACPConfig, -) -from agentex.lib.sdk.fastacp.fastacp import FastACP -from agentex.lib.sdk.fastacp.impl.sync_acp import SyncACP -from agentex.lib.sdk.fastacp.impl.temporal_acp import TemporalACP -from agentex.lib.sdk.fastacp.impl.async_base_acp import AsyncBaseACP -from agentex.lib.sdk.fastacp.base.base_acp_server import BaseACPServer - - -class TestFastACPInitialization: - """Test FastACP basic functionality""" - - def test_factory_class_exists(self): - """Test that FastACP class exists and is properly structured""" - assert hasattr(FastACP, "create") - assert hasattr(FastACP, "create_sync_acp") - assert hasattr(FastACP, "create_async_acp") - - -class TestSyncACPCreation: - """Test SyncACP creation through factory""" - - @pytest.mark.asyncio - async def test_create_sync_acp_direct_method(self): - """Test creating SyncACP using direct method""" - with patch.dict("os.environ", {"AGENTEX_BASE_URL": ""}): - sync_acp = FastACP.create_sync_acp() - - assert isinstance(sync_acp, SyncACP) - assert isinstance(sync_acp, BaseACPServer) - assert hasattr(sync_acp, "_handlers") - - @pytest.mark.asyncio - async def test_create_sync_acp_with_config(self): - """Test creating SyncACP with configuration""" - with patch.dict("os.environ", {"AGENTEX_BASE_URL": ""}): - config = SyncACPConfig() - sync_acp = FastACP.create_sync_acp(config=config) - - assert isinstance(sync_acp, SyncACP) - - @pytest.mark.asyncio - async def test_create_sync_acp_via_generic_create(self): - """Test creating SyncACP via generic create method""" - with patch.dict("os.environ", {"AGENTEX_BASE_URL": ""}): - sync_acp = FastACP.create("sync") - - assert isinstance(sync_acp, SyncACP) - - @pytest.mark.asyncio - async def test_create_sync_acp_via_generic_create_with_config(self): - """Test creating SyncACP via generic 
create method with config""" - with patch.dict("os.environ", {"AGENTEX_BASE_URL": ""}): - config = SyncACPConfig() - sync_acp = FastACP.create("sync", config=config) - - assert isinstance(sync_acp, SyncACP) - - @pytest.mark.asyncio - async def test_create_sync_acp_with_enum(self): - """Test creating SyncACP using ACPType enum""" - with patch.dict("os.environ", {"AGENTEX_BASE_URL": ""}): - sync_acp = FastACP.create("sync") - - assert isinstance(sync_acp, SyncACP) - - @pytest.mark.asyncio - async def test_create_sync_acp_with_kwargs(self): - """Test creating SyncACP with additional kwargs""" - with patch.dict("os.environ", {"AGENTEX_BASE_URL": ""}): - sync_acp = FastACP.create_sync_acp(custom_param="test_value") - - assert isinstance(sync_acp, SyncACP) - - -class TestAsyncBaseACPCreation: - """Test AsyncBaseACP creation through factory""" - - @pytest.mark.asyncio - async def test_create_async_base_acp_direct_method(self): - """Test creating AsyncBaseACP using direct method""" - with patch.dict("os.environ", {"AGENTEX_BASE_URL": ""}): - config = AsyncACPConfig(type="base") - async_acp = FastACP.create_async_acp(config=config) - - assert isinstance(async_acp, AsyncBaseACP) - assert isinstance(async_acp, BaseACPServer) - - @pytest.mark.asyncio - async def test_create_async_base_acp_with_specific_config(self): - """Test creating AsyncBaseACP with AsyncBaseACPConfig""" - with patch.dict("os.environ", {"AGENTEX_BASE_URL": ""}): - config = AsyncBaseACPConfig(type="base") - async_acp = FastACP.create_async_acp(config=config) - - assert isinstance(async_acp, AsyncBaseACP) - - @pytest.mark.asyncio - async def test_create_async_base_acp_via_generic_create(self): - """Test creating AsyncBaseACP via generic create method""" - with patch.dict("os.environ", {"AGENTEX_BASE_URL": ""}): - config = AsyncACPConfig(type="base") - async_acp = FastACP.create("async", config=config) - - assert isinstance(async_acp, AsyncBaseACP) - - @pytest.mark.asyncio - async def test_create_async_base_acp_with_enum(self): - """Test creating AsyncBaseACP using ACPType enum""" - with patch.dict("os.environ", {"AGENTEX_BASE_URL": ""}): - config = AsyncACPConfig(type="base") - async_acp = FastACP.create("async", config=config) - - assert isinstance(async_acp, AsyncBaseACP) - - -class TestAsyncTemporalACPCreation: - """Test AsyncTemporalACP (TemporalACP) creation through factory""" - - @pytest.mark.asyncio - async def test_create_temporal_acp_direct_method(self): - """Test creating TemporalACP using direct method""" - with patch.dict("os.environ", {"AGENTEX_BASE_URL": ""}): - config = AsyncACPConfig(type="temporal") - - # Mock the TemporalACP.create method since it requires temporal dependencies - with patch.object(TemporalACP, "create", new_callable=AsyncMock) as mock_create: - mock_temporal_instance = MagicMock(spec=TemporalACP) - mock_create.return_value = mock_temporal_instance - - temporal_acp = FastACP.create_async_acp(config=config) - - assert temporal_acp == mock_temporal_instance - mock_create.assert_called_once() - - @pytest.mark.asyncio - async def test_create_temporal_acp_with_temporal_config(self): - """Test creating TemporalACP with TemporalACPConfig""" - with patch.dict("os.environ", {"AGENTEX_BASE_URL": ""}): - config = TemporalACPConfig(type="temporal", temporal_address="localhost:7233") - - with patch.object(TemporalACP, "create", new_callable=AsyncMock) as mock_create: - mock_temporal_instance = MagicMock(spec=TemporalACP) - mock_create.return_value = mock_temporal_instance - - temporal_acp = 
FastACP.create_async_acp(config=config) - - assert temporal_acp == mock_temporal_instance - # Verify temporal_address was passed - mock_create.assert_called_once_with(temporal_address="localhost:7233") - - @pytest.mark.asyncio - async def test_create_temporal_acp_via_generic_create(self): - """Test creating TemporalACP via generic create method""" - with patch.dict("os.environ", {"AGENTEX_BASE_URL": ""}): - config = AsyncACPConfig(type="temporal") - - with patch.object(TemporalACP, "create", new_callable=AsyncMock) as mock_create: - mock_temporal_instance = MagicMock(spec=TemporalACP) - mock_create.return_value = mock_temporal_instance - - temporal_acp = FastACP.create("async", config=config) - - assert temporal_acp == mock_temporal_instance - - @pytest.mark.asyncio - async def test_create_temporal_acp_with_custom_address(self): - """Test creating TemporalACP with custom temporal address""" - with patch.dict("os.environ", {"AGENTEX_BASE_URL": ""}): - config = TemporalACPConfig(type="temporal", temporal_address="custom-temporal:9999") - - with patch.object(TemporalACP, "create", new_callable=AsyncMock) as mock_create: - mock_temporal_instance = MagicMock(spec=TemporalACP) - mock_create.return_value = mock_temporal_instance - - FastACP.create_async_acp(config=config) - - mock_create.assert_called_once_with(temporal_address="custom-temporal:9999") - - -class TestConfigurationValidation: - """Test configuration validation and error handling""" - - @pytest.mark.asyncio - async def test_async_requires_config(self): - """Test that async ACP creation requires configuration""" - with patch.dict("os.environ", {"AGENTEX_BASE_URL": ""}): - with pytest.raises(ValueError, match="AsyncACPConfig is required"): - FastACP.create("async") - - @pytest.mark.asyncio - async def test_async_requires_correct_config_type(self): - """Test that async ACP creation requires AsyncACPConfig type""" - with patch.dict("os.environ", {"AGENTEX_BASE_URL": ""}): - sync_config = SyncACPConfig() - - with pytest.raises(ValueError, match="AsyncACPConfig is required"): - FastACP.create("async", config=sync_config) - - @pytest.mark.asyncio - async def test_async_direct_method_requires_config(self): - """Test that direct async method requires configuration""" - with patch.dict("os.environ", {"AGENTEX_BASE_URL": ""}): - # This should raise TypeError since config is required parameter - with pytest.raises(TypeError): - FastACP.create_async_acp() # type: ignore[call-arg] - - def test_invalid_acp_type_string(self): - """Test that invalid ACP type string raises ValueError""" - with patch.dict("os.environ", {"AGENTEX_BASE_URL": ""}): - with pytest.raises(ValueError): - asyncio.run(FastACP.create("invalid_type")) - - def test_invalid_async_type_in_config(self): - """Test that invalid async type in config raises ValueError""" - with patch.dict("os.environ", {"AGENTEX_BASE_URL": ""}): - # This should raise ValueError during config creation - with pytest.raises(ValueError): - AsyncACPConfig(type="invalid_async_type") - - @pytest.mark.asyncio - async def test_unsupported_acp_type_enum(self): - """Test handling of unsupported ACP type enum values""" - with patch.dict("os.environ", {"AGENTEX_BASE_URL": ""}): - # Create a mock enum value that's not supported - with patch("agentex.sdk.fastacp.fastacp.ACPType") as mock_enum: - mock_enum.SYNC = "sync" - mock_enum.ASYNC = "async" - mock_enum.AGENTIC = "agentic" - unsupported_type = "unsupported" - - with pytest.raises(ValueError, match="Unsupported ACP type"): - FastACP.create(unsupported_type) - - 
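-# Illustrative sketch of the factory call shapes pinned down by the validation tests -# above, assuming the imports at the top of this module: -# -# with patch.dict("os.environ", {"AGENTEX_BASE_URL": ""}): # disable agent registration -# sync_acp = FastACP.create("sync") # sync needs no config -# async_acp = FastACP.create("async", config=AsyncACPConfig(type="base")) -# # "async" without an AsyncACPConfig raises ValueError("AsyncACPConfig is required") -# assert isinstance(sync_acp, SyncACP) and isinstance(async_acp, AsyncBaseACP) -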
-class TestErrorHandling: - """Test error handling scenarios""" - - @pytest.mark.asyncio - async def test_sync_acp_creation_failure(self): - """Test handling of SyncACP creation failure""" - with patch.dict("os.environ", {"AGENTEX_BASE_URL": ""}): - with patch.object(SyncACP, "create", side_effect=Exception("Creation failed")): - with pytest.raises(Exception, match="Creation failed"): - FastACP.create_sync_acp() - - @pytest.mark.asyncio - async def test_async_acp_creation_failure(self): - """Test handling of AsyncACP creation failure""" - with patch.dict("os.environ", {"AGENTEX_BASE_URL": ""}): - config = AsyncACPConfig(type="base") - - with patch.object(AsyncBaseACP, "create", side_effect=Exception("Creation failed")): - with pytest.raises(Exception, match="Creation failed"): - FastACP.create_async_acp(config=config) - - @pytest.mark.asyncio - async def test_temporal_acp_creation_failure(self): - """Test handling of TemporalACP creation failure""" - with patch.dict("os.environ", {"AGENTEX_BASE_URL": ""}): - config = AsyncACPConfig(type="temporal") - - with patch.object( - TemporalACP, "create", side_effect=Exception("Temporal connection failed") - ): - with pytest.raises(Exception, match="Temporal connection failed"): - FastACP.create_async_acp(config=config) - - -class TestIntegrationScenarios: - """Test integration scenarios and real-world usage patterns""" - - @pytest.mark.asyncio - async def test_create_all_acp_types(self): - """Test creating all supported ACP types""" - with patch.dict("os.environ", {"AGENTEX_BASE_URL": ""}): - # Create SyncACP - sync_acp = FastACP.create("sync") - assert isinstance(sync_acp, SyncACP) - - # Create AsyncBaseACP - base_config = AsyncACPConfig(type="base") - async_base = FastACP.create("async", config=base_config) - assert isinstance(async_base, AsyncBaseACP) - - # Create TemporalACP (mocked) - temporal_config = AsyncACPConfig(type="temporal") - with patch.object(TemporalACP, "create", new_callable=AsyncMock) as mock_create: - mock_temporal_instance = MagicMock(spec=TemporalACP) - mock_create.return_value = mock_temporal_instance - - temporal_acp = FastACP.create("async", config=temporal_config) - assert temporal_acp == mock_temporal_instance - - @pytest.mark.asyncio - async def test_async_type_backwards_compatibility(self): - """Test that 'async' type works the same as 'async' for backwards compatibility""" - with patch.dict("os.environ", {"AGENTEX_BASE_URL": ""}): - # Test async with base config - base_config = AsyncACPConfig(type="base") - async_base = FastACP.create("async", config=base_config) - assert isinstance(async_base, AsyncBaseACP) - - # Test async with temporal config (mocked) - temporal_config = AsyncACPConfig(type="temporal") - with patch.object(TemporalACP, "create", new_callable=AsyncMock) as mock_create: - mock_temporal_instance = MagicMock(spec=TemporalACP) - mock_create.return_value = mock_temporal_instance - - temporal_acp = FastACP.create("async", config=temporal_config) - assert temporal_acp == mock_temporal_instance - - # Test that async requires config - with pytest.raises(ValueError, match="AsyncACPConfig is required"): - sync_config = SyncACPConfig() - FastACP.create("async", config=sync_config) - - @pytest.mark.asyncio - async def test_configuration_driven_creation(self): - """Test configuration-driven ACP creation""" - with patch.dict("os.environ", {"AGENTEX_BASE_URL": ""}): - configs = [ - ("sync", None), - ("async", AsyncACPConfig(type="base")), - ("async", AsyncACPConfig(type="base")), - ("async", 
TemporalACPConfig(type="temporal", temporal_address="localhost:7233")), - ("async", TemporalACPConfig(type="temporal", temporal_address="localhost:7233")), - ] - - created_acps = [] - - for acp_type, config in configs: - if acp_type in ("async", "async") and config and config.type == "temporal": - # Mock temporal creation - with patch.object(TemporalACP, "create", new_callable=AsyncMock) as mock_create: - mock_temporal_instance = MagicMock(spec=TemporalACP) - mock_create.return_value = mock_temporal_instance - - acp = FastACP.create(acp_type, config=config) - created_acps.append(acp) - else: - acp = FastACP.create(acp_type, config=config) - created_acps.append(acp) - - assert len(created_acps) == 5 - assert isinstance(created_acps[0], SyncACP) - assert isinstance(created_acps[1], AsyncBaseACP) - assert isinstance(created_acps[2], AsyncBaseACP) - # Fourth and fifth ones are mocked TemporalACP - - @pytest.mark.asyncio - async def test_factory_with_custom_kwargs(self): - """Test factory methods with custom keyword arguments""" - with patch.dict("os.environ", {"AGENTEX_BASE_URL": ""}): - # Test sync with kwargs - sync_acp = FastACP.create_sync_acp(custom_param="test") - assert isinstance(sync_acp, SyncACP) - - # Test async base with kwargs - config = AsyncACPConfig(type="base") - async_acp = FastACP.create_async_acp(config=config, custom_param="test") - assert isinstance(async_acp, AsyncBaseACP) diff --git a/src/agentex/lib/sdk/fastacp/tests/test_integration.py b/src/agentex/lib/sdk/fastacp/tests/test_integration.py deleted file mode 100644 index c6f310af..00000000 --- a/src/agentex/lib/sdk/fastacp/tests/test_integration.py +++ /dev/null @@ -1,478 +0,0 @@ -# ruff: noqa: ARG001 -import asyncio -from unittest.mock import AsyncMock, MagicMock, patch - -import httpx -import pytest - -from agentex.lib.types.acp import ( - RPCMethod, - SendEventParams, - CancelTaskParams, - CreateTaskParams, -) -from agentex.lib.sdk.fastacp.impl.sync_acp import SyncACP -from agentex.lib.sdk.fastacp.impl.temporal_acp import TemporalACP -from agentex.lib.sdk.fastacp.impl.async_base_acp import AsyncBaseACP - - -class TestImplementationBehavior: - """Test specific behavior differences between ACP implementations""" - - @pytest.mark.asyncio() - async def test_sync_acp_default_handlers(self): - """Test SyncACP has expected default handlers""" - with patch.dict("os.environ", {"AGENTEX_BASE_URL": ""}): - sync_acp = SyncACP.create() - - # Should have send_message_message handler by default - assert RPCMethod.MESSAGE_SEND in sync_acp._handlers - - @pytest.mark.asyncio() - async def test_async_acp_default_handlers(self): - """Test AsyncBaseACP has expected default handlers""" - with patch.dict("os.environ", {"AGENTEX_BASE_URL": ""}): - async_acp = AsyncBaseACP.create() - - # Should have create, message, and cancel handlers by default - assert RPCMethod.TASK_CREATE in async_acp._handlers - assert RPCMethod.EVENT_SEND in async_acp._handlers - assert RPCMethod.TASK_CANCEL in async_acp._handlers - - @pytest.mark.asyncio() - async def test_temporal_acp_creation_with_mocked_client(self): - """Test TemporalACP creation with mocked temporal client""" - with patch.dict("os.environ", {"AGENTEX_BASE_URL": ""}): - with patch.object(TemporalACP, "create", new_callable=AsyncMock) as mock_create: - mock_temporal_instance = MagicMock(spec=TemporalACP) - mock_temporal_instance._handlers = {} - mock_temporal_instance.temporal_client = MagicMock() - mock_create.return_value = mock_temporal_instance - - temporal_acp = 
TemporalACP.create(temporal_address="localhost:7233") - - assert temporal_acp == mock_temporal_instance - assert hasattr(temporal_acp, "temporal_client") - - -class TestRealWorldScenarios: - """Test real-world usage scenarios and integration""" - - @pytest.mark.asyncio() - async def test_message_handling_workflow(self, sync_acp, free_port, test_server_runner): - """Test complete message handling workflow""" - messages_received = [] - - @sync_acp.on_task_event_send - async def message_handler(params: SendEventParams): - messages_received.append( - { - "task_id": params.task.id, - "message_content": params.message.content, # type: ignore[attr-defined] - "author": params.message.author, # type: ignore[attr-defined] - } - ) - return {"processed": True} - - runner = test_server_runner(sync_acp, free_port) - await runner.start() - - # Send multiple messages - async with httpx.AsyncClient() as client: - for i in range(3): - request_data = { - "jsonrpc": "2.0", - "method": "event/send", - "params": { - "task": { - "id": f"workflow-task-{i}", - "agent_id": "workflow-agent", - "status": "RUNNING", - }, - "message": { - "type": "text", - "author": "user", - "content": f"Workflow message {i}", - }, - }, - "id": f"workflow-{i}", - } - - response = await client.post(f"http://127.0.0.1:{free_port}/api", json=request_data) - assert response.status_code == 200 - - # Give background tasks time to process - await asyncio.sleep(0.2) - - # Verify all messages were processed - assert len(messages_received) == 3 - for i, msg in enumerate(messages_received): - assert msg["task_id"] == f"workflow-task-{i}" - assert msg["message_content"] == f"Workflow message {i}" - assert msg["author"] == "user" - - await runner.stop() - - @pytest.mark.asyncio() - async def test_task_lifecycle_management(self, async_base_acp, free_port, test_server_runner): - """Test complete task lifecycle: create -> message -> cancel""" - task_events = [] - - @async_base_acp.on_task_create - async def create_handler(params: CreateTaskParams): - task_events.append(("created", params.task.id)) - - @async_base_acp.on_task_event_send - async def message_handler(params: SendEventParams): - task_events.append(("message", params.task.id)) - - @async_base_acp.on_task_cancel - async def cancel_handler(params: CancelTaskParams): - task_events.append(("cancelled", params.task_id)) # type: ignore[attr-defined] - - runner = test_server_runner(async_base_acp, free_port) - await runner.start() - - async with httpx.AsyncClient() as client: - # Create task - create_request = { - "jsonrpc": "2.0", - "method": "task/create", - "params": { - "task": { - "id": "lifecycle-task", - "agent_id": "lifecycle-agent", - "status": "RUNNING", - } - }, - "id": "create-1", - } - - response = await client.post(f"http://127.0.0.1:{free_port}/api", json=create_request) - assert response.status_code == 200 - - # Send message - message_request = { - "jsonrpc": "2.0", - "method": "event/send", - "params": { - "task": { - "id": "lifecycle-task", - "agent_id": "lifecycle-agent", - "status": "RUNNING", - }, - "message": { - "type": "text", - "author": "user", - "content": "Lifecycle test message", - }, - }, - "id": "message-1", - } - - response = await client.post(f"http://127.0.0.1:{free_port}/api", json=message_request) - assert response.status_code == 200 - - # Cancel task - cancel_request = { - "jsonrpc": "2.0", - "method": "task/cancel", - "params": {"task_id": "lifecycle-task"}, - "id": "cancel-1", - } - - response = await client.post(f"http://127.0.0.1:{free_port}/api", 
json=cancel_request) - assert response.status_code == 200 - - # Give background tasks time to process - await asyncio.sleep(0.2) - - # Verify task lifecycle events - assert len(task_events) == 3 - assert task_events[0] == ("created", "lifecycle-task") - assert task_events[1] == ("message", "lifecycle-task") - assert task_events[2] == ("cancelled", "lifecycle-task") - - await runner.stop() - - -class TestErrorRecovery: - """Test error handling and recovery scenarios""" - - @pytest.mark.asyncio() - async def test_server_resilience_to_handler_failures( - self, sync_acp, free_port, test_server_runner - ): - """Test server continues working after handler failures""" - failure_count = 0 - success_count = 0 - - @sync_acp.on_task_event_send - async def unreliable_handler(params: SendEventParams): - nonlocal failure_count, success_count - if "fail" in params.message.content: # type: ignore[attr-defined] - failure_count += 1 - raise RuntimeError("Simulated handler failure") - else: - success_count += 1 - return {"success": True} - - runner = test_server_runner(sync_acp, free_port) - await runner.start() - - async with httpx.AsyncClient() as client: - # Send failing request - fail_request = { - "jsonrpc": "2.0", - "method": "event/send", - "params": { - "task": {"id": "fail-task", "agent_id": "test-agent", "status": "RUNNING"}, - "message": {"type": "text", "author": "user", "content": "This should fail"}, - }, - "id": "fail-1", - } - - response = await client.post(f"http://127.0.0.1:{free_port}/api", json=fail_request) - assert response.status_code == 200 # Server should still respond - - # Send successful request after failure - success_request = { - "jsonrpc": "2.0", - "method": "event/send", - "params": { - "task": {"id": "success-task", "agent_id": "test-agent", "status": "RUNNING"}, - "message": {"type": "text", "author": "user", "content": "This should succeed"}, - }, - "id": "success-1", - } - - response = await client.post(f"http://127.0.0.1:{free_port}/api", json=success_request) - assert response.status_code == 200 - - # Verify server is still healthy - health_response = await client.get(f"http://127.0.0.1:{free_port}/healthz") - assert health_response.status_code == 200 - - # Give background tasks time to process - await asyncio.sleep(0.2) - - assert failure_count == 1 - assert success_count == 1 - - await runner.stop() - - @pytest.mark.asyncio() - async def test_concurrent_request_handling(self, sync_acp, free_port, test_server_runner): - """Test handling multiple concurrent requests""" - processed_requests = [] - - @sync_acp.on_task_event_send - async def concurrent_handler(params: SendEventParams): - # Simulate some processing time - await asyncio.sleep(0.05) - processed_requests.append(params.task.id) - return {"processed": params.task.id} - - runner = test_server_runner(sync_acp, free_port) - await runner.start() - - # Send multiple concurrent requests - async def send_request(client, task_id): - request_data = { - "jsonrpc": "2.0", - "method": "event/send", - "params": { - "task": {"id": task_id, "agent_id": "concurrent-agent", "status": "RUNNING"}, - "message": { - "type": "text", - "author": "user", - "content": f"Concurrent message for {task_id}", - }, - }, - "id": f"concurrent-{task_id}", - } - - return await client.post(f"http://127.0.0.1:{free_port}/api", json=request_data) - - async with httpx.AsyncClient() as client: - # Send 5 concurrent requests - tasks = [send_request(client, f"task-{i}") for i in range(5)] - responses = await asyncio.gather(*tasks) - - # All should 
return immediate acknowledgment - for response in responses: - assert response.status_code == 200 - data = response.json() - assert data["result"]["status"] == "processing" - - # Give background tasks time to complete - await asyncio.sleep(0.3) - - # All requests should have been processed - assert len(processed_requests) == 5 - assert set(processed_requests) == {f"task-{i}" for i in range(5)} - - await runner.stop() - - -class TestSpecialCases: - """Test edge cases and special scenarios""" - - @pytest.mark.asyncio() - async def test_notification_vs_request_behavior(self, sync_acp, free_port, test_server_runner): - """Test difference between notifications (no ID) and requests (with ID)""" - notifications_received = 0 - requests_received = 0 - - @sync_acp.on_task_event_send - async def tracking_handler(params: SendEventParams): - nonlocal notifications_received, requests_received - if "notification" in params.message.content: # type: ignore[attr-defined] - notifications_received += 1 - else: - requests_received += 1 - return {"handled": True} - - runner = test_server_runner(sync_acp, free_port) - await runner.start() - - async with httpx.AsyncClient() as client: - # Send notification (no ID) - notification_data = { - "jsonrpc": "2.0", - "method": "event/send", - "params": { - "task": { - "id": "notification-task", - "agent_id": "test-agent", - "status": "RUNNING", - }, - "message": { - "type": "text", - "author": "user", - "content": "This is a notification", - }, - }, - # Note: no "id" field - } - - notification_response = await client.post( - f"http://127.0.0.1:{free_port}/api", json=notification_data - ) - assert notification_response.status_code == 200 - notification_result = notification_response.json() - assert notification_result["id"] is None - - # Send regular request (with ID) - request_data = { - "jsonrpc": "2.0", - "method": "event/send", - "params": { - "task": {"id": "request-task", "agent_id": "test-agent", "status": "RUNNING"}, - "message": {"type": "text", "author": "user", "content": "This is a request"}, - }, - "id": "request-1", - } - - request_response = await client.post( - f"http://127.0.0.1:{free_port}/api", json=request_data - ) - assert request_response.status_code == 200 - request_result = request_response.json() - assert request_result["id"] == "request-1" - assert request_result["result"]["status"] == "processing" - - # Give background tasks time to process - await asyncio.sleep(0.1) - - assert notifications_received == 1 - assert requests_received == 1 - - await runner.stop() - - @pytest.mark.asyncio() - async def test_unicode_message_handling(self, sync_acp, free_port, test_server_runner): - """Test handling of unicode characters in messages""" - received_message = None - - @sync_acp.on_task_event_send - async def unicode_handler(params: SendEventParams): - nonlocal received_message - received_message = params.message.content # type: ignore[attr-defined] - return {"unicode_handled": True} - - runner = test_server_runner(sync_acp, free_port) - await runner.start() - - unicode_text = "Hello ไธ–็•Œ ๐ŸŒ รฉmojis ๐Ÿš€ and special chars: \n\t\r" - - async with httpx.AsyncClient() as client: - request_data = { - "jsonrpc": "2.0", - "method": "event/send", - "params": { - "task": { - "id": "unicode-task", - "agent_id": "unicode-agent", - "status": "RUNNING", - }, - "message": {"type": "text", "author": "user", "content": unicode_text}, - }, - "id": "unicode-test", - } - - response = await client.post(f"http://127.0.0.1:{free_port}/api", json=request_data) - - assert 
response.status_code == 200 - - # Give background task time to process - await asyncio.sleep(0.1) - - assert received_message == unicode_text - - await runner.stop() - - -class TestImplementationIsolation: - """Test that different implementations don't interfere with each other""" - - @pytest.mark.asyncio() - async def test_handler_isolation_between_implementations(self): - """Test handlers registered on one implementation don't affect others""" - with patch.dict("os.environ", {"AGENTEX_BASE_URL": ""}): - sync_acp = SyncACP.create() - async_acp = AsyncBaseACP.create() - - sync_handled = False - async_handled = False - - @sync_acp.on_task_event_send - async def sync_handler(params: SendEventParams): - nonlocal sync_handled - sync_handled = True - return {"sync": True} - - @async_acp.on_task_event_send - async def async_handler(params: SendEventParams): - nonlocal async_handled - async_handled = True - return {"async": True} - - # Create test parameters - message_params = SendEventParams( # type: ignore[call-arg] - task={"id": "isolation-test-task", "agent_id": "test-agent", "status": "RUNNING"}, - event={"type": "text", "author": "user", "content": "Isolation test"}, # type: ignore[misc] - ) - - # Execute sync handler - sync_result = await sync_acp._handlers[RPCMethod.EVENT_SEND](message_params) - assert sync_handled is True - assert async_handled is False - assert sync_result == {"sync": True} - - # Reset and execute async handler - sync_handled = False - async_result = await async_acp._handlers[RPCMethod.EVENT_SEND](message_params) - assert sync_handled is False - assert async_handled is True - assert async_result == {"async": True} diff --git a/src/agentex/lib/sdk/state_machine/__init__.py b/src/agentex/lib/sdk/state_machine/__init__.py deleted file mode 100644 index 92dc35fe..00000000 --- a/src/agentex/lib/sdk/state_machine/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from .state import State -from .noop_workflow import NoOpWorkflow -from .state_machine import StateMachine -from .state_workflow import StateWorkflow - -__all__ = ["StateMachine", "StateWorkflow", "State", "NoOpWorkflow"] diff --git a/src/agentex/lib/sdk/state_machine/noop_workflow.py b/src/agentex/lib/sdk/state_machine/noop_workflow.py deleted file mode 100644 index a7c54cfb..00000000 --- a/src/agentex/lib/sdk/state_machine/noop_workflow.py +++ /dev/null @@ -1,25 +0,0 @@ -from __future__ import annotations - -from typing import TYPE_CHECKING, override - -from pydantic import BaseModel - -from agentex.lib.utils.logging import make_logger -from agentex.lib.sdk.state_machine.state_workflow import StateWorkflow - -if TYPE_CHECKING: - from agentex.lib.sdk.state_machine import StateMachine - -logger = make_logger(__name__) - - -class NoOpWorkflow(StateWorkflow): - """ - Workflow that does nothing. This is commonly used as a terminal state. 
- """ - - @override - async def execute( - self, state_machine: "StateMachine", state_machine_data: BaseModel | None = None - ) -> str: - return state_machine.get_current_state() # Stay in current state diff --git a/src/agentex/lib/sdk/state_machine/state.py b/src/agentex/lib/sdk/state_machine/state.py deleted file mode 100644 index 6ddddc0c..00000000 --- a/src/agentex/lib/sdk/state_machine/state.py +++ /dev/null @@ -1,10 +0,0 @@ -from pydantic import BaseModel, ConfigDict - -from agentex.lib.sdk.state_machine.state_workflow import StateWorkflow - - -class State(BaseModel): - model_config = ConfigDict(arbitrary_types_allowed=True) - - name: str - workflow: StateWorkflow diff --git a/src/agentex/lib/sdk/state_machine/state_machine.py b/src/agentex/lib/sdk/state_machine/state_machine.py deleted file mode 100644 index 6f2acded..00000000 --- a/src/agentex/lib/sdk/state_machine/state_machine.py +++ /dev/null @@ -1,197 +0,0 @@ -from __future__ import annotations - -from abc import ABC, abstractmethod -from typing import Any, Generic, TypeVar - -from agentex.lib import adk -from agentex.lib.utils.model_utils import BaseModel -from agentex.lib.sdk.state_machine.state import State -from agentex.lib.sdk.state_machine.state_workflow import StateWorkflow - -T = TypeVar("T", bound=BaseModel) - - -class StateMachine(ABC, Generic[T]): - def __init__( - self, - initial_state: str, - states: list[State], - task_id: str | None = None, - state_machine_data: T | None = None, - trace_transitions: bool = False, - ): - self._task_id = task_id - self._state_map: dict[str, State] = {state.name: state for state in states} - self.state_machine_data = state_machine_data - self._initial_state = initial_state - self._trace_transitions = trace_transitions - - # Validate that initial state exists - if initial_state not in self._state_map: - raise ValueError(f"Initial state '{initial_state}' not found in states") - self._current_state = self._state_map[initial_state] - - def set_task_id(self, task_id: str): - self._task_id = task_id - - def get_current_state(self) -> str: - return self._current_state.name - - def get_current_workflow(self) -> StateWorkflow: - """ - Get the workflow of the current state. 
- - Returns: - The workflow of the current state - - Raises: - ValueError: If the current state is not found in the state map - """ - current_state = self._state_map.get(self.get_current_state()) - if not current_state: - raise ValueError(f"State {self.get_current_state()} not found") - return current_state.workflow - - async def transition(self, target_state_name: str): - if not self._state_map.get(target_state_name): - raise ValueError(f"State {target_state_name} not found") - self._current_state = self._state_map[target_state_name] - - def get_state_machine_data(self) -> T | None: - return self.state_machine_data - - def require_state_machine_data(self) -> T: - """Get state machine data, raising an error if not set.""" - if self.state_machine_data is None: - raise ValueError("State machine data not initialized - ensure data is provided") - return self.state_machine_data - - @abstractmethod - async def terminal_condition(self) -> bool: - pass - - # Override this if you want to add more logic to the state machine - async def run(self): - while not await self.terminal_condition(): - await self.step() - - async def step(self) -> str: - current_state_name = self.get_current_state() - current_state = self._state_map.get(current_state_name) - if current_state is None: - raise ValueError(f"Current state '{current_state_name}' not found in state map") - - span = None - if self._trace_transitions: - if self._task_id is None: - raise ValueError( - "Task ID must be set before tracing can be enabled" - ) - span = await adk.tracing.start_span( - trace_id=self._task_id, - name="state_transition", - input=self.require_state_machine_data().model_dump(), - data={"input_state": current_state_name}, - ) - - next_state_name = await current_state.workflow.execute( - state_machine=self, state_machine_data=self.state_machine_data - ) - - if self._trace_transitions and span is not None: - span.output = self.require_state_machine_data().model_dump() # type: ignore[assignment] - if span.data is not None: - span.data["output_state"] = next_state_name # type: ignore[index] - await adk.tracing.end_span(trace_id=self._task_id, span=span) - - await self.transition(next_state_name) - - return next_state_name - - async def reset_to_initial_state(self): - """ - Reset the state machine to its initial state. - """ - if self._trace_transitions: - if self._task_id is None: - raise ValueError( - "Task ID must be set before tracing can be enabled" - ) - span = await adk.tracing.start_span( - trace_id=self._task_id, - name="state_transition_reset", - input={"input_state": self.get_current_state()}, - ) - - await self.transition(self._initial_state) - - if self._trace_transitions: - span.output = {"output_state": self._initial_state} # type: ignore[assignment,union-attr] - await adk.tracing.end_span(trace_id=self._task_id, span=span) - - def dump(self) -> dict[str, Any]: - """ - Save the current state of the state machine to a serializable dictionary. - This includes the current state, task_id, state machine data, and initial state.
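A sketch of the persistence round trip that `dump` and the `load` classmethod below are built for, reusing the hypothetical `CounterMachine` from the earlier sketch:

```python
async def checkpoint_and_restore(machine: CounterMachine) -> CounterMachine:
    # Snapshot the machine into a JSON-safe dict (current state, data, task_id).
    snapshot = machine.dump()
    # Later, possibly in a fresh process, rebuild from the snapshot. `load` recovers
    # the data model type (CounterData) from the StateMachine[CounterData] base.
    return await CounterMachine.load(
        snapshot,
        states=[
            State(name="counting", workflow=IncrementWorkflow()),
            State(name="done", workflow=NoOpWorkflow()),
        ],
    )
```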
- - Returns: - Dict[str, Any]: A dictionary containing the serialized state machine state - """ - return { - "task_id": self._task_id, - "current_state": self.get_current_state(), - "initial_state": self._initial_state, - "state_machine_data": self.state_machine_data.model_dump(mode="json") - if self.state_machine_data - else None, - "trace_transitions": self._trace_transitions, - } - - @classmethod - async def load(cls, data: dict[str, Any], states: list[State]) -> "StateMachine[T]": - """ - Load a state machine from a previously saved dictionary. - - Args: - data: The dictionary containing the saved state machine state - states: List of all possible states - - Returns: - StateMachine: A new state machine instance restored to the saved state - - Raises: - ValueError: If the data is invalid or missing required fields - """ - try: - task_id = data.get("task_id") - current_state_name = data.get("current_state") - initial_state = data.get("initial_state") - state_machine_data_dict = data.get("state_machine_data") - trace_transitions = data.get("trace_transitions") - - if initial_state is None: - raise ValueError("Initial state not found in saved data") - - # Reconstruct the state machine data into its Pydantic model - state_machine_data = None - if state_machine_data_dict is not None: - # Get the actual model type from the class's type parameters - model_type = cls.__orig_bases__[0].__args__[0] # type: ignore[attr-defined] - state_machine_data = model_type.model_validate(state_machine_data_dict) - - # Create a new instance - instance = cls( - initial_state=initial_state, - states=states, - task_id=task_id, - state_machine_data=state_machine_data, - trace_transitions=trace_transitions, - ) - - # If there's a saved state, transition to it - if current_state_name: - await instance.transition(target_state_name=current_state_name) - - return instance - except Exception as e: - raise ValueError(f"Failed to restore state machine: {str(e)}") from e diff --git a/src/agentex/lib/sdk/state_machine/state_workflow.py b/src/agentex/lib/sdk/state_machine/state_workflow.py deleted file mode 100644 index cca7f46a..00000000 --- a/src/agentex/lib/sdk/state_machine/state_workflow.py +++ /dev/null @@ -1,18 +0,0 @@ -from __future__ import annotations - -from abc import ABC, abstractmethod -from typing import TYPE_CHECKING - -from pydantic import BaseModel - -# Import StateMachine only for type checking to avoid circular imports -if TYPE_CHECKING: - from agentex.lib.sdk.state_machine import StateMachine - - -class StateWorkflow(ABC): - @abstractmethod - async def execute( - self, state_machine: "StateMachine", state_machine_data: BaseModel | None = None - ) -> str: - pass diff --git a/src/agentex/lib/sdk/utils/__init__.py b/src/agentex/lib/sdk/utils/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/agentex/lib/sdk/utils/messages.py b/src/agentex/lib/sdk/utils/messages.py deleted file mode 100644 index bddd8105..00000000 --- a/src/agentex/lib/sdk/utils/messages.py +++ /dev/null @@ -1,225 +0,0 @@ -from __future__ import annotations - -import json -from abc import ABC, abstractmethod -from typing import Any, Literal, override - -from agentex.types.data_content import DataContent -from agentex.types.task_message import TaskMessage -from agentex.types.text_content import TextContent -from agentex.lib.types.llm_messages import ( - Message, - ToolCall, - ToolMessage, - UserMessage, - ToolCallRequest, - AssistantMessage, -) -from agentex.types.tool_request_content import ToolRequestContent 
-from agentex.types.tool_response_content import ToolResponseContent - - -class TaskMessageConverter(ABC): - """ - Abstract base class for converting a specific type of TaskMessage to an LLM Message. - - Each converter should be responsible for one content type. - """ - - @abstractmethod - def convert(self, task_message: TaskMessage) -> Message: - """ - Convert a TaskMessage to an LLM Message. - - Args: - task_message: The TaskMessage to convert - - Returns: - A Message (Pydantic model) - """ - pass - - -class DefaultTextContentConverter(TaskMessageConverter): - """Converter for TEXT content type.""" - - @override - def convert(self, task_message: TaskMessage) -> Message: - """Convert TEXT content to UserMessage or AssistantMessage based on author.""" - if not isinstance(task_message.content, TextContent): - raise ValueError(f"Expected TextContent, got {type(task_message.content)}") - content = task_message.content - if content.author == "user": - return UserMessage(content=content.content) - else: # AGENT or custom author - return AssistantMessage(content=content.content) - - -class DefaultToolRequestConverter(TaskMessageConverter): - """Converter for TOOL_REQUEST content type.""" - - @override - def convert(self, task_message: TaskMessage) -> Message: - """Convert TOOL_REQUEST content to AssistantMessage with tool_calls.""" - if not isinstance(task_message.content, ToolRequestContent): - raise ValueError(f"Expected ToolRequestContent, got {type(task_message.content)}") - - content = task_message.content - - # Ensure arguments are properly JSON serialized - arguments_str = json.dumps(content.arguments) - - tool_call = ToolCallRequest( - id=content.tool_call_id, - function=ToolCall(name=content.name, arguments=arguments_str), - ) - return AssistantMessage(content=None, tool_calls=[tool_call]) - - -class DefaultToolResponseConverter(TaskMessageConverter): - """Converter for TOOL_RESPONSE content type.""" - - @override - def convert(self, task_message: TaskMessage) -> Message: - """Convert TOOL_RESPONSE content to ToolMessage.""" - if not isinstance(task_message.content, ToolResponseContent): - raise ValueError(f"Expected ToolResponseContent, got {type(task_message.content)}") - - content = task_message.content - return ToolMessage( - content=str(content.content), - tool_call_id=content.tool_call_id, - name=content.name, - ) - - -class DefaultDataContentConverter(TaskMessageConverter): - """Converter for DATA content type.""" - - @override - def convert(self, task_message: TaskMessage) -> Message: - """Convert DATA content to UserMessage or AssistantMessage based on author.""" - if not isinstance(task_message.content, DataContent): - raise ValueError(f"Expected DataContent, got {type(task_message.content)}") - - content = task_message.content - content_str = str(content.data) - if content.author == "user": - return UserMessage(content=content_str) - else: # AGENT or custom author - return AssistantMessage(content=content_str) - - -class DefaultUnknownContentConverter(TaskMessageConverter): - """Converter for unknown content types.""" - - @override - def convert(self, task_message: TaskMessage) -> Message: - """Convert unknown content types to AssistantMessage with fallback text.""" - - content = task_message.content - fallback_content = f"Unknown message type: {content.type}" - return AssistantMessage(content=fallback_content) - - -def convert_task_message_to_llm_messages( - task_message: TaskMessage, - output_mode: Literal["pydantic", "dict"] = "pydantic", - text_converter: 
TaskMessageConverter | None = None, - tool_request_converter: TaskMessageConverter | None = None, - tool_response_converter: TaskMessageConverter | None = None, - data_converter: TaskMessageConverter | None = None, - unknown_converter: TaskMessageConverter | None = None, -) -> Message | dict[str, Any]: - """ - Convert a TaskMessage to an LLM Message format. - - Args: - task_message: The TaskMessage to convert - output_mode: Whether to return a Pydantic model or dict - text_converter: Optional converter for TEXT content. Uses DefaultTextContentConverter if None. - tool_request_converter: Optional converter for TOOL_REQUEST content. Uses DefaultToolRequestConverter if None. - tool_response_converter: Optional converter for TOOL_RESPONSE content. Uses DefaultToolResponseConverter if None. - data_converter: Optional converter for DATA content. Uses DefaultDataContentConverter if None. - unknown_converter: Optional converter for unknown content. Uses DefaultUnknownContentConverter if None. - - Returns: - Either a Message (Pydantic model) or dict representation - """ - content = task_message.content - - # Get the appropriate converter for this content type - if content.type == "text": - converter = ( - text_converter - if text_converter is not None - else DefaultTextContentConverter() - ) - elif content.type == "tool_request": - converter = ( - tool_request_converter - if tool_request_converter is not None - else DefaultToolRequestConverter() - ) - elif content.type == "tool_response": - converter = ( - tool_response_converter - if tool_response_converter is not None - else DefaultToolResponseConverter() - ) - elif content.type == "data": - converter = ( - data_converter - if data_converter is not None - else DefaultDataContentConverter() - ) - else: - converter = ( - unknown_converter - if unknown_converter is not None - else DefaultUnknownContentConverter() - ) - - message = converter.convert(task_message) - - if output_mode == "dict": - return message.model_dump() - return message - - -def convert_task_messages_to_llm_messages( - task_messages: list[TaskMessage], - output_mode: Literal["pydantic", "dict"] = "pydantic", - text_converter: TaskMessageConverter | None = None, - tool_request_converter: TaskMessageConverter | None = None, - tool_response_converter: TaskMessageConverter | None = None, - data_converter: TaskMessageConverter | None = None, - unknown_converter: TaskMessageConverter | None = None, -) -> list[Message | dict[str, Any]]: - """ - Convert a list of TaskMessages to LLM Message format. - - Args: - task_messages: List of TaskMessages to convert - output_mode: Whether to return Pydantic models or dicts - text_converter: Optional converter for TEXT content. Uses DefaultTextContentConverter if None. - tool_request_converter: Optional converter for TOOL_REQUEST content. Uses DefaultToolRequestConverter if None. - tool_response_converter: Optional converter for TOOL_RESPONSE content. Uses DefaultToolResponseConverter if None. - data_converter: Optional converter for DATA content. Uses DefaultDataContentConverter if None. - unknown_converter: Optional converter for unknown content. Uses DefaultUnknownContentConverter if None. 
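As an illustration of the override hooks above, a caller might route only text content through a custom converter while every other content type keeps its default; `PrefixingTextConverter` and its prefix are hypothetical:

```python
from typing import override

from agentex.lib.sdk.utils.messages import (
    TaskMessageConverter,
    convert_task_message_to_llm_messages,
)
from agentex.lib.types.llm_messages import Message, UserMessage
from agentex.types.task_message import TaskMessage


class PrefixingTextConverter(TaskMessageConverter):
    """Hypothetical converter that tags forwarded user text."""

    @override
    def convert(self, task_message: TaskMessage) -> Message:
        # Sketch: assumes TEXT content, since only text is routed here.
        return UserMessage(content=f"[forwarded] {task_message.content.content}")


# Only "text" content goes through the custom converter; the rest use defaults:
# llm_message = convert_task_message_to_llm_messages(
#     some_task_message, text_converter=PrefixingTextConverter()
# )
```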
- - Returns: - List of either Messages (Pydantic models) or dicts - """ - return [ - convert_task_message_to_llm_messages( - task_message, - output_mode, - text_converter, - tool_request_converter, - tool_response_converter, - data_converter, - unknown_converter, - ) - for task_message in task_messages - ] diff --git a/src/agentex/lib/types/__init__.py b/src/agentex/lib/types/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/agentex/lib/types/acp.py b/src/agentex/lib/types/acp.py deleted file mode 100644 index d719b4fd..00000000 --- a/src/agentex/lib/types/acp.py +++ /dev/null @@ -1,116 +0,0 @@ -from __future__ import annotations - -from enum import Enum -from typing import Any - -from pydantic import Field, BaseModel - -from agentex.types.task import Task -from agentex.types.agent import Agent -from agentex.types.event import Event -from agentex.types.task_message_content import TaskMessageContent - - -class RPCMethod(str, Enum): - """Available JSON-RPC methods for agent communication.""" - - EVENT_SEND = "event/send" - MESSAGE_SEND = "message/send" - TASK_CANCEL = "task/cancel" - TASK_CREATE = "task/create" - - -class CreateTaskParams(BaseModel): - """Parameters for task/create method. - - Attributes: - agent: The agent that the task was sent to. - task: The task to be created. - params: The parameters for the task as inputted by the user. - request: Additional request context including headers forwarded to this agent. - """ - - agent: Agent = Field(..., description="The agent that the task was sent to") - task: Task = Field(..., description="The task to be created") - params: dict[str, Any] | None = Field( - None, - description="The parameters for the task as inputted by the user", - ) - request: dict[str, Any] | None = Field( - default=None, - description="Additional request context including headers forwarded to this agent", - ) - - -class SendMessageParams(BaseModel): - """Parameters for message/send method. - - Attributes: - agent: The agent that the message was sent to. - task: The task that the message was sent to. - content: The message that was sent to the agent. - stream: Whether to stream the message back to the agentex server from the agent. - request: Additional request context including headers forwarded to this agent. - """ - - agent: Agent = Field(..., description="The agent that the message was sent to") - task: Task = Field(..., description="The task that the message was sent to") - content: TaskMessageContent = Field( - ..., description="The message that was sent to the agent" - ) - stream: bool = Field( - False, - description="Whether to stream the message back to the agentex server from the agent", - ) - request: dict[str, Any] | None = Field( - default=None, - description="Additional request context including headers forwarded to this agent", - ) - - -class SendEventParams(BaseModel): - """Parameters for event/send method. - - Attributes: - agent: The agent that the event was sent to. - task: The task that the message was sent to. - event: The event that was sent to the agent. - request: Additional request context including headers forwarded to this agent. 
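These parameter models pair with the `PARAMS_MODEL_BY_METHOD` table at the end of this module, which supports a dispatch pattern like the following sketch; the `parse_params` helper is hypothetical, and `JSONRPCRequest` is the model from `agentex.lib.types.json_rpc` that appears later in this diff:

```python
from agentex.lib.types.acp import PARAMS_MODEL_BY_METHOD, RPCMethod
from agentex.lib.types.json_rpc import JSONRPCRequest


def parse_params(request: JSONRPCRequest):
    """Hypothetical helper: validate raw JSON-RPC params into the typed model."""
    method = RPCMethod(request.method)  # raises ValueError for unknown methods
    params_model = PARAMS_MODEL_BY_METHOD[method]
    return params_model.model_validate(request.params)
```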
- """ - - agent: Agent = Field(..., description="The agent that the event was sent to") - task: Task = Field(..., description="The task that the message was sent to") - event: Event = Field(..., description="The event that was sent to the agent") - request: dict[str, Any] | None = Field( - default=None, - description="Additional request context including headers forwarded to this agent", - ) - - -class CancelTaskParams(BaseModel): - """Parameters for task/cancel method. - - Attributes: - agent: The agent that the task was sent to. - task: The task that was cancelled. - request: Additional request context including headers forwarded to this agent. - """ - - agent: Agent = Field(..., description="The agent that the task was sent to") - task: Task = Field(..., description="The task that was cancelled") - request: dict[str, Any] | None = Field( - default=None, - description="Additional request context including headers forwarded to this agent", - ) - - -RPC_SYNC_METHODS = [ - RPCMethod.MESSAGE_SEND, -] - -PARAMS_MODEL_BY_METHOD: dict[RPCMethod, type[BaseModel]] = { - RPCMethod.EVENT_SEND: SendEventParams, - RPCMethod.TASK_CANCEL: CancelTaskParams, - RPCMethod.MESSAGE_SEND: SendMessageParams, - RPCMethod.TASK_CREATE: CreateTaskParams, -} diff --git a/src/agentex/lib/types/agent_configs.py b/src/agentex/lib/types/agent_configs.py deleted file mode 100644 index 7a3a0f09..00000000 --- a/src/agentex/lib/types/agent_configs.py +++ /dev/null @@ -1,86 +0,0 @@ -from __future__ import annotations - -from pydantic import Field, BaseModel, validator, model_validator - - -class TemporalWorkflowConfig(BaseModel): - """ - Configuration for the temporal workflow that defines the agent. - - Attributes: - name: The name of the temporal workflow that defines the agent. - queue_name: The name of the temporal queue to send tasks to. - """ - - name: str = Field( - ..., description="The name of the temporal workflow that defines the agent." - ) - queue_name: str = Field( - ..., description="The name of the temporal queue to send tasks to." - ) - - -# TODO: Remove this class when we remove the agentex agents create -class TemporalWorkerConfig(BaseModel): - """ - Configuration for temporal worker deployment - - Attributes: - image: The image to use for the temporal worker - workflow: The temporal workflow configuration - """ - - image: str | None = Field( - default=None, description="Image to use for the temporal worker" - ) - workflow: TemporalWorkflowConfig | None = Field( - default=None, - description="Configuration for the temporal workflow that defines the agent. Only required for agents that leverage Temporal.", - ) - - -class TemporalConfig(BaseModel): - """ - Simplified temporal configuration for agents - - Attributes: - enabled: Whether this agent uses Temporal workflows - workflow: The temporal workflow configuration - workflows: The list of temporal workflow configurations - health_check_port: Port for temporal worker health check endpoint - """ - - enabled: bool = Field( - default=False, description="Whether this agent uses Temporal workflows" - ) - workflow: TemporalWorkflowConfig | None = Field( - default=None, - description="Temporal workflow configuration. Required when enabled=True. (deprecated: use workflows instead)", - ) - workflows: list[TemporalWorkflowConfig] | None = Field( - default=None, - description="List of temporal workflow configurations. Used when enabled=true.", - ) - health_check_port: int | None = Field( - default=None, - description="Port for temporal worker health check endpoint. 
Defaults to 80 if not specified.", - ) - - @validator("workflows") - def validate_workflows_not_empty(cls, v): - """Ensure workflows list is not empty when provided""" - if v is not None and len(v) == 0: - raise ValueError("workflows list cannot be empty when provided") - return v - - @model_validator(mode="after") - def validate_temporal_config_when_enabled(self): - """Validate that workflow configuration exists when enabled=true""" - if self.enabled: - # Must have either workflow (legacy) or workflows (new) - if not self.workflow and (not self.workflows or len(self.workflows) == 0): - raise ValueError( - "When temporal.enabled=true, either 'workflow' or 'workflows' must be provided and non-empty" - ) - - return self diff --git a/src/agentex/lib/types/agent_results.py b/src/agentex/lib/types/agent_results.py deleted file mode 100644 index 909593c1..00000000 --- a/src/agentex/lib/types/agent_results.py +++ /dev/null @@ -1,31 +0,0 @@ -from __future__ import annotations - -from typing import Any - -from pydantic import BaseModel - - -class SerializableRunResult(BaseModel): - """ - Serializable version of RunResult. - - Attributes: - final_output: The final output of the run. - final_input_list: The final input list of the run. - """ - - final_output: Any - final_input_list: list[dict[str, Any]] - - -class SerializableRunResultStreaming(BaseModel): - """ - Serializable version of RunResultStreaming. - - Attributes: - final_output: The final output of the run. - final_input_list: The final input list of the run. - """ - - final_output: Any - final_input_list: list[dict[str, Any]] diff --git a/src/agentex/lib/types/converters.py b/src/agentex/lib/types/converters.py deleted file mode 100644 index 1e3676b5..00000000 --- a/src/agentex/lib/types/converters.py +++ /dev/null @@ -1,64 +0,0 @@ -from __future__ import annotations - -import json - -from agents import TResponseInputItem - -from agentex.types.task_message import TaskMessage -from agentex.types.text_content import TextContent -from agentex.types.tool_request_content import ToolRequestContent -from agentex.types.tool_response_content import ToolResponseContent - - -def convert_task_messages_to_oai_agents_inputs( - task_messages: list[TaskMessage], -) -> list[TResponseInputItem]: - """ - Convert a list of TaskMessages to a list of OpenAI Agents SDK inputs (TResponseInputItem). - - Args: - task_messages: The list of TaskMessages to convert. - - Returns: - A list of OpenAI Agents SDK inputs (TResponseInputItem). 
- """ - converted_messages = [] - for task_message in task_messages: - task_message_content = task_message.content - if isinstance(task_message_content, TextContent): - converted_messages.append( - { - "role": ( - "user" if task_message_content.author == "user" else "assistant" - ), - "content": task_message_content.content, - } - ) - elif isinstance(task_message_content, ToolRequestContent): - converted_messages.append( - { - "type": "function_call", - "call_id": task_message_content.tool_call_id, - "name": task_message_content.name, - "arguments": json.dumps(task_message_content.arguments), - } - ) - elif isinstance(task_message_content, ToolResponseContent): - content_str = ( - task_message_content.content - if isinstance(task_message_content.content, str) - else json.dumps(task_message_content.content) - ) - converted_messages.append( - { - "type": "function_call_output", - "call_id": task_message_content.tool_call_id, - "output": content_str, - } - ) - else: - raise ValueError( - f"Unsupported content type for converting TaskMessage to OpenAI Agents SDK input: {type(task_message.content)}" - ) - - return converted_messages diff --git a/src/agentex/lib/types/credentials.py b/src/agentex/lib/types/credentials.py deleted file mode 100644 index 7f4b79d1..00000000 --- a/src/agentex/lib/types/credentials.py +++ /dev/null @@ -1,34 +0,0 @@ -from pydantic import Field, BaseModel - - -class CredentialMapping(BaseModel): - """Maps a Kubernetes secret to an environment variable in the agent container. - - This allows agents to securely access credentials stored in Kubernetes secrets - by mapping them to environment variables. For example, you can map a secret - containing an API key to an environment variable that your agent code expects. - - Example: - A mapping of {"env_var_name": "OPENAI_API_KEY", - "secret_name": "ai-credentials", - "secret_key": "openai-key"} - will make the value from the "openai-key" field in the "ai-credentials" - Kubernetes secret available to the agent as OPENAI_API_KEY environment variable. 
- - Attributes: - env_var_name: The name of the environment variable that will be available to the agent - secret_name: The name of the Kubernetes secret containing the credential - secret_key: The key within the Kubernetes secret that contains the credential value - """ - - env_var_name: str = Field( - ..., - description="Name of the environment variable that will be available to the agent", - ) - secret_name: str = Field( - ..., description="Name of the Kubernetes secret containing the credential" - ) - secret_key: str = Field( - ..., - description="Key within the Kubernetes secret that contains the credential value", - ) diff --git a/src/agentex/lib/types/fastacp.py b/src/agentex/lib/types/fastacp.py deleted file mode 100644 index c589a0c9..00000000 --- a/src/agentex/lib/types/fastacp.py +++ /dev/null @@ -1,84 +0,0 @@ -from __future__ import annotations - -from typing import Any, Literal - -from pydantic import Field, BaseModel, field_validator - -from agentex.lib.core.clients.temporal.utils import validate_client_plugins, validate_worker_interceptors - - -class BaseACPConfig(BaseModel): - """ - Base configuration for all ACP implementations. Defines no fields of its own. - """ - - pass - - -class SyncACPConfig(BaseACPConfig): - """ - Configuration for the SyncACP implementation. Adds no fields beyond the base. - """ - - pass - - -class AsyncACPConfig(BaseACPConfig): - """ - Base class for async ACP configurations - - Attributes: - type: The type of ACP implementation - """ - - type: Literal["temporal", "base"] = Field(..., frozen=True) - -AgenticACPConfig = AsyncACPConfig - -class TemporalACPConfig(AsyncACPConfig): - """ - Configuration for TemporalACP implementation - - Attributes: - type: The type of ACP implementation - temporal_address: The address of the temporal server - plugins: List of Temporal client plugins - interceptors: List of Temporal worker interceptors - """ - - type: Literal["temporal"] = Field(default="temporal", frozen=True) - temporal_address: str = Field(default="temporal-frontend.temporal.svc.cluster.local:7233", frozen=True) - plugins: list[Any] = Field(default=[], frozen=True) - interceptors: list[Any] = Field(default=[], frozen=True) - - @field_validator("plugins") - @classmethod - def validate_plugins(cls, v: list[Any]) -> list[Any]: - """Validate that all plugins are valid Temporal client plugins.""" - validate_client_plugins(v) - return v - - @field_validator("interceptors") - @classmethod - def validate_interceptors(cls, v: list[Any]) -> list[Any]: - """Validate that all interceptors are valid Temporal worker interceptors.""" - validate_worker_interceptors(v) - return v - - -class AsyncBaseACPConfig(AsyncACPConfig): - """Configuration for AsyncBaseACP implementation - - Attributes: - type: The type of ACP implementation - """ - - type: Literal["base"] = Field(default="base", frozen=True) - -AgenticBaseACPConfig = AsyncBaseACPConfig \ No newline at end of file diff --git a/src/agentex/lib/types/files.py b/src/agentex/lib/types/files.py deleted file mode 100644 index ddf104dd..00000000 --- a/src/agentex/lib/types/files.py +++ /dev/null @@ -1,13 +0,0 @@ -from agentex.lib.utils.model_utils import BaseModel - - -class FileContentResponse(BaseModel): - """Response model for downloaded file content.
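A small sketch of how the discriminated `type` field on the ACP configs above lets callers pick an async ACP flavor; the `localhost` address is a hypothetical local-dev value:

```python
from agentex.lib.types.fastacp import AsyncBaseACPConfig, TemporalACPConfig

temporal_cfg = TemporalACPConfig(temporal_address="localhost:7233")
base_cfg = AsyncBaseACPConfig()

assert temporal_cfg.type == "temporal"
assert base_cfg.type == "base"
```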
- - Attributes: - mime_type: The MIME type of the file - base64_content: The base64 encoded content of the file - """ - - mime_type: str - base64_content: str diff --git a/src/agentex/lib/types/json_rpc.py b/src/agentex/lib/types/json_rpc.py deleted file mode 100644 index b89e9d6b..00000000 --- a/src/agentex/lib/types/json_rpc.py +++ /dev/null @@ -1,51 +0,0 @@ -from __future__ import annotations - -from typing import Any, Literal - -from agentex.lib.utils.model_utils import BaseModel - - -class JSONRPCError(BaseModel): - """JSON-RPC 2.0 Error - - Attributes: - code: The error code - message: The error message - data: The error data - """ - - code: int - message: str - data: Any | None = None - - -class JSONRPCRequest(BaseModel): - """JSON-RPC 2.0 Request - - Attributes: - jsonrpc: The JSON-RPC version - method: The method to call - params: The parameters for the request - id: The ID of the request - """ - - jsonrpc: Literal["2.0"] = "2.0" - method: str - params: dict[str, Any] - id: int | str | None = None - - -class JSONRPCResponse(BaseModel): - """JSON-RPC 2.0 Response - - Attributes: - jsonrpc: The JSON-RPC version - result: The result of the request - error: The error of the request - id: The ID of the request - """ - - jsonrpc: Literal["2.0"] = "2.0" - result: dict[str, Any] | None = None - error: JSONRPCError | None = None - id: int | str | None = None diff --git a/src/agentex/lib/types/llm_messages.py b/src/agentex/lib/types/llm_messages.py deleted file mode 100644 index 706939f1..00000000 --- a/src/agentex/lib/types/llm_messages.py +++ /dev/null @@ -1,356 +0,0 @@ -from __future__ import annotations - -from typing import Any, Literal, Annotated - -from pydantic import Field - -from agentex.lib.utils.model_utils import BaseModel - - -class LLMConfig(BaseModel): - """ - LLMConfig is the configuration for the LLM.
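A minimal instantiation sketch, assuming only the fields documented below; the model name and message are hypothetical, and only `model` is required:

```python
from agentex.lib.types.llm_messages import LLMConfig

config = LLMConfig(
    model="gpt-4o",  # hypothetical model identifier
    messages=[{"role": "user", "content": "Summarize this task."}],
    temperature=0.2,
    max_tokens=512,
)
```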
- - Attributes: - model: The model to use - messages: The messages to send to the LLM - temperature: The temperature to use - top_p: The top_p to use - n: The number of completions to generate - stream: Whether to stream the completions - stream_options: The options for the stream - stop: The stop sequence to use - max_tokens: The maximum number of tokens to generate - max_completion_tokens: The maximum number of tokens to generate for the completion - presence_penalty: The presence penalty to use - frequency_penalty: The frequency penalty to use - logit_bias: The logit bias to use - response_format: The response format to use - seed: The seed to use - tools: The tools to use - tool_choice: The tool choice to use - parallel_tool_calls: Whether to allow parallel tool calls - logprobs: Whether to return log probabilities - top_logprobs: The number of top log probabilities to return - """ - - model: str - messages: list = [] - temperature: float | None = None - top_p: float | None = None - n: int | None = None - stream: bool | None = None - stream_options: dict | None = None - stop: str | list | None = None - max_tokens: int | None = None - max_completion_tokens: int | None = None - presence_penalty: float | None = None - frequency_penalty: float | None = None - logit_bias: dict | None = None - response_format: dict | type[BaseModel] | str | None = None - seed: int | None = None - tools: list | None = None - tool_choice: str | None = None - parallel_tool_calls: bool | None = None - logprobs: bool | None = None - top_logprobs: int | None = None - - -class ContentPartText(BaseModel): - """ - ContentPartText is the text content of the message. - - Attributes: - text: The text content. - type: The type of the content part. - """ - - text: str = Field(..., description="The text content.") - type: Literal["text"] = Field( - default="text", description="The type of the content part." - ) - - -class ImageURL(BaseModel): - """ - ImageURL is the URL of the image. - - Attributes: - url: The URL of the image. - detail: The detail level of the image. - """ - - url: str = Field( - ..., description="Either a URL of the image or the base64 encoded image data." - ) - detail: Literal["auto", "low", "high"] = Field( - ..., - description="""Specifies the detail level of the image. - -Learn more in the -[Vision guide](https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding). -""", - ) - - -class ContentPartImage(BaseModel): - """ - ContentPartImage is the image content of the message. - - Attributes: - image_url: The URL of the image. - type: The type of the content part. - """ - - image_url: ImageURL = Field(..., description="The image URL.") - type: Literal["image_url"] = Field(..., description="The type of the content part.") - - -class FileContent(BaseModel): - """ - FileContent is the file content of the message. - - Attributes: - filename: The name of the file. - file_data: The base64 encoded file data with MIME type, e.g., 'data:application/pdf;base64,...' - """ - - filename: str = Field(..., description="The name of the file.") - file_data: str = Field( - ..., - description="The base64 encoded file data with MIME type, e.g., 'data:application/pdf;base64,...'", - ) - - -class ContentPartFile(BaseModel): - """ - ContentPartFile is the file content of the message. - - Attributes: - file: The file content. - type: The type of the content part. 
- """ - - file: FileContent = Field(..., description="The file content.") - type: Literal["file"] = Field( - default="file", description="The type of the content part." - ) - - -ContentPart = ContentPartText | ContentPartImage | ContentPartFile - - -class SystemMessage(BaseModel): - """ - SystemMessage is the system message of the message. - - Attributes: - role: The role of the messages author, in this case `system`. - content: The contents of the system message. - """ - - role: Literal["system"] = Field( - default="system", - description="The role of the messages author, in this case `system`.", - ) - content: str = Field(..., description="The contents of the system message.") - - -class UserMessage(BaseModel): - """ - UserMessage is the user message of the message. - - Attributes: - role: The role of the messages author, in this case `user`. - content: The contents of the user message. - """ - - role: Literal["user"] = Field( - default="user", - description="The role of the messages author, in this case `user`.", - ) - content: str | list[ContentPart] = Field( - ..., - description="The contents of the user message. Can be a string or a list of content parts.", - ) - - -class ToolCall(BaseModel): - """ - ToolCall is the tool call of the message. - - Attributes: - name: The name of the function to call. - arguments: The arguments to call the function with, as generated by the model in JSON format. - """ - - name: str | None = Field( - default=None, description="The name of the function to call." - ) - arguments: str | None = Field( - default=None, - description=""" -The arguments to call the function with, as generated by the model in JSON -format. Note that the model does not always generate valid JSON, and may -hallucinate parameters not defined by your function schema. Validate the -arguments in your code before calling your function. -""", - ) - - -class ToolCallRequest(BaseModel): - """ - ToolCallRequest is the tool call request of the message. - - Attributes: - type: The type of the tool. Currently, only `function` is supported. - id: The ID of the tool call request. - function: The function that the model is requesting. - index: The index of the tool call request. - """ - - type: Literal["function"] = Field( - default="function", - description="The type of the tool. Currently, only `function` is supported.", - ) - id: str | None = Field(default=None, description="The ID of the tool call request.") - function: ToolCall = Field( - ..., description="The function that the model is requesting." - ) - index: int | None = None - - -class AssistantMessage(BaseModel): - """ - AssistantMessage is the assistant message of the message. - - Attributes: - role: The role of the messages author, in this case `assistant`. - content: The contents of the assistant message. - tool_calls: The tool calls generated by the model, such as function calls. - parsed: The parsed content of the message to a specific type - """ - - role: Literal["assistant"] = Field( - default="assistant", - description="The role of the messages author, in this case `assistant`.", - ) - content: str | None = Field( - default=None, - description="""The contents of the assistant message. - -Required unless `tool_calls` or `function_call` is specified. 
-""", - ) - tool_calls: list[ToolCallRequest] | None = Field( - default=None, - description="The tool calls generated by the model, such as function calls.", - ) - parsed: Any | None = Field( - default=None, description="The parsed content of the message to a specific type" - ) - - -class ToolMessage(BaseModel): - """ - ToolMessage is the tool message of the message. - - Attributes: - role: The role of the messages author, in this case `tool`. - content: The contents of the tool message. - tool_call_id: The tool call that this message is responding to. - name: The name of the tool called. - is_error: Whether the tool call was successful. - """ - - role: Literal["tool"] = Field( - default="tool", - description="The role of the messages author, in this case `tool`.", - ) - content: str | list[ContentPart] = Field( - ..., description="The contents of the tool message." - ) - tool_call_id: str = Field( - ..., description="Tool call that this message is responding to." - ) - # name is optional based on OAI API defined here for chat_completion_input: https://platform.openai.com/docs/api-reference/chat/create - name: str | None = Field(default=None, description="The name of the tool called.") - is_error: bool | None = Field( - default=None, description="Whether the tool call was successful." - ) - - -Message = Annotated[ - SystemMessage | UserMessage | AssistantMessage | ToolMessage, - Field(discriminator="role"), -] - - -class Delta(BaseModel): - """ - Delta is the delta of the message. - - Attributes: - content: The content of the delta. - role: The role of the delta. - tool_calls: The tool calls of the delta. - """ - - content: str | None = Field(default=None) - role: str | None = Field(default=None) - tool_calls: list[ToolCallRequest] | None = Field(default=None) - - -class Choice(BaseModel): - """ - Choice is the choice of the message. - - Attributes: - index: The index of the choice. - finish_reason: The finish reason of the choice. - message: The message of the choice. - delta: The delta of the choice. - """ - - index: int - finish_reason: Literal["stop", "length", "content_filter", "tool_calls"] | None = ( - None - ) - message: AssistantMessage | None = None - delta: Delta | None = None - - -class Usage(BaseModel): - """ - Usage is the usage of the message. - - Attributes: - prompt_tokens: The number of prompt tokens. - completion_tokens: The number of completion tokens. - total_tokens: The total number of tokens. - """ - - prompt_tokens: int - completion_tokens: int - total_tokens: int - - -class Completion(BaseModel): - """ - Completion is the completion of the message. - - Attributes: - choices: The choices of the completion. - created: The created time of the completion. - model: The model of the completion. - usage: The usage of the completion. - """ - - choices: list[Choice] - created: int | None = None - model: str | None = None - usage: Usage | None = None diff --git a/src/agentex/lib/types/tracing.py b/src/agentex/lib/types/tracing.py deleted file mode 100644 index 721d8779..00000000 --- a/src/agentex/lib/types/tracing.py +++ /dev/null @@ -1,37 +0,0 @@ -from __future__ import annotations - -from typing import Literal, Annotated - -from pydantic import Field - -from agentex.lib.utils.model_utils import BaseModel - - -class BaseModelWithTraceParams(BaseModel): - """ - Base model with trace parameters. 
- - Attributes: - trace_id: The trace ID - parent_span_id: The parent span ID - """ - - trace_id: str | None = None - parent_span_id: str | None = None - - -class AgentexTracingProcessorConfig(BaseModel): - type: Literal["agentex"] = "agentex" - - -class SGPTracingProcessorConfig(BaseModel): - type: Literal["sgp"] = "sgp" - sgp_api_key: str - sgp_account_id: str - sgp_base_url: str | None = None - - -TracingProcessorConfig = Annotated[ - AgentexTracingProcessorConfig | SGPTracingProcessorConfig, - Field(discriminator="type"), -] diff --git a/src/agentex/lib/utils/__init__.py b/src/agentex/lib/utils/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/agentex/lib/utils/completions.py b/src/agentex/lib/utils/completions.py deleted file mode 100644 index fe62c7d1..00000000 --- a/src/agentex/lib/utils/completions.py +++ /dev/null @@ -1,147 +0,0 @@ -from __future__ import annotations - -from copy import deepcopy -from typing import Any -from functools import reduce, singledispatch - -from agentex.lib.types.llm_messages import ( - Delta, - Usage, - Choice, - ToolCall, - Completion, - ToolCallRequest, -) - - -@singledispatch -def _concat_chunks(_a: None, b: Any): - return b - - -@_concat_chunks.register -def _(a: Completion, b: Completion) -> Completion: - a.choices = [_concat_chunks(*c) for c in zip(a.choices, b.choices, strict=False)] - a.usage = _concat_chunks(a.usage, b.usage) - - return a - - -@_concat_chunks.register -def _(a: Choice, b: Choice) -> Choice: - if hasattr(a, "index") and hasattr(b, "index"): - assert a.index == b.index - - if hasattr(a, "delta") and hasattr(b, "delta"): - a.delta = _concat_chunks(a.delta, b.delta) - - a.finish_reason = a.finish_reason or b.finish_reason - return a - -@_concat_chunks.register -def _(a: Usage | None, b: Usage | None) -> Usage | None: - if a is not None and b is not None: - return Usage( - prompt_tokens=a.prompt_tokens + b.prompt_tokens, - completion_tokens=a.completion_tokens + b.completion_tokens, - total_tokens=a.total_tokens + b.total_tokens, - ) - else: - return a or b - - -@_concat_chunks.register -def _(a: Delta, b: Delta) -> Delta: - a.content = a.content + b.content if a.content and b.content else a.content or b.content - - if hasattr(a, "tool_calls") and hasattr(b, "tool_calls") and a.tool_calls and b.tool_calls: - # Group tool calls by index - grouped_tool_calls = {} - for tool_call in a.tool_calls + b.tool_calls: - if tool_call.index not in grouped_tool_calls: - grouped_tool_calls[tool_call.index] = tool_call - else: - grouped_tool_calls[tool_call.index] = _concat_chunks( - grouped_tool_calls[tool_call.index], tool_call - ) - - a.tool_calls = list(grouped_tool_calls.values()) - elif hasattr(b, "tool_calls") and b.tool_calls: - a.tool_calls = b.tool_calls - - return a - - -@_concat_chunks.register -def _(a: ToolCallRequest, b: ToolCallRequest) -> ToolCallRequest: - # Preserve id from either a or b, with preference for a - id_val = a.id if a.id is not None else b.id - - # Use index from either a or b, with preference for a's index - index_val = a.index if hasattr(a, "index") and a.index is not None else b.index - - # Concatenate the function part - function_val = ( - _concat_chunks(a.function, b.function) - if a.function and b.function - else a.function or b.function - ) - - # Set all properties - a.id = id_val - a.index = index_val - a.function = function_val - - return a - - -@_concat_chunks.register -def _(a: ToolCall, b: ToolCall) -> ToolCall: - # Preserve name from either a or b, with preference for a - 
name_val = a.name or b.name - - # Concatenate arguments string - args_val = "" - if a.arguments is not None and b.arguments is not None: - args_val = a.arguments + b.arguments - else: - args_val = a.arguments or b.arguments - - # Set all properties - a.name = name_val - a.arguments = args_val - - return a - - -def concat_completion_chunks(chunks: list[Completion]) -> Completion: - """ - Accumulates all chunks returned from a streaming completion call into a `Completion` message. - This is useful when you stream responses from an LLM and want to keep track of the context (i.e. previous messages + current message). - - Args: - chunks: list of completion chunks returned from streamed completion - Returns: - Completion: same as type returned from non-streaming completion - - To implement `concat_completion_chunks` we first implement a binary `_concat_chunks` function for each - type, using `singledispatch` to dispatch the call to the appropriate function based on the type of the first argument. - Each nested type is then concatenated. We can then use reduce to accumulate the entire stream into a - single `CompletionChunk`. Finally we convert the type to the appropriate non-streaming type `Completion` and return it. - """ - if not chunks: - raise ValueError("Cannot concatenate empty chunks list") - - chunks_copy = chunks.copy() - chunks_copy[0] = deepcopy(chunks_copy[0]) # _concat_chunks mutates first argument - accumulated_chunks = reduce(_concat_chunks, chunks_copy) - - data = accumulated_chunks.model_dump() - data["object"] = "chat.completion" - choices = data["choices"] - for choice in choices: - choice["message"] = choice.pop("delta") - - return Completion.model_validate(data) diff --git a/src/agentex/lib/utils/console.py b/src/agentex/lib/utils/console.py deleted file mode 100644 index eab21efa..00000000 --- a/src/agentex/lib/utils/console.py +++ /dev/null @@ -1,16 +0,0 @@ -from __future__ import annotations - -from rich import box -from rich.table import Table -from rich.console import Console - -console = Console() - - -def print_section(name: str, contents: list[str], subtitle: str | None = None): - console.print() - table = Table(box=box.SQUARE, caption=subtitle, show_header=False, expand=True) - table.title = name - table.add_column(name, style="dim", width=12) - table.add_row(*contents) - console.print(table) diff --git a/src/agentex/lib/utils/debug.py b/src/agentex/lib/utils/debug.py deleted file mode 100644 index 69cbf6b1..00000000 --- a/src/agentex/lib/utils/debug.py +++ /dev/null @@ -1,68 +0,0 @@ -""" -Debug utilities for AgentEx development. - -Provides debugging setup functionality that can be used across different components. -""" - -import os - -import debugpy # type: ignore - -from agentex.lib.utils.logging import make_logger - -logger = make_logger(__name__) - - -def setup_debug_if_enabled() -> None: - """ - Setup debugpy if debug mode is enabled via environment variables. - - This function checks for AgentEx debug environment variables and configures - debugpy accordingly. It's designed to be called early in worker startup.
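A usage sketch for the environment-variable contract documented below; setting the variables in-process like this is purely illustrative, and the port is the documented default:

```python
import os

# Opt in before the worker process initializes.
os.environ["AGENTEX_DEBUG_ENABLED"] = "true"
os.environ["AGENTEX_DEBUG_PORT"] = "5678"
os.environ["AGENTEX_DEBUG_WAIT_FOR_ATTACH"] = "true"  # block until an IDE attaches

from agentex.lib.utils.debug import setup_debug_if_enabled

setup_debug_if_enabled()  # listens on port 5678 and waits for the debugger
```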
- - Environment Variables: - AGENTEX_DEBUG_ENABLED: Set to "true" to enable debug mode - AGENTEX_DEBUG_PORT: Port for debug server (default: 5678) - AGENTEX_DEBUG_TYPE: Type identifier for logging (default: "worker") - AGENTEX_DEBUG_WAIT_FOR_ATTACH: Set to "true" to wait for debugger attachment - - Raises: - Any exception from debugpy setup (will bubble up naturally) - """ - if os.getenv("AGENTEX_DEBUG_ENABLED") == "true": - debug_port = int(os.getenv("AGENTEX_DEBUG_PORT", "5678")) - debug_type = os.getenv("AGENTEX_DEBUG_TYPE", "worker") - wait_for_attach = os.getenv("AGENTEX_DEBUG_WAIT_FOR_ATTACH", "false").lower() == "true" - - # Configure debugpy - debugpy.configure(subProcess=False) - debugpy.listen(debug_port) - - logger.info(f"๐Ÿ› [{debug_type.upper()}] Debug server listening on port {debug_port}") - - if wait_for_attach: - logger.info(f"โณ [{debug_type.upper()}] Waiting for debugger to attach...") - debugpy.wait_for_client() - logger.info(f"โœ… [{debug_type.upper()}] Debugger attached!") - else: - logger.info(f"๐Ÿ“ก [{debug_type.upper()}] Ready for debugger attachment") - - -def is_debug_enabled() -> bool: - """ - Check if debug mode is currently enabled. - - Returns: - bool: True if AGENTEX_DEBUG_ENABLED is set to "true" - """ - return os.getenv("AGENTEX_DEBUG_ENABLED", "false").lower() == "true" - - -def get_debug_port() -> int: - """ - Get the debug port from environment variables. - - Returns: - int: Debug port (default: 5678) - """ - return int(os.getenv("AGENTEX_DEBUG_PORT", "5678")) diff --git a/src/agentex/lib/utils/dev_tools/__init__.py b/src/agentex/lib/utils/dev_tools/__init__.py deleted file mode 100644 index 38d7726a..00000000 --- a/src/agentex/lib/utils/dev_tools/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -"""Development tools for AgentEx.""" - -from .async_messages import print_task_message, print_task_message_update, subscribe_to_async_task_messages - -__all__ = [ - "print_task_message", - "print_task_message_update", - "subscribe_to_async_task_messages", -] diff --git a/src/agentex/lib/utils/dev_tools/async_messages.py b/src/agentex/lib/utils/dev_tools/async_messages.py deleted file mode 100644 index 7c627532..00000000 --- a/src/agentex/lib/utils/dev_tools/async_messages.py +++ /dev/null @@ -1,423 +0,0 @@ -""" -Development utility for subscribing to async task messages with streaming support. - -This module provides utilities to read existing messages from a task and subscribe -to new streaming messages, handling mid-stream connections gracefully. -""" - -import json -from typing import List, Optional -from datetime import datetime, timezone - -from yaspin import yaspin # type: ignore[import-untyped] -from rich.panel import Panel -from yaspin.core import Yaspin # type: ignore[import-untyped] -from rich.console import Console -from rich.markdown import Markdown - -from agentex import Agentex -from agentex.types import Task, TaskMessage, TextContent, ReasoningContent, ToolRequestContent, ToolResponseContent -from agentex.types.text_delta import TextDelta -from agentex.types.task_message_update import ( - TaskMessageUpdate, - StreamTaskMessageDone, - StreamTaskMessageFull, - StreamTaskMessageDelta, - StreamTaskMessageStart, -) - - -def print_task_message( - message: TaskMessage, - print_messages: bool = True, - rich_print: bool = True, -) -> None: - """ - Print a task message in a formatted way. 
- - Args: - message: The task message to print - print_messages: Whether to actually print the message (for debugging) - rich_print: Whether to use rich to print the message - """ - if not print_messages: - return - - # Skip empty messages - if isinstance(message.content, TextContent) and not message.content.content.strip(): - return - - # Skip empty reasoning messages - if isinstance(message.content, ReasoningContent): - has_summary = bool(message.content.summary) and any(s for s in message.content.summary if s) - has_content = bool(message.content.content) and any(c for c in message.content.content if c) if message.content.content is not None else False - if not has_summary and not has_content: - return - - timestamp = message.created_at.strftime("%m/%d/%Y %H:%M:%S") if message.created_at else "N/A" - - console = None - if rich_print: - console = Console(width=80) # Fit better in Jupyter cells - - if isinstance(message.content, TextContent): - content = message.content.content - content_type = "text" - elif isinstance(message.content, ToolRequestContent): - tool_name = message.content.name - tool_args = message.content.arguments - - # Format arguments as pretty JSON - try: - if isinstance(tool_args, str): - parsed_args = json.loads(tool_args) - formatted_args = json.dumps(parsed_args, indent=2) - else: - formatted_args = json.dumps(tool_args, indent=2) - content = f"๐Ÿ”ง **Tool Request: {tool_name}**\n\n**Arguments:**\n```json\n{formatted_args}\n```" - except (json.JSONDecodeError, TypeError): - content = f"๐Ÿ”ง **Tool Request: {tool_name}**\n\n**Arguments:**\n```json\n{tool_args}\n```" - - content_type = "tool_request" - elif isinstance(message.content, ToolResponseContent): - tool_name = message.content.name - tool_response = message.content.content - - # Try to parse and format JSON response nicely - try: - if isinstance(tool_response, str): - parsed_response = json.loads(tool_response) - formatted_json = json.dumps(parsed_response, indent=2) - content = f"โœ… **Tool Response: {tool_name}**\n\n**Response:**\n```json\n{formatted_json}\n```" - else: - formatted_json = json.dumps(tool_response, indent=2) - content = f"โœ… **Tool Response: {tool_name}**\n\n**Response:**\n```json\n{formatted_json}\n```" - except (json.JSONDecodeError, TypeError): - # If it's not valid JSON, display as text - if isinstance(tool_response, str): - # Try to extract text content if it's a JSON string with text field - try: - parsed = json.loads(tool_response) - if isinstance(parsed, dict) and "text" in parsed: - text_content = str(parsed["text"]) - content = f"โœ… **Tool Response: {tool_name}**\n\n{text_content}" - else: - content = f"โœ… **Tool Response: {tool_name}**\n\n{tool_response}" - except json.JSONDecodeError: - content = f"โœ… **Tool Response: {tool_name}**\n\n{tool_response}" - else: - content = f"โœ… **Tool Response: {tool_name}**\n\n{tool_response}" - - content_type = "tool_response" - elif isinstance(message.content, ReasoningContent): - # Format reasoning content - reasoning_parts = [] - - # Add summary if available - if message.content.summary: - # Join summaries with double newline for better formatting - summary_text = "\n\n".join(s for s in message.content.summary if s) - if summary_text: - reasoning_parts.append(summary_text) - - # Add full reasoning content if available - if message.content.content: - content_text = "\n\n".join(c for c in message.content.content if c) - if content_text: - reasoning_parts.append(content_text) - - # Format reasoning content (we already checked it's not empty 
at the top) - content = "🧠 **Reasoning**\n\n" + "\n\n".join(reasoning_parts) - content_type = "reasoning" - else: - content = f"{type(message.content).__name__}: {message.content}" - content_type = "other" - - if rich_print and console: - author_color = "bright_cyan" if message.content.author == "user" else "green" - - # Use different border styles and colors for different content types - if content_type == "tool_request": - border_style = "yellow" - elif content_type == "tool_response": - border_style = "bright_green" - elif content_type == "reasoning": - border_style = "bright_magenta" - author_color = "bright_magenta" # Also make the author text magenta - else: - border_style = author_color - - title = f"[bold {author_color}]{message.content.author.upper()}[/bold {author_color}] [{timestamp}]" - panel = Panel(Markdown(content), title=title, border_style=border_style, width=80) - console.print(panel) - else: - title = f"{message.content.author.upper()} [{timestamp}]" - if content_type == "reasoning": - title = f"🧠 REASONING [{timestamp}]" - print(f"{title}\n{content}\n") - - -def print_task_message_update( - task_message_update: TaskMessageUpdate, - print_messages: bool = True, - rich_print: bool = True, - show_deltas: bool = True, -) -> None: - """ - Print a task message update in a formatted way. - - This function handles different types of TaskMessageUpdate objects: - - StreamTaskMessageStart: Shows start indicator - - StreamTaskMessageDelta: Shows deltas in real-time (if show_deltas=True) - - StreamTaskMessageFull: Shows complete message content - - StreamTaskMessageDone: Shows completion indicator - - Args: - task_message_update: The TaskMessageUpdate object to print - print_messages: Whether to actually print the message (for debugging) - rich_print: Whether to use rich formatting - show_deltas: Whether to show delta updates in real-time - """ - if not print_messages: - return - - console = None - if rich_print: - console = Console(width=80) - - if isinstance(task_message_update, StreamTaskMessageStart): - if rich_print and console: - console.print("🚀 [cyan]Agent started responding...[/cyan]") - else: - print("🚀 Agent started responding...") - - elif isinstance(task_message_update, StreamTaskMessageDelta): - if show_deltas and task_message_update.delta: - if isinstance(task_message_update.delta, TextDelta): - print(task_message_update.delta.text_delta, end="", flush=True) - elif rich_print and console: - console.print(f"[yellow]Non-text delta: {type(task_message_update.delta).__name__}[/yellow]") - else: - print(f"Non-text delta: {type(task_message_update.delta).__name__}") - - elif isinstance(task_message_update, StreamTaskMessageFull): - if isinstance(task_message_update.content, TextContent): - timestamp = datetime.now().strftime("%m/%d/%Y %H:%M:%S") - - if rich_print and console: - author_color = "bright_cyan" if task_message_update.content.author == "user" else "green" - title = f"[bold {author_color}]{task_message_update.content.author.upper()}[/bold {author_color}] [{timestamp}]" - panel = Panel(Markdown(task_message_update.content.content), title=title, border_style=author_color, width=80) - console.print(panel) - else: - title = f"{task_message_update.content.author.upper()} [{timestamp}]" - print(f"\n{title}\n{task_message_update.content.content}\n") - else: - content_type = type(task_message_update.content).__name__ - if rich_print and console: - console.print(f"[yellow]Non-text content: {content_type}[/yellow]") - else: - print(f"Non-text content: 
{content_type}") - - else: # StreamTaskMessageDone - if rich_print and console: - console.print("\nโœ… [green]Agent finished responding.[/green]") - else: - print("\nโœ… Agent finished responding.") - - -def subscribe_to_async_task_messages( - client: Agentex, - task: Task, - only_after_timestamp: Optional[datetime] = None, - print_messages: bool = True, - rich_print: bool = True, - timeout: int = 10, -) -> List[TaskMessage]: - """ - Subscribe to async task messages and collect completed messages. - - This function: - 1. Reads all existing messages from the task - 2. Optionally filters messages after a timestamp - 3. Shows a loading message while listening - 4. Subscribes to task message events - 5. Fetches and displays complete messages when they finish - 6. Returns all messages collected during the session - - Features: - - Uses Rich library for beautiful formatting in Jupyter notebooks - - Agent messages are formatted as Markdown - - User and agent messages are displayed in colored panels with fixed width - - Optimized for Jupyter notebook display - - Args: - client: The Agentex client instance - task: The task to subscribe to - print_messages: Whether to print messages as they arrive - only_after_timestamp: Only include messages created after this timestamp. If None, all messages will be included. - rich_print: Whether to use rich to print the message - timeout: The timeout in seconds for the streaming connection. If the connection times out, the function will return with any messages collected so far. - Returns: - List of TaskMessage objects collected during the session - - Raises: - ValueError: If the task doesn't have a name (required for streaming) - """ - - messages_to_return: List[TaskMessage] = [] - - # Read existing messages - messages = [] - try: - # List all messages for this task - MessageListResponse is just a List[TaskMessage] - messages = client.messages.list(task_id=task.id) - - except Exception as e: - print(f"Error reading existing messages: {e}") - - # Filter and display existing messages - for message in messages: - if only_after_timestamp: - if message.created_at is not None: - # Handle timezone comparison - make both datetimes timezone-aware - message_time = message.created_at - if message_time.tzinfo is None: - # If message time is naive, assume it's in UTC - message_time = message_time.replace(tzinfo=timezone.utc) - - comparison_time = only_after_timestamp - if comparison_time.tzinfo is None: - # If comparison time is naive, assume it's in UTC - comparison_time = comparison_time.replace(tzinfo=timezone.utc) - - if message_time < comparison_time: - continue - else: - messages_to_return.append(message) - print_task_message(message, print_messages, rich_print) - else: - messages_to_return.append(message) - print_task_message(message, print_messages, rich_print) - - # Subscribe to server-side events using tasks.stream_events_by_name - # This is the proper way to get agent responses after sending an event in async agents - - # Ensure task has a name - if not task.name: - print("Error: Task must have a name to use stream_events_by_name") - raise ValueError("Task name is required") - - try: - # Use stream_events_by_name to subscribe to TaskMessageUpdate events for this task - # This doesn't require knowing the agent_id, just the task name - - # Track active streaming spinners per message index - active_spinners: dict[int, Yaspin] = {} # index -> yaspin spinner object - - with client.tasks.with_streaming_response.stream_events_by_name( - task_name=task.name, - 
timeout=timeout - ) as response: - - try: - for task_message_update_str in response.iter_text(): - try: - # Parse SSE format - if task_message_update_str.strip().startswith('data: '): - task_message_update_json = task_message_update_str.strip()[6:] # Remove 'data: ' prefix - task_message_update_data = json.loads(task_message_update_json) - - # Deserialize the discriminated union TaskMessageUpdate based on the "type" field - message_type = task_message_update_data.get("type", "unknown") - - # Handle different message types for streaming progress - if message_type == "start": - task_message_update = StreamTaskMessageStart.model_validate(task_message_update_data) - index = task_message_update.index or 0 - - # Start a yaspin spinner for this message - if print_messages and index not in active_spinners: - spinner = yaspin(text="🔄 Agent responding...") - spinner.start() - active_spinners[index] = spinner - - elif message_type == "delta": - task_message_update = StreamTaskMessageDelta.model_validate(task_message_update_data) - index = task_message_update.index or 0 - - # Keep the existing spinner running; if no spinner exists for this index yet, create one - if print_messages and index not in active_spinners: - spinner = yaspin(text="🔄 Agent responding...") - spinner.start() - active_spinners[index] = spinner - - elif message_type == "full": - task_message_update = StreamTaskMessageFull.model_validate(task_message_update_data) - index = task_message_update.index or 0 - - # Stop spinner and show message - if index in active_spinners: - active_spinners[index].stop() - del active_spinners[index] - # Ensure clean line after spinner - if print_messages: - print() - - if task_message_update.parent_task_message and task_message_update.parent_task_message.id: - finished_message = client.messages.retrieve(task_message_update.parent_task_message.id) - messages_to_return.append(finished_message) - print_task_message(finished_message, print_messages, rich_print) - - elif message_type == "done": - task_message_update = StreamTaskMessageDone.model_validate(task_message_update_data) - index = task_message_update.index or 0 - - # Stop spinner and show message - if index in active_spinners: - active_spinners[index].stop() - del active_spinners[index] - # Ensure clean line after spinner - if print_messages: - print() - - if task_message_update.parent_task_message and task_message_update.parent_task_message.id: - finished_message = client.messages.retrieve(task_message_update.parent_task_message.id) - messages_to_return.append(finished_message) - print_task_message(finished_message, print_messages, rich_print) - - # Ignore "connected" message type - elif message_type == "connected": - pass - else: - if print_messages: - print(f"Unknown TaskMessageUpdate type: {message_type}") - - except json.JSONDecodeError: - # Skip invalid JSON or SSE metadata lines - if task_message_update_str.strip() and not task_message_update_str.startswith(':'): - if print_messages: - print(f"Skipping non-JSON: {task_message_update_str.strip()}") - continue - except Exception as e: - if print_messages: - print(f"Error processing TaskMessageUpdate: {e}") - print(f"Raw data: {task_message_update_str.strip()}") - continue - finally: - # Stop any remaining spinners when we're done - for spinner in active_spinners.values(): - spinner.stop() - active_spinners.clear() - - except Exception as e: - # Handle timeout gracefully - if "timeout" in str(e).lower() or "timed out" in str(e).lower(): - if print_messages: -
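For reference, a hedged sketch of how this notebook helper was typically driven. The task name is invented, and the `retrieve_by_name` lookup is an assumption based on the task types exported by this SDK; the helper itself is assumed to be importable from wherever this deleted module lived:

from datetime import datetime, timezone

from agentex import Agentex

client = Agentex()
task = client.tasks.retrieve_by_name("demo-task")  # assumed lookup; "demo-task" is a placeholder

# Only keep messages created from this point on, waiting up to 30 seconds for new ones.
messages = subscribe_to_async_task_messages(
    client=client,
    task=task,
    only_after_timestamp=datetime.now(timezone.utc),
    timeout=30,
)
print(f"Collected {len(messages)} messages")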
print(f"Streaming timed out after {timeout} seconds - returning collected messages") - else: - if print_messages: - print(f"Error subscribing to events: {e}") - print("Make sure your agent is running and the task exists") - - return messages_to_return \ No newline at end of file diff --git a/src/agentex/lib/utils/io.py b/src/agentex/lib/utils/io.py deleted file mode 100644 index f8dfcc46..00000000 --- a/src/agentex/lib/utils/io.py +++ /dev/null @@ -1,31 +0,0 @@ -from __future__ import annotations - -from typing import Any - -import yaml -from yaml.scanner import ScannerError - - -class InvalidYAMLError(ValueError): - """ - Raised when trying to red a YAML file, but the file is not formatted correctly. - """ - - -def load_yaml_file(file_path: str) -> dict[str, Any]: - """ - Loads a YAML file from the specified path. - - :param file_path: The path of the YAML file to load. - :type file_path: str - :return: The contents of the YAML file. - :rtype: dict - """ - try: - with open(file_path) as file: - yaml_dict = yaml.safe_load(file) - return yaml_dict - except ScannerError as error: - raise InvalidYAMLError( - f"The following file is not in valid YAML format: {file_path}" - ) from error diff --git a/src/agentex/lib/utils/iterables.py b/src/agentex/lib/utils/iterables.py deleted file mode 100644 index 7119ddb6..00000000 --- a/src/agentex/lib/utils/iterables.py +++ /dev/null @@ -1,16 +0,0 @@ -from __future__ import annotations - -from typing import Any -from collections.abc import AsyncGenerator - - -async def async_enumerate( - aiterable: AsyncGenerator, start: int = 0 -) -> AsyncGenerator[tuple[int, Any], None]: - """ - Enumerate an async generator. - """ - i = start - async for item in aiterable: - yield i, item - i += 1 diff --git a/src/agentex/lib/utils/json_schema.py b/src/agentex/lib/utils/json_schema.py deleted file mode 100644 index 6c8fa5c3..00000000 --- a/src/agentex/lib/utils/json_schema.py +++ /dev/null @@ -1,25 +0,0 @@ -from __future__ import annotations - -from typing import Any - -import jsonref -from jsonschema import validate as schema_validation - - -def resolve_refs(schema: dict) -> dict: - """ - Resolve JSON references in a schema. 
- """ - resolved = jsonref.replace_refs(schema, proxies=False, lazy_load=False) - serializable = { - "type": resolved.get("type"), # type: ignore[union-attr] - "properties": resolved.get("properties"), # type: ignore[union-attr] - "required": list(resolved.get("required", [])), # type: ignore[union-attr] - "additionalProperties": resolved.get("additionalProperties", False), # type: ignore[union-attr] - } - return serializable - - -def validate_payload(json_schema: dict[str, Any], payload: dict[str, Any]) -> None: - """Validate the payload against the JSON schema.""" - schema_validation(instance=payload, schema=json_schema) diff --git a/src/agentex/lib/utils/logging.py b/src/agentex/lib/utils/logging.py deleted file mode 100644 index 5bbaf61a..00000000 --- a/src/agentex/lib/utils/logging.py +++ /dev/null @@ -1,79 +0,0 @@ -import os -import logging -import contextvars - -import ddtrace -import json_log_formatter -from rich.console import Console -from rich.logging import RichHandler - -_is_datadog_configured = bool(os.environ.get("DD_AGENT_HOST")) - -ctx_var_request_id = contextvars.ContextVar[str]("request_id") - - -class CustomJSONFormatter(json_log_formatter.JSONFormatter): - def json_record(self, message: str, extra: dict, record: logging.LogRecord) -> dict: # type: ignore[override] - extra = super().json_record(message, extra, record) - extra["level"] = record.levelname - extra["name"] = record.name - extra["lineno"] = record.lineno - extra["pathname"] = record.pathname - extra["request_id"] = ctx_var_request_id.get(None) - if _is_datadog_configured: - extra["dd.trace_id"] = ddtrace.tracer.get_log_correlation_context().get("dd.trace_id", None) or getattr( # type: ignore[attr-defined] - record, "dd.trace_id", 0 - ) - extra["dd.span_id"] = ddtrace.tracer.get_log_correlation_context().get("dd.span_id", None) or getattr( # type: ignore[attr-defined] - record, "dd.span_id", 0 - ) - # add the env, service, and version configured for the tracer - # If tracing is not set up, then this should pull values from DD_ENV, DD_SERVICE, and DD_VERSION. - service_override = ddtrace.config.service or os.getenv("DD_SERVICE") - if service_override: - extra["dd.service"] = service_override - - env_override = ddtrace.config.env or os.getenv("DD_ENV") - if env_override: - extra["dd.env"] = env_override - - version_override = ddtrace.config.version or os.getenv("DD_VERSION") - if version_override: - extra["dd.version"] = version_override - - return extra - -def make_logger(name: str) -> logging.Logger: - """ - Creates a logger object with a RichHandler to print colored text. - :param name: The name of the module to create the logger for. - :return: A logger object. 
- """ - # Create a console object to print colored text - logger = logging.getLogger(name) - logger.setLevel(logging.INFO) - - environment = os.getenv("ENVIRONMENT") - if environment == "local": - console = Console() - # Add the RichHandler to the logger to print colored text - handler = RichHandler( - console=console, - show_level=False, - show_path=False, - show_time=False, - ) - logger.addHandler(handler) - return logger - - stream_handler = logging.StreamHandler() - if _is_datadog_configured: - stream_handler.setFormatter(CustomJSONFormatter()) - else: - stream_handler.setFormatter( - logging.Formatter("%(asctime)s %(levelname)s [%(name)s] [%(filename)s:%(lineno)d] - %(message)s") - ) - - logger.addHandler(stream_handler) - # Create a logger object with the name of the current module - return logger diff --git a/src/agentex/lib/utils/mcp.py b/src/agentex/lib/utils/mcp.py deleted file mode 100644 index bebe9364..00000000 --- a/src/agentex/lib/utils/mcp.py +++ /dev/null @@ -1,20 +0,0 @@ -from __future__ import annotations - -from typing import Any - -from mcp import StdioServerParameters - - -def redact_mcp_server_params( - mcp_server_params: list[StdioServerParameters], -) -> list[dict[str, Any]]: - """Redact MCP server params for logging.""" - return [ - { - **{k: v for k, v in server_param.model_dump().items() if k != "env"}, - "env": dict.fromkeys(server_param.env, "********") - if server_param.env - else None, - } - for server_param in mcp_server_params - ] diff --git a/src/agentex/lib/utils/model_utils.py b/src/agentex/lib/utils/model_utils.py deleted file mode 100644 index 8826ba12..00000000 --- a/src/agentex/lib/utils/model_utils.py +++ /dev/null @@ -1,71 +0,0 @@ -from __future__ import annotations - -from typing import Any, TypeVar -from datetime import datetime -from collections.abc import Mapping, Iterable - -from pydantic import BaseModel as PydanticBaseModel, ConfigDict - -from agentex.lib.utils.io import load_yaml_file - -T = TypeVar("T", bound="BaseModel") - - -class BaseModel(PydanticBaseModel): - model_config = ConfigDict(from_attributes=True, populate_by_name=True) - - @classmethod - def from_yaml(cls: type[T], file_path: str) -> T: - """ - Returns an instance of this class by deserializing from a YAML file. - - :param file_path: The path to the YAML file. - :return: An instance of this class. 
- """ - yaml_dict = load_yaml_file(file_path=file_path) - class_object = cls.model_validate(yaml_dict) - return class_object - - def to_json(self, *args, **kwargs) -> str: - return self.model_dump_json(*args, **kwargs) - - def to_dict(self, *_args, **_kwargs) -> dict[str, Any]: - return recursive_model_dump(self) - - -def recursive_model_dump(obj: Any) -> Any: - if isinstance(obj, PydanticBaseModel): - # Get the model data as dict and recursively process each field - # This allows us to handle non-serializable objects like functions - try: - return obj.model_dump(mode="json") - except Exception: - # If model_dump fails (e.g., due to functions), manually process - model_dict = {} - for field_name in obj.__class__.model_fields: - field_value = getattr(obj, field_name) - model_dict[field_name] = recursive_model_dump(field_value) - return model_dict - elif isinstance(obj, datetime): - # Serialize datetime to ISO format string - return obj.isoformat() - elif callable(obj): - # Serialize functions and other callable objects - if hasattr(obj, "__name__"): - func_name = obj.__name__ - else: - func_name = str(obj) - - if hasattr(obj, "__module__"): - return f"" - else: - return f"" - elif isinstance(obj, Mapping): - # Recursively serialize dictionary values - return {k: recursive_model_dump(v) for k, v in obj.items()} - elif isinstance(obj, Iterable) and not isinstance(obj, str | bytes): - # Recursively serialize items in lists, tuples, sets, etc. - return [recursive_model_dump(item) for item in obj] - else: - # Return primitive types as-is - return obj diff --git a/src/agentex/lib/utils/parsing.py b/src/agentex/lib/utils/parsing.py deleted file mode 100644 index ecb61206..00000000 --- a/src/agentex/lib/utils/parsing.py +++ /dev/null @@ -1,15 +0,0 @@ -from urllib.parse import urlsplit, urlunsplit - - -def remove_query_params(url): - split_url = urlsplit(url) - scheme, netloc, path, query, fragment = split_url - - if query: - query = '' - else: - amp_index = path.find('&') - if amp_index != -1: - path = path[:amp_index] - - return urlunsplit((scheme, netloc, path, query, fragment)) diff --git a/src/agentex/lib/utils/regex.py b/src/agentex/lib/utils/regex.py deleted file mode 100644 index c760b10d..00000000 --- a/src/agentex/lib/utils/regex.py +++ /dev/null @@ -1,6 +0,0 @@ -import re - - -def camel_to_snake(camel_case_str: str) -> str: - # Substitute capital letters with an underscore followed by the lowercase letter - return re.sub(r'(? 
CreateTaskResponse: - if agent_id is not None and agent_name is not None: - raise ValueError("Either agent_id or agent_name must be provided, but not both") - - if agent_id is not None: - raw_agent_rpc_response = self.rpc( - agent_id=agent_id, - method="task/create", - params=params, - id=id, - jsonrpc=jsonrpc, - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - ) - elif agent_name is not None: - raw_agent_rpc_response = self.rpc_by_name( - agent_name=agent_name, - method="task/create", - params=params, - id=id, - jsonrpc=jsonrpc, - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - ) - else: - raise ValueError("Either agent_id or agent_name must be provided") - - return CreateTaskResponse.model_validate(raw_agent_rpc_response, from_attributes=True) - - def cancel_task( - self, - agent_id: str | None = None, - agent_name: str | None = None, - *, - params: agent_rpc_params.ParamsCancelTaskRequest, - id: Union[int, str, None] | NotGiven = NOT_GIVEN, - jsonrpc: Literal["2.0"] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> CancelTaskResponse: - if agent_id is not None and agent_name is not None: - raise ValueError("Either agent_id or agent_name must be provided, but not both") - - if agent_id is not None: - raw_agent_rpc_response = self.rpc( - agent_id=agent_id, - method="task/cancel", - params=params, - id=id, - jsonrpc=jsonrpc, - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - ) - elif agent_name is not None: - raw_agent_rpc_response = self.rpc_by_name( - agent_name=agent_name, - method="task/cancel", - params=params, - id=id, - jsonrpc=jsonrpc, - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - ) - else: - raise ValueError("Either agent_id or agent_name must be provided") - - return CancelTaskResponse.model_validate(raw_agent_rpc_response, from_attributes=True) - - def send_message( - self, - agent_id: str | None = None, - agent_name: str | None = None, - *, - params: agent_rpc_params.ParamsSendMessageRequest, - id: Union[int, str, None] | NotGiven = NOT_GIVEN, - jsonrpc: Literal["2.0"] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> SendMessageResponse: - if agent_id is not None and agent_name is not None: - raise ValueError("Either agent_id or agent_name must be provided, but not both") - - if "stream" in params and params["stream"] == True: - raise ValueError("If stream is set to True, use send_message_stream() instead") - else: - if agent_id is not None: - raw_agent_rpc_response = self.rpc( - agent_id=agent_id, - method="message/send", - params=params, - id=id, - jsonrpc=jsonrpc, - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - ) - elif agent_name is not None: - raw_agent_rpc_response = self.rpc_by_name( - agent_name=agent_name, - method="message/send", - params=params, - id=id, - jsonrpc=jsonrpc, - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - ) - else: - raise ValueError("Either agent_id or agent_name must be provided") - - return SendMessageResponse.model_validate(raw_agent_rpc_response, from_attributes=True) - - def send_message_stream( - self, - agent_id: str | None = None, - agent_name: str | None = None, - *, - params: agent_rpc_params.ParamsSendMessageRequest, - id: Union[int, str, None] | NotGiven = NOT_GIVEN, - jsonrpc: Literal["2.0"] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Generator[SendMessageStreamResponse, None, None]: - if agent_id is not None and agent_name is not None: - raise ValueError("Either agent_id or agent_name must be provided, but not both") - - if "stream" in params and params["stream"] == False: - raise ValueError("If stream is set to False, use send_message() instead") - - params["stream"] = True - - if agent_id is not None: - raw_agent_rpc_response = self.with_streaming_response.rpc( - agent_id=agent_id, - method="message/send", - params=params, - id=id, - jsonrpc=jsonrpc, - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - ) - elif agent_name is not None: - raw_agent_rpc_response = self.with_streaming_response.rpc_by_name( - agent_name=agent_name, - method="message/send", - params=params, - id=id, - jsonrpc=jsonrpc, - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - ) - else: - raise ValueError("Either agent_id or agent_name must be provided") - - with raw_agent_rpc_response as response: - for _line in response.iter_lines(): - if not _line: - continue - line = _line.strip() - # Handle optional SSE-style prefix - if line.startswith("data:"): - line = line[len("data:"):].strip() - if not line: - continue - try: - chunk_rpc_response = SendMessageStreamResponse.model_validate( - json.loads(line), - from_attributes=True - ) - yield chunk_rpc_response - except json.JSONDecodeError: - # Skip invalid JSON lines - continue - - def send_event( - self, - agent_id: str | None = None, - agent_name: str | None = None, - *, - params: agent_rpc_params.ParamsSendEventRequest, - id: Union[int, str, None] | NotGiven = NOT_GIVEN, 
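The removed helpers above split messaging into a blocking path and a streaming path. A hedged sketch of how they were called; the agent name and params payload are illustrative (see ParamsSendMessageRequest for the real fields):

from agentex import Agentex

client = Agentex()

# Hypothetical payload shape, for illustration only.
params = {"task_id": "task-123", "content": {"author": "user", "content": "hi"}}

# Blocking: rejects stream=True and returns the full result.
response = client.agents.send_message(agent_name="demo-agent", params=params)

# Streaming: forces stream=True and yields parsed chunks as they arrive.
for chunk in client.agents.send_message_stream(agent_name="demo-agent", params=params):
    print(chunk.result)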
- jsonrpc: Literal["2.0"] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> SendEventResponse: - if agent_id is not None and agent_name is not None: - raise ValueError("Either agent_id or agent_name must be provided, but not both") - - if agent_id is not None: - raw_agent_rpc_response = self.rpc( - agent_id=agent_id, - method="event/send", - params=params, - id=id, - jsonrpc=jsonrpc, - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - ) - elif agent_name is not None: - raw_agent_rpc_response = self.rpc_by_name( - agent_name=agent_name, - method="event/send", - params=params, - id=id, - jsonrpc=jsonrpc, - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - ) - else: - raise ValueError("Either agent_id or agent_name must be provided") - - return SendEventResponse.model_validate(raw_agent_rpc_response, from_attributes=True) class AsyncAgentsResource(AsyncAPIResource): @@ -909,269 +639,7 @@ async def rpc_by_name( ), cast_to=AgentRpcResponse, ) - - async def create_task( - self, - agent_id: str | None = None, - agent_name: str | None = None, - *, - params: agent_rpc_params.ParamsCreateTaskRequest, - id: Union[int, str, None] | NotGiven = NOT_GIVEN, - jsonrpc: Literal["2.0"] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> CreateTaskResponse: - if agent_id is not None and agent_name is not None: - raise ValueError("Either agent_id or agent_name must be provided, but not both") - - if agent_id is not None: - raw_agent_rpc_response = await self.rpc( - agent_id=agent_id, - method="task/create", - params=params, - id=id, - jsonrpc=jsonrpc, - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - ) - elif agent_name is not None: - raw_agent_rpc_response = await self.rpc_by_name( - agent_name=agent_name, - method="task/create", - params=params, - id=id, - jsonrpc=jsonrpc, - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - ) - else: - raise ValueError("Either agent_id or agent_name must be provided") - - return CreateTaskResponse.model_validate(raw_agent_rpc_response, from_attributes=True) - - async def cancel_task( - self, - agent_id: str | None = None, - agent_name: str | None = None, - *, - params: agent_rpc_params.ParamsCancelTaskRequest, - id: Union[int, str, None] | NotGiven = NOT_GIVEN, - jsonrpc: Literal["2.0"] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> CancelTaskResponse: - if agent_id is not None and agent_name is not None: - raise ValueError("Either agent_id or agent_name must be provided, but not both") - - if agent_id is not None: - raw_agent_rpc_response = await self.rpc( - agent_id=agent_id, - method="task/cancel", - params=params, - id=id, - jsonrpc=jsonrpc, - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - ) - elif agent_name is not None: - raw_agent_rpc_response = await self.rpc_by_name( - agent_name=agent_name, - method="task/cancel", - params=params, - id=id, - jsonrpc=jsonrpc, - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - ) - else: - raise ValueError("Either agent_id or agent_name must be provided") - - return CancelTaskResponse.model_validate(raw_agent_rpc_response, from_attributes=True) - - async def send_message( - self, - agent_id: str | None = None, - agent_name: str | None = None, - *, - params: agent_rpc_params.ParamsSendMessageRequest, - id: Union[int, str, None] | NotGiven = NOT_GIVEN, - jsonrpc: Literal["2.0"] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> SendMessageResponse: - if agent_id is not None and agent_name is not None: - raise ValueError("Either agent_id or agent_name must be provided, but not both") - - if "stream" in params and params["stream"] == True: - raise ValueError("If stream is set to True, use send_message_stream() instead") - else: - if agent_id is not None: - raw_agent_rpc_response = await self.rpc( - agent_id=agent_id, - method="message/send", - params=params, - id=id, - jsonrpc=jsonrpc, - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - ) - elif agent_name is not None: - raw_agent_rpc_response = await self.rpc_by_name( - agent_name=agent_name, - method="message/send", - params=params, - id=id, - jsonrpc=jsonrpc, - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - ) - else: - raise ValueError("Either agent_id or agent_name must be provided") - - return SendMessageResponse.model_validate(raw_agent_rpc_response, from_attributes=True) - - async def send_message_stream( - self, - agent_id: str | None = None, - agent_name: str | None = None, - *, - params: agent_rpc_params.ParamsSendMessageRequest, - id: Union[int, str, None] | NotGiven = NOT_GIVEN, - jsonrpc: Literal["2.0"] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AsyncGenerator[SendMessageStreamResponse, None]: - if agent_id is not None and agent_name is not None: - raise ValueError("Either agent_id or agent_name must be provided, but not both") - - if "stream" in params and params["stream"] == False: - raise ValueError("If stream is set to False, use send_message() instead") - - params["stream"] = True - - if agent_id is not None: - raw_agent_rpc_response = self.with_streaming_response.rpc( - agent_id=agent_id, - method="message/send", - params=params, - id=id, - jsonrpc=jsonrpc, - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - ) - elif agent_name is not None: - raw_agent_rpc_response = self.with_streaming_response.rpc_by_name( - agent_name=agent_name, - method="message/send", - params=params, - id=id, - jsonrpc=jsonrpc, - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - ) - else: - raise ValueError("Either agent_id or agent_name must be provided") - - async with raw_agent_rpc_response as response: - async for _line in response.iter_lines(): - if not _line: - continue - line = _line.strip() - # Handle optional SSE-style prefix - if line.startswith("data:"): - line = line[len("data:"):].strip() - if not line: - continue - try: - chunk_rpc_response = SendMessageStreamResponse.model_validate( - json.loads(line), - from_attributes=True - ) - yield chunk_rpc_response - except json.JSONDecodeError: - # Skip invalid JSON lines - continue - except ValidationError as e: - raise ValueError(f"Invalid SendMessageStreamResponse returned: {line}") from e - - async def send_event( - self, - agent_id: str | None = None, - agent_name: str | None = None, - *, - params: agent_rpc_params.ParamsSendEventRequest, - id: Union[int, str, None] | NotGiven = NOT_GIVEN, - jsonrpc: Literal["2.0"] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> SendEventResponse: - if agent_id is not None and agent_name is not None: - raise ValueError("Either agent_id or agent_name must be provided, but not both") - - if agent_id is not None: - raw_agent_rpc_response = await self.rpc( - agent_id=agent_id, - method="event/send", - params=params, - id=id, - jsonrpc=jsonrpc, - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - ) - elif agent_name is not None: - raw_agent_rpc_response = await self.rpc_by_name( - agent_name=agent_name, - method="event/send", - params=params, - id=id, - jsonrpc=jsonrpc, - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - ) - else: - raise ValueError("Either agent_id or agent_name must be provided") - - return SendEventResponse.model_validate(raw_agent_rpc_response, from_attributes=True) + class AgentsResourceWithRawResponse: def __init__(self, agents: AgentsResource) -> None: diff --git a/src/agentex/resources/deployment_history.py b/src/agentex/resources/deployment_history.py deleted file mode 100644 index 4f3638cf..00000000 --- a/src/agentex/resources/deployment_history.py +++ /dev/null @@ -1,272 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Optional - -import httpx - -from ..types import deployment_history_list_params -from .._types import Body, Omit, Query, Headers, NotGiven, omit, not_given -from .._utils import maybe_transform, async_maybe_transform -from .._compat import cached_property -from .._resource import SyncAPIResource, AsyncAPIResource -from .._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from .._base_client import make_request_options -from ..types.deployment_history import DeploymentHistory -from ..types.deployment_history_list_response import DeploymentHistoryListResponse - -__all__ = ["DeploymentHistoryResource", "AsyncDeploymentHistoryResource"] - - -class DeploymentHistoryResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> DeploymentHistoryResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/scaleapi/scale-agentex-python#accessing-raw-response-data-eg-headers - """ - return DeploymentHistoryResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> DeploymentHistoryResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/scaleapi/scale-agentex-python#with_streaming_response - """ - return DeploymentHistoryResourceWithStreamingResponse(self) - - def retrieve( - self, - deployment_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
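The async client mirrored the same surface before these wrappers were removed; a sketch under the same invented-payload assumptions:

import asyncio

from agentex import AsyncAgentex


async def main() -> None:
    client = AsyncAgentex()
    # Illustrative event payload; see ParamsSendEventRequest for the real fields.
    response = await client.agents.send_event(
        agent_name="demo-agent",  # hypothetical agent
        params={"task_id": "task-123", "event": {"type": "ping"}},
    )
    print(response.result)


asyncio.run(main())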
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = not_given, - ) -> DeploymentHistory: - """ - Get a deployment record by its unique ID. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not deployment_id: - raise ValueError(f"Expected a non-empty value for `deployment_id` but received {deployment_id!r}") - return self._get( - f"/deployment-history/{deployment_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=DeploymentHistory, - ) - - def list( - self, - *, - agent_id: Optional[str] | Omit = omit, - agent_name: Optional[str] | Omit = omit, - limit: int | Omit = omit, - page_number: int | Omit = omit, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = not_given, - ) -> DeploymentHistoryListResponse: - """ - List deployment history for an agent. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/deployment-history", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "agent_id": agent_id, - "agent_name": agent_name, - "limit": limit, - "page_number": page_number, - }, - deployment_history_list_params.DeploymentHistoryListParams, - ), - ), - cast_to=DeploymentHistoryListResponse, - ) - - -class AsyncDeploymentHistoryResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncDeploymentHistoryResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/scaleapi/scale-agentex-python#accessing-raw-response-data-eg-headers - """ - return AsyncDeploymentHistoryResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncDeploymentHistoryResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/scaleapi/scale-agentex-python#with_streaming_response - """ - return AsyncDeploymentHistoryResourceWithStreamingResponse(self) - - async def retrieve( - self, - deployment_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = not_given, - ) -> DeploymentHistory: - """ - Get a deployment record by its unique ID. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not deployment_id: - raise ValueError(f"Expected a non-empty value for `deployment_id` but received {deployment_id!r}") - return await self._get( - f"/deployment-history/{deployment_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=DeploymentHistory, - ) - - async def list( - self, - *, - agent_id: Optional[str] | Omit = omit, - agent_name: Optional[str] | Omit = omit, - limit: int | Omit = omit, - page_number: int | Omit = omit, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = not_given, - ) -> DeploymentHistoryListResponse: - """ - List deployment history for an agent. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/deployment-history", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "agent_id": agent_id, - "agent_name": agent_name, - "limit": limit, - "page_number": page_number, - }, - deployment_history_list_params.DeploymentHistoryListParams, - ), - ), - cast_to=DeploymentHistoryListResponse, - ) - - -class DeploymentHistoryResourceWithRawResponse: - def __init__(self, deployment_history: DeploymentHistoryResource) -> None: - self._deployment_history = deployment_history - - self.retrieve = to_raw_response_wrapper( - deployment_history.retrieve, - ) - self.list = to_raw_response_wrapper( - deployment_history.list, - ) - - -class AsyncDeploymentHistoryResourceWithRawResponse: - def __init__(self, deployment_history: AsyncDeploymentHistoryResource) -> None: - self._deployment_history = deployment_history - - self.retrieve = async_to_raw_response_wrapper( - deployment_history.retrieve, - ) - self.list = async_to_raw_response_wrapper( - deployment_history.list, - ) - - -class DeploymentHistoryResourceWithStreamingResponse: - def __init__(self, deployment_history: DeploymentHistoryResource) -> None: - self._deployment_history = deployment_history - - self.retrieve = to_streamed_response_wrapper( - deployment_history.retrieve, - ) - self.list = to_streamed_response_wrapper( - deployment_history.list, - ) - - -class AsyncDeploymentHistoryResourceWithStreamingResponse: - def __init__(self, deployment_history: AsyncDeploymentHistoryResource) -> None: - self._deployment_history = deployment_history - - self.retrieve = async_to_streamed_response_wrapper( - deployment_history.retrieve, - ) - 
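Before its removal, the deployment-history resource exposed two read calls; a hedged usage sketch in which the agent name and deployment ID are placeholders:

from agentex import Agentex

client = Agentex()

# List recent deployments for an agent; the response is a plain list of DeploymentHistory records.
for record in client.deployment_history.list(agent_name="demo-agent", limit=10):
    print(record.commit_hash, record.branch_name, record.deployment_timestamp)

# Or fetch a single record by its unique ID.
record = client.deployment_history.retrieve("deployment_id")  # placeholder ID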
self.list = async_to_streamed_response_wrapper( - deployment_history.list, - ) diff --git a/src/agentex/types/__init__.py b/src/agentex/types/__init__.py index 140acd92..8628bd3c 100644 --- a/src/agentex/types/__init__.py +++ b/src/agentex/types/__init__.py @@ -28,7 +28,6 @@ from .agent_rpc_response import AgentRpcResponse as AgentRpcResponse from .agent_task_tracker import AgentTaskTracker as AgentTaskTracker from .data_content_param import DataContentParam as DataContentParam -from .deployment_history import DeploymentHistory as DeploymentHistory from .span_create_params import SpanCreateParams as SpanCreateParams from .span_list_response import SpanListResponse as SpanListResponse from .span_update_params import SpanUpdateParams as SpanUpdateParams @@ -64,7 +63,5 @@ from .tool_response_content_param import ToolResponseContentParam as ToolResponseContentParam from .task_retrieve_by_name_params import TaskRetrieveByNameParams as TaskRetrieveByNameParams from .message_list_paginated_params import MessageListPaginatedParams as MessageListPaginatedParams -from .deployment_history_list_params import DeploymentHistoryListParams as DeploymentHistoryListParams from .task_retrieve_by_name_response import TaskRetrieveByNameResponse as TaskRetrieveByNameResponse from .message_list_paginated_response import MessageListPaginatedResponse as MessageListPaginatedResponse -from .deployment_history_list_response import DeploymentHistoryListResponse as DeploymentHistoryListResponse diff --git a/src/agentex/types/agent_rpc_response.py b/src/agentex/types/agent_rpc_response.py index 84fbab70..e9995e80 100644 --- a/src/agentex/types/agent_rpc_response.py +++ b/src/agentex/types/agent_rpc_response.py @@ -1,49 +1,20 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from __future__ import annotations from typing import Union, Optional from typing_extensions import Literal -from .task import Task -from .event import Event from .._models import BaseModel -from .task_message import TaskMessage from .agent_rpc_result import AgentRpcResult -from .task_message_update import TaskMessageUpdate __all__ = ["AgentRpcResponse"] -class BaseAgentRpcResponse(BaseModel): - id: Union[int, str, None] = None - error: Optional[object] = None - jsonrpc: Optional[Literal["2.0"]] = None - - -class AgentRpcResponse(BaseAgentRpcResponse): +class AgentRpcResponse(BaseModel): result: Optional[AgentRpcResult] = None """The result of the agent RPC request""" + id: Union[int, str, None] = None -class CreateTaskResponse(BaseAgentRpcResponse): - result: Task - """The result of the task creation""" - - -class CancelTaskResponse(BaseAgentRpcResponse): - result: Task - """The result of the task cancellation""" - - -class SendMessageResponse(BaseAgentRpcResponse): - result: list[TaskMessage] - """The result of the message sending""" - -class SendMessageStreamResponse(BaseAgentRpcResponse): - result: TaskMessageUpdate - """The result of the message sending""" - + error: Optional[object] = None -class SendEventResponse(BaseAgentRpcResponse): - result: Event - """The result of the event sending""" \ No newline at end of file + jsonrpc: Optional[Literal["2.0"]] = None diff --git a/src/agentex/types/data_content.py b/src/agentex/types/data_content.py index f23212fe..2ed34045 100644 --- a/src/agentex/types/data_content.py +++ b/src/agentex/types/data_content.py @@ -1,6 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-from typing import Dict +from typing import Dict, Optional from typing_extensions import Literal from .._models import BaseModel @@ -20,11 +20,11 @@ class DataContent(BaseModel): data: Dict[str, object] """The contents of the data message.""" - style: MessageStyle = "static" + style: Optional[MessageStyle] = None """The style of the message. This is used by the client to determine how to display the message. """ - type: Literal["data"] = "data" + type: Optional[Literal["data"]] = None """The type of the message, in this case `data`.""" diff --git a/src/agentex/types/deployment_history.py b/src/agentex/types/deployment_history.py deleted file mode 100644 index f9e3ce51..00000000 --- a/src/agentex/types/deployment_history.py +++ /dev/null @@ -1,33 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from datetime import datetime - -from .._models import BaseModel - -__all__ = ["DeploymentHistory"] - - -class DeploymentHistory(BaseModel): - id: str - """The unique identifier of the deployment record""" - - agent_id: str - """The ID of the agent this deployment belongs to""" - - author_email: str - """Email of the commit author""" - - author_name: str - """Name of the commit author""" - - branch_name: str - """Name of the branch""" - - build_timestamp: datetime - """When the build was created""" - - commit_hash: str - """Git commit hash for this deployment""" - - deployment_timestamp: datetime - """When this deployment was first seen in the system""" diff --git a/src/agentex/types/deployment_history_list_params.py b/src/agentex/types/deployment_history_list_params.py deleted file mode 100644 index b26b2462..00000000 --- a/src/agentex/types/deployment_history_list_params.py +++ /dev/null @@ -1,18 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Optional -from typing_extensions import TypedDict - -__all__ = ["DeploymentHistoryListParams"] - - -class DeploymentHistoryListParams(TypedDict, total=False): - agent_id: Optional[str] - - agent_name: Optional[str] - - limit: int - - page_number: int diff --git a/src/agentex/types/deployment_history_list_response.py b/src/agentex/types/deployment_history_list_response.py deleted file mode 100644 index c71a8f03..00000000 --- a/src/agentex/types/deployment_history_list_response.py +++ /dev/null @@ -1,10 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List -from typing_extensions import TypeAlias - -from .deployment_history import DeploymentHistory - -__all__ = ["DeploymentHistoryListResponse"] - -DeploymentHistoryListResponse: TypeAlias = List[DeploymentHistory] diff --git a/src/agentex/types/text_content.py b/src/agentex/types/text_content.py index 8c8b77e8..35fefb06 100644 --- a/src/agentex/types/text_content.py +++ b/src/agentex/types/text_content.py @@ -40,17 +40,17 @@ class TextContent(BaseModel): attachments: Optional[List[Attachment]] = None """Optional list of file attachments with structured metadata.""" - format: TextFormat = "plain" + format: Optional[TextFormat] = None """The format of the message. This is used by the client to determine how to display the message. """ - style: MessageStyle = "static" + style: Optional[MessageStyle] = None """The style of the message. This is used by the client to determine how to display the message. 
""" - type: Literal["text"] = "text" + type: Optional[Literal["text"]] = None """The type of the message, in this case `text`.""" diff --git a/src/agentex/types/tool_request_content.py b/src/agentex/types/tool_request_content.py index 8282ac3b..66128630 100644 --- a/src/agentex/types/tool_request_content.py +++ b/src/agentex/types/tool_request_content.py @@ -1,6 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import Dict +from typing import Dict, Optional from typing_extensions import Literal from .._models import BaseModel @@ -26,11 +26,11 @@ class ToolRequestContent(BaseModel): tool_call_id: str """The ID of the tool call that is being requested.""" - style: MessageStyle = "static" + style: Optional[MessageStyle] = None """The style of the message. This is used by the client to determine how to display the message. """ - type: Literal["tool_request"] = "tool_request" + type: Optional[Literal["tool_request"]] = None """The type of the message, in this case `tool_request`.""" diff --git a/src/agentex/types/tool_response_content.py b/src/agentex/types/tool_response_content.py index bf155974..f6ba15b7 100644 --- a/src/agentex/types/tool_response_content.py +++ b/src/agentex/types/tool_response_content.py @@ -1,5 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. +from typing import Optional from typing_extensions import Literal from .._models import BaseModel @@ -25,11 +26,11 @@ class ToolResponseContent(BaseModel): tool_call_id: str """The ID of the tool call that is being responded to.""" - style: MessageStyle = "static" + style: Optional[MessageStyle] = None """The style of the message. This is used by the client to determine how to display the message. """ - type: Literal["tool_response"] = "tool_response" + type: Optional[Literal["tool_response"]] = None """The type of the message, in this case `tool_response`.""" diff --git a/src/agentex_sdk/lib/.keep b/src/agentex_sdk/lib/.keep new file mode 100644 index 00000000..5e2c99fd --- /dev/null +++ b/src/agentex_sdk/lib/.keep @@ -0,0 +1,4 @@ +File generated from our OpenAPI spec by Stainless. + +This directory can be used to store custom files to expand the SDK. +It is ignored by Stainless code generation and its content (other than this keep file) won't be touched. 
\ No newline at end of file diff --git a/tests/api_resources/messages/test_batch.py b/tests/api_resources/messages/test_batch.py index da404855..1b25aaab 100644 --- a/tests/api_resources/messages/test_batch.py +++ b/tests/api_resources/messages/test_batch.py @@ -8,10 +8,9 @@ import pytest from agentex import Agentex, AsyncAgentex +from tests.utils import assert_matches_type from agentex.types.messages import BatchCreateResponse, BatchUpdateResponse -from ...utils import assert_matches_type - base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/test_agents.py b/tests/api_resources/test_agents.py index 2b078045..340238ba 100644 --- a/tests/api_resources/test_agents.py +++ b/tests/api_resources/test_agents.py @@ -8,6 +8,7 @@ import pytest from agentex import Agentex, AsyncAgentex +from tests.utils import assert_matches_type from agentex.types import ( Agent, AgentRpcResponse, @@ -15,8 +16,6 @@ ) from agentex.types.shared import DeleteResponse -from ..utils import assert_matches_type - base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/test_deployment_history.py b/tests/api_resources/test_deployment_history.py deleted file mode 100644 index 89012bf9..00000000 --- a/tests/api_resources/test_deployment_history.py +++ /dev/null @@ -1,187 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from agentex import Agentex, AsyncAgentex -from agentex.types import DeploymentHistory, DeploymentHistoryListResponse - -from ..utils import assert_matches_type - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestDeploymentHistory: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip(reason="Prism tests are disabled") - @parametrize - def test_method_retrieve(self, client: Agentex) -> None: - deployment_history = client.deployment_history.retrieve( - "deployment_id", - ) - assert_matches_type(DeploymentHistory, deployment_history, path=["response"]) - - @pytest.mark.skip(reason="Prism tests are disabled") - @parametrize - def test_raw_response_retrieve(self, client: Agentex) -> None: - response = client.deployment_history.with_raw_response.retrieve( - "deployment_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - deployment_history = response.parse() - assert_matches_type(DeploymentHistory, deployment_history, path=["response"]) - - @pytest.mark.skip(reason="Prism tests are disabled") - @parametrize - def test_streaming_response_retrieve(self, client: Agentex) -> None: - with client.deployment_history.with_streaming_response.retrieve( - "deployment_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - deployment_history = response.parse() - assert_matches_type(DeploymentHistory, deployment_history, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip(reason="Prism tests are disabled") - @parametrize - def test_path_params_retrieve(self, client: Agentex) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `deployment_id` but received ''"): - client.deployment_history.with_raw_response.retrieve( - "", - ) - - @pytest.mark.skip(reason="Prism 
tests are disabled") - @parametrize - def test_method_list(self, client: Agentex) -> None: - deployment_history = client.deployment_history.list() - assert_matches_type(DeploymentHistoryListResponse, deployment_history, path=["response"]) - - @pytest.mark.skip(reason="Prism tests are disabled") - @parametrize - def test_method_list_with_all_params(self, client: Agentex) -> None: - deployment_history = client.deployment_history.list( - agent_id="agent_id", - agent_name="agent_name", - limit=0, - page_number=0, - ) - assert_matches_type(DeploymentHistoryListResponse, deployment_history, path=["response"]) - - @pytest.mark.skip(reason="Prism tests are disabled") - @parametrize - def test_raw_response_list(self, client: Agentex) -> None: - response = client.deployment_history.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - deployment_history = response.parse() - assert_matches_type(DeploymentHistoryListResponse, deployment_history, path=["response"]) - - @pytest.mark.skip(reason="Prism tests are disabled") - @parametrize - def test_streaming_response_list(self, client: Agentex) -> None: - with client.deployment_history.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - deployment_history = response.parse() - assert_matches_type(DeploymentHistoryListResponse, deployment_history, path=["response"]) - - assert cast(Any, response.is_closed) is True - - -class TestAsyncDeploymentHistory: - parametrize = pytest.mark.parametrize( - "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] - ) - - @pytest.mark.skip(reason="Prism tests are disabled") - @parametrize - async def test_method_retrieve(self, async_client: AsyncAgentex) -> None: - deployment_history = await async_client.deployment_history.retrieve( - "deployment_id", - ) - assert_matches_type(DeploymentHistory, deployment_history, path=["response"]) - - @pytest.mark.skip(reason="Prism tests are disabled") - @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncAgentex) -> None: - response = await async_client.deployment_history.with_raw_response.retrieve( - "deployment_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - deployment_history = await response.parse() - assert_matches_type(DeploymentHistory, deployment_history, path=["response"]) - - @pytest.mark.skip(reason="Prism tests are disabled") - @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncAgentex) -> None: - async with async_client.deployment_history.with_streaming_response.retrieve( - "deployment_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - deployment_history = await response.parse() - assert_matches_type(DeploymentHistory, deployment_history, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip(reason="Prism tests are disabled") - @parametrize - async def test_path_params_retrieve(self, async_client: AsyncAgentex) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `deployment_id` but received ''"): - await async_client.deployment_history.with_raw_response.retrieve( - "", - ) - - @pytest.mark.skip(reason="Prism tests are disabled") - @parametrize - async def 
test_method_list(self, async_client: AsyncAgentex) -> None: - deployment_history = await async_client.deployment_history.list() - assert_matches_type(DeploymentHistoryListResponse, deployment_history, path=["response"]) - - @pytest.mark.skip(reason="Prism tests are disabled") - @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncAgentex) -> None: - deployment_history = await async_client.deployment_history.list( - agent_id="agent_id", - agent_name="agent_name", - limit=0, - page_number=0, - ) - assert_matches_type(DeploymentHistoryListResponse, deployment_history, path=["response"]) - - @pytest.mark.skip(reason="Prism tests are disabled") - @parametrize - async def test_raw_response_list(self, async_client: AsyncAgentex) -> None: - response = await async_client.deployment_history.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - deployment_history = await response.parse() - assert_matches_type(DeploymentHistoryListResponse, deployment_history, path=["response"]) - - @pytest.mark.skip(reason="Prism tests are disabled") - @parametrize - async def test_streaming_response_list(self, async_client: AsyncAgentex) -> None: - async with async_client.deployment_history.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - deployment_history = await response.parse() - assert_matches_type(DeploymentHistoryListResponse, deployment_history, path=["response"]) - - assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/test_events.py b/tests/api_resources/test_events.py index ccf5f7bf..fad95592 100644 --- a/tests/api_resources/test_events.py +++ b/tests/api_resources/test_events.py @@ -8,10 +8,9 @@ import pytest from agentex import Agentex, AsyncAgentex +from tests.utils import assert_matches_type from agentex.types import Event, EventListResponse -from ..utils import assert_matches_type - base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/test_messages.py b/tests/api_resources/test_messages.py index 19a51ae7..4b234334 100644 --- a/tests/api_resources/test_messages.py +++ b/tests/api_resources/test_messages.py @@ -8,14 +8,13 @@ import pytest from agentex import Agentex, AsyncAgentex +from tests.utils import assert_matches_type from agentex.types import ( TaskMessage, MessageListResponse, MessageListPaginatedResponse, ) -from ..utils import assert_matches_type - base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/test_spans.py b/tests/api_resources/test_spans.py index 8bfdb952..ef991d4e 100644 --- a/tests/api_resources/test_spans.py +++ b/tests/api_resources/test_spans.py @@ -8,11 +8,10 @@ import pytest from agentex import Agentex, AsyncAgentex +from tests.utils import assert_matches_type from agentex.types import Span, SpanListResponse from agentex._utils import parse_datetime -from ..utils import assert_matches_type - base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/test_states.py b/tests/api_resources/test_states.py index 0b319342..c8cfd84c 100644 --- a/tests/api_resources/test_states.py +++ b/tests/api_resources/test_states.py @@ -8,10 +8,9 @@ import pytest from agentex import Agentex, AsyncAgentex +from tests.utils import assert_matches_type from agentex.types import State, StateListResponse -from 
..utils import assert_matches_type - base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/test_tasks.py b/tests/api_resources/test_tasks.py index ce240bad..6c2f2c54 100644 --- a/tests/api_resources/test_tasks.py +++ b/tests/api_resources/test_tasks.py @@ -8,6 +8,7 @@ import pytest from agentex import Agentex, AsyncAgentex +from tests.utils import assert_matches_type from agentex.types import ( TaskListResponse, TaskRetrieveResponse, @@ -15,8 +16,6 @@ ) from agentex.types.shared import DeleteResponse -from ..utils import assert_matches_type - base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/test_tracker.py b/tests/api_resources/test_tracker.py index ae4a3a91..5ea656f6 100644 --- a/tests/api_resources/test_tracker.py +++ b/tests/api_resources/test_tracker.py @@ -8,10 +8,9 @@ import pytest from agentex import Agentex, AsyncAgentex +from tests.utils import assert_matches_type from agentex.types import AgentTaskTracker, TrackerListResponse -from ..utils import assert_matches_type - base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/lib/__init__.py b/tests/lib/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/lib/adk/__init__.py b/tests/lib/adk/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/lib/adk/providers/__init__.py b/tests/lib/adk/providers/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/lib/adk/providers/test_openai_activities.py b/tests/lib/adk/providers/test_openai_activities.py deleted file mode 100644 index c933b6ce..00000000 --- a/tests/lib/adk/providers/test_openai_activities.py +++ /dev/null @@ -1,705 +0,0 @@ -from unittest.mock import Mock, patch - -import pytest -from agents import RunResult, RunResultStreaming -from temporalio.testing import ActivityEnvironment -from openai.types.responses import ResponseCodeInterpreterToolCall - - -class TestOpenAIActivities: - @pytest.fixture - def sample_run_result(self): - """Create a sample RunResult for mocking.""" - mock_result = Mock(spec=RunResult) - mock_result.final_output = "Hello! How can I help you today?" - mock_result.to_input_list.return_value = [ - {"role": "user", "content": "Hello, world!"}, - {"role": "assistant", "content": "Hello! 
How can I help you today?"}, - ] - # Add new_items attribute that the OpenAIService expects - mock_result.new_items = [] - return mock_result - - @pytest.mark.parametrize( - "max_turns,should_be_passed", - [ - (None, False), - (7, True), # Test with non-default value (default is 10) - ], - ) - @patch("agents.Runner.run") - async def test_run_agent(self, mock_runner_run, max_turns, should_be_passed, sample_run_result): - """Comprehensive test for run_agent covering all major scenarios.""" - from agentex.lib.core.temporal.activities.adk.providers.openai_activities import RunAgentParams - - # Arrange - mock_runner_run.return_value = sample_run_result - mock_tracer = self._create_mock_tracer() - _, openai_activities, env = self._create_test_setup(mock_tracer) - - # Create params with or without max_turns - params = RunAgentParams( - input_list=[{"role": "user", "content": "Hello, world!"}], - mcp_server_params=[], - agent_name="test_agent", - agent_instructions="You are a helpful assistant", - max_turns=max_turns, - trace_id="test-trace-id", - parent_span_id="test-span-id", - ) - - # Act - result = await env.run(openai_activities.run_agent, params) - - # Assert - Result structure - self._assert_result_structure(result) - - # Assert - Runner call - mock_runner_run.assert_called_once() - call_args = mock_runner_run.call_args - - # Assert - Runner signature validation - self._assert_runner_call_signature(call_args) - - # Assert - Input parameter matches - assert call_args.kwargs["input"] == params.input_list - - # Assert - Starting agent parameters - starting_agent = call_args.kwargs["starting_agent"] - self._assert_starting_agent_params(starting_agent, params) - - # Assert - Max turns parameter handling - if should_be_passed: - assert "max_turns" in call_args.kwargs, f"max_turns should be passed when set to {max_turns}" - assert call_args.kwargs["max_turns"] == max_turns, f"max_turns value should be {max_turns}" - else: - assert "max_turns" not in call_args.kwargs, "max_turns should not be passed when None" - - @pytest.mark.parametrize( - "previous_response_id,should_be_passed", - [ - (None, False), - ("response_123", True), - ], - ) - @patch("agents.Runner.run") - async def test_run_agent_previous_response_id( - self, mock_runner_run, previous_response_id, should_be_passed, sample_run_result - ): - """Test run_agent with previous_response_id parameter.""" - from agentex.lib.core.temporal.activities.adk.providers.openai_activities import RunAgentParams - - # Arrange - mock_runner_run.return_value = sample_run_result - mock_tracer = self._create_mock_tracer() - _, openai_activities, env = self._create_test_setup(mock_tracer) - - # Create params with or without previous_response_id - params = RunAgentParams( - input_list=[{"role": "user", "content": "Hello, world!"}], - mcp_server_params=[], - agent_name="test_agent", - agent_instructions="You are a helpful assistant", - previous_response_id=previous_response_id, - trace_id="test-trace-id", - parent_span_id="test-span-id", - ) - - # Act - result = await env.run(openai_activities.run_agent, params) - - # Assert - Result structure - self._assert_result_structure(result) - - # Assert - Runner call - mock_runner_run.assert_called_once() - call_args = mock_runner_run.call_args - - # Assert - Runner signature validation - self._assert_runner_call_signature(call_args) - - # Assert - Previous response ID parameter handling - if should_be_passed: - assert "previous_response_id" in call_args.kwargs, ( - f"previous_response_id should be passed when set to 
{previous_response_id}" - ) - assert call_args.kwargs["previous_response_id"] == previous_response_id, ( - f"previous_response_id value should be {previous_response_id}" - ) - else: - assert "previous_response_id" not in call_args.kwargs, "previous_response_id should not be passed when None" - - @pytest.mark.parametrize( - "tools_case", - [ - "no_tools", - "function_tool", - "web_search_tool", - "file_search_tool", - "computer_tool", - "code_interpreter_tool", - "image_generation_tool", - "local_shell_tool", - "mixed_tools", - ], - ) - @patch("agents.Runner.run") - async def test_run_agent_tools_conversion(self, mock_runner_run, tools_case, sample_run_result): - """Test that tools are properly converted from Temporal to OpenAI agents format.""" - from agentex.lib.core.temporal.activities.adk.providers.openai_activities import ( - RunAgentParams, - ) - - # Arrange - mock_runner_run.return_value = sample_run_result - mock_tracer = self._create_mock_tracer() - _, openai_activities, env = self._create_test_setup(mock_tracer) - - # Create different tool configurations based on test case - tools = self._create_tools_for_case(tools_case) - - params = RunAgentParams( - input_list=[{"role": "user", "content": "Hello, world!"}], - mcp_server_params=[], - agent_name="test_agent", - agent_instructions="You are a helpful assistant", - tools=tools, - trace_id="test-trace-id", - parent_span_id="test-span-id", - ) - - # Act - result = await env.run(openai_activities.run_agent, params) - - # Assert - Result structure - self._assert_result_structure(result) - - # Assert - Runner call - mock_runner_run.assert_called_once() - call_args = mock_runner_run.call_args - - # Assert - Runner signature validation - self._assert_runner_call_signature(call_args) - - # Assert - Agent was created and tools were converted properly - starting_agent = call_args.kwargs["starting_agent"] - self._assert_tools_conversion(starting_agent, tools_case, tools) - - @patch("agents.Runner.run") - async def test_run_agent_auto_send_with_tool_responses(self, mock_runner_run): - """Test run_agent_auto_send with code interpreter tool responses.""" - from agentex.lib.core.temporal.activities.adk.providers.openai_activities import ( - CodeInterpreterTool, - RunAgentAutoSendParams, - ) - - # Arrange - Setup test environment - mock_tracer = self._create_mock_tracer() - openai_service, openai_activities, env = self._create_test_setup(mock_tracer) - mock_streaming_context = self._setup_streaming_service_mocks(openai_service) - - # Create tool call and response mocks using helpers - code_interpreter_call = self._create_code_interpreter_tool_call_mock() - mock_tool_call_item = self._create_tool_call_item_mock(code_interpreter_call) - mock_tool_output_item = self._create_tool_output_item_mock() - - # Create a mock result with tool calls that will be processed - mock_result_with_tools = Mock(spec=RunResult) - mock_result_with_tools.final_output = "Code executed successfully" - mock_result_with_tools.to_input_list.return_value = [ - {"role": "user", "content": "Run some Python code"}, - {"role": "assistant", "content": "Code executed successfully"}, - ] - mock_result_with_tools.new_items = [mock_tool_call_item, mock_tool_output_item] - mock_runner_run.return_value = mock_result_with_tools - - # Create test parameters - params = RunAgentAutoSendParams( - input_list=[{"role": "user", "content": "Run some Python code"}], - mcp_server_params=[], - agent_name="test_agent", - agent_instructions=("You are a helpful assistant with code interpreter"), - 
tools=[CodeInterpreterTool(tool_config={"type": "code_interpreter"})], - trace_id="test-trace-id", - parent_span_id="test-span-id", - task_id="test-task-id", - ) - - result = await env.run(openai_activities.run_agent_auto_send, params) - - assert result.final_output == "Code executed successfully" - - # Verify runner.run was called with expected signature - mock_runner_run.assert_called_once() - call_args = mock_runner_run.call_args - self._assert_runner_call_signature(call_args) - - # Verify starting agent parameters - starting_agent = call_args.kwargs["starting_agent"] - # Create a mock object with the expected attributes - expected_params = Mock() - expected_params.agent_name = "test_agent" - expected_params.agent_instructions = "You are a helpful assistant with code interpreter" - expected_params.tools = [CodeInterpreterTool(tool_config={"type": "code_interpreter"})] - self._assert_starting_agent_params(starting_agent, expected_params) - - # Verify streaming context received tool request and response updates - # Should have been called twice - once for tool request, once for response - assert mock_streaming_context.stream_update.call_count == 2 - - # First call should be tool request - first_call = mock_streaming_context.stream_update.call_args_list[0] - first_update = first_call[1]["update"] # keyword argument - assert hasattr(first_update, "content") - assert first_update.content.name == "code_interpreter" - assert first_update.content.tool_call_id == "code_interpreter_call_123" - - # Second call should be tool response - second_call = mock_streaming_context.stream_update.call_args_list[1] - second_update = second_call[1]["update"] # keyword argument - assert hasattr(second_update, "content") - assert second_update.content.name == "code_interpreter_call" - assert second_update.content.tool_call_id == "code_interpreter_call_123" - - @patch("agents.Runner.run_streamed") - async def test_run_agent_streamed_auto_send(self, mock_runner_run_streamed): - """Test run_agent_streamed_auto_send with streaming and tool responses.""" - from agentex.lib.core.temporal.activities.adk.providers.openai_activities import ( - CodeInterpreterTool, - RunAgentStreamedAutoSendParams, - ) - - # Create streaming result mock using helper - mock_streaming_result = self._create_streaming_result_mock() - - # Create mock streaming events - async def mock_stream_events(): - # Tool call event - tool_call_event = Mock() - tool_call_event.type = "run_item_stream_event" - tool_call_item = Mock() - tool_call_item.type = "tool_call_item" - tool_call_item.raw_item = self._create_code_interpreter_tool_call_mock() - tool_call_event.item = tool_call_item - yield tool_call_event - - # Tool response event - tool_response_event = Mock() - tool_response_event.type = "run_item_stream_event" - tool_response_item = Mock() - tool_response_item.type = "tool_call_output_item" - tool_response_item.raw_item = {"call_id": "code_interpreter_call_123", "output": "Hello from streaming"} - tool_response_event.item = tool_response_item - yield tool_response_event - - mock_streaming_result.stream_events = mock_stream_events - mock_runner_run_streamed.return_value = mock_streaming_result - - # Setup test environment - mock_tracer = self._create_mock_tracer() - openai_service, openai_activities, env = self._create_test_setup(mock_tracer) - mock_streaming_context = self._setup_streaming_service_mocks(openai_service) - - # Create test parameters - params = RunAgentStreamedAutoSendParams( - input_list=[{"role": "user", "content": "Run some Python 
code"}], - mcp_server_params=[], - agent_name="test_agent", - agent_instructions=("You are a helpful assistant with code interpreter"), - tools=[CodeInterpreterTool(tool_config={"type": "code_interpreter"})], - trace_id="test-trace-id", - parent_span_id="test-span-id", - task_id="test-task-id", - ) - - # Act - result = await env.run(openai_activities.run_agent_streamed_auto_send, params) - - # Assert - Result structure (expecting SerializableRunResultStreaming from activity) - from agentex.lib.types.agent_results import SerializableRunResultStreaming - - assert isinstance(result, SerializableRunResultStreaming) - assert result.final_output == "Code executed successfully" - - # Verify runner.run_streamed was called with expected signature - mock_runner_run_streamed.assert_called_once() - call_args = mock_runner_run_streamed.call_args - self._assert_runner_call_signature_streamed(call_args) - - # Verify starting agent parameters - starting_agent = call_args.kwargs["starting_agent"] - # Create a mock object with the expected attributes - expected_params = Mock() - expected_params.agent_name = "test_agent" - expected_params.agent_instructions = "You are a helpful assistant with code interpreter" - expected_params.tools = [CodeInterpreterTool(tool_config={"type": "code_interpreter"})] - self._assert_starting_agent_params(starting_agent, expected_params) - - # Verify streaming context received tool request and response updates - # Should have been called twice - once for tool request, once for response - assert mock_streaming_context.stream_update.call_count == 2 - - # First call should be tool request - first_call = mock_streaming_context.stream_update.call_args_list[0] - first_update = first_call[1]["update"] # keyword argument - assert hasattr(first_update, "content") - assert first_update.content.name == "code_interpreter" - assert first_update.content.tool_call_id == "code_interpreter_call_123" - - # Second call should be tool response - second_call = mock_streaming_context.stream_update.call_args_list[1] - second_update = second_call[1]["update"] # keyword argument - assert hasattr(second_update, "content") - assert second_update.content.name == "code_interpreter_call" - assert second_update.content.tool_call_id == "code_interpreter_call_123" - - def _create_mock_tracer(self): - """Helper method to create a properly mocked tracer with async context manager support.""" - mock_tracer = Mock() - mock_trace = Mock() - mock_span = Mock() - - # Setup the span context manager - async def mock_span_aenter(_): - return mock_span - - async def mock_span_aexit(_, _exc_type, _exc_val, _exc_tb): - return None - - mock_span.__aenter__ = mock_span_aenter - mock_span.__aexit__ = mock_span_aexit - mock_trace.span.return_value = mock_span - mock_tracer.trace.return_value = mock_trace - - return mock_tracer - - def _create_test_setup(self, mock_tracer): - """Helper method to create OpenAIService and OpenAIActivities instances.""" - # Import here to avoid circular imports - from agentex.lib.core.services.adk.providers.openai import OpenAIService - from agentex.lib.core.temporal.activities.adk.providers.openai_activities import OpenAIActivities - - openai_service = OpenAIService(tracer=mock_tracer) - openai_activities = OpenAIActivities(openai_service) - env = ActivityEnvironment() - - return openai_service, openai_activities, env - - def _assert_runner_call_signature(self, call_args): - """Helper method to validate Runner.run call signature.""" - actual_kwargs = set(call_args.kwargs.keys()) - - # Check that 
we only pass valid Runner.run parameters - valid_params = { - "starting_agent", - "input", - "context", - "max_turns", - "hooks", - "run_config", - "previous_response_id", - "session", - } - invalid_kwargs = actual_kwargs - valid_params - assert not invalid_kwargs, f"Invalid arguments passed to Runner.run: {invalid_kwargs}" - - # Verify required arguments are present - assert "starting_agent" in call_args.kwargs, "starting_agent is required for Runner.run" - assert "input" in call_args.kwargs, "input is required for Runner.run" - - # Verify starting_agent is not None (actual agent object created) - assert call_args.kwargs["starting_agent"] is not None, "starting_agent should not be None" - - def _assert_runner_call_signature_streamed(self, call_args): - """Helper method to validate Runner.run_streamed call signature.""" - actual_kwargs = set(call_args.kwargs.keys()) - - # Check that we only pass valid Runner.run_streamed parameters - valid_params = { - "starting_agent", - "input", - "context", - "max_turns", - "hooks", - "run_config", - "previous_response_id", - "session", - } - invalid_kwargs = actual_kwargs - valid_params - assert not invalid_kwargs, f"Invalid arguments passed to Runner.run_streamed: {invalid_kwargs}" - - # Verify required arguments are present - assert "starting_agent" in call_args.kwargs, "starting_agent is required for Runner.run_streamed" - assert "input" in call_args.kwargs, "input is required for Runner.run_streamed" - - # Verify starting_agent is not None (actual agent object created) - assert call_args.kwargs["starting_agent"] is not None, "starting_agent should not be None" - - def _assert_starting_agent_params(self, starting_agent, expected_params): - """Helper method to validate starting_agent parameters match expected values.""" - # Verify agent name and instructions match - assert starting_agent.name == expected_params.agent_name, f"Agent name should be {expected_params.agent_name}" - assert starting_agent.instructions == expected_params.agent_instructions, f"Agent instructions should match" - - # Note: Other agent parameters like tools, guardrails would be tested here - # but they require more complex inspection of the agent object - - def _assert_result_structure(self, result, expected_output="Hello! 
How can I help you today?"): - """Helper method to validate the result structure.""" - from agentex.lib.types.agent_results import SerializableRunResult - - assert isinstance(result, SerializableRunResult) - assert result.final_output == expected_output - assert len(result.final_input_list) == 2 - assert result.final_input_list[0]["role"] == "user" - assert result.final_input_list[1]["role"] == "assistant" - - def _create_tools_for_case(self, tools_case): - """Helper method to create tools based on test case.""" - from agentex.lib.core.temporal.activities.adk.providers.openai_activities import ( - ComputerTool, - FunctionTool, - WebSearchTool, - FileSearchTool, - LocalShellTool, - CodeInterpreterTool, - ImageGenerationTool, - ) - - def sample_tool_function(_context, args): - return f"Tool called with {args}" - - def sample_computer(): - return Mock() # Mock computer object - - def sample_safety_check(_data): - return True - - def sample_executor(): - return Mock() # Mock executor - - if tools_case == "no_tools": - return None - elif tools_case == "function_tool": - return [ - FunctionTool( - name="test_function", - description="A test function tool", - params_json_schema={"type": "object", "properties": {}}, - on_invoke_tool=sample_tool_function, - ) - ] - elif tools_case == "web_search_tool": - return [WebSearchTool()] - elif tools_case == "file_search_tool": - return [ - FileSearchTool(vector_store_ids=["store1", "store2"], max_num_results=10, include_search_results=True) - ] - elif tools_case == "computer_tool": - return [ComputerTool(computer=sample_computer(), on_safety_check=sample_safety_check)] - elif tools_case == "code_interpreter_tool": - return [ - CodeInterpreterTool( - tool_config={"type": "code_interpreter", "container": {"type": "static", "image": "python:3.11"}} - ) - ] - elif tools_case == "image_generation_tool": - return [ - ImageGenerationTool( - tool_config={ - "type": "image_generation", - "quality": "high", - "size": "1024x1024", - "output_format": "png", - } - ) - ] - elif tools_case == "local_shell_tool": - return [LocalShellTool(executor=sample_executor())] - elif tools_case == "mixed_tools": - return [ - FunctionTool( - name="calculator", - description="A calculator tool", - params_json_schema={"type": "object", "properties": {"expression": {"type": "string"}}}, - on_invoke_tool=sample_tool_function, - ), - WebSearchTool(), - FileSearchTool(vector_store_ids=["store1"], max_num_results=5), - ] - else: - raise ValueError(f"Unknown tools_case: {tools_case}") - - def _assert_tools_conversion(self, starting_agent, tools_case, _original_tools): - """Helper method to validate that tools were properly converted.""" - from agents.tool import ( - ComputerTool as OAIComputerTool, - FunctionTool as OAIFunctionTool, - WebSearchTool as OAIWebSearchTool, - FileSearchTool as OAIFileSearchTool, - LocalShellTool as OAILocalShellTool, - CodeInterpreterTool as OAICodeInterpreterTool, - ImageGenerationTool as OAIImageGenerationTool, - ) - - if tools_case == "no_tools": - # When no tools are provided, the agent should have an empty tools list - assert starting_agent.tools == [], "Agent should have empty tools list when no tools provided" - - elif tools_case == "function_tool": - assert len(starting_agent.tools) == 1, "Agent should have 1 tool" - agent_tool = starting_agent.tools[0] - assert isinstance(agent_tool, OAIFunctionTool), "Tool should be converted to OAIFunctionTool" - assert agent_tool.name == "test_function", "Tool name should be preserved" - assert agent_tool.description 
== "A test function tool", "Tool description should be preserved" - # Check that the schema contains our expected fields (may have additional fields) - assert "type" in agent_tool.params_json_schema, "Tool schema should have type field" - assert agent_tool.params_json_schema["type"] == "object", "Tool schema type should be object" - assert "properties" in agent_tool.params_json_schema, "Tool schema should have properties field" - assert callable(agent_tool.on_invoke_tool), "Tool function should be callable" - - elif tools_case == "web_search_tool": - assert len(starting_agent.tools) == 1, "Agent should have 1 tool" - agent_tool = starting_agent.tools[0] - assert isinstance(agent_tool, OAIWebSearchTool), "Tool should be converted to OAIWebSearchTool" - - elif tools_case == "file_search_tool": - assert len(starting_agent.tools) == 1, "Agent should have 1 tool" - agent_tool = starting_agent.tools[0] - assert isinstance(agent_tool, OAIFileSearchTool), "Tool should be converted to OAIFileSearchTool" - assert agent_tool.vector_store_ids == ["store1", "store2"], "Vector store IDs should be preserved" - assert agent_tool.max_num_results == 10, "Max results should be preserved" - assert agent_tool.include_search_results, "Include search results flag should be preserved" - - elif tools_case == "computer_tool": - assert len(starting_agent.tools) == 1, "Agent should have 1 tool" - agent_tool = starting_agent.tools[0] - assert isinstance(agent_tool, OAIComputerTool), "Tool should be converted to OAIComputerTool" - assert agent_tool.computer is not None, "Computer object should be present" - assert agent_tool.on_safety_check is not None, "Safety check function should be present" - - elif tools_case == "code_interpreter_tool": - assert len(starting_agent.tools) == 1, "Agent should have 1 tool" - agent_tool = starting_agent.tools[0] - assert isinstance(agent_tool, OAICodeInterpreterTool), "Tool should be converted to OAICodeInterpreterTool" - - elif tools_case == "image_generation_tool": - assert len(starting_agent.tools) == 1, "Agent should have 1 tool" - agent_tool = starting_agent.tools[0] - assert isinstance(agent_tool, OAIImageGenerationTool), "Tool should be converted to OAIImageGenerationTool" - - elif tools_case == "local_shell_tool": - assert len(starting_agent.tools) == 1, "Agent should have 1 tool" - agent_tool = starting_agent.tools[0] - assert isinstance(agent_tool, OAILocalShellTool), "Tool should be converted to OAILocalShellTool" - assert agent_tool.executor is not None, "Executor should be present" - - elif tools_case == "mixed_tools": - assert len(starting_agent.tools) == 3, "Agent should have 3 tools" - - # Check first tool (FunctionTool) - function_tool = starting_agent.tools[0] - assert isinstance(function_tool, OAIFunctionTool), "First tool should be OAIFunctionTool" - assert function_tool.name == "calculator", "Function tool name should be preserved" - - # Check second tool (WebSearchTool) - web_tool = starting_agent.tools[1] - assert isinstance(web_tool, OAIWebSearchTool), "Second tool should be OAIWebSearchTool" - - # Check third tool (FileSearchTool) - file_tool = starting_agent.tools[2] - assert isinstance(file_tool, OAIFileSearchTool), "Third tool should be OAIFileSearchTool" - - else: - raise ValueError(f"Unknown tools_case: {tools_case}") - - def _setup_streaming_service_mocks(self, openai_service): - """Helper method to setup streaming service mocks for run_agent_auto_send.""" - from unittest.mock import AsyncMock - - # Mock the streaming service and agentex client - 
mock_streaming_service = AsyncMock() - mock_agentex_client = AsyncMock() - - # Mock streaming context manager - mock_streaming_context = AsyncMock() - - # Create a proper TaskMessage mock that passes validation - from agentex.types.task_message import TaskMessage - - mock_task_message = Mock(spec=TaskMessage) - mock_task_message.id = "test-task-message-id" - mock_task_message.task_id = "test-task-id" - mock_task_message.content = {"type": "text", "content": "test"} - - mock_streaming_context.task_message = mock_task_message - mock_streaming_context.stream_update = AsyncMock() - - # Create a proper async context manager mock - from contextlib import asynccontextmanager - from unittest.mock import AsyncMock - - @asynccontextmanager - async def mock_streaming_context_manager(*_args, **_kwargs): - yield mock_streaming_context - - mock_streaming_service.streaming_task_message_context = mock_streaming_context_manager - - openai_service.streaming_service = mock_streaming_service - openai_service.agentex_client = mock_agentex_client - - return mock_streaming_context - - def _create_code_interpreter_tool_call_mock(self, call_id="code_interpreter_call_123"): - """Helper to create ResponseCodeInterpreterToolCall mock objects.""" - return ResponseCodeInterpreterToolCall( - id=call_id, - type="code_interpreter_call", - status="completed", - code="print('Hello from code interpreter')", - container_id="container_123", - outputs=[], - ) - - def _create_tool_call_item_mock(self, tool_call): - """Helper to create tool call item mock.""" - mock_tool_call_item = Mock() - mock_tool_call_item.type = "tool_call_item" - mock_tool_call_item.raw_item = tool_call - return mock_tool_call_item - - def _create_tool_output_item_mock(self, call_id="code_interpreter_call_123", output="Hello from code interpreter"): - """Helper to create tool output item mock.""" - mock_tool_output_item = Mock() - mock_tool_output_item.type = "tool_call_output_item" - mock_tool_output_item.raw_item = {"call_id": call_id, "output": output} - return mock_tool_output_item - - def _create_streaming_result_mock(self, final_output="Code executed successfully"): - """Helper to create streaming result mock with common setup.""" - mock_streaming_result = Mock(spec=RunResultStreaming) - mock_streaming_result.final_output = final_output - mock_streaming_result.new_items = [] - mock_streaming_result.final_input_list = [ - {"role": "user", "content": "Run some Python code"}, - {"role": "assistant", "content": final_output}, - ] - mock_streaming_result.to_input_list.return_value = [ - {"role": "user", "content": "Run some Python code"}, - {"role": "assistant", "content": final_output}, - ] - return mock_streaming_result - - def _create_common_agent_params(self, **overrides): - """Helper to create common agent parameters with defaults.""" - defaults = { - "input_list": [{"role": "user", "content": "Run some Python code"}], - "mcp_server_params": [], - "agent_name": "test_agent", - "agent_instructions": "You are a helpful assistant with code interpreter", - "trace_id": "test-trace-id", - "parent_span_id": "test-span-id", - "task_id": "test-task-id", - } - defaults.update(overrides) - return defaults diff --git a/tests/lib/test_agentex_worker.py b/tests/lib/test_agentex_worker.py deleted file mode 100644 index 76347f0d..00000000 --- a/tests/lib/test_agentex_worker.py +++ /dev/null @@ -1,90 +0,0 @@ -import os -from unittest.mock import patch - -import pytest - - -class TestAgentexWorker: - """Tests for AgentexWorker initialization and configuration.""" - - 
@pytest.fixture(autouse=True) - def cleanup_env(self): - """Cleanup environment variables after each test.""" - yield - # Clean up HEALTH_CHECK_PORT if it was set during test - os.environ.pop("HEALTH_CHECK_PORT", None) - - def test_worker_init_uses_default_health_check_port(self): - """Test that worker uses default health_check_port of 80 when not provided.""" - from agentex.lib.core.temporal.workers.worker import AgentexWorker - - # Ensure HEALTH_CHECK_PORT is not in environment - os.environ.pop("HEALTH_CHECK_PORT", None) - - # Mock EnvironmentVariables.refresh to avoid loading .env files - with patch("agentex.lib.core.temporal.workers.worker.EnvironmentVariables") as mock_env_vars: - mock_instance = mock_env_vars.refresh.return_value - mock_instance.HEALTH_CHECK_PORT = 80 - - worker = AgentexWorker(task_queue="test-queue") - - assert worker.health_check_port == 80, "Worker should use default health_check_port of 80" - - def test_worker_init_with_explicit_health_check_port(self): - """Test that worker uses explicit health_check_port parameter when provided.""" - from agentex.lib.core.temporal.workers.worker import AgentexWorker - - worker = AgentexWorker(task_queue="test-queue", health_check_port=8080) - - assert worker.health_check_port == 8080, "Worker should use explicitly provided health_check_port" - - def test_worker_init_explicit_port_overrides_environment(self): - """Test that explicit health_check_port parameter overrides environment variable.""" - from agentex.lib.core.temporal.workers.worker import AgentexWorker - - # Set environment variable - os.environ["HEALTH_CHECK_PORT"] = "9000" - - worker = AgentexWorker(task_queue="test-queue", health_check_port=8080) - - assert worker.health_check_port == 8080, "Explicit parameter should override environment variable" - - @pytest.mark.parametrize( - "env_port,expected_port", - [ - (None, 80), # No env var, should use default - ("8080", 8080), # Env var set, should use it - ("443", 443), # Different port - ], - ) - def test_worker_init_respects_environment_variable(self, env_port, expected_port): - """Test that worker respects HEALTH_CHECK_PORT from EnvironmentVariables.""" - from agentex.lib.core.temporal.workers.worker import AgentexWorker - - # Mock EnvironmentVariables.refresh to return expected port - with patch("agentex.lib.core.temporal.workers.worker.EnvironmentVariables") as mock_env_vars: - mock_instance = mock_env_vars.refresh.return_value - mock_instance.HEALTH_CHECK_PORT = expected_port - - worker = AgentexWorker(task_queue="test-queue") - - assert worker.health_check_port == expected_port, f"Worker should use health_check_port {expected_port}" - - def test_worker_init_basic_attributes(self): - """Test that worker initializes with correct basic attributes.""" - from agentex.lib.core.temporal.workers.worker import AgentexWorker - - worker = AgentexWorker( - task_queue="test-queue", - max_workers=20, - max_concurrent_activities=15, - health_check_port=8080, - ) - - assert worker.task_queue == "test-queue" - assert worker.max_workers == 20 - assert worker.max_concurrent_activities == 15 - assert worker.health_check_port == 8080 - assert worker.health_check_server_running is False - assert worker.healthy is False - assert worker.plugins == [] diff --git a/tests/test_client.py b/tests/test_client.py index 54f850e8..dad275e8 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -6,13 +6,10 @@ import os import sys import json -import time import asyncio import inspect -import subprocess import tracemalloc from typing 
import Any, Union, cast -from textwrap import dedent from unittest import mock from typing_extensions import Literal @@ -23,14 +20,17 @@ from agentex import Agentex, AsyncAgentex, APIResponseValidationError from agentex._types import Omit +from agentex._utils import asyncify from agentex._models import BaseModel, FinalRequestOptions from agentex._exceptions import APIStatusError, APITimeoutError, APIResponseValidationError from agentex._base_client import ( DEFAULT_TIMEOUT, HTTPX_DEFAULT_TIMEOUT, BaseClient, + OtherPlatform, DefaultHttpxClient, DefaultAsyncHttpxClient, + get_platform, make_request_options, ) @@ -1681,50 +1681,9 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: assert response.http_request.headers.get("x-stainless-retry-count") == "42" - def test_get_platform(self) -> None: - # A previous implementation of asyncify could leave threads unterminated when - # used with nest_asyncio. - # - # Since nest_asyncio.apply() is global and cannot be un-applied, this - # test is run in a separate process to avoid affecting other tests. - test_code = dedent(""" - import asyncio - import nest_asyncio - import threading - - from agentex._utils import asyncify - from agentex._base_client import get_platform - - async def test_main() -> None: - result = await asyncify(get_platform)() - print(result) - for thread in threading.enumerate(): - print(thread.name) - - nest_asyncio.apply() - asyncio.run(test_main()) - """) - with subprocess.Popen( - [sys.executable, "-c", test_code], - text=True, - ) as process: - timeout = 10 # seconds - - start_time = time.monotonic() - while True: - return_code = process.poll() - if return_code is not None: - if return_code != 0: - raise AssertionError("calling get_platform using asyncify resulted in a non-zero exit code") - - # success - break - - if time.monotonic() - start_time > timeout: - process.kill() - raise AssertionError("calling get_platform using asyncify resulted in a hung process") - - time.sleep(0.1) + async def test_get_platform(self) -> None: + platform = await asyncify(get_platform)() + assert isinstance(platform, (str, OtherPlatform)) async def test_proxy_environment_variables(self, monkeypatch: pytest.MonkeyPatch) -> None: # Test that the proxy environment variables are set correctly diff --git a/tests/test_function_tool.py b/tests/test_function_tool.py deleted file mode 100644 index 91312e22..00000000 --- a/tests/test_function_tool.py +++ /dev/null @@ -1,256 +0,0 @@ -from __future__ import annotations - -import json -from typing import Any, override - -import pytest -from pydantic import ValidationError - -from src.agentex.lib.core.temporal.activities.adk.providers.openai_activities import ( # type: ignore[import-untyped] - FunctionTool, -) - - -def sample_handler(context, args: str) -> str: - """Sample handler function for testing.""" - return f"Processed: {args}" - - -def complex_handler(context, args: str) -> dict[str, Any]: - """More complex handler that returns structured data.""" - parsed_args = json.loads(args) if args else {} - return { - "status": "success", - "input": parsed_args, - "context_info": str(type(context)), - } - - -class TestFunctionTool: - """Test cases for FunctionTool serialization with JSON.""" - - def test_basic_serialization_with_json(self): - """Test that FunctionTool can be serialized and deserialized with JSON.""" - # Create a FunctionTool with a callable - tool = FunctionTool( - name="test_tool", - description="A test tool", - params_json_schema={"type": "string"}, - strict_json_schema=True, - 
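# (Sketch of the simplified pattern the replacement test above adopts: rather
# than spawning a subprocess, `asyncify` from `agentex._utils` wraps the
# synchronous `get_platform` so it can be awaited directly. Imports match the
# ones this diff adds to tests/test_client.py.)
import asyncio

from agentex._utils import asyncify
from agentex._base_client import OtherPlatform, get_platform

async def check_platform() -> None:
    platform = await asyncify(get_platform)()
    assert isinstance(platform, (str, OtherPlatform))

asyncio.run(check_platform())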
is_enabled=True, - on_invoke_tool=sample_handler, - ) - - # Serialize to JSON (this is what the caller will do) - json_data = json.dumps(tool.model_dump()) - - # Deserialize from JSON - data = json.loads(json_data) - new_tool = FunctionTool.model_validate(data) - - # Test that the callable is restored - assert new_tool.on_invoke_tool is not None - assert callable(new_tool.on_invoke_tool) - - # Test that the callable works as expected - result = new_tool.on_invoke_tool(None, "test_input") - assert result == "Processed: test_input" - - def test_complex_function_serialization(self): - """Test serialization of more complex functions.""" - tool = FunctionTool( - name="complex_tool", - description="A complex test tool", - params_json_schema={ - "type": "object", - "properties": {"key": {"type": "string"}}, - }, - on_invoke_tool=complex_handler, - ) - - # Serialize and deserialize via JSON - json_data = json.dumps(tool.model_dump()) - data = json.loads(json_data) - new_tool = FunctionTool.model_validate(data) - - # Test the complex function - test_input = '{"test": "value"}' - result = new_tool.on_invoke_tool(None, test_input) - - assert result["status"] == "success" - assert result["input"] == {"test": "value"} - - def test_none_callable_handling(self): - """Test that passing None for callable raises an error.""" - # Test that None callable raises ValueError - with pytest.raises( - ValueError, - match="One of `on_invoke_tool` or `on_invoke_tool_serialized` should be set", - ): - FunctionTool( - name="empty_tool", - description="Tool with no callable", - params_json_schema={"type": "string"}, - on_invoke_tool=None, - ) - - # Test with valid function - this should work - tool_func = FunctionTool( - name="func_tool", - description="Tool with function", - params_json_schema={"type": "string"}, - on_invoke_tool=sample_handler, - ) - assert tool_func.on_invoke_tool is not None - - def test_lambda_function_serialization(self): - """Test that lambda functions can be serialized.""" - # Set a lambda function - tool = FunctionTool( - name="lambda_tool", - description="Tool with lambda", - params_json_schema={"type": "string"}, - on_invoke_tool=lambda ctx, args: f"Lambda result: {args}", - ) - - # Serialize and deserialize via JSON - json_data = json.dumps(tool.model_dump()) - data = json.loads(json_data) - new_tool = FunctionTool.model_validate(data) - - # Test that the lambda works - result = new_tool.on_invoke_tool(None, "test") - assert result == "Lambda result: test" - - def test_closure_serialization(self): - """Test that closures can be serialized.""" - - def create_handler(prefix: str): - def handler(context, args: str) -> str: - return f"{prefix}: {args}" - - return handler - - # Set a closure - tool = FunctionTool( - name="closure_tool", - description="Tool with closure", - params_json_schema={"type": "string"}, - on_invoke_tool=create_handler("PREFIX"), - ) - - # Serialize and deserialize via JSON - json_data = json.dumps(tool.model_dump()) - data = json.loads(json_data) - new_tool = FunctionTool.model_validate(data) - - # Test that the closure works with captured variable - result = new_tool.on_invoke_tool(None, "test") - assert result == "PREFIX: test" - - def test_function_tool_with_none_handler_raises_error(self): - """Test that trying to create tool with None handler raises error.""" - # Test that None callable raises ValueError - with pytest.raises( - ValueError, - match="One of `on_invoke_tool` or `on_invoke_tool_serialized` should be set", - ): - FunctionTool( - name="none_handler_test", 
- description="Test tool with None handler", - params_json_schema={"type": "string"}, - on_invoke_tool=None, - ) - - def test_to_oai_function_tool_with_valid_handler(self): - """Test that to_oai_function_tool works with valid function.""" - tool = FunctionTool( - name="valid_handler_test", - description="Test tool with valid handler", - params_json_schema={"type": "string"}, - on_invoke_tool=sample_handler, - ) - - # This should work when on_invoke_tool is set - oai_tool = tool.to_oai_function_tool() - - # Verify the OAI tool was created successfully - assert oai_tool is not None - assert oai_tool.name == "valid_handler_test" - assert oai_tool.description == "Test tool with valid handler" - assert oai_tool.on_invoke_tool is not None - assert callable(oai_tool.on_invoke_tool) - - # Test that the handler works through the OAI tool - result = oai_tool.on_invoke_tool(None, "test_input") - assert result == "Processed: test_input" - - def test_serialization_error_handling(self): - """Test error handling when serialization fails.""" - - # Try to create a FunctionTool with an unserializable callable - class UnserializableCallable: - def __call__(self, context, args): - return "test" - - @override - def __getstate__(self): - raise Exception("Cannot serialize this object") - - unserializable = UnserializableCallable() - - # This should raise an Exception during construction (from the unserializable object) - with pytest.raises(Exception, match="Cannot serialize this object"): - FunctionTool( - name="error_test_with_unserializable", - description="Test error handling with unserializable", - params_json_schema={"type": "string"}, - on_invoke_tool=unserializable, - ) - - def test_deserialization_error_handling(self): - """Test error handling when deserialization fails.""" - - # Create a tool and manually corrupt its serialized data to test deserialization error - # First, create a valid tool - valid_tool = FunctionTool( - name="valid_tool", - description="Valid tool for corruption", - params_json_schema={"type": "string"}, - on_invoke_tool=sample_handler, - ) - - # Serialize it - serialized_data = valid_tool.model_dump() - - # Corrupt the serialized callable data with invalid base64 - serialized_data["on_invoke_tool_serialized"] = ( - "invalid_base64_data!" 
# Add invalid character - ) - - # This should raise an error during model validation due to invalid base64 - with pytest.raises((ValidationError, ValueError)): - FunctionTool.model_validate(serialized_data) - - def test_full_roundtrip_with_serialization(self): - """Test a full roundtrip with a single tool.""" - tool = FunctionTool( - name="test_tool", - description="Test tool for roundtrip", - params_json_schema={"type": "string"}, - on_invoke_tool=lambda ctx, args: f"Tool result: {args}", - ) - - # Serialize tool to JSON - json_data = json.dumps(tool.model_dump()) - - # Deserialize from JSON - data = json.loads(json_data) - new_tool = FunctionTool.model_validate(data) - - # Test the tool - result = new_tool.on_invoke_tool(None, "test") - assert "Tool result: test" == result - - result = new_tool.to_oai_function_tool().on_invoke_tool(None, "test") - assert "Tool result: test" == result diff --git a/tests/test_header_forwarding.py b/tests/test_header_forwarding.py deleted file mode 100644 index 51c3a685..00000000 --- a/tests/test_header_forwarding.py +++ /dev/null @@ -1,541 +0,0 @@ -# ruff: noqa: I001 -from __future__ import annotations -from typing import Any, override -import sys -import types -from datetime import datetime, timezone -from unittest.mock import AsyncMock, Mock - -import pytest -from fastapi.testclient import TestClient - -"""Header forwarding tests consolidated. - -We stub tracing modules to avoid circular imports when importing ACPService. -""" - -# Stub tracing modules before importing ACPService -tracer_stub = types.ModuleType("agentex.lib.core.tracing.tracer") - -class _StubSpan: - async def __aenter__(self): - return self - async def __aexit__(self, exc_type: type[BaseException] | None, exc: BaseException | None, tb: object) -> bool: - return False - -class _StubTrace: - def span(self, **kwargs: Any) -> _StubSpan: # type: ignore[name-defined] - return _StubSpan() - -class _StubAsyncTracer: - def __init__(self, *args: Any, **kwargs: Any) -> None: - pass - def trace(self, trace_id: str | None = None) -> _StubTrace: # type: ignore[name-defined] - return _StubTrace() - -class _StubTracer(_StubAsyncTracer): - pass -tracer_stub.AsyncTracer = _StubAsyncTracer # type: ignore[attr-defined] -tracer_stub.Tracer = _StubTracer # type: ignore[attr-defined] -sys.modules["agentex.lib.core.tracing.tracer"] = tracer_stub - -tracing_pkg_stub = types.ModuleType("agentex.lib.core.tracing") -tracing_pkg_stub.AsyncTracer = _StubAsyncTracer # type: ignore[attr-defined] -tracing_pkg_stub.Tracer = _StubTracer # type: ignore[attr-defined] -sys.modules["agentex.lib.core.tracing"] = tracing_pkg_stub - -from agentex.lib.core.services.adk.acp.acp import ACPService -from agentex.lib.sdk.fastacp.base.base_acp_server import BaseACPServer -from agentex.lib.types.acp import RPCMethod, SendMessageParams, SendEventParams -from agentex.types.task_message_content import TextContent -from agentex.lib.sdk.fastacp.impl.temporal_acp import TemporalACP -from agentex.lib.core.temporal.services.temporal_task_service import TemporalTaskService -from agentex.lib.environment_variables import EnvironmentVariables -from agentex.types.agent import Agent -from agentex.types.task import Task -from agentex.types.event import Event - - -class DummySpan: - def __init__(self, **_kwargs: Any) -> None: - self.output = None - - async def __aenter__(self): - return self - - async def __aexit__(self, exc_type: type[BaseException] | None, exc: BaseException | None, tb: object) -> bool: - return False - - -class DummyTrace: - def 
span(self, **kwargs: Any) -> DummySpan: - return DummySpan(**kwargs) - - -class DummyTracer: - def trace(self, trace_id: str | None = None) -> DummyTrace: - return DummyTrace() - - -class DummyAgents: - async def rpc_by_name(self, *args: Any, **kwargs: Any) -> Any: - # Support both positional and keyword agent name, and both params/_params - method = kwargs.get("method") - extra_headers = kwargs.get("extra_headers") - # Ensure headers are forwarded as-is - assert extra_headers == {"x-user": "a", "authorization": "b"} - # Minimal response object with .result - if method == "task/create": - return type("R", (), {"result": {"id": "t1"}})() - if method == "message/send": - # include required task_id for TaskMessage model - return type("R", (), {"result": {"id": "m1", "task_id": "t1", "content": {"type": "text", "author": "user", "content": "ok"}}})() - if method == "event/send": - # include required fields for Event model - return type("R", (), {"result": {"id": "e1", "agent_id": "a1", "task_id": "t1", "sequence_id": 1}})() - if method == "task/cancel": - return type("R", (), {"result": {"id": "t1"}})() - raise AssertionError("Unexpected method") - - -class DummyClient: - def __init__(self) -> None: - self.agents = DummyAgents() - - -@pytest.mark.asyncio -async def test_header_forwarding() -> None: - client = DummyClient() - svc = ACPService(agentex_client=client, tracer=DummyTracer()) # type: ignore[arg-type] - - # Create task - task = await svc.task_create(agent_name="x", request={"headers": {"x-user": "a", "authorization": "b"}}) - assert task.id == "t1" - - # Send message - msgs = await svc.message_send( - agent_name="x", - task_id="t1", - content=TextContent(author="user", content="hi"), - request={"headers": {"x-user": "a", "authorization": "b"}}, - ) - assert len(msgs) == 1 - - # Send event - evt = await svc.event_send( - agent_name="x", - task_id="t1", - content=TextContent(author="user", content="hi"), - request={"headers": {"x-user": "a", "authorization": "b"}}, - ) - assert evt.id == "e1" - - # Cancel - task2 = await svc.task_cancel(agent_name="x", task_id="t1", request={"headers": {"x-user": "a", "authorization": "b"}}) - assert task2.id == "t1" - - -class TestServer(BaseACPServer): - __test__ = False - @override - def _setup_handlers(self): - @self.on_message_send - async def handler(params: SendMessageParams): # type: ignore[reportUnusedFunction] - headers = (params.request or {}).get("headers", {}) - assert "x-agent-api-key" not in headers - assert headers.get("x-user") == "a" - return TextContent(author="agent", content="ok") - - -def test_excludes_agent_api_key_header(): - app = TestServer.create() - client = TestClient(app) - req = { - "jsonrpc": "2.0", - "method": RPCMethod.MESSAGE_SEND.value, - "params": { - "agent": {"id": "a1", "name": "n1", "description": "d", "acp_type": "sync"}, - "task": {"id": "t1"}, - "content": {"type": "text", "author": "user", "content": "hi"}, - "stream": False, - }, - "id": 1, - } - r = client.post("/api", json=req, headers={"x-user": "a", "x-agent-api-key": "secret"}) - assert r.status_code == 200 - - -def filter_headers_standalone( - headers: dict[str, str] | None, - allowlist: list[str] | None -) -> dict[str, str]: - """Standalone header filtering function matching the production implementation.""" - if not headers: - return {} - - # Pass-through behavior: if no allowlist, forward all headers - if allowlist is None: - return headers - - # Apply filtering based on allowlist - if not allowlist: - return {} - - import fnmatch - filtered = {} - 
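# (Self-contained illustration of the case-insensitive fnmatch matching that
# `filter_headers_standalone` applies to each header in the loop below:)
import fnmatch

assert fnmatch.fnmatch("X-TENANT-ID".lower(), "x-tenant-*".lower())
assert fnmatch.fnmatch("authorization".lower(), "auth*".lower())
assert not fnmatch.fnmatch("x-admin-token".lower(), "x-user-*".lower())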
for header_name, header_value in headers.items(): - # Check against allowlist patterns (case-insensitive) - header_allowed = False - for pattern in allowlist: - if fnmatch.fnmatch(header_name.lower(), pattern.lower()): - header_allowed = True - break - - if header_allowed: - filtered[header_name] = header_value - - return filtered - - -def test_filter_headers_no_headers() -> None: - allowlist = ["x-user-email"] - result = filter_headers_standalone(None, allowlist) - assert result == {} - - result = filter_headers_standalone({}, allowlist) - assert result == {} - - -def test_filter_headers_pass_through_by_default() -> None: - headers = { - "x-user-email": "test@example.com", - "x-admin-token": "secret", - "authorization": "Bearer token", - "x-custom-header": "value" - } - result = filter_headers_standalone(headers, None) - assert result == headers - - -def test_filter_headers_empty_allowlist() -> None: - allowlist: list[str] = [] - headers = {"x-user-email": "test@example.com", "x-admin-token": "secret"} - result = filter_headers_standalone(headers, allowlist) - assert result == {} - - -def test_filter_headers_allowed_headers() -> None: - allowlist = ["x-user-email", "x-tenant-id"] - headers = { - "x-user-email": "test@example.com", - "x-tenant-id": "tenant123", - "x-admin-token": "secret", - "content-type": "application/json" - } - result = filter_headers_standalone(headers, allowlist) - expected = { - "x-user-email": "test@example.com", - "x-tenant-id": "tenant123" - } - assert result == expected - - -def test_filter_headers_case_insensitive_patterns() -> None: - allowlist = ["X-User-Email", "x-tenant-*"] - headers = { - "x-user-email": "test@example.com", - "X-TENANT-ID": "tenant123", - "x-tenant-name": "acme", - "x-admin-token": "secret" - } - result = filter_headers_standalone(headers, allowlist) - expected = { - "x-user-email": "test@example.com", - "X-TENANT-ID": "tenant123", - "x-tenant-name": "acme" - } - assert result == expected - - -def test_filter_headers_wildcard_patterns() -> None: - allowlist = ["x-user-*", "authorization"] - headers = { - "x-user-id": "123", - "x-user-email": "test@example.com", - "x-user-role": "admin", - "authorization": "Bearer token", - "x-system-info": "blocked", - "content-type": "application/json" - } - result = filter_headers_standalone(headers, allowlist) - expected = { - "x-user-id": "123", - "x-user-email": "test@example.com", - "x-user-role": "admin", - "authorization": "Bearer token" - } - assert result == expected - - -def test_filter_headers_complex_patterns() -> None: - allowlist = ["x-tenant-*", "x-user-[abc]*", "auth*"] - headers = { - "x-tenant-id": "tenant1", - "x-tenant-name": "acme", - "x-user-admin": "true", - "x-user-beta": "false", - "x-user-delta": "test", - "authorization": "Bearer x", - "authenticate": "digest", - "content-type": "json", - } - result = filter_headers_standalone(headers, allowlist) - expected = { - "x-tenant-id": "tenant1", - "x-tenant-name": "acme", - "x-user-admin": "true", - "x-user-beta": "false", - "authorization": "Bearer x", - "authenticate": "digest" - } - assert result == expected - - -def test_filter_headers_all_types() -> None: - allowlist = ["authorization", "accept-language", "custom-*"] - headers = { - "authorization": "Bearer token", - "accept-language": "en-US", - "custom-header": "value", - "custom-auth": "token", - "content-type": "application/json", - "x-blocked": "value" - } - result = filter_headers_standalone(headers, allowlist) - expected = { - "authorization": "Bearer token", - 
"accept-language": "en-US", - "custom-header": "value", - "custom-auth": "token" - } - assert result == expected - - - -# ============================================================================ -# Temporal Header Forwarding Tests -# ============================================================================ - -@pytest.fixture -def mock_temporal_client(): - """Create a mock TemporalClient""" - client = AsyncMock() - client.send_signal = AsyncMock(return_value=None) - return client - - -@pytest.fixture -def mock_env_vars(): - """Create mock environment variables""" - env_vars = Mock(spec=EnvironmentVariables) - env_vars.WORKFLOW_NAME = "test-workflow" - env_vars.WORKFLOW_TASK_QUEUE = "test-queue" - return env_vars - - -@pytest.fixture -def temporal_task_service(mock_temporal_client, mock_env_vars): - """Create TemporalTaskService with mocked client""" - return TemporalTaskService( - temporal_client=mock_temporal_client, - env_vars=mock_env_vars, - ) - - -@pytest.fixture -def sample_agent(): - """Create a sample agent""" - return Agent( - id="agent-123", - name="test-agent", - description="Test agent", - acp_type="async", - created_at=datetime.now(timezone.utc), - updated_at=datetime.now(timezone.utc) - ) - - -@pytest.fixture -def sample_task(): - """Create a sample task""" - return Task(id="task-456") - - -@pytest.fixture -def sample_event(): - """Create a sample event""" - return Event( - id="event-789", - agent_id="agent-123", - task_id="task-456", - sequence_id=1, - content=TextContent(author="user", content="Test message") - ) - - -@pytest.mark.asyncio -async def test_temporal_task_service_send_event_with_headers( - temporal_task_service, - mock_temporal_client, - sample_agent, - sample_task, - sample_event -): - """Test that TemporalTaskService forwards request headers in signal payload""" - # Given - request_headers = { - "x-user-oauth-credentials": "test-oauth-token", - "x-custom-header": "custom-value" - } - request = {"headers": request_headers} - - # When - await temporal_task_service.send_event( - agent=sample_agent, - task=sample_task, - event=sample_event, - request=request - ) - - # Then - mock_temporal_client.send_signal.assert_called_once() - call_args = mock_temporal_client.send_signal.call_args - - # Verify the signal was sent to the correct workflow - assert call_args.kwargs["workflow_id"] == sample_task.id - assert call_args.kwargs["signal"] == "receive_event" - - # Verify the payload includes the request with headers - payload = call_args.kwargs["payload"] - assert "request" in payload - assert payload["request"] == request - assert payload["request"]["headers"] == request_headers - - -@pytest.mark.asyncio -async def test_temporal_task_service_send_event_without_headers( - temporal_task_service, - mock_temporal_client, - sample_agent, - sample_task, - sample_event -): - """Test that TemporalTaskService handles missing request gracefully""" - # When - Send event without request parameter - await temporal_task_service.send_event( - agent=sample_agent, - task=sample_task, - event=sample_event, - request=None - ) - - # Then - mock_temporal_client.send_signal.assert_called_once() - call_args = mock_temporal_client.send_signal.call_args - - # Verify the payload has request as None - payload = call_args.kwargs["payload"] - assert payload["request"] is None - - -@pytest.mark.asyncio -async def test_temporal_acp_integration_with_request_headers( - mock_temporal_client, - mock_env_vars, - sample_agent, - sample_task, - sample_event -): - """Test end-to-end integration: 
TemporalACP -> TemporalTaskService -> TemporalClient signal""" - # Given - Create real TemporalTaskService with mocked client - task_service = TemporalTaskService( - temporal_client=mock_temporal_client, - env_vars=mock_env_vars, - ) - - # Create TemporalACP with real task service - temporal_acp = TemporalACP( - temporal_address="localhost:7233", - temporal_task_service=task_service, - ) - temporal_acp._setup_handlers() - - request_headers = { - "x-user-id": "user-123", - "authorization": "Bearer token", - "x-tenant-id": "tenant-456" - } - request = {"headers": request_headers} - - # Create SendEventParams as TemporalACP would receive it - params = SendEventParams( - agent=sample_agent, - task=sample_task, - event=sample_event, - request=request - ) - - # When - Trigger the event handler via the decorated function - # The handler is registered via @temporal_acp.on_task_event_send - # We'll directly call the task service method as the handler does - await task_service.send_event( - agent=params.agent, - task=params.task, - event=params.event, - request=params.request - ) - - # Then - Verify the temporal client received the signal with request headers - mock_temporal_client.send_signal.assert_called_once() - call_args = mock_temporal_client.send_signal.call_args - - # Verify signal payload includes request with headers - payload = call_args.kwargs["payload"] - assert payload["request"] == request - assert payload["request"]["headers"] == request_headers - - -@pytest.mark.asyncio -async def test_temporal_task_service_preserves_all_header_types( - temporal_task_service, - mock_temporal_client, - sample_agent, - sample_task, - sample_event -): - """Test that various header types are preserved correctly""" - # Given - Headers with different patterns - request_headers = { - "x-user-oauth-credentials": "oauth-token-12345", - "authorization": "Bearer jwt-token", - "x-tenant-id": "tenant-999", - "x-custom-app-header": "custom-value" - } - request = {"headers": request_headers} - - # When - await temporal_task_service.send_event( - agent=sample_agent, - task=sample_task, - event=sample_event, - request=request - ) - - # Then - Verify all headers are preserved in the signal payload - call_args = mock_temporal_client.send_signal.call_args - payload = call_args.kwargs["payload"] - - assert payload["request"]["headers"] == request_headers - # Verify each header individually - for header_name, header_value in request_headers.items(): - assert payload["request"]["headers"][header_name] == header_value diff --git a/tests/test_model_utils.py b/tests/test_model_utils.py deleted file mode 100644 index 9c570223..00000000 --- a/tests/test_model_utils.py +++ /dev/null @@ -1,226 +0,0 @@ -import json -from datetime import datetime - -from pydantic import BaseModel - -from agentex.lib.utils.model_utils import recursive_model_dump - - -class SampleModel(BaseModel): - """Sample model for testing recursive_model_dump functionality.""" - - name: str - value: int - - -def sample_function(): - """A sample function for testing function serialization.""" - return "test" - - -def another_function(x: int) -> str: - """Another sample function with parameters.""" - return str(x) - - -class TestRecursiveModelDump: - """Test cases for the recursive_model_dump function.""" - - def test_pydantic_model_serialization(self): - """Test that Pydantic models are properly serialized.""" - model = SampleModel(name="test", value=42) - result = recursive_model_dump(model) - - assert isinstance(result, dict) - assert result["name"] == "test" 
- assert result["value"] == 42 - - def test_datetime_serialization(self): - """Test that datetime objects are serialized to ISO format.""" - dt = datetime(2023, 12, 25, 10, 30, 45) - result = recursive_model_dump(dt) - - assert isinstance(result, str) - assert result == "2023-12-25T10:30:45" - - def test_function_serialization(self): - """Test that functions are properly serialized to string representation.""" - result = recursive_model_dump(sample_function) - - assert isinstance(result, str) - assert result.startswith(" int: - return x * 2 - - result = recursive_model_dump(lambda_like_func) - - assert isinstance(result, str) - assert result.startswith(" None: - """ - Test that demonstrates the task cancellation bug fix. - - Previously: task_cancel(task_name="my-task") incorrectly treated task_name as agent_name - Fixed: task_cancel(task_name="my-task", agent_name="my-agent") correctly identifies both - """ - # This test documents the correct usage pattern - # In practice, you would need a real agent and task for this to work - try: - task = await client.agents.cancel_task( - agent_name="test-agent", # REQUIRED: Agent that owns the task - params={ - "task_id": "test-task-123" # REQUIRED: Task to cancel - } - ) - assert_matches_type(Task, task, path=["response"]) - except Exception: - # Expected to fail in test environment without real agents/tasks - # The important thing is that the API now requires both parameters - pass diff --git a/uv.lock b/uv.lock deleted file mode 100644 index 0a0242ae..00000000 --- a/uv.lock +++ /dev/null @@ -1,2678 +0,0 @@ -version = 1 -revision = 3 -requires-python = ">=3.12, <4" -resolution-markers = [ - "python_full_version >= '3.13'", - "python_full_version < '3.13'", -] - -[[package]] -name = "agentex-sdk" -version = "0.6.7" -source = { editable = "." 
} -dependencies = [ - { name = "aiohttp" }, - { name = "anthropic" }, - { name = "anyio" }, - { name = "claude-agent-sdk" }, - { name = "cloudpickle" }, - { name = "datadog" }, - { name = "ddtrace" }, - { name = "distro" }, - { name = "fastapi" }, - { name = "httpx" }, - { name = "ipykernel" }, - { name = "jinja2" }, - { name = "json-log-formatter" }, - { name = "jsonref" }, - { name = "jsonschema" }, - { name = "kubernetes" }, - { name = "litellm" }, - { name = "mcp", extra = ["cli"] }, - { name = "openai" }, - { name = "openai-agents" }, - { name = "pydantic" }, - { name = "pytest" }, - { name = "pytest-asyncio" }, - { name = "python-on-whales" }, - { name = "pyyaml" }, - { name = "questionary" }, - { name = "redis" }, - { name = "rich" }, - { name = "scale-gp" }, - { name = "scale-gp-beta" }, - { name = "sniffio" }, - { name = "temporalio" }, - { name = "typer" }, - { name = "typing-extensions" }, - { name = "tzdata" }, - { name = "tzlocal" }, - { name = "uvicorn" }, - { name = "watchfiles" }, - { name = "yaspin" }, -] - -[package.optional-dependencies] -aiohttp = [ - { name = "aiohttp" }, - { name = "httpx-aiohttp" }, -] -dev = [ - { name = "ruff" }, -] - -[package.dev-dependencies] -dev = [ - { name = "ipywidgets" }, - { name = "nbstripout" }, - { name = "yaspin" }, -] - -[package.metadata] -requires-dist = [ - { name = "aiohttp", specifier = ">=3.10.10,<4" }, - { name = "aiohttp", marker = "extra == 'aiohttp'" }, - { name = "anthropic", specifier = ">=0.40.0" }, - { name = "anyio", specifier = ">=3.5.0,<5" }, - { name = "claude-agent-sdk", specifier = ">=0.1.0" }, - { name = "cloudpickle", specifier = ">=3.1.1" }, - { name = "datadog", specifier = ">=0.52.1" }, - { name = "ddtrace", specifier = ">=3.13.0" }, - { name = "distro", specifier = ">=1.7.0,<2" }, - { name = "fastapi", specifier = ">=0.115.0,<0.116" }, - { name = "httpx", specifier = ">=0.27.2,<0.28" }, - { name = "httpx-aiohttp", marker = "extra == 'aiohttp'", specifier = ">=0.1.9" }, - { name = "ipykernel", specifier = ">=6.29.5" }, - { name = "jinja2", specifier = ">=3.1.3,<4" }, - { name = "json-log-formatter", specifier = ">=1.1.1" }, - { name = "jsonref", specifier = ">=1.1.0,<2" }, - { name = "jsonschema", specifier = ">=4.23.0,<5" }, - { name = "kubernetes", specifier = ">=25.0.0,<29.0.0" }, - { name = "litellm", specifier = ">=1.66.0,<2" }, - { name = "mcp", extras = ["cli"], specifier = ">=1.4.1" }, - { name = "openai", specifier = ">=2.2,<3" }, - { name = "openai-agents", specifier = "==0.4.2" }, - { name = "pydantic", specifier = ">=2.0.0,<3" }, - { name = "pytest", specifier = ">=8.4.0" }, - { name = "pytest-asyncio", specifier = ">=1.0.0" }, - { name = "python-on-whales", specifier = ">=0.73.0,<0.74" }, - { name = "pyyaml", specifier = ">=6.0.2,<7" }, - { name = "questionary", specifier = ">=2.0.1,<3" }, - { name = "redis", specifier = ">=5.2.0,<6" }, - { name = "rich", specifier = ">=13.9.2,<14" }, - { name = "ruff", marker = "extra == 'dev'", specifier = ">=0.3.4" }, - { name = "scale-gp", specifier = ">=0.1.0a59" }, - { name = "scale-gp-beta", specifier = "==0.1.0a20" }, - { name = "sniffio" }, - { name = "temporalio", specifier = ">=1.18.2,<2" }, - { name = "typer", specifier = ">=0.16,<0.17" }, - { name = "typing-extensions", specifier = ">=4.10,<5" }, - { name = "tzdata", specifier = ">=2025.2" }, - { name = "tzlocal", specifier = ">=5.3.1" }, - { name = "uvicorn", specifier = ">=0.31.1" }, - { name = "watchfiles", specifier = ">=0.24.0,<1.0" }, - { name = "yaspin", specifier = ">=3.1.0" }, -] 
-provides-extras = ["aiohttp", "dev"] - -[package.metadata.requires-dev] -dev = [ - { name = "ipywidgets", specifier = ">=8.1.7" }, - { name = "nbstripout", specifier = ">=0.8.1" }, - { name = "yaspin", specifier = ">=3.1.0" }, -] - -[[package]] -name = "aiohappyeyeballs" -version = "2.6.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/26/30/f84a107a9c4331c14b2b586036f40965c128aa4fee4dda5d3d51cb14ad54/aiohappyeyeballs-2.6.1.tar.gz", hash = "sha256:c3f9d0113123803ccadfdf3f0faa505bc78e6a72d1cc4806cbd719826e943558", size = 22760, upload-time = "2025-03-12T01:42:48.764Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/0f/15/5bf3b99495fb160b63f95972b81750f18f7f4e02ad051373b669d17d44f2/aiohappyeyeballs-2.6.1-py3-none-any.whl", hash = "sha256:f349ba8f4b75cb25c99c5c2d84e997e485204d2902a9597802b0371f09331fb8", size = 15265, upload-time = "2025-03-12T01:42:47.083Z" }, -] - -[[package]] -name = "aiohttp" -version = "3.12.15" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "aiohappyeyeballs" }, - { name = "aiosignal" }, - { name = "attrs" }, - { name = "frozenlist" }, - { name = "multidict" }, - { name = "propcache" }, - { name = "yarl" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/9b/e7/d92a237d8802ca88483906c388f7c201bbe96cd80a165ffd0ac2f6a8d59f/aiohttp-3.12.15.tar.gz", hash = "sha256:4fc61385e9c98d72fcdf47e6dd81833f47b2f77c114c29cd64a361be57a763a2", size = 7823716, upload-time = "2025-07-29T05:52:32.215Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/63/97/77cb2450d9b35f517d6cf506256bf4f5bda3f93a66b4ad64ba7fc917899c/aiohttp-3.12.15-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:802d3868f5776e28f7bf69d349c26fc0efadb81676d0afa88ed00d98a26340b7", size = 702333, upload-time = "2025-07-29T05:50:46.507Z" }, - { url = "https://files.pythonhosted.org/packages/83/6d/0544e6b08b748682c30b9f65640d006e51f90763b41d7c546693bc22900d/aiohttp-3.12.15-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f2800614cd560287be05e33a679638e586a2d7401f4ddf99e304d98878c29444", size = 476948, upload-time = "2025-07-29T05:50:48.067Z" }, - { url = "https://files.pythonhosted.org/packages/3a/1d/c8c40e611e5094330284b1aea8a4b02ca0858f8458614fa35754cab42b9c/aiohttp-3.12.15-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8466151554b593909d30a0a125d638b4e5f3836e5aecde85b66b80ded1cb5b0d", size = 469787, upload-time = "2025-07-29T05:50:49.669Z" }, - { url = "https://files.pythonhosted.org/packages/38/7d/b76438e70319796bfff717f325d97ce2e9310f752a267bfdf5192ac6082b/aiohttp-3.12.15-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e5a495cb1be69dae4b08f35a6c4579c539e9b5706f606632102c0f855bcba7c", size = 1716590, upload-time = "2025-07-29T05:50:51.368Z" }, - { url = "https://files.pythonhosted.org/packages/79/b1/60370d70cdf8b269ee1444b390cbd72ce514f0d1cd1a715821c784d272c9/aiohttp-3.12.15-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:6404dfc8cdde35c69aaa489bb3542fb86ef215fc70277c892be8af540e5e21c0", size = 1699241, upload-time = "2025-07-29T05:50:53.628Z" }, - { url = "https://files.pythonhosted.org/packages/a3/2b/4968a7b8792437ebc12186db31523f541943e99bda8f30335c482bea6879/aiohttp-3.12.15-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3ead1c00f8521a5c9070fcb88f02967b1d8a0544e6d85c253f6968b785e1a2ab", size = 1754335, upload-time = "2025-07-29T05:50:55.394Z" }, - { url = 
"https://files.pythonhosted.org/packages/fb/c1/49524ed553f9a0bec1a11fac09e790f49ff669bcd14164f9fab608831c4d/aiohttp-3.12.15-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6990ef617f14450bc6b34941dba4f12d5613cbf4e33805932f853fbd1cf18bfb", size = 1800491, upload-time = "2025-07-29T05:50:57.202Z" }, - { url = "https://files.pythonhosted.org/packages/de/5e/3bf5acea47a96a28c121b167f5ef659cf71208b19e52a88cdfa5c37f1fcc/aiohttp-3.12.15-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd736ed420f4db2b8148b52b46b88ed038d0354255f9a73196b7bbce3ea97545", size = 1719929, upload-time = "2025-07-29T05:50:59.192Z" }, - { url = "https://files.pythonhosted.org/packages/39/94/8ae30b806835bcd1cba799ba35347dee6961a11bd507db634516210e91d8/aiohttp-3.12.15-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c5092ce14361a73086b90c6efb3948ffa5be2f5b6fbcf52e8d8c8b8848bb97c", size = 1635733, upload-time = "2025-07-29T05:51:01.394Z" }, - { url = "https://files.pythonhosted.org/packages/7a/46/06cdef71dd03acd9da7f51ab3a9107318aee12ad38d273f654e4f981583a/aiohttp-3.12.15-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:aaa2234bb60c4dbf82893e934d8ee8dea30446f0647e024074237a56a08c01bd", size = 1696790, upload-time = "2025-07-29T05:51:03.657Z" }, - { url = "https://files.pythonhosted.org/packages/02/90/6b4cfaaf92ed98d0ec4d173e78b99b4b1a7551250be8937d9d67ecb356b4/aiohttp-3.12.15-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:6d86a2fbdd14192e2f234a92d3b494dd4457e683ba07e5905a0b3ee25389ac9f", size = 1718245, upload-time = "2025-07-29T05:51:05.911Z" }, - { url = "https://files.pythonhosted.org/packages/2e/e6/2593751670fa06f080a846f37f112cbe6f873ba510d070136a6ed46117c6/aiohttp-3.12.15-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a041e7e2612041a6ddf1c6a33b883be6a421247c7afd47e885969ee4cc58bd8d", size = 1658899, upload-time = "2025-07-29T05:51:07.753Z" }, - { url = "https://files.pythonhosted.org/packages/8f/28/c15bacbdb8b8eb5bf39b10680d129ea7410b859e379b03190f02fa104ffd/aiohttp-3.12.15-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:5015082477abeafad7203757ae44299a610e89ee82a1503e3d4184e6bafdd519", size = 1738459, upload-time = "2025-07-29T05:51:09.56Z" }, - { url = "https://files.pythonhosted.org/packages/00/de/c269cbc4faa01fb10f143b1670633a8ddd5b2e1ffd0548f7aa49cb5c70e2/aiohttp-3.12.15-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:56822ff5ddfd1b745534e658faba944012346184fbfe732e0d6134b744516eea", size = 1766434, upload-time = "2025-07-29T05:51:11.423Z" }, - { url = "https://files.pythonhosted.org/packages/52/b0/4ff3abd81aa7d929b27d2e1403722a65fc87b763e3a97b3a2a494bfc63bc/aiohttp-3.12.15-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b2acbbfff69019d9014508c4ba0401822e8bae5a5fdc3b6814285b71231b60f3", size = 1726045, upload-time = "2025-07-29T05:51:13.689Z" }, - { url = "https://files.pythonhosted.org/packages/71/16/949225a6a2dd6efcbd855fbd90cf476052e648fb011aa538e3b15b89a57a/aiohttp-3.12.15-cp312-cp312-win32.whl", hash = "sha256:d849b0901b50f2185874b9a232f38e26b9b3d4810095a7572eacea939132d4e1", size = 423591, upload-time = "2025-07-29T05:51:15.452Z" }, - { url = "https://files.pythonhosted.org/packages/2b/d8/fa65d2a349fe938b76d309db1a56a75c4fb8cc7b17a398b698488a939903/aiohttp-3.12.15-cp312-cp312-win_amd64.whl", hash = "sha256:b390ef5f62bb508a9d67cb3bba9b8356e23b3996da7062f1a57ce1a79d2b3d34", size = 450266, upload-time = "2025-07-29T05:51:17.239Z" }, - { url = 
"https://files.pythonhosted.org/packages/f2/33/918091abcf102e39d15aba2476ad9e7bd35ddb190dcdd43a854000d3da0d/aiohttp-3.12.15-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:9f922ffd05034d439dde1c77a20461cf4a1b0831e6caa26151fe7aa8aaebc315", size = 696741, upload-time = "2025-07-29T05:51:19.021Z" }, - { url = "https://files.pythonhosted.org/packages/b5/2a/7495a81e39a998e400f3ecdd44a62107254803d1681d9189be5c2e4530cd/aiohttp-3.12.15-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:2ee8a8ac39ce45f3e55663891d4b1d15598c157b4d494a4613e704c8b43112cd", size = 474407, upload-time = "2025-07-29T05:51:21.165Z" }, - { url = "https://files.pythonhosted.org/packages/49/fc/a9576ab4be2dcbd0f73ee8675d16c707cfc12d5ee80ccf4015ba543480c9/aiohttp-3.12.15-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3eae49032c29d356b94eee45a3f39fdf4b0814b397638c2f718e96cfadf4c4e4", size = 466703, upload-time = "2025-07-29T05:51:22.948Z" }, - { url = "https://files.pythonhosted.org/packages/09/2f/d4bcc8448cf536b2b54eed48f19682031ad182faa3a3fee54ebe5b156387/aiohttp-3.12.15-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b97752ff12cc12f46a9b20327104448042fce5c33a624f88c18f66f9368091c7", size = 1705532, upload-time = "2025-07-29T05:51:25.211Z" }, - { url = "https://files.pythonhosted.org/packages/f1/f3/59406396083f8b489261e3c011aa8aee9df360a96ac8fa5c2e7e1b8f0466/aiohttp-3.12.15-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:894261472691d6fe76ebb7fcf2e5870a2ac284c7406ddc95823c8598a1390f0d", size = 1686794, upload-time = "2025-07-29T05:51:27.145Z" }, - { url = "https://files.pythonhosted.org/packages/dc/71/164d194993a8d114ee5656c3b7ae9c12ceee7040d076bf7b32fb98a8c5c6/aiohttp-3.12.15-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5fa5d9eb82ce98959fc1031c28198b431b4d9396894f385cb63f1e2f3f20ca6b", size = 1738865, upload-time = "2025-07-29T05:51:29.366Z" }, - { url = "https://files.pythonhosted.org/packages/1c/00/d198461b699188a93ead39cb458554d9f0f69879b95078dce416d3209b54/aiohttp-3.12.15-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f0fa751efb11a541f57db59c1dd821bec09031e01452b2b6217319b3a1f34f3d", size = 1788238, upload-time = "2025-07-29T05:51:31.285Z" }, - { url = "https://files.pythonhosted.org/packages/85/b8/9e7175e1fa0ac8e56baa83bf3c214823ce250d0028955dfb23f43d5e61fd/aiohttp-3.12.15-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5346b93e62ab51ee2a9d68e8f73c7cf96ffb73568a23e683f931e52450e4148d", size = 1710566, upload-time = "2025-07-29T05:51:33.219Z" }, - { url = "https://files.pythonhosted.org/packages/59/e4/16a8eac9df39b48ae102ec030fa9f726d3570732e46ba0c592aeeb507b93/aiohttp-3.12.15-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:049ec0360f939cd164ecbfd2873eaa432613d5e77d6b04535e3d1fbae5a9e645", size = 1624270, upload-time = "2025-07-29T05:51:35.195Z" }, - { url = "https://files.pythonhosted.org/packages/1f/f8/cd84dee7b6ace0740908fd0af170f9fab50c2a41ccbc3806aabcb1050141/aiohttp-3.12.15-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b52dcf013b57464b6d1e51b627adfd69a8053e84b7103a7cd49c030f9ca44461", size = 1677294, upload-time = "2025-07-29T05:51:37.215Z" }, - { url = "https://files.pythonhosted.org/packages/ce/42/d0f1f85e50d401eccd12bf85c46ba84f947a84839c8a1c2c5f6e8ab1eb50/aiohttp-3.12.15-cp313-cp313-musllinux_1_2_armv7l.whl", hash = 
"sha256:9b2af240143dd2765e0fb661fd0361a1b469cab235039ea57663cda087250ea9", size = 1708958, upload-time = "2025-07-29T05:51:39.328Z" }, - { url = "https://files.pythonhosted.org/packages/d5/6b/f6fa6c5790fb602538483aa5a1b86fcbad66244997e5230d88f9412ef24c/aiohttp-3.12.15-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ac77f709a2cde2cc71257ab2d8c74dd157c67a0558a0d2799d5d571b4c63d44d", size = 1651553, upload-time = "2025-07-29T05:51:41.356Z" }, - { url = "https://files.pythonhosted.org/packages/04/36/a6d36ad545fa12e61d11d1932eef273928b0495e6a576eb2af04297fdd3c/aiohttp-3.12.15-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:47f6b962246f0a774fbd3b6b7be25d59b06fdb2f164cf2513097998fc6a29693", size = 1727688, upload-time = "2025-07-29T05:51:43.452Z" }, - { url = "https://files.pythonhosted.org/packages/aa/c8/f195e5e06608a97a4e52c5d41c7927301bf757a8e8bb5bbf8cef6c314961/aiohttp-3.12.15-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:760fb7db442f284996e39cf9915a94492e1896baac44f06ae551974907922b64", size = 1761157, upload-time = "2025-07-29T05:51:45.643Z" }, - { url = "https://files.pythonhosted.org/packages/05/6a/ea199e61b67f25ba688d3ce93f63b49b0a4e3b3d380f03971b4646412fc6/aiohttp-3.12.15-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ad702e57dc385cae679c39d318def49aef754455f237499d5b99bea4ef582e51", size = 1710050, upload-time = "2025-07-29T05:51:48.203Z" }, - { url = "https://files.pythonhosted.org/packages/b4/2e/ffeb7f6256b33635c29dbed29a22a723ff2dd7401fff42ea60cf2060abfb/aiohttp-3.12.15-cp313-cp313-win32.whl", hash = "sha256:f813c3e9032331024de2eb2e32a88d86afb69291fbc37a3a3ae81cc9917fb3d0", size = 422647, upload-time = "2025-07-29T05:51:50.718Z" }, - { url = "https://files.pythonhosted.org/packages/1b/8e/78ee35774201f38d5e1ba079c9958f7629b1fd079459aea9467441dbfbf5/aiohttp-3.12.15-cp313-cp313-win_amd64.whl", hash = "sha256:1a649001580bdb37c6fdb1bebbd7e3bc688e8ec2b5c6f52edbb664662b17dc84", size = 449067, upload-time = "2025-07-29T05:51:52.549Z" }, -] - -[[package]] -name = "aiosignal" -version = "1.4.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "frozenlist" }, - { name = "typing-extensions", marker = "python_full_version < '3.13'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/61/62/06741b579156360248d1ec624842ad0edf697050bbaf7c3e46394e106ad1/aiosignal-1.4.0.tar.gz", hash = "sha256:f47eecd9468083c2029cc99945502cb7708b082c232f9aca65da147157b251c7", size = 25007, upload-time = "2025-07-03T22:54:43.528Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/fb/76/641ae371508676492379f16e2fa48f4e2c11741bd63c48be4b12a6b09cba/aiosignal-1.4.0-py3-none-any.whl", hash = "sha256:053243f8b92b990551949e63930a839ff0cf0b0ebbe0597b0f3fb19e1a0fe82e", size = 7490, upload-time = "2025-07-03T22:54:42.156Z" }, -] - -[[package]] -name = "annotated-types" -version = "0.7.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081, upload-time = "2024-05-20T21:33:25.928Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643, upload-time = "2024-05-20T21:33:24.1Z" }, -] - -[[package]] 
-name = "anthropic" -version = "0.74.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "anyio" }, - { name = "distro" }, - { name = "docstring-parser" }, - { name = "httpx" }, - { name = "jiter" }, - { name = "pydantic" }, - { name = "sniffio" }, - { name = "typing-extensions" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/5b/f9/baa1b885c8664b446e6a13003938046901e54ffd70b532bbebd01256e34b/anthropic-0.74.0.tar.gz", hash = "sha256:114ec10cb394b6764e199da06335da4747b019c5629e53add33572f66964ad99", size = 428958, upload-time = "2025-11-18T15:29:47.579Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/61/27/8c404b290ec650e634eacc674df943913722ec21097b0476d68458250c2f/anthropic-0.74.0-py3-none-any.whl", hash = "sha256:df29b8dfcdbd2751fa31177f643d8d8f66c5315fe06bdc42f9139e9f00d181d5", size = 371474, upload-time = "2025-11-18T15:29:45.748Z" }, -] - -[[package]] -name = "anyio" -version = "4.11.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "idna" }, - { name = "sniffio" }, - { name = "typing-extensions", marker = "python_full_version < '3.13'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/c6/78/7d432127c41b50bccba979505f272c16cbcadcc33645d5fa3a738110ae75/anyio-4.11.0.tar.gz", hash = "sha256:82a8d0b81e318cc5ce71a5f1f8b5c4e63619620b63141ef8c995fa0db95a57c4", size = 219094, upload-time = "2025-09-23T09:19:12.58Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/15/b3/9b1a8074496371342ec1e796a96f99c82c945a339cd81a8e73de28b4cf9e/anyio-4.11.0-py3-none-any.whl", hash = "sha256:0287e96f4d26d4149305414d4e3bc32f0dcd0862365a4bddea19d7a1ec38c4fc", size = 109097, upload-time = "2025-09-23T09:19:10.601Z" }, -] - -[[package]] -name = "appnope" -version = "0.1.4" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/35/5d/752690df9ef5b76e169e68d6a129fa6d08a7100ca7f754c89495db3c6019/appnope-0.1.4.tar.gz", hash = "sha256:1de3860566df9caf38f01f86f65e0e13e379af54f9e4bee1e66b48f2efffd1ee", size = 4170, upload-time = "2024-02-06T09:43:11.258Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/81/29/5ecc3a15d5a33e31b26c11426c45c501e439cb865d0bff96315d86443b78/appnope-0.1.4-py2.py3-none-any.whl", hash = "sha256:502575ee11cd7a28c0205f379b525beefebab9d161b7c964670864014ed7213c", size = 4321, upload-time = "2024-02-06T09:43:09.663Z" }, -] - -[[package]] -name = "asttokens" -version = "3.0.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/4a/e7/82da0a03e7ba5141f05cce0d302e6eed121ae055e0456ca228bf693984bc/asttokens-3.0.0.tar.gz", hash = "sha256:0dcd8baa8d62b0c1d118b399b2ddba3c4aff271d0d7a9e0d4c1681c79035bbc7", size = 61978, upload-time = "2024-11-30T04:30:14.439Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/25/8a/c46dcc25341b5bce5472c718902eb3d38600a903b14fa6aeecef3f21a46f/asttokens-3.0.0-py3-none-any.whl", hash = "sha256:e3078351a059199dd5138cb1c706e6430c05eff2ff136af5eb4790f9d28932e2", size = 26918, upload-time = "2024-11-30T04:30:10.946Z" }, -] - -[[package]] -name = "attrs" -version = "25.3.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/5a/b0/1367933a8532ee6ff8d63537de4f1177af4bff9f3e829baf7331f595bb24/attrs-25.3.0.tar.gz", hash = "sha256:75d7cefc7fb576747b2c81b4442d4d4a1ce0900973527c011d1030fd3bf4af1b", size = 812032, upload-time = "2025-03-13T11:10:22.779Z" } -wheels = [ 
- { url = "https://files.pythonhosted.org/packages/77/06/bb80f5f86020c4551da315d78b3ab75e8228f89f0162f2c3a819e407941a/attrs-25.3.0-py3-none-any.whl", hash = "sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3", size = 63815, upload-time = "2025-03-13T11:10:21.14Z" }, -] - -[[package]] -name = "bytecode" -version = "0.17.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/98/c4/4818b392104bd426171fc2ce9c79c8edb4019ba6505747626d0f7107766c/bytecode-0.17.0.tar.gz", hash = "sha256:0c37efa5bd158b1b873f530cceea2c645611d55bd2dc2a4758b09f185749b6fd", size = 105863, upload-time = "2025-09-03T19:55:45.703Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/ce/80/379e685099841f8501a19fb58b496512ef432331fed38276c3938ab09d8e/bytecode-0.17.0-py3-none-any.whl", hash = "sha256:64fb10cde1db7ef5cc39bd414ecebd54ba3b40e1c4cf8121ca5e72f170916ff8", size = 43045, upload-time = "2025-09-03T19:55:43.879Z" }, -] - -[[package]] -name = "cachetools" -version = "6.2.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/9d/61/e4fad8155db4a04bfb4734c7c8ff0882f078f24294d42798b3568eb63bff/cachetools-6.2.0.tar.gz", hash = "sha256:38b328c0889450f05f5e120f56ab68c8abaf424e1275522b138ffc93253f7e32", size = 30988, upload-time = "2025-08-25T18:57:30.924Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/6c/56/3124f61d37a7a4e7cc96afc5492c78ba0cb551151e530b54669ddd1436ef/cachetools-6.2.0-py3-none-any.whl", hash = "sha256:1c76a8960c0041fcc21097e357f882197c79da0dbff766e7317890a65d7d8ba6", size = 11276, upload-time = "2025-08-25T18:57:29.684Z" }, -] - -[[package]] -name = "certifi" -version = "2025.8.3" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/dc/67/960ebe6bf230a96cda2e0abcf73af550ec4f090005363542f0765df162e0/certifi-2025.8.3.tar.gz", hash = "sha256:e564105f78ded564e3ae7c923924435e1daa7463faeab5bb932bc53ffae63407", size = 162386, upload-time = "2025-08-03T03:07:47.08Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/e5/48/1549795ba7742c948d2ad169c1c8cdbae65bc450d6cd753d124b17c8cd32/certifi-2025.8.3-py3-none-any.whl", hash = "sha256:f6c12493cfb1b06ba2ff328595af9350c65d6644968e5d3a2ffd78699af217a5", size = 161216, upload-time = "2025-08-03T03:07:45.777Z" }, -] - -[[package]] -name = "cffi" -version = "2.0.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "pycparser", marker = "implementation_name != 'PyPy'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/eb/56/b1ba7935a17738ae8453301356628e8147c79dbb825bcbc73dc7401f9846/cffi-2.0.0.tar.gz", hash = "sha256:44d1b5909021139fe36001ae048dbdde8214afa20200eda0f64c068cac5d5529", size = 523588, upload-time = "2025-09-08T23:24:04.541Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/ea/47/4f61023ea636104d4f16ab488e268b93008c3d0bb76893b1b31db1f96802/cffi-2.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6d02d6655b0e54f54c4ef0b94eb6be0607b70853c45ce98bd278dc7de718be5d", size = 185271, upload-time = "2025-09-08T23:22:44.795Z" }, - { url = "https://files.pythonhosted.org/packages/df/a2/781b623f57358e360d62cdd7a8c681f074a71d445418a776eef0aadb4ab4/cffi-2.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8eca2a813c1cb7ad4fb74d368c2ffbbb4789d377ee5bb8df98373c2cc0dee76c", size = 181048, upload-time = "2025-09-08T23:22:45.938Z" }, - { url = 
"https://files.pythonhosted.org/packages/ff/df/a4f0fbd47331ceeba3d37c2e51e9dfc9722498becbeec2bd8bc856c9538a/cffi-2.0.0-cp312-cp312-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:21d1152871b019407d8ac3985f6775c079416c282e431a4da6afe7aefd2bccbe", size = 212529, upload-time = "2025-09-08T23:22:47.349Z" }, - { url = "https://files.pythonhosted.org/packages/d5/72/12b5f8d3865bf0f87cf1404d8c374e7487dcf097a1c91c436e72e6badd83/cffi-2.0.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:b21e08af67b8a103c71a250401c78d5e0893beff75e28c53c98f4de42f774062", size = 220097, upload-time = "2025-09-08T23:22:48.677Z" }, - { url = "https://files.pythonhosted.org/packages/c2/95/7a135d52a50dfa7c882ab0ac17e8dc11cec9d55d2c18dda414c051c5e69e/cffi-2.0.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:1e3a615586f05fc4065a8b22b8152f0c1b00cdbc60596d187c2a74f9e3036e4e", size = 207983, upload-time = "2025-09-08T23:22:50.06Z" }, - { url = "https://files.pythonhosted.org/packages/3a/c8/15cb9ada8895957ea171c62dc78ff3e99159ee7adb13c0123c001a2546c1/cffi-2.0.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:81afed14892743bbe14dacb9e36d9e0e504cd204e0b165062c488942b9718037", size = 206519, upload-time = "2025-09-08T23:22:51.364Z" }, - { url = "https://files.pythonhosted.org/packages/78/2d/7fa73dfa841b5ac06c7b8855cfc18622132e365f5b81d02230333ff26e9e/cffi-2.0.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3e17ed538242334bf70832644a32a7aae3d83b57567f9fd60a26257e992b79ba", size = 219572, upload-time = "2025-09-08T23:22:52.902Z" }, - { url = "https://files.pythonhosted.org/packages/07/e0/267e57e387b4ca276b90f0434ff88b2c2241ad72b16d31836adddfd6031b/cffi-2.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3925dd22fa2b7699ed2617149842d2e6adde22b262fcbfada50e3d195e4b3a94", size = 222963, upload-time = "2025-09-08T23:22:54.518Z" }, - { url = "https://files.pythonhosted.org/packages/b6/75/1f2747525e06f53efbd878f4d03bac5b859cbc11c633d0fb81432d98a795/cffi-2.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2c8f814d84194c9ea681642fd164267891702542f028a15fc97d4674b6206187", size = 221361, upload-time = "2025-09-08T23:22:55.867Z" }, - { url = "https://files.pythonhosted.org/packages/7b/2b/2b6435f76bfeb6bbf055596976da087377ede68df465419d192acf00c437/cffi-2.0.0-cp312-cp312-win32.whl", hash = "sha256:da902562c3e9c550df360bfa53c035b2f241fed6d9aef119048073680ace4a18", size = 172932, upload-time = "2025-09-08T23:22:57.188Z" }, - { url = "https://files.pythonhosted.org/packages/f8/ed/13bd4418627013bec4ed6e54283b1959cf6db888048c7cf4b4c3b5b36002/cffi-2.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:da68248800ad6320861f129cd9c1bf96ca849a2771a59e0344e88681905916f5", size = 183557, upload-time = "2025-09-08T23:22:58.351Z" }, - { url = "https://files.pythonhosted.org/packages/95/31/9f7f93ad2f8eff1dbc1c3656d7ca5bfd8fb52c9d786b4dcf19b2d02217fa/cffi-2.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:4671d9dd5ec934cb9a73e7ee9676f9362aba54f7f34910956b84d727b0d73fb6", size = 177762, upload-time = "2025-09-08T23:22:59.668Z" }, - { url = "https://files.pythonhosted.org/packages/4b/8d/a0a47a0c9e413a658623d014e91e74a50cdd2c423f7ccfd44086ef767f90/cffi-2.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:00bdf7acc5f795150faa6957054fbbca2439db2f775ce831222b66f192f03beb", size = 185230, upload-time = "2025-09-08T23:23:00.879Z" }, - { url = 
"https://files.pythonhosted.org/packages/4a/d2/a6c0296814556c68ee32009d9c2ad4f85f2707cdecfd7727951ec228005d/cffi-2.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:45d5e886156860dc35862657e1494b9bae8dfa63bf56796f2fb56e1679fc0bca", size = 181043, upload-time = "2025-09-08T23:23:02.231Z" }, - { url = "https://files.pythonhosted.org/packages/b0/1e/d22cc63332bd59b06481ceaac49d6c507598642e2230f201649058a7e704/cffi-2.0.0-cp313-cp313-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:07b271772c100085dd28b74fa0cd81c8fb1a3ba18b21e03d7c27f3436a10606b", size = 212446, upload-time = "2025-09-08T23:23:03.472Z" }, - { url = "https://files.pythonhosted.org/packages/a9/f5/a2c23eb03b61a0b8747f211eb716446c826ad66818ddc7810cc2cc19b3f2/cffi-2.0.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d48a880098c96020b02d5a1f7d9251308510ce8858940e6fa99ece33f610838b", size = 220101, upload-time = "2025-09-08T23:23:04.792Z" }, - { url = "https://files.pythonhosted.org/packages/f2/7f/e6647792fc5850d634695bc0e6ab4111ae88e89981d35ac269956605feba/cffi-2.0.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f93fd8e5c8c0a4aa1f424d6173f14a892044054871c771f8566e4008eaa359d2", size = 207948, upload-time = "2025-09-08T23:23:06.127Z" }, - { url = "https://files.pythonhosted.org/packages/cb/1e/a5a1bd6f1fb30f22573f76533de12a00bf274abcdc55c8edab639078abb6/cffi-2.0.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:dd4f05f54a52fb558f1ba9f528228066954fee3ebe629fc1660d874d040ae5a3", size = 206422, upload-time = "2025-09-08T23:23:07.753Z" }, - { url = "https://files.pythonhosted.org/packages/98/df/0a1755e750013a2081e863e7cd37e0cdd02664372c754e5560099eb7aa44/cffi-2.0.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c8d3b5532fc71b7a77c09192b4a5a200ea992702734a2e9279a37f2478236f26", size = 219499, upload-time = "2025-09-08T23:23:09.648Z" }, - { url = "https://files.pythonhosted.org/packages/50/e1/a969e687fcf9ea58e6e2a928ad5e2dd88cc12f6f0ab477e9971f2309b57c/cffi-2.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d9b29c1f0ae438d5ee9acb31cadee00a58c46cc9c0b2f9038c6b0b3470877a8c", size = 222928, upload-time = "2025-09-08T23:23:10.928Z" }, - { url = "https://files.pythonhosted.org/packages/36/54/0362578dd2c9e557a28ac77698ed67323ed5b9775ca9d3fe73fe191bb5d8/cffi-2.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6d50360be4546678fc1b79ffe7a66265e28667840010348dd69a314145807a1b", size = 221302, upload-time = "2025-09-08T23:23:12.42Z" }, - { url = "https://files.pythonhosted.org/packages/eb/6d/bf9bda840d5f1dfdbf0feca87fbdb64a918a69bca42cfa0ba7b137c48cb8/cffi-2.0.0-cp313-cp313-win32.whl", hash = "sha256:74a03b9698e198d47562765773b4a8309919089150a0bb17d829ad7b44b60d27", size = 172909, upload-time = "2025-09-08T23:23:14.32Z" }, - { url = "https://files.pythonhosted.org/packages/37/18/6519e1ee6f5a1e579e04b9ddb6f1676c17368a7aba48299c3759bbc3c8b3/cffi-2.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:19f705ada2530c1167abacb171925dd886168931e0a7b78f5bffcae5c6b5be75", size = 183402, upload-time = "2025-09-08T23:23:15.535Z" }, - { url = "https://files.pythonhosted.org/packages/cb/0e/02ceeec9a7d6ee63bb596121c2c8e9b3a9e150936f4fbef6ca1943e6137c/cffi-2.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:256f80b80ca3853f90c21b23ee78cd008713787b1b1e93eae9f3d6a7134abd91", size = 177780, upload-time = "2025-09-08T23:23:16.761Z" }, - { url = 
"https://files.pythonhosted.org/packages/92/c4/3ce07396253a83250ee98564f8d7e9789fab8e58858f35d07a9a2c78de9f/cffi-2.0.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:fc33c5141b55ed366cfaad382df24fe7dcbc686de5be719b207bb248e3053dc5", size = 185320, upload-time = "2025-09-08T23:23:18.087Z" }, - { url = "https://files.pythonhosted.org/packages/59/dd/27e9fa567a23931c838c6b02d0764611c62290062a6d4e8ff7863daf9730/cffi-2.0.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c654de545946e0db659b3400168c9ad31b5d29593291482c43e3564effbcee13", size = 181487, upload-time = "2025-09-08T23:23:19.622Z" }, - { url = "https://files.pythonhosted.org/packages/d6/43/0e822876f87ea8a4ef95442c3d766a06a51fc5298823f884ef87aaad168c/cffi-2.0.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:24b6f81f1983e6df8db3adc38562c83f7d4a0c36162885ec7f7b77c7dcbec97b", size = 220049, upload-time = "2025-09-08T23:23:20.853Z" }, - { url = "https://files.pythonhosted.org/packages/b4/89/76799151d9c2d2d1ead63c2429da9ea9d7aac304603de0c6e8764e6e8e70/cffi-2.0.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:12873ca6cb9b0f0d3a0da705d6086fe911591737a59f28b7936bdfed27c0d47c", size = 207793, upload-time = "2025-09-08T23:23:22.08Z" }, - { url = "https://files.pythonhosted.org/packages/bb/dd/3465b14bb9e24ee24cb88c9e3730f6de63111fffe513492bf8c808a3547e/cffi-2.0.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:d9b97165e8aed9272a6bb17c01e3cc5871a594a446ebedc996e2397a1c1ea8ef", size = 206300, upload-time = "2025-09-08T23:23:23.314Z" }, - { url = "https://files.pythonhosted.org/packages/47/d9/d83e293854571c877a92da46fdec39158f8d7e68da75bf73581225d28e90/cffi-2.0.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:afb8db5439b81cf9c9d0c80404b60c3cc9c3add93e114dcae767f1477cb53775", size = 219244, upload-time = "2025-09-08T23:23:24.541Z" }, - { url = "https://files.pythonhosted.org/packages/2b/0f/1f177e3683aead2bb00f7679a16451d302c436b5cbf2505f0ea8146ef59e/cffi-2.0.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:737fe7d37e1a1bffe70bd5754ea763a62a066dc5913ca57e957824b72a85e205", size = 222828, upload-time = "2025-09-08T23:23:26.143Z" }, - { url = "https://files.pythonhosted.org/packages/c6/0f/cafacebd4b040e3119dcb32fed8bdef8dfe94da653155f9d0b9dc660166e/cffi-2.0.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:38100abb9d1b1435bc4cc340bb4489635dc2f0da7456590877030c9b3d40b0c1", size = 220926, upload-time = "2025-09-08T23:23:27.873Z" }, - { url = "https://files.pythonhosted.org/packages/3e/aa/df335faa45b395396fcbc03de2dfcab242cd61a9900e914fe682a59170b1/cffi-2.0.0-cp314-cp314-win32.whl", hash = "sha256:087067fa8953339c723661eda6b54bc98c5625757ea62e95eb4898ad5e776e9f", size = 175328, upload-time = "2025-09-08T23:23:44.61Z" }, - { url = "https://files.pythonhosted.org/packages/bb/92/882c2d30831744296ce713f0feb4c1cd30f346ef747b530b5318715cc367/cffi-2.0.0-cp314-cp314-win_amd64.whl", hash = "sha256:203a48d1fb583fc7d78a4c6655692963b860a417c0528492a6bc21f1aaefab25", size = 185650, upload-time = "2025-09-08T23:23:45.848Z" }, - { url = "https://files.pythonhosted.org/packages/9f/2c/98ece204b9d35a7366b5b2c6539c350313ca13932143e79dc133ba757104/cffi-2.0.0-cp314-cp314-win_arm64.whl", hash = "sha256:dbd5c7a25a7cb98f5ca55d258b103a2054f859a46ae11aaf23134f9cc0d356ad", size = 180687, upload-time = "2025-09-08T23:23:47.105Z" }, - { url = 
"https://files.pythonhosted.org/packages/3e/61/c768e4d548bfa607abcda77423448df8c471f25dbe64fb2ef6d555eae006/cffi-2.0.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:9a67fc9e8eb39039280526379fb3a70023d77caec1852002b4da7e8b270c4dd9", size = 188773, upload-time = "2025-09-08T23:23:29.347Z" }, - { url = "https://files.pythonhosted.org/packages/2c/ea/5f76bce7cf6fcd0ab1a1058b5af899bfbef198bea4d5686da88471ea0336/cffi-2.0.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:7a66c7204d8869299919db4d5069a82f1561581af12b11b3c9f48c584eb8743d", size = 185013, upload-time = "2025-09-08T23:23:30.63Z" }, - { url = "https://files.pythonhosted.org/packages/be/b4/c56878d0d1755cf9caa54ba71e5d049479c52f9e4afc230f06822162ab2f/cffi-2.0.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7cc09976e8b56f8cebd752f7113ad07752461f48a58cbba644139015ac24954c", size = 221593, upload-time = "2025-09-08T23:23:31.91Z" }, - { url = "https://files.pythonhosted.org/packages/e0/0d/eb704606dfe8033e7128df5e90fee946bbcb64a04fcdaa97321309004000/cffi-2.0.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:92b68146a71df78564e4ef48af17551a5ddd142e5190cdf2c5624d0c3ff5b2e8", size = 209354, upload-time = "2025-09-08T23:23:33.214Z" }, - { url = "https://files.pythonhosted.org/packages/d8/19/3c435d727b368ca475fb8742ab97c9cb13a0de600ce86f62eab7fa3eea60/cffi-2.0.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b1e74d11748e7e98e2f426ab176d4ed720a64412b6a15054378afdb71e0f37dc", size = 208480, upload-time = "2025-09-08T23:23:34.495Z" }, - { url = "https://files.pythonhosted.org/packages/d0/44/681604464ed9541673e486521497406fadcc15b5217c3e326b061696899a/cffi-2.0.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:28a3a209b96630bca57cce802da70c266eb08c6e97e5afd61a75611ee6c64592", size = 221584, upload-time = "2025-09-08T23:23:36.096Z" }, - { url = "https://files.pythonhosted.org/packages/25/8e/342a504ff018a2825d395d44d63a767dd8ebc927ebda557fecdaca3ac33a/cffi-2.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:7553fb2090d71822f02c629afe6042c299edf91ba1bf94951165613553984512", size = 224443, upload-time = "2025-09-08T23:23:37.328Z" }, - { url = "https://files.pythonhosted.org/packages/e1/5e/b666bacbbc60fbf415ba9988324a132c9a7a0448a9a8f125074671c0f2c3/cffi-2.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:6c6c373cfc5c83a975506110d17457138c8c63016b563cc9ed6e056a82f13ce4", size = 223437, upload-time = "2025-09-08T23:23:38.945Z" }, - { url = "https://files.pythonhosted.org/packages/a0/1d/ec1a60bd1a10daa292d3cd6bb0b359a81607154fb8165f3ec95fe003b85c/cffi-2.0.0-cp314-cp314t-win32.whl", hash = "sha256:1fc9ea04857caf665289b7a75923f2c6ed559b8298a1b8c49e59f7dd95c8481e", size = 180487, upload-time = "2025-09-08T23:23:40.423Z" }, - { url = "https://files.pythonhosted.org/packages/bf/41/4c1168c74fac325c0c8156f04b6749c8b6a8f405bbf91413ba088359f60d/cffi-2.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:d68b6cef7827e8641e8ef16f4494edda8b36104d79773a334beaa1e3521430f6", size = 191726, upload-time = "2025-09-08T23:23:41.742Z" }, - { url = "https://files.pythonhosted.org/packages/ae/3a/dbeec9d1ee0844c679f6bb5d6ad4e9f198b1224f4e7a32825f47f6192b0c/cffi-2.0.0-cp314-cp314t-win_arm64.whl", hash = "sha256:0a1527a803f0a659de1af2e1fd700213caba79377e27e4693648c2923da066f9", size = 184195, upload-time = "2025-09-08T23:23:43.004Z" }, -] - -[[package]] -name = "charset-normalizer" -version = "3.4.3" -source = { registry = 
"https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/83/2d/5fd176ceb9b2fc619e63405525573493ca23441330fcdaee6bef9460e924/charset_normalizer-3.4.3.tar.gz", hash = "sha256:6fce4b8500244f6fcb71465d4a4930d132ba9ab8e71a7859e6a5d59851068d14", size = 122371, upload-time = "2025-08-09T07:57:28.46Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/e9/5e/14c94999e418d9b87682734589404a25854d5f5d0408df68bc15b6ff54bb/charset_normalizer-3.4.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:e28e334d3ff134e88989d90ba04b47d84382a828c061d0d1027b1b12a62b39b1", size = 205655, upload-time = "2025-08-09T07:56:08.475Z" }, - { url = "https://files.pythonhosted.org/packages/7d/a8/c6ec5d389672521f644505a257f50544c074cf5fc292d5390331cd6fc9c3/charset_normalizer-3.4.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0cacf8f7297b0c4fcb74227692ca46b4a5852f8f4f24b3c766dd94a1075c4884", size = 146223, upload-time = "2025-08-09T07:56:09.708Z" }, - { url = "https://files.pythonhosted.org/packages/fc/eb/a2ffb08547f4e1e5415fb69eb7db25932c52a52bed371429648db4d84fb1/charset_normalizer-3.4.3-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c6fd51128a41297f5409deab284fecbe5305ebd7e5a1f959bee1c054622b7018", size = 159366, upload-time = "2025-08-09T07:56:11.326Z" }, - { url = "https://files.pythonhosted.org/packages/82/10/0fd19f20c624b278dddaf83b8464dcddc2456cb4b02bb902a6da126b87a1/charset_normalizer-3.4.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:3cfb2aad70f2c6debfbcb717f23b7eb55febc0bb23dcffc0f076009da10c6392", size = 157104, upload-time = "2025-08-09T07:56:13.014Z" }, - { url = "https://files.pythonhosted.org/packages/16/ab/0233c3231af734f5dfcf0844aa9582d5a1466c985bbed6cedab85af9bfe3/charset_normalizer-3.4.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1606f4a55c0fd363d754049cdf400175ee96c992b1f8018b993941f221221c5f", size = 151830, upload-time = "2025-08-09T07:56:14.428Z" }, - { url = "https://files.pythonhosted.org/packages/ae/02/e29e22b4e02839a0e4a06557b1999d0a47db3567e82989b5bb21f3fbbd9f/charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:027b776c26d38b7f15b26a5da1044f376455fb3766df8fc38563b4efbc515154", size = 148854, upload-time = "2025-08-09T07:56:16.051Z" }, - { url = "https://files.pythonhosted.org/packages/05/6b/e2539a0a4be302b481e8cafb5af8792da8093b486885a1ae4d15d452bcec/charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:42e5088973e56e31e4fa58eb6bd709e42fc03799c11c42929592889a2e54c491", size = 160670, upload-time = "2025-08-09T07:56:17.314Z" }, - { url = "https://files.pythonhosted.org/packages/31/e7/883ee5676a2ef217a40ce0bffcc3d0dfbf9e64cbcfbdf822c52981c3304b/charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:cc34f233c9e71701040d772aa7490318673aa7164a0efe3172b2981218c26d93", size = 158501, upload-time = "2025-08-09T07:56:18.641Z" }, - { url = "https://files.pythonhosted.org/packages/c1/35/6525b21aa0db614cf8b5792d232021dca3df7f90a1944db934efa5d20bb1/charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:320e8e66157cc4e247d9ddca8e21f427efc7a04bbd0ac8a9faf56583fa543f9f", size = 153173, upload-time = "2025-08-09T07:56:20.289Z" }, - { url = 
"https://files.pythonhosted.org/packages/50/ee/f4704bad8201de513fdc8aac1cabc87e38c5818c93857140e06e772b5892/charset_normalizer-3.4.3-cp312-cp312-win32.whl", hash = "sha256:fb6fecfd65564f208cbf0fba07f107fb661bcd1a7c389edbced3f7a493f70e37", size = 99822, upload-time = "2025-08-09T07:56:21.551Z" }, - { url = "https://files.pythonhosted.org/packages/39/f5/3b3836ca6064d0992c58c7561c6b6eee1b3892e9665d650c803bd5614522/charset_normalizer-3.4.3-cp312-cp312-win_amd64.whl", hash = "sha256:86df271bf921c2ee3818f0522e9a5b8092ca2ad8b065ece5d7d9d0e9f4849bcc", size = 107543, upload-time = "2025-08-09T07:56:23.115Z" }, - { url = "https://files.pythonhosted.org/packages/65/ca/2135ac97709b400c7654b4b764daf5c5567c2da45a30cdd20f9eefe2d658/charset_normalizer-3.4.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:14c2a87c65b351109f6abfc424cab3927b3bdece6f706e4d12faaf3d52ee5efe", size = 205326, upload-time = "2025-08-09T07:56:24.721Z" }, - { url = "https://files.pythonhosted.org/packages/71/11/98a04c3c97dd34e49c7d247083af03645ca3730809a5509443f3c37f7c99/charset_normalizer-3.4.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:41d1fc408ff5fdfb910200ec0e74abc40387bccb3252f3f27c0676731df2b2c8", size = 146008, upload-time = "2025-08-09T07:56:26.004Z" }, - { url = "https://files.pythonhosted.org/packages/60/f5/4659a4cb3c4ec146bec80c32d8bb16033752574c20b1252ee842a95d1a1e/charset_normalizer-3.4.3-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:1bb60174149316da1c35fa5233681f7c0f9f514509b8e399ab70fea5f17e45c9", size = 159196, upload-time = "2025-08-09T07:56:27.25Z" }, - { url = "https://files.pythonhosted.org/packages/86/9e/f552f7a00611f168b9a5865a1414179b2c6de8235a4fa40189f6f79a1753/charset_normalizer-3.4.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:30d006f98569de3459c2fc1f2acde170b7b2bd265dc1943e87e1a4efe1b67c31", size = 156819, upload-time = "2025-08-09T07:56:28.515Z" }, - { url = "https://files.pythonhosted.org/packages/7e/95/42aa2156235cbc8fa61208aded06ef46111c4d3f0de233107b3f38631803/charset_normalizer-3.4.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:416175faf02e4b0810f1f38bcb54682878a4af94059a1cd63b8747244420801f", size = 151350, upload-time = "2025-08-09T07:56:29.716Z" }, - { url = "https://files.pythonhosted.org/packages/c2/a9/3865b02c56f300a6f94fc631ef54f0a8a29da74fb45a773dfd3dcd380af7/charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6aab0f181c486f973bc7262a97f5aca3ee7e1437011ef0c2ec04b5a11d16c927", size = 148644, upload-time = "2025-08-09T07:56:30.984Z" }, - { url = "https://files.pythonhosted.org/packages/77/d9/cbcf1a2a5c7d7856f11e7ac2d782aec12bdfea60d104e60e0aa1c97849dc/charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:fdabf8315679312cfa71302f9bd509ded4f2f263fb5b765cf1433b39106c3cc9", size = 160468, upload-time = "2025-08-09T07:56:32.252Z" }, - { url = "https://files.pythonhosted.org/packages/f6/42/6f45efee8697b89fda4d50580f292b8f7f9306cb2971d4b53f8914e4d890/charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:bd28b817ea8c70215401f657edef3a8aa83c29d447fb0b622c35403780ba11d5", size = 158187, upload-time = "2025-08-09T07:56:33.481Z" }, - { url = 
"https://files.pythonhosted.org/packages/70/99/f1c3bdcfaa9c45b3ce96f70b14f070411366fa19549c1d4832c935d8e2c3/charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:18343b2d246dc6761a249ba1fb13f9ee9a2bcd95decc767319506056ea4ad4dc", size = 152699, upload-time = "2025-08-09T07:56:34.739Z" }, - { url = "https://files.pythonhosted.org/packages/a3/ad/b0081f2f99a4b194bcbb1934ef3b12aa4d9702ced80a37026b7607c72e58/charset_normalizer-3.4.3-cp313-cp313-win32.whl", hash = "sha256:6fb70de56f1859a3f71261cbe41005f56a7842cc348d3aeb26237560bfa5e0ce", size = 99580, upload-time = "2025-08-09T07:56:35.981Z" }, - { url = "https://files.pythonhosted.org/packages/9a/8f/ae790790c7b64f925e5c953b924aaa42a243fb778fed9e41f147b2a5715a/charset_normalizer-3.4.3-cp313-cp313-win_amd64.whl", hash = "sha256:cf1ebb7d78e1ad8ec2a8c4732c7be2e736f6e5123a4146c5b89c9d1f585f8cef", size = 107366, upload-time = "2025-08-09T07:56:37.339Z" }, - { url = "https://files.pythonhosted.org/packages/8e/91/b5a06ad970ddc7a0e513112d40113e834638f4ca1120eb727a249fb2715e/charset_normalizer-3.4.3-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:3cd35b7e8aedeb9e34c41385fda4f73ba609e561faedfae0a9e75e44ac558a15", size = 204342, upload-time = "2025-08-09T07:56:38.687Z" }, - { url = "https://files.pythonhosted.org/packages/ce/ec/1edc30a377f0a02689342f214455c3f6c2fbedd896a1d2f856c002fc3062/charset_normalizer-3.4.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b89bc04de1d83006373429975f8ef9e7932534b8cc9ca582e4db7d20d91816db", size = 145995, upload-time = "2025-08-09T07:56:40.048Z" }, - { url = "https://files.pythonhosted.org/packages/17/e5/5e67ab85e6d22b04641acb5399c8684f4d37caf7558a53859f0283a650e9/charset_normalizer-3.4.3-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2001a39612b241dae17b4687898843f254f8748b796a2e16f1051a17078d991d", size = 158640, upload-time = "2025-08-09T07:56:41.311Z" }, - { url = "https://files.pythonhosted.org/packages/f1/e5/38421987f6c697ee3722981289d554957c4be652f963d71c5e46a262e135/charset_normalizer-3.4.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:8dcfc373f888e4fb39a7bc57e93e3b845e7f462dacc008d9749568b1c4ece096", size = 156636, upload-time = "2025-08-09T07:56:43.195Z" }, - { url = "https://files.pythonhosted.org/packages/a0/e4/5a075de8daa3ec0745a9a3b54467e0c2967daaaf2cec04c845f73493e9a1/charset_normalizer-3.4.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:18b97b8404387b96cdbd30ad660f6407799126d26a39ca65729162fd810a99aa", size = 150939, upload-time = "2025-08-09T07:56:44.819Z" }, - { url = "https://files.pythonhosted.org/packages/02/f7/3611b32318b30974131db62b4043f335861d4d9b49adc6d57c1149cc49d4/charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:ccf600859c183d70eb47e05a44cd80a4ce77394d1ac0f79dbd2dd90a69a3a049", size = 148580, upload-time = "2025-08-09T07:56:46.684Z" }, - { url = "https://files.pythonhosted.org/packages/7e/61/19b36f4bd67f2793ab6a99b979b4e4f3d8fc754cbdffb805335df4337126/charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:53cd68b185d98dde4ad8990e56a58dea83a4162161b1ea9272e5c9182ce415e0", size = 159870, upload-time = "2025-08-09T07:56:47.941Z" }, - { url = 
"https://files.pythonhosted.org/packages/06/57/84722eefdd338c04cf3030ada66889298eaedf3e7a30a624201e0cbe424a/charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:30a96e1e1f865f78b030d65241c1ee850cdf422d869e9028e2fc1d5e4db73b92", size = 157797, upload-time = "2025-08-09T07:56:49.756Z" }, - { url = "https://files.pythonhosted.org/packages/72/2a/aff5dd112b2f14bcc3462c312dce5445806bfc8ab3a7328555da95330e4b/charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d716a916938e03231e86e43782ca7878fb602a125a91e7acb8b5112e2e96ac16", size = 152224, upload-time = "2025-08-09T07:56:51.369Z" }, - { url = "https://files.pythonhosted.org/packages/b7/8c/9839225320046ed279c6e839d51f028342eb77c91c89b8ef2549f951f3ec/charset_normalizer-3.4.3-cp314-cp314-win32.whl", hash = "sha256:c6dbd0ccdda3a2ba7c2ecd9d77b37f3b5831687d8dc1b6ca5f56a4880cc7b7ce", size = 100086, upload-time = "2025-08-09T07:56:52.722Z" }, - { url = "https://files.pythonhosted.org/packages/ee/7a/36fbcf646e41f710ce0a563c1c9a343c6edf9be80786edeb15b6f62e17db/charset_normalizer-3.4.3-cp314-cp314-win_amd64.whl", hash = "sha256:73dc19b562516fc9bcf6e5d6e596df0b4eb98d87e4f79f3ae71840e6ed21361c", size = 107400, upload-time = "2025-08-09T07:56:55.172Z" }, - { url = "https://files.pythonhosted.org/packages/8a/1f/f041989e93b001bc4e44bb1669ccdcf54d3f00e628229a85b08d330615c5/charset_normalizer-3.4.3-py3-none-any.whl", hash = "sha256:ce571ab16d890d23b5c278547ba694193a45011ff86a9162a71307ed9f86759a", size = 53175, upload-time = "2025-08-09T07:57:26.864Z" }, -] - -[[package]] -name = "claude-agent-sdk" -version = "0.1.8" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "anyio" }, - { name = "mcp" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/6e/2c/14828b10a5c99a3cdc42b12451c9ed03de6d53a712da4fe7b0b41c28e693/claude_agent_sdk-0.1.8.tar.gz", hash = "sha256:8ee495215132edc7f88e439f3f071154a016cea62d393fbf985eb806793ed3d1", size = 50899, upload-time = "2025-11-19T05:07:58.064Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/2a/4e/fe4da2c056caaa4eb819181e77f2497f39ab2fb629ae93f0bed62a521982/claude_agent_sdk-0.1.8-py3-none-macosx_11_0_arm64.whl", hash = "sha256:bdec17988dba541bd48487d68d8e2dbcbe5fef718744f3c73b4236bb3e290875", size = 49380557, upload-time = "2025-11-19T05:07:48.607Z" }, - { url = "https://files.pythonhosted.org/packages/95/7d/b1f6648d6631c892205c3db44f862532471aa2dead846c9d78c260ba3a73/claude_agent_sdk-0.1.8-py3-none-manylinux_2_17_x86_64.whl", hash = "sha256:6640f4c977842dc73a277a7f934a889c0161ab78ad454806cfb2b34eb0a2a7f7", size = 65237961, upload-time = "2025-11-19T05:07:52.021Z" }, - { url = "https://files.pythonhosted.org/packages/b9/fd/cba2bad3c79519be68c266b95a55508f856f7c3b3eaa47cfa9051672a221/claude_agent_sdk-0.1.8-py3-none-win_amd64.whl", hash = "sha256:4b2db1276d553b5cfa939701d0f6b9da38797db93445b9579f97498d0b4f3724", size = 68148406, upload-time = "2025-11-19T05:07:55.279Z" }, -] - -[[package]] -name = "click" -version = "8.3.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "colorama", marker = "sys_platform == 'win32'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/46/61/de6cd827efad202d7057d93e0fed9294b96952e188f7384832791c7b2254/click-8.3.0.tar.gz", hash = "sha256:e7b8232224eba16f4ebe410c25ced9f7875cb5f3263ffc93cc3e8da705e229c4", size = 276943, upload-time = "2025-09-18T17:32:23.696Z" } -wheels = [ - { url = 
"https://files.pythonhosted.org/packages/db/d3/9dcc0f5797f070ec8edf30fbadfb200e71d9db6b84d211e3b2085a7589a0/click-8.3.0-py3-none-any.whl", hash = "sha256:9b9f285302c6e3064f4330c05f05b81945b2a39544279343e6e7c5f27a9baddc", size = 107295, upload-time = "2025-09-18T17:32:22.42Z" }, -] - -[[package]] -name = "cloudpickle" -version = "3.1.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/52/39/069100b84d7418bc358d81669d5748efb14b9cceacd2f9c75f550424132f/cloudpickle-3.1.1.tar.gz", hash = "sha256:b216fa8ae4019d5482a8ac3c95d8f6346115d8835911fd4aefd1a445e4242c64", size = 22113, upload-time = "2025-01-14T17:02:05.085Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/7e/e8/64c37fadfc2816a7701fa8a6ed8d87327c7d54eacfbfb6edab14a2f2be75/cloudpickle-3.1.1-py3-none-any.whl", hash = "sha256:c8c5a44295039331ee9dad40ba100a9c7297b6f988e50e87ccdf3765a668350e", size = 20992, upload-time = "2025-01-14T17:02:02.417Z" }, -] - -[[package]] -name = "colorama" -version = "0.4.6" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" }, -] - -[[package]] -name = "comm" -version = "0.2.3" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/4c/13/7d740c5849255756bc17888787313b61fd38a0a8304fc4f073dfc46122aa/comm-0.2.3.tar.gz", hash = "sha256:2dc8048c10962d55d7ad693be1e7045d891b7ce8d999c97963a5e3e99c055971", size = 6319, upload-time = "2025-07-25T14:02:04.452Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/60/97/891a0971e1e4a8c5d2b20bbe0e524dc04548d2307fee33cdeba148fd4fc7/comm-0.2.3-py3-none-any.whl", hash = "sha256:c615d91d75f7f04f095b30d1c1711babd43bdc6419c1be9886a85f2f4e489417", size = 7294, upload-time = "2025-07-25T14:02:02.896Z" }, -] - -[[package]] -name = "datadog" -version = "0.52.1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "requests" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/a9/e6/ec5e4b4dbecd63cecae94009ef6dde9ab421d7d0022e6027586cc3776921/datadog-0.52.1.tar.gz", hash = "sha256:44c6deb563c4522dba206fba2e2bb93d3b04113c40191851ba3a241d82b5fd0b", size = 368037, upload-time = "2025-07-31T15:49:43.425Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/90/19/e0e39f10169ca3e37fa6b5be2f6d1c729c92d677f1bd21ad6d448df8bec8/datadog-0.52.1-py2.py3-none-any.whl", hash = "sha256:b8c92cd761618ee062f114171067e4c400d48c9f0dad16cb285042439d9d5d4e", size = 129952, upload-time = "2025-07-31T15:49:41.8Z" }, -] - -[[package]] -name = "ddtrace" -version = "3.15.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "bytecode" }, - { name = "envier" }, - { name = "legacy-cgi", marker = "python_full_version >= '3.13'" }, - { name = "opentelemetry-api" }, - { name = "protobuf" }, - { name = "wrapt" }, -] -sdist = { url = 
"https://files.pythonhosted.org/packages/af/6b/15a712ea4aa390f50790af439edb9c08a21f126fea02fe500f9c223362fe/ddtrace-3.15.0.tar.gz", hash = "sha256:6f9df8cd22180c8521ab226f0b5ffc3ef2579484aa0daef879d4af3f3109d7a0", size = 7334604, upload-time = "2025-09-26T20:44:28.43Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/c7/67/20e9785c8ad69f0d7aa48dadd056e056395c25fa93664faf4e835a934ed8/ddtrace-3.15.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:da7a090dead082ac6b8cc8c9b915b6456f84a98d3091ecdd73412ba37348e734", size = 6316867, upload-time = "2025-09-26T20:42:27.351Z" }, - { url = "https://files.pythonhosted.org/packages/1d/6d/930ee734ce40fbd65c9599b6ce13b49fd6be18ee263f07aee26bd00673b8/ddtrace-3.15.0-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:33add489db723b4e7ee130173ad3cebbda5bee24fa400604e3b1d423aebe966c", size = 6663729, upload-time = "2025-09-26T20:42:29.828Z" }, - { url = "https://files.pythonhosted.org/packages/57/4d/f9e2266ff45666ffb9064e8e58bfec88665d7ba6eb3c20e79e6396ed342e/ddtrace-3.15.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3c684efe655a26e7effb309e04656d71256f731f803b4dba4afa0c3954073158", size = 7359374, upload-time = "2025-09-26T20:42:31.997Z" }, - { url = "https://files.pythonhosted.org/packages/d8/96/cfbb203a551e417bfad6e4c11349bb94cdcd1a098f3100e3df230084bb27/ddtrace-3.15.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c9375cdcf022c6320ac2853971c39de5d3e1393650d634ab2ddf02f222ab8a44", size = 7633990, upload-time = "2025-09-26T20:42:34.11Z" }, - { url = "https://files.pythonhosted.org/packages/69/0b/3df92044fa40fc54d05ad31402d5cfe88ad23d2120e93457b52e9b8d8bb2/ddtrace-3.15.0-cp312-cp312-manylinux_2_28_i686.whl", hash = "sha256:e9e83b8bccfc96e9c41a89dcf3b0485b79d22e3350ffeddff9a1e925548d280a", size = 5481875, upload-time = "2025-09-26T20:42:36.464Z" }, - { url = "https://files.pythonhosted.org/packages/36/65/f4d611777bba4a1cf0f613ee42f684a1913ce25c57f37dc5e2b65d68a735/ddtrace-3.15.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:7c8fa4afcd10a229af0b051ca49592011c9ca6893d22d83d101cb6b59d0f6608", size = 8375995, upload-time = "2025-09-26T20:42:39.197Z" }, - { url = "https://files.pythonhosted.org/packages/38/a8/d32756564b9760b20516f184a4ddd1d46a8f8a417dd51c861ebffb7027df/ddtrace-3.15.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a6bd4297fcd52bb7194f53445ceb5d41a2c363b9fd4fa2955acdb12468ee0f58", size = 6566482, upload-time = "2025-09-26T20:42:41.4Z" }, - { url = "https://files.pythonhosted.org/packages/71/fa/86253527f85675a46d7455875527ea825a4cc2bb17eef81f57c0646de64c/ddtrace-3.15.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:8aae74c529733e5868eedba2439751c5e6f728655b1ca05bffdf97e7d23094ad", size = 8709114, upload-time = "2025-09-26T20:42:43.619Z" }, - { url = "https://files.pythonhosted.org/packages/bc/26/cd8fe70e8034aa82165fdc0acc8cec579248b78b143d985416d4e17c5876/ddtrace-3.15.0-cp312-cp312-win32.whl", hash = "sha256:ef49f4bda92c1505e1ea369ca8fe1c6b9fdc16eedbe57a082e82d60393da59bf", size = 5016949, upload-time = "2025-09-26T20:42:46.026Z" }, - { url = "https://files.pythonhosted.org/packages/21/48/fac1b93cd2eac915d142ba4960ccd1e13bbacadf3c1f2b71b2fe9234315c/ddtrace-3.15.0-cp312-cp312-win_amd64.whl", hash = "sha256:0b8684ca8c20192c9fbc65b311d920d33e7c685b436b0027a2f6529802c40723", size = 5572340, upload-time = "2025-09-26T20:42:49.341Z" }, - { url = 
"https://files.pythonhosted.org/packages/9f/1d/26e97ba878c881289bb51f9ebaaf26372c32a6b518f51315cedcdf15e690/ddtrace-3.15.0-cp312-cp312-win_arm64.whl", hash = "sha256:6c22b5c6c6e46a3eedf688d3cf07bcad70b1c218b6ba2908e7979bf62dac219f", size = 5290707, upload-time = "2025-09-26T20:42:51.669Z" }, - { url = "https://files.pythonhosted.org/packages/5b/78/7dcf9c52c66f6eaec3ce9341da74d45f7fa9358be61974375057f899048a/ddtrace-3.15.0-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:53ad8354dff89196a59effdb50437c6a67ad51e953ac786d7d9185181076a2dc", size = 6311237, upload-time = "2025-09-26T20:42:54.123Z" }, - { url = "https://files.pythonhosted.org/packages/af/8e/ce612803daace00d20c75465a028e5a91076cf796a71c44b0cd6d08799e3/ddtrace-3.15.0-cp313-cp313-macosx_12_0_x86_64.whl", hash = "sha256:5a0c44bf5c3e4b6abb7145ecd69883b10744859483ea9fb7c69f821f1c35a1cb", size = 6658051, upload-time = "2025-09-26T20:42:57.315Z" }, - { url = "https://files.pythonhosted.org/packages/b8/24/8d8f835fdb06b324ec278b39920cb111f43bf15c45890797e881b808a182/ddtrace-3.15.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7e5c2e99840f613720b4d1ccb6075fd3b7a871fa00a0991b32380e49fed1b2ac", size = 7355364, upload-time = "2025-09-26T20:42:59.59Z" }, - { url = "https://files.pythonhosted.org/packages/3b/25/3fade0e92493eb23385b4809e23539a3ffa87225d8244822122132a3f36d/ddtrace-3.15.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:b0ad8ad126d351b8ee68a572ec6e18596cc536f64ba2c9f76cb4c8cefcf33f25", size = 7627045, upload-time = "2025-09-26T20:43:01.941Z" }, - { url = "https://files.pythonhosted.org/packages/ab/1b/c408400414a17a84ce67c197b7217babf048085e89abf097a907a21bc59e/ddtrace-3.15.0-cp313-cp313-manylinux_2_28_i686.whl", hash = "sha256:76f7d0e3b1ba700220d497e2fcb251b6d076826b27a86afe7a900c2a01dfe8b2", size = 5476128, upload-time = "2025-09-26T20:43:04.283Z" }, - { url = "https://files.pythonhosted.org/packages/b3/6f/089a364607e39386d19e3bf040664f05a1ceb5657df4e3db77d6e565603c/ddtrace-3.15.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:765204cf089c02f5f9e2605c009bb18f7ea45f9e2c6e84a06d33e65971eba105", size = 8372109, upload-time = "2025-09-26T20:43:06.916Z" }, - { url = "https://files.pythonhosted.org/packages/49/32/75b32779e393e435ef4b497bb347d98741cc314f961b5e3195874bca60f1/ddtrace-3.15.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:67e683c06e68d41d2d4cfd759ce76d90de71566d78fd93c415b8ba83e91d057e", size = 6562682, upload-time = "2025-09-26T20:43:09.727Z" }, - { url = "https://files.pythonhosted.org/packages/6c/a0/e2bdf38fda58989947c4593eee4f65a7fc0d25c9ab6e1713ca7f94fbd523/ddtrace-3.15.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:c5b0bf336aca9ac3783c19bf2b2c26f406bcf5a3b18ca3190d7b033607f46f70", size = 8705274, upload-time = "2025-09-26T20:43:12.247Z" }, - { url = "https://files.pythonhosted.org/packages/bd/e8/8e6a09a9bf4a9dbd316fd05336e3071f46b15071bd32e90c5e6839d8b405/ddtrace-3.15.0-cp313-cp313-win32.whl", hash = "sha256:ce903d79837aae5cc47284c0d49b8b910e985375bfcb19a7c7184582a8d09bfe", size = 5013999, upload-time = "2025-09-26T20:43:15.318Z" }, - { url = "https://files.pythonhosted.org/packages/31/37/e27df1d796d1c128f13b1c21ba5adfc059ca6763e2d2d0f95c2b5f427eaa/ddtrace-3.15.0-cp313-cp313-win_amd64.whl", hash = "sha256:cbe8748058d2050d67101102e0c31767f6c9556a22dd865764b78a0adca458f5", size = 5569335, upload-time = "2025-09-26T20:43:17.864Z" }, - { url = 
"https://files.pythonhosted.org/packages/89/a3/0d0401d38e9e1767518c1edcd56501b7748b134737b800e33f9f636e4624/ddtrace-3.15.0-cp313-cp313-win_arm64.whl", hash = "sha256:6c38e5098b9bce89bf78dded4bf5f07096e9ccb44cb1af0433c373bc5c268472", size = 5288400, upload-time = "2025-09-26T20:43:20.39Z" }, -] - -[[package]] -name = "debugpy" -version = "1.8.17" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/15/ad/71e708ff4ca377c4230530d6a7aa7992592648c122a2cd2b321cf8b35a76/debugpy-1.8.17.tar.gz", hash = "sha256:fd723b47a8c08892b1a16b2c6239a8b96637c62a59b94bb5dab4bac592a58a8e", size = 1644129, upload-time = "2025-09-17T16:33:20.633Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/08/2b/9d8e65beb2751876c82e1aceb32f328c43ec872711fa80257c7674f45650/debugpy-1.8.17-cp312-cp312-macosx_15_0_universal2.whl", hash = "sha256:f14467edef672195c6f6b8e27ce5005313cb5d03c9239059bc7182b60c176e2d", size = 2549522, upload-time = "2025-09-17T16:33:38.466Z" }, - { url = "https://files.pythonhosted.org/packages/b4/78/eb0d77f02971c05fca0eb7465b18058ba84bd957062f5eec82f941ac792a/debugpy-1.8.17-cp312-cp312-manylinux_2_34_x86_64.whl", hash = "sha256:24693179ef9dfa20dca8605905a42b392be56d410c333af82f1c5dff807a64cc", size = 4309417, upload-time = "2025-09-17T16:33:41.299Z" }, - { url = "https://files.pythonhosted.org/packages/37/42/c40f1d8cc1fed1e75ea54298a382395b8b937d923fcf41ab0797a554f555/debugpy-1.8.17-cp312-cp312-win32.whl", hash = "sha256:6a4e9dacf2cbb60d2514ff7b04b4534b0139facbf2abdffe0639ddb6088e59cf", size = 5277130, upload-time = "2025-09-17T16:33:43.554Z" }, - { url = "https://files.pythonhosted.org/packages/72/22/84263b205baad32b81b36eac076de0cdbe09fe2d0637f5b32243dc7c925b/debugpy-1.8.17-cp312-cp312-win_amd64.whl", hash = "sha256:e8f8f61c518952fb15f74a302e068b48d9c4691768ade433e4adeea961993464", size = 5319053, upload-time = "2025-09-17T16:33:53.033Z" }, - { url = "https://files.pythonhosted.org/packages/50/76/597e5cb97d026274ba297af8d89138dfd9e695767ba0e0895edb20963f40/debugpy-1.8.17-cp313-cp313-macosx_15_0_universal2.whl", hash = "sha256:857c1dd5d70042502aef1c6d1c2801211f3ea7e56f75e9c335f434afb403e464", size = 2538386, upload-time = "2025-09-17T16:33:54.594Z" }, - { url = "https://files.pythonhosted.org/packages/5f/60/ce5c34fcdfec493701f9d1532dba95b21b2f6394147234dce21160bd923f/debugpy-1.8.17-cp313-cp313-manylinux_2_34_x86_64.whl", hash = "sha256:3bea3b0b12f3946e098cce9b43c3c46e317b567f79570c3f43f0b96d00788088", size = 4292100, upload-time = "2025-09-17T16:33:56.353Z" }, - { url = "https://files.pythonhosted.org/packages/e8/95/7873cf2146577ef71d2a20bf553f12df865922a6f87b9e8ee1df04f01785/debugpy-1.8.17-cp313-cp313-win32.whl", hash = "sha256:e34ee844c2f17b18556b5bbe59e1e2ff4e86a00282d2a46edab73fd7f18f4a83", size = 5277002, upload-time = "2025-09-17T16:33:58.231Z" }, - { url = "https://files.pythonhosted.org/packages/46/11/18c79a1cee5ff539a94ec4aa290c1c069a5580fd5cfd2fb2e282f8e905da/debugpy-1.8.17-cp313-cp313-win_amd64.whl", hash = "sha256:6c5cd6f009ad4fca8e33e5238210dc1e5f42db07d4b6ab21ac7ffa904a196420", size = 5319047, upload-time = "2025-09-17T16:34:00.586Z" }, - { url = "https://files.pythonhosted.org/packages/de/45/115d55b2a9da6de812696064ceb505c31e952c5d89c4ed1d9bb983deec34/debugpy-1.8.17-cp314-cp314-macosx_15_0_universal2.whl", hash = "sha256:045290c010bcd2d82bc97aa2daf6837443cd52f6328592698809b4549babcee1", size = 2536899, upload-time = "2025-09-17T16:34:02.657Z" }, - { url = 
"https://files.pythonhosted.org/packages/5a/73/2aa00c7f1f06e997ef57dc9b23d61a92120bec1437a012afb6d176585197/debugpy-1.8.17-cp314-cp314-manylinux_2_34_x86_64.whl", hash = "sha256:b69b6bd9dba6a03632534cdf67c760625760a215ae289f7489a452af1031fe1f", size = 4268254, upload-time = "2025-09-17T16:34:04.486Z" }, - { url = "https://files.pythonhosted.org/packages/86/b5/ed3e65c63c68a6634e3ba04bd10255c8e46ec16ebed7d1c79e4816d8a760/debugpy-1.8.17-cp314-cp314-win32.whl", hash = "sha256:5c59b74aa5630f3a5194467100c3b3d1c77898f9ab27e3f7dc5d40fc2f122670", size = 5277203, upload-time = "2025-09-17T16:34:06.65Z" }, - { url = "https://files.pythonhosted.org/packages/b0/26/394276b71c7538445f29e792f589ab7379ae70fd26ff5577dfde71158e96/debugpy-1.8.17-cp314-cp314-win_amd64.whl", hash = "sha256:893cba7bb0f55161de4365584b025f7064e1f88913551bcd23be3260b231429c", size = 5318493, upload-time = "2025-09-17T16:34:08.483Z" }, - { url = "https://files.pythonhosted.org/packages/b0/d0/89247ec250369fc76db477720a26b2fce7ba079ff1380e4ab4529d2fe233/debugpy-1.8.17-py2.py3-none-any.whl", hash = "sha256:60c7dca6571efe660ccb7a9508d73ca14b8796c4ed484c2002abba714226cfef", size = 5283210, upload-time = "2025-09-17T16:34:25.835Z" }, -] - -[[package]] -name = "decorator" -version = "5.2.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/43/fa/6d96a0978d19e17b68d634497769987b16c8f4cd0a7a05048bec693caa6b/decorator-5.2.1.tar.gz", hash = "sha256:65f266143752f734b0a7cc83c46f4618af75b8c5911b00ccb61d0ac9b6da0360", size = 56711, upload-time = "2025-02-24T04:41:34.073Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/4e/8c/f3147f5c4b73e7550fe5f9352eaa956ae838d5c51eb58e7a25b9f3e2643b/decorator-5.2.1-py3-none-any.whl", hash = "sha256:d316bb415a2d9e2d2b3abcc4084c6502fc09240e292cd76a76afc106a1c8e04a", size = 9190, upload-time = "2025-02-24T04:41:32.565Z" }, -] - -[[package]] -name = "distro" -version = "1.9.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/fc/f8/98eea607f65de6527f8a2e8885fc8015d3e6f5775df186e443e0964a11c3/distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed", size = 60722, upload-time = "2023-12-24T09:54:32.31Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/12/b3/231ffd4ab1fc9d679809f356cebee130ac7daa00d6d6f3206dd4fd137e9e/distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2", size = 20277, upload-time = "2023-12-24T09:54:30.421Z" }, -] - -[[package]] -name = "docstring-parser" -version = "0.17.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/b2/9d/c3b43da9515bd270df0f80548d9944e389870713cc1fe2b8fb35fe2bcefd/docstring_parser-0.17.0.tar.gz", hash = "sha256:583de4a309722b3315439bb31d64ba3eebada841f2e2cee23b99df001434c912", size = 27442, upload-time = "2025-07-21T07:35:01.868Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/55/e2/2537ebcff11c1ee1ff17d8d0b6f4db75873e3b0fb32c2d4a2ee31ecb310a/docstring_parser-0.17.0-py3-none-any.whl", hash = "sha256:cf2569abd23dce8099b300f9b4fa8191e9582dda731fd533daf54c4551658708", size = 36896, upload-time = "2025-07-21T07:35:00.684Z" }, -] - -[[package]] -name = "envier" -version = "0.6.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = 
"https://files.pythonhosted.org/packages/19/e7/4fe4d3f6e21213cea9bcddc36ba60e6ae4003035f9ce8055e6a9f0322ddb/envier-0.6.1.tar.gz", hash = "sha256:3309a01bb3d8850c9e7a31a5166d5a836846db2faecb79b9cb32654dd50ca9f9", size = 10063, upload-time = "2024-10-22T09:56:47.226Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/56/e9/30493b1cc967f7c07869de4b2ab3929151a58e6bb04495015554d24b61db/envier-0.6.1-py3-none-any.whl", hash = "sha256:73609040a76be48bbcb97074d9969666484aa0de706183a6e9ef773156a8a6a9", size = 10638, upload-time = "2024-10-22T09:56:45.968Z" }, -] - -[[package]] -name = "executing" -version = "2.2.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/cc/28/c14e053b6762b1044f34a13aab6859bbf40456d37d23aa286ac24cfd9a5d/executing-2.2.1.tar.gz", hash = "sha256:3632cc370565f6648cc328b32435bd120a1e4ebb20c77e3fdde9a13cd1e533c4", size = 1129488, upload-time = "2025-09-01T09:48:10.866Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/c1/ea/53f2148663b321f21b5a606bd5f191517cf40b7072c0497d3c92c4a13b1e/executing-2.2.1-py2.py3-none-any.whl", hash = "sha256:760643d3452b4d777d295bb167ccc74c64a81df23fb5e08eff250c425a4b2017", size = 28317, upload-time = "2025-09-01T09:48:08.5Z" }, -] - -[[package]] -name = "fastapi" -version = "0.115.14" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "pydantic" }, - { name = "starlette" }, - { name = "typing-extensions" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/ca/53/8c38a874844a8b0fa10dd8adf3836ac154082cf88d3f22b544e9ceea0a15/fastapi-0.115.14.tar.gz", hash = "sha256:b1de15cdc1c499a4da47914db35d0e4ef8f1ce62b624e94e0e5824421df99739", size = 296263, upload-time = "2025-06-26T15:29:08.21Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/53/50/b1222562c6d270fea83e9c9075b8e8600b8479150a18e4516a6138b980d1/fastapi-0.115.14-py3-none-any.whl", hash = "sha256:6c0c8bf9420bd58f565e585036d971872472b4f7d3f6c73b698e10cffdefb3ca", size = 95514, upload-time = "2025-06-26T15:29:06.49Z" }, -] - -[[package]] -name = "fastjsonschema" -version = "2.21.2" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/20/b5/23b216d9d985a956623b6bd12d4086b60f0059b27799f23016af04a74ea1/fastjsonschema-2.21.2.tar.gz", hash = "sha256:b1eb43748041c880796cd077f1a07c3d94e93ae84bba5ed36800a33554ae05de", size = 374130, upload-time = "2025-08-14T18:49:36.666Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/cb/a8/20d0723294217e47de6d9e2e40fd4a9d2f7c4b6ef974babd482a59743694/fastjsonschema-2.21.2-py3-none-any.whl", hash = "sha256:1c797122d0a86c5cace2e54bf4e819c36223b552017172f32c5c024a6b77e463", size = 24024, upload-time = "2025-08-14T18:49:34.776Z" }, -] - -[[package]] -name = "fastuuid" -version = "0.13.5" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/15/80/3c16a1edad2e6cd82fbd15ac998cc1b881f478bf1f80ca717d941c441874/fastuuid-0.13.5.tar.gz", hash = "sha256:d4976821ab424d41542e1ea39bc828a9d454c3f8a04067c06fca123c5b95a1a1", size = 18255, upload-time = "2025-09-26T09:05:38.281Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/21/36/434f137c5970cac19e57834e1f7680e85301619d49891618c00666700c61/fastuuid-0.13.5-cp312-cp312-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:35fe8045e866bc6846f8de6fa05acb1de0c32478048484a995e96d31e21dff2a", size = 494638, upload-time = 
"2025-09-26T09:14:58.695Z" }, - { url = "https://files.pythonhosted.org/packages/ca/3c/083de2ac007b2b305523b9c006dba5051e5afd87a626ef1a39f76e2c6b82/fastuuid-0.13.5-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:02a460333f52d731a006d18a52ef6fcb2d295a1f5b1a5938d30744191b2f77b7", size = 253138, upload-time = "2025-09-26T09:13:33.283Z" }, - { url = "https://files.pythonhosted.org/packages/73/5e/630cffa1c8775db526e39e9e4c5c7db0c27be0786bb21ba82c912ae19f63/fastuuid-0.13.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:74b0e4f8c307b9f477a5d7284db4431ce53a3c1e3f4173db7a97db18564a6202", size = 244521, upload-time = "2025-09-26T09:14:40.682Z" }, - { url = "https://files.pythonhosted.org/packages/4d/51/55d78705f4fbdadf88fb40f382f508d6c7a4941ceddd7825fafebb4cc778/fastuuid-0.13.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6955a99ef455c2986f3851f4e0ccc35dec56ac1a7720f2b92e88a75d6684512e", size = 271557, upload-time = "2025-09-26T09:15:09.75Z" }, - { url = "https://files.pythonhosted.org/packages/6a/2b/1b89e90a8635e5587ccdbbeb169c590672ce7637880f2c047482a0359950/fastuuid-0.13.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f10c77b826738c1a27dcdaa92ea4dc1ec9d869748a99e1fde54f1379553d4854", size = 272334, upload-time = "2025-09-26T09:07:48.865Z" }, - { url = "https://files.pythonhosted.org/packages/0c/06/4c8207894eeb30414999e5c3f66ac039bc4003437eb4060d8a1bceb4cc6f/fastuuid-0.13.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bb25dccbeb249d16d5e664f65f17ebec05136821d5ef462c4110e3f76b86fb86", size = 290594, upload-time = "2025-09-26T09:12:54.124Z" }, - { url = "https://files.pythonhosted.org/packages/50/69/96d221931a31d77a47cc2487bdfacfb3091edfc2e7a04b1795df1aec05df/fastuuid-0.13.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:a5becc646a3eeafb76ce0a6783ba190cd182e3790a8b2c78ca9db2b5e87af952", size = 452835, upload-time = "2025-09-26T09:14:00.994Z" }, - { url = "https://files.pythonhosted.org/packages/25/ef/bf045f0a47dcec96247497ef3f7a31d86ebc074330e2dccc34b8dbc0468a/fastuuid-0.13.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:69b34363752d06e9bb0dbdf02ae391ec56ac948c6f2eb00be90dad68e80774b9", size = 468225, upload-time = "2025-09-26T09:13:38.585Z" }, - { url = "https://files.pythonhosted.org/packages/30/46/4817ab5a3778927155a4bde92540d4c4fa996161ec8b8e080c8928b0984e/fastuuid-0.13.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:57d0768afcad0eab8770c9b8cf904716bd3c547e8b9a4e755ee8a673b060a3a3", size = 444907, upload-time = "2025-09-26T09:14:30.163Z" }, - { url = "https://files.pythonhosted.org/packages/80/27/ab284117ce4dc9b356a7196bdbf220510285f201d27f1f078592cdc8187b/fastuuid-0.13.5-cp312-cp312-win32.whl", hash = "sha256:8ac6c6f5129d52eaa6ef9ea4b6e2f7c69468a053f3ab8e439661186b9c06bb85", size = 145415, upload-time = "2025-09-26T09:08:59.494Z" }, - { url = "https://files.pythonhosted.org/packages/f4/0c/f970a4222773b248931819f8940800b760283216ca3dda173ed027e94bdd/fastuuid-0.13.5-cp312-cp312-win_amd64.whl", hash = "sha256:ad630e97715beefef07ec37c9c162336e500400774e2c1cbe1a0df6f80d15b9a", size = 150840, upload-time = "2025-09-26T09:13:46.115Z" }, - { url = "https://files.pythonhosted.org/packages/4f/62/74fc53f6e04a4dc5b36c34e4e679f85a4c14eec800dcdb0f2c14b5442217/fastuuid-0.13.5-cp313-cp313-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:ea17dfd35e0e91920a35d91e65e5f9c9d1985db55ac4ff2f1667a0f61189cefa", size = 494678, upload-time = "2025-09-26T09:14:30.908Z" }, - 
{ url = "https://files.pythonhosted.org/packages/09/ba/f28b9b7045738a8bfccfb9cd6aff4b91fce2669e6b383a48b0694ee9b3ff/fastuuid-0.13.5-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:be6ad91e5fefbcc2a4b478858a2715e386d405834ea3ae337c3b6b95cc0e47d6", size = 253162, upload-time = "2025-09-26T09:13:35.879Z" }, - { url = "https://files.pythonhosted.org/packages/b1/18/13fac89cb4c9f0cd7e81a9154a77ecebcc95d2b03477aa91d4d50f7227ee/fastuuid-0.13.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:ea6df13a306aab3e0439d58c312ff1e6f4f07f09f667579679239b4a6121f64a", size = 244546, upload-time = "2025-09-26T09:14:58.13Z" }, - { url = "https://files.pythonhosted.org/packages/04/bf/9691167804d59411cc4269841df949f6dd5e76452ab10dcfcd1dbe04c5bc/fastuuid-0.13.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2354c1996d3cf12dc2ba3752e2c4d6edc46e1a38c63893146777b1939f3062d4", size = 271528, upload-time = "2025-09-26T09:14:48.996Z" }, - { url = "https://files.pythonhosted.org/packages/a9/b5/7a75a03d1c7aa0b6d573032fcca39391f0aef7f2caabeeb45a672bc0bd3c/fastuuid-0.13.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a6cf9b7469fc26d1f9b1c43ac4b192e219e85b88fdf81d71aa755a6c08c8a817", size = 272292, upload-time = "2025-09-26T09:14:42.82Z" }, - { url = "https://files.pythonhosted.org/packages/c0/db/fa0f16cbf76e6880599533af4ef01bb586949c5320612e9d884eff13e603/fastuuid-0.13.5-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:92ba539170097b9047551375f1ca09d8d2b4aefcc79eeae3e1c43fe49b42072e", size = 290466, upload-time = "2025-09-26T09:08:33.161Z" }, - { url = "https://files.pythonhosted.org/packages/1e/02/6b8c45bfbc8500994dd94edba7f59555f9683c4d8c9a164ae1d25d03c7c7/fastuuid-0.13.5-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:dbb81d05617bc2970765c1ad82db7e8716f6a2b7a361a14b83de5b9240ade448", size = 452838, upload-time = "2025-09-26T09:13:44.747Z" }, - { url = "https://files.pythonhosted.org/packages/27/12/85d95a84f265b888e8eb9f9e2b5aaf331e8be60c0a7060146364b3544b6a/fastuuid-0.13.5-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:d973bd6bf9d754d3cca874714ac0a6b22a47f239fb3d3c8687569db05aac3471", size = 468149, upload-time = "2025-09-26T09:13:18.712Z" }, - { url = "https://files.pythonhosted.org/packages/ad/da/dd9a137e9ea707e883c92470113a432233482ec9ad3e9b99c4defc4904e6/fastuuid-0.13.5-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:e725ceef79486423f05ee657634d4b4c1ca5fb2c8a94e0708f5d6356a83f2a83", size = 444933, upload-time = "2025-09-26T09:14:09.494Z" }, - { url = "https://files.pythonhosted.org/packages/12/f4/ab363d7f4ac3989691e2dc5ae2d8391cfb0b4169e52ef7fa0ac363e936f0/fastuuid-0.13.5-cp313-cp313-win32.whl", hash = "sha256:a1c430a332ead0b2674f1ef71b17f43b8139ec5a4201182766a21f131a31e021", size = 145462, upload-time = "2025-09-26T09:14:15.105Z" }, - { url = "https://files.pythonhosted.org/packages/aa/8a/52eb77d9c294a54caa0d2d8cc9f906207aa6d916a22de963687ab6db8b86/fastuuid-0.13.5-cp313-cp313-win_amd64.whl", hash = "sha256:241fdd362fd96e6b337db62a65dd7cb3dfac20adf854573247a47510e192db6f", size = 150923, upload-time = "2025-09-26T09:13:03.923Z" }, -] - -[[package]] -name = "filelock" -version = "3.19.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/40/bb/0ab3e58d22305b6f5440629d20683af28959bf793d98d11950e305c1c326/filelock-3.19.1.tar.gz", hash = "sha256:66eda1888b0171c998b35be2bcc0f6d75c388a7ce20c3f3f37aa8e96c2dddf58", size = 17687, upload-time = 
"2025-08-14T16:56:03.016Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/42/14/42b2651a2f46b022ccd948bca9f2d5af0fd8929c4eec235b8d6d844fbe67/filelock-3.19.1-py3-none-any.whl", hash = "sha256:d38e30481def20772f5baf097c122c3babc4fcdb7e14e57049eb9d88c6dc017d", size = 15988, upload-time = "2025-08-14T16:56:01.633Z" }, -] - -[[package]] -name = "frozenlist" -version = "1.7.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/79/b1/b64018016eeb087db503b038296fd782586432b9c077fc5c7839e9cb6ef6/frozenlist-1.7.0.tar.gz", hash = "sha256:2e310d81923c2437ea8670467121cc3e9b0f76d3043cc1d2331d56c7fb7a3a8f", size = 45078, upload-time = "2025-06-09T23:02:35.538Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/ef/a2/c8131383f1e66adad5f6ecfcce383d584ca94055a34d683bbb24ac5f2f1c/frozenlist-1.7.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:3dbf9952c4bb0e90e98aec1bd992b3318685005702656bc6f67c1a32b76787f2", size = 81424, upload-time = "2025-06-09T23:00:42.24Z" }, - { url = "https://files.pythonhosted.org/packages/4c/9d/02754159955088cb52567337d1113f945b9e444c4960771ea90eb73de8db/frozenlist-1.7.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:1f5906d3359300b8a9bb194239491122e6cf1444c2efb88865426f170c262cdb", size = 47952, upload-time = "2025-06-09T23:00:43.481Z" }, - { url = "https://files.pythonhosted.org/packages/01/7a/0046ef1bd6699b40acd2067ed6d6670b4db2f425c56980fa21c982c2a9db/frozenlist-1.7.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3dabd5a8f84573c8d10d8859a50ea2dec01eea372031929871368c09fa103478", size = 46688, upload-time = "2025-06-09T23:00:44.793Z" }, - { url = "https://files.pythonhosted.org/packages/d6/a2/a910bafe29c86997363fb4c02069df4ff0b5bc39d33c5198b4e9dd42d8f8/frozenlist-1.7.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa57daa5917f1738064f302bf2626281a1cb01920c32f711fbc7bc36111058a8", size = 243084, upload-time = "2025-06-09T23:00:46.125Z" }, - { url = "https://files.pythonhosted.org/packages/64/3e/5036af9d5031374c64c387469bfcc3af537fc0f5b1187d83a1cf6fab1639/frozenlist-1.7.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:c193dda2b6d49f4c4398962810fa7d7c78f032bf45572b3e04dd5249dff27e08", size = 233524, upload-time = "2025-06-09T23:00:47.73Z" }, - { url = "https://files.pythonhosted.org/packages/06/39/6a17b7c107a2887e781a48ecf20ad20f1c39d94b2a548c83615b5b879f28/frozenlist-1.7.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bfe2b675cf0aaa6d61bf8fbffd3c274b3c9b7b1623beb3809df8a81399a4a9c4", size = 248493, upload-time = "2025-06-09T23:00:49.742Z" }, - { url = "https://files.pythonhosted.org/packages/be/00/711d1337c7327d88c44d91dd0f556a1c47fb99afc060ae0ef66b4d24793d/frozenlist-1.7.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8fc5d5cda37f62b262405cf9652cf0856839c4be8ee41be0afe8858f17f4c94b", size = 244116, upload-time = "2025-06-09T23:00:51.352Z" }, - { url = "https://files.pythonhosted.org/packages/24/fe/74e6ec0639c115df13d5850e75722750adabdc7de24e37e05a40527ca539/frozenlist-1.7.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b0d5ce521d1dd7d620198829b87ea002956e4319002ef0bc8d3e6d045cb4646e", size = 224557, upload-time = "2025-06-09T23:00:52.855Z" }, - { url = 
"https://files.pythonhosted.org/packages/8d/db/48421f62a6f77c553575201e89048e97198046b793f4a089c79a6e3268bd/frozenlist-1.7.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:488d0a7d6a0008ca0db273c542098a0fa9e7dfaa7e57f70acef43f32b3f69dca", size = 241820, upload-time = "2025-06-09T23:00:54.43Z" }, - { url = "https://files.pythonhosted.org/packages/1d/fa/cb4a76bea23047c8462976ea7b7a2bf53997a0ca171302deae9d6dd12096/frozenlist-1.7.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:15a7eaba63983d22c54d255b854e8108e7e5f3e89f647fc854bd77a237e767df", size = 236542, upload-time = "2025-06-09T23:00:56.409Z" }, - { url = "https://files.pythonhosted.org/packages/5d/32/476a4b5cfaa0ec94d3f808f193301debff2ea42288a099afe60757ef6282/frozenlist-1.7.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:1eaa7e9c6d15df825bf255649e05bd8a74b04a4d2baa1ae46d9c2d00b2ca2cb5", size = 249350, upload-time = "2025-06-09T23:00:58.468Z" }, - { url = "https://files.pythonhosted.org/packages/8d/ba/9a28042f84a6bf8ea5dbc81cfff8eaef18d78b2a1ad9d51c7bc5b029ad16/frozenlist-1.7.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:e4389e06714cfa9d47ab87f784a7c5be91d3934cd6e9a7b85beef808297cc025", size = 225093, upload-time = "2025-06-09T23:01:00.015Z" }, - { url = "https://files.pythonhosted.org/packages/bc/29/3a32959e68f9cf000b04e79ba574527c17e8842e38c91d68214a37455786/frozenlist-1.7.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:73bd45e1488c40b63fe5a7df892baf9e2a4d4bb6409a2b3b78ac1c6236178e01", size = 245482, upload-time = "2025-06-09T23:01:01.474Z" }, - { url = "https://files.pythonhosted.org/packages/80/e8/edf2f9e00da553f07f5fa165325cfc302dead715cab6ac8336a5f3d0adc2/frozenlist-1.7.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:99886d98e1643269760e5fe0df31e5ae7050788dd288947f7f007209b8c33f08", size = 249590, upload-time = "2025-06-09T23:01:02.961Z" }, - { url = "https://files.pythonhosted.org/packages/1c/80/9a0eb48b944050f94cc51ee1c413eb14a39543cc4f760ed12657a5a3c45a/frozenlist-1.7.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:290a172aae5a4c278c6da8a96222e6337744cd9c77313efe33d5670b9f65fc43", size = 237785, upload-time = "2025-06-09T23:01:05.095Z" }, - { url = "https://files.pythonhosted.org/packages/f3/74/87601e0fb0369b7a2baf404ea921769c53b7ae00dee7dcfe5162c8c6dbf0/frozenlist-1.7.0-cp312-cp312-win32.whl", hash = "sha256:426c7bc70e07cfebc178bc4c2bf2d861d720c4fff172181eeb4a4c41d4ca2ad3", size = 39487, upload-time = "2025-06-09T23:01:06.54Z" }, - { url = "https://files.pythonhosted.org/packages/0b/15/c026e9a9fc17585a9d461f65d8593d281fedf55fbf7eb53f16c6df2392f9/frozenlist-1.7.0-cp312-cp312-win_amd64.whl", hash = "sha256:563b72efe5da92e02eb68c59cb37205457c977aa7a449ed1b37e6939e5c47c6a", size = 43874, upload-time = "2025-06-09T23:01:07.752Z" }, - { url = "https://files.pythonhosted.org/packages/24/90/6b2cebdabdbd50367273c20ff6b57a3dfa89bd0762de02c3a1eb42cb6462/frozenlist-1.7.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ee80eeda5e2a4e660651370ebffd1286542b67e268aa1ac8d6dbe973120ef7ee", size = 79791, upload-time = "2025-06-09T23:01:09.368Z" }, - { url = "https://files.pythonhosted.org/packages/83/2e/5b70b6a3325363293fe5fc3ae74cdcbc3e996c2a11dde2fd9f1fb0776d19/frozenlist-1.7.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:d1a81c85417b914139e3a9b995d4a1c84559afc839a93cf2cb7f15e6e5f6ed2d", size = 47165, upload-time = "2025-06-09T23:01:10.653Z" }, - { url = 
"https://files.pythonhosted.org/packages/f4/25/a0895c99270ca6966110f4ad98e87e5662eab416a17e7fd53c364bf8b954/frozenlist-1.7.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:cbb65198a9132ebc334f237d7b0df163e4de83fb4f2bdfe46c1e654bdb0c5d43", size = 45881, upload-time = "2025-06-09T23:01:12.296Z" }, - { url = "https://files.pythonhosted.org/packages/19/7c/71bb0bbe0832793c601fff68cd0cf6143753d0c667f9aec93d3c323f4b55/frozenlist-1.7.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dab46c723eeb2c255a64f9dc05b8dd601fde66d6b19cdb82b2e09cc6ff8d8b5d", size = 232409, upload-time = "2025-06-09T23:01:13.641Z" }, - { url = "https://files.pythonhosted.org/packages/c0/45/ed2798718910fe6eb3ba574082aaceff4528e6323f9a8570be0f7028d8e9/frozenlist-1.7.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:6aeac207a759d0dedd2e40745575ae32ab30926ff4fa49b1635def65806fddee", size = 225132, upload-time = "2025-06-09T23:01:15.264Z" }, - { url = "https://files.pythonhosted.org/packages/ba/e2/8417ae0f8eacb1d071d4950f32f229aa6bf68ab69aab797b72a07ea68d4f/frozenlist-1.7.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bd8c4e58ad14b4fa7802b8be49d47993182fdd4023393899632c88fd8cd994eb", size = 237638, upload-time = "2025-06-09T23:01:16.752Z" }, - { url = "https://files.pythonhosted.org/packages/f8/b7/2ace5450ce85f2af05a871b8c8719b341294775a0a6c5585d5e6170f2ce7/frozenlist-1.7.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:04fb24d104f425da3540ed83cbfc31388a586a7696142004c577fa61c6298c3f", size = 233539, upload-time = "2025-06-09T23:01:18.202Z" }, - { url = "https://files.pythonhosted.org/packages/46/b9/6989292c5539553dba63f3c83dc4598186ab2888f67c0dc1d917e6887db6/frozenlist-1.7.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6a5c505156368e4ea6b53b5ac23c92d7edc864537ff911d2fb24c140bb175e60", size = 215646, upload-time = "2025-06-09T23:01:19.649Z" }, - { url = "https://files.pythonhosted.org/packages/72/31/bc8c5c99c7818293458fe745dab4fd5730ff49697ccc82b554eb69f16a24/frozenlist-1.7.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8bd7eb96a675f18aa5c553eb7ddc24a43c8c18f22e1f9925528128c052cdbe00", size = 232233, upload-time = "2025-06-09T23:01:21.175Z" }, - { url = "https://files.pythonhosted.org/packages/59/52/460db4d7ba0811b9ccb85af996019f5d70831f2f5f255f7cc61f86199795/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:05579bf020096fe05a764f1f84cd104a12f78eaab68842d036772dc6d4870b4b", size = 227996, upload-time = "2025-06-09T23:01:23.098Z" }, - { url = "https://files.pythonhosted.org/packages/ba/c9/f4b39e904c03927b7ecf891804fd3b4df3db29b9e487c6418e37988d6e9d/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:376b6222d114e97eeec13d46c486facd41d4f43bab626b7c3f6a8b4e81a5192c", size = 242280, upload-time = "2025-06-09T23:01:24.808Z" }, - { url = "https://files.pythonhosted.org/packages/b8/33/3f8d6ced42f162d743e3517781566b8481322be321b486d9d262adf70bfb/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:0aa7e176ebe115379b5b1c95b4096fb1c17cce0847402e227e712c27bdb5a949", size = 217717, upload-time = "2025-06-09T23:01:26.28Z" }, - { url = "https://files.pythonhosted.org/packages/3e/e8/ad683e75da6ccef50d0ab0c2b2324b32f84fc88ceee778ed79b8e2d2fe2e/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = 
"sha256:3fbba20e662b9c2130dc771e332a99eff5da078b2b2648153a40669a6d0e36ca", size = 236644, upload-time = "2025-06-09T23:01:27.887Z" }, - { url = "https://files.pythonhosted.org/packages/b2/14/8d19ccdd3799310722195a72ac94ddc677541fb4bef4091d8e7775752360/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:f3f4410a0a601d349dd406b5713fec59b4cee7e71678d5b17edda7f4655a940b", size = 238879, upload-time = "2025-06-09T23:01:29.524Z" }, - { url = "https://files.pythonhosted.org/packages/ce/13/c12bf657494c2fd1079a48b2db49fa4196325909249a52d8f09bc9123fd7/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e2cdfaaec6a2f9327bf43c933c0319a7c429058e8537c508964a133dffee412e", size = 232502, upload-time = "2025-06-09T23:01:31.287Z" }, - { url = "https://files.pythonhosted.org/packages/d7/8b/e7f9dfde869825489382bc0d512c15e96d3964180c9499efcec72e85db7e/frozenlist-1.7.0-cp313-cp313-win32.whl", hash = "sha256:5fc4df05a6591c7768459caba1b342d9ec23fa16195e744939ba5914596ae3e1", size = 39169, upload-time = "2025-06-09T23:01:35.503Z" }, - { url = "https://files.pythonhosted.org/packages/35/89/a487a98d94205d85745080a37860ff5744b9820a2c9acbcdd9440bfddf98/frozenlist-1.7.0-cp313-cp313-win_amd64.whl", hash = "sha256:52109052b9791a3e6b5d1b65f4b909703984b770694d3eb64fad124c835d7cba", size = 43219, upload-time = "2025-06-09T23:01:36.784Z" }, - { url = "https://files.pythonhosted.org/packages/56/d5/5c4cf2319a49eddd9dd7145e66c4866bdc6f3dbc67ca3d59685149c11e0d/frozenlist-1.7.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:a6f86e4193bb0e235ef6ce3dde5cbabed887e0b11f516ce8a0f4d3b33078ec2d", size = 84345, upload-time = "2025-06-09T23:01:38.295Z" }, - { url = "https://files.pythonhosted.org/packages/a4/7d/ec2c1e1dc16b85bc9d526009961953df9cec8481b6886debb36ec9107799/frozenlist-1.7.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:82d664628865abeb32d90ae497fb93df398a69bb3434463d172b80fc25b0dd7d", size = 48880, upload-time = "2025-06-09T23:01:39.887Z" }, - { url = "https://files.pythonhosted.org/packages/69/86/f9596807b03de126e11e7d42ac91e3d0b19a6599c714a1989a4e85eeefc4/frozenlist-1.7.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:912a7e8375a1c9a68325a902f3953191b7b292aa3c3fb0d71a216221deca460b", size = 48498, upload-time = "2025-06-09T23:01:41.318Z" }, - { url = "https://files.pythonhosted.org/packages/5e/cb/df6de220f5036001005f2d726b789b2c0b65f2363b104bbc16f5be8084f8/frozenlist-1.7.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9537c2777167488d539bc5de2ad262efc44388230e5118868e172dd4a552b146", size = 292296, upload-time = "2025-06-09T23:01:42.685Z" }, - { url = "https://files.pythonhosted.org/packages/83/1f/de84c642f17c8f851a2905cee2dae401e5e0daca9b5ef121e120e19aa825/frozenlist-1.7.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:f34560fb1b4c3e30ba35fa9a13894ba39e5acfc5f60f57d8accde65f46cc5e74", size = 273103, upload-time = "2025-06-09T23:01:44.166Z" }, - { url = "https://files.pythonhosted.org/packages/88/3c/c840bfa474ba3fa13c772b93070893c6e9d5c0350885760376cbe3b6c1b3/frozenlist-1.7.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:acd03d224b0175f5a850edc104ac19040d35419eddad04e7cf2d5986d98427f1", size = 292869, upload-time = "2025-06-09T23:01:45.681Z" }, - { url = "https://files.pythonhosted.org/packages/a6/1c/3efa6e7d5a39a1d5ef0abeb51c48fb657765794a46cf124e5aca2c7a592c/frozenlist-1.7.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:f2038310bc582f3d6a09b3816ab01737d60bf7b1ec70f5356b09e84fb7408ab1", size = 291467, upload-time = "2025-06-09T23:01:47.234Z" }, - { url = "https://files.pythonhosted.org/packages/4f/00/d5c5e09d4922c395e2f2f6b79b9a20dab4b67daaf78ab92e7729341f61f6/frozenlist-1.7.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b8c05e4c8e5f36e5e088caa1bf78a687528f83c043706640a92cb76cd6999384", size = 266028, upload-time = "2025-06-09T23:01:48.819Z" }, - { url = "https://files.pythonhosted.org/packages/4e/27/72765be905619dfde25a7f33813ac0341eb6b076abede17a2e3fbfade0cb/frozenlist-1.7.0-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:765bb588c86e47d0b68f23c1bee323d4b703218037765dcf3f25c838c6fecceb", size = 284294, upload-time = "2025-06-09T23:01:50.394Z" }, - { url = "https://files.pythonhosted.org/packages/88/67/c94103a23001b17808eb7dd1200c156bb69fb68e63fcf0693dde4cd6228c/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:32dc2e08c67d86d0969714dd484fd60ff08ff81d1a1e40a77dd34a387e6ebc0c", size = 281898, upload-time = "2025-06-09T23:01:52.234Z" }, - { url = "https://files.pythonhosted.org/packages/42/34/a3e2c00c00f9e2a9db5653bca3fec306349e71aff14ae45ecc6d0951dd24/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:c0303e597eb5a5321b4de9c68e9845ac8f290d2ab3f3e2c864437d3c5a30cd65", size = 290465, upload-time = "2025-06-09T23:01:53.788Z" }, - { url = "https://files.pythonhosted.org/packages/bb/73/f89b7fbce8b0b0c095d82b008afd0590f71ccb3dee6eee41791cf8cd25fd/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:a47f2abb4e29b3a8d0b530f7c3598badc6b134562b1a5caee867f7c62fee51e3", size = 266385, upload-time = "2025-06-09T23:01:55.769Z" }, - { url = "https://files.pythonhosted.org/packages/cd/45/e365fdb554159462ca12df54bc59bfa7a9a273ecc21e99e72e597564d1ae/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:3d688126c242a6fabbd92e02633414d40f50bb6002fa4cf995a1d18051525657", size = 288771, upload-time = "2025-06-09T23:01:57.4Z" }, - { url = "https://files.pythonhosted.org/packages/00/11/47b6117002a0e904f004d70ec5194fe9144f117c33c851e3d51c765962d0/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:4e7e9652b3d367c7bd449a727dc79d5043f48b88d0cbfd4f9f1060cf2b414104", size = 288206, upload-time = "2025-06-09T23:01:58.936Z" }, - { url = "https://files.pythonhosted.org/packages/40/37/5f9f3c3fd7f7746082ec67bcdc204db72dad081f4f83a503d33220a92973/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:1a85e345b4c43db8b842cab1feb41be5cc0b10a1830e6295b69d7310f99becaf", size = 282620, upload-time = "2025-06-09T23:02:00.493Z" }, - { url = "https://files.pythonhosted.org/packages/0b/31/8fbc5af2d183bff20f21aa743b4088eac4445d2bb1cdece449ae80e4e2d1/frozenlist-1.7.0-cp313-cp313t-win32.whl", hash = "sha256:3a14027124ddb70dfcee5148979998066897e79f89f64b13328595c4bdf77c81", size = 43059, upload-time = "2025-06-09T23:02:02.072Z" }, - { url = "https://files.pythonhosted.org/packages/bb/ed/41956f52105b8dbc26e457c5705340c67c8cc2b79f394b79bffc09d0e938/frozenlist-1.7.0-cp313-cp313t-win_amd64.whl", hash = "sha256:3bf8010d71d4507775f658e9823210b7427be36625b387221642725b515dcf3e", size = 47516, upload-time = "2025-06-09T23:02:03.779Z" }, - { url = "https://files.pythonhosted.org/packages/ee/45/b82e3c16be2182bff01179db177fe144d58b5dc787a7d4492c6ed8b9317f/frozenlist-1.7.0-py3-none-any.whl", hash = 
"sha256:9a5af342e34f7e97caf8c995864c7a396418ae2859cc6fdf1b1073020d516a7e", size = 13106, upload-time = "2025-06-09T23:02:34.204Z" }, -] - -[[package]] -name = "fsspec" -version = "2025.9.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/de/e0/bab50af11c2d75c9c4a2a26a5254573c0bd97cea152254401510950486fa/fsspec-2025.9.0.tar.gz", hash = "sha256:19fd429483d25d28b65ec68f9f4adc16c17ea2c7c7bf54ec61360d478fb19c19", size = 304847, upload-time = "2025-09-02T19:10:49.215Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/47/71/70db47e4f6ce3e5c37a607355f80da8860a33226be640226ac52cb05ef2e/fsspec-2025.9.0-py3-none-any.whl", hash = "sha256:530dc2a2af60a414a832059574df4a6e10cce927f6f4a78209390fe38955cfb7", size = 199289, upload-time = "2025-09-02T19:10:47.708Z" }, -] - -[[package]] -name = "google-auth" -version = "2.41.1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "cachetools" }, - { name = "pyasn1-modules" }, - { name = "rsa" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/a8/af/5129ce5b2f9688d2fa49b463e544972a7c82b0fdb50980dafee92e121d9f/google_auth-2.41.1.tar.gz", hash = "sha256:b76b7b1f9e61f0cb7e88870d14f6a94aeef248959ef6992670efee37709cbfd2", size = 292284, upload-time = "2025-09-30T22:51:26.363Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/be/a4/7319a2a8add4cc352be9e3efeff5e2aacee917c85ca2fa1647e29089983c/google_auth-2.41.1-py2.py3-none-any.whl", hash = "sha256:754843be95575b9a19c604a848a41be03f7f2afd8c019f716dc1f51ee41c639d", size = 221302, upload-time = "2025-09-30T22:51:24.212Z" }, -] - -[[package]] -name = "griffe" -version = "1.14.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "colorama" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/ec/d7/6c09dd7ce4c7837e4cdb11dce980cb45ae3cd87677298dc3b781b6bce7d3/griffe-1.14.0.tar.gz", hash = "sha256:9d2a15c1eca966d68e00517de5d69dd1bc5c9f2335ef6c1775362ba5b8651a13", size = 424684, upload-time = "2025-09-05T15:02:29.167Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/2a/b1/9ff6578d789a89812ff21e4e0f80ffae20a65d5dd84e7a17873fe3b365be/griffe-1.14.0-py3-none-any.whl", hash = "sha256:0e9d52832cccf0f7188cfe585ba962d2674b241c01916d780925df34873bceb0", size = 144439, upload-time = "2025-09-05T15:02:27.511Z" }, -] - -[[package]] -name = "h11" -version = "0.16.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/01/ee/02a2c011bdab74c6fb3c75474d40b3052059d95df7e73351460c8588d963/h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1", size = 101250, upload-time = "2025-04-24T03:35:25.427Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/04/4b/29cac41a4d98d144bf5f6d33995617b185d14b22401f75ca86f384e87ff1/h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86", size = 37515, upload-time = "2025-04-24T03:35:24.344Z" }, -] - -[[package]] -name = "hf-xet" -version = "1.1.10" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/74/31/feeddfce1748c4a233ec1aa5b7396161c07ae1aa9b7bdbc9a72c3c7dd768/hf_xet-1.1.10.tar.gz", hash = "sha256:408aef343800a2102374a883f283ff29068055c111f003ff840733d3b715bb97", size = 487910, upload-time = "2025-09-12T20:10:27.12Z" } -wheels = [ - { url = 
"https://files.pythonhosted.org/packages/f7/a2/343e6d05de96908366bdc0081f2d8607d61200be2ac802769c4284cc65bd/hf_xet-1.1.10-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:686083aca1a6669bc85c21c0563551cbcdaa5cf7876a91f3d074a030b577231d", size = 2761466, upload-time = "2025-09-12T20:10:22.836Z" }, - { url = "https://files.pythonhosted.org/packages/31/f9/6215f948ac8f17566ee27af6430ea72045e0418ce757260248b483f4183b/hf_xet-1.1.10-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:71081925383b66b24eedff3013f8e6bbd41215c3338be4b94ba75fd75b21513b", size = 2623807, upload-time = "2025-09-12T20:10:21.118Z" }, - { url = "https://files.pythonhosted.org/packages/15/07/86397573efefff941e100367bbda0b21496ffcdb34db7ab51912994c32a2/hf_xet-1.1.10-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b6bceb6361c80c1cc42b5a7b4e3efd90e64630bcf11224dcac50ef30a47e435", size = 3186960, upload-time = "2025-09-12T20:10:19.336Z" }, - { url = "https://files.pythonhosted.org/packages/01/a7/0b2e242b918cc30e1f91980f3c4b026ff2eedaf1e2ad96933bca164b2869/hf_xet-1.1.10-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:eae7c1fc8a664e54753ffc235e11427ca61f4b0477d757cc4eb9ae374b69f09c", size = 3087167, upload-time = "2025-09-12T20:10:17.255Z" }, - { url = "https://files.pythonhosted.org/packages/4a/25/3e32ab61cc7145b11eee9d745988e2f0f4fafda81b25980eebf97d8cff15/hf_xet-1.1.10-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:0a0005fd08f002180f7a12d4e13b22be277725bc23ed0529f8add5c7a6309c06", size = 3248612, upload-time = "2025-09-12T20:10:24.093Z" }, - { url = "https://files.pythonhosted.org/packages/2c/3d/ab7109e607ed321afaa690f557a9ada6d6d164ec852fd6bf9979665dc3d6/hf_xet-1.1.10-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:f900481cf6e362a6c549c61ff77468bd59d6dd082f3170a36acfef2eb6a6793f", size = 3353360, upload-time = "2025-09-12T20:10:25.563Z" }, - { url = "https://files.pythonhosted.org/packages/ee/0e/471f0a21db36e71a2f1752767ad77e92d8cde24e974e03d662931b1305ec/hf_xet-1.1.10-cp37-abi3-win_amd64.whl", hash = "sha256:5f54b19cc347c13235ae7ee98b330c26dd65ef1df47e5316ffb1e87713ca7045", size = 2804691, upload-time = "2025-09-12T20:10:28.433Z" }, -] - -[[package]] -name = "httpcore" -version = "1.0.9" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "certifi" }, - { name = "h11" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/06/94/82699a10bca87a5556c9c59b5963f2d039dbd239f25bc2a63907a05a14cb/httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8", size = 85484, upload-time = "2025-04-24T22:06:22.219Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/7e/f5/f66802a942d491edb555dd61e3a9961140fd64c90bce1eafd741609d334d/httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55", size = 78784, upload-time = "2025-04-24T22:06:20.566Z" }, -] - -[[package]] -name = "httpx" -version = "0.27.2" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "anyio" }, - { name = "certifi" }, - { name = "httpcore" }, - { name = "idna" }, - { name = "sniffio" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/78/82/08f8c936781f67d9e6b9eeb8a0c8b4e406136ea4c3d1f89a5db71d42e0e6/httpx-0.27.2.tar.gz", hash = "sha256:f7c2be1d2f3c3c3160d441802406b206c2b76f5947b11115e6df10c6c65e66c2", size = 144189, upload-time = "2024-08-27T12:54:01.334Z" } -wheels = [ - { url = 
"https://files.pythonhosted.org/packages/56/95/9377bcb415797e44274b51d46e3249eba641711cf3348050f76ee7b15ffc/httpx-0.27.2-py3-none-any.whl", hash = "sha256:7bb2708e112d8fdd7829cd4243970f0c223274051cb35ee80c03301ee29a3df0", size = 76395, upload-time = "2024-08-27T12:53:59.653Z" }, -] - -[[package]] -name = "httpx-aiohttp" -version = "0.1.9" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "aiohttp" }, - { name = "httpx" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/d8/f2/9a86ce9bc48cf57dabb3a3160dfed26d8bbe5a2478a51f9d1dbf89f2f1fc/httpx_aiohttp-0.1.9.tar.gz", hash = "sha256:4ee8b22e6f2e7c80cd03be29eff98bfe7d89bd77f021ce0b578ee76b73b4bfe6", size = 206023, upload-time = "2025-10-15T08:52:57.475Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/a1/db/5cfa8254a86c34a1ab7fe0dbec9f81bb5ebd831cbdd65aa4be4f37027804/httpx_aiohttp-0.1.9-py3-none-any.whl", hash = "sha256:3dc2845568b07742588710fcf3d72db2cbcdf2acc93376edf85f789c4d8e5fda", size = 6180, upload-time = "2025-10-15T08:52:56.521Z" }, -] - -[[package]] -name = "httpx-sse" -version = "0.4.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/6e/fa/66bd985dd0b7c109a3bcb89272ee0bfb7e2b4d06309ad7b38ff866734b2a/httpx_sse-0.4.1.tar.gz", hash = "sha256:8f44d34414bc7b21bf3602713005c5df4917884f76072479b21f68befa4ea26e", size = 12998, upload-time = "2025-06-24T13:21:05.71Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/25/0a/6269e3473b09aed2dab8aa1a600c70f31f00ae1349bee30658f7e358a159/httpx_sse-0.4.1-py3-none-any.whl", hash = "sha256:cba42174344c3a5b06f255ce65b350880f962d99ead85e776f23c6618a377a37", size = 8054, upload-time = "2025-06-24T13:21:04.772Z" }, -] - -[[package]] -name = "huggingface-hub" -version = "0.35.3" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "filelock" }, - { name = "fsspec" }, - { name = "hf-xet", marker = "platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'arm64' or platform_machine == 'x86_64'" }, - { name = "packaging" }, - { name = "pyyaml" }, - { name = "requests" }, - { name = "tqdm" }, - { name = "typing-extensions" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/10/7e/a0a97de7c73671863ca6b3f61fa12518caf35db37825e43d63a70956738c/huggingface_hub-0.35.3.tar.gz", hash = "sha256:350932eaa5cc6a4747efae85126ee220e4ef1b54e29d31c3b45c5612ddf0b32a", size = 461798, upload-time = "2025-09-29T14:29:58.625Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/31/a0/651f93d154cb72323358bf2bbae3e642bdb5d2f1bfc874d096f7cb159fa0/huggingface_hub-0.35.3-py3-none-any.whl", hash = "sha256:0e3a01829c19d86d03793e4577816fe3bdfc1602ac62c7fb220d593d351224ba", size = 564262, upload-time = "2025-09-29T14:29:55.813Z" }, -] - -[[package]] -name = "idna" -version = "3.10" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490, upload-time = "2024-09-15T18:07:39.745Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442, upload-time = "2024-09-15T18:07:37.964Z" }, -] - -[[package]] -name = 
"importlib-metadata" -version = "8.7.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "zipp" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/76/66/650a33bd90f786193e4de4b3ad86ea60b53c89b669a5c7be931fac31cdb0/importlib_metadata-8.7.0.tar.gz", hash = "sha256:d13b81ad223b890aa16c5471f2ac3056cf76c5f10f82d6f9292f0b415f389000", size = 56641, upload-time = "2025-04-27T15:29:01.736Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/20/b0/36bd937216ec521246249be3bf9855081de4c5e06a0c9b4219dbeda50373/importlib_metadata-8.7.0-py3-none-any.whl", hash = "sha256:e5dd1551894c77868a30651cef00984d50e1002d06942a7101d34870c5f02afd", size = 27656, upload-time = "2025-04-27T15:29:00.214Z" }, -] - -[[package]] -name = "iniconfig" -version = "2.1.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/f2/97/ebf4da567aa6827c909642694d71c9fcf53e5b504f2d96afea02718862f3/iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7", size = 4793, upload-time = "2025-03-19T20:09:59.721Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/2c/e1/e6716421ea10d38022b952c159d5161ca1193197fb744506875fbb87ea7b/iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760", size = 6050, upload-time = "2025-03-19T20:10:01.071Z" }, -] - -[[package]] -name = "ipykernel" -version = "6.30.1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "appnope", marker = "sys_platform == 'darwin'" }, - { name = "comm" }, - { name = "debugpy" }, - { name = "ipython" }, - { name = "jupyter-client" }, - { name = "jupyter-core" }, - { name = "matplotlib-inline" }, - { name = "nest-asyncio" }, - { name = "packaging" }, - { name = "psutil" }, - { name = "pyzmq" }, - { name = "tornado" }, - { name = "traitlets" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/bb/76/11082e338e0daadc89c8ff866185de11daf67d181901038f9e139d109761/ipykernel-6.30.1.tar.gz", hash = "sha256:6abb270161896402e76b91394fcdce5d1be5d45f456671e5080572f8505be39b", size = 166260, upload-time = "2025-08-04T15:47:35.018Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/fc/c7/b445faca8deb954fe536abebff4ece5b097b923de482b26e78448c89d1dd/ipykernel-6.30.1-py3-none-any.whl", hash = "sha256:aa6b9fb93dca949069d8b85b6c79b2518e32ac583ae9c7d37c51d119e18b3fb4", size = 117484, upload-time = "2025-08-04T15:47:32.622Z" }, -] - -[[package]] -name = "ipython" -version = "9.6.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "colorama", marker = "sys_platform == 'win32'" }, - { name = "decorator" }, - { name = "ipython-pygments-lexers" }, - { name = "jedi" }, - { name = "matplotlib-inline" }, - { name = "pexpect", marker = "sys_platform != 'emscripten' and sys_platform != 'win32'" }, - { name = "prompt-toolkit" }, - { name = "pygments" }, - { name = "stack-data" }, - { name = "traitlets" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/2a/34/29b18c62e39ee2f7a6a3bba7efd952729d8aadd45ca17efc34453b717665/ipython-9.6.0.tar.gz", hash = "sha256:5603d6d5d356378be5043e69441a072b50a5b33b4503428c77b04cb8ce7bc731", size = 4396932, upload-time = "2025-09-29T10:55:53.948Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/48/c5/d5e07995077e48220269c28a221e168c91123ad5ceee44d548f54a057fc0/ipython-9.6.0-py3-none-any.whl", hash = 
"sha256:5f77efafc886d2f023442479b8149e7d86547ad0a979e9da9f045d252f648196", size = 616170, upload-time = "2025-09-29T10:55:47.676Z" }, -] - -[[package]] -name = "ipython-pygments-lexers" -version = "1.1.1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "pygments" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/ef/4c/5dd1d8af08107f88c7f741ead7a40854b8ac24ddf9ae850afbcf698aa552/ipython_pygments_lexers-1.1.1.tar.gz", hash = "sha256:09c0138009e56b6854f9535736f4171d855c8c08a563a0dcd8022f78355c7e81", size = 8393, upload-time = "2025-01-17T11:24:34.505Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/d9/33/1f075bf72b0b747cb3288d011319aaf64083cf2efef8354174e3ed4540e2/ipython_pygments_lexers-1.1.1-py3-none-any.whl", hash = "sha256:a9462224a505ade19a605f71f8fa63c2048833ce50abc86768a0d81d876dc81c", size = 8074, upload-time = "2025-01-17T11:24:33.271Z" }, -] - -[[package]] -name = "ipywidgets" -version = "8.1.7" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "comm" }, - { name = "ipython" }, - { name = "jupyterlab-widgets" }, - { name = "traitlets" }, - { name = "widgetsnbextension" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/3e/48/d3dbac45c2814cb73812f98dd6b38bbcc957a4e7bb31d6ea9c03bf94ed87/ipywidgets-8.1.7.tar.gz", hash = "sha256:15f1ac050b9ccbefd45dccfbb2ef6bed0029d8278682d569d71b8dd96bee0376", size = 116721, upload-time = "2025-05-05T12:42:03.489Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/58/6a/9166369a2f092bd286d24e6307de555d63616e8ddb373ebad2b5635ca4cd/ipywidgets-8.1.7-py3-none-any.whl", hash = "sha256:764f2602d25471c213919b8a1997df04bef869251db4ca8efba1b76b1bd9f7bb", size = 139806, upload-time = "2025-05-05T12:41:56.833Z" }, -] - -[[package]] -name = "jedi" -version = "0.19.2" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "parso" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/72/3a/79a912fbd4d8dd6fbb02bf69afd3bb72cf0c729bb3063c6f4498603db17a/jedi-0.19.2.tar.gz", hash = "sha256:4770dc3de41bde3966b02eb84fbcf557fb33cce26ad23da12c742fb50ecb11f0", size = 1231287, upload-time = "2024-11-11T01:41:42.873Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/c0/5a/9cac0c82afec3d09ccd97c8b6502d48f165f9124db81b4bcb90b4af974ee/jedi-0.19.2-py2.py3-none-any.whl", hash = "sha256:a8ef22bde8490f57fe5c7681a3c83cb58874daf72b4784de3cce5b6ef6edb5b9", size = 1572278, upload-time = "2024-11-11T01:41:40.175Z" }, -] - -[[package]] -name = "jinja2" -version = "3.1.6" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "markupsafe" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/df/bf/f7da0350254c0ed7c72f3e33cef02e048281fec7ecec5f032d4aac52226b/jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d", size = 245115, upload-time = "2025-03-05T20:05:02.478Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/62/a1/3d680cbfd5f4b8f15abc1d571870c5fc3e594bb582bc3b64ea099db13e56/jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67", size = 134899, upload-time = "2025-03-05T20:05:00.369Z" }, -] - -[[package]] -name = "jiter" -version = "0.11.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/9d/c0/a3bb4cc13aced219dd18191ea66e874266bd8aa7b96744e495e1c733aa2d/jiter-0.11.0.tar.gz", hash = 
"sha256:1d9637eaf8c1d6a63d6562f2a6e5ab3af946c66037eb1b894e8fad75422266e4", size = 167094, upload-time = "2025-09-15T09:20:38.212Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/ba/b5/3009b112b8f673e568ef79af9863d8309a15f0a8cdcc06ed6092051f377e/jiter-0.11.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:2fb7b377688cc3850bbe5c192a6bd493562a0bc50cbc8b047316428fbae00ada", size = 305510, upload-time = "2025-09-15T09:19:25.893Z" }, - { url = "https://files.pythonhosted.org/packages/fe/82/15514244e03b9e71e086bbe2a6de3e4616b48f07d5f834200c873956fb8c/jiter-0.11.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a1b7cbe3f25bd0d8abb468ba4302a5d45617ee61b2a7a638f63fee1dc086be99", size = 316521, upload-time = "2025-09-15T09:19:27.525Z" }, - { url = "https://files.pythonhosted.org/packages/92/94/7a2e905f40ad2d6d660e00b68d818f9e29fb87ffe82774f06191e93cbe4a/jiter-0.11.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c0a7f0ec81d5b7588c5cade1eb1925b91436ae6726dc2df2348524aeabad5de6", size = 338214, upload-time = "2025-09-15T09:19:28.727Z" }, - { url = "https://files.pythonhosted.org/packages/a8/9c/5791ed5bdc76f12110158d3316a7a3ec0b1413d018b41c5ed399549d3ad5/jiter-0.11.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:07630bb46ea2a6b9c6ed986c6e17e35b26148cce2c535454b26ee3f0e8dcaba1", size = 361280, upload-time = "2025-09-15T09:19:30.013Z" }, - { url = "https://files.pythonhosted.org/packages/d4/7f/b7d82d77ff0d2cb06424141000176b53a9e6b16a1125525bb51ea4990c2e/jiter-0.11.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7764f27d28cd4a9cbc61704dfcd80c903ce3aad106a37902d3270cd6673d17f4", size = 487895, upload-time = "2025-09-15T09:19:31.424Z" }, - { url = "https://files.pythonhosted.org/packages/42/44/10a1475d46f1fc1fd5cc2e82c58e7bca0ce5852208e0fa5df2f949353321/jiter-0.11.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1d4a6c4a737d486f77f842aeb22807edecb4a9417e6700c7b981e16d34ba7c72", size = 378421, upload-time = "2025-09-15T09:19:32.746Z" }, - { url = "https://files.pythonhosted.org/packages/9a/5f/0dc34563d8164d31d07bc09d141d3da08157a68dcd1f9b886fa4e917805b/jiter-0.11.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cf408d2a0abd919b60de8c2e7bc5eeab72d4dafd18784152acc7c9adc3291591", size = 347932, upload-time = "2025-09-15T09:19:34.612Z" }, - { url = "https://files.pythonhosted.org/packages/f7/de/b68f32a4fcb7b4a682b37c73a0e5dae32180140cd1caf11aef6ad40ddbf2/jiter-0.11.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cdef53eda7d18e799625023e1e250dbc18fbc275153039b873ec74d7e8883e09", size = 386959, upload-time = "2025-09-15T09:19:35.994Z" }, - { url = "https://files.pythonhosted.org/packages/76/0a/c08c92e713b6e28972a846a81ce374883dac2f78ec6f39a0dad9f2339c3a/jiter-0.11.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:53933a38ef7b551dd9c7f1064f9d7bb235bb3168d0fa5f14f0798d1b7ea0d9c5", size = 517187, upload-time = "2025-09-15T09:19:37.426Z" }, - { url = "https://files.pythonhosted.org/packages/89/b5/4a283bec43b15aad54fcae18d951f06a2ec3f78db5708d3b59a48e9c3fbd/jiter-0.11.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:11840d2324c9ab5162fc1abba23bc922124fedcff0d7b7f85fffa291e2f69206", size = 509461, upload-time = "2025-09-15T09:19:38.761Z" }, - { url = "https://files.pythonhosted.org/packages/34/a5/f8bad793010534ea73c985caaeef8cc22dfb1fedb15220ecdf15c623c07a/jiter-0.11.0-cp312-cp312-win32.whl", hash = 
"sha256:4f01a744d24a5f2bb4a11657a1b27b61dc038ae2e674621a74020406e08f749b", size = 206664, upload-time = "2025-09-15T09:19:40.096Z" }, - { url = "https://files.pythonhosted.org/packages/ed/42/5823ec2b1469395a160b4bf5f14326b4a098f3b6898fbd327366789fa5d3/jiter-0.11.0-cp312-cp312-win_amd64.whl", hash = "sha256:29fff31190ab3a26de026da2f187814f4b9c6695361e20a9ac2123e4d4378a4c", size = 203520, upload-time = "2025-09-15T09:19:41.798Z" }, - { url = "https://files.pythonhosted.org/packages/97/c4/d530e514d0f4f29b2b68145e7b389cbc7cac7f9c8c23df43b04d3d10fa3e/jiter-0.11.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:4441a91b80a80249f9a6452c14b2c24708f139f64de959943dfeaa6cb915e8eb", size = 305021, upload-time = "2025-09-15T09:19:43.523Z" }, - { url = "https://files.pythonhosted.org/packages/7a/77/796a19c567c5734cbfc736a6f987affc0d5f240af8e12063c0fb93990ffa/jiter-0.11.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:ff85fc6d2a431251ad82dbd1ea953affb5a60376b62e7d6809c5cd058bb39471", size = 314384, upload-time = "2025-09-15T09:19:44.849Z" }, - { url = "https://files.pythonhosted.org/packages/14/9c/824334de0b037b91b6f3fa9fe5a191c83977c7ec4abe17795d3cb6d174cf/jiter-0.11.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5e86126d64706fd28dfc46f910d496923c6f95b395138c02d0e252947f452bd", size = 337389, upload-time = "2025-09-15T09:19:46.094Z" }, - { url = "https://files.pythonhosted.org/packages/a2/95/ed4feab69e6cf9b2176ea29d4ef9d01a01db210a3a2c8a31a44ecdc68c38/jiter-0.11.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4ad8bd82165961867a10f52010590ce0b7a8c53da5ddd8bbb62fef68c181b921", size = 360519, upload-time = "2025-09-15T09:19:47.494Z" }, - { url = "https://files.pythonhosted.org/packages/b5/0c/2ad00f38d3e583caba3909d95b7da1c3a7cd82c0aa81ff4317a8016fb581/jiter-0.11.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b42c2cd74273455ce439fd9528db0c6e84b5623cb74572305bdd9f2f2961d3df", size = 487198, upload-time = "2025-09-15T09:19:49.116Z" }, - { url = "https://files.pythonhosted.org/packages/ea/8b/919b64cf3499b79bdfba6036da7b0cac5d62d5c75a28fb45bad7819e22f0/jiter-0.11.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f0062dab98172dd0599fcdbf90214d0dcde070b1ff38a00cc1b90e111f071982", size = 377835, upload-time = "2025-09-15T09:19:50.468Z" }, - { url = "https://files.pythonhosted.org/packages/29/7f/8ebe15b6e0a8026b0d286c083b553779b4dd63db35b43a3f171b544de91d/jiter-0.11.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb948402821bc76d1f6ef0f9e19b816f9b09f8577844ba7140f0b6afe994bc64", size = 347655, upload-time = "2025-09-15T09:19:51.726Z" }, - { url = "https://files.pythonhosted.org/packages/8e/64/332127cef7e94ac75719dda07b9a472af6158ba819088d87f17f3226a769/jiter-0.11.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:25a5b1110cca7329fd0daf5060faa1234be5c11e988948e4f1a1923b6a457fe1", size = 386135, upload-time = "2025-09-15T09:19:53.075Z" }, - { url = "https://files.pythonhosted.org/packages/20/c8/557b63527442f84c14774159948262a9d4fabb0d61166f11568f22fc60d2/jiter-0.11.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:bf11807e802a214daf6c485037778843fadd3e2ec29377ae17e0706ec1a25758", size = 516063, upload-time = "2025-09-15T09:19:54.447Z" }, - { url = "https://files.pythonhosted.org/packages/86/13/4164c819df4a43cdc8047f9a42880f0ceef5afeb22e8b9675c0528ebdccd/jiter-0.11.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = 
"sha256:dbb57da40631c267861dd0090461222060960012d70fd6e4c799b0f62d0ba166", size = 508139, upload-time = "2025-09-15T09:19:55.764Z" }, - { url = "https://files.pythonhosted.org/packages/fa/70/6e06929b401b331d41ddb4afb9f91cd1168218e3371972f0afa51c9f3c31/jiter-0.11.0-cp313-cp313-win32.whl", hash = "sha256:8e36924dad32c48d3c5e188d169e71dc6e84d6cb8dedefea089de5739d1d2f80", size = 206369, upload-time = "2025-09-15T09:19:57.048Z" }, - { url = "https://files.pythonhosted.org/packages/f4/0d/8185b8e15de6dce24f6afae63380e16377dd75686d56007baa4f29723ea1/jiter-0.11.0-cp313-cp313-win_amd64.whl", hash = "sha256:452d13e4fd59698408087235259cebe67d9d49173b4dacb3e8d35ce4acf385d6", size = 202538, upload-time = "2025-09-15T09:19:58.35Z" }, - { url = "https://files.pythonhosted.org/packages/13/3a/d61707803260d59520721fa326babfae25e9573a88d8b7b9cb54c5423a59/jiter-0.11.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:089f9df9f69532d1339e83142438668f52c97cd22ee2d1195551c2b1a9e6cf33", size = 313737, upload-time = "2025-09-15T09:19:59.638Z" }, - { url = "https://files.pythonhosted.org/packages/cd/cc/c9f0eec5d00f2a1da89f6bdfac12b8afdf8d5ad974184863c75060026457/jiter-0.11.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:29ed1fe69a8c69bf0f2a962d8d706c7b89b50f1332cd6b9fbda014f60bd03a03", size = 346183, upload-time = "2025-09-15T09:20:01.442Z" }, - { url = "https://files.pythonhosted.org/packages/a6/87/fc632776344e7aabbab05a95a0075476f418c5d29ab0f2eec672b7a1f0ac/jiter-0.11.0-cp313-cp313t-win_amd64.whl", hash = "sha256:a4d71d7ea6ea8786291423fe209acf6f8d398a0759d03e7f24094acb8ab686ba", size = 204225, upload-time = "2025-09-15T09:20:03.102Z" }, - { url = "https://files.pythonhosted.org/packages/ee/3b/e7f45be7d3969bdf2e3cd4b816a7a1d272507cd0edd2d6dc4b07514f2d9a/jiter-0.11.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:9a6dff27eca70930bdbe4cbb7c1a4ba8526e13b63dc808c0670083d2d51a4a72", size = 304414, upload-time = "2025-09-15T09:20:04.357Z" }, - { url = "https://files.pythonhosted.org/packages/06/32/13e8e0d152631fcc1907ceb4943711471be70496d14888ec6e92034e2caf/jiter-0.11.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:b1ae2a7593a62132c7d4c2abbee80bbbb94fdc6d157e2c6cc966250c564ef774", size = 314223, upload-time = "2025-09-15T09:20:05.631Z" }, - { url = "https://files.pythonhosted.org/packages/0c/7e/abedd5b5a20ca083f778d96bba0d2366567fcecb0e6e34ff42640d5d7a18/jiter-0.11.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7b13a431dba4b059e9e43019d3022346d009baf5066c24dcdea321a303cde9f0", size = 337306, upload-time = "2025-09-15T09:20:06.917Z" }, - { url = "https://files.pythonhosted.org/packages/ac/e2/30d59bdc1204c86aa975ec72c48c482fee6633120ee9c3ab755e4dfefea8/jiter-0.11.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:af62e84ca3889604ebb645df3b0a3f3bcf6b92babbff642bd214616f57abb93a", size = 360565, upload-time = "2025-09-15T09:20:08.283Z" }, - { url = "https://files.pythonhosted.org/packages/fe/88/567288e0d2ed9fa8f7a3b425fdaf2cb82b998633c24fe0d98f5417321aa8/jiter-0.11.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c6f3b32bb723246e6b351aecace52aba78adb8eeb4b2391630322dc30ff6c773", size = 486465, upload-time = "2025-09-15T09:20:09.613Z" }, - { url = "https://files.pythonhosted.org/packages/18/6e/7b72d09273214cadd15970e91dd5ed9634bee605176107db21e1e4205eb1/jiter-0.11.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:adcab442f4a099a358a7f562eaa54ed6456fb866e922c6545a717be51dbed7d7", size = 377581, upload-time = "2025-09-15T09:20:10.884Z" }, - { url = "https://files.pythonhosted.org/packages/58/52/4db456319f9d14deed325f70102577492e9d7e87cf7097bda9769a1fcacb/jiter-0.11.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c9967c2ab338ee2b2c0102fd379ec2693c496abf71ffd47e4d791d1f593b68e2", size = 347102, upload-time = "2025-09-15T09:20:12.175Z" }, - { url = "https://files.pythonhosted.org/packages/ce/b4/433d5703c38b26083aec7a733eb5be96f9c6085d0e270a87ca6482cbf049/jiter-0.11.0-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e7d0bed3b187af8b47a981d9742ddfc1d9b252a7235471ad6078e7e4e5fe75c2", size = 386477, upload-time = "2025-09-15T09:20:13.428Z" }, - { url = "https://files.pythonhosted.org/packages/c8/7a/a60bfd9c55b55b07c5c441c5085f06420b6d493ce9db28d069cc5b45d9f3/jiter-0.11.0-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:f6fe0283e903ebc55f1a6cc569b8c1f3bf4abd026fed85e3ff8598a9e6f982f0", size = 516004, upload-time = "2025-09-15T09:20:14.848Z" }, - { url = "https://files.pythonhosted.org/packages/2e/46/f8363e5ecc179b4ed0ca6cb0a6d3bfc266078578c71ff30642ea2ce2f203/jiter-0.11.0-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:4ee5821e3d66606b29ae5b497230b304f1376f38137d69e35f8d2bd5f310ff73", size = 507855, upload-time = "2025-09-15T09:20:16.176Z" }, - { url = "https://files.pythonhosted.org/packages/90/33/396083357d51d7ff0f9805852c288af47480d30dd31d8abc74909b020761/jiter-0.11.0-cp314-cp314-win32.whl", hash = "sha256:c2d13ba7567ca8799f17c76ed56b1d49be30df996eb7fa33e46b62800562a5e2", size = 205802, upload-time = "2025-09-15T09:20:17.661Z" }, - { url = "https://files.pythonhosted.org/packages/e7/ab/eb06ca556b2551d41de7d03bf2ee24285fa3d0c58c5f8d95c64c9c3281b1/jiter-0.11.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:fb4790497369d134a07fc763cc88888c46f734abdd66f9fdf7865038bf3a8f40", size = 313405, upload-time = "2025-09-15T09:20:18.918Z" }, - { url = "https://files.pythonhosted.org/packages/af/22/7ab7b4ec3a1c1f03aef376af11d23b05abcca3fb31fbca1e7557053b1ba2/jiter-0.11.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e2bbf24f16ba5ad4441a9845e40e4ea0cb9eed00e76ba94050664ef53ef4406", size = 347102, upload-time = "2025-09-15T09:20:20.16Z" }, -] - -[[package]] -name = "json-log-formatter" -version = "1.1.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/e8/ef/324f4a28ed0152a32b80685b26316b604218e4ac77487ea82719c3c28bc6/json_log_formatter-1.1.1.tar.gz", hash = "sha256:0815e3b4469e5c79cf3f6dc8a0613ba6601f4a7464f85ba03655cfa6e3e17d10", size = 5896, upload-time = "2025-02-27T22:56:15.643Z" } - -[[package]] -name = "jsonref" -version = "1.1.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/aa/0d/c1f3277e90ccdb50d33ed5ba1ec5b3f0a242ed8c1b1a85d3afeb68464dca/jsonref-1.1.0.tar.gz", hash = "sha256:32fe8e1d85af0fdefbebce950af85590b22b60f9e95443176adbde4e1ecea552", size = 8814, upload-time = "2023-01-16T16:10:04.455Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/0c/ec/e1db9922bceb168197a558a2b8c03a7963f1afe93517ddd3cf99f202f996/jsonref-1.1.0-py3-none-any.whl", hash = "sha256:590dc7773df6c21cbf948b5dac07a72a251db28b0238ceecce0a2abfa8ec30a9", size = 9425, upload-time = "2023-01-16T16:10:02.255Z" }, -] - -[[package]] -name = "jsonschema" -version = "4.25.1" -source = { registry = 
"https://pypi.org/simple" } -dependencies = [ - { name = "attrs" }, - { name = "jsonschema-specifications" }, - { name = "referencing" }, - { name = "rpds-py" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/74/69/f7185de793a29082a9f3c7728268ffb31cb5095131a9c139a74078e27336/jsonschema-4.25.1.tar.gz", hash = "sha256:e4a9655ce0da0c0b67a085847e00a3a51449e1157f4f75e9fb5aa545e122eb85", size = 357342, upload-time = "2025-08-18T17:03:50.038Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/bf/9c/8c95d856233c1f82500c2450b8c68576b4cf1c871db3afac5c34ff84e6fd/jsonschema-4.25.1-py3-none-any.whl", hash = "sha256:3fba0169e345c7175110351d456342c364814cfcf3b964ba4587f22915230a63", size = 90040, upload-time = "2025-08-18T17:03:48.373Z" }, -] - -[[package]] -name = "jsonschema-specifications" -version = "2025.9.1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "referencing" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/19/74/a633ee74eb36c44aa6d1095e7cc5569bebf04342ee146178e2d36600708b/jsonschema_specifications-2025.9.1.tar.gz", hash = "sha256:b540987f239e745613c7a9176f3edb72b832a4ac465cf02712288397832b5e8d", size = 32855, upload-time = "2025-09-08T01:34:59.186Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/41/45/1a4ed80516f02155c51f51e8cedb3c1902296743db0bbc66608a0db2814f/jsonschema_specifications-2025.9.1-py3-none-any.whl", hash = "sha256:98802fee3a11ee76ecaca44429fda8a41bff98b00a0f2838151b113f210cc6fe", size = 18437, upload-time = "2025-09-08T01:34:57.871Z" }, -] - -[[package]] -name = "jupyter-client" -version = "8.6.3" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "jupyter-core" }, - { name = "python-dateutil" }, - { name = "pyzmq" }, - { name = "tornado" }, - { name = "traitlets" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/71/22/bf9f12fdaeae18019a468b68952a60fe6dbab5d67cd2a103cac7659b41ca/jupyter_client-8.6.3.tar.gz", hash = "sha256:35b3a0947c4a6e9d589eb97d7d4cd5e90f910ee73101611f01283732bd6d9419", size = 342019, upload-time = "2024-09-17T10:44:17.613Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/11/85/b0394e0b6fcccd2c1eeefc230978a6f8cb0c5df1e4cd3e7625735a0d7d1e/jupyter_client-8.6.3-py3-none-any.whl", hash = "sha256:e8a19cc986cc45905ac3362915f410f3af85424b4c0905e94fa5f2cb08e8f23f", size = 106105, upload-time = "2024-09-17T10:44:15.218Z" }, -] - -[[package]] -name = "jupyter-core" -version = "5.8.1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "platformdirs" }, - { name = "pywin32", marker = "platform_python_implementation != 'PyPy' and sys_platform == 'win32'" }, - { name = "traitlets" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/99/1b/72906d554acfeb588332eaaa6f61577705e9ec752ddb486f302dafa292d9/jupyter_core-5.8.1.tar.gz", hash = "sha256:0a5f9706f70e64786b75acba995988915ebd4601c8a52e534a40b51c95f59941", size = 88923, upload-time = "2025-05-27T07:38:16.655Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/2f/57/6bffd4b20b88da3800c5d691e0337761576ee688eb01299eae865689d2df/jupyter_core-5.8.1-py3-none-any.whl", hash = "sha256:c28d268fc90fb53f1338ded2eb410704c5449a358406e8a948b75706e24863d0", size = 28880, upload-time = "2025-05-27T07:38:15.137Z" }, -] - -[[package]] -name = "jupyterlab-widgets" -version = "3.0.15" -source = { registry = "https://pypi.org/simple" } -sdist = { url = 
"https://files.pythonhosted.org/packages/b9/7d/160595ca88ee87ac6ba95d82177d29ec60aaa63821d3077babb22ce031a5/jupyterlab_widgets-3.0.15.tar.gz", hash = "sha256:2920888a0c2922351a9202817957a68c07d99673504d6cd37345299e971bb08b", size = 213149, upload-time = "2025-05-05T12:32:31.004Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/43/6a/ca128561b22b60bd5a0c4ea26649e68c8556b82bc70a0c396eebc977fe86/jupyterlab_widgets-3.0.15-py3-none-any.whl", hash = "sha256:d59023d7d7ef71400d51e6fee9a88867f6e65e10a4201605d2d7f3e8f012a31c", size = 216571, upload-time = "2025-05-05T12:32:29.534Z" }, -] - -[[package]] -name = "kubernetes" -version = "28.1.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "certifi" }, - { name = "google-auth" }, - { name = "oauthlib" }, - { name = "python-dateutil" }, - { name = "pyyaml" }, - { name = "requests" }, - { name = "requests-oauthlib" }, - { name = "six" }, - { name = "urllib3" }, - { name = "websocket-client" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/3c/5e/d27f39f447137a9a3d1f31142c77ce74bcedfda7dafe922d725c7ef2da33/kubernetes-28.1.0.tar.gz", hash = "sha256:1468069a573430fb1cb5ad22876868f57977930f80a6749405da31cd6086a7e9", size = 817854, upload-time = "2023-09-18T17:32:07.314Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/f5/6a/1f69c2d8b1ff03f8d8e10d801f4ac3016ed4c1b00aa9795732c6ec900bba/kubernetes-28.1.0-py2.py3-none-any.whl", hash = "sha256:10f56f8160dcb73647f15fafda268e7f60cf7dbc9f8e46d52fcd46d3beb0c18d", size = 1566315, upload-time = "2023-09-18T17:32:05.283Z" }, -] - -[[package]] -name = "legacy-cgi" -version = "2.6.3" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/a6/ed/300cabc9693209d5a03e2ebc5eb5c4171b51607c08ed84a2b71c9015e0f3/legacy_cgi-2.6.3.tar.gz", hash = "sha256:4c119d6cb8e9d8b6ad7cc0ddad880552c62df4029622835d06dfd18f438a8154", size = 24401, upload-time = "2025-03-27T00:48:56.957Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/5a/33/68c6c38193684537757e0d50a7ccb4f4656e5c2f7cd2be737a9d4a1bff71/legacy_cgi-2.6.3-py3-none-any.whl", hash = "sha256:6df2ea5ae14c71ef6f097f8b6372b44f6685283dc018535a75c924564183cdab", size = 19851, upload-time = "2025-03-27T00:48:55.366Z" }, -] - -[[package]] -name = "litellm" -version = "1.77.5" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "aiohttp" }, - { name = "click" }, - { name = "fastuuid" }, - { name = "httpx" }, - { name = "importlib-metadata" }, - { name = "jinja2" }, - { name = "jsonschema" }, - { name = "openai" }, - { name = "pydantic" }, - { name = "python-dotenv" }, - { name = "tiktoken" }, - { name = "tokenizers" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/e1/a3/85fc92d998ec9645c9fac108618681ef411ca4b338cc7544d6b3aad57699/litellm-1.77.5.tar.gz", hash = "sha256:8e8a83b49c4a6ae044b1a1c01adfbdef72b0031b86f1463dd743e267fa1d7b99", size = 10351819, upload-time = "2025-09-28T07:17:39.393Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/94/4c/89553f7e375ef39497d86f2266a0cdb37371a07e9e0aa8949f33c15a4198/litellm-1.77.5-py3-none-any.whl", hash = "sha256:07f53964c08d555621d4376cc42330458301ae889bfb6303155dcabc51095fbf", size = 9165458, upload-time = "2025-09-28T07:17:35.474Z" }, -] - -[[package]] -name = "markdown-it-py" -version = "4.0.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "mdurl" }, -] -sdist = { url = 
"https://files.pythonhosted.org/packages/5b/f5/4ec618ed16cc4f8fb3b701563655a69816155e79e24a17b651541804721d/markdown_it_py-4.0.0.tar.gz", hash = "sha256:cb0a2b4aa34f932c007117b194e945bd74e0ec24133ceb5bac59009cda1cb9f3", size = 73070, upload-time = "2025-08-11T12:57:52.854Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/94/54/e7d793b573f298e1c9013b8c4dade17d481164aa517d1d7148619c2cedbf/markdown_it_py-4.0.0-py3-none-any.whl", hash = "sha256:87327c59b172c5011896038353a81343b6754500a08cd7a4973bb48c6d578147", size = 87321, upload-time = "2025-08-11T12:57:51.923Z" }, -] - -[[package]] -name = "markupsafe" -version = "3.0.3" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/7e/99/7690b6d4034fffd95959cbe0c02de8deb3098cc577c67bb6a24fe5d7caa7/markupsafe-3.0.3.tar.gz", hash = "sha256:722695808f4b6457b320fdc131280796bdceb04ab50fe1795cd540799ebe1698", size = 80313, upload-time = "2025-09-27T18:37:40.426Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/5a/72/147da192e38635ada20e0a2e1a51cf8823d2119ce8883f7053879c2199b5/markupsafe-3.0.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d53197da72cc091b024dd97249dfc7794d6a56530370992a5e1a08983ad9230e", size = 11615, upload-time = "2025-09-27T18:36:30.854Z" }, - { url = "https://files.pythonhosted.org/packages/9a/81/7e4e08678a1f98521201c3079f77db69fb552acd56067661f8c2f534a718/markupsafe-3.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1872df69a4de6aead3491198eaf13810b565bdbeec3ae2dc8780f14458ec73ce", size = 12020, upload-time = "2025-09-27T18:36:31.971Z" }, - { url = "https://files.pythonhosted.org/packages/1e/2c/799f4742efc39633a1b54a92eec4082e4f815314869865d876824c257c1e/markupsafe-3.0.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3a7e8ae81ae39e62a41ec302f972ba6ae23a5c5396c8e60113e9066ef893da0d", size = 24332, upload-time = "2025-09-27T18:36:32.813Z" }, - { url = "https://files.pythonhosted.org/packages/3c/2e/8d0c2ab90a8c1d9a24f0399058ab8519a3279d1bd4289511d74e909f060e/markupsafe-3.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d6dd0be5b5b189d31db7cda48b91d7e0a9795f31430b7f271219ab30f1d3ac9d", size = 22947, upload-time = "2025-09-27T18:36:33.86Z" }, - { url = "https://files.pythonhosted.org/packages/2c/54/887f3092a85238093a0b2154bd629c89444f395618842e8b0c41783898ea/markupsafe-3.0.3-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:94c6f0bb423f739146aec64595853541634bde58b2135f27f61c1ffd1cd4d16a", size = 21962, upload-time = "2025-09-27T18:36:35.099Z" }, - { url = "https://files.pythonhosted.org/packages/c9/2f/336b8c7b6f4a4d95e91119dc8521402461b74a485558d8f238a68312f11c/markupsafe-3.0.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:be8813b57049a7dc738189df53d69395eba14fb99345e0a5994914a3864c8a4b", size = 23760, upload-time = "2025-09-27T18:36:36.001Z" }, - { url = "https://files.pythonhosted.org/packages/32/43/67935f2b7e4982ffb50a4d169b724d74b62a3964bc1a9a527f5ac4f1ee2b/markupsafe-3.0.3-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:83891d0e9fb81a825d9a6d61e3f07550ca70a076484292a70fde82c4b807286f", size = 21529, upload-time = "2025-09-27T18:36:36.906Z" }, - { url = "https://files.pythonhosted.org/packages/89/e0/4486f11e51bbba8b0c041098859e869e304d1c261e59244baa3d295d47b7/markupsafe-3.0.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:77f0643abe7495da77fb436f50f8dab76dbc6e5fd25d39589a0f1fe6548bfa2b", size = 23015, upload-time = "2025-09-27T18:36:37.868Z" }, - { url = "https://files.pythonhosted.org/packages/2f/e1/78ee7a023dac597a5825441ebd17170785a9dab23de95d2c7508ade94e0e/markupsafe-3.0.3-cp312-cp312-win32.whl", hash = "sha256:d88b440e37a16e651bda4c7c2b930eb586fd15ca7406cb39e211fcff3bf3017d", size = 14540, upload-time = "2025-09-27T18:36:38.761Z" }, - { url = "https://files.pythonhosted.org/packages/aa/5b/bec5aa9bbbb2c946ca2733ef9c4ca91c91b6a24580193e891b5f7dbe8e1e/markupsafe-3.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:26a5784ded40c9e318cfc2bdb30fe164bdb8665ded9cd64d500a34fb42067b1c", size = 15105, upload-time = "2025-09-27T18:36:39.701Z" }, - { url = "https://files.pythonhosted.org/packages/e5/f1/216fc1bbfd74011693a4fd837e7026152e89c4bcf3e77b6692fba9923123/markupsafe-3.0.3-cp312-cp312-win_arm64.whl", hash = "sha256:35add3b638a5d900e807944a078b51922212fb3dedb01633a8defc4b01a3c85f", size = 13906, upload-time = "2025-09-27T18:36:40.689Z" }, - { url = "https://files.pythonhosted.org/packages/38/2f/907b9c7bbba283e68f20259574b13d005c121a0fa4c175f9bed27c4597ff/markupsafe-3.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e1cf1972137e83c5d4c136c43ced9ac51d0e124706ee1c8aa8532c1287fa8795", size = 11622, upload-time = "2025-09-27T18:36:41.777Z" }, - { url = "https://files.pythonhosted.org/packages/9c/d9/5f7756922cdd676869eca1c4e3c0cd0df60ed30199ffd775e319089cb3ed/markupsafe-3.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:116bb52f642a37c115f517494ea5feb03889e04df47eeff5b130b1808ce7c219", size = 12029, upload-time = "2025-09-27T18:36:43.257Z" }, - { url = "https://files.pythonhosted.org/packages/00/07/575a68c754943058c78f30db02ee03a64b3c638586fba6a6dd56830b30a3/markupsafe-3.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:133a43e73a802c5562be9bbcd03d090aa5a1fe899db609c29e8c8d815c5f6de6", size = 24374, upload-time = "2025-09-27T18:36:44.508Z" }, - { url = "https://files.pythonhosted.org/packages/a9/21/9b05698b46f218fc0e118e1f8168395c65c8a2c750ae2bab54fc4bd4e0e8/markupsafe-3.0.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ccfcd093f13f0f0b7fdd0f198b90053bf7b2f02a3927a30e63f3ccc9df56b676", size = 22980, upload-time = "2025-09-27T18:36:45.385Z" }, - { url = "https://files.pythonhosted.org/packages/7f/71/544260864f893f18b6827315b988c146b559391e6e7e8f7252839b1b846a/markupsafe-3.0.3-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:509fa21c6deb7a7a273d629cf5ec029bc209d1a51178615ddf718f5918992ab9", size = 21990, upload-time = "2025-09-27T18:36:46.916Z" }, - { url = "https://files.pythonhosted.org/packages/c2/28/b50fc2f74d1ad761af2f5dcce7492648b983d00a65b8c0e0cb457c82ebbe/markupsafe-3.0.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a4afe79fb3de0b7097d81da19090f4df4f8d3a2b3adaa8764138aac2e44f3af1", size = 23784, upload-time = "2025-09-27T18:36:47.884Z" }, - { url = "https://files.pythonhosted.org/packages/ed/76/104b2aa106a208da8b17a2fb72e033a5a9d7073c68f7e508b94916ed47a9/markupsafe-3.0.3-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:795e7751525cae078558e679d646ae45574b47ed6e7771863fcc079a6171a0fc", size = 21588, upload-time = "2025-09-27T18:36:48.82Z" }, - { url = "https://files.pythonhosted.org/packages/b5/99/16a5eb2d140087ebd97180d95249b00a03aa87e29cc224056274f2e45fd6/markupsafe-3.0.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = 
"sha256:8485f406a96febb5140bfeca44a73e3ce5116b2501ac54fe953e488fb1d03b12", size = 23041, upload-time = "2025-09-27T18:36:49.797Z" }, - { url = "https://files.pythonhosted.org/packages/19/bc/e7140ed90c5d61d77cea142eed9f9c303f4c4806f60a1044c13e3f1471d0/markupsafe-3.0.3-cp313-cp313-win32.whl", hash = "sha256:bdd37121970bfd8be76c5fb069c7751683bdf373db1ed6c010162b2a130248ed", size = 14543, upload-time = "2025-09-27T18:36:51.584Z" }, - { url = "https://files.pythonhosted.org/packages/05/73/c4abe620b841b6b791f2edc248f556900667a5a1cf023a6646967ae98335/markupsafe-3.0.3-cp313-cp313-win_amd64.whl", hash = "sha256:9a1abfdc021a164803f4d485104931fb8f8c1efd55bc6b748d2f5774e78b62c5", size = 15113, upload-time = "2025-09-27T18:36:52.537Z" }, - { url = "https://files.pythonhosted.org/packages/f0/3a/fa34a0f7cfef23cf9500d68cb7c32dd64ffd58a12b09225fb03dd37d5b80/markupsafe-3.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:7e68f88e5b8799aa49c85cd116c932a1ac15caaa3f5db09087854d218359e485", size = 13911, upload-time = "2025-09-27T18:36:53.513Z" }, - { url = "https://files.pythonhosted.org/packages/e4/d7/e05cd7efe43a88a17a37b3ae96e79a19e846f3f456fe79c57ca61356ef01/markupsafe-3.0.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:218551f6df4868a8d527e3062d0fb968682fe92054e89978594c28e642c43a73", size = 11658, upload-time = "2025-09-27T18:36:54.819Z" }, - { url = "https://files.pythonhosted.org/packages/99/9e/e412117548182ce2148bdeacdda3bb494260c0b0184360fe0d56389b523b/markupsafe-3.0.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3524b778fe5cfb3452a09d31e7b5adefeea8c5be1d43c4f810ba09f2ceb29d37", size = 12066, upload-time = "2025-09-27T18:36:55.714Z" }, - { url = "https://files.pythonhosted.org/packages/bc/e6/fa0ffcda717ef64a5108eaa7b4f5ed28d56122c9a6d70ab8b72f9f715c80/markupsafe-3.0.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4e885a3d1efa2eadc93c894a21770e4bc67899e3543680313b09f139e149ab19", size = 25639, upload-time = "2025-09-27T18:36:56.908Z" }, - { url = "https://files.pythonhosted.org/packages/96/ec/2102e881fe9d25fc16cb4b25d5f5cde50970967ffa5dddafdb771237062d/markupsafe-3.0.3-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8709b08f4a89aa7586de0aadc8da56180242ee0ada3999749b183aa23df95025", size = 23569, upload-time = "2025-09-27T18:36:57.913Z" }, - { url = "https://files.pythonhosted.org/packages/4b/30/6f2fce1f1f205fc9323255b216ca8a235b15860c34b6798f810f05828e32/markupsafe-3.0.3-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:b8512a91625c9b3da6f127803b166b629725e68af71f8184ae7e7d54686a56d6", size = 23284, upload-time = "2025-09-27T18:36:58.833Z" }, - { url = "https://files.pythonhosted.org/packages/58/47/4a0ccea4ab9f5dcb6f79c0236d954acb382202721e704223a8aafa38b5c8/markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:9b79b7a16f7fedff2495d684f2b59b0457c3b493778c9eed31111be64d58279f", size = 24801, upload-time = "2025-09-27T18:36:59.739Z" }, - { url = "https://files.pythonhosted.org/packages/6a/70/3780e9b72180b6fecb83a4814d84c3bf4b4ae4bf0b19c27196104149734c/markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:12c63dfb4a98206f045aa9563db46507995f7ef6d83b2f68eda65c307c6829eb", size = 22769, upload-time = "2025-09-27T18:37:00.719Z" }, - { url = "https://files.pythonhosted.org/packages/98/c5/c03c7f4125180fc215220c035beac6b9cb684bc7a067c84fc69414d315f5/markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = 
"sha256:8f71bc33915be5186016f675cd83a1e08523649b0e33efdb898db577ef5bb009", size = 23642, upload-time = "2025-09-27T18:37:01.673Z" }, - { url = "https://files.pythonhosted.org/packages/80/d6/2d1b89f6ca4bff1036499b1e29a1d02d282259f3681540e16563f27ebc23/markupsafe-3.0.3-cp313-cp313t-win32.whl", hash = "sha256:69c0b73548bc525c8cb9a251cddf1931d1db4d2258e9599c28c07ef3580ef354", size = 14612, upload-time = "2025-09-27T18:37:02.639Z" }, - { url = "https://files.pythonhosted.org/packages/2b/98/e48a4bfba0a0ffcf9925fe2d69240bfaa19c6f7507b8cd09c70684a53c1e/markupsafe-3.0.3-cp313-cp313t-win_amd64.whl", hash = "sha256:1b4b79e8ebf6b55351f0d91fe80f893b4743f104bff22e90697db1590e47a218", size = 15200, upload-time = "2025-09-27T18:37:03.582Z" }, - { url = "https://files.pythonhosted.org/packages/0e/72/e3cc540f351f316e9ed0f092757459afbc595824ca724cbc5a5d4263713f/markupsafe-3.0.3-cp313-cp313t-win_arm64.whl", hash = "sha256:ad2cf8aa28b8c020ab2fc8287b0f823d0a7d8630784c31e9ee5edea20f406287", size = 13973, upload-time = "2025-09-27T18:37:04.929Z" }, - { url = "https://files.pythonhosted.org/packages/33/8a/8e42d4838cd89b7dde187011e97fe6c3af66d8c044997d2183fbd6d31352/markupsafe-3.0.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:eaa9599de571d72e2daf60164784109f19978b327a3910d3e9de8c97b5b70cfe", size = 11619, upload-time = "2025-09-27T18:37:06.342Z" }, - { url = "https://files.pythonhosted.org/packages/b5/64/7660f8a4a8e53c924d0fa05dc3a55c9cee10bbd82b11c5afb27d44b096ce/markupsafe-3.0.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c47a551199eb8eb2121d4f0f15ae0f923d31350ab9280078d1e5f12b249e0026", size = 12029, upload-time = "2025-09-27T18:37:07.213Z" }, - { url = "https://files.pythonhosted.org/packages/da/ef/e648bfd021127bef5fa12e1720ffed0c6cbb8310c8d9bea7266337ff06de/markupsafe-3.0.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f34c41761022dd093b4b6896d4810782ffbabe30f2d443ff5f083e0cbbb8c737", size = 24408, upload-time = "2025-09-27T18:37:09.572Z" }, - { url = "https://files.pythonhosted.org/packages/41/3c/a36c2450754618e62008bf7435ccb0f88053e07592e6028a34776213d877/markupsafe-3.0.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:457a69a9577064c05a97c41f4e65148652db078a3a509039e64d3467b9e7ef97", size = 23005, upload-time = "2025-09-27T18:37:10.58Z" }, - { url = "https://files.pythonhosted.org/packages/bc/20/b7fdf89a8456b099837cd1dc21974632a02a999ec9bf7ca3e490aacd98e7/markupsafe-3.0.3-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:e8afc3f2ccfa24215f8cb28dcf43f0113ac3c37c2f0f0806d8c70e4228c5cf4d", size = 22048, upload-time = "2025-09-27T18:37:11.547Z" }, - { url = "https://files.pythonhosted.org/packages/9a/a7/591f592afdc734f47db08a75793a55d7fbcc6902a723ae4cfbab61010cc5/markupsafe-3.0.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:ec15a59cf5af7be74194f7ab02d0f59a62bdcf1a537677ce67a2537c9b87fcda", size = 23821, upload-time = "2025-09-27T18:37:12.48Z" }, - { url = "https://files.pythonhosted.org/packages/7d/33/45b24e4f44195b26521bc6f1a82197118f74df348556594bd2262bda1038/markupsafe-3.0.3-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:0eb9ff8191e8498cca014656ae6b8d61f39da5f95b488805da4bb029cccbfbaf", size = 21606, upload-time = "2025-09-27T18:37:13.485Z" }, - { url = "https://files.pythonhosted.org/packages/ff/0e/53dfaca23a69fbfbbf17a4b64072090e70717344c52eaaaa9c5ddff1e5f0/markupsafe-3.0.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = 
"sha256:2713baf880df847f2bece4230d4d094280f4e67b1e813eec43b4c0e144a34ffe", size = 23043, upload-time = "2025-09-27T18:37:14.408Z" }, - { url = "https://files.pythonhosted.org/packages/46/11/f333a06fc16236d5238bfe74daccbca41459dcd8d1fa952e8fbd5dccfb70/markupsafe-3.0.3-cp314-cp314-win32.whl", hash = "sha256:729586769a26dbceff69f7a7dbbf59ab6572b99d94576a5592625d5b411576b9", size = 14747, upload-time = "2025-09-27T18:37:15.36Z" }, - { url = "https://files.pythonhosted.org/packages/28/52/182836104b33b444e400b14f797212f720cbc9ed6ba34c800639d154e821/markupsafe-3.0.3-cp314-cp314-win_amd64.whl", hash = "sha256:bdc919ead48f234740ad807933cdf545180bfbe9342c2bb451556db2ed958581", size = 15341, upload-time = "2025-09-27T18:37:16.496Z" }, - { url = "https://files.pythonhosted.org/packages/6f/18/acf23e91bd94fd7b3031558b1f013adfa21a8e407a3fdb32745538730382/markupsafe-3.0.3-cp314-cp314-win_arm64.whl", hash = "sha256:5a7d5dc5140555cf21a6fefbdbf8723f06fcd2f63ef108f2854de715e4422cb4", size = 14073, upload-time = "2025-09-27T18:37:17.476Z" }, - { url = "https://files.pythonhosted.org/packages/3c/f0/57689aa4076e1b43b15fdfa646b04653969d50cf30c32a102762be2485da/markupsafe-3.0.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:1353ef0c1b138e1907ae78e2f6c63ff67501122006b0f9abad68fda5f4ffc6ab", size = 11661, upload-time = "2025-09-27T18:37:18.453Z" }, - { url = "https://files.pythonhosted.org/packages/89/c3/2e67a7ca217c6912985ec766c6393b636fb0c2344443ff9d91404dc4c79f/markupsafe-3.0.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:1085e7fbddd3be5f89cc898938f42c0b3c711fdcb37d75221de2666af647c175", size = 12069, upload-time = "2025-09-27T18:37:19.332Z" }, - { url = "https://files.pythonhosted.org/packages/f0/00/be561dce4e6ca66b15276e184ce4b8aec61fe83662cce2f7d72bd3249d28/markupsafe-3.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1b52b4fb9df4eb9ae465f8d0c228a00624de2334f216f178a995ccdcf82c4634", size = 25670, upload-time = "2025-09-27T18:37:20.245Z" }, - { url = "https://files.pythonhosted.org/packages/50/09/c419f6f5a92e5fadde27efd190eca90f05e1261b10dbd8cbcb39cd8ea1dc/markupsafe-3.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fed51ac40f757d41b7c48425901843666a6677e3e8eb0abcff09e4ba6e664f50", size = 23598, upload-time = "2025-09-27T18:37:21.177Z" }, - { url = "https://files.pythonhosted.org/packages/22/44/a0681611106e0b2921b3033fc19bc53323e0b50bc70cffdd19f7d679bb66/markupsafe-3.0.3-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:f190daf01f13c72eac4efd5c430a8de82489d9cff23c364c3ea822545032993e", size = 23261, upload-time = "2025-09-27T18:37:22.167Z" }, - { url = "https://files.pythonhosted.org/packages/5f/57/1b0b3f100259dc9fffe780cfb60d4be71375510e435efec3d116b6436d43/markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:e56b7d45a839a697b5eb268c82a71bd8c7f6c94d6fd50c3d577fa39a9f1409f5", size = 24835, upload-time = "2025-09-27T18:37:23.296Z" }, - { url = "https://files.pythonhosted.org/packages/26/6a/4bf6d0c97c4920f1597cc14dd720705eca0bf7c787aebc6bb4d1bead5388/markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:f3e98bb3798ead92273dc0e5fd0f31ade220f59a266ffd8a4f6065e0a3ce0523", size = 22733, upload-time = "2025-09-27T18:37:24.237Z" }, - { url = "https://files.pythonhosted.org/packages/14/c7/ca723101509b518797fedc2fdf79ba57f886b4aca8a7d31857ba3ee8281f/markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = 
"sha256:5678211cb9333a6468fb8d8be0305520aa073f50d17f089b5b4b477ea6e67fdc", size = 23672, upload-time = "2025-09-27T18:37:25.271Z" }, - { url = "https://files.pythonhosted.org/packages/fb/df/5bd7a48c256faecd1d36edc13133e51397e41b73bb77e1a69deab746ebac/markupsafe-3.0.3-cp314-cp314t-win32.whl", hash = "sha256:915c04ba3851909ce68ccc2b8e2cd691618c4dc4c4232fb7982bca3f41fd8c3d", size = 14819, upload-time = "2025-09-27T18:37:26.285Z" }, - { url = "https://files.pythonhosted.org/packages/1a/8a/0402ba61a2f16038b48b39bccca271134be00c5c9f0f623208399333c448/markupsafe-3.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:4faffd047e07c38848ce017e8725090413cd80cbc23d86e55c587bf979e579c9", size = 15426, upload-time = "2025-09-27T18:37:27.316Z" }, - { url = "https://files.pythonhosted.org/packages/70/bc/6f1c2f612465f5fa89b95bead1f44dcb607670fd42891d8fdcd5d039f4f4/markupsafe-3.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:32001d6a8fc98c8cb5c947787c5d08b0a50663d139f1305bac5885d98d9b40fa", size = 14146, upload-time = "2025-09-27T18:37:28.327Z" }, -] - -[[package]] -name = "matplotlib-inline" -version = "0.1.7" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "traitlets" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/99/5b/a36a337438a14116b16480db471ad061c36c3694df7c2084a0da7ba538b7/matplotlib_inline-0.1.7.tar.gz", hash = "sha256:8423b23ec666be3d16e16b60bdd8ac4e86e840ebd1dd11a30b9f117f2fa0ab90", size = 8159, upload-time = "2024-04-15T13:44:44.803Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/8f/8e/9ad090d3553c280a8060fbf6e24dc1c0c29704ee7d1c372f0c174aa59285/matplotlib_inline-0.1.7-py3-none-any.whl", hash = "sha256:df192d39a4ff8f21b1895d72e6a13f5fcc5099f00fa84384e0ea28c2cc0653ca", size = 9899, upload-time = "2024-04-15T13:44:43.265Z" }, -] - -[[package]] -name = "mcp" -version = "1.15.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "anyio" }, - { name = "httpx" }, - { name = "httpx-sse" }, - { name = "jsonschema" }, - { name = "pydantic" }, - { name = "pydantic-settings" }, - { name = "python-multipart" }, - { name = "pywin32", marker = "sys_platform == 'win32'" }, - { name = "sse-starlette" }, - { name = "starlette" }, - { name = "uvicorn", marker = "sys_platform != 'emscripten'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/0c/9e/e65114795f359f314d7061f4fcb50dfe60026b01b52ad0b986b4631bf8bb/mcp-1.15.0.tar.gz", hash = "sha256:5bda1f4d383cf539d3c035b3505a3de94b20dbd7e4e8b4bd071e14634eeb2d72", size = 469622, upload-time = "2025-09-25T15:39:51.995Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/c9/82/4d0df23d5ff5bb982a59ad597bc7cb9920f2650278ccefb8e0d85c5ce3d4/mcp-1.15.0-py3-none-any.whl", hash = "sha256:314614c8addc67b663d6c3e4054db0a5c3dedc416c24ef8ce954e203fdc2333d", size = 166963, upload-time = "2025-09-25T15:39:50.538Z" }, -] - -[package.optional-dependencies] -cli = [ - { name = "python-dotenv" }, - { name = "typer" }, -] - -[[package]] -name = "mdurl" -version = "0.1.2" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/d6/54/cfe61301667036ec958cb99bd3efefba235e65cdeb9c84d24a8293ba1d90/mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba", size = 8729, upload-time = "2022-08-14T12:40:10.846Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl", hash = 
"sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8", size = 9979, upload-time = "2022-08-14T12:40:09.779Z" }, -] - -[[package]] -name = "multidict" -version = "6.6.4" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/69/7f/0652e6ed47ab288e3756ea9c0df8b14950781184d4bd7883f4d87dd41245/multidict-6.6.4.tar.gz", hash = "sha256:d2d4e4787672911b48350df02ed3fa3fffdc2f2e8ca06dd6afdf34189b76a9dd", size = 101843, upload-time = "2025-08-11T12:08:48.217Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/05/f6/512ffd8fd8b37fb2680e5ac35d788f1d71bbaf37789d21a820bdc441e565/multidict-6.6.4-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0ffb87be160942d56d7b87b0fdf098e81ed565add09eaa1294268c7f3caac4c8", size = 76516, upload-time = "2025-08-11T12:06:53.393Z" }, - { url = "https://files.pythonhosted.org/packages/99/58/45c3e75deb8855c36bd66cc1658007589662ba584dbf423d01df478dd1c5/multidict-6.6.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d191de6cbab2aff5de6c5723101705fd044b3e4c7cfd587a1929b5028b9714b3", size = 45394, upload-time = "2025-08-11T12:06:54.555Z" }, - { url = "https://files.pythonhosted.org/packages/fd/ca/e8c4472a93a26e4507c0b8e1f0762c0d8a32de1328ef72fd704ef9cc5447/multidict-6.6.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:38a0956dd92d918ad5feff3db8fcb4a5eb7dba114da917e1a88475619781b57b", size = 43591, upload-time = "2025-08-11T12:06:55.672Z" }, - { url = "https://files.pythonhosted.org/packages/05/51/edf414f4df058574a7265034d04c935aa84a89e79ce90fcf4df211f47b16/multidict-6.6.4-cp312-cp312-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:6865f6d3b7900ae020b495d599fcf3765653bc927951c1abb959017f81ae8287", size = 237215, upload-time = "2025-08-11T12:06:57.213Z" }, - { url = "https://files.pythonhosted.org/packages/c8/45/8b3d6dbad8cf3252553cc41abea09ad527b33ce47a5e199072620b296902/multidict-6.6.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0a2088c126b6f72db6c9212ad827d0ba088c01d951cee25e758c450da732c138", size = 258299, upload-time = "2025-08-11T12:06:58.946Z" }, - { url = "https://files.pythonhosted.org/packages/3c/e8/8ca2e9a9f5a435fc6db40438a55730a4bf4956b554e487fa1b9ae920f825/multidict-6.6.4-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:0f37bed7319b848097085d7d48116f545985db988e2256b2e6f00563a3416ee6", size = 242357, upload-time = "2025-08-11T12:07:00.301Z" }, - { url = "https://files.pythonhosted.org/packages/0f/84/80c77c99df05a75c28490b2af8f7cba2a12621186e0a8b0865d8e745c104/multidict-6.6.4-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:01368e3c94032ba6ca0b78e7ccb099643466cf24f8dc8eefcfdc0571d56e58f9", size = 268369, upload-time = "2025-08-11T12:07:01.638Z" }, - { url = "https://files.pythonhosted.org/packages/0d/e9/920bfa46c27b05fb3e1ad85121fd49f441492dca2449c5bcfe42e4565d8a/multidict-6.6.4-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:8fe323540c255db0bffee79ad7f048c909f2ab0edb87a597e1c17da6a54e493c", size = 269341, upload-time = "2025-08-11T12:07:02.943Z" }, - { url = "https://files.pythonhosted.org/packages/af/65/753a2d8b05daf496f4a9c367fe844e90a1b2cac78e2be2c844200d10cc4c/multidict-6.6.4-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:b8eb3025f17b0a4c3cd08cda49acf312a19ad6e8a4edd9dbd591e6506d999402", size = 256100, upload-time = "2025-08-11T12:07:04.564Z" }, - { url = "https://files.pythonhosted.org/packages/09/54/655be13ae324212bf0bc15d665a4e34844f34c206f78801be42f7a0a8aaa/multidict-6.6.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:bbc14f0365534d35a06970d6a83478b249752e922d662dc24d489af1aa0d1be7", size = 253584, upload-time = "2025-08-11T12:07:05.914Z" }, - { url = "https://files.pythonhosted.org/packages/5c/74/ab2039ecc05264b5cec73eb018ce417af3ebb384ae9c0e9ed42cb33f8151/multidict-6.6.4-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:75aa52fba2d96bf972e85451b99d8e19cc37ce26fd016f6d4aa60da9ab2b005f", size = 251018, upload-time = "2025-08-11T12:07:08.301Z" }, - { url = "https://files.pythonhosted.org/packages/af/0a/ccbb244ac848e56c6427f2392741c06302bbfba49c0042f1eb3c5b606497/multidict-6.6.4-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:4fefd4a815e362d4f011919d97d7b4a1e566f1dde83dc4ad8cfb5b41de1df68d", size = 251477, upload-time = "2025-08-11T12:07:10.248Z" }, - { url = "https://files.pythonhosted.org/packages/0e/b0/0ed49bba775b135937f52fe13922bc64a7eaf0a3ead84a36e8e4e446e096/multidict-6.6.4-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:db9801fe021f59a5b375ab778973127ca0ac52429a26e2fd86aa9508f4d26eb7", size = 263575, upload-time = "2025-08-11T12:07:11.928Z" }, - { url = "https://files.pythonhosted.org/packages/3e/d9/7fb85a85e14de2e44dfb6a24f03c41e2af8697a6df83daddb0e9b7569f73/multidict-6.6.4-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:a650629970fa21ac1fb06ba25dabfc5b8a2054fcbf6ae97c758aa956b8dba802", size = 259649, upload-time = "2025-08-11T12:07:13.244Z" }, - { url = "https://files.pythonhosted.org/packages/03/9e/b3a459bcf9b6e74fa461a5222a10ff9b544cb1cd52fd482fb1b75ecda2a2/multidict-6.6.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:452ff5da78d4720d7516a3a2abd804957532dd69296cb77319c193e3ffb87e24", size = 251505, upload-time = "2025-08-11T12:07:14.57Z" }, - { url = "https://files.pythonhosted.org/packages/86/a2/8022f78f041dfe6d71e364001a5cf987c30edfc83c8a5fb7a3f0974cff39/multidict-6.6.4-cp312-cp312-win32.whl", hash = "sha256:8c2fcb12136530ed19572bbba61b407f655e3953ba669b96a35036a11a485793", size = 41888, upload-time = "2025-08-11T12:07:15.904Z" }, - { url = "https://files.pythonhosted.org/packages/c7/eb/d88b1780d43a56db2cba24289fa744a9d216c1a8546a0dc3956563fd53ea/multidict-6.6.4-cp312-cp312-win_amd64.whl", hash = "sha256:047d9425860a8c9544fed1b9584f0c8bcd31bcde9568b047c5e567a1025ecd6e", size = 46072, upload-time = "2025-08-11T12:07:17.045Z" }, - { url = "https://files.pythonhosted.org/packages/9f/16/b929320bf5750e2d9d4931835a4c638a19d2494a5b519caaaa7492ebe105/multidict-6.6.4-cp312-cp312-win_arm64.whl", hash = "sha256:14754eb72feaa1e8ae528468f24250dd997b8e2188c3d2f593f9eba259e4b364", size = 43222, upload-time = "2025-08-11T12:07:18.328Z" }, - { url = "https://files.pythonhosted.org/packages/3a/5d/e1db626f64f60008320aab00fbe4f23fc3300d75892a3381275b3d284580/multidict-6.6.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:f46a6e8597f9bd71b31cc708195d42b634c8527fecbcf93febf1052cacc1f16e", size = 75848, upload-time = "2025-08-11T12:07:19.912Z" }, - { url = "https://files.pythonhosted.org/packages/4c/aa/8b6f548d839b6c13887253af4e29c939af22a18591bfb5d0ee6f1931dae8/multidict-6.6.4-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:22e38b2bc176c5eb9c0a0e379f9d188ae4cd8b28c0f53b52bce7ab0a9e534657", size = 45060, upload-time = "2025-08-11T12:07:21.163Z" }, - { url 
= "https://files.pythonhosted.org/packages/eb/c6/f5e97e5d99a729bc2aa58eb3ebfa9f1e56a9b517cc38c60537c81834a73f/multidict-6.6.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5df8afd26f162da59e218ac0eefaa01b01b2e6cd606cffa46608f699539246da", size = 43269, upload-time = "2025-08-11T12:07:22.392Z" }, - { url = "https://files.pythonhosted.org/packages/dc/31/d54eb0c62516776f36fe67f84a732f97e0b0e12f98d5685bebcc6d396910/multidict-6.6.4-cp313-cp313-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:49517449b58d043023720aa58e62b2f74ce9b28f740a0b5d33971149553d72aa", size = 237158, upload-time = "2025-08-11T12:07:23.636Z" }, - { url = "https://files.pythonhosted.org/packages/c4/1c/8a10c1c25b23156e63b12165a929d8eb49a6ed769fdbefb06e6f07c1e50d/multidict-6.6.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ae9408439537c5afdca05edd128a63f56a62680f4b3c234301055d7a2000220f", size = 257076, upload-time = "2025-08-11T12:07:25.049Z" }, - { url = "https://files.pythonhosted.org/packages/ad/86/90e20b5771d6805a119e483fd3d1e8393e745a11511aebca41f0da38c3e2/multidict-6.6.4-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:87a32d20759dc52a9e850fe1061b6e41ab28e2998d44168a8a341b99ded1dba0", size = 240694, upload-time = "2025-08-11T12:07:26.458Z" }, - { url = "https://files.pythonhosted.org/packages/e7/49/484d3e6b535bc0555b52a0a26ba86e4d8d03fd5587d4936dc59ba7583221/multidict-6.6.4-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:52e3c8d43cdfff587ceedce9deb25e6ae77daba560b626e97a56ddcad3756879", size = 266350, upload-time = "2025-08-11T12:07:27.94Z" }, - { url = "https://files.pythonhosted.org/packages/bf/b4/aa4c5c379b11895083d50021e229e90c408d7d875471cb3abf721e4670d6/multidict-6.6.4-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:ad8850921d3a8d8ff6fbef790e773cecfc260bbfa0566998980d3fa8f520bc4a", size = 267250, upload-time = "2025-08-11T12:07:29.303Z" }, - { url = "https://files.pythonhosted.org/packages/80/e5/5e22c5bf96a64bdd43518b1834c6d95a4922cc2066b7d8e467dae9b6cee6/multidict-6.6.4-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:497a2954adc25c08daff36f795077f63ad33e13f19bfff7736e72c785391534f", size = 254900, upload-time = "2025-08-11T12:07:30.764Z" }, - { url = "https://files.pythonhosted.org/packages/17/38/58b27fed927c07035abc02befacab42491e7388ca105e087e6e0215ead64/multidict-6.6.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:024ce601f92d780ca1617ad4be5ac15b501cc2414970ffa2bb2bbc2bd5a68fa5", size = 252355, upload-time = "2025-08-11T12:07:32.205Z" }, - { url = "https://files.pythonhosted.org/packages/d0/a1/dad75d23a90c29c02b5d6f3d7c10ab36c3197613be5d07ec49c7791e186c/multidict-6.6.4-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:a693fc5ed9bdd1c9e898013e0da4dcc640de7963a371c0bd458e50e046bf6438", size = 250061, upload-time = "2025-08-11T12:07:33.623Z" }, - { url = "https://files.pythonhosted.org/packages/b8/1a/ac2216b61c7f116edab6dc3378cca6c70dc019c9a457ff0d754067c58b20/multidict-6.6.4-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:190766dac95aab54cae5b152a56520fd99298f32a1266d66d27fdd1b5ac00f4e", size = 249675, upload-time = "2025-08-11T12:07:34.958Z" }, - { url = 
"https://files.pythonhosted.org/packages/d4/79/1916af833b800d13883e452e8e0977c065c4ee3ab7a26941fbfdebc11895/multidict-6.6.4-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:34d8f2a5ffdceab9dcd97c7a016deb2308531d5f0fced2bb0c9e1df45b3363d7", size = 261247, upload-time = "2025-08-11T12:07:36.588Z" }, - { url = "https://files.pythonhosted.org/packages/c5/65/d1f84fe08ac44a5fc7391cbc20a7cedc433ea616b266284413fd86062f8c/multidict-6.6.4-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:59e8d40ab1f5a8597abcef00d04845155a5693b5da00d2c93dbe88f2050f2812", size = 257960, upload-time = "2025-08-11T12:07:39.735Z" }, - { url = "https://files.pythonhosted.org/packages/13/b5/29ec78057d377b195ac2c5248c773703a6b602e132a763e20ec0457e7440/multidict-6.6.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:467fe64138cfac771f0e949b938c2e1ada2b5af22f39692aa9258715e9ea613a", size = 250078, upload-time = "2025-08-11T12:07:41.525Z" }, - { url = "https://files.pythonhosted.org/packages/c4/0e/7e79d38f70a872cae32e29b0d77024bef7834b0afb406ddae6558d9e2414/multidict-6.6.4-cp313-cp313-win32.whl", hash = "sha256:14616a30fe6d0a48d0a48d1a633ab3b8bec4cf293aac65f32ed116f620adfd69", size = 41708, upload-time = "2025-08-11T12:07:43.405Z" }, - { url = "https://files.pythonhosted.org/packages/9d/34/746696dffff742e97cd6a23da953e55d0ea51fa601fa2ff387b3edcfaa2c/multidict-6.6.4-cp313-cp313-win_amd64.whl", hash = "sha256:40cd05eaeb39e2bc8939451f033e57feaa2ac99e07dbca8afe2be450a4a3b6cf", size = 45912, upload-time = "2025-08-11T12:07:45.082Z" }, - { url = "https://files.pythonhosted.org/packages/c7/87/3bac136181e271e29170d8d71929cdeddeb77f3e8b6a0c08da3a8e9da114/multidict-6.6.4-cp313-cp313-win_arm64.whl", hash = "sha256:f6eb37d511bfae9e13e82cb4d1af36b91150466f24d9b2b8a9785816deb16605", size = 43076, upload-time = "2025-08-11T12:07:46.746Z" }, - { url = "https://files.pythonhosted.org/packages/64/94/0a8e63e36c049b571c9ae41ee301ada29c3fee9643d9c2548d7d558a1d99/multidict-6.6.4-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:6c84378acd4f37d1b507dfa0d459b449e2321b3ba5f2338f9b085cf7a7ba95eb", size = 82812, upload-time = "2025-08-11T12:07:48.402Z" }, - { url = "https://files.pythonhosted.org/packages/25/1a/be8e369dfcd260d2070a67e65dd3990dd635cbd735b98da31e00ea84cd4e/multidict-6.6.4-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0e0558693063c75f3d952abf645c78f3c5dfdd825a41d8c4d8156fc0b0da6e7e", size = 48313, upload-time = "2025-08-11T12:07:49.679Z" }, - { url = "https://files.pythonhosted.org/packages/26/5a/dd4ade298674b2f9a7b06a32c94ffbc0497354df8285f27317c66433ce3b/multidict-6.6.4-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3f8e2384cb83ebd23fd07e9eada8ba64afc4c759cd94817433ab8c81ee4b403f", size = 46777, upload-time = "2025-08-11T12:07:51.318Z" }, - { url = "https://files.pythonhosted.org/packages/89/db/98aa28bc7e071bfba611ac2ae803c24e96dd3a452b4118c587d3d872c64c/multidict-6.6.4-cp313-cp313t-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:f996b87b420995a9174b2a7c1a8daf7db4750be6848b03eb5e639674f7963773", size = 229321, upload-time = "2025-08-11T12:07:52.965Z" }, - { url = "https://files.pythonhosted.org/packages/c7/bc/01ddda2a73dd9d167bd85d0e8ef4293836a8f82b786c63fb1a429bc3e678/multidict-6.6.4-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:cc356250cffd6e78416cf5b40dc6a74f1edf3be8e834cf8862d9ed5265cf9b0e", size = 249954, upload-time = "2025-08-11T12:07:54.423Z" }, - { url = 
"https://files.pythonhosted.org/packages/06/78/6b7c0f020f9aa0acf66d0ab4eb9f08375bac9a50ff5e3edb1c4ccd59eafc/multidict-6.6.4-cp313-cp313t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:dadf95aa862714ea468a49ad1e09fe00fcc9ec67d122f6596a8d40caf6cec7d0", size = 228612, upload-time = "2025-08-11T12:07:55.914Z" }, - { url = "https://files.pythonhosted.org/packages/00/44/3faa416f89b2d5d76e9d447296a81521e1c832ad6e40b92f990697b43192/multidict-6.6.4-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:7dd57515bebffd8ebd714d101d4c434063322e4fe24042e90ced41f18b6d3395", size = 257528, upload-time = "2025-08-11T12:07:57.371Z" }, - { url = "https://files.pythonhosted.org/packages/05/5f/77c03b89af0fcb16f018f668207768191fb9dcfb5e3361a5e706a11db2c9/multidict-6.6.4-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:967af5f238ebc2eb1da4e77af5492219fbd9b4b812347da39a7b5f5c72c0fa45", size = 256329, upload-time = "2025-08-11T12:07:58.844Z" }, - { url = "https://files.pythonhosted.org/packages/cf/e9/ed750a2a9afb4f8dc6f13dc5b67b514832101b95714f1211cd42e0aafc26/multidict-6.6.4-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2a4c6875c37aae9794308ec43e3530e4aa0d36579ce38d89979bbf89582002bb", size = 247928, upload-time = "2025-08-11T12:08:01.037Z" }, - { url = "https://files.pythonhosted.org/packages/1f/b5/e0571bc13cda277db7e6e8a532791d4403dacc9850006cb66d2556e649c0/multidict-6.6.4-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:7f683a551e92bdb7fac545b9c6f9fa2aebdeefa61d607510b3533286fcab67f5", size = 245228, upload-time = "2025-08-11T12:08:02.96Z" }, - { url = "https://files.pythonhosted.org/packages/f3/a3/69a84b0eccb9824491f06368f5b86e72e4af54c3067c37c39099b6687109/multidict-6.6.4-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:3ba5aaf600edaf2a868a391779f7a85d93bed147854925f34edd24cc70a3e141", size = 235869, upload-time = "2025-08-11T12:08:04.746Z" }, - { url = "https://files.pythonhosted.org/packages/a9/9d/28802e8f9121a6a0804fa009debf4e753d0a59969ea9f70be5f5fdfcb18f/multidict-6.6.4-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:580b643b7fd2c295d83cad90d78419081f53fd532d1f1eb67ceb7060f61cff0d", size = 243446, upload-time = "2025-08-11T12:08:06.332Z" }, - { url = "https://files.pythonhosted.org/packages/38/ea/6c98add069b4878c1d66428a5f5149ddb6d32b1f9836a826ac764b9940be/multidict-6.6.4-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:37b7187197da6af3ee0b044dbc9625afd0c885f2800815b228a0e70f9a7f473d", size = 252299, upload-time = "2025-08-11T12:08:07.931Z" }, - { url = "https://files.pythonhosted.org/packages/3a/09/8fe02d204473e14c0af3affd50af9078839dfca1742f025cca765435d6b4/multidict-6.6.4-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:e1b93790ed0bc26feb72e2f08299691ceb6da5e9e14a0d13cc74f1869af327a0", size = 246926, upload-time = "2025-08-11T12:08:09.467Z" }, - { url = "https://files.pythonhosted.org/packages/37/3d/7b1e10d774a6df5175ecd3c92bff069e77bed9ec2a927fdd4ff5fe182f67/multidict-6.6.4-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:a506a77ddee1efcca81ecbeae27ade3e09cdf21a8ae854d766c2bb4f14053f92", size = 243383, upload-time = "2025-08-11T12:08:10.981Z" }, - { url = "https://files.pythonhosted.org/packages/50/b0/a6fae46071b645ae98786ab738447de1ef53742eaad949f27e960864bb49/multidict-6.6.4-cp313-cp313t-win32.whl", hash = "sha256:f93b2b2279883d1d0a9e1bd01f312d6fc315c5e4c1f09e112e4736e2f650bc4e", 
size = 47775, upload-time = "2025-08-11T12:08:12.439Z" }, - { url = "https://files.pythonhosted.org/packages/b2/0a/2436550b1520091af0600dff547913cb2d66fbac27a8c33bc1b1bccd8d98/multidict-6.6.4-cp313-cp313t-win_amd64.whl", hash = "sha256:6d46a180acdf6e87cc41dc15d8f5c2986e1e8739dc25dbb7dac826731ef381a4", size = 53100, upload-time = "2025-08-11T12:08:13.823Z" }, - { url = "https://files.pythonhosted.org/packages/97/ea/43ac51faff934086db9c072a94d327d71b7d8b40cd5dcb47311330929ef0/multidict-6.6.4-cp313-cp313t-win_arm64.whl", hash = "sha256:756989334015e3335d087a27331659820d53ba432befdef6a718398b0a8493ad", size = 45501, upload-time = "2025-08-11T12:08:15.173Z" }, - { url = "https://files.pythonhosted.org/packages/fd/69/b547032297c7e63ba2af494edba695d781af8a0c6e89e4d06cf848b21d80/multidict-6.6.4-py3-none-any.whl", hash = "sha256:27d8f8e125c07cb954e54d75d04905a9bba8a439c1d84aca94949d4d03d8601c", size = 12313, upload-time = "2025-08-11T12:08:46.891Z" }, -] - -[[package]] -name = "nbformat" -version = "5.10.4" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "fastjsonschema" }, - { name = "jsonschema" }, - { name = "jupyter-core" }, - { name = "traitlets" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/6d/fd/91545e604bc3dad7dca9ed03284086039b294c6b3d75c0d2fa45f9e9caf3/nbformat-5.10.4.tar.gz", hash = "sha256:322168b14f937a5d11362988ecac2a4952d3d8e3a2cbeb2319584631226d5b3a", size = 142749, upload-time = "2024-04-04T11:20:37.371Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/a9/82/0340caa499416c78e5d8f5f05947ae4bc3cba53c9f038ab6e9ed964e22f1/nbformat-5.10.4-py3-none-any.whl", hash = "sha256:3b48d6c8fbca4b299bf3982ea7db1af21580e4fec269ad087b9e81588891200b", size = 78454, upload-time = "2024-04-04T11:20:34.895Z" }, -] - -[[package]] -name = "nbstripout" -version = "0.8.1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "nbformat" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/92/6e/05d7e0e35598bd0d423167295f978005912a2dcd137c88ebf36e34047dc7/nbstripout-0.8.1.tar.gz", hash = "sha256:eaac8b6b4e729e8dfe1e5df2c0f8ba44abc5a17a65448f0480141f80be230bb1", size = 26399, upload-time = "2024-11-17T10:38:33.275Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/cf/91/93b459c456b0e4389b2b3ddb3b82cd401d022691334a0f06e92c2046e780/nbstripout-0.8.1-py2.py3-none-any.whl", hash = "sha256:79a8c8da488d98c54c112fa87185045f0271a97d84f1d46918d6a3ee561b30e7", size = 16329, upload-time = "2024-11-17T10:38:31.803Z" }, -] - -[[package]] -name = "nest-asyncio" -version = "1.6.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/83/f8/51569ac65d696c8ecbee95938f89d4abf00f47d58d48f6fbabfe8f0baefe/nest_asyncio-1.6.0.tar.gz", hash = "sha256:6f172d5449aca15afd6c646851f4e31e02c598d553a667e38cafa997cfec55fe", size = 7418, upload-time = "2024-01-21T14:25:19.227Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/a0/c4/c2971a3ba4c6103a3d10c4b0f24f461ddc027f0f09763220cf35ca1401b3/nest_asyncio-1.6.0-py3-none-any.whl", hash = "sha256:87af6efd6b5e897c81050477ef65c62e2b2f35d51703cae01aff2905b1852e1c", size = 5195, upload-time = "2024-01-21T14:25:17.223Z" }, -] - -[[package]] -name = "nexus-rpc" -version = "1.1.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "typing-extensions" }, -] -sdist = { url = 
"https://files.pythonhosted.org/packages/ef/66/540687556bd28cf1ec370cc6881456203dfddb9dab047b8979c6865b5984/nexus_rpc-1.1.0.tar.gz", hash = "sha256:d65ad6a2f54f14e53ebe39ee30555eaeb894102437125733fb13034a04a44553", size = 77383, upload-time = "2025-07-07T19:03:58.368Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/bf/2f/9e9d0dcaa4c6ffa22b7aa31069a8a264c753ff8027b36af602cce038c92f/nexus_rpc-1.1.0-py3-none-any.whl", hash = "sha256:d1b007af2aba186a27e736f8eaae39c03aed05b488084ff6c3d1785c9ba2ad38", size = 27743, upload-time = "2025-07-07T19:03:57.556Z" }, -] - -[[package]] -name = "oauthlib" -version = "3.3.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/0b/5f/19930f824ffeb0ad4372da4812c50edbd1434f678c90c2733e1188edfc63/oauthlib-3.3.1.tar.gz", hash = "sha256:0f0f8aa759826a193cf66c12ea1af1637f87b9b4622d46e866952bb022e538c9", size = 185918, upload-time = "2025-06-19T22:48:08.269Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/be/9c/92789c596b8df838baa98fa71844d84283302f7604ed565dafe5a6b5041a/oauthlib-3.3.1-py3-none-any.whl", hash = "sha256:88119c938d2b8fb88561af5f6ee0eec8cc8d552b7bb1f712743136eb7523b7a1", size = 160065, upload-time = "2025-06-19T22:48:06.508Z" }, -] - -[[package]] -name = "openai" -version = "2.7.1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "anyio" }, - { name = "distro" }, - { name = "httpx" }, - { name = "jiter" }, - { name = "pydantic" }, - { name = "sniffio" }, - { name = "tqdm" }, - { name = "typing-extensions" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/51/a2/f4023c1e0c868a6a5854955b3374f17153388aed95e835af114a17eac95b/openai-2.7.1.tar.gz", hash = "sha256:df4d4a3622b2df3475ead8eb0fbb3c27fd1c070fa2e55d778ca4f40e0186c726", size = 595933, upload-time = "2025-11-04T06:07:23.069Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/8c/74/6bfc3adc81f6c2cea4439f2a734c40e3a420703bbcdc539890096a732bbd/openai-2.7.1-py3-none-any.whl", hash = "sha256:2f2530354d94c59c614645a4662b9dab0a5b881c5cd767a8587398feac0c9021", size = 1008780, upload-time = "2025-11-04T06:07:20.818Z" }, -] - -[[package]] -name = "openai-agents" -version = "0.4.2" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "griffe" }, - { name = "mcp" }, - { name = "openai" }, - { name = "pydantic" }, - { name = "requests" }, - { name = "types-requests" }, - { name = "typing-extensions" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/2d/8e/71fd262046587a5b2b097aec6ce677f7bb23c81b3129da31942b7a0d0b26/openai_agents-0.4.2.tar.gz", hash = "sha256:281caff839b3ab2cf3bc52110abe93caca004985c41bf07de8e60d03c4a7528e", size = 1925615, upload-time = "2025-10-24T21:46:34.119Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/2c/2e/23dbd9099555a9c7081c2819d00b7e1ee6ddbbd2fba8032f0ca4ddff778f/openai_agents-0.4.2-py3-none-any.whl", hash = "sha256:89fda02002dc0ac90ae177bb2f381a78b73aae329753bffb9276cfbdbfd20dc3", size = 216402, upload-time = "2025-10-24T21:46:32.065Z" }, -] - -[[package]] -name = "opentelemetry-api" -version = "1.37.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "importlib-metadata" }, - { name = "typing-extensions" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/63/04/05040d7ce33a907a2a02257e601992f0cdf11c73b33f13c4492bf6c3d6d5/opentelemetry_api-1.37.0.tar.gz", hash = "sha256:540735b120355bd5112738ea53621f8d5edb35ebcd6fe21ada3ab1c61d1cd9a7", 
size = 64923, upload-time = "2025-09-11T10:29:01.662Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/91/48/28ed9e55dcf2f453128df738210a980e09f4e468a456fa3c763dbc8be70a/opentelemetry_api-1.37.0-py3-none-any.whl", hash = "sha256:accf2024d3e89faec14302213bc39550ec0f4095d1cf5ca688e1bfb1c8612f47", size = 65732, upload-time = "2025-09-11T10:28:41.826Z" }, -] - -[[package]] -name = "packaging" -version = "25.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/a1/d4/1fc4078c65507b51b96ca8f8c3ba19e6a61c8253c72794544580a7b6c24d/packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f", size = 165727, upload-time = "2025-04-19T11:48:59.673Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" }, -] - -[[package]] -name = "parso" -version = "0.8.5" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/d4/de/53e0bcf53d13e005bd8c92e7855142494f41171b34c2536b86187474184d/parso-0.8.5.tar.gz", hash = "sha256:034d7354a9a018bdce352f48b2a8a450f05e9d6ee85db84764e9b6bd96dafe5a", size = 401205, upload-time = "2025-08-23T15:15:28.028Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/16/32/f8e3c85d1d5250232a5d3477a2a28cc291968ff175caeadaf3cc19ce0e4a/parso-0.8.5-py2.py3-none-any.whl", hash = "sha256:646204b5ee239c396d040b90f9e272e9a8017c630092bf59980beb62fd033887", size = 106668, upload-time = "2025-08-23T15:15:25.663Z" }, -] - -[[package]] -name = "pexpect" -version = "4.9.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "ptyprocess" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/42/92/cc564bf6381ff43ce1f4d06852fc19a2f11d180f23dc32d9588bee2f149d/pexpect-4.9.0.tar.gz", hash = "sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f", size = 166450, upload-time = "2023-11-25T09:07:26.339Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/9e/c3/059298687310d527a58bb01f3b1965787ee3b40dce76752eda8b44e9a2c5/pexpect-4.9.0-py2.py3-none-any.whl", hash = "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523", size = 63772, upload-time = "2023-11-25T06:56:14.81Z" }, -] - -[[package]] -name = "platformdirs" -version = "4.4.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/23/e8/21db9c9987b0e728855bd57bff6984f67952bea55d6f75e055c46b5383e8/platformdirs-4.4.0.tar.gz", hash = "sha256:ca753cf4d81dc309bc67b0ea38fd15dc97bc30ce419a7f58d13eb3bf14c4febf", size = 21634, upload-time = "2025-08-26T14:32:04.268Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/40/4b/2028861e724d3bd36227adfa20d3fd24c3fc6d52032f4a93c133be5d17ce/platformdirs-4.4.0-py3-none-any.whl", hash = "sha256:abd01743f24e5287cd7a5db3752faf1a2d65353f38ec26d98e25a6db65958c85", size = 18654, upload-time = "2025-08-26T14:32:02.735Z" }, -] - -[[package]] -name = "pluggy" -version = "1.6.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz", hash = 
"sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size = 69412, upload-time = "2025-05-15T12:30:07.975Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538, upload-time = "2025-05-15T12:30:06.134Z" }, -] - -[[package]] -name = "prompt-toolkit" -version = "3.0.52" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "wcwidth" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/a1/96/06e01a7b38dce6fe1db213e061a4602dd6032a8a97ef6c1a862537732421/prompt_toolkit-3.0.52.tar.gz", hash = "sha256:28cde192929c8e7321de85de1ddbe736f1375148b02f2e17edd840042b1be855", size = 434198, upload-time = "2025-08-27T15:24:02.057Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/84/03/0d3ce49e2505ae70cf43bc5bb3033955d2fc9f932163e84dc0779cc47f48/prompt_toolkit-3.0.52-py3-none-any.whl", hash = "sha256:9aac639a3bbd33284347de5ad8d68ecc044b91a762dc39b7c21095fcd6a19955", size = 391431, upload-time = "2025-08-27T15:23:59.498Z" }, -] - -[[package]] -name = "propcache" -version = "0.3.2" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/a6/16/43264e4a779dd8588c21a70f0709665ee8f611211bdd2c87d952cfa7c776/propcache-0.3.2.tar.gz", hash = "sha256:20d7d62e4e7ef05f221e0db2856b979540686342e7dd9973b815599c7057e168", size = 44139, upload-time = "2025-06-09T22:56:06.081Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/a8/42/9ca01b0a6f48e81615dca4765a8f1dd2c057e0540f6116a27dc5ee01dfb6/propcache-0.3.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:8de106b6c84506b31c27168582cd3cb3000a6412c16df14a8628e5871ff83c10", size = 73674, upload-time = "2025-06-09T22:54:30.551Z" }, - { url = "https://files.pythonhosted.org/packages/af/6e/21293133beb550f9c901bbece755d582bfaf2176bee4774000bd4dd41884/propcache-0.3.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:28710b0d3975117239c76600ea351934ac7b5ff56e60953474342608dbbb6154", size = 43570, upload-time = "2025-06-09T22:54:32.296Z" }, - { url = "https://files.pythonhosted.org/packages/0c/c8/0393a0a3a2b8760eb3bde3c147f62b20044f0ddac81e9d6ed7318ec0d852/propcache-0.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce26862344bdf836650ed2487c3d724b00fbfec4233a1013f597b78c1cb73615", size = 43094, upload-time = "2025-06-09T22:54:33.929Z" }, - { url = "https://files.pythonhosted.org/packages/37/2c/489afe311a690399d04a3e03b069225670c1d489eb7b044a566511c1c498/propcache-0.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bca54bd347a253af2cf4544bbec232ab982f4868de0dd684246b67a51bc6b1db", size = 226958, upload-time = "2025-06-09T22:54:35.186Z" }, - { url = "https://files.pythonhosted.org/packages/9d/ca/63b520d2f3d418c968bf596839ae26cf7f87bead026b6192d4da6a08c467/propcache-0.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:55780d5e9a2ddc59711d727226bb1ba83a22dd32f64ee15594b9392b1f544eb1", size = 234894, upload-time = "2025-06-09T22:54:36.708Z" }, - { url = "https://files.pythonhosted.org/packages/11/60/1d0ed6fff455a028d678df30cc28dcee7af77fa2b0e6962ce1df95c9a2a9/propcache-0.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:035e631be25d6975ed87ab23153db6a73426a48db688070d925aa27e996fe93c", size = 233672, upload-time = "2025-06-09T22:54:38.062Z" 
}, - { url = "https://files.pythonhosted.org/packages/37/7c/54fd5301ef38505ab235d98827207176a5c9b2aa61939b10a460ca53e123/propcache-0.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ee6f22b6eaa39297c751d0e80c0d3a454f112f5c6481214fcf4c092074cecd67", size = 224395, upload-time = "2025-06-09T22:54:39.634Z" }, - { url = "https://files.pythonhosted.org/packages/ee/1a/89a40e0846f5de05fdc6779883bf46ba980e6df4d2ff8fb02643de126592/propcache-0.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7ca3aee1aa955438c4dba34fc20a9f390e4c79967257d830f137bd5a8a32ed3b", size = 212510, upload-time = "2025-06-09T22:54:41.565Z" }, - { url = "https://files.pythonhosted.org/packages/5e/33/ca98368586c9566a6b8d5ef66e30484f8da84c0aac3f2d9aec6d31a11bd5/propcache-0.3.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:7a4f30862869fa2b68380d677cc1c5fcf1e0f2b9ea0cf665812895c75d0ca3b8", size = 222949, upload-time = "2025-06-09T22:54:43.038Z" }, - { url = "https://files.pythonhosted.org/packages/ba/11/ace870d0aafe443b33b2f0b7efdb872b7c3abd505bfb4890716ad7865e9d/propcache-0.3.2-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:b77ec3c257d7816d9f3700013639db7491a434644c906a2578a11daf13176251", size = 217258, upload-time = "2025-06-09T22:54:44.376Z" }, - { url = "https://files.pythonhosted.org/packages/5b/d2/86fd6f7adffcfc74b42c10a6b7db721d1d9ca1055c45d39a1a8f2a740a21/propcache-0.3.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:cab90ac9d3f14b2d5050928483d3d3b8fb6b4018893fc75710e6aa361ecb2474", size = 213036, upload-time = "2025-06-09T22:54:46.243Z" }, - { url = "https://files.pythonhosted.org/packages/07/94/2d7d1e328f45ff34a0a284cf5a2847013701e24c2a53117e7c280a4316b3/propcache-0.3.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:0b504d29f3c47cf6b9e936c1852246c83d450e8e063d50562115a6be6d3a2535", size = 227684, upload-time = "2025-06-09T22:54:47.63Z" }, - { url = "https://files.pythonhosted.org/packages/b7/05/37ae63a0087677e90b1d14710e532ff104d44bc1efa3b3970fff99b891dc/propcache-0.3.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:ce2ac2675a6aa41ddb2a0c9cbff53780a617ac3d43e620f8fd77ba1c84dcfc06", size = 234562, upload-time = "2025-06-09T22:54:48.982Z" }, - { url = "https://files.pythonhosted.org/packages/a4/7c/3f539fcae630408d0bd8bf3208b9a647ccad10976eda62402a80adf8fc34/propcache-0.3.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:62b4239611205294cc433845b914131b2a1f03500ff3c1ed093ed216b82621e1", size = 222142, upload-time = "2025-06-09T22:54:50.424Z" }, - { url = "https://files.pythonhosted.org/packages/7c/d2/34b9eac8c35f79f8a962546b3e97e9d4b990c420ee66ac8255d5d9611648/propcache-0.3.2-cp312-cp312-win32.whl", hash = "sha256:df4a81b9b53449ebc90cc4deefb052c1dd934ba85012aa912c7ea7b7e38b60c1", size = 37711, upload-time = "2025-06-09T22:54:52.072Z" }, - { url = "https://files.pythonhosted.org/packages/19/61/d582be5d226cf79071681d1b46b848d6cb03d7b70af7063e33a2787eaa03/propcache-0.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:7046e79b989d7fe457bb755844019e10f693752d169076138abf17f31380800c", size = 41479, upload-time = "2025-06-09T22:54:53.234Z" }, - { url = "https://files.pythonhosted.org/packages/dc/d1/8c747fafa558c603c4ca19d8e20b288aa0c7cda74e9402f50f31eb65267e/propcache-0.3.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ca592ed634a73ca002967458187109265e980422116c0a107cf93d81f95af945", size = 71286, upload-time = "2025-06-09T22:54:54.369Z" }, - { url = 
"https://files.pythonhosted.org/packages/61/99/d606cb7986b60d89c36de8a85d58764323b3a5ff07770a99d8e993b3fa73/propcache-0.3.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:9ecb0aad4020e275652ba3975740f241bd12a61f1a784df044cf7477a02bc252", size = 42425, upload-time = "2025-06-09T22:54:55.642Z" }, - { url = "https://files.pythonhosted.org/packages/8c/96/ef98f91bbb42b79e9bb82bdd348b255eb9d65f14dbbe3b1594644c4073f7/propcache-0.3.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7f08f1cc28bd2eade7a8a3d2954ccc673bb02062e3e7da09bc75d843386b342f", size = 41846, upload-time = "2025-06-09T22:54:57.246Z" }, - { url = "https://files.pythonhosted.org/packages/5b/ad/3f0f9a705fb630d175146cd7b1d2bf5555c9beaed54e94132b21aac098a6/propcache-0.3.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1a342c834734edb4be5ecb1e9fb48cb64b1e2320fccbd8c54bf8da8f2a84c33", size = 208871, upload-time = "2025-06-09T22:54:58.975Z" }, - { url = "https://files.pythonhosted.org/packages/3a/38/2085cda93d2c8b6ec3e92af2c89489a36a5886b712a34ab25de9fbca7992/propcache-0.3.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8a544caaae1ac73f1fecfae70ded3e93728831affebd017d53449e3ac052ac1e", size = 215720, upload-time = "2025-06-09T22:55:00.471Z" }, - { url = "https://files.pythonhosted.org/packages/61/c1/d72ea2dc83ac7f2c8e182786ab0fc2c7bd123a1ff9b7975bee671866fe5f/propcache-0.3.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:310d11aa44635298397db47a3ebce7db99a4cc4b9bbdfcf6c98a60c8d5261cf1", size = 215203, upload-time = "2025-06-09T22:55:01.834Z" }, - { url = "https://files.pythonhosted.org/packages/af/81/b324c44ae60c56ef12007105f1460d5c304b0626ab0cc6b07c8f2a9aa0b8/propcache-0.3.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c1396592321ac83157ac03a2023aa6cc4a3cc3cfdecb71090054c09e5a7cce3", size = 206365, upload-time = "2025-06-09T22:55:03.199Z" }, - { url = "https://files.pythonhosted.org/packages/09/73/88549128bb89e66d2aff242488f62869014ae092db63ccea53c1cc75a81d/propcache-0.3.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8cabf5b5902272565e78197edb682017d21cf3b550ba0460ee473753f28d23c1", size = 196016, upload-time = "2025-06-09T22:55:04.518Z" }, - { url = "https://files.pythonhosted.org/packages/b9/3f/3bdd14e737d145114a5eb83cb172903afba7242f67c5877f9909a20d948d/propcache-0.3.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0a2f2235ac46a7aa25bdeb03a9e7060f6ecbd213b1f9101c43b3090ffb971ef6", size = 205596, upload-time = "2025-06-09T22:55:05.942Z" }, - { url = "https://files.pythonhosted.org/packages/0f/ca/2f4aa819c357d3107c3763d7ef42c03980f9ed5c48c82e01e25945d437c1/propcache-0.3.2-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:92b69e12e34869a6970fd2f3da91669899994b47c98f5d430b781c26f1d9f387", size = 200977, upload-time = "2025-06-09T22:55:07.792Z" }, - { url = "https://files.pythonhosted.org/packages/cd/4a/e65276c7477533c59085251ae88505caf6831c0e85ff8b2e31ebcbb949b1/propcache-0.3.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:54e02207c79968ebbdffc169591009f4474dde3b4679e16634d34c9363ff56b4", size = 197220, upload-time = "2025-06-09T22:55:09.173Z" }, - { url = "https://files.pythonhosted.org/packages/7c/54/fc7152e517cf5578278b242396ce4d4b36795423988ef39bb8cd5bf274c8/propcache-0.3.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4adfb44cb588001f68c5466579d3f1157ca07f7504fc91ec87862e2b8e556b88", size = 210642, upload-time = 
"2025-06-09T22:55:10.62Z" }, - { url = "https://files.pythonhosted.org/packages/b9/80/abeb4a896d2767bf5f1ea7b92eb7be6a5330645bd7fb844049c0e4045d9d/propcache-0.3.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:fd3e6019dc1261cd0291ee8919dd91fbab7b169bb76aeef6c716833a3f65d206", size = 212789, upload-time = "2025-06-09T22:55:12.029Z" }, - { url = "https://files.pythonhosted.org/packages/b3/db/ea12a49aa7b2b6d68a5da8293dcf50068d48d088100ac016ad92a6a780e6/propcache-0.3.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4c181cad81158d71c41a2bce88edce078458e2dd5ffee7eddd6b05da85079f43", size = 205880, upload-time = "2025-06-09T22:55:13.45Z" }, - { url = "https://files.pythonhosted.org/packages/d1/e5/9076a0bbbfb65d1198007059c65639dfd56266cf8e477a9707e4b1999ff4/propcache-0.3.2-cp313-cp313-win32.whl", hash = "sha256:8a08154613f2249519e549de2330cf8e2071c2887309a7b07fb56098f5170a02", size = 37220, upload-time = "2025-06-09T22:55:15.284Z" }, - { url = "https://files.pythonhosted.org/packages/d3/f5/b369e026b09a26cd77aa88d8fffd69141d2ae00a2abaaf5380d2603f4b7f/propcache-0.3.2-cp313-cp313-win_amd64.whl", hash = "sha256:e41671f1594fc4ab0a6dec1351864713cb3a279910ae8b58f884a88a0a632c05", size = 40678, upload-time = "2025-06-09T22:55:16.445Z" }, - { url = "https://files.pythonhosted.org/packages/a4/3a/6ece377b55544941a08d03581c7bc400a3c8cd3c2865900a68d5de79e21f/propcache-0.3.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:9a3cf035bbaf035f109987d9d55dc90e4b0e36e04bbbb95af3055ef17194057b", size = 76560, upload-time = "2025-06-09T22:55:17.598Z" }, - { url = "https://files.pythonhosted.org/packages/0c/da/64a2bb16418740fa634b0e9c3d29edff1db07f56d3546ca2d86ddf0305e1/propcache-0.3.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:156c03d07dc1323d8dacaa221fbe028c5c70d16709cdd63502778e6c3ccca1b0", size = 44676, upload-time = "2025-06-09T22:55:18.922Z" }, - { url = "https://files.pythonhosted.org/packages/36/7b/f025e06ea51cb72c52fb87e9b395cced02786610b60a3ed51da8af017170/propcache-0.3.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:74413c0ba02ba86f55cf60d18daab219f7e531620c15f1e23d95563f505efe7e", size = 44701, upload-time = "2025-06-09T22:55:20.106Z" }, - { url = "https://files.pythonhosted.org/packages/a4/00/faa1b1b7c3b74fc277f8642f32a4c72ba1d7b2de36d7cdfb676db7f4303e/propcache-0.3.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f066b437bb3fa39c58ff97ab2ca351db465157d68ed0440abecb21715eb24b28", size = 276934, upload-time = "2025-06-09T22:55:21.5Z" }, - { url = "https://files.pythonhosted.org/packages/74/ab/935beb6f1756e0476a4d5938ff44bf0d13a055fed880caf93859b4f1baf4/propcache-0.3.2-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1304b085c83067914721e7e9d9917d41ad87696bf70f0bc7dee450e9c71ad0a", size = 278316, upload-time = "2025-06-09T22:55:22.918Z" }, - { url = "https://files.pythonhosted.org/packages/f8/9d/994a5c1ce4389610838d1caec74bdf0e98b306c70314d46dbe4fcf21a3e2/propcache-0.3.2-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ab50cef01b372763a13333b4e54021bdcb291fc9a8e2ccb9c2df98be51bcde6c", size = 282619, upload-time = "2025-06-09T22:55:24.651Z" }, - { url = "https://files.pythonhosted.org/packages/2b/00/a10afce3d1ed0287cef2e09506d3be9822513f2c1e96457ee369adb9a6cd/propcache-0.3.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fad3b2a085ec259ad2c2842666b2a0a49dea8463579c606426128925af1ed725", size = 265896, upload-time = "2025-06-09T22:55:26.049Z" }, - { url = 
"https://files.pythonhosted.org/packages/2e/a8/2aa6716ffa566ca57c749edb909ad27884680887d68517e4be41b02299f3/propcache-0.3.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:261fa020c1c14deafd54c76b014956e2f86991af198c51139faf41c4d5e83892", size = 252111, upload-time = "2025-06-09T22:55:27.381Z" }, - { url = "https://files.pythonhosted.org/packages/36/4f/345ca9183b85ac29c8694b0941f7484bf419c7f0fea2d1e386b4f7893eed/propcache-0.3.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:46d7f8aa79c927e5f987ee3a80205c987717d3659f035c85cf0c3680526bdb44", size = 268334, upload-time = "2025-06-09T22:55:28.747Z" }, - { url = "https://files.pythonhosted.org/packages/3e/ca/fcd54f78b59e3f97b3b9715501e3147f5340167733d27db423aa321e7148/propcache-0.3.2-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:6d8f3f0eebf73e3c0ff0e7853f68be638b4043c65a70517bb575eff54edd8dbe", size = 255026, upload-time = "2025-06-09T22:55:30.184Z" }, - { url = "https://files.pythonhosted.org/packages/8b/95/8e6a6bbbd78ac89c30c225210a5c687790e532ba4088afb8c0445b77ef37/propcache-0.3.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:03c89c1b14a5452cf15403e291c0ccd7751d5b9736ecb2c5bab977ad6c5bcd81", size = 250724, upload-time = "2025-06-09T22:55:31.646Z" }, - { url = "https://files.pythonhosted.org/packages/ee/b0/0dd03616142baba28e8b2d14ce5df6631b4673850a3d4f9c0f9dd714a404/propcache-0.3.2-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:0cc17efde71e12bbaad086d679ce575268d70bc123a5a71ea7ad76f70ba30bba", size = 268868, upload-time = "2025-06-09T22:55:33.209Z" }, - { url = "https://files.pythonhosted.org/packages/c5/98/2c12407a7e4fbacd94ddd32f3b1e3d5231e77c30ef7162b12a60e2dd5ce3/propcache-0.3.2-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:acdf05d00696bc0447e278bb53cb04ca72354e562cf88ea6f9107df8e7fd9770", size = 271322, upload-time = "2025-06-09T22:55:35.065Z" }, - { url = "https://files.pythonhosted.org/packages/35/91/9cb56efbb428b006bb85db28591e40b7736847b8331d43fe335acf95f6c8/propcache-0.3.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:4445542398bd0b5d32df908031cb1b30d43ac848e20470a878b770ec2dcc6330", size = 265778, upload-time = "2025-06-09T22:55:36.45Z" }, - { url = "https://files.pythonhosted.org/packages/9a/4c/b0fe775a2bdd01e176b14b574be679d84fc83958335790f7c9a686c1f468/propcache-0.3.2-cp313-cp313t-win32.whl", hash = "sha256:f86e5d7cd03afb3a1db8e9f9f6eff15794e79e791350ac48a8c924e6f439f394", size = 41175, upload-time = "2025-06-09T22:55:38.436Z" }, - { url = "https://files.pythonhosted.org/packages/a4/ff/47f08595e3d9b5e149c150f88d9714574f1a7cbd89fe2817158a952674bf/propcache-0.3.2-cp313-cp313t-win_amd64.whl", hash = "sha256:9704bedf6e7cbe3c65eca4379a9b53ee6a83749f047808cbb5044d40d7d72198", size = 44857, upload-time = "2025-06-09T22:55:39.687Z" }, - { url = "https://files.pythonhosted.org/packages/cc/35/cc0aaecf278bb4575b8555f2b137de5ab821595ddae9da9d3cd1da4072c7/propcache-0.3.2-py3-none-any.whl", hash = "sha256:98f1ec44fb675f5052cccc8e609c46ed23a35a1cfd18545ad4e29002d858a43f", size = 12663, upload-time = "2025-06-09T22:56:04.484Z" }, -] - -[[package]] -name = "protobuf" -version = "6.32.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/fa/a4/cc17347aa2897568beece2e674674359f911d6fe21b0b8d6268cd42727ac/protobuf-6.32.1.tar.gz", hash = "sha256:ee2469e4a021474ab9baafea6cd070e5bf27c7d29433504ddea1a4ee5850f68d", size = 440635, upload-time = "2025-09-11T21:38:42.935Z" } -wheels = [ - { 
url = "https://files.pythonhosted.org/packages/c0/98/645183ea03ab3995d29086b8bf4f7562ebd3d10c9a4b14ee3f20d47cfe50/protobuf-6.32.1-cp310-abi3-win32.whl", hash = "sha256:a8a32a84bc9f2aad712041b8b366190f71dde248926da517bde9e832e4412085", size = 424411, upload-time = "2025-09-11T21:38:27.427Z" }, - { url = "https://files.pythonhosted.org/packages/8c/f3/6f58f841f6ebafe076cebeae33fc336e900619d34b1c93e4b5c97a81fdfa/protobuf-6.32.1-cp310-abi3-win_amd64.whl", hash = "sha256:b00a7d8c25fa471f16bc8153d0e53d6c9e827f0953f3c09aaa4331c718cae5e1", size = 435738, upload-time = "2025-09-11T21:38:30.959Z" }, - { url = "https://files.pythonhosted.org/packages/10/56/a8a3f4e7190837139e68c7002ec749190a163af3e330f65d90309145a210/protobuf-6.32.1-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:d8c7e6eb619ffdf105ee4ab76af5a68b60a9d0f66da3ea12d1640e6d8dab7281", size = 426454, upload-time = "2025-09-11T21:38:34.076Z" }, - { url = "https://files.pythonhosted.org/packages/3f/be/8dd0a927c559b37d7a6c8ab79034fd167dcc1f851595f2e641ad62be8643/protobuf-6.32.1-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:2f5b80a49e1eb7b86d85fcd23fe92df154b9730a725c3b38c4e43b9d77018bf4", size = 322874, upload-time = "2025-09-11T21:38:35.509Z" }, - { url = "https://files.pythonhosted.org/packages/5c/f6/88d77011b605ef979aace37b7703e4eefad066f7e84d935e5a696515c2dd/protobuf-6.32.1-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:b1864818300c297265c83a4982fd3169f97122c299f56a56e2445c3698d34710", size = 322013, upload-time = "2025-09-11T21:38:37.017Z" }, - { url = "https://files.pythonhosted.org/packages/97/b7/15cc7d93443d6c6a84626ae3258a91f4c6ac8c0edd5df35ea7658f71b79c/protobuf-6.32.1-py3-none-any.whl", hash = "sha256:2601b779fc7d32a866c6b4404f9d42a3f67c5b9f3f15b4db3cccabe06b95c346", size = 169289, upload-time = "2025-09-11T21:38:41.234Z" }, -] - -[[package]] -name = "psutil" -version = "7.1.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/b3/31/4723d756b59344b643542936e37a31d1d3204bcdc42a7daa8ee9eb06fb50/psutil-7.1.0.tar.gz", hash = "sha256:655708b3c069387c8b77b072fc429a57d0e214221d01c0a772df7dfedcb3bcd2", size = 497660, upload-time = "2025-09-17T20:14:52.902Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/46/62/ce4051019ee20ce0ed74432dd73a5bb087a6704284a470bb8adff69a0932/psutil-7.1.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:76168cef4397494250e9f4e73eb3752b146de1dd950040b29186d0cce1d5ca13", size = 245242, upload-time = "2025-09-17T20:14:56.126Z" }, - { url = "https://files.pythonhosted.org/packages/38/61/f76959fba841bf5b61123fbf4b650886dc4094c6858008b5bf73d9057216/psutil-7.1.0-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:5d007560c8c372efdff9e4579c2846d71de737e4605f611437255e81efcca2c5", size = 246682, upload-time = "2025-09-17T20:14:58.25Z" }, - { url = "https://files.pythonhosted.org/packages/88/7a/37c99d2e77ec30d63398ffa6a660450b8a62517cabe44b3e9bae97696e8d/psutil-7.1.0-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:22e4454970b32472ce7deaa45d045b34d3648ce478e26a04c7e858a0a6e75ff3", size = 287994, upload-time = "2025-09-17T20:14:59.901Z" }, - { url = "https://files.pythonhosted.org/packages/9d/de/04c8c61232f7244aa0a4b9a9fbd63a89d5aeaf94b2fc9d1d16e2faa5cbb0/psutil-7.1.0-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c70e113920d51e89f212dd7be06219a9b88014e63a4cec69b684c327bc474e3", size = 291163, upload-time = 
"2025-09-17T20:15:01.481Z" }, - { url = "https://files.pythonhosted.org/packages/f4/58/c4f976234bf6d4737bc8c02a81192f045c307b72cf39c9e5c5a2d78927f6/psutil-7.1.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7d4a113425c037300de3ac8b331637293da9be9713855c4fc9d2d97436d7259d", size = 293625, upload-time = "2025-09-17T20:15:04.492Z" }, - { url = "https://files.pythonhosted.org/packages/79/87/157c8e7959ec39ced1b11cc93c730c4fb7f9d408569a6c59dbd92ceb35db/psutil-7.1.0-cp37-abi3-win32.whl", hash = "sha256:09ad740870c8d219ed8daae0ad3b726d3bf9a028a198e7f3080f6a1888b99bca", size = 244812, upload-time = "2025-09-17T20:15:07.462Z" }, - { url = "https://files.pythonhosted.org/packages/bf/e9/b44c4f697276a7a95b8e94d0e320a7bf7f3318521b23de69035540b39838/psutil-7.1.0-cp37-abi3-win_amd64.whl", hash = "sha256:57f5e987c36d3146c0dd2528cd42151cf96cd359b9d67cfff836995cc5df9a3d", size = 247965, upload-time = "2025-09-17T20:15:09.673Z" }, - { url = "https://files.pythonhosted.org/packages/26/65/1070a6e3c036f39142c2820c4b52e9243246fcfc3f96239ac84472ba361e/psutil-7.1.0-cp37-abi3-win_arm64.whl", hash = "sha256:6937cb68133e7c97b6cc9649a570c9a18ba0efebed46d8c5dae4c07fa1b67a07", size = 244971, upload-time = "2025-09-17T20:15:12.262Z" }, -] - -[[package]] -name = "ptyprocess" -version = "0.7.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/20/e5/16ff212c1e452235a90aeb09066144d0c5a6a8c0834397e03f5224495c4e/ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220", size = 70762, upload-time = "2020-12-28T15:15:30.155Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/22/a6/858897256d0deac81a172289110f31629fc4cee19b6f01283303e18c8db3/ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35", size = 13993, upload-time = "2020-12-28T15:15:28.35Z" }, -] - -[[package]] -name = "pure-eval" -version = "0.2.3" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/cd/05/0a34433a064256a578f1783a10da6df098ceaa4a57bbeaa96a6c0352786b/pure_eval-0.2.3.tar.gz", hash = "sha256:5f4e983f40564c576c7c8635ae88db5956bb2229d7e9237d03b3c0b0190eaf42", size = 19752, upload-time = "2024-07-21T12:58:21.801Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/8e/37/efad0257dc6e593a18957422533ff0f87ede7c9c6ea010a2177d738fb82f/pure_eval-0.2.3-py3-none-any.whl", hash = "sha256:1db8e35b67b3d218d818ae653e27f06c3aa420901fa7b081ca98cbedc874e0d0", size = 11842, upload-time = "2024-07-21T12:58:20.04Z" }, -] - -[[package]] -name = "pyasn1" -version = "0.6.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/ba/e9/01f1a64245b89f039897cb0130016d79f77d52669aae6ee7b159a6c4c018/pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034", size = 145322, upload-time = "2024-09-10T22:41:42.55Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/c8/f1/d6a797abb14f6283c0ddff96bbdd46937f64122b8c925cab503dd37f8214/pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629", size = 83135, upload-time = "2024-09-11T16:00:36.122Z" }, -] - -[[package]] -name = "pyasn1-modules" -version = "0.4.2" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "pyasn1" }, -] -sdist = { url = 
"https://files.pythonhosted.org/packages/e9/e6/78ebbb10a8c8e4b61a59249394a4a594c1a7af95593dc933a349c8d00964/pyasn1_modules-0.4.2.tar.gz", hash = "sha256:677091de870a80aae844b1ca6134f54652fa2c8c5a52aa396440ac3106e941e6", size = 307892, upload-time = "2025-03-28T02:41:22.17Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/47/8d/d529b5d697919ba8c11ad626e835d4039be708a35b0d22de83a269a6682c/pyasn1_modules-0.4.2-py3-none-any.whl", hash = "sha256:29253a9207ce32b64c3ac6600edc75368f98473906e8fd1043bd6b5b1de2c14a", size = 181259, upload-time = "2025-03-28T02:41:19.028Z" }, -] - -[[package]] -name = "pycparser" -version = "2.23" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/fe/cf/d2d3b9f5699fb1e4615c8e32ff220203e43b248e1dfcc6736ad9057731ca/pycparser-2.23.tar.gz", hash = "sha256:78816d4f24add8f10a06d6f05b4d424ad9e96cfebf68a4ddc99c65c0720d00c2", size = 173734, upload-time = "2025-09-09T13:23:47.91Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/a0/e3/59cd50310fc9b59512193629e1984c1f95e5c8ae6e5d8c69532ccc65a7fe/pycparser-2.23-py3-none-any.whl", hash = "sha256:e5c6e8d3fbad53479cab09ac03729e0a9faf2bee3db8208a550daf5af81a5934", size = 118140, upload-time = "2025-09-09T13:23:46.651Z" }, -] - -[[package]] -name = "pydantic" -version = "2.11.9" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "annotated-types" }, - { name = "pydantic-core" }, - { name = "typing-extensions" }, - { name = "typing-inspection" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/ff/5d/09a551ba512d7ca404d785072700d3f6727a02f6f3c24ecfd081c7cf0aa8/pydantic-2.11.9.tar.gz", hash = "sha256:6b8ffda597a14812a7975c90b82a8a2e777d9257aba3453f973acd3c032a18e2", size = 788495, upload-time = "2025-09-13T11:26:39.325Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/3e/d3/108f2006987c58e76691d5ae5d200dd3e0f532cb4e5fa3560751c3a1feba/pydantic-2.11.9-py3-none-any.whl", hash = "sha256:c42dd626f5cfc1c6950ce6205ea58c93efa406da65f479dcb4029d5934857da2", size = 444855, upload-time = "2025-09-13T11:26:36.909Z" }, -] - -[[package]] -name = "pydantic-core" -version = "2.33.2" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "typing-extensions" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/ad/88/5f2260bdfae97aabf98f1778d43f69574390ad787afb646292a638c923d4/pydantic_core-2.33.2.tar.gz", hash = "sha256:7cb8bc3605c29176e1b105350d2e6474142d7c1bd1d9327c4a9bdb46bf827acc", size = 435195, upload-time = "2025-04-23T18:33:52.104Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/18/8a/2b41c97f554ec8c71f2a8a5f85cb56a8b0956addfe8b0efb5b3d77e8bdc3/pydantic_core-2.33.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a7ec89dc587667f22b6a0b6579c249fca9026ce7c333fc142ba42411fa243cdc", size = 2009000, upload-time = "2025-04-23T18:31:25.863Z" }, - { url = "https://files.pythonhosted.org/packages/a1/02/6224312aacb3c8ecbaa959897af57181fb6cf3a3d7917fd44d0f2917e6f2/pydantic_core-2.33.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3c6db6e52c6d70aa0d00d45cdb9b40f0433b96380071ea80b09277dba021ddf7", size = 1847996, upload-time = "2025-04-23T18:31:27.341Z" }, - { url = "https://files.pythonhosted.org/packages/d6/46/6dcdf084a523dbe0a0be59d054734b86a981726f221f4562aed313dbcb49/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e61206137cbc65e6d5256e1166f88331d3b6238e082d9f74613b9b765fb9025", size = 1880957, 
upload-time = "2025-04-23T18:31:28.956Z" }, - { url = "https://files.pythonhosted.org/packages/ec/6b/1ec2c03837ac00886ba8160ce041ce4e325b41d06a034adbef11339ae422/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eb8c529b2819c37140eb51b914153063d27ed88e3bdc31b71198a198e921e011", size = 1964199, upload-time = "2025-04-23T18:31:31.025Z" }, - { url = "https://files.pythonhosted.org/packages/2d/1d/6bf34d6adb9debd9136bd197ca72642203ce9aaaa85cfcbfcf20f9696e83/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c52b02ad8b4e2cf14ca7b3d918f3eb0ee91e63b3167c32591e57c4317e134f8f", size = 2120296, upload-time = "2025-04-23T18:31:32.514Z" }, - { url = "https://files.pythonhosted.org/packages/e0/94/2bd0aaf5a591e974b32a9f7123f16637776c304471a0ab33cf263cf5591a/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:96081f1605125ba0855dfda83f6f3df5ec90c61195421ba72223de35ccfb2f88", size = 2676109, upload-time = "2025-04-23T18:31:33.958Z" }, - { url = "https://files.pythonhosted.org/packages/f9/41/4b043778cf9c4285d59742281a769eac371b9e47e35f98ad321349cc5d61/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f57a69461af2a5fa6e6bbd7a5f60d3b7e6cebb687f55106933188e79ad155c1", size = 2002028, upload-time = "2025-04-23T18:31:39.095Z" }, - { url = "https://files.pythonhosted.org/packages/cb/d5/7bb781bf2748ce3d03af04d5c969fa1308880e1dca35a9bd94e1a96a922e/pydantic_core-2.33.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:572c7e6c8bb4774d2ac88929e3d1f12bc45714ae5ee6d9a788a9fb35e60bb04b", size = 2100044, upload-time = "2025-04-23T18:31:41.034Z" }, - { url = "https://files.pythonhosted.org/packages/fe/36/def5e53e1eb0ad896785702a5bbfd25eed546cdcf4087ad285021a90ed53/pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:db4b41f9bd95fbe5acd76d89920336ba96f03e149097365afe1cb092fceb89a1", size = 2058881, upload-time = "2025-04-23T18:31:42.757Z" }, - { url = "https://files.pythonhosted.org/packages/01/6c/57f8d70b2ee57fc3dc8b9610315949837fa8c11d86927b9bb044f8705419/pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:fa854f5cf7e33842a892e5c73f45327760bc7bc516339fda888c75ae60edaeb6", size = 2227034, upload-time = "2025-04-23T18:31:44.304Z" }, - { url = "https://files.pythonhosted.org/packages/27/b9/9c17f0396a82b3d5cbea4c24d742083422639e7bb1d5bf600e12cb176a13/pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5f483cfb75ff703095c59e365360cb73e00185e01aaea067cd19acffd2ab20ea", size = 2234187, upload-time = "2025-04-23T18:31:45.891Z" }, - { url = "https://files.pythonhosted.org/packages/b0/6a/adf5734ffd52bf86d865093ad70b2ce543415e0e356f6cacabbc0d9ad910/pydantic_core-2.33.2-cp312-cp312-win32.whl", hash = "sha256:9cb1da0f5a471435a7bc7e439b8a728e8b61e59784b2af70d7c169f8dd8ae290", size = 1892628, upload-time = "2025-04-23T18:31:47.819Z" }, - { url = "https://files.pythonhosted.org/packages/43/e4/5479fecb3606c1368d496a825d8411e126133c41224c1e7238be58b87d7e/pydantic_core-2.33.2-cp312-cp312-win_amd64.whl", hash = "sha256:f941635f2a3d96b2973e867144fde513665c87f13fe0e193c158ac51bfaaa7b2", size = 1955866, upload-time = "2025-04-23T18:31:49.635Z" }, - { url = "https://files.pythonhosted.org/packages/0d/24/8b11e8b3e2be9dd82df4b11408a67c61bb4dc4f8e11b5b0fc888b38118b5/pydantic_core-2.33.2-cp312-cp312-win_arm64.whl", hash = 
"sha256:cca3868ddfaccfbc4bfb1d608e2ccaaebe0ae628e1416aeb9c4d88c001bb45ab", size = 1888894, upload-time = "2025-04-23T18:31:51.609Z" }, - { url = "https://files.pythonhosted.org/packages/46/8c/99040727b41f56616573a28771b1bfa08a3d3fe74d3d513f01251f79f172/pydantic_core-2.33.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:1082dd3e2d7109ad8b7da48e1d4710c8d06c253cbc4a27c1cff4fbcaa97a9e3f", size = 2015688, upload-time = "2025-04-23T18:31:53.175Z" }, - { url = "https://files.pythonhosted.org/packages/3a/cc/5999d1eb705a6cefc31f0b4a90e9f7fc400539b1a1030529700cc1b51838/pydantic_core-2.33.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f517ca031dfc037a9c07e748cefd8d96235088b83b4f4ba8939105d20fa1dcd6", size = 1844808, upload-time = "2025-04-23T18:31:54.79Z" }, - { url = "https://files.pythonhosted.org/packages/6f/5e/a0a7b8885c98889a18b6e376f344da1ef323d270b44edf8174d6bce4d622/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a9f2c9dd19656823cb8250b0724ee9c60a82f3cdf68a080979d13092a3b0fef", size = 1885580, upload-time = "2025-04-23T18:31:57.393Z" }, - { url = "https://files.pythonhosted.org/packages/3b/2a/953581f343c7d11a304581156618c3f592435523dd9d79865903272c256a/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b0a451c263b01acebe51895bfb0e1cc842a5c666efe06cdf13846c7418caa9a", size = 1973859, upload-time = "2025-04-23T18:31:59.065Z" }, - { url = "https://files.pythonhosted.org/packages/e6/55/f1a813904771c03a3f97f676c62cca0c0a4138654107c1b61f19c644868b/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ea40a64d23faa25e62a70ad163571c0b342b8bf66d5fa612ac0dec4f069d916", size = 2120810, upload-time = "2025-04-23T18:32:00.78Z" }, - { url = "https://files.pythonhosted.org/packages/aa/c3/053389835a996e18853ba107a63caae0b9deb4a276c6b472931ea9ae6e48/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fb2d542b4d66f9470e8065c5469ec676978d625a8b7a363f07d9a501a9cb36a", size = 2676498, upload-time = "2025-04-23T18:32:02.418Z" }, - { url = "https://files.pythonhosted.org/packages/eb/3c/f4abd740877a35abade05e437245b192f9d0ffb48bbbbd708df33d3cda37/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdac5d6ffa1b5a83bca06ffe7583f5576555e6c8b3a91fbd25ea7780f825f7d", size = 2000611, upload-time = "2025-04-23T18:32:04.152Z" }, - { url = "https://files.pythonhosted.org/packages/59/a7/63ef2fed1837d1121a894d0ce88439fe3e3b3e48c7543b2a4479eb99c2bd/pydantic_core-2.33.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04a1a413977ab517154eebb2d326da71638271477d6ad87a769102f7c2488c56", size = 2107924, upload-time = "2025-04-23T18:32:06.129Z" }, - { url = "https://files.pythonhosted.org/packages/04/8f/2551964ef045669801675f1cfc3b0d74147f4901c3ffa42be2ddb1f0efc4/pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c8e7af2f4e0194c22b5b37205bfb293d166a7344a5b0d0eaccebc376546d77d5", size = 2063196, upload-time = "2025-04-23T18:32:08.178Z" }, - { url = "https://files.pythonhosted.org/packages/26/bd/d9602777e77fc6dbb0c7db9ad356e9a985825547dce5ad1d30ee04903918/pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:5c92edd15cd58b3c2d34873597a1e20f13094f59cf88068adb18947df5455b4e", size = 2236389, upload-time = "2025-04-23T18:32:10.242Z" }, - { url = 
"https://files.pythonhosted.org/packages/42/db/0e950daa7e2230423ab342ae918a794964b053bec24ba8af013fc7c94846/pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:65132b7b4a1c0beded5e057324b7e16e10910c106d43675d9bd87d4f38dde162", size = 2239223, upload-time = "2025-04-23T18:32:12.382Z" }, - { url = "https://files.pythonhosted.org/packages/58/4d/4f937099c545a8a17eb52cb67fe0447fd9a373b348ccfa9a87f141eeb00f/pydantic_core-2.33.2-cp313-cp313-win32.whl", hash = "sha256:52fb90784e0a242bb96ec53f42196a17278855b0f31ac7c3cc6f5c1ec4811849", size = 1900473, upload-time = "2025-04-23T18:32:14.034Z" }, - { url = "https://files.pythonhosted.org/packages/a0/75/4a0a9bac998d78d889def5e4ef2b065acba8cae8c93696906c3a91f310ca/pydantic_core-2.33.2-cp313-cp313-win_amd64.whl", hash = "sha256:c083a3bdd5a93dfe480f1125926afcdbf2917ae714bdb80b36d34318b2bec5d9", size = 1955269, upload-time = "2025-04-23T18:32:15.783Z" }, - { url = "https://files.pythonhosted.org/packages/f9/86/1beda0576969592f1497b4ce8e7bc8cbdf614c352426271b1b10d5f0aa64/pydantic_core-2.33.2-cp313-cp313-win_arm64.whl", hash = "sha256:e80b087132752f6b3d714f041ccf74403799d3b23a72722ea2e6ba2e892555b9", size = 1893921, upload-time = "2025-04-23T18:32:18.473Z" }, - { url = "https://files.pythonhosted.org/packages/a4/7d/e09391c2eebeab681df2b74bfe6c43422fffede8dc74187b2b0bf6fd7571/pydantic_core-2.33.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:61c18fba8e5e9db3ab908620af374db0ac1baa69f0f32df4f61ae23f15e586ac", size = 1806162, upload-time = "2025-04-23T18:32:20.188Z" }, - { url = "https://files.pythonhosted.org/packages/f1/3d/847b6b1fed9f8ed3bb95a9ad04fbd0b212e832d4f0f50ff4d9ee5a9f15cf/pydantic_core-2.33.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95237e53bb015f67b63c91af7518a62a8660376a6a0db19b89acc77a4d6199f5", size = 1981560, upload-time = "2025-04-23T18:32:22.354Z" }, - { url = "https://files.pythonhosted.org/packages/6f/9a/e73262f6c6656262b5fdd723ad90f518f579b7bc8622e43a942eec53c938/pydantic_core-2.33.2-cp313-cp313t-win_amd64.whl", hash = "sha256:c2fc0a768ef76c15ab9238afa6da7f69895bb5d1ee83aeea2e3509af4472d0b9", size = 1935777, upload-time = "2025-04-23T18:32:25.088Z" }, -] - -[[package]] -name = "pydantic-settings" -version = "2.11.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "pydantic" }, - { name = "python-dotenv" }, - { name = "typing-inspection" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/20/c5/dbbc27b814c71676593d1c3f718e6cd7d4f00652cefa24b75f7aa3efb25e/pydantic_settings-2.11.0.tar.gz", hash = "sha256:d0e87a1c7d33593beb7194adb8470fc426e95ba02af83a0f23474a04c9a08180", size = 188394, upload-time = "2025-09-24T14:19:11.764Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/83/d6/887a1ff844e64aa823fb4905978d882a633cfe295c32eacad582b78a7d8b/pydantic_settings-2.11.0-py3-none-any.whl", hash = "sha256:fe2cea3413b9530d10f3a5875adffb17ada5c1e1bab0b2885546d7310415207c", size = 48608, upload-time = "2025-09-24T14:19:10.015Z" }, -] - -[[package]] -name = "pygments" -version = "2.19.2" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631, upload-time = "2025-06-21T13:39:12.283Z" } -wheels = [ - { url = 
"https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217, upload-time = "2025-06-21T13:39:07.939Z" }, -] - -[[package]] -name = "pyjwt" -version = "2.10.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/e7/46/bd74733ff231675599650d3e47f361794b22ef3e3770998dda30d3b63726/pyjwt-2.10.1.tar.gz", hash = "sha256:3cc5772eb20009233caf06e9d8a0577824723b44e6648ee0a2aedb6cf9381953", size = 87785, upload-time = "2024-11-28T03:43:29.933Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/61/ad/689f02752eeec26aed679477e80e632ef1b682313be70793d798c1d5fc8f/PyJWT-2.10.1-py3-none-any.whl", hash = "sha256:dcdd193e30abefd5debf142f9adfcdd2b58004e644f25406ffaebd50bd98dacb", size = 22997, upload-time = "2024-11-28T03:43:27.893Z" }, -] - -[[package]] -name = "pytest" -version = "8.4.2" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "colorama", marker = "sys_platform == 'win32'" }, - { name = "iniconfig" }, - { name = "packaging" }, - { name = "pluggy" }, - { name = "pygments" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/a3/5c/00a0e072241553e1a7496d638deababa67c5058571567b92a7eaa258397c/pytest-8.4.2.tar.gz", hash = "sha256:86c0d0b93306b961d58d62a4db4879f27fe25513d4b969df351abdddb3c30e01", size = 1519618, upload-time = "2025-09-04T14:34:22.711Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/a8/a4/20da314d277121d6534b3a980b29035dcd51e6744bd79075a6ce8fa4eb8d/pytest-8.4.2-py3-none-any.whl", hash = "sha256:872f880de3fc3a5bdc88a11b39c9710c3497a547cfa9320bc3c5e62fbf272e79", size = 365750, upload-time = "2025-09-04T14:34:20.226Z" }, -] - -[[package]] -name = "pytest-asyncio" -version = "1.2.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "pytest" }, - { name = "typing-extensions", marker = "python_full_version < '3.13'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/42/86/9e3c5f48f7b7b638b216e4b9e645f54d199d7abbbab7a64a13b4e12ba10f/pytest_asyncio-1.2.0.tar.gz", hash = "sha256:c609a64a2a8768462d0c99811ddb8bd2583c33fd33cf7f21af1c142e824ffb57", size = 50119, upload-time = "2025-09-12T07:33:53.816Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/04/93/2fa34714b7a4ae72f2f8dad66ba17dd9a2c793220719e736dda28b7aec27/pytest_asyncio-1.2.0-py3-none-any.whl", hash = "sha256:8e17ae5e46d8e7efe51ab6494dd2010f4ca8dae51652aa3c8d55acf50bfb2e99", size = 15095, upload-time = "2025-09-12T07:33:52.639Z" }, -] - -[[package]] -name = "python-dateutil" -version = "2.9.0.post0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "six" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/66/c0/0c8b6ad9f17a802ee498c46e004a0eb49bc148f2fd230864601a86dcf6db/python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", size = 342432, upload-time = "2024-03-01T18:36:20.211Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892, upload-time = "2024-03-01T18:36:18.57Z" }, -] - -[[package]] -name = "python-dotenv" -version = "1.1.1" -source = { 
registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/f6/b0/4bc07ccd3572a2f9df7e6782f52b0c6c90dcbb803ac4a167702d7d0dfe1e/python_dotenv-1.1.1.tar.gz", hash = "sha256:a8a6399716257f45be6a007360200409fce5cda2661e3dec71d23dc15f6189ab", size = 41978, upload-time = "2025-06-24T04:21:07.341Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/5f/ed/539768cf28c661b5b068d66d96a2f155c4971a5d55684a514c1a0e0dec2f/python_dotenv-1.1.1-py3-none-any.whl", hash = "sha256:31f23644fe2602f88ff55e1f5c79ba497e01224ee7737937930c448e4d0e24dc", size = 20556, upload-time = "2025-06-24T04:21:06.073Z" }, -] - -[[package]] -name = "python-multipart" -version = "0.0.20" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/f3/87/f44d7c9f274c7ee665a29b885ec97089ec5dc034c7f3fafa03da9e39a09e/python_multipart-0.0.20.tar.gz", hash = "sha256:8dd0cab45b8e23064ae09147625994d090fa46f5b0d1e13af944c331a7fa9d13", size = 37158, upload-time = "2024-12-16T19:45:46.972Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/45/58/38b5afbc1a800eeea951b9285d3912613f2603bdf897a4ab0f4bd7f405fc/python_multipart-0.0.20-py3-none-any.whl", hash = "sha256:8a62d3a8335e06589fe01f2a3e178cdcc632f3fbe0d492ad9ee0ec35aab1f104", size = 24546, upload-time = "2024-12-16T19:45:44.423Z" }, -] - -[[package]] -name = "python-on-whales" -version = "0.73.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "pydantic" }, - { name = "requests" }, - { name = "tqdm" }, - { name = "typer" }, - { name = "typing-extensions" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/40/c3/f57dd3e7d20af8a0399bb87471eac4698e0686b04073eef4bc291204a709/python_on_whales-0.73.0.tar.gz", hash = "sha256:c76bf3633550e5c948fb4215918364f45efaddb2e09df5ddd169132f7ffdc249", size = 112019, upload-time = "2024-09-06T10:23:12.846Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/d4/e9/ea125eb8954f64e76485aec5c63ca6a5b977e0127a5f3896993f1692166e/python_on_whales-0.73.0-py3-none-any.whl", hash = "sha256:66f31749c2544a0aacb4e3ba03772c2e9227235ea1aecd58aa7a4cdcf26f559a", size = 118125, upload-time = "2024-09-06T10:23:10.856Z" }, -] - -[[package]] -name = "pywin32" -version = "311" -source = { registry = "https://pypi.org/simple" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/e7/ab/01ea1943d4eba0f850c3c61e78e8dd59757ff815ff3ccd0a84de5f541f42/pywin32-311-cp312-cp312-win32.whl", hash = "sha256:750ec6e621af2b948540032557b10a2d43b0cee2ae9758c54154d711cc852d31", size = 8706543, upload-time = "2025-07-14T20:13:20.765Z" }, - { url = "https://files.pythonhosted.org/packages/d1/a8/a0e8d07d4d051ec7502cd58b291ec98dcc0c3fff027caad0470b72cfcc2f/pywin32-311-cp312-cp312-win_amd64.whl", hash = "sha256:b8c095edad5c211ff31c05223658e71bf7116daa0ecf3ad85f3201ea3190d067", size = 9495040, upload-time = "2025-07-14T20:13:22.543Z" }, - { url = "https://files.pythonhosted.org/packages/ba/3a/2ae996277b4b50f17d61f0603efd8253cb2d79cc7ae159468007b586396d/pywin32-311-cp312-cp312-win_arm64.whl", hash = "sha256:e286f46a9a39c4a18b319c28f59b61de793654af2f395c102b4f819e584b5852", size = 8710102, upload-time = "2025-07-14T20:13:24.682Z" }, - { url = "https://files.pythonhosted.org/packages/a5/be/3fd5de0979fcb3994bfee0d65ed8ca9506a8a1260651b86174f6a86f52b3/pywin32-311-cp313-cp313-win32.whl", hash = "sha256:f95ba5a847cba10dd8c4d8fefa9f2a6cf283b8b88ed6178fa8a6c1ab16054d0d", size = 8705700, upload-time = "2025-07-14T20:13:26.471Z" }, - { 
url = "https://files.pythonhosted.org/packages/e3/28/e0a1909523c6890208295a29e05c2adb2126364e289826c0a8bc7297bd5c/pywin32-311-cp313-cp313-win_amd64.whl", hash = "sha256:718a38f7e5b058e76aee1c56ddd06908116d35147e133427e59a3983f703a20d", size = 9494700, upload-time = "2025-07-14T20:13:28.243Z" }, - { url = "https://files.pythonhosted.org/packages/04/bf/90339ac0f55726dce7d794e6d79a18a91265bdf3aa70b6b9ca52f35e022a/pywin32-311-cp313-cp313-win_arm64.whl", hash = "sha256:7b4075d959648406202d92a2310cb990fea19b535c7f4a78d3f5e10b926eeb8a", size = 8709318, upload-time = "2025-07-14T20:13:30.348Z" }, - { url = "https://files.pythonhosted.org/packages/c9/31/097f2e132c4f16d99a22bfb777e0fd88bd8e1c634304e102f313af69ace5/pywin32-311-cp314-cp314-win32.whl", hash = "sha256:b7a2c10b93f8986666d0c803ee19b5990885872a7de910fc460f9b0c2fbf92ee", size = 8840714, upload-time = "2025-07-14T20:13:32.449Z" }, - { url = "https://files.pythonhosted.org/packages/90/4b/07c77d8ba0e01349358082713400435347df8426208171ce297da32c313d/pywin32-311-cp314-cp314-win_amd64.whl", hash = "sha256:3aca44c046bd2ed8c90de9cb8427f581c479e594e99b5c0bb19b29c10fd6cb87", size = 9656800, upload-time = "2025-07-14T20:13:34.312Z" }, - { url = "https://files.pythonhosted.org/packages/c0/d2/21af5c535501a7233e734b8af901574572da66fcc254cb35d0609c9080dd/pywin32-311-cp314-cp314-win_arm64.whl", hash = "sha256:a508e2d9025764a8270f93111a970e1d0fbfc33f4153b388bb649b7eec4f9b42", size = 8932540, upload-time = "2025-07-14T20:13:36.379Z" }, -] - -[[package]] -name = "pyyaml" -version = "6.0.3" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/05/8e/961c0007c59b8dd7729d542c61a4d537767a59645b82a0b521206e1e25c2/pyyaml-6.0.3.tar.gz", hash = "sha256:d76623373421df22fb4cf8817020cbb7ef15c725b9d5e45f17e189bfc384190f", size = 130960, upload-time = "2025-09-25T21:33:16.546Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/d1/33/422b98d2195232ca1826284a76852ad5a86fe23e31b009c9886b2d0fb8b2/pyyaml-6.0.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7f047e29dcae44602496db43be01ad42fc6f1cc0d8cd6c83d342306c32270196", size = 182063, upload-time = "2025-09-25T21:32:11.445Z" }, - { url = "https://files.pythonhosted.org/packages/89/a0/6cf41a19a1f2f3feab0e9c0b74134aa2ce6849093d5517a0c550fe37a648/pyyaml-6.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:fc09d0aa354569bc501d4e787133afc08552722d3ab34836a80547331bb5d4a0", size = 173973, upload-time = "2025-09-25T21:32:12.492Z" }, - { url = "https://files.pythonhosted.org/packages/ed/23/7a778b6bd0b9a8039df8b1b1d80e2e2ad78aa04171592c8a5c43a56a6af4/pyyaml-6.0.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9149cad251584d5fb4981be1ecde53a1ca46c891a79788c0df828d2f166bda28", size = 775116, upload-time = "2025-09-25T21:32:13.652Z" }, - { url = "https://files.pythonhosted.org/packages/65/30/d7353c338e12baef4ecc1b09e877c1970bd3382789c159b4f89d6a70dc09/pyyaml-6.0.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5fdec68f91a0c6739b380c83b951e2c72ac0197ace422360e6d5a959d8d97b2c", size = 844011, upload-time = "2025-09-25T21:32:15.21Z" }, - { url = "https://files.pythonhosted.org/packages/8b/9d/b3589d3877982d4f2329302ef98a8026e7f4443c765c46cfecc8858c6b4b/pyyaml-6.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ba1cc08a7ccde2d2ec775841541641e4548226580ab850948cbfda66a1befcdc", size = 807870, upload-time = 
"2025-09-25T21:32:16.431Z" }, - { url = "https://files.pythonhosted.org/packages/05/c0/b3be26a015601b822b97d9149ff8cb5ead58c66f981e04fedf4e762f4bd4/pyyaml-6.0.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8dc52c23056b9ddd46818a57b78404882310fb473d63f17b07d5c40421e47f8e", size = 761089, upload-time = "2025-09-25T21:32:17.56Z" }, - { url = "https://files.pythonhosted.org/packages/be/8e/98435a21d1d4b46590d5459a22d88128103f8da4c2d4cb8f14f2a96504e1/pyyaml-6.0.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:41715c910c881bc081f1e8872880d3c650acf13dfa8214bad49ed4cede7c34ea", size = 790181, upload-time = "2025-09-25T21:32:18.834Z" }, - { url = "https://files.pythonhosted.org/packages/74/93/7baea19427dcfbe1e5a372d81473250b379f04b1bd3c4c5ff825e2327202/pyyaml-6.0.3-cp312-cp312-win32.whl", hash = "sha256:96b533f0e99f6579b3d4d4995707cf36df9100d67e0c8303a0c55b27b5f99bc5", size = 137658, upload-time = "2025-09-25T21:32:20.209Z" }, - { url = "https://files.pythonhosted.org/packages/86/bf/899e81e4cce32febab4fb42bb97dcdf66bc135272882d1987881a4b519e9/pyyaml-6.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:5fcd34e47f6e0b794d17de1b4ff496c00986e1c83f7ab2fb8fcfe9616ff7477b", size = 154003, upload-time = "2025-09-25T21:32:21.167Z" }, - { url = "https://files.pythonhosted.org/packages/1a/08/67bd04656199bbb51dbed1439b7f27601dfb576fb864099c7ef0c3e55531/pyyaml-6.0.3-cp312-cp312-win_arm64.whl", hash = "sha256:64386e5e707d03a7e172c0701abfb7e10f0fb753ee1d773128192742712a98fd", size = 140344, upload-time = "2025-09-25T21:32:22.617Z" }, - { url = "https://files.pythonhosted.org/packages/d1/11/0fd08f8192109f7169db964b5707a2f1e8b745d4e239b784a5a1dd80d1db/pyyaml-6.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8da9669d359f02c0b91ccc01cac4a67f16afec0dac22c2ad09f46bee0697eba8", size = 181669, upload-time = "2025-09-25T21:32:23.673Z" }, - { url = "https://files.pythonhosted.org/packages/b1/16/95309993f1d3748cd644e02e38b75d50cbc0d9561d21f390a76242ce073f/pyyaml-6.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:2283a07e2c21a2aa78d9c4442724ec1eb15f5e42a723b99cb3d822d48f5f7ad1", size = 173252, upload-time = "2025-09-25T21:32:25.149Z" }, - { url = "https://files.pythonhosted.org/packages/50/31/b20f376d3f810b9b2371e72ef5adb33879b25edb7a6d072cb7ca0c486398/pyyaml-6.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ee2922902c45ae8ccada2c5b501ab86c36525b883eff4255313a253a3160861c", size = 767081, upload-time = "2025-09-25T21:32:26.575Z" }, - { url = "https://files.pythonhosted.org/packages/49/1e/a55ca81e949270d5d4432fbbd19dfea5321eda7c41a849d443dc92fd1ff7/pyyaml-6.0.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a33284e20b78bd4a18c8c2282d549d10bc8408a2a7ff57653c0cf0b9be0afce5", size = 841159, upload-time = "2025-09-25T21:32:27.727Z" }, - { url = "https://files.pythonhosted.org/packages/74/27/e5b8f34d02d9995b80abcef563ea1f8b56d20134d8f4e5e81733b1feceb2/pyyaml-6.0.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0f29edc409a6392443abf94b9cf89ce99889a1dd5376d94316ae5145dfedd5d6", size = 801626, upload-time = "2025-09-25T21:32:28.878Z" }, - { url = "https://files.pythonhosted.org/packages/f9/11/ba845c23988798f40e52ba45f34849aa8a1f2d4af4b798588010792ebad6/pyyaml-6.0.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f7057c9a337546edc7973c0d3ba84ddcdf0daa14533c2065749c9075001090e6", size = 753613, upload-time = "2025-09-25T21:32:30.178Z" }, - { url = 
"https://files.pythonhosted.org/packages/3d/e0/7966e1a7bfc0a45bf0a7fb6b98ea03fc9b8d84fa7f2229e9659680b69ee3/pyyaml-6.0.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:eda16858a3cab07b80edaf74336ece1f986ba330fdb8ee0d6c0d68fe82bc96be", size = 794115, upload-time = "2025-09-25T21:32:31.353Z" }, - { url = "https://files.pythonhosted.org/packages/de/94/980b50a6531b3019e45ddeada0626d45fa85cbe22300844a7983285bed3b/pyyaml-6.0.3-cp313-cp313-win32.whl", hash = "sha256:d0eae10f8159e8fdad514efdc92d74fd8d682c933a6dd088030f3834bc8e6b26", size = 137427, upload-time = "2025-09-25T21:32:32.58Z" }, - { url = "https://files.pythonhosted.org/packages/97/c9/39d5b874e8b28845e4ec2202b5da735d0199dbe5b8fb85f91398814a9a46/pyyaml-6.0.3-cp313-cp313-win_amd64.whl", hash = "sha256:79005a0d97d5ddabfeeea4cf676af11e647e41d81c9a7722a193022accdb6b7c", size = 154090, upload-time = "2025-09-25T21:32:33.659Z" }, - { url = "https://files.pythonhosted.org/packages/73/e8/2bdf3ca2090f68bb3d75b44da7bbc71843b19c9f2b9cb9b0f4ab7a5a4329/pyyaml-6.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:5498cd1645aa724a7c71c8f378eb29ebe23da2fc0d7a08071d89469bf1d2defb", size = 140246, upload-time = "2025-09-25T21:32:34.663Z" }, - { url = "https://files.pythonhosted.org/packages/9d/8c/f4bd7f6465179953d3ac9bc44ac1a8a3e6122cf8ada906b4f96c60172d43/pyyaml-6.0.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:8d1fab6bb153a416f9aeb4b8763bc0f22a5586065f86f7664fc23339fc1c1fac", size = 181814, upload-time = "2025-09-25T21:32:35.712Z" }, - { url = "https://files.pythonhosted.org/packages/bd/9c/4d95bb87eb2063d20db7b60faa3840c1b18025517ae857371c4dd55a6b3a/pyyaml-6.0.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:34d5fcd24b8445fadc33f9cf348c1047101756fd760b4dacb5c3e99755703310", size = 173809, upload-time = "2025-09-25T21:32:36.789Z" }, - { url = "https://files.pythonhosted.org/packages/92/b5/47e807c2623074914e29dabd16cbbdd4bf5e9b2db9f8090fa64411fc5382/pyyaml-6.0.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:501a031947e3a9025ed4405a168e6ef5ae3126c59f90ce0cd6f2bfc477be31b7", size = 766454, upload-time = "2025-09-25T21:32:37.966Z" }, - { url = "https://files.pythonhosted.org/packages/02/9e/e5e9b168be58564121efb3de6859c452fccde0ab093d8438905899a3a483/pyyaml-6.0.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b3bc83488de33889877a0f2543ade9f70c67d66d9ebb4ac959502e12de895788", size = 836355, upload-time = "2025-09-25T21:32:39.178Z" }, - { url = "https://files.pythonhosted.org/packages/88/f9/16491d7ed2a919954993e48aa941b200f38040928474c9e85ea9e64222c3/pyyaml-6.0.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c458b6d084f9b935061bc36216e8a69a7e293a2f1e68bf956dcd9e6cbcd143f5", size = 794175, upload-time = "2025-09-25T21:32:40.865Z" }, - { url = "https://files.pythonhosted.org/packages/dd/3f/5989debef34dc6397317802b527dbbafb2b4760878a53d4166579111411e/pyyaml-6.0.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7c6610def4f163542a622a73fb39f534f8c101d690126992300bf3207eab9764", size = 755228, upload-time = "2025-09-25T21:32:42.084Z" }, - { url = "https://files.pythonhosted.org/packages/d7/ce/af88a49043cd2e265be63d083fc75b27b6ed062f5f9fd6cdc223ad62f03e/pyyaml-6.0.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:5190d403f121660ce8d1d2c1bb2ef1bd05b5f68533fc5c2ea899bd15f4399b35", size = 789194, upload-time = "2025-09-25T21:32:43.362Z" }, - { url = 
"https://files.pythonhosted.org/packages/23/20/bb6982b26a40bb43951265ba29d4c246ef0ff59c9fdcdf0ed04e0687de4d/pyyaml-6.0.3-cp314-cp314-win_amd64.whl", hash = "sha256:4a2e8cebe2ff6ab7d1050ecd59c25d4c8bd7e6f400f5f82b96557ac0abafd0ac", size = 156429, upload-time = "2025-09-25T21:32:57.844Z" }, - { url = "https://files.pythonhosted.org/packages/f4/f4/a4541072bb9422c8a883ab55255f918fa378ecf083f5b85e87fc2b4eda1b/pyyaml-6.0.3-cp314-cp314-win_arm64.whl", hash = "sha256:93dda82c9c22deb0a405ea4dc5f2d0cda384168e466364dec6255b293923b2f3", size = 143912, upload-time = "2025-09-25T21:32:59.247Z" }, - { url = "https://files.pythonhosted.org/packages/7c/f9/07dd09ae774e4616edf6cda684ee78f97777bdd15847253637a6f052a62f/pyyaml-6.0.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:02893d100e99e03eda1c8fd5c441d8c60103fd175728e23e431db1b589cf5ab3", size = 189108, upload-time = "2025-09-25T21:32:44.377Z" }, - { url = "https://files.pythonhosted.org/packages/4e/78/8d08c9fb7ce09ad8c38ad533c1191cf27f7ae1effe5bb9400a46d9437fcf/pyyaml-6.0.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c1ff362665ae507275af2853520967820d9124984e0f7466736aea23d8611fba", size = 183641, upload-time = "2025-09-25T21:32:45.407Z" }, - { url = "https://files.pythonhosted.org/packages/7b/5b/3babb19104a46945cf816d047db2788bcaf8c94527a805610b0289a01c6b/pyyaml-6.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6adc77889b628398debc7b65c073bcb99c4a0237b248cacaf3fe8a557563ef6c", size = 831901, upload-time = "2025-09-25T21:32:48.83Z" }, - { url = "https://files.pythonhosted.org/packages/8b/cc/dff0684d8dc44da4d22a13f35f073d558c268780ce3c6ba1b87055bb0b87/pyyaml-6.0.3-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a80cb027f6b349846a3bf6d73b5e95e782175e52f22108cfa17876aaeff93702", size = 861132, upload-time = "2025-09-25T21:32:50.149Z" }, - { url = "https://files.pythonhosted.org/packages/b1/5e/f77dc6b9036943e285ba76b49e118d9ea929885becb0a29ba8a7c75e29fe/pyyaml-6.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:00c4bdeba853cc34e7dd471f16b4114f4162dc03e6b7afcc2128711f0eca823c", size = 839261, upload-time = "2025-09-25T21:32:51.808Z" }, - { url = "https://files.pythonhosted.org/packages/ce/88/a9db1376aa2a228197c58b37302f284b5617f56a5d959fd1763fb1675ce6/pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:66e1674c3ef6f541c35191caae2d429b967b99e02040f5ba928632d9a7f0f065", size = 805272, upload-time = "2025-09-25T21:32:52.941Z" }, - { url = "https://files.pythonhosted.org/packages/da/92/1446574745d74df0c92e6aa4a7b0b3130706a4142b2d1a5869f2eaa423c6/pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:16249ee61e95f858e83976573de0f5b2893b3677ba71c9dd36b9cf8be9ac6d65", size = 829923, upload-time = "2025-09-25T21:32:54.537Z" }, - { url = "https://files.pythonhosted.org/packages/f0/7a/1c7270340330e575b92f397352af856a8c06f230aa3e76f86b39d01b416a/pyyaml-6.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:4ad1906908f2f5ae4e5a8ddfce73c320c2a1429ec52eafd27138b7f1cbe341c9", size = 174062, upload-time = "2025-09-25T21:32:55.767Z" }, - { url = "https://files.pythonhosted.org/packages/f1/12/de94a39c2ef588c7e6455cfbe7343d3b2dc9d6b6b2f40c4c6565744c873d/pyyaml-6.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:ebc55a14a21cb14062aa4162f906cd962b28e2e9ea38f9b4391244cd8de4ae0b", size = 149341, upload-time = "2025-09-25T21:32:56.828Z" }, -] - -[[package]] -name = "pyzmq" -version = 
"27.1.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "cffi", marker = "implementation_name == 'pypy'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/04/0b/3c9baedbdf613ecaa7aa07027780b8867f57b6293b6ee50de316c9f3222b/pyzmq-27.1.0.tar.gz", hash = "sha256:ac0765e3d44455adb6ddbf4417dcce460fc40a05978c08efdf2948072f6db540", size = 281750, upload-time = "2025-09-08T23:10:18.157Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/92/e7/038aab64a946d535901103da16b953c8c9cc9c961dadcbf3609ed6428d23/pyzmq-27.1.0-cp312-abi3-macosx_10_15_universal2.whl", hash = "sha256:452631b640340c928fa343801b0d07eb0c3789a5ffa843f6e1a9cee0ba4eb4fc", size = 1306279, upload-time = "2025-09-08T23:08:03.807Z" }, - { url = "https://files.pythonhosted.org/packages/e8/5e/c3c49fdd0f535ef45eefcc16934648e9e59dace4a37ee88fc53f6cd8e641/pyzmq-27.1.0-cp312-abi3-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:1c179799b118e554b66da67d88ed66cd37a169f1f23b5d9f0a231b4e8d44a113", size = 895645, upload-time = "2025-09-08T23:08:05.301Z" }, - { url = "https://files.pythonhosted.org/packages/f8/e5/b0b2504cb4e903a74dcf1ebae157f9e20ebb6ea76095f6cfffea28c42ecd/pyzmq-27.1.0-cp312-abi3-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3837439b7f99e60312f0c926a6ad437b067356dc2bc2ec96eb395fd0fe804233", size = 652574, upload-time = "2025-09-08T23:08:06.828Z" }, - { url = "https://files.pythonhosted.org/packages/f8/9b/c108cdb55560eaf253f0cbdb61b29971e9fb34d9c3499b0e96e4e60ed8a5/pyzmq-27.1.0-cp312-abi3-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:43ad9a73e3da1fab5b0e7e13402f0b2fb934ae1c876c51d0afff0e7c052eca31", size = 840995, upload-time = "2025-09-08T23:08:08.396Z" }, - { url = "https://files.pythonhosted.org/packages/c2/bb/b79798ca177b9eb0825b4c9998c6af8cd2a7f15a6a1a4272c1d1a21d382f/pyzmq-27.1.0-cp312-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:0de3028d69d4cdc475bfe47a6128eb38d8bc0e8f4d69646adfbcd840facbac28", size = 1642070, upload-time = "2025-09-08T23:08:09.989Z" }, - { url = "https://files.pythonhosted.org/packages/9c/80/2df2e7977c4ede24c79ae39dcef3899bfc5f34d1ca7a5b24f182c9b7a9ca/pyzmq-27.1.0-cp312-abi3-musllinux_1_2_i686.whl", hash = "sha256:cf44a7763aea9298c0aa7dbf859f87ed7012de8bda0f3977b6fb1d96745df856", size = 2021121, upload-time = "2025-09-08T23:08:11.907Z" }, - { url = "https://files.pythonhosted.org/packages/46/bd/2d45ad24f5f5ae7e8d01525eb76786fa7557136555cac7d929880519e33a/pyzmq-27.1.0-cp312-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:f30f395a9e6fbca195400ce833c731e7b64c3919aa481af4d88c3759e0cb7496", size = 1878550, upload-time = "2025-09-08T23:08:13.513Z" }, - { url = "https://files.pythonhosted.org/packages/e6/2f/104c0a3c778d7c2ab8190e9db4f62f0b6957b53c9d87db77c284b69f33ea/pyzmq-27.1.0-cp312-abi3-win32.whl", hash = "sha256:250e5436a4ba13885494412b3da5d518cd0d3a278a1ae640e113c073a5f88edd", size = 559184, upload-time = "2025-09-08T23:08:15.163Z" }, - { url = "https://files.pythonhosted.org/packages/fc/7f/a21b20d577e4100c6a41795842028235998a643b1ad406a6d4163ea8f53e/pyzmq-27.1.0-cp312-abi3-win_amd64.whl", hash = "sha256:9ce490cf1d2ca2ad84733aa1d69ce6855372cb5ce9223802450c9b2a7cba0ccf", size = 619480, upload-time = "2025-09-08T23:08:17.192Z" }, - { url = "https://files.pythonhosted.org/packages/78/c2/c012beae5f76b72f007a9e91ee9401cb88c51d0f83c6257a03e785c81cc2/pyzmq-27.1.0-cp312-abi3-win_arm64.whl", hash = "sha256:75a2f36223f0d535a0c919e23615fc85a1e23b71f40c7eb43d7b1dedb4d8f15f", size = 552993, upload-time = 
"2025-09-08T23:08:18.926Z" }, - { url = "https://files.pythonhosted.org/packages/60/cb/84a13459c51da6cec1b7b1dc1a47e6db6da50b77ad7fd9c145842750a011/pyzmq-27.1.0-cp313-cp313-android_24_arm64_v8a.whl", hash = "sha256:93ad4b0855a664229559e45c8d23797ceac03183c7b6f5b4428152a6b06684a5", size = 1122436, upload-time = "2025-09-08T23:08:20.801Z" }, - { url = "https://files.pythonhosted.org/packages/dc/b6/94414759a69a26c3dd674570a81813c46a078767d931a6c70ad29fc585cb/pyzmq-27.1.0-cp313-cp313-android_24_x86_64.whl", hash = "sha256:fbb4f2400bfda24f12f009cba62ad5734148569ff4949b1b6ec3b519444342e6", size = 1156301, upload-time = "2025-09-08T23:08:22.47Z" }, - { url = "https://files.pythonhosted.org/packages/a5/ad/15906493fd40c316377fd8a8f6b1f93104f97a752667763c9b9c1b71d42d/pyzmq-27.1.0-cp313-cp313t-macosx_10_15_universal2.whl", hash = "sha256:e343d067f7b151cfe4eb3bb796a7752c9d369eed007b91231e817071d2c2fec7", size = 1341197, upload-time = "2025-09-08T23:08:24.286Z" }, - { url = "https://files.pythonhosted.org/packages/14/1d/d343f3ce13db53a54cb8946594e567410b2125394dafcc0268d8dda027e0/pyzmq-27.1.0-cp313-cp313t-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:08363b2011dec81c354d694bdecaef4770e0ae96b9afea70b3f47b973655cc05", size = 897275, upload-time = "2025-09-08T23:08:26.063Z" }, - { url = "https://files.pythonhosted.org/packages/69/2d/d83dd6d7ca929a2fc67d2c3005415cdf322af7751d773524809f9e585129/pyzmq-27.1.0-cp313-cp313t-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d54530c8c8b5b8ddb3318f481297441af102517602b569146185fa10b63f4fa9", size = 660469, upload-time = "2025-09-08T23:08:27.623Z" }, - { url = "https://files.pythonhosted.org/packages/3e/cd/9822a7af117f4bc0f1952dbe9ef8358eb50a24928efd5edf54210b850259/pyzmq-27.1.0-cp313-cp313t-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6f3afa12c392f0a44a2414056d730eebc33ec0926aae92b5ad5cf26ebb6cc128", size = 847961, upload-time = "2025-09-08T23:08:29.672Z" }, - { url = "https://files.pythonhosted.org/packages/9a/12/f003e824a19ed73be15542f172fd0ec4ad0b60cf37436652c93b9df7c585/pyzmq-27.1.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:c65047adafe573ff023b3187bb93faa583151627bc9c51fc4fb2c561ed689d39", size = 1650282, upload-time = "2025-09-08T23:08:31.349Z" }, - { url = "https://files.pythonhosted.org/packages/d5/4a/e82d788ed58e9a23995cee70dbc20c9aded3d13a92d30d57ec2291f1e8a3/pyzmq-27.1.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:90e6e9441c946a8b0a667356f7078d96411391a3b8f80980315455574177ec97", size = 2024468, upload-time = "2025-09-08T23:08:33.543Z" }, - { url = "https://files.pythonhosted.org/packages/d9/94/2da0a60841f757481e402b34bf4c8bf57fa54a5466b965de791b1e6f747d/pyzmq-27.1.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:add071b2d25f84e8189aaf0882d39a285b42fa3853016ebab234a5e78c7a43db", size = 1885394, upload-time = "2025-09-08T23:08:35.51Z" }, - { url = "https://files.pythonhosted.org/packages/4f/6f/55c10e2e49ad52d080dc24e37adb215e5b0d64990b57598abc2e3f01725b/pyzmq-27.1.0-cp313-cp313t-win32.whl", hash = "sha256:7ccc0700cfdf7bd487bea8d850ec38f204478681ea02a582a8da8171b7f90a1c", size = 574964, upload-time = "2025-09-08T23:08:37.178Z" }, - { url = "https://files.pythonhosted.org/packages/87/4d/2534970ba63dd7c522d8ca80fb92777f362c0f321900667c615e2067cb29/pyzmq-27.1.0-cp313-cp313t-win_amd64.whl", hash = "sha256:8085a9fba668216b9b4323be338ee5437a235fe275b9d1610e422ccc279733e2", size = 641029, upload-time = "2025-09-08T23:08:40.595Z" }, - { url = 
"https://files.pythonhosted.org/packages/f6/fa/f8aea7a28b0641f31d40dea42d7ef003fded31e184ef47db696bc74cd610/pyzmq-27.1.0-cp313-cp313t-win_arm64.whl", hash = "sha256:6bb54ca21bcfe361e445256c15eedf083f153811c37be87e0514934d6913061e", size = 561541, upload-time = "2025-09-08T23:08:42.668Z" }, - { url = "https://files.pythonhosted.org/packages/87/45/19efbb3000956e82d0331bafca5d9ac19ea2857722fa2caacefb6042f39d/pyzmq-27.1.0-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:ce980af330231615756acd5154f29813d553ea555485ae712c491cd483df6b7a", size = 1341197, upload-time = "2025-09-08T23:08:44.973Z" }, - { url = "https://files.pythonhosted.org/packages/48/43/d72ccdbf0d73d1343936296665826350cb1e825f92f2db9db3e61c2162a2/pyzmq-27.1.0-cp314-cp314t-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:1779be8c549e54a1c38f805e56d2a2e5c009d26de10921d7d51cfd1c8d4632ea", size = 897175, upload-time = "2025-09-08T23:08:46.601Z" }, - { url = "https://files.pythonhosted.org/packages/2f/2e/a483f73a10b65a9ef0161e817321d39a770b2acf8bcf3004a28d90d14a94/pyzmq-27.1.0-cp314-cp314t-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7200bb0f03345515df50d99d3db206a0a6bee1955fbb8c453c76f5bf0e08fb96", size = 660427, upload-time = "2025-09-08T23:08:48.187Z" }, - { url = "https://files.pythonhosted.org/packages/f5/d2/5f36552c2d3e5685abe60dfa56f91169f7a2d99bbaf67c5271022ab40863/pyzmq-27.1.0-cp314-cp314t-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:01c0e07d558b06a60773744ea6251f769cd79a41a97d11b8bf4ab8f034b0424d", size = 847929, upload-time = "2025-09-08T23:08:49.76Z" }, - { url = "https://files.pythonhosted.org/packages/c4/2a/404b331f2b7bf3198e9945f75c4c521f0c6a3a23b51f7a4a401b94a13833/pyzmq-27.1.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:80d834abee71f65253c91540445d37c4c561e293ba6e741b992f20a105d69146", size = 1650193, upload-time = "2025-09-08T23:08:51.7Z" }, - { url = "https://files.pythonhosted.org/packages/1c/0b/f4107e33f62a5acf60e3ded67ed33d79b4ce18de432625ce2fc5093d6388/pyzmq-27.1.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:544b4e3b7198dde4a62b8ff6685e9802a9a1ebf47e77478a5eb88eca2a82f2fd", size = 2024388, upload-time = "2025-09-08T23:08:53.393Z" }, - { url = "https://files.pythonhosted.org/packages/0d/01/add31fe76512642fd6e40e3a3bd21f4b47e242c8ba33efb6809e37076d9b/pyzmq-27.1.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:cedc4c68178e59a4046f97eca31b148ddcf51e88677de1ef4e78cf06c5376c9a", size = 1885316, upload-time = "2025-09-08T23:08:55.702Z" }, - { url = "https://files.pythonhosted.org/packages/c4/59/a5f38970f9bf07cee96128de79590bb354917914a9be11272cfc7ff26af0/pyzmq-27.1.0-cp314-cp314t-win32.whl", hash = "sha256:1f0b2a577fd770aa6f053211a55d1c47901f4d537389a034c690291485e5fe92", size = 587472, upload-time = "2025-09-08T23:08:58.18Z" }, - { url = "https://files.pythonhosted.org/packages/70/d8/78b1bad170f93fcf5e3536e70e8fadac55030002275c9a29e8f5719185de/pyzmq-27.1.0-cp314-cp314t-win_amd64.whl", hash = "sha256:19c9468ae0437f8074af379e986c5d3d7d7bfe033506af442e8c879732bedbe0", size = 661401, upload-time = "2025-09-08T23:08:59.802Z" }, - { url = "https://files.pythonhosted.org/packages/81/d6/4bfbb40c9a0b42fc53c7cf442f6385db70b40f74a783130c5d0a5aa62228/pyzmq-27.1.0-cp314-cp314t-win_arm64.whl", hash = "sha256:dc5dbf68a7857b59473f7df42650c621d7e8923fb03fa74a526890f4d33cc4d7", size = 575170, upload-time = "2025-09-08T23:09:01.418Z" }, -] - -[[package]] -name = "questionary" -version = "2.1.1" -source = { registry = "https://pypi.org/simple" } 
-dependencies = [ - { name = "prompt-toolkit" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/f6/45/eafb0bba0f9988f6a2520f9ca2df2c82ddfa8d67c95d6625452e97b204a5/questionary-2.1.1.tar.gz", hash = "sha256:3d7e980292bb0107abaa79c68dd3eee3c561b83a0f89ae482860b181c8bd412d", size = 25845, upload-time = "2025-08-28T19:00:20.851Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/3c/26/1062c7ec1b053db9e499b4d2d5bc231743201b74051c973dadeac80a8f43/questionary-2.1.1-py3-none-any.whl", hash = "sha256:a51af13f345f1cdea62347589fbb6df3b290306ab8930713bfae4d475a7d4a59", size = 36753, upload-time = "2025-08-28T19:00:19.56Z" }, -] - -[[package]] -name = "redis" -version = "5.3.1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "pyjwt" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/6a/cf/128b1b6d7086200c9f387bd4be9b2572a30b90745ef078bd8b235042dc9f/redis-5.3.1.tar.gz", hash = "sha256:ca49577a531ea64039b5a36db3d6cd1a0c7a60c34124d46924a45b956e8cf14c", size = 4626200, upload-time = "2025-07-25T08:06:27.778Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/7f/26/5c5fa0e83c3621db835cfc1f1d789b37e7fa99ed54423b5f519beb931aa7/redis-5.3.1-py3-none-any.whl", hash = "sha256:dc1909bd24669cc31b5f67a039700b16ec30571096c5f1f0d9d2324bff31af97", size = 272833, upload-time = "2025-07-25T08:06:26.317Z" }, -] - -[[package]] -name = "referencing" -version = "0.36.2" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "attrs" }, - { name = "rpds-py" }, - { name = "typing-extensions", marker = "python_full_version < '3.13'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/2f/db/98b5c277be99dd18bfd91dd04e1b759cad18d1a338188c936e92f921c7e2/referencing-0.36.2.tar.gz", hash = "sha256:df2e89862cd09deabbdba16944cc3f10feb6b3e6f18e902f7cc25609a34775aa", size = 74744, upload-time = "2025-01-25T08:48:16.138Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/c1/b1/3baf80dc6d2b7bc27a95a67752d0208e410351e3feb4eb78de5f77454d8d/referencing-0.36.2-py3-none-any.whl", hash = "sha256:e8699adbbf8b5c7de96d8ffa0eb5c158b3beafce084968e2ea8bb08c6794dcd0", size = 26775, upload-time = "2025-01-25T08:48:14.241Z" }, -] - -[[package]] -name = "regex" -version = "2025.9.18" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/49/d3/eaa0d28aba6ad1827ad1e716d9a93e1ba963ada61887498297d3da715133/regex-2025.9.18.tar.gz", hash = "sha256:c5ba23274c61c6fef447ba6a39333297d0c247f53059dba0bca415cac511edc4", size = 400917, upload-time = "2025-09-19T00:38:35.79Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/b0/99/05859d87a66ae7098222d65748f11ef7f2dff51bfd7482a4e2256c90d72b/regex-2025.9.18-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:436e1b31d7efd4dcd52091d076482031c611dde58bf9c46ca6d0a26e33053a7e", size = 486335, upload-time = "2025-09-19T00:36:03.661Z" }, - { url = "https://files.pythonhosted.org/packages/97/7e/d43d4e8b978890932cf7b0957fce58c5b08c66f32698f695b0c2c24a48bf/regex-2025.9.18-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:c190af81e5576b9c5fdc708f781a52ff20f8b96386c6e2e0557a78402b029f4a", size = 289720, upload-time = "2025-09-19T00:36:05.471Z" }, - { url = "https://files.pythonhosted.org/packages/bb/3b/ff80886089eb5dcf7e0d2040d9aaed539e25a94300403814bb24cc775058/regex-2025.9.18-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:e4121f1ce2b2b5eec4b397cc1b277686e577e658d8f5870b7eb2d726bd2300ab", size = 287257, 
upload-time = "2025-09-19T00:36:07.072Z" }, - { url = "https://files.pythonhosted.org/packages/ee/66/243edf49dd8720cba8d5245dd4d6adcb03a1defab7238598c0c97cf549b8/regex-2025.9.18-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:300e25dbbf8299d87205e821a201057f2ef9aa3deb29caa01cd2cac669e508d5", size = 797463, upload-time = "2025-09-19T00:36:08.399Z" }, - { url = "https://files.pythonhosted.org/packages/df/71/c9d25a1142c70432e68bb03211d4a82299cd1c1fbc41db9409a394374ef5/regex-2025.9.18-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:7b47fcf9f5316c0bdaf449e879407e1b9937a23c3b369135ca94ebc8d74b1742", size = 862670, upload-time = "2025-09-19T00:36:10.101Z" }, - { url = "https://files.pythonhosted.org/packages/f8/8f/329b1efc3a64375a294e3a92d43372bf1a351aa418e83c21f2f01cf6ec41/regex-2025.9.18-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:57a161bd3acaa4b513220b49949b07e252165e6b6dc910ee7617a37ff4f5b425", size = 910881, upload-time = "2025-09-19T00:36:12.223Z" }, - { url = "https://files.pythonhosted.org/packages/35/9e/a91b50332a9750519320ed30ec378b74c996f6befe282cfa6bb6cea7e9fd/regex-2025.9.18-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4f130c3a7845ba42de42f380fff3c8aebe89a810747d91bcf56d40a069f15352", size = 802011, upload-time = "2025-09-19T00:36:13.901Z" }, - { url = "https://files.pythonhosted.org/packages/a4/1d/6be3b8d7856b6e0d7ee7f942f437d0a76e0d5622983abbb6d21e21ab9a17/regex-2025.9.18-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5f96fa342b6f54dcba928dd452e8d8cb9f0d63e711d1721cd765bb9f73bb048d", size = 786668, upload-time = "2025-09-19T00:36:15.391Z" }, - { url = "https://files.pythonhosted.org/packages/cb/ce/4a60e53df58bd157c5156a1736d3636f9910bdcc271d067b32b7fcd0c3a8/regex-2025.9.18-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:0f0d676522d68c207828dcd01fb6f214f63f238c283d9f01d85fc664c7c85b56", size = 856578, upload-time = "2025-09-19T00:36:16.845Z" }, - { url = "https://files.pythonhosted.org/packages/86/e8/162c91bfe7217253afccde112868afb239f94703de6580fb235058d506a6/regex-2025.9.18-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:40532bff8a1a0621e7903ae57fce88feb2e8a9a9116d341701302c9302aef06e", size = 849017, upload-time = "2025-09-19T00:36:18.597Z" }, - { url = "https://files.pythonhosted.org/packages/35/34/42b165bc45289646ea0959a1bc7531733e90b47c56a72067adfe6b3251f6/regex-2025.9.18-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:039f11b618ce8d71a1c364fdee37da1012f5a3e79b1b2819a9f389cd82fd6282", size = 788150, upload-time = "2025-09-19T00:36:20.464Z" }, - { url = "https://files.pythonhosted.org/packages/79/5d/cdd13b1f3c53afa7191593a7ad2ee24092a5a46417725ffff7f64be8342d/regex-2025.9.18-cp312-cp312-win32.whl", hash = "sha256:e1dd06f981eb226edf87c55d523131ade7285137fbde837c34dc9d1bf309f459", size = 264536, upload-time = "2025-09-19T00:36:21.922Z" }, - { url = "https://files.pythonhosted.org/packages/e0/f5/4a7770c9a522e7d2dc1fa3ffc83ab2ab33b0b22b447e62cffef186805302/regex-2025.9.18-cp312-cp312-win_amd64.whl", hash = "sha256:3d86b5247bf25fa3715e385aa9ff272c307e0636ce0c9595f64568b41f0a9c77", size = 275501, upload-time = "2025-09-19T00:36:23.4Z" }, - { url = "https://files.pythonhosted.org/packages/df/05/9ce3e110e70d225ecbed455b966003a3afda5e58e8aec2964042363a18f4/regex-2025.9.18-cp312-cp312-win_arm64.whl", hash = 
"sha256:032720248cbeeae6444c269b78cb15664458b7bb9ed02401d3da59fe4d68c3a5", size = 268601, upload-time = "2025-09-19T00:36:25.092Z" }, - { url = "https://files.pythonhosted.org/packages/d2/c7/5c48206a60ce33711cf7dcaeaed10dd737733a3569dc7e1dce324dd48f30/regex-2025.9.18-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:2a40f929cd907c7e8ac7566ac76225a77701a6221bca937bdb70d56cb61f57b2", size = 485955, upload-time = "2025-09-19T00:36:26.822Z" }, - { url = "https://files.pythonhosted.org/packages/e9/be/74fc6bb19a3c491ec1ace943e622b5a8539068771e8705e469b2da2306a7/regex-2025.9.18-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:c90471671c2cdf914e58b6af62420ea9ecd06d1554d7474d50133ff26ae88feb", size = 289583, upload-time = "2025-09-19T00:36:28.577Z" }, - { url = "https://files.pythonhosted.org/packages/25/c4/9ceaa433cb5dc515765560f22a19578b95b92ff12526e5a259321c4fc1a0/regex-2025.9.18-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1a351aff9e07a2dabb5022ead6380cff17a4f10e4feb15f9100ee56c4d6d06af", size = 287000, upload-time = "2025-09-19T00:36:30.161Z" }, - { url = "https://files.pythonhosted.org/packages/7d/e6/68bc9393cb4dc68018456568c048ac035854b042bc7c33cb9b99b0680afa/regex-2025.9.18-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bc4b8e9d16e20ddfe16430c23468a8707ccad3365b06d4536142e71823f3ca29", size = 797535, upload-time = "2025-09-19T00:36:31.876Z" }, - { url = "https://files.pythonhosted.org/packages/6a/1c/ebae9032d34b78ecfe9bd4b5e6575b55351dc8513485bb92326613732b8c/regex-2025.9.18-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:4b8cdbddf2db1c5e80338ba2daa3cfa3dec73a46fff2a7dda087c8efbf12d62f", size = 862603, upload-time = "2025-09-19T00:36:33.344Z" }, - { url = "https://files.pythonhosted.org/packages/3b/74/12332c54b3882557a4bcd2b99f8be581f5c6a43cf1660a85b460dd8ff468/regex-2025.9.18-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a276937d9d75085b2c91fb48244349c6954f05ee97bba0963ce24a9d915b8b68", size = 910829, upload-time = "2025-09-19T00:36:34.826Z" }, - { url = "https://files.pythonhosted.org/packages/86/70/ba42d5ed606ee275f2465bfc0e2208755b06cdabd0f4c7c4b614d51b57ab/regex-2025.9.18-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:92a8e375ccdc1256401c90e9dc02b8642894443d549ff5e25e36d7cf8a80c783", size = 802059, upload-time = "2025-09-19T00:36:36.664Z" }, - { url = "https://files.pythonhosted.org/packages/da/c5/fcb017e56396a7f2f8357412638d7e2963440b131a3ca549be25774b3641/regex-2025.9.18-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0dc6893b1f502d73037cf807a321cdc9be29ef3d6219f7970f842475873712ac", size = 786781, upload-time = "2025-09-19T00:36:38.168Z" }, - { url = "https://files.pythonhosted.org/packages/c6/ee/21c4278b973f630adfb3bcb23d09d83625f3ab1ca6e40ebdffe69901c7a1/regex-2025.9.18-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:a61e85bfc63d232ac14b015af1261f826260c8deb19401c0597dbb87a864361e", size = 856578, upload-time = "2025-09-19T00:36:40.129Z" }, - { url = "https://files.pythonhosted.org/packages/87/0b/de51550dc7274324435c8f1539373ac63019b0525ad720132866fff4a16a/regex-2025.9.18-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:1ef86a9ebc53f379d921fb9a7e42b92059ad3ee800fcd9e0fe6181090e9f6c23", size = 849119, upload-time = "2025-09-19T00:36:41.651Z" }, - { url = 
"https://files.pythonhosted.org/packages/60/52/383d3044fc5154d9ffe4321696ee5b2ee4833a28c29b137c22c33f41885b/regex-2025.9.18-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:d3bc882119764ba3a119fbf2bd4f1b47bc56c1da5d42df4ed54ae1e8e66fdf8f", size = 788219, upload-time = "2025-09-19T00:36:43.575Z" }, - { url = "https://files.pythonhosted.org/packages/20/bd/2614fc302671b7359972ea212f0e3a92df4414aaeacab054a8ce80a86073/regex-2025.9.18-cp313-cp313-win32.whl", hash = "sha256:3810a65675845c3bdfa58c3c7d88624356dd6ee2fc186628295e0969005f928d", size = 264517, upload-time = "2025-09-19T00:36:45.503Z" }, - { url = "https://files.pythonhosted.org/packages/07/0f/ab5c1581e6563a7bffdc1974fb2d25f05689b88e2d416525271f232b1946/regex-2025.9.18-cp313-cp313-win_amd64.whl", hash = "sha256:16eaf74b3c4180ede88f620f299e474913ab6924d5c4b89b3833bc2345d83b3d", size = 275481, upload-time = "2025-09-19T00:36:46.965Z" }, - { url = "https://files.pythonhosted.org/packages/49/22/ee47672bc7958f8c5667a587c2600a4fba8b6bab6e86bd6d3e2b5f7cac42/regex-2025.9.18-cp313-cp313-win_arm64.whl", hash = "sha256:4dc98ba7dd66bd1261927a9f49bd5ee2bcb3660f7962f1ec02617280fc00f5eb", size = 268598, upload-time = "2025-09-19T00:36:48.314Z" }, - { url = "https://files.pythonhosted.org/packages/e8/83/6887e16a187c6226cb85d8301e47d3b73ecc4505a3a13d8da2096b44fd76/regex-2025.9.18-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:fe5d50572bc885a0a799410a717c42b1a6b50e2f45872e2b40f4f288f9bce8a2", size = 489765, upload-time = "2025-09-19T00:36:49.996Z" }, - { url = "https://files.pythonhosted.org/packages/51/c5/e2f7325301ea2916ff301c8d963ba66b1b2c1b06694191df80a9c4fea5d0/regex-2025.9.18-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:1b9d9a2d6cda6621551ca8cf7a06f103adf72831153f3c0d982386110870c4d3", size = 291228, upload-time = "2025-09-19T00:36:51.654Z" }, - { url = "https://files.pythonhosted.org/packages/91/60/7d229d2bc6961289e864a3a3cfebf7d0d250e2e65323a8952cbb7e22d824/regex-2025.9.18-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:13202e4c4ac0ef9a317fff817674b293c8f7e8c68d3190377d8d8b749f566e12", size = 289270, upload-time = "2025-09-19T00:36:53.118Z" }, - { url = "https://files.pythonhosted.org/packages/3c/d7/b4f06868ee2958ff6430df89857fbf3d43014bbf35538b6ec96c2704e15d/regex-2025.9.18-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:874ff523b0fecffb090f80ae53dc93538f8db954c8bb5505f05b7787ab3402a0", size = 806326, upload-time = "2025-09-19T00:36:54.631Z" }, - { url = "https://files.pythonhosted.org/packages/d6/e4/bca99034a8f1b9b62ccf337402a8e5b959dd5ba0e5e5b2ead70273df3277/regex-2025.9.18-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:d13ab0490128f2bb45d596f754148cd750411afc97e813e4b3a61cf278a23bb6", size = 871556, upload-time = "2025-09-19T00:36:56.208Z" }, - { url = "https://files.pythonhosted.org/packages/6d/df/e06ffaf078a162f6dd6b101a5ea9b44696dca860a48136b3ae4a9caf25e2/regex-2025.9.18-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:05440bc172bc4b4b37fb9667e796597419404dbba62e171e1f826d7d2a9ebcef", size = 913817, upload-time = "2025-09-19T00:36:57.807Z" }, - { url = "https://files.pythonhosted.org/packages/9e/05/25b05480b63292fd8e84800b1648e160ca778127b8d2367a0a258fa2e225/regex-2025.9.18-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5514b8e4031fdfaa3d27e92c75719cbe7f379e28cacd939807289bce76d0e35a", size = 811055, 
upload-time = "2025-09-19T00:36:59.762Z" }, - { url = "https://files.pythonhosted.org/packages/70/97/7bc7574655eb651ba3a916ed4b1be6798ae97af30104f655d8efd0cab24b/regex-2025.9.18-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:65d3c38c39efce73e0d9dc019697b39903ba25b1ad45ebbd730d2cf32741f40d", size = 794534, upload-time = "2025-09-19T00:37:01.405Z" }, - { url = "https://files.pythonhosted.org/packages/b4/c2/d5da49166a52dda879855ecdba0117f073583db2b39bb47ce9a3378a8e9e/regex-2025.9.18-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:ae77e447ebc144d5a26d50055c6ddba1d6ad4a865a560ec7200b8b06bc529368", size = 866684, upload-time = "2025-09-19T00:37:03.441Z" }, - { url = "https://files.pythonhosted.org/packages/bd/2d/0a5c4e6ec417de56b89ff4418ecc72f7e3feca806824c75ad0bbdae0516b/regex-2025.9.18-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:e3ef8cf53dc8df49d7e28a356cf824e3623764e9833348b655cfed4524ab8a90", size = 853282, upload-time = "2025-09-19T00:37:04.985Z" }, - { url = "https://files.pythonhosted.org/packages/f4/8e/d656af63e31a86572ec829665d6fa06eae7e144771e0330650a8bb865635/regex-2025.9.18-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:9feb29817df349c976da9a0debf775c5c33fc1c8ad7b9f025825da99374770b7", size = 797830, upload-time = "2025-09-19T00:37:06.697Z" }, - { url = "https://files.pythonhosted.org/packages/db/ce/06edc89df8f7b83ffd321b6071be4c54dc7332c0f77860edc40ce57d757b/regex-2025.9.18-cp313-cp313t-win32.whl", hash = "sha256:168be0d2f9b9d13076940b1ed774f98595b4e3c7fc54584bba81b3cc4181742e", size = 267281, upload-time = "2025-09-19T00:37:08.568Z" }, - { url = "https://files.pythonhosted.org/packages/83/9a/2b5d9c8b307a451fd17068719d971d3634ca29864b89ed5c18e499446d4a/regex-2025.9.18-cp313-cp313t-win_amd64.whl", hash = "sha256:d59ecf3bb549e491c8104fea7313f3563c7b048e01287db0a90485734a70a730", size = 278724, upload-time = "2025-09-19T00:37:10.023Z" }, - { url = "https://files.pythonhosted.org/packages/3d/70/177d31e8089a278a764f8ec9a3faac8d14a312d622a47385d4b43905806f/regex-2025.9.18-cp313-cp313t-win_arm64.whl", hash = "sha256:dbef80defe9fb21310948a2595420b36c6d641d9bea4c991175829b2cc4bc06a", size = 269771, upload-time = "2025-09-19T00:37:13.041Z" }, - { url = "https://files.pythonhosted.org/packages/44/b7/3b4663aa3b4af16819f2ab6a78c4111c7e9b066725d8107753c2257448a5/regex-2025.9.18-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:c6db75b51acf277997f3adcd0ad89045d856190d13359f15ab5dda21581d9129", size = 486130, upload-time = "2025-09-19T00:37:14.527Z" }, - { url = "https://files.pythonhosted.org/packages/80/5b/4533f5d7ac9c6a02a4725fe8883de2aebc713e67e842c04cf02626afb747/regex-2025.9.18-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:8f9698b6f6895d6db810e0bda5364f9ceb9e5b11328700a90cae573574f61eea", size = 289539, upload-time = "2025-09-19T00:37:16.356Z" }, - { url = "https://files.pythonhosted.org/packages/b8/8d/5ab6797c2750985f79e9995fad3254caa4520846580f266ae3b56d1cae58/regex-2025.9.18-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:29cd86aa7cb13a37d0f0d7c21d8d949fe402ffa0ea697e635afedd97ab4b69f1", size = 287233, upload-time = "2025-09-19T00:37:18.025Z" }, - { url = "https://files.pythonhosted.org/packages/cb/1e/95afcb02ba8d3a64e6ffeb801718ce73471ad6440c55d993f65a4a5e7a92/regex-2025.9.18-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7c9f285a071ee55cd9583ba24dde006e53e17780bb309baa8e4289cd472bcc47", size = 797876, upload-time = "2025-09-19T00:37:19.609Z" }, - { url = 
"https://files.pythonhosted.org/packages/c8/fb/720b1f49cec1f3b5a9fea5b34cd22b88b5ebccc8c1b5de9cc6f65eed165a/regex-2025.9.18-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:5adf266f730431e3be9021d3e5b8d5ee65e563fec2883ea8093944d21863b379", size = 863385, upload-time = "2025-09-19T00:37:21.65Z" }, - { url = "https://files.pythonhosted.org/packages/a9/ca/e0d07ecf701e1616f015a720dc13b84c582024cbfbb3fc5394ae204adbd7/regex-2025.9.18-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:1137cabc0f38807de79e28d3f6e3e3f2cc8cfb26bead754d02e6d1de5f679203", size = 910220, upload-time = "2025-09-19T00:37:23.723Z" }, - { url = "https://files.pythonhosted.org/packages/b6/45/bba86413b910b708eca705a5af62163d5d396d5f647ed9485580c7025209/regex-2025.9.18-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7cc9e5525cada99699ca9223cce2d52e88c52a3d2a0e842bd53de5497c604164", size = 801827, upload-time = "2025-09-19T00:37:25.684Z" }, - { url = "https://files.pythonhosted.org/packages/b8/a6/740fbd9fcac31a1305a8eed30b44bf0f7f1e042342be0a4722c0365ecfca/regex-2025.9.18-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:bbb9246568f72dce29bcd433517c2be22c7791784b223a810225af3b50d1aafb", size = 786843, upload-time = "2025-09-19T00:37:27.62Z" }, - { url = "https://files.pythonhosted.org/packages/80/a7/0579e8560682645906da640c9055506465d809cb0f5415d9976f417209a6/regex-2025.9.18-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:6a52219a93dd3d92c675383efff6ae18c982e2d7651c792b1e6d121055808743", size = 857430, upload-time = "2025-09-19T00:37:29.362Z" }, - { url = "https://files.pythonhosted.org/packages/8d/9b/4dc96b6c17b38900cc9fee254fc9271d0dde044e82c78c0811b58754fde5/regex-2025.9.18-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:ae9b3840c5bd456780e3ddf2f737ab55a79b790f6409182012718a35c6d43282", size = 848612, upload-time = "2025-09-19T00:37:31.42Z" }, - { url = "https://files.pythonhosted.org/packages/b3/6a/6f659f99bebb1775e5ac81a3fb837b85897c1a4ef5acffd0ff8ffe7e67fb/regex-2025.9.18-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d488c236ac497c46a5ac2005a952c1a0e22a07be9f10c3e735bc7d1209a34773", size = 787967, upload-time = "2025-09-19T00:37:34.019Z" }, - { url = "https://files.pythonhosted.org/packages/61/35/9e35665f097c07cf384a6b90a1ac11b0b1693084a0b7a675b06f760496c6/regex-2025.9.18-cp314-cp314-win32.whl", hash = "sha256:0c3506682ea19beefe627a38872d8da65cc01ffa25ed3f2e422dffa1474f0788", size = 269847, upload-time = "2025-09-19T00:37:35.759Z" }, - { url = "https://files.pythonhosted.org/packages/af/64/27594dbe0f1590b82de2821ebfe9a359b44dcb9b65524876cd12fabc447b/regex-2025.9.18-cp314-cp314-win_amd64.whl", hash = "sha256:57929d0f92bebb2d1a83af372cd0ffba2263f13f376e19b1e4fa32aec4efddc3", size = 278755, upload-time = "2025-09-19T00:37:37.367Z" }, - { url = "https://files.pythonhosted.org/packages/30/a3/0cd8d0d342886bd7d7f252d701b20ae1a3c72dc7f34ef4b2d17790280a09/regex-2025.9.18-cp314-cp314-win_arm64.whl", hash = "sha256:6a4b44df31d34fa51aa5c995d3aa3c999cec4d69b9bd414a8be51984d859f06d", size = 271873, upload-time = "2025-09-19T00:37:39.125Z" }, - { url = "https://files.pythonhosted.org/packages/99/cb/8a1ab05ecf404e18b54348e293d9b7a60ec2bd7aa59e637020c5eea852e8/regex-2025.9.18-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:b176326bcd544b5e9b17d6943f807697c0cb7351f6cfb45bf5637c95ff7e6306", size = 489773, upload-time = "2025-09-19T00:37:40.968Z" }, - { url = 
"https://files.pythonhosted.org/packages/93/3b/6543c9b7f7e734d2404fa2863d0d710c907bef99d4598760ed4563d634c3/regex-2025.9.18-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:0ffd9e230b826b15b369391bec167baed57c7ce39efc35835448618860995946", size = 291221, upload-time = "2025-09-19T00:37:42.901Z" }, - { url = "https://files.pythonhosted.org/packages/cd/91/e9fdee6ad6bf708d98c5d17fded423dcb0661795a49cba1b4ffb8358377a/regex-2025.9.18-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:ec46332c41add73f2b57e2f5b642f991f6b15e50e9f86285e08ffe3a512ac39f", size = 289268, upload-time = "2025-09-19T00:37:44.823Z" }, - { url = "https://files.pythonhosted.org/packages/94/a6/bc3e8a918abe4741dadeaeb6c508e3a4ea847ff36030d820d89858f96a6c/regex-2025.9.18-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b80fa342ed1ea095168a3f116637bd1030d39c9ff38dc04e54ef7c521e01fc95", size = 806659, upload-time = "2025-09-19T00:37:46.684Z" }, - { url = "https://files.pythonhosted.org/packages/2b/71/ea62dbeb55d9e6905c7b5a49f75615ea1373afcad95830047e4e310db979/regex-2025.9.18-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f4d97071c0ba40f0cf2a93ed76e660654c399a0a04ab7d85472239460f3da84b", size = 871701, upload-time = "2025-09-19T00:37:48.882Z" }, - { url = "https://files.pythonhosted.org/packages/6a/90/fbe9dedb7dad24a3a4399c0bae64bfa932ec8922a0a9acf7bc88db30b161/regex-2025.9.18-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0ac936537ad87cef9e0e66c5144484206c1354224ee811ab1519a32373e411f3", size = 913742, upload-time = "2025-09-19T00:37:51.015Z" }, - { url = "https://files.pythonhosted.org/packages/f0/1c/47e4a8c0e73d41eb9eb9fdeba3b1b810110a5139a2526e82fd29c2d9f867/regex-2025.9.18-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dec57f96d4def58c422d212d414efe28218d58537b5445cf0c33afb1b4768571", size = 811117, upload-time = "2025-09-19T00:37:52.686Z" }, - { url = "https://files.pythonhosted.org/packages/2a/da/435f29fddfd015111523671e36d30af3342e8136a889159b05c1d9110480/regex-2025.9.18-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:48317233294648bf7cd068857f248e3a57222259a5304d32c7552e2284a1b2ad", size = 794647, upload-time = "2025-09-19T00:37:54.626Z" }, - { url = "https://files.pythonhosted.org/packages/23/66/df5e6dcca25c8bc57ce404eebc7342310a0d218db739d7882c9a2b5974a3/regex-2025.9.18-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:274687e62ea3cf54846a9b25fc48a04459de50af30a7bd0b61a9e38015983494", size = 866747, upload-time = "2025-09-19T00:37:56.367Z" }, - { url = "https://files.pythonhosted.org/packages/82/42/94392b39b531f2e469b2daa40acf454863733b674481fda17462a5ffadac/regex-2025.9.18-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:a78722c86a3e7e6aadf9579e3b0ad78d955f2d1f1a8ca4f67d7ca258e8719d4b", size = 853434, upload-time = "2025-09-19T00:37:58.39Z" }, - { url = "https://files.pythonhosted.org/packages/a8/f8/dcc64c7f7bbe58842a8f89622b50c58c3598fbbf4aad0a488d6df2c699f1/regex-2025.9.18-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:06104cd203cdef3ade989a1c45b6215bf42f8b9dd705ecc220c173233f7cba41", size = 798024, upload-time = "2025-09-19T00:38:00.397Z" }, - { url = "https://files.pythonhosted.org/packages/20/8d/edf1c5d5aa98f99a692313db813ec487732946784f8f93145e0153d910e5/regex-2025.9.18-cp314-cp314t-win32.whl", hash = 
"sha256:2e1eddc06eeaffd249c0adb6fafc19e2118e6308c60df9db27919e96b5656096", size = 273029, upload-time = "2025-09-19T00:38:02.383Z" }, - { url = "https://files.pythonhosted.org/packages/a7/24/02d4e4f88466f17b145f7ea2b2c11af3a942db6222429c2c146accf16054/regex-2025.9.18-cp314-cp314t-win_amd64.whl", hash = "sha256:8620d247fb8c0683ade51217b459cb4a1081c0405a3072235ba43a40d355c09a", size = 282680, upload-time = "2025-09-19T00:38:04.102Z" }, - { url = "https://files.pythonhosted.org/packages/1f/a3/c64894858aaaa454caa7cc47e2f225b04d3ed08ad649eacf58d45817fad2/regex-2025.9.18-cp314-cp314t-win_arm64.whl", hash = "sha256:b7531a8ef61de2c647cdf68b3229b071e46ec326b3138b2180acb4275f470b01", size = 273034, upload-time = "2025-09-19T00:38:05.807Z" }, -] - -[[package]] -name = "requests" -version = "2.32.5" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "certifi" }, - { name = "charset-normalizer" }, - { name = "idna" }, - { name = "urllib3" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/c9/74/b3ff8e6c8446842c3f5c837e9c3dfcfe2018ea6ecef224c710c85ef728f4/requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf", size = 134517, upload-time = "2025-08-18T20:46:02.573Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6", size = 64738, upload-time = "2025-08-18T20:46:00.542Z" }, -] - -[[package]] -name = "requests-oauthlib" -version = "2.0.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "oauthlib" }, - { name = "requests" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/42/f2/05f29bc3913aea15eb670be136045bf5c5bbf4b99ecb839da9b422bb2c85/requests-oauthlib-2.0.0.tar.gz", hash = "sha256:b3dffaebd884d8cd778494369603a9e7b58d29111bf6b41bdc2dcd87203af4e9", size = 55650, upload-time = "2024-03-22T20:32:29.939Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/3b/5d/63d4ae3b9daea098d5d6f5da83984853c1bbacd5dc826764b249fe119d24/requests_oauthlib-2.0.0-py2.py3-none-any.whl", hash = "sha256:7dd8a5c40426b779b0868c404bdef9768deccf22749cde15852df527e6269b36", size = 24179, upload-time = "2024-03-22T20:32:28.055Z" }, -] - -[[package]] -name = "rich" -version = "13.9.4" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "markdown-it-py" }, - { name = "pygments" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/ab/3a/0316b28d0761c6734d6bc14e770d85506c986c85ffb239e688eeaab2c2bc/rich-13.9.4.tar.gz", hash = "sha256:439594978a49a09530cff7ebc4b5c7103ef57baf48d5ea3184f21d9a2befa098", size = 223149, upload-time = "2024-11-01T16:43:57.873Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/19/71/39c7c0d87f8d4e6c020a393182060eaefeeae6c01dab6a84ec346f2567df/rich-13.9.4-py3-none-any.whl", hash = "sha256:6049d5e6ec054bf2779ab3358186963bac2ea89175919d699e378b99738c2a90", size = 242424, upload-time = "2024-11-01T16:43:55.817Z" }, -] - -[[package]] -name = "rpds-py" -version = "0.27.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/e9/dd/2c0cbe774744272b0ae725f44032c77bdcab6e8bcf544bffa3b6e70c8dba/rpds_py-0.27.1.tar.gz", hash = "sha256:26a1c73171d10b7acccbded82bf6a586ab8203601e565badc74bbbf8bc5a10f8", size = 27479, upload-time = "2025-08-27T12:16:36.024Z" } -wheels = [ - { url 
= "https://files.pythonhosted.org/packages/bd/fe/38de28dee5df58b8198c743fe2bea0c785c6d40941b9950bac4cdb71a014/rpds_py-0.27.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:ae2775c1973e3c30316892737b91f9283f9908e3cc7625b9331271eaaed7dc90", size = 361887, upload-time = "2025-08-27T12:13:10.233Z" }, - { url = "https://files.pythonhosted.org/packages/7c/9a/4b6c7eedc7dd90986bf0fab6ea2a091ec11c01b15f8ba0a14d3f80450468/rpds_py-0.27.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2643400120f55c8a96f7c9d858f7be0c88d383cd4653ae2cf0d0c88f668073e5", size = 345795, upload-time = "2025-08-27T12:13:11.65Z" }, - { url = "https://files.pythonhosted.org/packages/6f/0e/e650e1b81922847a09cca820237b0edee69416a01268b7754d506ade11ad/rpds_py-0.27.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:16323f674c089b0360674a4abd28d5042947d54ba620f72514d69be4ff64845e", size = 385121, upload-time = "2025-08-27T12:13:13.008Z" }, - { url = "https://files.pythonhosted.org/packages/1b/ea/b306067a712988e2bff00dcc7c8f31d26c29b6d5931b461aa4b60a013e33/rpds_py-0.27.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9a1f4814b65eacac94a00fc9a526e3fdafd78e439469644032032d0d63de4881", size = 398976, upload-time = "2025-08-27T12:13:14.368Z" }, - { url = "https://files.pythonhosted.org/packages/2c/0a/26dc43c8840cb8fe239fe12dbc8d8de40f2365e838f3d395835dde72f0e5/rpds_py-0.27.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ba32c16b064267b22f1850a34051121d423b6f7338a12b9459550eb2096e7ec", size = 525953, upload-time = "2025-08-27T12:13:15.774Z" }, - { url = "https://files.pythonhosted.org/packages/22/14/c85e8127b573aaf3a0cbd7fbb8c9c99e735a4a02180c84da2a463b766e9e/rpds_py-0.27.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e5c20f33fd10485b80f65e800bbe5f6785af510b9f4056c5a3c612ebc83ba6cb", size = 407915, upload-time = "2025-08-27T12:13:17.379Z" }, - { url = "https://files.pythonhosted.org/packages/ed/7b/8f4fee9ba1fb5ec856eb22d725a4efa3deb47f769597c809e03578b0f9d9/rpds_py-0.27.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:466bfe65bd932da36ff279ddd92de56b042f2266d752719beb97b08526268ec5", size = 386883, upload-time = "2025-08-27T12:13:18.704Z" }, - { url = "https://files.pythonhosted.org/packages/86/47/28fa6d60f8b74fcdceba81b272f8d9836ac0340570f68f5df6b41838547b/rpds_py-0.27.1-cp312-cp312-manylinux_2_31_riscv64.whl", hash = "sha256:41e532bbdcb57c92ba3be62c42e9f096431b4cf478da9bc3bc6ce5c38ab7ba7a", size = 405699, upload-time = "2025-08-27T12:13:20.089Z" }, - { url = "https://files.pythonhosted.org/packages/d0/fd/c5987b5e054548df56953a21fe2ebed51fc1ec7c8f24fd41c067b68c4a0a/rpds_py-0.27.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f149826d742b406579466283769a8ea448eed82a789af0ed17b0cd5770433444", size = 423713, upload-time = "2025-08-27T12:13:21.436Z" }, - { url = "https://files.pythonhosted.org/packages/ac/ba/3c4978b54a73ed19a7d74531be37a8bcc542d917c770e14d372b8daea186/rpds_py-0.27.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:80c60cfb5310677bd67cb1e85a1e8eb52e12529545441b43e6f14d90b878775a", size = 562324, upload-time = "2025-08-27T12:13:22.789Z" }, - { url = "https://files.pythonhosted.org/packages/b5/6c/6943a91768fec16db09a42b08644b960cff540c66aab89b74be6d4a144ba/rpds_py-0.27.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:7ee6521b9baf06085f62ba9c7a3e5becffbc32480d2f1b351559c001c38ce4c1", size = 593646, upload-time = "2025-08-27T12:13:24.122Z" 
}, - { url = "https://files.pythonhosted.org/packages/11/73/9d7a8f4be5f4396f011a6bb7a19fe26303a0dac9064462f5651ced2f572f/rpds_py-0.27.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a512c8263249a9d68cac08b05dd59d2b3f2061d99b322813cbcc14c3c7421998", size = 558137, upload-time = "2025-08-27T12:13:25.557Z" }, - { url = "https://files.pythonhosted.org/packages/6e/96/6772cbfa0e2485bcceef8071de7821f81aeac8bb45fbfd5542a3e8108165/rpds_py-0.27.1-cp312-cp312-win32.whl", hash = "sha256:819064fa048ba01b6dadc5116f3ac48610435ac9a0058bbde98e569f9e785c39", size = 221343, upload-time = "2025-08-27T12:13:26.967Z" }, - { url = "https://files.pythonhosted.org/packages/67/b6/c82f0faa9af1c6a64669f73a17ee0eeef25aff30bb9a1c318509efe45d84/rpds_py-0.27.1-cp312-cp312-win_amd64.whl", hash = "sha256:d9199717881f13c32c4046a15f024971a3b78ad4ea029e8da6b86e5aa9cf4594", size = 232497, upload-time = "2025-08-27T12:13:28.326Z" }, - { url = "https://files.pythonhosted.org/packages/e1/96/2817b44bd2ed11aebacc9251da03689d56109b9aba5e311297b6902136e2/rpds_py-0.27.1-cp312-cp312-win_arm64.whl", hash = "sha256:33aa65b97826a0e885ef6e278fbd934e98cdcfed80b63946025f01e2f5b29502", size = 222790, upload-time = "2025-08-27T12:13:29.71Z" }, - { url = "https://files.pythonhosted.org/packages/cc/77/610aeee8d41e39080c7e14afa5387138e3c9fa9756ab893d09d99e7d8e98/rpds_py-0.27.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:e4b9fcfbc021633863a37e92571d6f91851fa656f0180246e84cbd8b3f6b329b", size = 361741, upload-time = "2025-08-27T12:13:31.039Z" }, - { url = "https://files.pythonhosted.org/packages/3a/fc/c43765f201c6a1c60be2043cbdb664013def52460a4c7adace89d6682bf4/rpds_py-0.27.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1441811a96eadca93c517d08df75de45e5ffe68aa3089924f963c782c4b898cf", size = 345574, upload-time = "2025-08-27T12:13:32.902Z" }, - { url = "https://files.pythonhosted.org/packages/20/42/ee2b2ca114294cd9847d0ef9c26d2b0851b2e7e00bf14cc4c0b581df0fc3/rpds_py-0.27.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:55266dafa22e672f5a4f65019015f90336ed31c6383bd53f5e7826d21a0e0b83", size = 385051, upload-time = "2025-08-27T12:13:34.228Z" }, - { url = "https://files.pythonhosted.org/packages/fd/e8/1e430fe311e4799e02e2d1af7c765f024e95e17d651612425b226705f910/rpds_py-0.27.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d78827d7ac08627ea2c8e02c9e5b41180ea5ea1f747e9db0915e3adf36b62dcf", size = 398395, upload-time = "2025-08-27T12:13:36.132Z" }, - { url = "https://files.pythonhosted.org/packages/82/95/9dc227d441ff2670651c27a739acb2535ccaf8b351a88d78c088965e5996/rpds_py-0.27.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae92443798a40a92dc5f0b01d8a7c93adde0c4dc965310a29ae7c64d72b9fad2", size = 524334, upload-time = "2025-08-27T12:13:37.562Z" }, - { url = "https://files.pythonhosted.org/packages/87/01/a670c232f401d9ad461d9a332aa4080cd3cb1d1df18213dbd0d2a6a7ab51/rpds_py-0.27.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c46c9dd2403b66a2a3b9720ec4b74d4ab49d4fabf9f03dfdce2d42af913fe8d0", size = 407691, upload-time = "2025-08-27T12:13:38.94Z" }, - { url = "https://files.pythonhosted.org/packages/03/36/0a14aebbaa26fe7fab4780c76f2239e76cc95a0090bdb25e31d95c492fcd/rpds_py-0.27.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2efe4eb1d01b7f5f1939f4ef30ecea6c6b3521eec451fb93191bf84b2a522418", size = 386868, upload-time = "2025-08-27T12:13:40.192Z" }, - { url = 
"https://files.pythonhosted.org/packages/3b/03/8c897fb8b5347ff6c1cc31239b9611c5bf79d78c984430887a353e1409a1/rpds_py-0.27.1-cp313-cp313-manylinux_2_31_riscv64.whl", hash = "sha256:15d3b4d83582d10c601f481eca29c3f138d44c92187d197aff663a269197c02d", size = 405469, upload-time = "2025-08-27T12:13:41.496Z" }, - { url = "https://files.pythonhosted.org/packages/da/07/88c60edc2df74850d496d78a1fdcdc7b54360a7f610a4d50008309d41b94/rpds_py-0.27.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4ed2e16abbc982a169d30d1a420274a709949e2cbdef119fe2ec9d870b42f274", size = 422125, upload-time = "2025-08-27T12:13:42.802Z" }, - { url = "https://files.pythonhosted.org/packages/6b/86/5f4c707603e41b05f191a749984f390dabcbc467cf833769b47bf14ba04f/rpds_py-0.27.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a75f305c9b013289121ec0f1181931975df78738cdf650093e6b86d74aa7d8dd", size = 562341, upload-time = "2025-08-27T12:13:44.472Z" }, - { url = "https://files.pythonhosted.org/packages/b2/92/3c0cb2492094e3cd9baf9e49bbb7befeceb584ea0c1a8b5939dca4da12e5/rpds_py-0.27.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:67ce7620704745881a3d4b0ada80ab4d99df390838839921f99e63c474f82cf2", size = 592511, upload-time = "2025-08-27T12:13:45.898Z" }, - { url = "https://files.pythonhosted.org/packages/10/bb/82e64fbb0047c46a168faa28d0d45a7851cd0582f850b966811d30f67ad8/rpds_py-0.27.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9d992ac10eb86d9b6f369647b6a3f412fc0075cfd5d799530e84d335e440a002", size = 557736, upload-time = "2025-08-27T12:13:47.408Z" }, - { url = "https://files.pythonhosted.org/packages/00/95/3c863973d409210da7fb41958172c6b7dbe7fc34e04d3cc1f10bb85e979f/rpds_py-0.27.1-cp313-cp313-win32.whl", hash = "sha256:4f75e4bd8ab8db624e02c8e2fc4063021b58becdbe6df793a8111d9343aec1e3", size = 221462, upload-time = "2025-08-27T12:13:48.742Z" }, - { url = "https://files.pythonhosted.org/packages/ce/2c/5867b14a81dc217b56d95a9f2a40fdbc56a1ab0181b80132beeecbd4b2d6/rpds_py-0.27.1-cp313-cp313-win_amd64.whl", hash = "sha256:f9025faafc62ed0b75a53e541895ca272815bec18abe2249ff6501c8f2e12b83", size = 232034, upload-time = "2025-08-27T12:13:50.11Z" }, - { url = "https://files.pythonhosted.org/packages/c7/78/3958f3f018c01923823f1e47f1cc338e398814b92d83cd278364446fac66/rpds_py-0.27.1-cp313-cp313-win_arm64.whl", hash = "sha256:ed10dc32829e7d222b7d3b93136d25a406ba9788f6a7ebf6809092da1f4d279d", size = 222392, upload-time = "2025-08-27T12:13:52.587Z" }, - { url = "https://files.pythonhosted.org/packages/01/76/1cdf1f91aed5c3a7bf2eba1f1c4e4d6f57832d73003919a20118870ea659/rpds_py-0.27.1-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:92022bbbad0d4426e616815b16bc4127f83c9a74940e1ccf3cfe0b387aba0228", size = 358355, upload-time = "2025-08-27T12:13:54.012Z" }, - { url = "https://files.pythonhosted.org/packages/c3/6f/bf142541229374287604caf3bb2a4ae17f0a580798fd72d3b009b532db4e/rpds_py-0.27.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:47162fdab9407ec3f160805ac3e154df042e577dd53341745fc7fb3f625e6d92", size = 342138, upload-time = "2025-08-27T12:13:55.791Z" }, - { url = "https://files.pythonhosted.org/packages/1a/77/355b1c041d6be40886c44ff5e798b4e2769e497b790f0f7fd1e78d17e9a8/rpds_py-0.27.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb89bec23fddc489e5d78b550a7b773557c9ab58b7946154a10a6f7a214a48b2", size = 380247, upload-time = "2025-08-27T12:13:57.683Z" }, - { url = 
"https://files.pythonhosted.org/packages/d6/a4/d9cef5c3946ea271ce2243c51481971cd6e34f21925af2783dd17b26e815/rpds_py-0.27.1-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e48af21883ded2b3e9eb48cb7880ad8598b31ab752ff3be6457001d78f416723", size = 390699, upload-time = "2025-08-27T12:13:59.137Z" }, - { url = "https://files.pythonhosted.org/packages/3a/06/005106a7b8c6c1a7e91b73169e49870f4af5256119d34a361ae5240a0c1d/rpds_py-0.27.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6f5b7bd8e219ed50299e58551a410b64daafb5017d54bbe822e003856f06a802", size = 521852, upload-time = "2025-08-27T12:14:00.583Z" }, - { url = "https://files.pythonhosted.org/packages/e5/3e/50fb1dac0948e17a02eb05c24510a8fe12d5ce8561c6b7b7d1339ab7ab9c/rpds_py-0.27.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08f1e20bccf73b08d12d804d6e1c22ca5530e71659e6673bce31a6bb71c1e73f", size = 402582, upload-time = "2025-08-27T12:14:02.034Z" }, - { url = "https://files.pythonhosted.org/packages/cb/b0/f4e224090dc5b0ec15f31a02d746ab24101dd430847c4d99123798661bfc/rpds_py-0.27.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0dc5dceeaefcc96dc192e3a80bbe1d6c410c469e97bdd47494a7d930987f18b2", size = 384126, upload-time = "2025-08-27T12:14:03.437Z" }, - { url = "https://files.pythonhosted.org/packages/54/77/ac339d5f82b6afff1df8f0fe0d2145cc827992cb5f8eeb90fc9f31ef7a63/rpds_py-0.27.1-cp313-cp313t-manylinux_2_31_riscv64.whl", hash = "sha256:d76f9cc8665acdc0c9177043746775aa7babbf479b5520b78ae4002d889f5c21", size = 399486, upload-time = "2025-08-27T12:14:05.443Z" }, - { url = "https://files.pythonhosted.org/packages/d6/29/3e1c255eee6ac358c056a57d6d6869baa00a62fa32eea5ee0632039c50a3/rpds_py-0.27.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:134fae0e36022edad8290a6661edf40c023562964efea0cc0ec7f5d392d2aaef", size = 414832, upload-time = "2025-08-27T12:14:06.902Z" }, - { url = "https://files.pythonhosted.org/packages/3f/db/6d498b844342deb3fa1d030598db93937a9964fcf5cb4da4feb5f17be34b/rpds_py-0.27.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:eb11a4f1b2b63337cfd3b4d110af778a59aae51c81d195768e353d8b52f88081", size = 557249, upload-time = "2025-08-27T12:14:08.37Z" }, - { url = "https://files.pythonhosted.org/packages/60/f3/690dd38e2310b6f68858a331399b4d6dbb9132c3e8ef8b4333b96caf403d/rpds_py-0.27.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:13e608ac9f50a0ed4faec0e90ece76ae33b34c0e8656e3dceb9a7db994c692cd", size = 587356, upload-time = "2025-08-27T12:14:10.034Z" }, - { url = "https://files.pythonhosted.org/packages/86/e3/84507781cccd0145f35b1dc32c72675200c5ce8d5b30f813e49424ef68fc/rpds_py-0.27.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:dd2135527aa40f061350c3f8f89da2644de26cd73e4de458e79606384f4f68e7", size = 555300, upload-time = "2025-08-27T12:14:11.783Z" }, - { url = "https://files.pythonhosted.org/packages/e5/ee/375469849e6b429b3516206b4580a79e9ef3eb12920ddbd4492b56eaacbe/rpds_py-0.27.1-cp313-cp313t-win32.whl", hash = "sha256:3020724ade63fe320a972e2ffd93b5623227e684315adce194941167fee02688", size = 216714, upload-time = "2025-08-27T12:14:13.629Z" }, - { url = "https://files.pythonhosted.org/packages/21/87/3fc94e47c9bd0742660e84706c311a860dcae4374cf4a03c477e23ce605a/rpds_py-0.27.1-cp313-cp313t-win_amd64.whl", hash = "sha256:8ee50c3e41739886606388ba3ab3ee2aae9f35fb23f833091833255a31740797", size = 228943, upload-time = "2025-08-27T12:14:14.937Z" }, - { url = 
"https://files.pythonhosted.org/packages/70/36/b6e6066520a07cf029d385de869729a895917b411e777ab1cde878100a1d/rpds_py-0.27.1-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:acb9aafccaae278f449d9c713b64a9e68662e7799dbd5859e2c6b3c67b56d334", size = 362472, upload-time = "2025-08-27T12:14:16.333Z" }, - { url = "https://files.pythonhosted.org/packages/af/07/b4646032e0dcec0df9c73a3bd52f63bc6c5f9cda992f06bd0e73fe3fbebd/rpds_py-0.27.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:b7fb801aa7f845ddf601c49630deeeccde7ce10065561d92729bfe81bd21fb33", size = 345676, upload-time = "2025-08-27T12:14:17.764Z" }, - { url = "https://files.pythonhosted.org/packages/b0/16/2f1003ee5d0af4bcb13c0cf894957984c32a6751ed7206db2aee7379a55e/rpds_py-0.27.1-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fe0dd05afb46597b9a2e11c351e5e4283c741237e7f617ffb3252780cca9336a", size = 385313, upload-time = "2025-08-27T12:14:19.829Z" }, - { url = "https://files.pythonhosted.org/packages/05/cd/7eb6dd7b232e7f2654d03fa07f1414d7dfc980e82ba71e40a7c46fd95484/rpds_py-0.27.1-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b6dfb0e058adb12d8b1d1b25f686e94ffa65d9995a5157afe99743bf7369d62b", size = 399080, upload-time = "2025-08-27T12:14:21.531Z" }, - { url = "https://files.pythonhosted.org/packages/20/51/5829afd5000ec1cb60f304711f02572d619040aa3ec033d8226817d1e571/rpds_py-0.27.1-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ed090ccd235f6fa8bb5861684567f0a83e04f52dfc2e5c05f2e4b1309fcf85e7", size = 523868, upload-time = "2025-08-27T12:14:23.485Z" }, - { url = "https://files.pythonhosted.org/packages/05/2c/30eebca20d5db95720ab4d2faec1b5e4c1025c473f703738c371241476a2/rpds_py-0.27.1-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bf876e79763eecf3e7356f157540d6a093cef395b65514f17a356f62af6cc136", size = 408750, upload-time = "2025-08-27T12:14:24.924Z" }, - { url = "https://files.pythonhosted.org/packages/90/1a/cdb5083f043597c4d4276eae4e4c70c55ab5accec078da8611f24575a367/rpds_py-0.27.1-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:12ed005216a51b1d6e2b02a7bd31885fe317e45897de81d86dcce7d74618ffff", size = 387688, upload-time = "2025-08-27T12:14:27.537Z" }, - { url = "https://files.pythonhosted.org/packages/7c/92/cf786a15320e173f945d205ab31585cc43969743bb1a48b6888f7a2b0a2d/rpds_py-0.27.1-cp314-cp314-manylinux_2_31_riscv64.whl", hash = "sha256:ee4308f409a40e50593c7e3bb8cbe0b4d4c66d1674a316324f0c2f5383b486f9", size = 407225, upload-time = "2025-08-27T12:14:28.981Z" }, - { url = "https://files.pythonhosted.org/packages/33/5c/85ee16df5b65063ef26017bef33096557a4c83fbe56218ac7cd8c235f16d/rpds_py-0.27.1-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0b08d152555acf1f455154d498ca855618c1378ec810646fcd7c76416ac6dc60", size = 423361, upload-time = "2025-08-27T12:14:30.469Z" }, - { url = "https://files.pythonhosted.org/packages/4b/8e/1c2741307fcabd1a334ecf008e92c4f47bb6f848712cf15c923becfe82bb/rpds_py-0.27.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:dce51c828941973a5684d458214d3a36fcd28da3e1875d659388f4f9f12cc33e", size = 562493, upload-time = "2025-08-27T12:14:31.987Z" }, - { url = "https://files.pythonhosted.org/packages/04/03/5159321baae9b2222442a70c1f988cbbd66b9be0675dd3936461269be360/rpds_py-0.27.1-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:c1476d6f29eb81aa4151c9a31219b03f1f798dc43d8af1250a870735516a1212", size = 592623, upload-time = "2025-08-27T12:14:33.543Z" 
}, - { url = "https://files.pythonhosted.org/packages/ff/39/c09fd1ad28b85bc1d4554a8710233c9f4cefd03d7717a1b8fbfd171d1167/rpds_py-0.27.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:3ce0cac322b0d69b63c9cdb895ee1b65805ec9ffad37639f291dd79467bee675", size = 558800, upload-time = "2025-08-27T12:14:35.436Z" }, - { url = "https://files.pythonhosted.org/packages/c5/d6/99228e6bbcf4baa764b18258f519a9035131d91b538d4e0e294313462a98/rpds_py-0.27.1-cp314-cp314-win32.whl", hash = "sha256:dfbfac137d2a3d0725758cd141f878bf4329ba25e34979797c89474a89a8a3a3", size = 221943, upload-time = "2025-08-27T12:14:36.898Z" }, - { url = "https://files.pythonhosted.org/packages/be/07/c802bc6b8e95be83b79bdf23d1aa61d68324cb1006e245d6c58e959e314d/rpds_py-0.27.1-cp314-cp314-win_amd64.whl", hash = "sha256:a6e57b0abfe7cc513450fcf529eb486b6e4d3f8aee83e92eb5f1ef848218d456", size = 233739, upload-time = "2025-08-27T12:14:38.386Z" }, - { url = "https://files.pythonhosted.org/packages/c8/89/3e1b1c16d4c2d547c5717377a8df99aee8099ff050f87c45cb4d5fa70891/rpds_py-0.27.1-cp314-cp314-win_arm64.whl", hash = "sha256:faf8d146f3d476abfee026c4ae3bdd9ca14236ae4e4c310cbd1cf75ba33d24a3", size = 223120, upload-time = "2025-08-27T12:14:39.82Z" }, - { url = "https://files.pythonhosted.org/packages/62/7e/dc7931dc2fa4a6e46b2a4fa744a9fe5c548efd70e0ba74f40b39fa4a8c10/rpds_py-0.27.1-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:ba81d2b56b6d4911ce735aad0a1d4495e808b8ee4dc58715998741a26874e7c2", size = 358944, upload-time = "2025-08-27T12:14:41.199Z" }, - { url = "https://files.pythonhosted.org/packages/e6/22/4af76ac4e9f336bfb1a5f240d18a33c6b2fcaadb7472ac7680576512b49a/rpds_py-0.27.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:84f7d509870098de0e864cad0102711c1e24e9b1a50ee713b65928adb22269e4", size = 342283, upload-time = "2025-08-27T12:14:42.699Z" }, - { url = "https://files.pythonhosted.org/packages/1c/15/2a7c619b3c2272ea9feb9ade67a45c40b3eeb500d503ad4c28c395dc51b4/rpds_py-0.27.1-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a9e960fc78fecd1100539f14132425e1d5fe44ecb9239f8f27f079962021523e", size = 380320, upload-time = "2025-08-27T12:14:44.157Z" }, - { url = "https://files.pythonhosted.org/packages/a2/7d/4c6d243ba4a3057e994bb5bedd01b5c963c12fe38dde707a52acdb3849e7/rpds_py-0.27.1-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:62f85b665cedab1a503747617393573995dac4600ff51869d69ad2f39eb5e817", size = 391760, upload-time = "2025-08-27T12:14:45.845Z" }, - { url = "https://files.pythonhosted.org/packages/b4/71/b19401a909b83bcd67f90221330bc1ef11bc486fe4e04c24388d28a618ae/rpds_py-0.27.1-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fed467af29776f6556250c9ed85ea5a4dd121ab56a5f8b206e3e7a4c551e48ec", size = 522476, upload-time = "2025-08-27T12:14:47.364Z" }, - { url = "https://files.pythonhosted.org/packages/e4/44/1a3b9715c0455d2e2f0f6df5ee6d6f5afdc423d0773a8a682ed2b43c566c/rpds_py-0.27.1-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2729615f9d430af0ae6b36cf042cb55c0936408d543fb691e1a9e36648fd35a", size = 403418, upload-time = "2025-08-27T12:14:49.991Z" }, - { url = "https://files.pythonhosted.org/packages/1c/4b/fb6c4f14984eb56673bc868a66536f53417ddb13ed44b391998100a06a96/rpds_py-0.27.1-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1b207d881a9aef7ba753d69c123a35d96ca7cb808056998f6b9e8747321f03b8", size = 384771, upload-time = "2025-08-27T12:14:52.159Z" }, - { url = 
"https://files.pythonhosted.org/packages/c0/56/d5265d2d28b7420d7b4d4d85cad8ef891760f5135102e60d5c970b976e41/rpds_py-0.27.1-cp314-cp314t-manylinux_2_31_riscv64.whl", hash = "sha256:639fd5efec029f99b79ae47e5d7e00ad8a773da899b6309f6786ecaf22948c48", size = 400022, upload-time = "2025-08-27T12:14:53.859Z" }, - { url = "https://files.pythonhosted.org/packages/8f/e9/9f5fc70164a569bdd6ed9046486c3568d6926e3a49bdefeeccfb18655875/rpds_py-0.27.1-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:fecc80cb2a90e28af8a9b366edacf33d7a91cbfe4c2c4544ea1246e949cfebeb", size = 416787, upload-time = "2025-08-27T12:14:55.673Z" }, - { url = "https://files.pythonhosted.org/packages/d4/64/56dd03430ba491db943a81dcdef115a985aac5f44f565cd39a00c766d45c/rpds_py-0.27.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:42a89282d711711d0a62d6f57d81aa43a1368686c45bc1c46b7f079d55692734", size = 557538, upload-time = "2025-08-27T12:14:57.245Z" }, - { url = "https://files.pythonhosted.org/packages/3f/36/92cc885a3129993b1d963a2a42ecf64e6a8e129d2c7cc980dbeba84e55fb/rpds_py-0.27.1-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:cf9931f14223de59551ab9d38ed18d92f14f055a5f78c1d8ad6493f735021bbb", size = 588512, upload-time = "2025-08-27T12:14:58.728Z" }, - { url = "https://files.pythonhosted.org/packages/dd/10/6b283707780a81919f71625351182b4f98932ac89a09023cb61865136244/rpds_py-0.27.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:f39f58a27cc6e59f432b568ed8429c7e1641324fbe38131de852cd77b2d534b0", size = 555813, upload-time = "2025-08-27T12:15:00.334Z" }, - { url = "https://files.pythonhosted.org/packages/04/2e/30b5ea18c01379da6272a92825dd7e53dc9d15c88a19e97932d35d430ef7/rpds_py-0.27.1-cp314-cp314t-win32.whl", hash = "sha256:d5fa0ee122dc09e23607a28e6d7b150da16c662e66409bbe85230e4c85bb528a", size = 217385, upload-time = "2025-08-27T12:15:01.937Z" }, - { url = "https://files.pythonhosted.org/packages/32/7d/97119da51cb1dd3f2f3c0805f155a3aa4a95fa44fe7d78ae15e69edf4f34/rpds_py-0.27.1-cp314-cp314t-win_amd64.whl", hash = "sha256:6567d2bb951e21232c2f660c24cf3470bb96de56cdcb3f071a83feeaff8a2772", size = 230097, upload-time = "2025-08-27T12:15:03.961Z" }, -] - -[[package]] -name = "rsa" -version = "4.9.1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "pyasn1" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/da/8a/22b7beea3ee0d44b1916c0c1cb0ee3af23b700b6da9f04991899d0c555d4/rsa-4.9.1.tar.gz", hash = "sha256:e7bdbfdb5497da4c07dfd35530e1a902659db6ff241e39d9953cad06ebd0ae75", size = 29034, upload-time = "2025-04-16T09:51:18.218Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/64/8d/0133e4eb4beed9e425d9a98ed6e081a55d195481b7632472be1af08d2f6b/rsa-4.9.1-py3-none-any.whl", hash = "sha256:68635866661c6836b8d39430f97a996acbd61bfa49406748ea243539fe239762", size = 34696, upload-time = "2025-04-16T09:51:17.142Z" }, -] - -[[package]] -name = "ruff" -version = "0.13.2" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/02/df/8d7d8c515d33adfc540e2edf6c6021ea1c5a58a678d8cfce9fae59aabcab/ruff-0.13.2.tar.gz", hash = "sha256:cb12fffd32fb16d32cef4ed16d8c7cdc27ed7c944eaa98d99d01ab7ab0b710ff", size = 5416417, upload-time = "2025-09-25T14:54:09.936Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/6e/84/5716a7fa4758e41bf70e603e13637c42cfb9dbf7ceb07180211b9bbf75ef/ruff-0.13.2-py3-none-linux_armv6l.whl", hash = "sha256:3796345842b55f033a78285e4f1641078f902020d8450cade03aad01bffd81c3", size = 
12343254, upload-time = "2025-09-25T14:53:27.784Z" }, - { url = "https://files.pythonhosted.org/packages/9b/77/c7042582401bb9ac8eff25360e9335e901d7a1c0749a2b28ba4ecb239991/ruff-0.13.2-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:ff7e4dda12e683e9709ac89e2dd436abf31a4d8a8fc3d89656231ed808e231d2", size = 13040891, upload-time = "2025-09-25T14:53:31.38Z" }, - { url = "https://files.pythonhosted.org/packages/c6/15/125a7f76eb295cb34d19c6778e3a82ace33730ad4e6f28d3427e134a02e0/ruff-0.13.2-py3-none-macosx_11_0_arm64.whl", hash = "sha256:c75e9d2a2fafd1fdd895d0e7e24b44355984affdde1c412a6f6d3f6e16b22d46", size = 12243588, upload-time = "2025-09-25T14:53:33.543Z" }, - { url = "https://files.pythonhosted.org/packages/9e/eb/0093ae04a70f81f8be7fd7ed6456e926b65d238fc122311293d033fdf91e/ruff-0.13.2-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cceac74e7bbc53ed7d15d1042ffe7b6577bf294611ad90393bf9b2a0f0ec7cb6", size = 12491359, upload-time = "2025-09-25T14:53:35.892Z" }, - { url = "https://files.pythonhosted.org/packages/43/fe/72b525948a6956f07dad4a6f122336b6a05f2e3fd27471cea612349fedb9/ruff-0.13.2-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6ae3f469b5465ba6d9721383ae9d49310c19b452a161b57507764d7ef15f4b07", size = 12162486, upload-time = "2025-09-25T14:53:38.171Z" }, - { url = "https://files.pythonhosted.org/packages/6a/e3/0fac422bbbfb2ea838023e0d9fcf1f30183d83ab2482800e2cb892d02dfe/ruff-0.13.2-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4f8f9e3cd6714358238cd6626b9d43026ed19c0c018376ac1ef3c3a04ffb42d8", size = 13871203, upload-time = "2025-09-25T14:53:41.943Z" }, - { url = "https://files.pythonhosted.org/packages/6b/82/b721c8e3ec5df6d83ba0e45dcf00892c4f98b325256c42c38ef136496cbf/ruff-0.13.2-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:c6ed79584a8f6cbe2e5d7dbacf7cc1ee29cbdb5df1172e77fbdadc8bb85a1f89", size = 14929635, upload-time = "2025-09-25T14:53:43.953Z" }, - { url = "https://files.pythonhosted.org/packages/c4/a0/ad56faf6daa507b83079a1ad7a11694b87d61e6bf01c66bd82b466f21821/ruff-0.13.2-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aed130b2fde049cea2019f55deb939103123cdd191105f97a0599a3e753d61b0", size = 14338783, upload-time = "2025-09-25T14:53:46.205Z" }, - { url = "https://files.pythonhosted.org/packages/47/77/ad1d9156db8f99cd01ee7e29d74b34050e8075a8438e589121fcd25c4b08/ruff-0.13.2-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1887c230c2c9d65ed1b4e4cfe4d255577ea28b718ae226c348ae68df958191aa", size = 13355322, upload-time = "2025-09-25T14:53:48.164Z" }, - { url = "https://files.pythonhosted.org/packages/64/8b/e87cfca2be6f8b9f41f0bb12dc48c6455e2d66df46fe61bb441a226f1089/ruff-0.13.2-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5bcb10276b69b3cfea3a102ca119ffe5c6ba3901e20e60cf9efb53fa417633c3", size = 13354427, upload-time = "2025-09-25T14:53:50.486Z" }, - { url = "https://files.pythonhosted.org/packages/7f/df/bf382f3fbead082a575edb860897287f42b1b3c694bafa16bc9904c11ed3/ruff-0.13.2-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:afa721017aa55a555b2ff7944816587f1cb813c2c0a882d158f59b832da1660d", size = 13537637, upload-time = "2025-09-25T14:53:52.887Z" }, - { url = "https://files.pythonhosted.org/packages/51/70/1fb7a7c8a6fc8bd15636288a46e209e81913b87988f26e1913d0851e54f4/ruff-0.13.2-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:1dbc875cf3720c64b3990fef8939334e74cb0ca65b8dbc61d1f439201a38101b", size = 
12340025, upload-time = "2025-09-25T14:53:54.88Z" }, - { url = "https://files.pythonhosted.org/packages/4c/27/1e5b3f1c23ca5dd4106d9d580e5c13d9acb70288bff614b3d7b638378cc9/ruff-0.13.2-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:5b939a1b2a960e9742e9a347e5bbc9b3c3d2c716f86c6ae273d9cbd64f193f22", size = 12133449, upload-time = "2025-09-25T14:53:57.089Z" }, - { url = "https://files.pythonhosted.org/packages/2d/09/b92a5ccee289f11ab128df57d5911224197d8d55ef3bd2043534ff72ca54/ruff-0.13.2-py3-none-musllinux_1_2_i686.whl", hash = "sha256:50e2d52acb8de3804fc5f6e2fa3ae9bdc6812410a9e46837e673ad1f90a18736", size = 13051369, upload-time = "2025-09-25T14:53:59.124Z" }, - { url = "https://files.pythonhosted.org/packages/89/99/26c9d1c7d8150f45e346dc045cc49f23e961efceb4a70c47dea0960dea9a/ruff-0.13.2-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:3196bc13ab2110c176b9a4ae5ff7ab676faaa1964b330a1383ba20e1e19645f2", size = 13523644, upload-time = "2025-09-25T14:54:01.622Z" }, - { url = "https://files.pythonhosted.org/packages/f7/00/e7f1501e81e8ec290e79527827af1d88f541d8d26151751b46108978dade/ruff-0.13.2-py3-none-win32.whl", hash = "sha256:7c2a0b7c1e87795fec3404a485096bcd790216c7c146a922d121d8b9c8f1aaac", size = 12245990, upload-time = "2025-09-25T14:54:03.647Z" }, - { url = "https://files.pythonhosted.org/packages/ee/bd/d9f33a73de84fafd0146c6fba4f497c4565fe8fa8b46874b8e438869abc2/ruff-0.13.2-py3-none-win_amd64.whl", hash = "sha256:17d95fb32218357c89355f6f6f9a804133e404fc1f65694372e02a557edf8585", size = 13324004, upload-time = "2025-09-25T14:54:06.05Z" }, - { url = "https://files.pythonhosted.org/packages/c3/12/28fa2f597a605884deb0f65c1b1ae05111051b2a7030f5d8a4ff7f4599ba/ruff-0.13.2-py3-none-win_arm64.whl", hash = "sha256:da711b14c530412c827219312b7d7fbb4877fb31150083add7e8c5336549cea7", size = 12484437, upload-time = "2025-09-25T14:54:08.022Z" }, -] - -[[package]] -name = "scale-gp" -version = "0.1.0a59" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "anyio" }, - { name = "distro" }, - { name = "httpx" }, - { name = "pydantic" }, - { name = "sniffio" }, - { name = "typing-extensions" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/c3/3e/c944564757c0a737937c3c87ef61ccfd5138bbe5201fa93edc704124a297/scale_gp-0.1.0a59.tar.gz", hash = "sha256:b7c1e6edb431824f44b8ed2e49969345465e45a1978c1ab6462a7db6d8718f1a", size = 408472, upload-time = "2025-02-05T18:01:22.365Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/13/7c/de71f2853d062535a762157eb9bc0c180fee160447039f074cbefe495728/scale_gp-0.1.0a59-py3-none-any.whl", hash = "sha256:841846c83e4760e14b76ac2a9b44d40a3e9800ad8505f9d24fef11d981466e35", size = 545196, upload-time = "2025-02-05T18:01:20.111Z" }, -] - -[[package]] -name = "scale-gp-beta" -version = "0.1.0a20" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "anyio" }, - { name = "distro" }, - { name = "httpx" }, - { name = "pydantic" }, - { name = "sniffio" }, - { name = "typing-extensions" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/86/2b/a6a373ec8d33fad8b553182aa1ceade94434752c935a4ce16ca84d188c0f/scale_gp_beta-0.1.0a20.tar.gz", hash = "sha256:a2be5c1afcb9171d3c2d6f15b45f4512105c79ab12be8e7ef5a22167ac77fb88", size = 157799, upload-time = "2025-06-12T14:46:17.73Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/af/12/55f47289a0ae1065e4115bc8018b2f01df0b6560b07bacfc0dcf6c3bdcbe/scale_gp_beta-0.1.0a20-py3-none-any.whl", hash = 
"sha256:482385ee6c3b912aecf70795948ac45b215a4d19feba60f67d6e10c4312440c6", size = 157906, upload-time = "2025-06-12T14:46:16.344Z" }, -] - -[[package]] -name = "shellingham" -version = "1.5.4" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/58/15/8b3609fd3830ef7b27b655beb4b4e9c62313a4e8da8c676e142cc210d58e/shellingham-1.5.4.tar.gz", hash = "sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de", size = 10310, upload-time = "2023-10-24T04:13:40.426Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/e0/f9/0595336914c5619e5f28a1fb793285925a8cd4b432c9da0a987836c7f822/shellingham-1.5.4-py2.py3-none-any.whl", hash = "sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686", size = 9755, upload-time = "2023-10-24T04:13:38.866Z" }, -] - -[[package]] -name = "six" -version = "1.17.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/94/e7/b2c673351809dca68a0e064b6af791aa332cf192da575fd474ed7d6f16a2/six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81", size = 34031, upload-time = "2024-12-04T17:35:28.174Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" }, -] - -[[package]] -name = "sniffio" -version = "1.3.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372, upload-time = "2024-02-25T23:20:04.057Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235, upload-time = "2024-02-25T23:20:01.196Z" }, -] - -[[package]] -name = "sse-starlette" -version = "3.0.2" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "anyio" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/42/6f/22ed6e33f8a9e76ca0a412405f31abb844b779d52c5f96660766edcd737c/sse_starlette-3.0.2.tar.gz", hash = "sha256:ccd60b5765ebb3584d0de2d7a6e4f745672581de4f5005ab31c3a25d10b52b3a", size = 20985, upload-time = "2025-07-27T09:07:44.565Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/ef/10/c78f463b4ef22eef8491f218f692be838282cd65480f6e423d7730dfd1fb/sse_starlette-3.0.2-py3-none-any.whl", hash = "sha256:16b7cbfddbcd4eaca11f7b586f3b8a080f1afe952c15813455b162edea619e5a", size = 11297, upload-time = "2025-07-27T09:07:43.268Z" }, -] - -[[package]] -name = "stack-data" -version = "0.6.3" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "asttokens" }, - { name = "executing" }, - { name = "pure-eval" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/28/e3/55dcc2cfbc3ca9c29519eb6884dd1415ecb53b0e934862d3559ddcb7e20b/stack_data-0.6.3.tar.gz", hash = "sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9", size = 44707, upload-time = "2023-09-30T13:58:05.479Z" } -wheels = [ - { url = 
"https://files.pythonhosted.org/packages/f1/7b/ce1eafaf1a76852e2ec9b22edecf1daa58175c090266e9f6c64afcd81d91/stack_data-0.6.3-py3-none-any.whl", hash = "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695", size = 24521, upload-time = "2023-09-30T13:58:03.53Z" }, -] - -[[package]] -name = "starlette" -version = "0.46.2" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "anyio" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/ce/20/08dfcd9c983f6a6f4a1000d934b9e6d626cff8d2eeb77a89a68eef20a2b7/starlette-0.46.2.tar.gz", hash = "sha256:7f7361f34eed179294600af672f565727419830b54b7b084efe44bb82d2fccd5", size = 2580846, upload-time = "2025-04-13T13:56:17.942Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/8b/0c/9d30a4ebeb6db2b25a841afbb80f6ef9a854fc3b41be131d249a977b4959/starlette-0.46.2-py3-none-any.whl", hash = "sha256:595633ce89f8ffa71a015caed34a5b2dc1c0cdb3f0f1fbd1e69339cf2abeec35", size = 72037, upload-time = "2025-04-13T13:56:16.21Z" }, -] - -[[package]] -name = "temporalio" -version = "1.18.2" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "nexus-rpc" }, - { name = "protobuf" }, - { name = "types-protobuf" }, - { name = "typing-extensions" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/61/ab/f866e19e02d46792ebd08e3e975f30ec86f985f1cd0cee4d5bc632d538e6/temporalio-1.18.2.tar.gz", hash = "sha256:b6ecb43562988ac698eef155d9c2527fa6c53a2ae564cb4787421499a0269599", size = 1788530, upload-time = "2025-10-27T19:24:50.624Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/35/7e/2fc0d8cf18644fba2e4e5d0ba66206bf2e6f22ad04e9da8f038261351ff6/temporalio-1.18.2-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:b97f046b23a492b0a3956dffce3664ed44ca1f3577df6aa4b8023ec988f5d093", size = 12808656, upload-time = "2025-10-27T19:24:26.884Z" }, - { url = "https://files.pythonhosted.org/packages/f9/2f/8ee0836e31f60a03582358ff64210f6f0c91001f34e4fc85da892e5c3087/temporalio-1.18.2-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:dac1203ebabae3268bc0b43b8c841e6f9102893c616488a278bc5d06043b76e5", size = 12393520, upload-time = "2025-10-27T19:24:32.048Z" }, - { url = "https://files.pythonhosted.org/packages/fc/39/53d9add2e8c1d4066ea0c288476cdc944453ea309e36e7b24d4a1c43db79/temporalio-1.18.2-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f45b124099dbbe32a76a4df0d851db2ad94e6725f778fd74fccc8d33ee4868e9", size = 12733544, upload-time = "2025-10-27T19:24:36.871Z" }, - { url = "https://files.pythonhosted.org/packages/22/b2/5558978e9002fd1e71a1a459ddcc8fdd1fc1f4f595fd578eb7568be266a6/temporalio-1.18.2-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d427a398f6f402920e9f78d571b7d7a72876244d5affeaeaec8843c34013e84e", size = 12926857, upload-time = "2025-10-27T19:24:43.443Z" }, - { url = "https://files.pythonhosted.org/packages/5d/a2/ea81149ae7faa154aa842e9dd88390df3158a687db694e06d08716c030b6/temporalio-1.18.2-cp39-abi3-win_amd64.whl", hash = "sha256:7e1f3da98cf23af7094467f1c1180374a10c38aa712cd7e868e15412dd2c6cde", size = 13059111, upload-time = "2025-10-27T19:24:48.382Z" }, -] - -[[package]] -name = "termcolor" -version = "3.1.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/ca/6c/3d75c196ac07ac8749600b60b03f4f6094d54e132c4d94ebac6ee0e0add0/termcolor-3.1.0.tar.gz", hash = "sha256:6a6dd7fbee581909eeec6a756cff1d7f7c376063b14e4a298dc4980309e55970", size = 14324, 
upload-time = "2025-04-30T11:37:53.791Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/4f/bd/de8d508070629b6d84a30d01d57e4a65c69aa7f5abe7560b8fad3b50ea59/termcolor-3.1.0-py3-none-any.whl", hash = "sha256:591dd26b5c2ce03b9e43f391264626557873ce1d379019786f99b0c2bee140aa", size = 7684, upload-time = "2025-04-30T11:37:52.382Z" }, -] - -[[package]] -name = "tiktoken" -version = "0.11.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "regex" }, - { name = "requests" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/a7/86/ad0155a37c4f310935d5ac0b1ccf9bdb635dcb906e0a9a26b616dd55825a/tiktoken-0.11.0.tar.gz", hash = "sha256:3c518641aee1c52247c2b97e74d8d07d780092af79d5911a6ab5e79359d9b06a", size = 37648, upload-time = "2025-08-08T23:58:08.495Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/e7/9e/eceddeffc169fc75fe0fd4f38471309f11cb1906f9b8aa39be4f5817df65/tiktoken-0.11.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fd9e6b23e860973cf9526544e220b223c60badf5b62e80a33509d6d40e6c8f5d", size = 1055199, upload-time = "2025-08-08T23:57:45.076Z" }, - { url = "https://files.pythonhosted.org/packages/4f/cf/5f02bfefffdc6b54e5094d2897bc80efd43050e5b09b576fd85936ee54bf/tiktoken-0.11.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6a76d53cee2da71ee2731c9caa747398762bda19d7f92665e882fef229cb0b5b", size = 996655, upload-time = "2025-08-08T23:57:46.304Z" }, - { url = "https://files.pythonhosted.org/packages/65/8e/c769b45ef379bc360c9978c4f6914c79fd432400a6733a8afc7ed7b0726a/tiktoken-0.11.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ef72aab3ea240646e642413cb363b73869fed4e604dcfd69eec63dc54d603e8", size = 1128867, upload-time = "2025-08-08T23:57:47.438Z" }, - { url = "https://files.pythonhosted.org/packages/d5/2d/4d77f6feb9292bfdd23d5813e442b3bba883f42d0ac78ef5fdc56873f756/tiktoken-0.11.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f929255c705efec7a28bf515e29dc74220b2f07544a8c81b8d69e8efc4578bd", size = 1183308, upload-time = "2025-08-08T23:57:48.566Z" }, - { url = "https://files.pythonhosted.org/packages/7a/65/7ff0a65d3bb0fc5a1fb6cc71b03e0f6e71a68c5eea230d1ff1ba3fd6df49/tiktoken-0.11.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:61f1d15822e4404953d499fd1dcc62817a12ae9fb1e4898033ec8fe3915fdf8e", size = 1244301, upload-time = "2025-08-08T23:57:49.642Z" }, - { url = "https://files.pythonhosted.org/packages/f5/6e/5b71578799b72e5bdcef206a214c3ce860d999d579a3b56e74a6c8989ee2/tiktoken-0.11.0-cp312-cp312-win_amd64.whl", hash = "sha256:45927a71ab6643dfd3ef57d515a5db3d199137adf551f66453be098502838b0f", size = 884282, upload-time = "2025-08-08T23:57:50.759Z" }, - { url = "https://files.pythonhosted.org/packages/cc/cd/a9034bcee638716d9310443818d73c6387a6a96db93cbcb0819b77f5b206/tiktoken-0.11.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a5f3f25ffb152ee7fec78e90a5e5ea5b03b4ea240beed03305615847f7a6ace2", size = 1055339, upload-time = "2025-08-08T23:57:51.802Z" }, - { url = "https://files.pythonhosted.org/packages/f1/91/9922b345f611b4e92581f234e64e9661e1c524875c8eadd513c4b2088472/tiktoken-0.11.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7dc6e9ad16a2a75b4c4be7208055a1f707c9510541d94d9cc31f7fbdc8db41d8", size = 997080, upload-time = "2025-08-08T23:57:53.442Z" }, - { url = 
"https://files.pythonhosted.org/packages/d0/9d/49cd047c71336bc4b4af460ac213ec1c457da67712bde59b892e84f1859f/tiktoken-0.11.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5a0517634d67a8a48fd4a4ad73930c3022629a85a217d256a6e9b8b47439d1e4", size = 1128501, upload-time = "2025-08-08T23:57:54.808Z" }, - { url = "https://files.pythonhosted.org/packages/52/d5/a0dcdb40dd2ea357e83cb36258967f0ae96f5dd40c722d6e382ceee6bba9/tiktoken-0.11.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7fb4effe60574675118b73c6fbfd3b5868e5d7a1f570d6cc0d18724b09ecf318", size = 1182743, upload-time = "2025-08-08T23:57:56.307Z" }, - { url = "https://files.pythonhosted.org/packages/3b/17/a0fc51aefb66b7b5261ca1314afa83df0106b033f783f9a7bcbe8e741494/tiktoken-0.11.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:94f984c9831fd32688aef4348803b0905d4ae9c432303087bae370dc1381a2b8", size = 1244057, upload-time = "2025-08-08T23:57:57.628Z" }, - { url = "https://files.pythonhosted.org/packages/50/79/bcf350609f3a10f09fe4fc207f132085e497fdd3612f3925ab24d86a0ca0/tiktoken-0.11.0-cp313-cp313-win_amd64.whl", hash = "sha256:2177ffda31dec4023356a441793fed82f7af5291120751dee4d696414f54db0c", size = 883901, upload-time = "2025-08-08T23:57:59.359Z" }, -] - -[[package]] -name = "tokenizers" -version = "0.22.1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "huggingface-hub" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/1c/46/fb6854cec3278fbfa4a75b50232c77622bc517ac886156e6afbfa4d8fc6e/tokenizers-0.22.1.tar.gz", hash = "sha256:61de6522785310a309b3407bac22d99c4db5dba349935e99e4d15ea2226af2d9", size = 363123, upload-time = "2025-09-19T09:49:23.424Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/bf/33/f4b2d94ada7ab297328fc671fed209368ddb82f965ec2224eb1892674c3a/tokenizers-0.22.1-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:59fdb013df17455e5f950b4b834a7b3ee2e0271e6378ccb33aa74d178b513c73", size = 3069318, upload-time = "2025-09-19T09:49:11.848Z" }, - { url = "https://files.pythonhosted.org/packages/1c/58/2aa8c874d02b974990e89ff95826a4852a8b2a273c7d1b4411cdd45a4565/tokenizers-0.22.1-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:8d4e484f7b0827021ac5f9f71d4794aaef62b979ab7608593da22b1d2e3c4edc", size = 2926478, upload-time = "2025-09-19T09:49:09.759Z" }, - { url = "https://files.pythonhosted.org/packages/1e/3b/55e64befa1e7bfea963cf4b787b2cea1011362c4193f5477047532ce127e/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19d2962dd28bc67c1f205ab180578a78eef89ac60ca7ef7cbe9635a46a56422a", size = 3256994, upload-time = "2025-09-19T09:48:56.701Z" }, - { url = "https://files.pythonhosted.org/packages/71/0b/fbfecf42f67d9b7b80fde4aabb2b3110a97fac6585c9470b5bff103a80cb/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:38201f15cdb1f8a6843e6563e6e79f4abd053394992b9bbdf5213ea3469b4ae7", size = 3153141, upload-time = "2025-09-19T09:48:59.749Z" }, - { url = "https://files.pythonhosted.org/packages/17/a9/b38f4e74e0817af8f8ef925507c63c6ae8171e3c4cb2d5d4624bf58fca69/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d1cbe5454c9a15df1b3443c726063d930c16f047a3cc724b9e6e1a91140e5a21", size = 3508049, upload-time = "2025-09-19T09:49:05.868Z" }, - { url = 
"https://files.pythonhosted.org/packages/d2/48/dd2b3dac46bb9134a88e35d72e1aa4869579eacc1a27238f1577270773ff/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e7d094ae6312d69cc2a872b54b91b309f4f6fbce871ef28eb27b52a98e4d0214", size = 3710730, upload-time = "2025-09-19T09:49:01.832Z" }, - { url = "https://files.pythonhosted.org/packages/93/0e/ccabc8d16ae4ba84a55d41345207c1e2ea88784651a5a487547d80851398/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:afd7594a56656ace95cdd6df4cca2e4059d294c5cfb1679c57824b605556cb2f", size = 3412560, upload-time = "2025-09-19T09:49:03.867Z" }, - { url = "https://files.pythonhosted.org/packages/d0/c6/dc3a0db5a6766416c32c034286d7c2d406da1f498e4de04ab1b8959edd00/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2ef6063d7a84994129732b47e7915e8710f27f99f3a3260b8a38fc7ccd083f4", size = 3250221, upload-time = "2025-09-19T09:49:07.664Z" }, - { url = "https://files.pythonhosted.org/packages/d7/a6/2c8486eef79671601ff57b093889a345dd3d576713ef047776015dc66de7/tokenizers-0.22.1-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ba0a64f450b9ef412c98f6bcd2a50c6df6e2443b560024a09fa6a03189726879", size = 9345569, upload-time = "2025-09-19T09:49:14.214Z" }, - { url = "https://files.pythonhosted.org/packages/6b/16/32ce667f14c35537f5f605fe9bea3e415ea1b0a646389d2295ec348d5657/tokenizers-0.22.1-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:331d6d149fa9c7d632cde4490fb8bbb12337fa3a0232e77892be656464f4b446", size = 9271599, upload-time = "2025-09-19T09:49:16.639Z" }, - { url = "https://files.pythonhosted.org/packages/51/7c/a5f7898a3f6baa3fc2685c705e04c98c1094c523051c805cdd9306b8f87e/tokenizers-0.22.1-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:607989f2ea68a46cb1dfbaf3e3aabdf3f21d8748312dbeb6263d1b3b66c5010a", size = 9533862, upload-time = "2025-09-19T09:49:19.146Z" }, - { url = "https://files.pythonhosted.org/packages/36/65/7e75caea90bc73c1dd8d40438adf1a7bc26af3b8d0a6705ea190462506e1/tokenizers-0.22.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a0f307d490295717726598ef6fa4f24af9d484809223bbc253b201c740a06390", size = 9681250, upload-time = "2025-09-19T09:49:21.501Z" }, - { url = "https://files.pythonhosted.org/packages/30/2c/959dddef581b46e6209da82df3b78471e96260e2bc463f89d23b1bf0e52a/tokenizers-0.22.1-cp39-abi3-win32.whl", hash = "sha256:b5120eed1442765cd90b903bb6cfef781fd8fe64e34ccaecbae4c619b7b12a82", size = 2472003, upload-time = "2025-09-19T09:49:27.089Z" }, - { url = "https://files.pythonhosted.org/packages/b3/46/e33a8c93907b631a99377ef4c5f817ab453d0b34f93529421f42ff559671/tokenizers-0.22.1-cp39-abi3-win_amd64.whl", hash = "sha256:65fd6e3fb11ca1e78a6a93602490f134d1fdeb13bcef99389d5102ea318ed138", size = 2674684, upload-time = "2025-09-19T09:49:24.953Z" }, -] - -[[package]] -name = "tornado" -version = "6.5.2" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/09/ce/1eb500eae19f4648281bb2186927bb062d2438c2e5093d1360391afd2f90/tornado-6.5.2.tar.gz", hash = "sha256:ab53c8f9a0fa351e2c0741284e06c7a45da86afb544133201c5cc8578eb076a0", size = 510821, upload-time = "2025-08-08T18:27:00.78Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/f6/48/6a7529df2c9cc12efd2e8f5dd219516184d703b34c06786809670df5b3bd/tornado-6.5.2-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:2436822940d37cde62771cff8774f4f00b3c8024fe482e16ca8387b8a2724db6", size = 442563, upload-time = 
"2025-08-08T18:26:42.945Z" }, - { url = "https://files.pythonhosted.org/packages/f2/b5/9b575a0ed3e50b00c40b08cbce82eb618229091d09f6d14bce80fc01cb0b/tornado-6.5.2-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:583a52c7aa94ee046854ba81d9ebb6c81ec0fd30386d96f7640c96dad45a03ef", size = 440729, upload-time = "2025-08-08T18:26:44.473Z" }, - { url = "https://files.pythonhosted.org/packages/1b/4e/619174f52b120efcf23633c817fd3fed867c30bff785e2cd5a53a70e483c/tornado-6.5.2-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b0fe179f28d597deab2842b86ed4060deec7388f1fd9c1b4a41adf8af058907e", size = 444295, upload-time = "2025-08-08T18:26:46.021Z" }, - { url = "https://files.pythonhosted.org/packages/95/fa/87b41709552bbd393c85dd18e4e3499dcd8983f66e7972926db8d96aa065/tornado-6.5.2-cp39-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b186e85d1e3536d69583d2298423744740986018e393d0321df7340e71898882", size = 443644, upload-time = "2025-08-08T18:26:47.625Z" }, - { url = "https://files.pythonhosted.org/packages/f9/41/fb15f06e33d7430ca89420283a8762a4e6b8025b800ea51796ab5e6d9559/tornado-6.5.2-cp39-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e792706668c87709709c18b353da1f7662317b563ff69f00bab83595940c7108", size = 443878, upload-time = "2025-08-08T18:26:50.599Z" }, - { url = "https://files.pythonhosted.org/packages/11/92/fe6d57da897776ad2e01e279170ea8ae726755b045fe5ac73b75357a5a3f/tornado-6.5.2-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:06ceb1300fd70cb20e43b1ad8aaee0266e69e7ced38fa910ad2e03285009ce7c", size = 444549, upload-time = "2025-08-08T18:26:51.864Z" }, - { url = "https://files.pythonhosted.org/packages/9b/02/c8f4f6c9204526daf3d760f4aa555a7a33ad0e60843eac025ccfd6ff4a93/tornado-6.5.2-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:74db443e0f5251be86cbf37929f84d8c20c27a355dd452a5cfa2aada0d001ec4", size = 443973, upload-time = "2025-08-08T18:26:53.625Z" }, - { url = "https://files.pythonhosted.org/packages/ae/2d/f5f5707b655ce2317190183868cd0f6822a1121b4baeae509ceb9590d0bd/tornado-6.5.2-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:b5e735ab2889d7ed33b32a459cac490eda71a1ba6857b0118de476ab6c366c04", size = 443954, upload-time = "2025-08-08T18:26:55.072Z" }, - { url = "https://files.pythonhosted.org/packages/e8/59/593bd0f40f7355806bf6573b47b8c22f8e1374c9b6fd03114bd6b7a3dcfd/tornado-6.5.2-cp39-abi3-win32.whl", hash = "sha256:c6f29e94d9b37a95013bb669616352ddb82e3bfe8326fccee50583caebc8a5f0", size = 445023, upload-time = "2025-08-08T18:26:56.677Z" }, - { url = "https://files.pythonhosted.org/packages/c7/2a/f609b420c2f564a748a2d80ebfb2ee02a73ca80223af712fca591386cafb/tornado-6.5.2-cp39-abi3-win_amd64.whl", hash = "sha256:e56a5af51cc30dd2cae649429af65ca2f6571da29504a07995175df14c18f35f", size = 445427, upload-time = "2025-08-08T18:26:57.91Z" }, - { url = "https://files.pythonhosted.org/packages/5e/4f/e1f65e8f8c76d73658b33d33b81eed4322fb5085350e4328d5c956f0c8f9/tornado-6.5.2-cp39-abi3-win_arm64.whl", hash = "sha256:d6c33dc3672e3a1f3618eb63b7ef4683a7688e7b9e6e8f0d9aa5726360a004af", size = 444456, upload-time = "2025-08-08T18:26:59.207Z" }, -] - -[[package]] -name = "tqdm" -version = "4.67.1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "colorama", marker = "sys_platform == 'win32'" }, -] -sdist = { url = 
"https://files.pythonhosted.org/packages/a8/4b/29b4ef32e036bb34e4ab51796dd745cdba7ed47ad142a9f4a1eb8e0c744d/tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2", size = 169737, upload-time = "2024-11-24T20:12:22.481Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/d0/30/dc54f88dd4a2b5dc8a0279bdd7270e735851848b762aeb1c1184ed1f6b14/tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2", size = 78540, upload-time = "2024-11-24T20:12:19.698Z" }, -] - -[[package]] -name = "traitlets" -version = "5.14.3" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/eb/79/72064e6a701c2183016abbbfedaba506d81e30e232a68c9f0d6f6fcd1574/traitlets-5.14.3.tar.gz", hash = "sha256:9ed0579d3502c94b4b3732ac120375cda96f923114522847de4b3bb98b96b6b7", size = 161621, upload-time = "2024-04-19T11:11:49.746Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/00/c0/8f5d070730d7836adc9c9b6408dec68c6ced86b304a9b26a14df072a6e8c/traitlets-5.14.3-py3-none-any.whl", hash = "sha256:b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f", size = 85359, upload-time = "2024-04-19T11:11:46.763Z" }, -] - -[[package]] -name = "typer" -version = "0.16.1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "click" }, - { name = "rich" }, - { name = "shellingham" }, - { name = "typing-extensions" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/43/78/d90f616bf5f88f8710ad067c1f8705bf7618059836ca084e5bb2a0855d75/typer-0.16.1.tar.gz", hash = "sha256:d358c65a464a7a90f338e3bb7ff0c74ac081449e53884b12ba658cbd72990614", size = 102836, upload-time = "2025-08-18T19:18:22.898Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/2d/76/06dbe78f39b2203d2a47d5facc5df5102d0561e2807396471b5f7c5a30a1/typer-0.16.1-py3-none-any.whl", hash = "sha256:90ee01cb02d9b8395ae21ee3368421faf21fa138cb2a541ed369c08cec5237c9", size = 46397, upload-time = "2025-08-18T19:18:21.663Z" }, -] - -[[package]] -name = "types-protobuf" -version = "6.32.1.20250918" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/69/5a/bd06c2dbb77ebd4ea764473c9c4c014c7ba94432192cb965a274f8544b9d/types_protobuf-6.32.1.20250918.tar.gz", hash = "sha256:44ce0ae98475909ca72379946ab61a4435eec2a41090821e713c17e8faf5b88f", size = 63780, upload-time = "2025-09-18T02:50:39.391Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/37/5a/8d93d4f4af5dc3dd62aa4f020deae746b34b1d94fb5bee1f776c6b7e9d6c/types_protobuf-6.32.1.20250918-py3-none-any.whl", hash = "sha256:22ba6133d142d11cc34d3788ad6dead2732368ebb0406eaa7790ea6ae46c8d0b", size = 77885, upload-time = "2025-09-18T02:50:38.028Z" }, -] - -[[package]] -name = "types-requests" -version = "2.31.0.6" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "types-urllib3" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/f9/b8/c1e8d39996b4929b918aba10dba5de07a8b3f4c8487bb61bb79882544e69/types-requests-2.31.0.6.tar.gz", hash = "sha256:cd74ce3b53c461f1228a9b783929ac73a666658f223e28ed29753771477b3bd0", size = 15535, upload-time = "2023-09-27T06:19:38.443Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/5c/a1/6f8dc74d9069e790d604ddae70cb46dcbac668f1bb08136e7b0f2f5cd3bf/types_requests-2.31.0.6-py3-none-any.whl", hash = 
"sha256:a2db9cb228a81da8348b49ad6db3f5519452dd20a9c1e1a868c83c5fe88fd1a9", size = 14516, upload-time = "2023-09-27T06:19:36.373Z" }, -] - -[[package]] -name = "types-urllib3" -version = "1.26.25.14" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/73/de/b9d7a68ad39092368fb21dd6194b362b98a1daeea5dcfef5e1adb5031c7e/types-urllib3-1.26.25.14.tar.gz", hash = "sha256:229b7f577c951b8c1b92c1bc2b2fdb0b49847bd2af6d1cc2a2e3dd340f3bda8f", size = 11239, upload-time = "2023-07-20T15:19:31.307Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/11/7b/3fc711b2efea5e85a7a0bbfe269ea944aa767bbba5ec52f9ee45d362ccf3/types_urllib3-1.26.25.14-py3-none-any.whl", hash = "sha256:9683bbb7fb72e32bfe9d2be6e04875fbe1b3eeec3cbb4ea231435aa7fd6b4f0e", size = 15377, upload-time = "2023-07-20T15:19:30.379Z" }, -] - -[[package]] -name = "typing-extensions" -version = "4.15.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/72/94/1a15dd82efb362ac84269196e94cf00f187f7ed21c242792a923cdb1c61f/typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466", size = 109391, upload-time = "2025-08-25T13:49:26.313Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614, upload-time = "2025-08-25T13:49:24.86Z" }, -] - -[[package]] -name = "typing-inspection" -version = "0.4.2" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "typing-extensions" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/55/e3/70399cb7dd41c10ac53367ae42139cf4b1ca5f36bb3dc6c9d33acdb43655/typing_inspection-0.4.2.tar.gz", hash = "sha256:ba561c48a67c5958007083d386c3295464928b01faa735ab8547c5692e87f464", size = 75949, upload-time = "2025-10-01T02:14:41.687Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/dc/9b/47798a6c91d8bdb567fe2698fe81e0c6b7cb7ef4d13da4114b41d239f65d/typing_inspection-0.4.2-py3-none-any.whl", hash = "sha256:4ed1cacbdc298c220f1bd249ed5287caa16f34d44ef4e9c3d0cbad5b521545e7", size = 14611, upload-time = "2025-10-01T02:14:40.154Z" }, -] - -[[package]] -name = "tzdata" -version = "2025.2" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/95/32/1a225d6164441be760d75c2c42e2780dc0873fe382da3e98a2e1e48361e5/tzdata-2025.2.tar.gz", hash = "sha256:b60a638fcc0daffadf82fe0f57e53d06bdec2f36c4df66280ae79bce6bd6f2b9", size = 196380, upload-time = "2025-03-23T13:54:43.652Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/5c/23/c7abc0ca0a1526a0774eca151daeb8de62ec457e77262b66b359c3c7679e/tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8", size = 347839, upload-time = "2025-03-23T13:54:41.845Z" }, -] - -[[package]] -name = "tzlocal" -version = "5.3.1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "tzdata", marker = "sys_platform == 'win32'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/8b/2e/c14812d3d4d9cd1773c6be938f89e5735a1f11a9f184ac3639b93cef35d5/tzlocal-5.3.1.tar.gz", hash = "sha256:cceffc7edecefea1f595541dbd6e990cb1ea3d19bf01b2809f362a03dd7921fd", size = 30761, upload-time = "2025-03-05T21:17:41.549Z" } -wheels 
= [ - { url = "https://files.pythonhosted.org/packages/c2/14/e2a54fabd4f08cd7af1c07030603c3356b74da07f7cc056e600436edfa17/tzlocal-5.3.1-py3-none-any.whl", hash = "sha256:eb1a66c3ef5847adf7a834f1be0800581b683b5608e74f86ecbcef8ab91bb85d", size = 18026, upload-time = "2025-03-05T21:17:39.857Z" }, -] - -[[package]] -name = "urllib3" -version = "1.26.20" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/e4/e8/6ff5e6bc22095cfc59b6ea711b687e2b7ed4bdb373f7eeec370a97d7392f/urllib3-1.26.20.tar.gz", hash = "sha256:40c2dc0c681e47eb8f90e7e27bf6ff7df2e677421fd46756da1161c39ca70d32", size = 307380, upload-time = "2024-08-29T15:43:11.37Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/33/cf/8435d5a7159e2a9c83a95896ed596f68cf798005fe107cc655b5c5c14704/urllib3-1.26.20-py2.py3-none-any.whl", hash = "sha256:0ed14ccfbf1c30a9072c7ca157e4319b70d65f623e91e7b32fadb2853431016e", size = 144225, upload-time = "2024-08-29T15:43:08.921Z" }, -] - -[[package]] -name = "uvicorn" -version = "0.37.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "click" }, - { name = "h11" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/71/57/1616c8274c3442d802621abf5deb230771c7a0fec9414cb6763900eb3868/uvicorn-0.37.0.tar.gz", hash = "sha256:4115c8add6d3fd536c8ee77f0e14a7fd2ebba939fed9b02583a97f80648f9e13", size = 80367, upload-time = "2025-09-23T13:33:47.486Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/85/cd/584a2ceb5532af99dd09e50919e3615ba99aa127e9850eafe5f31ddfdb9a/uvicorn-0.37.0-py3-none-any.whl", hash = "sha256:913b2b88672343739927ce381ff9e2ad62541f9f8289664fa1d1d3803fa2ce6c", size = 67976, upload-time = "2025-09-23T13:33:45.842Z" }, -] - -[[package]] -name = "watchfiles" -version = "0.24.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "anyio" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/c8/27/2ba23c8cc85796e2d41976439b08d52f691655fdb9401362099502d1f0cf/watchfiles-0.24.0.tar.gz", hash = "sha256:afb72325b74fa7a428c009c1b8be4b4d7c2afedafb2982827ef2156646df2fe1", size = 37870, upload-time = "2024-08-28T16:21:37.42Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/35/82/92a7bb6dc82d183e304a5f84ae5437b59ee72d48cee805a9adda2488b237/watchfiles-0.24.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:7211b463695d1e995ca3feb38b69227e46dbd03947172585ecb0588f19b0d87a", size = 374137, upload-time = "2024-08-28T16:20:23.055Z" }, - { url = "https://files.pythonhosted.org/packages/87/91/49e9a497ddaf4da5e3802d51ed67ff33024597c28f652b8ab1e7c0f5718b/watchfiles-0.24.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4b8693502d1967b00f2fb82fc1e744df128ba22f530e15b763c8d82baee15370", size = 367733, upload-time = "2024-08-28T16:20:24.543Z" }, - { url = "https://files.pythonhosted.org/packages/0d/d8/90eb950ab4998effea2df4cf3a705dc594f6bc501c5a353073aa990be965/watchfiles-0.24.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cdab9555053399318b953a1fe1f586e945bc8d635ce9d05e617fd9fe3a4687d6", size = 437322, upload-time = "2024-08-28T16:20:25.572Z" }, - { url = "https://files.pythonhosted.org/packages/6c/a2/300b22e7bc2a222dd91fce121cefa7b49aa0d26a627b2777e7bdfcf1110b/watchfiles-0.24.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:34e19e56d68b0dad5cff62273107cf5d9fbaf9d75c46277aa5d803b3ef8a9e9b", size = 433409, upload-time = "2024-08-28T16:20:26.628Z" }, - { url = 
"https://files.pythonhosted.org/packages/99/44/27d7708a43538ed6c26708bcccdde757da8b7efb93f4871d4cc39cffa1cc/watchfiles-0.24.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:41face41f036fee09eba33a5b53a73e9a43d5cb2c53dad8e61fa6c9f91b5a51e", size = 452142, upload-time = "2024-08-28T16:20:28.003Z" }, - { url = "https://files.pythonhosted.org/packages/b0/ec/c4e04f755be003129a2c5f3520d2c47026f00da5ecb9ef1e4f9449637571/watchfiles-0.24.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5148c2f1ea043db13ce9b0c28456e18ecc8f14f41325aa624314095b6aa2e9ea", size = 469414, upload-time = "2024-08-28T16:20:29.55Z" }, - { url = "https://files.pythonhosted.org/packages/c5/4e/cdd7de3e7ac6432b0abf282ec4c1a1a2ec62dfe423cf269b86861667752d/watchfiles-0.24.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7e4bd963a935aaf40b625c2499f3f4f6bbd0c3776f6d3bc7c853d04824ff1c9f", size = 472962, upload-time = "2024-08-28T16:20:31.314Z" }, - { url = "https://files.pythonhosted.org/packages/27/69/e1da9d34da7fc59db358424f5d89a56aaafe09f6961b64e36457a80a7194/watchfiles-0.24.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c79d7719d027b7a42817c5d96461a99b6a49979c143839fc37aa5748c322f234", size = 425705, upload-time = "2024-08-28T16:20:32.427Z" }, - { url = "https://files.pythonhosted.org/packages/e8/c1/24d0f7357be89be4a43e0a656259676ea3d7a074901f47022f32e2957798/watchfiles-0.24.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:32aa53a9a63b7f01ed32e316e354e81e9da0e6267435c7243bf8ae0f10b428ef", size = 612851, upload-time = "2024-08-28T16:20:33.527Z" }, - { url = "https://files.pythonhosted.org/packages/c7/af/175ba9b268dec56f821639c9893b506c69fd999fe6a2e2c51de420eb2f01/watchfiles-0.24.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ce72dba6a20e39a0c628258b5c308779b8697f7676c254a845715e2a1039b968", size = 594868, upload-time = "2024-08-28T16:20:34.639Z" }, - { url = "https://files.pythonhosted.org/packages/44/81/1f701323a9f70805bc81c74c990137123344a80ea23ab9504a99492907f8/watchfiles-0.24.0-cp312-none-win32.whl", hash = "sha256:d9018153cf57fc302a2a34cb7564870b859ed9a732d16b41a9b5cb2ebed2d444", size = 264109, upload-time = "2024-08-28T16:20:35.692Z" }, - { url = "https://files.pythonhosted.org/packages/b4/0b/32cde5bc2ebd9f351be326837c61bdeb05ad652b793f25c91cac0b48a60b/watchfiles-0.24.0-cp312-none-win_amd64.whl", hash = "sha256:551ec3ee2a3ac9cbcf48a4ec76e42c2ef938a7e905a35b42a1267fa4b1645896", size = 277055, upload-time = "2024-08-28T16:20:36.849Z" }, - { url = "https://files.pythonhosted.org/packages/4b/81/daade76ce33d21dbec7a15afd7479de8db786e5f7b7d249263b4ea174e08/watchfiles-0.24.0-cp312-none-win_arm64.whl", hash = "sha256:b52a65e4ea43c6d149c5f8ddb0bef8d4a1e779b77591a458a893eb416624a418", size = 266169, upload-time = "2024-08-28T16:20:38.149Z" }, - { url = "https://files.pythonhosted.org/packages/30/dc/6e9f5447ae14f645532468a84323a942996d74d5e817837a5c8ce9d16c69/watchfiles-0.24.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:3d2e3ab79a1771c530233cadfd277fcc762656d50836c77abb2e5e72b88e3a48", size = 373764, upload-time = "2024-08-28T16:20:39.263Z" }, - { url = "https://files.pythonhosted.org/packages/79/c0/c3a9929c372816c7fc87d8149bd722608ea58dc0986d3ef7564c79ad7112/watchfiles-0.24.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:327763da824817b38ad125dcd97595f942d720d32d879f6c4ddf843e3da3fe90", size = 367873, upload-time = "2024-08-28T16:20:40.399Z" }, - { url = 
"https://files.pythonhosted.org/packages/2e/11/ff9a4445a7cfc1c98caf99042df38964af12eed47d496dd5d0d90417349f/watchfiles-0.24.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd82010f8ab451dabe36054a1622870166a67cf3fce894f68895db6f74bbdc94", size = 438381, upload-time = "2024-08-28T16:20:41.371Z" }, - { url = "https://files.pythonhosted.org/packages/48/a3/763ba18c98211d7bb6c0f417b2d7946d346cdc359d585cc28a17b48e964b/watchfiles-0.24.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d64ba08db72e5dfd5c33be1e1e687d5e4fcce09219e8aee893a4862034081d4e", size = 432809, upload-time = "2024-08-28T16:20:42.504Z" }, - { url = "https://files.pythonhosted.org/packages/30/4c/616c111b9d40eea2547489abaf4ffc84511e86888a166d3a4522c2ba44b5/watchfiles-0.24.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1cf1f6dd7825053f3d98f6d33f6464ebdd9ee95acd74ba2c34e183086900a827", size = 451801, upload-time = "2024-08-28T16:20:43.696Z" }, - { url = "https://files.pythonhosted.org/packages/b6/be/d7da83307863a422abbfeb12903a76e43200c90ebe5d6afd6a59d158edea/watchfiles-0.24.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:43e3e37c15a8b6fe00c1bce2473cfa8eb3484bbeecf3aefbf259227e487a03df", size = 468886, upload-time = "2024-08-28T16:20:44.847Z" }, - { url = "https://files.pythonhosted.org/packages/1d/d3/3dfe131ee59d5e90b932cf56aba5c996309d94dafe3d02d204364c23461c/watchfiles-0.24.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:88bcd4d0fe1d8ff43675360a72def210ebad3f3f72cabfeac08d825d2639b4ab", size = 472973, upload-time = "2024-08-28T16:20:45.991Z" }, - { url = "https://files.pythonhosted.org/packages/42/6c/279288cc5653a289290d183b60a6d80e05f439d5bfdfaf2d113738d0f932/watchfiles-0.24.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:999928c6434372fde16c8f27143d3e97201160b48a614071261701615a2a156f", size = 425282, upload-time = "2024-08-28T16:20:47.579Z" }, - { url = "https://files.pythonhosted.org/packages/d6/d7/58afe5e85217e845edf26d8780c2d2d2ae77675eeb8d1b8b8121d799ce52/watchfiles-0.24.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:30bbd525c3262fd9f4b1865cb8d88e21161366561cd7c9e1194819e0a33ea86b", size = 612540, upload-time = "2024-08-28T16:20:48.915Z" }, - { url = "https://files.pythonhosted.org/packages/6d/d5/b96eeb9fe3fda137200dd2f31553670cbc731b1e13164fd69b49870b76ec/watchfiles-0.24.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:edf71b01dec9f766fb285b73930f95f730bb0943500ba0566ae234b5c1618c18", size = 593625, upload-time = "2024-08-28T16:20:50.543Z" }, - { url = "https://files.pythonhosted.org/packages/c1/e5/c326fe52ee0054107267608d8cea275e80be4455b6079491dfd9da29f46f/watchfiles-0.24.0-cp313-none-win32.whl", hash = "sha256:f4c96283fca3ee09fb044f02156d9570d156698bc3734252175a38f0e8975f07", size = 263899, upload-time = "2024-08-28T16:20:51.759Z" }, - { url = "https://files.pythonhosted.org/packages/a6/8b/8a7755c5e7221bb35fe4af2dc44db9174f90ebf0344fd5e9b1e8b42d381e/watchfiles-0.24.0-cp313-none-win_amd64.whl", hash = "sha256:a974231b4fdd1bb7f62064a0565a6b107d27d21d9acb50c484d2cdba515b9366", size = 276622, upload-time = "2024-08-28T16:20:52.82Z" }, -] - -[[package]] -name = "wcwidth" -version = "0.2.14" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/24/30/6b0809f4510673dc723187aeaf24c7f5459922d01e2f794277a3dfb90345/wcwidth-0.2.14.tar.gz", hash = 
"sha256:4d478375d31bc5395a3c55c40ccdf3354688364cd61c4f6adacaa9215d0b3605", size = 102293, upload-time = "2025-09-22T16:29:53.023Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/af/b5/123f13c975e9f27ab9c0770f514345bd406d0e8d3b7a0723af9d43f710af/wcwidth-0.2.14-py2.py3-none-any.whl", hash = "sha256:a7bb560c8aee30f9957e5f9895805edd20602f2d7f720186dfd906e82b4982e1", size = 37286, upload-time = "2025-09-22T16:29:51.641Z" }, -] - -[[package]] -name = "websocket-client" -version = "1.8.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/e6/30/fba0d96b4b5fbf5948ed3f4681f7da2f9f64512e1d303f94b4cc174c24a5/websocket_client-1.8.0.tar.gz", hash = "sha256:3239df9f44da632f96012472805d40a23281a991027ce11d2f45a6f24ac4c3da", size = 54648, upload-time = "2024-04-23T22:16:16.976Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/5a/84/44687a29792a70e111c5c477230a72c4b957d88d16141199bf9acb7537a3/websocket_client-1.8.0-py3-none-any.whl", hash = "sha256:17b44cc997f5c498e809b22cdf2d9c7a9e71c02c8cc2b6c56e7c2d1239bfa526", size = 58826, upload-time = "2024-04-23T22:16:14.422Z" }, -] - -[[package]] -name = "widgetsnbextension" -version = "4.0.14" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/41/53/2e0253c5efd69c9656b1843892052a31c36d37ad42812b5da45c62191f7e/widgetsnbextension-4.0.14.tar.gz", hash = "sha256:a3629b04e3edb893212df862038c7232f62973373869db5084aed739b437b5af", size = 1097428, upload-time = "2025-04-10T13:01:25.628Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/ca/51/5447876806d1088a0f8f71e16542bf350918128d0a69437df26047c8e46f/widgetsnbextension-4.0.14-py3-none-any.whl", hash = "sha256:4875a9eaf72fbf5079dc372a51a9f268fc38d46f767cbf85c43a36da5cb9b575", size = 2196503, upload-time = "2025-04-10T13:01:23.086Z" }, -] - -[[package]] -name = "wrapt" -version = "1.17.3" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/95/8f/aeb76c5b46e273670962298c23e7ddde79916cb74db802131d49a85e4b7d/wrapt-1.17.3.tar.gz", hash = "sha256:f66eb08feaa410fe4eebd17f2a2c8e2e46d3476e9f8c783daa8e09e0faa666d0", size = 55547, upload-time = "2025-08-12T05:53:21.714Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/9f/41/cad1aba93e752f1f9268c77270da3c469883d56e2798e7df6240dcb2287b/wrapt-1.17.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:ab232e7fdb44cdfbf55fc3afa31bcdb0d8980b9b95c38b6405df2acb672af0e0", size = 53998, upload-time = "2025-08-12T05:51:47.138Z" }, - { url = "https://files.pythonhosted.org/packages/60/f8/096a7cc13097a1869fe44efe68dace40d2a16ecb853141394047f0780b96/wrapt-1.17.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:9baa544e6acc91130e926e8c802a17f3b16fbea0fd441b5a60f5cf2cc5c3deba", size = 39020, upload-time = "2025-08-12T05:51:35.906Z" }, - { url = "https://files.pythonhosted.org/packages/33/df/bdf864b8997aab4febb96a9ae5c124f700a5abd9b5e13d2a3214ec4be705/wrapt-1.17.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6b538e31eca1a7ea4605e44f81a48aa24c4632a277431a6ed3f328835901f4fd", size = 39098, upload-time = "2025-08-12T05:51:57.474Z" }, - { url = "https://files.pythonhosted.org/packages/9f/81/5d931d78d0eb732b95dc3ddaeeb71c8bb572fb01356e9133916cd729ecdd/wrapt-1.17.3-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:042ec3bb8f319c147b1301f2393bc19dba6e176b7da446853406d041c36c7828", size = 88036, upload-time = 
"2025-08-12T05:52:34.784Z" }, - { url = "https://files.pythonhosted.org/packages/ca/38/2e1785df03b3d72d34fc6252d91d9d12dc27a5c89caef3335a1bbb8908ca/wrapt-1.17.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3af60380ba0b7b5aeb329bc4e402acd25bd877e98b3727b0135cb5c2efdaefe9", size = 88156, upload-time = "2025-08-12T05:52:13.599Z" }, - { url = "https://files.pythonhosted.org/packages/b3/8b/48cdb60fe0603e34e05cffda0b2a4adab81fd43718e11111a4b0100fd7c1/wrapt-1.17.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:0b02e424deef65c9f7326d8c19220a2c9040c51dc165cddb732f16198c168396", size = 87102, upload-time = "2025-08-12T05:52:14.56Z" }, - { url = "https://files.pythonhosted.org/packages/3c/51/d81abca783b58f40a154f1b2c56db1d2d9e0d04fa2d4224e357529f57a57/wrapt-1.17.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:74afa28374a3c3a11b3b5e5fca0ae03bef8450d6aa3ab3a1e2c30e3a75d023dc", size = 87732, upload-time = "2025-08-12T05:52:36.165Z" }, - { url = "https://files.pythonhosted.org/packages/9e/b1/43b286ca1392a006d5336412d41663eeef1ad57485f3e52c767376ba7e5a/wrapt-1.17.3-cp312-cp312-win32.whl", hash = "sha256:4da9f45279fff3543c371d5ababc57a0384f70be244de7759c85a7f989cb4ebe", size = 36705, upload-time = "2025-08-12T05:53:07.123Z" }, - { url = "https://files.pythonhosted.org/packages/28/de/49493f962bd3c586ab4b88066e967aa2e0703d6ef2c43aa28cb83bf7b507/wrapt-1.17.3-cp312-cp312-win_amd64.whl", hash = "sha256:e71d5c6ebac14875668a1e90baf2ea0ef5b7ac7918355850c0908ae82bcb297c", size = 38877, upload-time = "2025-08-12T05:53:05.436Z" }, - { url = "https://files.pythonhosted.org/packages/f1/48/0f7102fe9cb1e8a5a77f80d4f0956d62d97034bbe88d33e94699f99d181d/wrapt-1.17.3-cp312-cp312-win_arm64.whl", hash = "sha256:604d076c55e2fdd4c1c03d06dc1a31b95130010517b5019db15365ec4a405fc6", size = 36885, upload-time = "2025-08-12T05:52:54.367Z" }, - { url = "https://files.pythonhosted.org/packages/fc/f6/759ece88472157acb55fc195e5b116e06730f1b651b5b314c66291729193/wrapt-1.17.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a47681378a0439215912ef542c45a783484d4dd82bac412b71e59cf9c0e1cea0", size = 54003, upload-time = "2025-08-12T05:51:48.627Z" }, - { url = "https://files.pythonhosted.org/packages/4f/a9/49940b9dc6d47027dc850c116d79b4155f15c08547d04db0f07121499347/wrapt-1.17.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:54a30837587c6ee3cd1a4d1c2ec5d24e77984d44e2f34547e2323ddb4e22eb77", size = 39025, upload-time = "2025-08-12T05:51:37.156Z" }, - { url = "https://files.pythonhosted.org/packages/45/35/6a08de0f2c96dcdd7fe464d7420ddb9a7655a6561150e5fc4da9356aeaab/wrapt-1.17.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:16ecf15d6af39246fe33e507105d67e4b81d8f8d2c6598ff7e3ca1b8a37213f7", size = 39108, upload-time = "2025-08-12T05:51:58.425Z" }, - { url = "https://files.pythonhosted.org/packages/0c/37/6faf15cfa41bf1f3dba80cd3f5ccc6622dfccb660ab26ed79f0178c7497f/wrapt-1.17.3-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:6fd1ad24dc235e4ab88cda009e19bf347aabb975e44fd5c2fb22a3f6e4141277", size = 88072, upload-time = "2025-08-12T05:52:37.53Z" }, - { url = "https://files.pythonhosted.org/packages/78/f2/efe19ada4a38e4e15b6dff39c3e3f3f73f5decf901f66e6f72fe79623a06/wrapt-1.17.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0ed61b7c2d49cee3c027372df5809a59d60cf1b6c2f81ee980a091f3afed6a2d", size = 88214, upload-time = "2025-08-12T05:52:15.886Z" }, - { url = 
"https://files.pythonhosted.org/packages/40/90/ca86701e9de1622b16e09689fc24b76f69b06bb0150990f6f4e8b0eeb576/wrapt-1.17.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:423ed5420ad5f5529db9ce89eac09c8a2f97da18eb1c870237e84c5a5c2d60aa", size = 87105, upload-time = "2025-08-12T05:52:17.914Z" }, - { url = "https://files.pythonhosted.org/packages/fd/e0/d10bd257c9a3e15cbf5523025252cc14d77468e8ed644aafb2d6f54cb95d/wrapt-1.17.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e01375f275f010fcbf7f643b4279896d04e571889b8a5b3f848423d91bf07050", size = 87766, upload-time = "2025-08-12T05:52:39.243Z" }, - { url = "https://files.pythonhosted.org/packages/e8/cf/7d848740203c7b4b27eb55dbfede11aca974a51c3d894f6cc4b865f42f58/wrapt-1.17.3-cp313-cp313-win32.whl", hash = "sha256:53e5e39ff71b3fc484df8a522c933ea2b7cdd0d5d15ae82e5b23fde87d44cbd8", size = 36711, upload-time = "2025-08-12T05:53:10.074Z" }, - { url = "https://files.pythonhosted.org/packages/57/54/35a84d0a4d23ea675994104e667ceff49227ce473ba6a59ba2c84f250b74/wrapt-1.17.3-cp313-cp313-win_amd64.whl", hash = "sha256:1f0b2f40cf341ee8cc1a97d51ff50dddb9fcc73241b9143ec74b30fc4f44f6cb", size = 38885, upload-time = "2025-08-12T05:53:08.695Z" }, - { url = "https://files.pythonhosted.org/packages/01/77/66e54407c59d7b02a3c4e0af3783168fff8e5d61def52cda8728439d86bc/wrapt-1.17.3-cp313-cp313-win_arm64.whl", hash = "sha256:7425ac3c54430f5fc5e7b6f41d41e704db073309acfc09305816bc6a0b26bb16", size = 36896, upload-time = "2025-08-12T05:52:55.34Z" }, - { url = "https://files.pythonhosted.org/packages/02/a2/cd864b2a14f20d14f4c496fab97802001560f9f41554eef6df201cd7f76c/wrapt-1.17.3-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:cf30f6e3c077c8e6a9a7809c94551203c8843e74ba0c960f4a98cd80d4665d39", size = 54132, upload-time = "2025-08-12T05:51:49.864Z" }, - { url = "https://files.pythonhosted.org/packages/d5/46/d011725b0c89e853dc44cceb738a307cde5d240d023d6d40a82d1b4e1182/wrapt-1.17.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:e228514a06843cae89621384cfe3a80418f3c04aadf8a3b14e46a7be704e4235", size = 39091, upload-time = "2025-08-12T05:51:38.935Z" }, - { url = "https://files.pythonhosted.org/packages/2e/9e/3ad852d77c35aae7ddebdbc3b6d35ec8013af7d7dddad0ad911f3d891dae/wrapt-1.17.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:5ea5eb3c0c071862997d6f3e02af1d055f381b1d25b286b9d6644b79db77657c", size = 39172, upload-time = "2025-08-12T05:51:59.365Z" }, - { url = "https://files.pythonhosted.org/packages/c3/f7/c983d2762bcce2326c317c26a6a1e7016f7eb039c27cdf5c4e30f4160f31/wrapt-1.17.3-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:281262213373b6d5e4bb4353bc36d1ba4084e6d6b5d242863721ef2bf2c2930b", size = 87163, upload-time = "2025-08-12T05:52:40.965Z" }, - { url = "https://files.pythonhosted.org/packages/e4/0f/f673f75d489c7f22d17fe0193e84b41540d962f75fce579cf6873167c29b/wrapt-1.17.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:dc4a8d2b25efb6681ecacad42fca8859f88092d8732b170de6a5dddd80a1c8fa", size = 87963, upload-time = "2025-08-12T05:52:20.326Z" }, - { url = "https://files.pythonhosted.org/packages/df/61/515ad6caca68995da2fac7a6af97faab8f78ebe3bf4f761e1b77efbc47b5/wrapt-1.17.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:373342dd05b1d07d752cecbec0c41817231f29f3a89aa8b8843f7b95992ed0c7", size = 86945, upload-time = "2025-08-12T05:52:21.581Z" }, - { url = 
"https://files.pythonhosted.org/packages/d3/bd/4e70162ce398462a467bc09e768bee112f1412e563620adc353de9055d33/wrapt-1.17.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d40770d7c0fd5cbed9d84b2c3f2e156431a12c9a37dc6284060fb4bec0b7ffd4", size = 86857, upload-time = "2025-08-12T05:52:43.043Z" }, - { url = "https://files.pythonhosted.org/packages/2b/b8/da8560695e9284810b8d3df8a19396a6e40e7518059584a1a394a2b35e0a/wrapt-1.17.3-cp314-cp314-win32.whl", hash = "sha256:fbd3c8319de8e1dc79d346929cd71d523622da527cca14e0c1d257e31c2b8b10", size = 37178, upload-time = "2025-08-12T05:53:12.605Z" }, - { url = "https://files.pythonhosted.org/packages/db/c8/b71eeb192c440d67a5a0449aaee2310a1a1e8eca41676046f99ed2487e9f/wrapt-1.17.3-cp314-cp314-win_amd64.whl", hash = "sha256:e1a4120ae5705f673727d3253de3ed0e016f7cd78dc463db1b31e2463e1f3cf6", size = 39310, upload-time = "2025-08-12T05:53:11.106Z" }, - { url = "https://files.pythonhosted.org/packages/45/20/2cda20fd4865fa40f86f6c46ed37a2a8356a7a2fde0773269311f2af56c7/wrapt-1.17.3-cp314-cp314-win_arm64.whl", hash = "sha256:507553480670cab08a800b9463bdb881b2edeed77dc677b0a5915e6106e91a58", size = 37266, upload-time = "2025-08-12T05:52:56.531Z" }, - { url = "https://files.pythonhosted.org/packages/77/ed/dd5cf21aec36c80443c6f900449260b80e2a65cf963668eaef3b9accce36/wrapt-1.17.3-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:ed7c635ae45cfbc1a7371f708727bf74690daedc49b4dba310590ca0bd28aa8a", size = 56544, upload-time = "2025-08-12T05:51:51.109Z" }, - { url = "https://files.pythonhosted.org/packages/8d/96/450c651cc753877ad100c7949ab4d2e2ecc4d97157e00fa8f45df682456a/wrapt-1.17.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:249f88ed15503f6492a71f01442abddd73856a0032ae860de6d75ca62eed8067", size = 40283, upload-time = "2025-08-12T05:51:39.912Z" }, - { url = "https://files.pythonhosted.org/packages/d1/86/2fcad95994d9b572db57632acb6f900695a648c3e063f2cd344b3f5c5a37/wrapt-1.17.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:5a03a38adec8066d5a37bea22f2ba6bbf39fcdefbe2d91419ab864c3fb515454", size = 40366, upload-time = "2025-08-12T05:52:00.693Z" }, - { url = "https://files.pythonhosted.org/packages/64/0e/f4472f2fdde2d4617975144311f8800ef73677a159be7fe61fa50997d6c0/wrapt-1.17.3-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:5d4478d72eb61c36e5b446e375bbc49ed002430d17cdec3cecb36993398e1a9e", size = 108571, upload-time = "2025-08-12T05:52:44.521Z" }, - { url = "https://files.pythonhosted.org/packages/cc/01/9b85a99996b0a97c8a17484684f206cbb6ba73c1ce6890ac668bcf3838fb/wrapt-1.17.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:223db574bb38637e8230eb14b185565023ab624474df94d2af18f1cdb625216f", size = 113094, upload-time = "2025-08-12T05:52:22.618Z" }, - { url = "https://files.pythonhosted.org/packages/25/02/78926c1efddcc7b3aa0bc3d6b33a822f7d898059f7cd9ace8c8318e559ef/wrapt-1.17.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:e405adefb53a435f01efa7ccdec012c016b5a1d3f35459990afc39b6be4d5056", size = 110659, upload-time = "2025-08-12T05:52:24.057Z" }, - { url = "https://files.pythonhosted.org/packages/dc/ee/c414501ad518ac3e6fe184753632fe5e5ecacdcf0effc23f31c1e4f7bfcf/wrapt-1.17.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:88547535b787a6c9ce4086917b6e1d291aa8ed914fdd3a838b3539dc95c12804", size = 106946, upload-time = "2025-08-12T05:52:45.976Z" }, - { url = 
"https://files.pythonhosted.org/packages/be/44/a1bd64b723d13bb151d6cc91b986146a1952385e0392a78567e12149c7b4/wrapt-1.17.3-cp314-cp314t-win32.whl", hash = "sha256:41b1d2bc74c2cac6f9074df52b2efbef2b30bdfe5f40cb78f8ca22963bc62977", size = 38717, upload-time = "2025-08-12T05:53:15.214Z" }, - { url = "https://files.pythonhosted.org/packages/79/d9/7cfd5a312760ac4dd8bf0184a6ee9e43c33e47f3dadc303032ce012b8fa3/wrapt-1.17.3-cp314-cp314t-win_amd64.whl", hash = "sha256:73d496de46cd2cdbdbcce4ae4bcdb4afb6a11234a1df9c085249d55166b95116", size = 41334, upload-time = "2025-08-12T05:53:14.178Z" }, - { url = "https://files.pythonhosted.org/packages/46/78/10ad9781128ed2f99dbc474f43283b13fea8ba58723e98844367531c18e9/wrapt-1.17.3-cp314-cp314t-win_arm64.whl", hash = "sha256:f38e60678850c42461d4202739f9bf1e3a737c7ad283638251e79cc49effb6b6", size = 38471, upload-time = "2025-08-12T05:52:57.784Z" }, - { url = "https://files.pythonhosted.org/packages/1f/f6/a933bd70f98e9cf3e08167fc5cd7aaaca49147e48411c0bd5ae701bb2194/wrapt-1.17.3-py3-none-any.whl", hash = "sha256:7171ae35d2c33d326ac19dd8facb1e82e5fd04ef8c6c0e394d7af55a55051c22", size = 23591, upload-time = "2025-08-12T05:53:20.674Z" }, -] - -[[package]] -name = "yarl" -version = "1.20.1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "idna" }, - { name = "multidict" }, - { name = "propcache" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/3c/fb/efaa23fa4e45537b827620f04cf8f3cd658b76642205162e072703a5b963/yarl-1.20.1.tar.gz", hash = "sha256:d017a4997ee50c91fd5466cef416231bb82177b93b029906cefc542ce14c35ac", size = 186428, upload-time = "2025-06-10T00:46:09.923Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/5f/9a/cb7fad7d73c69f296eda6815e4a2c7ed53fc70c2f136479a91c8e5fbdb6d/yarl-1.20.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:bdcc4cd244e58593a4379fe60fdee5ac0331f8eb70320a24d591a3be197b94a9", size = 133667, upload-time = "2025-06-10T00:43:44.369Z" }, - { url = "https://files.pythonhosted.org/packages/67/38/688577a1cb1e656e3971fb66a3492501c5a5df56d99722e57c98249e5b8a/yarl-1.20.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b29a2c385a5f5b9c7d9347e5812b6f7ab267193c62d282a540b4fc528c8a9d2a", size = 91025, upload-time = "2025-06-10T00:43:46.295Z" }, - { url = "https://files.pythonhosted.org/packages/50/ec/72991ae51febeb11a42813fc259f0d4c8e0507f2b74b5514618d8b640365/yarl-1.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1112ae8154186dfe2de4732197f59c05a83dc814849a5ced892b708033f40dc2", size = 89709, upload-time = "2025-06-10T00:43:48.22Z" }, - { url = "https://files.pythonhosted.org/packages/99/da/4d798025490e89426e9f976702e5f9482005c548c579bdae792a4c37769e/yarl-1.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:90bbd29c4fe234233f7fa2b9b121fb63c321830e5d05b45153a2ca68f7d310ee", size = 352287, upload-time = "2025-06-10T00:43:49.924Z" }, - { url = "https://files.pythonhosted.org/packages/1a/26/54a15c6a567aac1c61b18aa0f4b8aa2e285a52d547d1be8bf48abe2b3991/yarl-1.20.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:680e19c7ce3710ac4cd964e90dad99bf9b5029372ba0c7cbfcd55e54d90ea819", size = 345429, upload-time = "2025-06-10T00:43:51.7Z" }, - { url = "https://files.pythonhosted.org/packages/d6/95/9dcf2386cb875b234353b93ec43e40219e14900e046bf6ac118f94b1e353/yarl-1.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:4a979218c1fdb4246a05efc2cc23859d47c89af463a90b99b7c56094daf25a16", size = 365429, upload-time = "2025-06-10T00:43:53.494Z" }, - { url = "https://files.pythonhosted.org/packages/91/b2/33a8750f6a4bc224242a635f5f2cff6d6ad5ba651f6edcccf721992c21a0/yarl-1.20.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:255b468adf57b4a7b65d8aad5b5138dce6a0752c139965711bdcb81bc370e1b6", size = 363862, upload-time = "2025-06-10T00:43:55.766Z" }, - { url = "https://files.pythonhosted.org/packages/98/28/3ab7acc5b51f4434b181b0cee8f1f4b77a65919700a355fb3617f9488874/yarl-1.20.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a97d67108e79cfe22e2b430d80d7571ae57d19f17cda8bb967057ca8a7bf5bfd", size = 355616, upload-time = "2025-06-10T00:43:58.056Z" }, - { url = "https://files.pythonhosted.org/packages/36/a3/f666894aa947a371724ec7cd2e5daa78ee8a777b21509b4252dd7bd15e29/yarl-1.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8570d998db4ddbfb9a590b185a0a33dbf8aafb831d07a5257b4ec9948df9cb0a", size = 339954, upload-time = "2025-06-10T00:43:59.773Z" }, - { url = "https://files.pythonhosted.org/packages/f1/81/5f466427e09773c04219d3450d7a1256138a010b6c9f0af2d48565e9ad13/yarl-1.20.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:97c75596019baae7c71ccf1d8cc4738bc08134060d0adfcbe5642f778d1dca38", size = 365575, upload-time = "2025-06-10T00:44:02.051Z" }, - { url = "https://files.pythonhosted.org/packages/2e/e3/e4b0ad8403e97e6c9972dd587388940a032f030ebec196ab81a3b8e94d31/yarl-1.20.1-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:1c48912653e63aef91ff988c5432832692ac5a1d8f0fb8a33091520b5bbe19ef", size = 365061, upload-time = "2025-06-10T00:44:04.196Z" }, - { url = "https://files.pythonhosted.org/packages/ac/99/b8a142e79eb86c926f9f06452eb13ecb1bb5713bd01dc0038faf5452e544/yarl-1.20.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:4c3ae28f3ae1563c50f3d37f064ddb1511ecc1d5584e88c6b7c63cf7702a6d5f", size = 364142, upload-time = "2025-06-10T00:44:06.527Z" }, - { url = "https://files.pythonhosted.org/packages/34/f2/08ed34a4a506d82a1a3e5bab99ccd930a040f9b6449e9fd050320e45845c/yarl-1.20.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c5e9642f27036283550f5f57dc6156c51084b458570b9d0d96100c8bebb186a8", size = 381894, upload-time = "2025-06-10T00:44:08.379Z" }, - { url = "https://files.pythonhosted.org/packages/92/f8/9a3fbf0968eac704f681726eff595dce9b49c8a25cd92bf83df209668285/yarl-1.20.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:2c26b0c49220d5799f7b22c6838409ee9bc58ee5c95361a4d7831f03cc225b5a", size = 383378, upload-time = "2025-06-10T00:44:10.51Z" }, - { url = "https://files.pythonhosted.org/packages/af/85/9363f77bdfa1e4d690957cd39d192c4cacd1c58965df0470a4905253b54f/yarl-1.20.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:564ab3d517e3d01c408c67f2e5247aad4019dcf1969982aba3974b4093279004", size = 374069, upload-time = "2025-06-10T00:44:12.834Z" }, - { url = "https://files.pythonhosted.org/packages/35/99/9918c8739ba271dcd935400cff8b32e3cd319eaf02fcd023d5dcd487a7c8/yarl-1.20.1-cp312-cp312-win32.whl", hash = "sha256:daea0d313868da1cf2fac6b2d3a25c6e3a9e879483244be38c8e6a41f1d876a5", size = 81249, upload-time = "2025-06-10T00:44:14.731Z" }, - { url = "https://files.pythonhosted.org/packages/eb/83/5d9092950565481b413b31a23e75dd3418ff0a277d6e0abf3729d4d1ce25/yarl-1.20.1-cp312-cp312-win_amd64.whl", hash = "sha256:48ea7d7f9be0487339828a4de0360d7ce0efc06524a48e1810f945c45b813698", 
size = 86710, upload-time = "2025-06-10T00:44:16.716Z" }, - { url = "https://files.pythonhosted.org/packages/8a/e1/2411b6d7f769a07687acee88a062af5833cf1966b7266f3d8dfb3d3dc7d3/yarl-1.20.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:0b5ff0fbb7c9f1b1b5ab53330acbfc5247893069e7716840c8e7d5bb7355038a", size = 131811, upload-time = "2025-06-10T00:44:18.933Z" }, - { url = "https://files.pythonhosted.org/packages/b2/27/584394e1cb76fb771371770eccad35de400e7b434ce3142c2dd27392c968/yarl-1.20.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:14f326acd845c2b2e2eb38fb1346c94f7f3b01a4f5c788f8144f9b630bfff9a3", size = 90078, upload-time = "2025-06-10T00:44:20.635Z" }, - { url = "https://files.pythonhosted.org/packages/bf/9a/3246ae92d4049099f52d9b0fe3486e3b500e29b7ea872d0f152966fc209d/yarl-1.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f60e4ad5db23f0b96e49c018596707c3ae89f5d0bd97f0ad3684bcbad899f1e7", size = 88748, upload-time = "2025-06-10T00:44:22.34Z" }, - { url = "https://files.pythonhosted.org/packages/a3/25/35afe384e31115a1a801fbcf84012d7a066d89035befae7c5d4284df1e03/yarl-1.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:49bdd1b8e00ce57e68ba51916e4bb04461746e794e7c4d4bbc42ba2f18297691", size = 349595, upload-time = "2025-06-10T00:44:24.314Z" }, - { url = "https://files.pythonhosted.org/packages/28/2d/8aca6cb2cabc8f12efcb82749b9cefecbccfc7b0384e56cd71058ccee433/yarl-1.20.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:66252d780b45189975abfed839616e8fd2dbacbdc262105ad7742c6ae58f3e31", size = 342616, upload-time = "2025-06-10T00:44:26.167Z" }, - { url = "https://files.pythonhosted.org/packages/0b/e9/1312633d16b31acf0098d30440ca855e3492d66623dafb8e25b03d00c3da/yarl-1.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:59174e7332f5d153d8f7452a102b103e2e74035ad085f404df2e40e663a22b28", size = 361324, upload-time = "2025-06-10T00:44:27.915Z" }, - { url = "https://files.pythonhosted.org/packages/bc/a0/688cc99463f12f7669eec7c8acc71ef56a1521b99eab7cd3abb75af887b0/yarl-1.20.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e3968ec7d92a0c0f9ac34d5ecfd03869ec0cab0697c91a45db3fbbd95fe1b653", size = 359676, upload-time = "2025-06-10T00:44:30.041Z" }, - { url = "https://files.pythonhosted.org/packages/af/44/46407d7f7a56e9a85a4c207724c9f2c545c060380718eea9088f222ba697/yarl-1.20.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d1a4fbb50e14396ba3d375f68bfe02215d8e7bc3ec49da8341fe3157f59d2ff5", size = 352614, upload-time = "2025-06-10T00:44:32.171Z" }, - { url = "https://files.pythonhosted.org/packages/b1/91/31163295e82b8d5485d31d9cf7754d973d41915cadce070491778d9c9825/yarl-1.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:11a62c839c3a8eac2410e951301309426f368388ff2f33799052787035793b02", size = 336766, upload-time = "2025-06-10T00:44:34.494Z" }, - { url = "https://files.pythonhosted.org/packages/b4/8e/c41a5bc482121f51c083c4c2bcd16b9e01e1cf8729e380273a952513a21f/yarl-1.20.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:041eaa14f73ff5a8986b4388ac6bb43a77f2ea09bf1913df7a35d4646db69e53", size = 364615, upload-time = "2025-06-10T00:44:36.856Z" }, - { url = "https://files.pythonhosted.org/packages/e3/5b/61a3b054238d33d70ea06ebba7e58597891b71c699e247df35cc984ab393/yarl-1.20.1-cp313-cp313-musllinux_1_2_armv7l.whl", hash = 
"sha256:377fae2fef158e8fd9d60b4c8751387b8d1fb121d3d0b8e9b0be07d1b41e83dc", size = 360982, upload-time = "2025-06-10T00:44:39.141Z" }, - { url = "https://files.pythonhosted.org/packages/df/a3/6a72fb83f8d478cb201d14927bc8040af901811a88e0ff2da7842dd0ed19/yarl-1.20.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:1c92f4390e407513f619d49319023664643d3339bd5e5a56a3bebe01bc67ec04", size = 369792, upload-time = "2025-06-10T00:44:40.934Z" }, - { url = "https://files.pythonhosted.org/packages/7c/af/4cc3c36dfc7c077f8dedb561eb21f69e1e9f2456b91b593882b0b18c19dc/yarl-1.20.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:d25ddcf954df1754ab0f86bb696af765c5bfaba39b74095f27eececa049ef9a4", size = 382049, upload-time = "2025-06-10T00:44:42.854Z" }, - { url = "https://files.pythonhosted.org/packages/19/3a/e54e2c4752160115183a66dc9ee75a153f81f3ab2ba4bf79c3c53b33de34/yarl-1.20.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:909313577e9619dcff8c31a0ea2aa0a2a828341d92673015456b3ae492e7317b", size = 384774, upload-time = "2025-06-10T00:44:45.275Z" }, - { url = "https://files.pythonhosted.org/packages/9c/20/200ae86dabfca89060ec6447649f219b4cbd94531e425e50d57e5f5ac330/yarl-1.20.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:793fd0580cb9664548c6b83c63b43c477212c0260891ddf86809e1c06c8b08f1", size = 374252, upload-time = "2025-06-10T00:44:47.31Z" }, - { url = "https://files.pythonhosted.org/packages/83/75/11ee332f2f516b3d094e89448da73d557687f7d137d5a0f48c40ff211487/yarl-1.20.1-cp313-cp313-win32.whl", hash = "sha256:468f6e40285de5a5b3c44981ca3a319a4b208ccc07d526b20b12aeedcfa654b7", size = 81198, upload-time = "2025-06-10T00:44:49.164Z" }, - { url = "https://files.pythonhosted.org/packages/ba/ba/39b1ecbf51620b40ab402b0fc817f0ff750f6d92712b44689c2c215be89d/yarl-1.20.1-cp313-cp313-win_amd64.whl", hash = "sha256:495b4ef2fea40596bfc0affe3837411d6aa3371abcf31aac0ccc4bdd64d4ef5c", size = 86346, upload-time = "2025-06-10T00:44:51.182Z" }, - { url = "https://files.pythonhosted.org/packages/43/c7/669c52519dca4c95153c8ad96dd123c79f354a376346b198f438e56ffeb4/yarl-1.20.1-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:f60233b98423aab21d249a30eb27c389c14929f47be8430efa7dbd91493a729d", size = 138826, upload-time = "2025-06-10T00:44:52.883Z" }, - { url = "https://files.pythonhosted.org/packages/6a/42/fc0053719b44f6ad04a75d7f05e0e9674d45ef62f2d9ad2c1163e5c05827/yarl-1.20.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:6f3eff4cc3f03d650d8755c6eefc844edde99d641d0dcf4da3ab27141a5f8ddf", size = 93217, upload-time = "2025-06-10T00:44:54.658Z" }, - { url = "https://files.pythonhosted.org/packages/4f/7f/fa59c4c27e2a076bba0d959386e26eba77eb52ea4a0aac48e3515c186b4c/yarl-1.20.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:69ff8439d8ba832d6bed88af2c2b3445977eba9a4588b787b32945871c2444e3", size = 92700, upload-time = "2025-06-10T00:44:56.784Z" }, - { url = "https://files.pythonhosted.org/packages/2f/d4/062b2f48e7c93481e88eff97a6312dca15ea200e959f23e96d8ab898c5b8/yarl-1.20.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3cf34efa60eb81dd2645a2e13e00bb98b76c35ab5061a3989c7a70f78c85006d", size = 347644, upload-time = "2025-06-10T00:44:59.071Z" }, - { url = "https://files.pythonhosted.org/packages/89/47/78b7f40d13c8f62b499cc702fdf69e090455518ae544c00a3bf4afc9fc77/yarl-1.20.1-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:8e0fe9364ad0fddab2688ce72cb7a8e61ea42eff3c7caeeb83874a5d479c896c", size = 323452, upload-time = 
"2025-06-10T00:45:01.605Z" }, - { url = "https://files.pythonhosted.org/packages/eb/2b/490d3b2dc66f52987d4ee0d3090a147ea67732ce6b4d61e362c1846d0d32/yarl-1.20.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8f64fbf81878ba914562c672024089e3401974a39767747691c65080a67b18c1", size = 346378, upload-time = "2025-06-10T00:45:03.946Z" }, - { url = "https://files.pythonhosted.org/packages/66/ad/775da9c8a94ce925d1537f939a4f17d782efef1f973039d821cbe4bcc211/yarl-1.20.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f6342d643bf9a1de97e512e45e4b9560a043347e779a173250824f8b254bd5ce", size = 353261, upload-time = "2025-06-10T00:45:05.992Z" }, - { url = "https://files.pythonhosted.org/packages/4b/23/0ed0922b47a4f5c6eb9065d5ff1e459747226ddce5c6a4c111e728c9f701/yarl-1.20.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56dac5f452ed25eef0f6e3c6a066c6ab68971d96a9fb441791cad0efba6140d3", size = 335987, upload-time = "2025-06-10T00:45:08.227Z" }, - { url = "https://files.pythonhosted.org/packages/3e/49/bc728a7fe7d0e9336e2b78f0958a2d6b288ba89f25a1762407a222bf53c3/yarl-1.20.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7d7f497126d65e2cad8dc5f97d34c27b19199b6414a40cb36b52f41b79014be", size = 329361, upload-time = "2025-06-10T00:45:10.11Z" }, - { url = "https://files.pythonhosted.org/packages/93/8f/b811b9d1f617c83c907e7082a76e2b92b655400e61730cd61a1f67178393/yarl-1.20.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:67e708dfb8e78d8a19169818eeb5c7a80717562de9051bf2413aca8e3696bf16", size = 346460, upload-time = "2025-06-10T00:45:12.055Z" }, - { url = "https://files.pythonhosted.org/packages/70/fd/af94f04f275f95da2c3b8b5e1d49e3e79f1ed8b6ceb0f1664cbd902773ff/yarl-1.20.1-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:595c07bc79af2494365cc96ddeb772f76272364ef7c80fb892ef9d0649586513", size = 334486, upload-time = "2025-06-10T00:45:13.995Z" }, - { url = "https://files.pythonhosted.org/packages/84/65/04c62e82704e7dd0a9b3f61dbaa8447f8507655fd16c51da0637b39b2910/yarl-1.20.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:7bdd2f80f4a7df852ab9ab49484a4dee8030023aa536df41f2d922fd57bf023f", size = 342219, upload-time = "2025-06-10T00:45:16.479Z" }, - { url = "https://files.pythonhosted.org/packages/91/95/459ca62eb958381b342d94ab9a4b6aec1ddec1f7057c487e926f03c06d30/yarl-1.20.1-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:c03bfebc4ae8d862f853a9757199677ab74ec25424d0ebd68a0027e9c639a390", size = 350693, upload-time = "2025-06-10T00:45:18.399Z" }, - { url = "https://files.pythonhosted.org/packages/a6/00/d393e82dd955ad20617abc546a8f1aee40534d599ff555ea053d0ec9bf03/yarl-1.20.1-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:344d1103e9c1523f32a5ed704d576172d2cabed3122ea90b1d4e11fe17c66458", size = 355803, upload-time = "2025-06-10T00:45:20.677Z" }, - { url = "https://files.pythonhosted.org/packages/9e/ed/c5fb04869b99b717985e244fd93029c7a8e8febdfcffa06093e32d7d44e7/yarl-1.20.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:88cab98aa4e13e1ade8c141daeedd300a4603b7132819c484841bb7af3edce9e", size = 341709, upload-time = "2025-06-10T00:45:23.221Z" }, - { url = "https://files.pythonhosted.org/packages/24/fd/725b8e73ac2a50e78a4534ac43c6addf5c1c2d65380dd48a9169cc6739a9/yarl-1.20.1-cp313-cp313t-win32.whl", hash = "sha256:b121ff6a7cbd4abc28985b6028235491941b9fe8fe226e6fdc539c977ea1739d", size = 86591, upload-time = "2025-06-10T00:45:25.793Z" }, - 
{ url = "https://files.pythonhosted.org/packages/94/c3/b2e9f38bc3e11191981d57ea08cab2166e74ea770024a646617c9cddd9f6/yarl-1.20.1-cp313-cp313t-win_amd64.whl", hash = "sha256:541d050a355bbbc27e55d906bc91cb6fe42f96c01413dd0f4ed5a5240513874f", size = 93003, upload-time = "2025-06-10T00:45:27.752Z" }, - { url = "https://files.pythonhosted.org/packages/b4/2d/2345fce04cfd4bee161bf1e7d9cdc702e3e16109021035dbb24db654a622/yarl-1.20.1-py3-none-any.whl", hash = "sha256:83b8eb083fe4683c6115795d9fc1cfaf2cbbefb19b3a1cb68f6527460f483a77", size = 46542, upload-time = "2025-06-10T00:46:07.521Z" }, -] - -[[package]] -name = "yaspin" -version = "3.2.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "termcolor" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/dc/f9/9d9556b9fef2df1dd78c770c021bfcf84d03413bd137cd3e3279a612adc4/yaspin-3.2.0.tar.gz", hash = "sha256:416fe8d6722d26e4d1a1f50498bb4f3bdd4c68b9cd54065d224a4b9d1228cce7", size = 39083, upload-time = "2025-09-17T18:54:08.267Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/b9/76/92596d08527dab7bc2ccbfd0fd620175231994feb7eaddb2a78d6333e7b6/yaspin-3.2.0-py3-none-any.whl", hash = "sha256:6a98053c75c0728271070bd6c99d0c83b6de76734bee34a294c2c2df00e9a06c", size = 20634, upload-time = "2025-09-17T18:54:06.965Z" }, -] - -[[package]] -name = "zipp" -version = "3.23.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/e3/02/0f2892c661036d50ede074e376733dca2ae7c6eb617489437771209d4180/zipp-3.23.0.tar.gz", hash = "sha256:a07157588a12518c9d4034df3fbbee09c814741a33ff63c05fa29d26a2404166", size = 25547, upload-time = "2025-06-08T17:06:39.4Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/2e/54/647ade08bf0db230bfea292f893923872fd20be6ac6f53b2b936ba839d75/zipp-3.23.0-py3-none-any.whl", hash = "sha256:071652d6115ed432f5ce1d34c336c0adfd6a884660d1e9712a256d3d3bd4b14e", size = 10276, upload-time = "2025-06-08T17:06:38.034Z" }, -]