diff --git a/dbx-agent-app/.github/workflows/docs.yml b/dbx-agent-app/.github/workflows/docs.yml new file mode 100644 index 00000000..7e173edc --- /dev/null +++ b/dbx-agent-app/.github/workflows/docs.yml @@ -0,0 +1,28 @@ +name: Documentation + +on: + push: + branches: [ main ] + workflow_dispatch: + +permissions: + contents: write + +jobs: + deploy-docs: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.11' + + - name: Install dependencies + run: | + pip install mkdocs-material mkdocstrings[python] pymdown-extensions + + - name: Build and deploy + run: | + mkdocs gh-deploy --force diff --git a/dbx-agent-app/.github/workflows/publish.yml b/dbx-agent-app/.github/workflows/publish.yml new file mode 100644 index 00000000..0c60674f --- /dev/null +++ b/dbx-agent-app/.github/workflows/publish.yml @@ -0,0 +1,40 @@ +name: Publish to PyPI + +on: + release: + types: [published] + +permissions: + contents: read + +jobs: + build-and-publish: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.11' + + - name: Install build dependencies + run: | + python -m pip install --upgrade pip + pip install build twine + + - name: Build package + run: | + python -m build + + - name: Check package + run: | + twine check dist/* + + - name: Publish to PyPI + env: + TWINE_USERNAME: __token__ + TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }} + run: | + twine upload dist/* diff --git a/dbx-agent-app/.github/workflows/test.yml b/dbx-agent-app/.github/workflows/test.yml new file mode 100644 index 00000000..bd307de9 --- /dev/null +++ b/dbx-agent-app/.github/workflows/test.yml @@ -0,0 +1,54 @@ +name: Tests + +on: + push: + branches: [ main, develop ] + pull_request: + branches: [ main, develop ] + +jobs: + test: + runs-on: ubuntu-latest + strategy: + matrix: + python-version: ["3.10", "3.11", "3.12"] + + 
steps: + - uses: actions/checkout@v4 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -e ".[dev]" + + - name: Lint with ruff + run: | + ruff check src/ + + - name: Check formatting with black + run: | + black --check src/ + + - name: Type check with mypy (informational) + continue-on-error: true + run: | + pip install mypy types-httpx + mypy src/ --ignore-missing-imports + + - name: Run tests with pytest + run: | + pytest tests/ -v --cov=dbx_agent_app --cov-report=xml --cov-report=term + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v4 + if: matrix.python-version == '3.11' + with: + file: ./coverage.xml + flags: unittests + name: codecov-umbrella + fail_ci_if_error: false diff --git a/dbx-agent-app/.gitignore b/dbx-agent-app/.gitignore new file mode 100644 index 00000000..0e99307d --- /dev/null +++ b/dbx-agent-app/.gitignore @@ -0,0 +1,54 @@ +# Python +__pycache__/ +*.py[cod] +*$py.class +*.so +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg + +# Virtual environments +venv/ +env/ +ENV/ +.venv + +# IDEs +.vscode/ +.idea/ +*.swp +*.swo +*~ + +# Testing +.pytest_cache/ +.coverage +htmlcov/ +.tox/ +.hypothesis/ + +# Documentation +docs/_build/ +site/ + +# OS +.DS_Store +Thumbs.db + +# Project specific +*.log +.env +.env.local diff --git a/dbx-agent-app/CLAUDE.md b/dbx-agent-app/CLAUDE.md new file mode 100644 index 00000000..732e5dc0 --- /dev/null +++ b/dbx-agent-app/CLAUDE.md @@ -0,0 +1,105 @@ +# dbx-agent-app + +## Project Overview + +Agent platform for Databricks Apps. Package name: `dbx-agent-app` (PyPI), import as `dbx_agent_app` (Python). + +Build agents with any framework (official Databricks SDK, LangGraph, CrewAI, plain FastAPI). 
Deploy the platform to discover, test, trace, and govern all of them. + +## Architecture: Agent Platform (v0.3) + +This is NOT an agent-building SDK. It's the **control plane** for agents in your workspace. + +### What the platform does: +- **Auto-discovery** — scans workspace apps for agent cards at `/.well-known/agent.json` +- **Testing** — call any agent via `/invocations` (Databricks standard protocol) +- **Governance** — register discovered agents in Unity Catalog, track lineage +- **Observability** — trace timing, protocol detection, agent handoff routing +- **Multi-agent deploy** — `agents.yaml` → topological sort → deploy → wire → permissions + +### What agent developers do: +- Build agents with any framework +- Use `@app_agent` decorator (recommended) or `add_agent_card()` helper +- Deploy as Databricks Apps + +## Primary API: `@app_agent` decorator + +The recommended way to build agents. One decorator gives you `/invocations`, agent card, health, and MCP. + +```python +from dbx_agent_app import app_agent, AgentRequest, AgentResponse + +@app_agent(name="my_agent", description="Does stuff", capabilities=["search"]) +async def my_agent(request: AgentRequest) -> AgentResponse: + return AgentResponse.text(f"You said: {request.last_user_message}") + +# my_agent.app → FastAPI with /invocations, /.well-known/agent.json, /health +# uvicorn app:my_agent.app +``` + +Return types auto-coerced: `AgentResponse`, `str`, or `dict`. 
+ +### Wire protocol types + +SDK types that replace `mlflow.types.responses` — no mlflow dependency needed: + +- `AgentRequest(input=[InputItem(role="user", content="...")])` — `.messages`, `.last_user_message` +- `AgentResponse.text("Hello!")` / `AgentResponse.from_dict({...})` — `.to_wire()` +- `StreamEvent.text_delta("chunk")` / `StreamEvent.done("full text")` — `.to_sse()` + +### LangChain compatibility + +For agents using LangChain, replace `self.prep_msgs_for_llm()` with: + +```python +from dbx_agent_app.core.compat import to_langchain_messages +messages = to_langchain_messages(request) # -> List[BaseMessage] +``` + +## Helpers API (plain FastAPI) + +For agents that want full control over their FastAPI app: + +```python +from fastapi import FastAPI +from dbx_agent_app import add_agent_card, add_mcp_endpoints + +app = FastAPI() + +@app.post("/invocations") +async def invocations(request): ... + +add_agent_card(app, name="my_agent", description="...", capabilities=["search"]) +add_mcp_endpoints(app, tools=[...]) # optional +``` + +## Project Structure + +- `src/dbx_agent_app/core/` — `@app_agent` decorator, wire types, helpers, compat +- `src/dbx_agent_app/dashboard/` — **the platform app** (FastAPI + React SPA) +- `src/dbx_agent_app/deploy/` — multi-agent deploy orchestration (agents.yaml) +- `src/dbx_agent_app/discovery/` — workspace agent scanning +- `src/dbx_agent_app/registry/` — Unity Catalog registration +- `src/dbx_agent_app/mcp/` — MCP JSON-RPC server +- `src/dbx_agent_app/cli.py` — CLI entry point +- `examples/` — example agents using helpers + official framework +- `tests/` — SDK tests + +## Key Conventions + +- SDK dependencies stay minimal (FastAPI, uvicorn, pydantic, httpx, databricks-sdk) +- mlflow is optional (`pip install dbx-agent-app[mlflow]`) +- The dashboard is the primary product, not a side feature +- `agents.yaml` defines agent systems (topology, dependencies, wiring) +- Examples use `@app_agent` decorator (preferred) or plain FastAPI + 
`add_agent_card()` +- Platform is framework-agnostic: any app serving `/.well-known/agent.json` gets discovered + +## CLI Commands + +``` +dbx-agent-app deploy # Deploy agents from agents.yaml +dbx-agent-app status # Show deployment status +dbx-agent-app destroy # Tear down deployed agents +dbx-agent-app dashboard # Launch platform locally (dev) +dbx-agent-app platform # Deploy platform as a Databricks App +``` diff --git a/dbx-agent-app/CONTRIBUTING.md b/dbx-agent-app/CONTRIBUTING.md new file mode 100644 index 00000000..9ac52186 --- /dev/null +++ b/dbx-agent-app/CONTRIBUTING.md @@ -0,0 +1,73 @@ +# Contributing to dbx-agent-app + +Thank you for your interest in contributing to dbx-agent-app! This project is a Databricks Labs initiative to make it easier to build discoverable AI agents on Databricks Apps. + +## Development Setup + +1. Clone the repository +2. Install dependencies: + ```bash + pip install -e ".[dev]" + ``` +3. Run tests: + ```bash + pytest + ``` + +## Code Style + +- Use Black for code formatting: `black src/` +- Use Ruff for linting: `ruff check src/` +- Line length: 100 characters (configured in `pyproject.toml`) +- Type hints are encouraged for public APIs + +## Testing + +- Write tests for new features using pytest +- Place tests in the `tests/` directory +- Run tests with: `pytest tests/` +- Aim for >80% code coverage + +## Pull Requests + +1. Fork the repository +2. Create a feature branch: `git checkout -b feature/my-feature` +3. Make your changes +4. Add tests for new functionality +5. Run tests and linting: `pytest && black src/ && ruff check src/` +6. Commit with a clear message describing the change +7. 
Push and create a pull request + +## PR Guidelines + +- **Clear description**: Explain what the PR does and why +- **Tests included**: All new features should have tests +- **Documentation updated**: Update README.md and docstrings as needed +- **Small, focused changes**: One feature or fix per PR +- **Passes CI**: All tests and linting must pass + +## Areas for Contribution + +We welcome contributions in these areas: + +- **Unity Catalog integration**: Register agents as UC catalog objects +- **MCP server support**: Add Model Context Protocol server capabilities +- **Orchestration patterns**: Multi-agent coordination utilities +- **RAG utilities**: Built-in vector search and retrieval patterns +- **Observability**: Logging, metrics, and tracing integrations +- **Documentation**: Examples, guides, and API documentation +- **Testing**: Improve test coverage and test utilities + +## Questions? + +- Open an issue for bugs or feature requests +- Start a discussion for design questions +- Check existing issues and PRs before starting work + +## Code of Conduct + +Be respectful, inclusive, and constructive in all interactions. This is a professional community focused on building great tools together. + +## License + +By contributing, you agree that your contributions will be licensed under the Apache 2.0 License. diff --git a/dbx-agent-app/DEPLOYMENT_GUIDE.md b/dbx-agent-app/DEPLOYMENT_GUIDE.md new file mode 100644 index 00000000..5df0d18a --- /dev/null +++ b/dbx-agent-app/DEPLOYMENT_GUIDE.md @@ -0,0 +1,207 @@ +# Databricks Labs Sandbox Deployment Guide + +## Overview + +This framework is ready for deployment to the [Databricks Labs Sandbox](https://github.com/databrickslabs/sandbox) repository. 
The sandbox is the perfect home for this project as it's: + +- **Early-stage but valuable**: Framework is functional and provides immediate value +- **Community-driven**: Open for contributions and iteration +- **Low barrier to adoption**: Simple API that users can start with immediately +- **Building block for future labs projects**: Foundation for multi-agent systems + +## Repository Structure + +``` +dbx-agent-app/ +├── src/dbx_agent_app/ # Core framework +│ ├── core/ # @app_agent decorator, agent request/response types +│ ├── discovery/ # Agent discovery, A2A client +│ ├── mcp/ # MCP server, UC Functions +│ ├── registry/ # Unity Catalog integration +│ └── orchestration/ # (Future: multi-agent patterns) +├── examples/ # Complete working examples +├── tests/ # Test suite +├── docs/ # MkDocs documentation +├── .github/workflows/ # CI/CD pipelines +├── README.md # Main documentation +├── CONTRIBUTING.md # Contribution guidelines +├── LICENSE # Apache 2.0 +└── pyproject.toml # Package configuration +``` + +## Pre-Deployment Checklist + +### ✅ Completed + +- [x] Core framework implementation (@app_agent decorator, discovery) +- [x] Agent card endpoints and protocol +- [x] Workspace agent discovery +- [x] Example applications +- [x] Test suite foundation +- [x] CI/CD workflows (test, publish, docs) +- [x] Documentation structure (MkDocs) +- [x] README and CONTRIBUTING guides +- [x] Apache 2.0 LICENSE + +### 🔄 Recommended Before Launch + +1. **Additional Tests** + - Integration tests with real Databricks Apps + - UC registration end-to-end tests + - MCP server protocol compliance tests + +2. **Documentation Completion** + - Finish all docs/ guide pages + - Add API reference with mkdocstrings + - Video walkthrough or GIF demos + +3. **Example Expansion** + - Multi-agent orchestration example + - RAG agent with vector search + - Data processing pipeline agent + +4. 
**Community Prep** + - Create GitHub issue templates + - Set up discussion categories + - Add CODE_OF_CONDUCT.md + +## Deployment Steps + +### 1. Fork to databrickslabs/sandbox + +```bash +# Clone this repository +git clone + +# Add sandbox as remote +cd dbx-agent-app +git remote add sandbox git@github.com:databrickslabs/sandbox.git + +# Create feature branch +git checkout -b dbx-agent-app-framework + +# Push to sandbox +git push sandbox dbx-agent-app-framework +``` + +### 2. Create PR to sandbox/main + +**PR Title**: Add dbx-agent-app framework for building discoverable agents + +**PR Description**: +```markdown +## Summary + +Adds `dbx-agent-app`, a lightweight Python framework for building discoverable AI agents on Databricks Apps. This framework makes it trivial to turn any Databricks App into a standards-compliant agent with auto-generated A2A protocol endpoints. + +## What It Does + +- **5 lines to create an agent**: Simple `@app_agent` decorator +- **Auto-generates protocol endpoints**: `/.well-known/agent.json`, OIDC config, health checks +- **Workspace discovery**: Find and communicate with agents across the workspace +- **Plain FastAPI support**: Use `add_agent_card()` helper for full control +- **Agent communication**: A2A protocol for standardized agent-to-agent interaction + +## Key Files + +- `src/dbx_agent_app/core/decorator.py` - @app_agent decorator +- `src/dbx_agent_app/core/types.py` - AgentRequest, AgentResponse types +- `src/dbx_agent_app/discovery/` - Agent discovery and A2A client +- `src/dbx_agent_app/registry/` - Unity Catalog integration +- `src/dbx_agent_app/mcp/` - MCP server support +- `examples/` - Complete working examples +- `tests/` - Test suite + +## Example Usage + +```python +from dbx_agent_app import app_agent, AgentRequest, AgentResponse + +@app_agent( + name="customer_research", + description="Research customers", + capabilities=["search", "analysis"], +) +async def customer_research(request: AgentRequest) -> AgentResponse: + 
return AgentResponse.text(f"Processing: {request.last_user_message}") +``` + +## Testing + +All tests pass: +```bash +pytest tests/ -v +``` + +## Documentation + +Full documentation at `docs/` (deployed via GitHub Pages) + +## Related Issues + +Addresses the need for standardized agent building on Databricks Apps. +``` + +### 3. Post-Merge Actions + +1. **Set up PyPI publishing** + - Create PyPI account for databricks-labs + - Add `PYPI_API_TOKEN` to repo secrets + - Publish first release (0.1.0) + +2. **Enable GitHub Pages** + - Go to Settings → Pages + - Source: GitHub Actions + - Deploy docs workflow will handle builds + +3. **Community Engagement** + - Announce in Databricks Community forums + - Share in relevant Slack channels + - Blog post on Databricks Labs blog + +4. **Iterate Based on Feedback** + - Monitor issues and discussions + - Prioritize community requests + - Release patches and minor versions + +## Long-Term Vision + +### Phase 1: Foundation (Current) +- ✅ Core framework +- ✅ Basic discovery +- ✅ UC integration +- ✅ MCP support + +### Phase 2: Enrichment (Next 3 months) +- Advanced orchestration patterns +- RAG utilities (vector search, retrieval) +- Observability integrations +- More UC Functions examples + +### Phase 3: Maturity (6-12 months) +- Graduate to full databrickslabs repo +- Native UC AGENT type support (when available) +- Multi-agent coordination primitives +- Production deployment patterns + +## Success Metrics + +Track these metrics to assess adoption: + +- **GitHub stars**: Community interest +- **PyPI downloads**: Actual usage +- **Issues/PRs**: Community engagement +- **Documentation views**: Learning curve +- **Example forks**: Real-world adoption + +Target for sandbox graduation (move to full repo): +- 100+ stars +- 1000+ PyPI downloads/month +- 10+ contributors +- 5+ community-contributed examples + +## Contact + +Framework developed as part of the multi-agent registry project. + +Questions? 
Open an issue or start a discussion in the sandbox repo. diff --git a/dbx-agent-app/FRAMEWORK_OVERVIEW.md b/dbx-agent-app/FRAMEWORK_OVERVIEW.md new file mode 100644 index 00000000..b9a092f7 --- /dev/null +++ b/dbx-agent-app/FRAMEWORK_OVERVIEW.md @@ -0,0 +1,249 @@ +# dbx-agent-app Framework Overview + +## What We've Built + +A lightweight Python framework for building discoverable AI agents on Databricks Apps. This is designed as a Databricks Labs contribution that makes it trivial to: + +1. **Turn any Databricks App into an agent** with auto-generated A2A protocol endpoints +2. **Discover agent-enabled apps** across your Databricks workspace +3. **Communicate with agents** using the A2A protocol standard + +## Key Design Principle + +**Agent = Databricks App** (not Model Serving endpoint) + +This framework treats Databricks Apps as first-class agents, allowing them to: +- Run custom logic, tools, and UI +- Expose capabilities via standard agent cards +- Be discovered by other agents and systems +- Delegate authentication to Databricks workspace OIDC + +## Framework Structure + +``` +dbx-agent-app/ +├── src/dbx_agent_app/ +│ ├── core/ +│ │ ├── __init__.py +│ │ ├── decorator.py # @app_agent decorator +│ │ ├── types.py # AgentRequest, AgentResponse +│ │ └── helpers.py # add_agent_card() helper +│ ├── discovery/ +│ │ ├── __init__.py +│ │ ├── a2a_client.py # A2A protocol client +│ │ └── agent_discovery.py # Workspace agent discovery +│ ├── mcp/ # MCP server support +│ ├── registry/ # UC integration +│ └── orchestration/ # Multi-agent patterns +│ +├── examples/ +│ ├── customer_research_agent.py # Full agent example using @app_agent +│ ├── discover_agents.py # Discovery client example +│ └── plain_fastapi_agent.py # Plain FastAPI + add_agent_card() +│ +├── tests/ +│ └── test_decorator.py # Core decorator tests +│ +├── README.md # Comprehensive documentation +├── CONTRIBUTING.md # Contribution guidelines +├── LICENSE # Apache 2.0 +└── pyproject.toml # Package configuration 
+``` + +## Core Components + +### 1. @app_agent Decorator (core/decorator.py) + +Python decorator that transforms an async function into an agent with auto-generated endpoints: +- `/.well-known/agent.json` - A2A protocol agent card +- `/invocations` - Standard Databricks protocol endpoint +- `/.well-known/openid-configuration` - OIDC delegation +- `/health` - Health check endpoint + +**Usage:** +```python +from dbx_agent_app import app_agent, AgentRequest, AgentResponse + +@app_agent( + name="my_agent", + description="Does useful things", + capabilities=["search", "analysis"], +) +async def my_agent(request: AgentRequest) -> AgentResponse: + return AgentResponse.text(f"You said: {request.last_user_message}") + +app = my_agent.app # FastAPI app +``` + +### 2. AgentDiscovery (discovery/agent_discovery.py) + +Discovers agent-enabled apps in your workspace by: +1. Listing all running Databricks Apps via SDK +2. Probing each app for A2A agent cards (/.well-known/agent.json) +3. Returning DiscoveredAgent objects with metadata + +**Usage:** +```python +from dbx_agent_app.discovery import AgentDiscovery + +discovery = AgentDiscovery(profile="my-profile") +result = await discovery.discover_agents() + +for agent in result.agents: + print(f"{agent.name}: {agent.endpoint_url}") +``` + +### 3. 
A2AClient (discovery/a2a_client.py) + +Communicates with agents using A2A protocol: +- Fetch agent cards (with OAuth redirect detection) +- Send messages via JSON-RPC +- Stream responses via SSE +- Handle authentication + +**Usage:** +```python +from dbx_agent_app.discovery import A2AClient + +async with A2AClient() as client: + card = await client.fetch_agent_card(agent_url) + response = await client.send_message(agent_url, "Hello") +``` + +## What Gets Auto-Generated + +When you create an agent with `@app_agent`, the framework automatically provides: + +### Agent Card (/.well-known/agent.json) +```json +{ + "schema_version": "a2a/1.0", + "name": "my_agent", + "description": "Does useful things", + "capabilities": ["search", "analysis"], + "endpoints": { + "invoke": "/invocations" + } +} +``` + +### Invocations Endpoint (/invocations) +Standard Databricks protocol endpoint for agent communication + +### OIDC Configuration (/.well-known/openid-configuration) +Delegates to Databricks workspace OIDC provider for authentication + +## Examples + +### Creating an Agent with @app_agent +```python +# examples/customer_research_agent.py +from dbx_agent_app import app_agent, AgentRequest, AgentResponse + +@app_agent( + name="customer_research", + description="Research customer information", + capabilities=["search", "analysis"], +) +async def customer_research(request: AgentRequest) -> AgentResponse: + return AgentResponse.text(f"Processing: {request.last_user_message}") + +app = customer_research.app +``` + +### Plain FastAPI with Helpers +```python +# examples/plain_fastapi_agent.py +from fastapi import FastAPI +from dbx_agent_app import add_agent_card + +app = FastAPI() + +@app.post("/invocations") +async def invocations(request): + return {"response": "Hello"} + +add_agent_card( + app, + name="my_agent", + description="My agent", + capabilities=["search"], +) +``` + +### Discovering Agents +```python +# examples/discover_agents.py +from dbx_agent_app.discovery import 
AgentDiscovery + +discovery = AgentDiscovery(profile="my-profile") +result = await discovery.discover_agents() + +for agent in result.agents: + print(f"Found: {agent.name}") +``` + +## Testing + +```bash +# Run tests +pytest + +# Example test: test_decorator.py +def test_agent_card_endpoint(): + @app_agent(name="test", description="Test", capabilities=[]) + async def test_agent(request: AgentRequest) -> AgentResponse: + return AgentResponse.text("OK") + + client = TestClient(test_agent.app) + response = client.get("/.well-known/agent.json") + assert response.status_code == 200 + assert response.json()["name"] == "test" +``` + +## Future Roadmap + +- **Unity Catalog integration**: Register agents as AGENT catalog objects +- **MCP server support**: Model Context Protocol endpoints +- **Orchestration patterns**: Multi-agent coordination utilities +- **RAG utilities**: Built-in vector search and retrieval +- **Observability**: Logging, metrics, tracing integrations + +## Why This Matters + +This framework solves the gap in Databricks' agent ecosystem: + +**Before:** +- No standard way to make apps discoverable as agents +- Manual protocol implementation required +- No workspace-level agent discovery +- Agents tied to Model Serving endpoints + +**After:** +- Single decorator: `@app_agent` makes any function an agent +- Auto-generated protocol endpoints +- Built-in discovery across workspace +- Agents can be full applications with custom logic + +## Databricks Labs Fit + +This is a natural Labs contribution because it: +- Builds on top of Databricks primitives (Apps, SDK, OIDC) +- Follows open standards (A2A protocol) +- Enables new patterns (multi-agent systems on Databricks) +- Low-friction adoption (5 lines of code to get started) +- Complements existing offerings (Model Serving, UC Functions) + +## Next Steps for Contribution + +1. **Add tests** for discovery and A2A client modules +2. **Create integration examples** with real Databricks Apps +3. 
**Add Unity Catalog integration** for agent registration +4. **Write MCP server support** for UC Functions +5. **Build orchestration patterns** for multi-agent workflows +6. **Set up CI/CD** for testing and publishing +7. **Create documentation site** (GitHub Pages or Read the Docs) + +## Contact + +This framework was extracted from the multi-agent registry project and designed for Databricks Labs contribution. diff --git a/dbx-agent-app/LICENSE b/dbx-agent-app/LICENSE new file mode 100644 index 00000000..02e4d2d1 --- /dev/null +++ b/dbx-agent-app/LICENSE @@ -0,0 +1,17 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + Copyright 2024 Databricks Labs + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/dbx-agent-app/README.md b/dbx-agent-app/README.md new file mode 100644 index 00000000..b9c913b6 --- /dev/null +++ b/dbx-agent-app/README.md @@ -0,0 +1,322 @@ +# dbx-agent-app + +A lightweight Python framework for building discoverable AI agents on Databricks Apps that automatically expose A2A (Agent-to-Agent) protocol endpoints. 
+ +## What It Does + +The `dbx-agent-app` framework makes it trivial to turn a Databricks App into a discoverable, standards-compliant agent: + +- **Auto-generates A2A protocol endpoints** (`/.well-known/agent.json`, `/.well-known/openid-configuration`) +- **Wraps FastAPI** to seamlessly integrate agent capabilities with your web app +- **Provides discovery clients** to find and communicate with other agents in your workspace +- **Handles authentication** by delegating to Databricks workspace OIDC + +## Key Concepts + +### Agent = Databricks App + +Unlike traditional approaches where agents are backed by Model Serving endpoints, this framework treats **Databricks Apps as first-class agents**. Each app: + +- Exposes its capabilities via a standard agent card +- Can be discovered by other agents and systems +- Runs as a full application with custom logic, tools, and UI + +### A2A Protocol + +The [A2A (Agent-to-Agent) protocol](https://a2a.so/) provides a standard way for agents to: +- Advertise their capabilities via `/.well-known/agent.json` +- Delegate authentication via `/.well-known/openid-configuration` +- Communicate using JSON-RPC over HTTP + +## Installation + +```bash +pip install dbx-agent-app +``` + +Or with development dependencies: + +```bash +pip install dbx-agent-app[dev] +``` + +## Quick Start + +### 1. Create an Agent + +```python +from dbx_agent_app import app_agent, AgentRequest, AgentResponse + +# Create your agent with capabilities +@app_agent( + name="customer_research", + description="Research customer information and market trends", + capabilities=["search", "analysis", "research"], +) +async def customer_research(request: AgentRequest) -> AgentResponse: + # Your agent logic here + return AgentResponse.text(f"Processing: {request.last_user_message}") + +# Run the app +if __name__ == "__main__": + import uvicorn + uvicorn.run(customer_research.app, host="0.0.0.0", port=8000) +``` + +### 2. 
Deploy to Databricks Apps + +Create an `app.yaml`: + +```yaml +command: + - "python" + - "-m" + - "uvicorn" + - "app:app" + - "--host" + - "0.0.0.0" + - "--port" + - "8000" + +env: + - name: DATABRICKS_HOST + valueFrom: system +``` + +Deploy: + +```bash +databricks apps create customer-research --description "Customer research agent" +databricks apps deploy customer-research --source-code-path ./ +``` + +### 3. Discover Agents in Your Workspace + +```python +import asyncio +from dbx_agent_app.discovery import AgentDiscovery + +async def main(): + discovery = AgentDiscovery(profile="my-profile") + result = await discovery.discover_agents() + + for agent in result.agents: + print(f"Found: {agent.name} - {agent.description}") + print(f" URL: {agent.endpoint_url}") + print(f" Capabilities: {agent.capabilities}") + +asyncio.run(main()) +``` + +## What Gets Auto-Generated + +When you create an agent with `@app_agent`, the framework automatically sets up: + +### `/.well-known/agent.json` (Agent Card) + +```json +{ + "schema_version": "a2a/1.0", + "name": "customer_research", + "description": "Research customer information and market trends", + "capabilities": ["search", "analysis", "research"], + "version": "1.0.0", + "endpoints": { + "mcp": "/api/mcp", + "invoke": "/api/invoke" + }, + "tools": [ + { + "name": "search_companies", + "description": "Search for companies by industry", + "parameters": { + "industry": {"type": "str", "required": true}, + "limit": {"type": "int", "required": false} + } + } + ] +} +``` + +### `/.well-known/openid-configuration` + +Delegates authentication to the Databricks workspace OIDC provider: + +```json +{ + "issuer": "https://your-workspace.cloud.databricks.com/oidc", + "authorization_endpoint": "https://your-workspace.cloud.databricks.com/oidc/oauth2/v2.0/authorize", + "token_endpoint": "https://your-workspace.cloud.databricks.com/oidc/v1/token", + "jwks_uri": "https://your-workspace.cloud.databricks.com/oidc/v1/keys" +} +``` + +### 
`/health` + +Standard health check endpoint: + +```json +{ + "status": "healthy", + "agent": "customer_research", + "version": "1.0.0" +} +``` + +## Discovery API + +The `AgentDiscovery` class scans your workspace for agent-enabled apps: + +```python +from dbx_agent_app.discovery import AgentDiscovery + +# Initialize with optional profile +discovery = AgentDiscovery(profile="my-profile") + +# Discover all agents +result = await discovery.discover_agents() + +# Access discovered agents +for agent in result.agents: + print(agent.name) # Agent name from card + print(agent.endpoint_url) # Base URL of the app + print(agent.app_name) # Databricks App name + print(agent.description) # Agent description + print(agent.capabilities) # Comma-separated capabilities + print(agent.protocol_version) # A2A protocol version + +# Check for errors +if result.errors: + for error in result.errors: + print(f"Error: {error}") +``` + +## A2A Client API + +Communicate with other agents using the A2A protocol: + +```python +from dbx_agent_app.discovery import A2AClient + +async with A2AClient() as client: + # Fetch an agent's card + card = await client.fetch_agent_card("https://agent.databricksapps.com") + + # Send a message + response = await client.send_message( + "https://agent.databricksapps.com/api/a2a", + "What are your capabilities?" + ) + + # Send a streaming message + async for event in client.send_streaming_message(url, "Analyze this data"): + print(event) +``` + +## Tool Registration + +When building agents with `@app_agent`, you can define tools directly within your agent or use helper functions. Tools are automatically exposed through A2A protocol endpoints. + +For agents using plain FastAPI with the `add_agent_card()` helper, tools can be registered using the same pattern and will be included in the agent card. 
+ +Example: + +```python +from dbx_agent_app import app_agent, AgentRequest, AgentResponse + +@app_agent( + name="customer_research", + description="Research customers", + capabilities=["search"], +) +async def customer_research(request: AgentRequest) -> AgentResponse: + # Process customer queries with your custom logic + return AgentResponse.text(f"Result: {request.last_user_message}") + +app = customer_research.app # FastAPI app with agent endpoints +``` + +## Unity Catalog Integration + +The framework integrates with Unity Catalog for agent registration and discovery: + +- Agents are discoverable via workspace scanning +- UC integration tracks agent metadata and lineage +- Manage agent permissions through UC grants + +Agents using `@app_agent` are automatically discovered when deployed to Databricks Apps. The discovery service scans for agent cards at `/.well-known/agent.json` across all running apps. + +## Architecture + +``` +┌─────────────────────────────────────────────────────────────┐ +│ Databricks Workspace │ +│ │ +│ ┌────────────────┐ ┌────────────────┐ │ +│ │ Agent App 1 │ │ Agent App 2 │ │ +│ │ (Customer │ │ (Market │ │ +│ │ Research) │ │ Analysis) │ │ +│ │ │ │ │ │ +│ │ @app_agent │ │ @app_agent │ │ +│ │ + agent card │ │ + agent card │ │ +│ │ + /invocations │ │ + /invocations │ │ +│ └────────────────┘ └────────────────┘ │ +│ ▲ ▲ │ +│ │ │ │ +│ └───────────┬───────────────┘ │ +│ │ │ +│ ┌──────▼──────┐ │ +│ │ Discovery │ │ +│ │ Service │ │ +│ │ │ │ +│ │ Workspace │ │ +│ │ Scanning │ │ +│ └──────────────┘ │ +└─────────────────────────────────────────────────────────────┘ +``` + +## Examples + +See the `examples/` directory for complete working examples: + +- `customer_research_agent.py` - Full agent with multiple tools +- `discover_agents.py` - Workspace agent discovery +- `communicate_with_agent.py` - A2A protocol communication + +## Development + +```bash +# Install with dev dependencies +pip install -e ".[dev]" + +# Run tests +pytest + +# Format code 
+black src/ + +# Lint +ruff check src/ +``` + +## Roadmap + +- [x] Unity Catalog integration for agent registration +- [x] MCP (Model Context Protocol) server support +- [ ] Built-in RAG and vector search utilities +- [ ] Observability and logging integrations + +## Contributing + +Contributions welcome! This is a Databricks Labs project. See `CONTRIBUTING.md` for guidelines. + +## License + +Apache 2.0 + +## Related Projects + +- [A2A Protocol](https://a2a.so/) - Agent-to-Agent communication standard +- [MCP](https://modelcontextprotocol.io/) - Model Context Protocol +- [Databricks Apps](https://docs.databricks.com/aws/en/dev-tools/databricks-apps/) - Deploy apps on Databricks +- [Databricks SDK](https://github.com/databricks/databricks-sdk-py) - Python SDK for Databricks diff --git a/dbx-agent-app/app/backend/.databricksignore b/dbx-agent-app/app/backend/.databricksignore new file mode 100644 index 00000000..62903d5f --- /dev/null +++ b/dbx-agent-app/app/backend/.databricksignore @@ -0,0 +1,71 @@ +# Python +__pycache__/ +*.py[cod] +*$py.class +*.so +.Python +.venv/ +venv/ +ENV/ +env/ + +# Testing +.pytest_cache/ +.coverage +htmlcov/ +*.cover + +# IDE +.vscode/ +.idea/ +*.swp +*.swo +*~ +.DS_Store + +# Git +.git/ +.gitignore +.gitattributes + +# Documentation & Phase tracking (don't need in deployed app) +PHASE_*.md +QUICKSTART.md +README.md +*.md + +# Build artifacts +dist/ +build/ +*.egg-info/ + +# Local development +.env +.env.backup +.env.local +*.db +*.db-journal +*.sqlite +*.sqlite3 + +# Deployment helpers (not needed in app) +deploy.sh +create_warehouse_tables.py +add_missing_columns.py +init_warehouse_schema.sql + +# Alembic (migrations run separately) +alembic/ +alembic.ini + +# Tests (not needed in production) +tests/ +test_*.py +*_test.py +pytest.ini +conftest.py + +# Temporary files +*.tmp +*.log +.cache/ diff --git a/dbx-agent-app/app/backend/.gitignore b/dbx-agent-app/app/backend/.gitignore new file mode 100644 index 00000000..1de4659e --- /dev/null 
+++ b/dbx-agent-app/app/backend/.gitignore @@ -0,0 +1,55 @@ +# Python +__pycache__/ +*.py[cod] +*$py.class +*.so +.Python +*.egg-info/ +dist/ +build/ +*.egg + +# Virtual Environment +.venv/ +venv/ +ENV/ +env/ + +# Environment Variables +.env +.env.local + +# Database +*.db +*.sqlite +*.sqlite3 +registry.db +data/*.db +!data/.gitkeep + +# Databricks +.databricks/ + +# IDE +.vscode/ +.idea/ +*.swp +*.swo +*~ + +# OS +.DS_Store +Thumbs.db + +# Testing +.pytest_cache/ +.coverage +htmlcov/ +*.cover + +# Logs +*.log + +# Alembic +alembic/__pycache__/ +alembic/versions/__pycache__/ diff --git a/dbx-agent-app/app/backend/README.md b/dbx-agent-app/app/backend/README.md new file mode 100644 index 00000000..fa89f24a --- /dev/null +++ b/dbx-agent-app/app/backend/README.md @@ -0,0 +1,257 @@ +# Multi-Agent Registry API + +The central backend for the agentic-knowledge-graph project. A FastAPI service that discovers, indexes, and provides access to everything in a Databricks workspace — agents, tools, MCP servers, Unity Catalog assets, workspace objects, and more. + +## What It Does + +**Asset Discovery & Indexing** — Crawls Unity Catalog (tables, views, functions, models, volumes) and workspace objects (notebooks, jobs, dashboards, pipelines). Discovers Databricks Apps that expose MCP endpoints and registers their tools. + +**Agent & Tool Registry** — Registers AI agents with capabilities, endpoints, and system prompts. Catalogs MCP servers and tools. Lets users curate collections and generate supervisor orchestrators from them. + +**Search & Chat** — Semantic search across all indexed assets via vector embeddings. Chat interface with tool calling against registered MCP servers. Conversation persistence with MLflow tracing. 
+
+## Architecture
+
+```
+Webapp (React)
+    |
+Registry API (FastAPI)  <-- this service
+    |
+    +-- Discovery Service (MCP endpoint probing, workspace/catalog crawlers)
+    +-- Agent/Tool Registry (CRUD, collections, supervisor generation)
+    +-- Search Service (vector embeddings, semantic search)
+    +-- Chat Service (LLM chat with tool calling, MLflow traces)
+    +-- A2A Protocol (agent-to-agent coordination via JSON-RPC 2.0)
+    |
+Databricks (Unity Catalog, SQL Warehouse, MLflow, Foundation Models, Apps)
+```
+
+## Domain Models
+
+16 SQLAlchemy models across 4 domains:
+
+### Core Registry
+
+| Model | Table | Purpose |
+|-------|-------|---------|
+| App | `apps` | Databricks Apps metadata (name, url, owner) |
+| MCPServer | `mcp_servers` | MCP server configs (managed/external/custom), FK to App |
+| Tool | `tools` | Individual tools from MCP servers (name, params, description) |
+| Collection | `collections` | User-curated groupings |
+| CollectionItem | `collection_items` | Polymorphic join (app, server, or tool) |
+
+### Agents & Coordination
+
+| Model | Table | Purpose |
+|-------|-------|---------|
+| Agent | `agents` | Agent entities with capabilities, endpoint, system prompt, A2A fields |
+| Supervisor | `supervisors` | Generated supervisor metadata from collections |
+| A2ATask | `a2a_tasks` | Work assigned to agents via A2A protocol |
+
+### Asset Indexing
+
+| Model | Table | Purpose |
+|-------|-------|---------|
+| CatalogAsset | `catalog_assets` | UC assets (tables, views, functions) with columns, tags, properties |
+| WorkspaceAsset | `workspace_assets` | Workspace objects (notebooks, jobs, dashboards) with metadata |
+| AssetEmbedding | `asset_embeddings` | Vector embeddings for semantic search |
+| AssetRelationship | `asset_relationships` | Lineage/dependency edges between assets |
+
+### Operational
+
+| Model | Table | Purpose |
+|-------|-------|---------|
+| Conversation | `conversations` | Chat conversation metadata |
+| ConversationMessage | 
`conversation_messages` | Individual messages with trace links | +| AuditLog | `audit_log` | Append-only governance trail | +| DiscoveryState | `discovery_state` | Background crawl/discovery status | + +### Entity Relationships + +``` +apps (1) ───< mcp_servers (1) ───< tools + | | | + +----------------+------------------+---< collection_items >--- collections + | +agents ───< a2a_tasks | + | +catalog_assets ───< asset_embeddings | +workspace_assets ──< asset_relationships +``` + +## API Routes + +20 route modules under `/api`: + +### Discovery & Indexing +- `GET /api/discovery/workspaces` — List Databricks CLI profiles with auth status +- `POST /api/discovery/refresh` — Discover MCP servers from workspace apps, catalog, or custom URLs +- `GET /api/discovery/status` — Check background discovery status +- `POST /api/catalog-assets/crawl` — Crawl Unity Catalog (tables, views, functions, volumes) +- `POST /api/workspace-assets/crawl` — Crawl workspace objects (notebooks, jobs, dashboards) + +### Agent Management +- `GET|POST /api/agents` — List / create agents +- `GET|PUT|DELETE /api/agents/{id}` — Get / update / delete agent +- `GET /api/agents/{id}/card` — A2A-compliant Agent Card + +### MCP Servers & Tools +- `GET|POST /api/mcp-servers` — List / create MCP server configs +- `GET|PUT|DELETE /api/mcp-servers/{id}` — CRUD +- `GET|POST /api/tools` — List / create tools +- `GET|PUT|DELETE /api/tools/{id}` — CRUD + +### Collections & Supervisors +- `GET|POST /api/collections` — List / create collections +- `POST /api/collections/{id}/items` — Add app/server/tool to collection +- `POST /api/supervisors/generate` — Generate supervisor code from a collection +- `GET /api/supervisors/{id}/preview` — Preview generated files +- `POST /api/supervisors/{id}/download` — Download as zip + +### Search & Chat +- `POST /api/search` — Semantic search across all indexed assets +- `POST /api/chat` — Chat with tool calling (SSE streaming) +- `GET|POST /api/conversations` — Conversation 
history +- `GET /api/lineage/{asset_id}` — Asset lineage graph + +### Infrastructure +- `GET /health` — Health check +- `GET /api/audit-log` — Query audit trail +- `POST /api/a2a` — A2A JSON-RPC 2.0 dispatch + +## Services + +16 service modules implementing business logic: + +| Service | File | Purpose | +|---------|------|---------| +| DiscoveryService | `discovery.py` | Orchestrates workspace app enumeration, MCP probing, catalog discovery | +| MCPClient | `mcp_client.py` | JSON-RPC 2.0 client for MCP `tools/list` calls | +| ToolParser | `tool_parser.py` | Normalizes MCP tool metadata | +| CatalogCrawler | `catalog_crawler.py` | Walks UC hierarchy via Databricks SDK | +| WorkspaceCrawler | `workspace_crawler.py` | Indexes workspace objects via SDK | +| LineageCrawler | `lineage_crawler.py` | Tracks asset dependencies | +| EmbeddingService | `embedding.py` | Generates vectors via Databricks Foundation Models | +| SearchService | `search.py` | Semantic search with vector similarity | +| GeneratorService | `generator.py` | Generates supervisor code from Jinja2 templates | +| ChatContext | `chat_context.py` | Conversation memory and context management | +| AgentChat | `agent_chat.py` | Agent chat with tool calling | +| CollectionsService | `collections.py` | Collection CRUD with item management | +| A2AClient | `a2a_client.py` | Agent-to-Agent protocol client | +| WorkspaceProfiles | `workspace_profiles.py` | Parses `~/.databrickscfg`, validates auth | +| AuditService | `audit.py` | Append-only audit logging | +| A2ANotifications | `a2a_notifications.py` | Webhook notifications for A2A tasks | + +## Project Structure + +``` +registry-api/ +├── app/ +│ ├── main.py # FastAPI app, lifespan, router registration +│ ├── config.py # Pydantic settings from env vars +│ ├── database.py # SQLAlchemy engine, session factory +│ ├── db_adapter.py # Database abstraction (SQLite / Databricks SQL) +│ ├── middleware/ +│ │ └── auth.py # OBO authentication middleware +│ ├── models/ # 15 
SQLAlchemy models +│ ├── routes/ # 20 FastAPI route modules +│ ├── schemas/ # Pydantic request/response schemas +│ ├── services/ # 16 business logic modules +│ └── templates/ # Jinja2 templates for supervisor generation +├── alembic/ # Database migrations +├── tests/ # Pytest test suite +├── app.yaml # Databricks Apps deployment config +├── deploy.sh # Deployment script +├── requirements.txt # Python dependencies +└── README.md +``` + +## Setup + +### Local Development + +```bash +python3 -m venv .venv +source .venv/bin/activate +pip install -r requirements.txt + +# SQLite for local dev (default) +export DATABASE_URL=sqlite:///./data/registry.db + +# Run with auto-reload +uvicorn app.main:app --reload --port 8000 +``` + +### Databricks Apps Deployment + +```bash +# Sync and deploy (uses fe-vm-serverless-dxukih profile) +./deploy.sh + +# Or manually: +databricks sync . /Workspace/Shared/apps/registry-api --profile fe-vm-serverless-dxukih --full \ + --exclude ".venv" --exclude "__pycache__" --exclude ".git" --exclude "*.pyc" --exclude "*.db" +databricks apps deploy registry-api --source-code-path /Workspace/Shared/apps/registry-api \ + --profile fe-vm-serverless-dxukih +``` + +**Deployed URL**: `https://registry-api-7474660127789418.aws.databricksapps.com` + +## Configuration + +### Required + +| Variable | Description | Example | +|----------|-------------|---------| +| `DATABASE_URL` | Connection string | `sqlite:///./data/registry.db` | + +### Databricks + +| Variable | Description | Default | +|----------|-------------|---------| +| `DATABRICKS_HOST` | Workspace URL | (from SDK config) | +| `DATABRICKS_TOKEN` | Access token | (from SDK config) | +| `DATABRICKS_CONFIG_PROFILE` | CLI profile | (none) | +| `DATABRICKS_WAREHOUSE_ID` | SQL warehouse ID | (none) | + +### LLM & Embeddings + +| Variable | Description | Default | +|----------|-------------|---------| +| `LLM_ENDPOINT` | Chat model endpoint | `databricks-claude-sonnet-4-5` | +| `EMBEDDING_MODEL` | 
Embedding model | `databricks-bge-large-en` | +| `EMBEDDING_DIMENSION` | Vector dimensions | `1024` | + +### MCP & A2A + +| Variable | Description | Default | +|----------|-------------|---------| +| `MCP_CATALOG_URL` | Central MCP catalog endpoint | (none, gracefully skipped) | +| `A2A_PROTOCOL_VERSION` | A2A protocol version | `0.3.0` | +| `A2A_BASE_URL` | Base URL for A2A endpoints | (none) | + +### Server + +| Variable | Description | Default | +|----------|-------------|---------| +| `PORT` | Server port | `8000` | +| `HOST` | Bind address | `0.0.0.0` | +| `CORS_ORIGINS` | Allowed origins (comma-separated) | `http://localhost:3000,...` | +| `AUTH_ENABLED` | Enable auth middleware | `true` | + +## Database Migrations + +```bash +alembic upgrade head # Apply all migrations +alembic revision --autogenerate -m "description" # Generate new migration +alembic history # View history +alembic downgrade -1 # Rollback one +``` + +## Testing + +```bash +pytest # Run all tests +pytest tests/test_discovery.py # Run specific test file +pytest -v --tb=short # Verbose with short tracebacks +``` diff --git a/dbx-agent-app/app/backend/alembic.ini b/dbx-agent-app/app/backend/alembic.ini new file mode 100644 index 00000000..09b51502 --- /dev/null +++ b/dbx-agent-app/app/backend/alembic.ini @@ -0,0 +1,150 @@ +# A generic, single database configuration. + +[alembic] +# path to migration scripts. +# this is typically a path given in POSIX (e.g. 
forward slashes) +# format, relative to the token %(here)s which refers to the location of this +# ini file +script_location = %(here)s/alembic + +# template used to generate migration file names; The default value is %%(rev)s_%%(slug)s +# Uncomment the line below if you want the files to be prepended with date and time +# see https://alembic.sqlalchemy.org/en/latest/tutorial.html#editing-the-ini-file +# for all available tokens +# file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s +# Or organize into date-based subdirectories (requires recursive_version_locations = true) +# file_template = %%(year)d/%%(month).2d/%%(day).2d_%%(hour).2d%%(minute).2d_%%(second).2d_%%(rev)s_%%(slug)s + +# sys.path path, will be prepended to sys.path if present. +# defaults to the current working directory. for multiple paths, the path separator +# is defined by "path_separator" below. +prepend_sys_path = . + + +# timezone to use when rendering the date within the migration file +# as well as the filename. +# If specified, requires the tzdata library which can be installed by adding +# `alembic[tz]` to the pip requirements. +# string value is passed to ZoneInfo() +# leave blank for localtime +# timezone = + +# max length of characters to apply to the "slug" field +# truncate_slug_length = 40 + +# set to 'true' to run the environment during +# the 'revision' command, regardless of autogenerate +# revision_environment = false + +# set to 'true' to allow .pyc and .pyo files without +# a source .py file to be detected as revisions in the +# versions/ directory +# sourceless = false + +# version location specification; This defaults +# to /versions. When using multiple version +# directories, initial revisions must be specified with --version-path. +# The path separator used here should be the separator specified by "path_separator" +# below. 
+# version_locations = %(here)s/bar:%(here)s/bat:%(here)s/alembic/versions + +# path_separator; This indicates what character is used to split lists of file +# paths, including version_locations and prepend_sys_path within configparser +# files such as alembic.ini. +# The default rendered in new alembic.ini files is "os", which uses os.pathsep +# to provide os-dependent path splitting. +# +# Note that in order to support legacy alembic.ini files, this default does NOT +# take place if path_separator is not present in alembic.ini. If this +# option is omitted entirely, fallback logic is as follows: +# +# 1. Parsing of the version_locations option falls back to using the legacy +# "version_path_separator" key, which if absent then falls back to the legacy +# behavior of splitting on spaces and/or commas. +# 2. Parsing of the prepend_sys_path option falls back to the legacy +# behavior of splitting on spaces, commas, or colons. +# +# Valid values for path_separator are: +# +# path_separator = : +# path_separator = ; +# path_separator = space +# path_separator = newline +# +# Use os.pathsep. Default configuration used for new projects. +path_separator = os + +# set to 'true' to search source files recursively +# in each "version_locations" directory +# new in Alembic version 1.10 +# recursive_version_locations = false + +# the output encoding used when revision files +# are written from script.py.mako +# output_encoding = utf-8 + +# database URL. This is consumed by the user-maintained env.py script only. +# other means of configuring database URLs may be customized within the env.py +# file. +# NOTE: Database URL is loaded from environment variables in env.py +# sqlalchemy.url = driver://user:pass@localhost/dbname + + +[post_write_hooks] +# post_write_hooks defines scripts or Python functions that are run +# on newly generated revision scripts. 
See the documentation for further +# detail and examples + +# format using "black" - use the console_scripts runner, against the "black" entrypoint +# hooks = black +# black.type = console_scripts +# black.entrypoint = black +# black.options = -l 79 REVISION_SCRIPT_FILENAME + +# lint with attempts to fix using "ruff" - use the module runner, against the "ruff" module +# hooks = ruff +# ruff.type = module +# ruff.module = ruff +# ruff.options = check --fix REVISION_SCRIPT_FILENAME + +# Alternatively, use the exec runner to execute a binary found on your PATH +# hooks = ruff +# ruff.type = exec +# ruff.executable = ruff +# ruff.options = check --fix REVISION_SCRIPT_FILENAME + +# Logging configuration. This is also consumed by the user-maintained +# env.py script only. +[loggers] +keys = root,sqlalchemy,alembic + +[handlers] +keys = console + +[formatters] +keys = generic + +[logger_root] +level = WARNING +handlers = console +qualname = + +[logger_sqlalchemy] +level = WARNING +handlers = +qualname = sqlalchemy.engine + +[logger_alembic] +level = INFO +handlers = +qualname = alembic + +[handler_console] +class = StreamHandler +args = (sys.stderr,) +level = NOTSET +formatter = generic + +[formatter_generic] +format = %(levelname)-5.5s [%(name)s] %(message)s +datefmt = %H:%M:%S diff --git a/dbx-agent-app/app/backend/alembic/README b/dbx-agent-app/app/backend/alembic/README new file mode 100644 index 00000000..98e4f9c4 --- /dev/null +++ b/dbx-agent-app/app/backend/alembic/README @@ -0,0 +1 @@ +Generic single-database configuration. 
\ No newline at end of file diff --git a/dbx-agent-app/app/backend/alembic/env.py b/dbx-agent-app/app/backend/alembic/env.py new file mode 100644 index 00000000..957390dc --- /dev/null +++ b/dbx-agent-app/app/backend/alembic/env.py @@ -0,0 +1,89 @@ +import os +import sys +from logging.config import fileConfig + +from sqlalchemy import engine_from_config +from sqlalchemy import pool + +from alembic import context + +# Add the parent directory to the path so we can import our app +sys.path.insert(0, os.path.dirname(os.path.dirname(__file__))) + +# Import our models and database configuration +from app.database import Base +from app.config import settings +from app.models import App, MCPServer, Tool, Collection, CollectionItem, Supervisor, DiscoveryState + +# this is the Alembic Config object, which provides +# access to the values within the .ini file in use. +config = context.config + +# Override database URL from environment variables +config.set_main_option("sqlalchemy.url", settings.database_url) + +# Interpret the config file for Python logging. +# This line sets up loggers basically. +if config.config_file_name is not None: + fileConfig(config.config_file_name) + +# add your model's MetaData object here +# for 'autogenerate' support +target_metadata = Base.metadata + +# other values from the config, defined by the needs of env.py, +# can be acquired: +# my_important_option = config.get_main_option("my_important_option") +# ... etc. + + +def run_migrations_offline() -> None: + """Run migrations in 'offline' mode. + + This configures the context with just a URL + and not an Engine, though an Engine is acceptable + here as well. By skipping the Engine creation + we don't even need a DBAPI to be available. + + Calls to context.execute() here emit the given string to the + script output. 
+ + """ + url = config.get_main_option("sqlalchemy.url") + context.configure( + url=url, + target_metadata=target_metadata, + literal_binds=True, + dialect_opts={"paramstyle": "named"}, + ) + + with context.begin_transaction(): + context.run_migrations() + + +def run_migrations_online() -> None: + """Run migrations in 'online' mode. + + In this scenario we need to create an Engine + and associate a connection with the context. + + """ + connectable = engine_from_config( + config.get_section(config.config_ini_section, {}), + prefix="sqlalchemy.", + poolclass=pool.NullPool, + ) + + with connectable.connect() as connection: + context.configure( + connection=connection, target_metadata=target_metadata + ) + + with context.begin_transaction(): + context.run_migrations() + + +if context.is_offline_mode(): + run_migrations_offline() +else: + run_migrations_online() diff --git a/dbx-agent-app/app/backend/alembic/script.py.mako b/dbx-agent-app/app/backend/alembic/script.py.mako new file mode 100644 index 00000000..11016301 --- /dev/null +++ b/dbx-agent-app/app/backend/alembic/script.py.mako @@ -0,0 +1,28 @@ +"""${message} + +Revision ID: ${up_revision} +Revises: ${down_revision | comma,n} +Create Date: ${create_date} + +""" +from typing import Sequence, Union + +from alembic import op +import sqlalchemy as sa +${imports if imports else ""} + +# revision identifiers, used by Alembic. 
+revision: str = ${repr(up_revision)} +down_revision: Union[str, Sequence[str], None] = ${repr(down_revision)} +branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)} +depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)} + + +def upgrade() -> None: + """Upgrade schema.""" + ${upgrades if upgrades else "pass"} + + +def downgrade() -> None: + """Downgrade schema.""" + ${downgrades if downgrades else "pass"} diff --git a/dbx-agent-app/app/backend/alembic/versions/20260225110200_add_agent_app_link.py b/dbx-agent-app/app/backend/alembic/versions/20260225110200_add_agent_app_link.py new file mode 100644 index 00000000..c1cac499 --- /dev/null +++ b/dbx-agent-app/app/backend/alembic/versions/20260225110200_add_agent_app_link.py @@ -0,0 +1,30 @@ +"""add agent app link + +Revision ID: d1e2f3a4b5c6 +Revises: c3d4e5f6a7b8 +Create Date: 2026-02-25 11:00:00.000000 + +""" +from typing import Sequence, Union + +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. 
+revision: str = 'd1e2f3a4b5c6' +down_revision: Union[str, None] = 'c3d4e5f6a7b8' +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + # Add app_id column to agents table + op.add_column('agents', sa.Column('app_id', sa.Integer(), nullable=True)) + op.create_foreign_key('fk_agent_app', 'agents', 'apps', ['app_id'], ['id'], ondelete='CASCADE') + + +def downgrade() -> None: + # Remove the foreign key and column + op.drop_constraint('fk_agent_app', 'agents', type_='foreignkey') + op.drop_column('agents', 'app_id') diff --git a/dbx-agent-app/app/backend/alembic/versions/423f4a48143d_initial_schema.py b/dbx-agent-app/app/backend/alembic/versions/423f4a48143d_initial_schema.py new file mode 100644 index 00000000..a1ee2604 --- /dev/null +++ b/dbx-agent-app/app/backend/alembic/versions/423f4a48143d_initial_schema.py @@ -0,0 +1,116 @@ +"""initial_schema + +Revision ID: 423f4a48143d +Revises: +Create Date: 2026-02-10 16:06:30.849897 + +""" +from typing import Sequence, Union + +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision: str = '423f4a48143d' +down_revision: Union[str, Sequence[str], None] = None +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + """Upgrade schema.""" + # ### commands auto generated by Alembic - please adjust! 
### + op.create_table('apps', + sa.Column('id', sa.Integer(), autoincrement=True, nullable=False), + sa.Column('name', sa.String(length=255), nullable=False), + sa.Column('owner', sa.String(length=255), nullable=True), + sa.Column('url', sa.Text(), nullable=True), + sa.Column('tags', sa.Text(), nullable=True), + sa.Column('manifest_url', sa.Text(), nullable=True), + sa.PrimaryKeyConstraint('id'), + sa.UniqueConstraint('name') + ) + op.create_index('idx_app_name', 'apps', ['name'], unique=False) + op.create_index('idx_app_owner', 'apps', ['owner'], unique=False) + op.create_index(op.f('ix_apps_id'), 'apps', ['id'], unique=False) + op.create_table('collections', + sa.Column('id', sa.Integer(), autoincrement=True, nullable=False), + sa.Column('name', sa.String(length=255), nullable=False), + sa.Column('description', sa.Text(), nullable=True), + sa.PrimaryKeyConstraint('id'), + sa.UniqueConstraint('name') + ) + op.create_index('idx_collection_name', 'collections', ['name'], unique=False) + op.create_index(op.f('ix_collections_id'), 'collections', ['id'], unique=False) + op.create_table('mcp_servers', + sa.Column('id', sa.Integer(), autoincrement=True, nullable=False), + sa.Column('app_id', sa.Integer(), nullable=True), + sa.Column('server_url', sa.Text(), nullable=False), + sa.Column('kind', sa.Enum('MANAGED', 'EXTERNAL', 'CUSTOM', name='mcpserverkind'), nullable=False), + sa.Column('uc_connection', sa.String(length=255), nullable=True), + sa.Column('scopes', sa.Text(), nullable=True), + sa.ForeignKeyConstraint(['app_id'], ['apps.id'], ondelete='CASCADE'), + sa.PrimaryKeyConstraint('id') + ) + op.create_index('idx_mcp_server_app_id', 'mcp_servers', ['app_id'], unique=False) + op.create_index('idx_mcp_server_kind', 'mcp_servers', ['kind'], unique=False) + op.create_index(op.f('ix_mcp_servers_id'), 'mcp_servers', ['id'], unique=False) + op.create_table('tools', + sa.Column('id', sa.Integer(), autoincrement=True, nullable=False), + sa.Column('mcp_server_id', sa.Integer(), 
nullable=False), + sa.Column('name', sa.String(length=255), nullable=False), + sa.Column('description', sa.Text(), nullable=True), + sa.Column('parameters', sa.Text(), nullable=True), + sa.ForeignKeyConstraint(['mcp_server_id'], ['mcp_servers.id'], ondelete='CASCADE'), + sa.PrimaryKeyConstraint('id') + ) + op.create_index('idx_tool_mcp_server_id', 'tools', ['mcp_server_id'], unique=False) + op.create_index('idx_tool_name', 'tools', ['name'], unique=False) + op.create_index(op.f('ix_tools_id'), 'tools', ['id'], unique=False) + op.create_table('collection_items', + sa.Column('id', sa.Integer(), autoincrement=True, nullable=False), + sa.Column('collection_id', sa.Integer(), nullable=False), + sa.Column('app_id', sa.Integer(), nullable=True), + sa.Column('mcp_server_id', sa.Integer(), nullable=True), + sa.Column('tool_id', sa.Integer(), nullable=True), + sa.CheckConstraint('(app_id IS NOT NULL AND mcp_server_id IS NULL AND tool_id IS NULL) OR (app_id IS NULL AND mcp_server_id IS NOT NULL AND tool_id IS NULL) OR (app_id IS NULL AND mcp_server_id IS NULL AND tool_id IS NOT NULL)', name='chk_collection_item_exactly_one_ref'), + sa.ForeignKeyConstraint(['app_id'], ['apps.id'], ondelete='CASCADE'), + sa.ForeignKeyConstraint(['collection_id'], ['collections.id'], ondelete='CASCADE'), + sa.ForeignKeyConstraint(['mcp_server_id'], ['mcp_servers.id'], ondelete='CASCADE'), + sa.ForeignKeyConstraint(['tool_id'], ['tools.id'], ondelete='CASCADE'), + sa.PrimaryKeyConstraint('id') + ) + op.create_index('idx_collection_item_app_id', 'collection_items', ['app_id'], unique=False) + op.create_index('idx_collection_item_collection_id', 'collection_items', ['collection_id'], unique=False) + op.create_index('idx_collection_item_mcp_server_id', 'collection_items', ['mcp_server_id'], unique=False) + op.create_index('idx_collection_item_tool_id', 'collection_items', ['tool_id'], unique=False) + op.create_index(op.f('ix_collection_items_id'), 'collection_items', ['id'], unique=False) + # ### 
end Alembic commands ### + + +def downgrade() -> None: + """Downgrade schema.""" + # ### commands auto generated by Alembic - please adjust! ### + op.drop_index(op.f('ix_collection_items_id'), table_name='collection_items') + op.drop_index('idx_collection_item_tool_id', table_name='collection_items') + op.drop_index('idx_collection_item_mcp_server_id', table_name='collection_items') + op.drop_index('idx_collection_item_collection_id', table_name='collection_items') + op.drop_index('idx_collection_item_app_id', table_name='collection_items') + op.drop_table('collection_items') + op.drop_index(op.f('ix_tools_id'), table_name='tools') + op.drop_index('idx_tool_name', table_name='tools') + op.drop_index('idx_tool_mcp_server_id', table_name='tools') + op.drop_table('tools') + op.drop_index(op.f('ix_mcp_servers_id'), table_name='mcp_servers') + op.drop_index('idx_mcp_server_kind', table_name='mcp_servers') + op.drop_index('idx_mcp_server_app_id', table_name='mcp_servers') + op.drop_table('mcp_servers') + op.drop_index(op.f('ix_collections_id'), table_name='collections') + op.drop_index('idx_collection_name', table_name='collections') + op.drop_table('collections') + op.drop_index(op.f('ix_apps_id'), table_name='apps') + op.drop_index('idx_app_owner', table_name='apps') + op.drop_index('idx_app_name', table_name='apps') + op.drop_table('apps') + # ### end Alembic commands ### diff --git a/dbx-agent-app/app/backend/alembic/versions/b1e2f3a4c5d6_add_supervisor_and_discovery_state.py b/dbx-agent-app/app/backend/alembic/versions/b1e2f3a4c5d6_add_supervisor_and_discovery_state.py new file mode 100644 index 00000000..cee6a308 --- /dev/null +++ b/dbx-agent-app/app/backend/alembic/versions/b1e2f3a4c5d6_add_supervisor_and_discovery_state.py @@ -0,0 +1,52 @@ +"""add supervisor and discovery_state tables + +Revision ID: b1e2f3a4c5d6 +Revises: 423f4a48143d +Create Date: 2026-02-18 10:30:00.000000 + +""" +from typing import Sequence, Union + +from alembic import op +import sqlalchemy 
as sa + + +# revision identifiers, used by Alembic. +revision: str = 'b1e2f3a4c5d6' +down_revision: Union[str, Sequence[str], None] = '423f4a48143d' +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + """Add supervisors and discovery_state tables.""" + op.create_table('supervisors', + sa.Column('id', sa.Integer(), autoincrement=True, nullable=False), + sa.Column('collection_id', sa.Integer(), nullable=False), + sa.Column('app_name', sa.String(length=255), nullable=False), + sa.Column('generated_at', sa.DateTime(), nullable=False), + sa.Column('deployed_url', sa.Text(), nullable=True), + sa.ForeignKeyConstraint(['collection_id'], ['collections.id'], ondelete='CASCADE'), + sa.PrimaryKeyConstraint('id'), + ) + op.create_index('idx_supervisor_collection_id', 'supervisors', ['collection_id'], unique=False) + op.create_index('idx_supervisor_app_name', 'supervisors', ['app_name'], unique=False) + op.create_index(op.f('ix_supervisors_id'), 'supervisors', ['id'], unique=False) + + op.create_table('discovery_state', + sa.Column('id', sa.Integer(), nullable=False), + sa.Column('is_running', sa.Boolean(), nullable=False), + sa.Column('last_run_timestamp', sa.String(length=64), nullable=True), + sa.Column('last_run_status', sa.String(length=32), nullable=True), + sa.Column('last_run_message', sa.Text(), nullable=True), + sa.PrimaryKeyConstraint('id'), + ) + + +def downgrade() -> None: + """Remove supervisors and discovery_state tables.""" + op.drop_table('discovery_state') + op.drop_index(op.f('ix_supervisors_id'), table_name='supervisors') + op.drop_index('idx_supervisor_app_name', table_name='supervisors') + op.drop_index('idx_supervisor_collection_id', table_name='supervisors') + op.drop_table('supervisors') diff --git a/dbx-agent-app/app/backend/alembic/versions/c3d4e5f6a7b8_add_agent_analytics.py b/dbx-agent-app/app/backend/alembic/versions/c3d4e5f6a7b8_add_agent_analytics.py new file mode 
"""add agent_analytics table

Revision ID: c3d4e5f6a7b8
Revises: b1e2f3a4c5d6
Create Date: 2026-02-24 23:00:00.000000

"""
from typing import Sequence, Union

from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision: str = 'c3d4e5f6a7b8'
down_revision: Union[str, Sequence[str], None] = 'b1e2f3a4c5d6'
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    """Add agent_analytics table for tracking per-invocation agent performance."""
    op.create_table('agent_analytics',
        sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
        sa.Column('agent_id', sa.Integer(), nullable=False),
        sa.Column('task_description', sa.Text(), nullable=True),
        # Success flag stored as an integer with a DB-level default of 1;
        # presumably 1/0 semantics — confirm against the writing code.
        sa.Column('success', sa.Integer(), server_default='1', nullable=True),
        sa.Column('latency_ms', sa.Integer(), nullable=True),
        sa.Column('quality_score', sa.Integer(), nullable=True),
        sa.Column('error_message', sa.Text(), nullable=True),
        sa.Column('created_at', sa.DateTime(), nullable=False),
        # Analytics rows are removed together with their agent.
        sa.ForeignKeyConstraint(['agent_id'], ['agents.id'], ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id'),
    )
    # Support per-agent lookups and time-range queries.
    op.create_index('idx_analytics_agent_id', 'agent_analytics', ['agent_id'], unique=False)
    op.create_index('idx_analytics_created_at', 'agent_analytics', ['created_at'], unique=False)


def downgrade() -> None:
    """Remove agent_analytics table."""
    # Drop indexes before the table they belong to.
    op.drop_index('idx_analytics_created_at', table_name='agent_analytics')
    op.drop_index('idx_analytics_agent_id', table_name='agent_analytics')
    op.drop_table('agent_analytics')
# Databricks Apps Configuration for Multi-Agent Registry API
# Documentation: https://docs.databricks.com/dev-tools/databricks-apps/index.html

# Command to start the application
# Uses uvicorn with proper worker configuration for production
command:
  - python
  - -m
  - uvicorn
  - app.main:app
  - --host
  - "0.0.0.0"
  - --port
  - "8000"
  - --workers
  # Using 1 worker to avoid concurrent startup discovery conflicts
  - "1"
  - --loop
  - uvloop
  - --http
  - httptools

# Environment variables
# Static values use 'value', resource references use 'valueFrom'
env:
  # Application Configuration
  - name: PORT
    value: "8000"
  - name: ENVIRONMENT
    value: production
  - name: DEBUG
    value: "false"

  # API Configuration
  - name: API_TITLE
    value: "Multi-Agent Registry API"
  - name: API_VERSION
    value: "0.1.0"
  - name: API_PREFIX
    value: "/api"

  # Database Configuration - Using Databricks SQL Warehouse for persistent storage
  # NOTE(review): the warehouse id below is hardcoded and duplicated in the
  # resources section — consider a single source of truth / valueFrom.
  - name: DATABASE_URL
    value: "databricks://warehouse"
  - name: DATABRICKS_WAREHOUSE_ID
    value: "387bcda0f2ece20c"
  - name: DB_CATALOG
    value: "serverless_dxukih_catalog"
  - name: DB_SCHEMA
    value: "registry"

  # CORS Configuration
  # Allow requests from webapp and localhost
  - name: CORS_ORIGINS
    value: "https://multi-agent-registry-webapp-7474660127789418.aws.databricksapps.com,http://localhost:5501,http://localhost:3000"
  - name: CORS_CREDENTIALS
    value: "true"
  - name: CORS_METHODS
    value: "GET,POST,PUT,DELETE,OPTIONS,PATCH"
  - name: CORS_HEADERS
    value: "Content-Type,Authorization,X-Requested-With,Accept,Origin"

# Resources
# Declares the SQL warehouse, catalog and schema the app is granted access to.
resources:
  - name: sql-warehouse
    sql_warehouse:
      id: "387bcda0f2ece20c"
      permission: CAN_USE
  - name: serverless-catalog
    catalog:
      name: serverless_dxukih_catalog
      permission: USE_CATALOG
  - name: registry-schema
    schema:
      catalog_name: serverless_dxukih_catalog
      name: registry
      permission: USE_SCHEMA
from pydantic_settings import BaseSettings, SettingsConfigDict
from typing import Optional


class Settings(BaseSettings):
    """
    Application settings loaded from environment variables.

    Values come from the process environment (case-insensitive) or from a
    local ``.env`` file; each class default below is used when neither
    source provides the variable. ``database_url`` has no default and is
    therefore required at startup.
    """

    # Database Configuration (Lakebase via Unity Catalog)
    database_url: str  # required — no default
    database_pool_size: int = 5
    database_max_overflow: int = 10
    database_pool_timeout: int = 30

    # Databricks Configuration
    databricks_host: Optional[str] = None
    databricks_token: Optional[str] = None
    databricks_config_profile: Optional[str] = None
    databricks_warehouse_id: Optional[str] = None
    db_catalog: str = "serverless_dxukih_catalog"
    db_schema: str = "registry"

    # LLM Configuration
    llm_endpoint: str = "databricks-claude-sonnet-4-5"

    # Embedding Configuration
    embedding_model: str = "databricks-bge-large-en"
    embedding_dimension: int = 1024
    search_results_limit: int = 20

    # API Configuration
    api_title: str = "Multi-Agent Registry API"
    api_version: str = "0.1.0"
    api_prefix: str = "/api"

    # Server Configuration
    port: int = 8000
    host: str = "0.0.0.0"

    # MCP Catalog Configuration
    mcp_catalog_url: Optional[str] = None

    # MLflow Tracing
    mlflow_tracking_uri: str = "databricks"
    mlflow_experiment_name: str = "/Shared/registry-api-chat-traces"

    # A2A Protocol
    a2a_protocol_version: str = "0.3.0"
    a2a_base_url: Optional[str] = None

    # Authentication
    # Set to False to disable auth middleware (not recommended for production)
    auth_enabled: bool = True

    # Environment
    environment: str = "development"
    debug: bool = False

    # CORS Settings
    # Default to localhost for development. In production, set explicit origins.
    cors_origins: str = "http://localhost:3000,http://localhost:5500,http://localhost:5501"
    cors_credentials: bool = True
    cors_methods: str = "GET,POST,PUT,DELETE,OPTIONS,PATCH"
    cors_headers: str = "Content-Type,Authorization,X-Requested-With,Accept,Origin"

    model_config = SettingsConfigDict(
        env_file=".env",
        env_file_encoding="utf-8",
        case_sensitive=False,
    )


# Module-level singleton: settings are resolved once at import time.
settings = Settings()


# --- app/database.py -------------------------------------------------------

import logging
from sqlalchemy import create_engine, event
# NOTE(review): sqlalchemy.ext.declarative.declarative_base is the legacy
# location (moved to sqlalchemy.orm in 1.4+) — consider updating the import.
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy.pool import NullPool
from app.config import settings
from typing import Generator

logger = logging.getLogger(__name__)

# Create SQLAlchemy engine with NullPool for OBO (On-Behalf-Of) support
# NullPool ensures each request gets a fresh connection with the user's identity
engine = create_engine(
    settings.database_url,
    poolclass=NullPool,  # Required for OBO - no connection pooling
    echo=settings.debug,  # SQL statement logging in debug mode
    future=True,
)

# Session factory
SessionLocal = sessionmaker(
    autocommit=False,
    autoflush=False,
    bind=engine,
    future=True,
)

# Base class for all models
Base = declarative_base()


def get_db() -> Generator:
    """
    Dependency function to get database session.
    Yields a database session and ensures it's closed after use.

    Usage in FastAPI:
        @app.get("/items")
        def get_items(db: Session = Depends(get_db)):
            return db.query(Item).all()
    """
    db = SessionLocal()
    try:
        yield db
    finally:
        # Always release the connection, even if the request handler raised.
        db.close()


def init_db() -> None:
    """
    Initialize database by creating all tables.

    This is typically called during application startup or migrations.
    Uses checkfirst=True to avoid errors if tables already exist.
    """
    try:
        Base.metadata.create_all(bind=engine, checkfirst=True)
    except Exception as e:
        # Log error but don't crash - tables might already exist
        logger.warning("Database initialization warning (this is usually safe): %s", e)


@event.listens_for(engine, "connect")
def set_sqlite_pragma(dbapi_connection, connection_record):
    """
    Event listener to set SQLite pragmas if using SQLite for testing.
    This is only executed when connecting to SQLite databases.
    """
    # Guard on the configured URL so non-SQLite backends are untouched;
    # SQLite disables foreign-key enforcement by default, so turn it on
    # for every new connection.
    if "sqlite" in settings.database_url:
        cursor = dbapi_connection.cursor()
        cursor.execute("PRAGMA foreign_keys=ON")
        cursor.close()
"""
Database adapter that switches between SQLite (SQLAlchemy) and Databricks Warehouse.
"""
import logging
from typing import List, Dict, Any, Optional, Tuple
from sqlalchemy.orm import Session
from app.config import settings
import app.database as _db_module
from app import models

logger = logging.getLogger(__name__)

# Backend is chosen once, at import time, from the URL scheme.
USE_SQLITE = settings.database_url.startswith("sqlite")

# Log which backend is active
logger.info(f"[DB-ADAPTER] DATABASE_URL={settings.database_url}")
logger.info(f"[DB-ADAPTER] Using backend: {'SQLite' if USE_SQLITE else 'SQL Warehouse'}")
if not USE_SQLITE:
    logger.info(f"[DB-ADAPTER] Warehouse config: catalog={settings.db_catalog}, schema={settings.db_schema}, warehouse_id={settings.databricks_warehouse_id}")

def safe_timestamp(obj, attr):
    """Safely get timestamp attribute that might not exist.

    Returns the attribute's ``isoformat()`` string, or None when the
    attribute is missing or falsy (never set).
    """
    val = getattr(obj, attr, None)
    return val.isoformat() if val else None


class DatabaseAdapter:
    """Adapter that routes to SQLite or Warehouse based on configuration.

    Every method is a staticmethod that returns plain dicts/tuples, so
    callers never see SQLAlchemy model instances or warehouse rows directly.
    """

    @staticmethod
    def _get_session() -> Session:
        """Get a fresh SQLAlchemy session for the SQLite backend."""
        return _db_module.SessionLocal()

    # ==================== COLLECTIONS ====================

    @staticmethod
    def list_collections(page: int = 1, page_size: int = 50) -> Tuple[List[Dict], int]:
        """List collections with pagination.

        Returns ``(rows, total)`` where ``total`` is the unpaginated count.
        """
        if USE_SQLITE:
            db = DatabaseAdapter._get_session()
            try:
                offset = (page - 1) * page_size
                collections = db.query(models.Collection).offset(offset).limit(page_size).all()
                total = db.query(models.Collection).count()

                return (
                    [
                        {
                            "id": c.id,
                            "name": c.name,
                            "description": c.description,
                            "created_at": safe_timestamp(c, 'created_at'),
                            "updated_at": safe_timestamp(c, 'updated_at'),
                        }
                        for c in collections
                    ],
                    total,
                )
            finally:
                db.close()
        else:
            # Deferred import so the SQLite path never needs the warehouse client.
            from app.db_warehouse import WarehouseDB
            return WarehouseDB.list_collections(page, page_size)
    @staticmethod
    def create_collection(name: str, description: str = None) -> Dict:
        """Create new collection and return it as a dict (including its new id)."""
        if USE_SQLITE:
            db = DatabaseAdapter._get_session()
            try:
                collection = models.Collection(name=name, description=description)
                db.add(collection)
                db.commit()
                # Refresh to pick up DB-generated fields (id, timestamps).
                db.refresh(collection)
                return {
                    "id": collection.id,
                    "name": collection.name,
                    "description": collection.description,
                    "created_at": safe_timestamp(collection, 'created_at'),
                    "updated_at": safe_timestamp(collection, 'updated_at'),
                }
            except Exception:
                db.rollback()
                raise
            finally:
                db.close()
        else:
            from app.db_warehouse import WarehouseDB
            return WarehouseDB.create_collection(name, description)

    @staticmethod
    def update_collection(collection_id: int, **kwargs) -> Optional[Dict]:
        """Update a collection; returns the updated dict or None if not found.

        NOTE: values of None are skipped, so a field cannot be cleared to
        None through this method.
        """
        if USE_SQLITE:
            db = DatabaseAdapter._get_session()
            try:
                collection = db.query(models.Collection).filter(models.Collection.id == collection_id).first()
                if not collection:
                    return None
                for key, value in kwargs.items():
                    # Only apply non-None values for attributes the model has.
                    if value is not None and hasattr(collection, key):
                        setattr(collection, key, value)
                db.commit()
                db.refresh(collection)
                return {
                    "id": collection.id,
                    "name": collection.name,
                    "description": collection.description,
                    "created_at": safe_timestamp(collection, 'created_at'),
                    "updated_at": safe_timestamp(collection, 'updated_at'),
                }
            except Exception:
                db.rollback()
                raise
            finally:
                db.close()
        else:
            from app.db_warehouse import WarehouseDB
            return WarehouseDB.update_collection(collection_id, **kwargs)

    @staticmethod
    def delete_collection(collection_id: int) -> None:
        """Delete a collection; no-op if the id does not exist."""
        if USE_SQLITE:
            db = DatabaseAdapter._get_session()
            try:
                collection = db.query(models.Collection).filter(models.Collection.id == collection_id).first()
                if collection:
                    db.delete(collection)
                    db.commit()
            except Exception:
                db.rollback()
                raise
            finally:
                db.close()
        else:
            from app.db_warehouse import WarehouseDB
            WarehouseDB.delete_collection(collection_id)

    # ==================== APPS ====================

    @staticmethod
    def get_app_by_url(url: str) -> Optional[Dict]:
        """Get app by URL, or None if no match."""
        if USE_SQLITE:
            db = DatabaseAdapter._get_session()
            try:
                app = db.query(models.App).filter(models.App.url == url).first()
                if not app:
                    return None
                return {
                    "id": app.id,
                    "name": app.name,
                    "owner": app.owner,
                    "url": app.url,
                    "tags": app.tags,
                    "manifest_url": app.manifest_url,
                }
            finally:
                db.close()
        else:
            from app.db_warehouse import WarehouseDB
            # Warehouse backend has no URL lookup, so scan a large page.
            # NOTE(review): silently misses matches beyond the first 1000 apps.
            apps, _ = WarehouseDB.list_apps(page=1, page_size=1000)
            for app in apps:
                if app.get("url") == url:
                    return app
            return None

    @staticmethod
    def get_app_by_name(name: str) -> Optional[Dict]:
        """Get app by name, or None if no match."""
        if USE_SQLITE:
            db = DatabaseAdapter._get_session()
            try:
                app = db.query(models.App).filter(models.App.name == name).first()
                if not app:
                    return None
                return {
                    "id": app.id,
                    "name": app.name,
                    "owner": app.owner,
                    "url": app.url,
                    "tags": app.tags,
                    "manifest_url": app.manifest_url,
                }
            finally:
                db.close()
        else:
            from app.db_warehouse import WarehouseDB
            # Same linear scan caveat as get_app_by_url (first 1000 apps only).
            apps, _ = WarehouseDB.list_apps(page=1, page_size=1000)
            for app in apps:
                if app.get("name") == name:
                    return app
            return None

    @staticmethod
    def upsert_app_by_name(name: str, owner: str = None, url: str = None,
                           tags: str = None, manifest_url: str = None) -> Dict:
        """
        Upsert app by name. Returns app dict with ID.
        If app exists, updates it. If not, creates it.

        Only non-None arguments are written on the update path, so omitted
        fields keep their stored values.
        """
        existing = DatabaseAdapter.get_app_by_name(name)
        if existing:
            # Update existing app
            update_kwargs = {}
            if owner is not None:
                update_kwargs["owner"] = owner
            if url is not None:
                update_kwargs["url"] = url
            if tags is not None:
                update_kwargs["tags"] = tags
            if manifest_url is not None:
                update_kwargs["manifest_url"] = manifest_url

            if update_kwargs:
                updated = DatabaseAdapter.update_app(existing["id"], **update_kwargs)
                # Fall back to the existing record if the update reported no row.
                return updated if updated else existing
            return existing
        else:
            # Create new app
            return DatabaseAdapter.create_app(name=name, owner=owner, url=url,
                                              tags=tags, manifest_url=manifest_url)

    @staticmethod
    def create_app(name: str, owner: str = None, url: str = None, tags: str = None, manifest_url: str = None) -> Dict:
        """Create a new app and return it as a dict (including its new id)."""
        if USE_SQLITE:
            db = DatabaseAdapter._get_session()
            try:
                app_obj = models.App(name=name, owner=owner, url=url, tags=tags, manifest_url=manifest_url)
                db.add(app_obj)
                db.commit()
                db.refresh(app_obj)
                return {
                    "id": app_obj.id,
                    "name": app_obj.name,
                    "owner": app_obj.owner,
                    "url": app_obj.url,
                    "tags": app_obj.tags,
                    "manifest_url": app_obj.manifest_url,
                }
            except Exception:
                db.rollback()
                raise
            finally:
                db.close()
        else:
            from app.db_warehouse import WarehouseDB
            return WarehouseDB.create_app(name=name, owner=owner, url=url, tags=tags, manifest_url=manifest_url)
    @staticmethod
    def update_app(app_id: int, **kwargs) -> Optional[Dict]:
        """Update an app; returns the updated dict or None if not found.

        NOTE: values of None are skipped — fields cannot be cleared here.
        """
        if USE_SQLITE:
            db = DatabaseAdapter._get_session()
            try:
                app_obj = db.query(models.App).filter(models.App.id == app_id).first()
                if not app_obj:
                    return None
                for key, value in kwargs.items():
                    if value is not None and hasattr(app_obj, key):
                        setattr(app_obj, key, value)
                db.commit()
                db.refresh(app_obj)
                return {
                    "id": app_obj.id,
                    "name": app_obj.name,
                    "owner": app_obj.owner,
                    "url": app_obj.url,
                    "tags": app_obj.tags,
                    "manifest_url": app_obj.manifest_url,
                }
            except Exception:
                db.rollback()
                raise
            finally:
                db.close()
        else:
            from app.db_warehouse import WarehouseDB
            return WarehouseDB.update_app(app_id, **kwargs)

    @staticmethod
    def delete_app(app_id: int) -> None:
        """Delete an app; no-op if the id does not exist."""
        if USE_SQLITE:
            db = DatabaseAdapter._get_session()
            try:
                app_obj = db.query(models.App).filter(models.App.id == app_id).first()
                if app_obj:
                    db.delete(app_obj)
                    db.commit()
            except Exception:
                db.rollback()
                raise
            finally:
                db.close()
        else:
            from app.db_warehouse import WarehouseDB
            WarehouseDB.delete_app(app_id)

    # ==================== MCP SERVERS ====================

    @staticmethod
    def get_mcp_server_by_url(server_url: str) -> Optional[Dict]:
        """Get MCP server by URL, or None if no match."""
        if USE_SQLITE:
            db = DatabaseAdapter._get_session()
            try:
                server = db.query(models.MCPServer).filter(
                    models.MCPServer.server_url == server_url
                ).first()
                if not server:
                    return None
                return {
                    "id": server.id,
                    "app_id": server.app_id,
                    "server_url": server.server_url,
                    "kind": server.kind,
                    "uc_connection": server.uc_connection,
                    "scopes": server.scopes,
                }
            finally:
                db.close()
        else:
            from app.db_warehouse import WarehouseDB
            # Linear scan over the first 1000 servers — no URL index on warehouse.
            servers, _ = WarehouseDB.list_mcp_servers(page=1, page_size=1000, app_id=None)
            for server in servers:
                if server.get("server_url") == server_url:
                    return server
            return None

    @staticmethod
    def upsert_mcp_server_by_url(server_url: str, kind: str = 'managed',
                                 app_id: int = None, uc_connection: str = None,
                                 scopes: str = None) -> Dict:
        """
        Upsert MCP server by URL. Returns server dict with ID.
        If server exists, updates it. If not, creates it.

        Only non-None arguments are applied on the update path. Note that
        ``kind`` defaults to 'managed', so it is always written on update.
        """
        existing = DatabaseAdapter.get_mcp_server_by_url(server_url)
        if existing:
            # Update existing server
            update_kwargs = {}
            if kind is not None:
                update_kwargs["kind"] = kind
            if app_id is not None:
                update_kwargs["app_id"] = app_id
            if uc_connection is not None:
                update_kwargs["uc_connection"] = uc_connection
            if scopes is not None:
                update_kwargs["scopes"] = scopes

            if update_kwargs:
                updated = DatabaseAdapter.update_mcp_server(existing["id"], **update_kwargs)
                return updated if updated else existing
            return existing
        else:
            # Create new server
            return DatabaseAdapter.create_mcp_server(
                server_url=server_url, kind=kind, app_id=app_id,
                uc_connection=uc_connection, scopes=scopes
            )

    @staticmethod
    def create_mcp_server(server_url: str, kind: str = 'managed', app_id: int = None, uc_connection: str = None, scopes: str = None) -> Dict:
        """Create a new MCP server and return it as a dict (including its new id)."""
        if USE_SQLITE:
            db = DatabaseAdapter._get_session()
            try:
                server = models.MCPServer(
                    server_url=server_url,
                    kind=kind,
                    app_id=app_id,
                    uc_connection=uc_connection,
                    scopes=scopes,
                )
                db.add(server)
                db.commit()
                db.refresh(server)
                return {
                    "id": server.id,
                    "app_id": server.app_id,
                    "server_url": server.server_url,
                    "kind": server.kind,
                    "uc_connection": server.uc_connection,
                    "scopes": server.scopes,
                }
            except Exception:
                db.rollback()
                raise
            finally:
                db.close()
        else:
            from app.db_warehouse import WarehouseDB
            return WarehouseDB.create_mcp_server(server_url=server_url, kind=kind, app_id=app_id, uc_connection=uc_connection, scopes=scopes)

    @staticmethod
    def delete_mcp_server(server_id: int) -> None:
        """Delete an MCP server; no-op if the id does not exist."""
        if USE_SQLITE:
            db = DatabaseAdapter._get_session()
            try:
                server = db.query(models.MCPServer).filter(models.MCPServer.id == server_id).first()
                if server:
                    db.delete(server)
                    db.commit()
            except Exception:
                db.rollback()
                raise
            finally:
                db.close()
        else:
            from app.db_warehouse import WarehouseDB
            WarehouseDB.delete_mcp_server(server_id)

    @staticmethod
    def list_mcp_servers(page: int = 1, page_size: int = 50, app_id: int = None, kind: str = None) -> Tuple[List[Dict], int]:
        """List MCP servers with pagination.

        Returns ``(rows, total)``. The SQLite path supports both the
        ``app_id`` and ``kind`` filters.

        NOTE(review): the warehouse branch forwards only (page, page_size,
        app_id) — the ``kind`` filter is silently dropped there; verify
        against WarehouseDB.list_mcp_servers.
        """
        if USE_SQLITE:
            db = DatabaseAdapter._get_session()
            try:
                offset = (page - 1) * page_size
                query = db.query(models.MCPServer)
                if app_id:
                    query = query.filter(models.MCPServer.app_id == app_id)
                if kind:
                    query = query.filter(models.MCPServer.kind == kind)
                servers = query.offset(offset).limit(page_size).all()
                total = query.count()

                return (
                    [
                        {
                            "id": s.id,
                            "app_id": s.app_id,
                            "server_url": s.server_url,
                            "kind": s.kind,
                            "uc_connection": s.uc_connection,
                            "scopes": s.scopes,
                            "created_at": safe_timestamp(s, 'created_at'),
                        }
                        for s in servers
                    ],
                    total,
                )
            finally:
                db.close()
        else:
            from app.db_warehouse import WarehouseDB
            return WarehouseDB.list_mcp_servers(page, page_size, app_id)
key): + setattr(server, key, value) + db.commit() + db.refresh(server) + return { + "id": server.id, + "app_id": server.app_id, + "server_url": server.server_url, + "kind": server.kind, + "uc_connection": server.uc_connection, + "scopes": server.scopes, + "created_at": safe_timestamp(server, 'created_at'), + } + except Exception: + db.rollback() + raise + finally: + db.close() + else: + from app.db_warehouse import WarehouseDB + return WarehouseDB.update_mcp_server(server_id, **kwargs) + + # ==================== TOOLS ==================== + + @staticmethod + def get_tool_by_server_and_name(mcp_server_id: int, name: str) -> Optional[Dict]: + """Get tool by MCP server ID and tool name.""" + if USE_SQLITE: + db = DatabaseAdapter._get_session() + try: + tool = db.query(models.Tool).filter( + models.Tool.mcp_server_id == mcp_server_id, + models.Tool.name == name + ).first() + if not tool: + return None + return { + "id": tool.id, + "mcp_server_id": tool.mcp_server_id, + "name": tool.name, + "description": tool.description, + "parameters": tool.parameters, + } + finally: + db.close() + else: + from app.db_warehouse import WarehouseDB + tools, _ = WarehouseDB.list_tools(page=1, page_size=1000, mcp_server_id=mcp_server_id) + for tool in tools: + if tool.get("name") == name: + return tool + return None + + @staticmethod + def upsert_tool_by_server_and_name(mcp_server_id: int, name: str, + description: str = None, + parameters: str = None) -> Dict: + """ + Upsert tool by server ID and name. Returns tool dict with ID. + If tool exists, updates it. If not, creates it. 
+ """ + existing = DatabaseAdapter.get_tool_by_server_and_name(mcp_server_id, name) + if existing: + # Update existing tool + update_kwargs = {} + if description is not None: + update_kwargs["description"] = description + if parameters is not None: + update_kwargs["parameters"] = parameters + + # Note: DatabaseAdapter doesn't have update_tool, so we'd need to add it + # For now, just return existing if it matches + return existing + else: + # Create new tool + return DatabaseAdapter.create_tool( + mcp_server_id=mcp_server_id, name=name, + description=description, parameters=parameters + ) + + @staticmethod + def list_tools(page: int = 1, page_size: int = 50, mcp_server_id: int = None, + name: str = None, search: str = None, tags: str = None, + owner: str = None) -> Tuple[List[Dict], int]: + """List tools with pagination and filtering.""" + if USE_SQLITE: + db = DatabaseAdapter._get_session() + try: + offset = (page - 1) * page_size + query = db.query(models.Tool) + if mcp_server_id: + query = query.filter(models.Tool.mcp_server_id == mcp_server_id) + if name: + query = query.filter(models.Tool.name.ilike(f"%{name}%")) + if search: + pattern = f"%{search}%" + query = query.filter( + (models.Tool.name.ilike(pattern)) + | (models.Tool.description.ilike(pattern)) + ) + # Filter by parent app tags/owner via MCP server join + if tags or owner: + query = query.join( + models.MCPServer, + models.Tool.mcp_server_id == models.MCPServer.id, + ).join( + models.App, + models.MCPServer.app_id == models.App.id, + ) + if owner: + query = query.filter(models.App.owner.ilike(f"%{owner}%")) + if tags: + tag_list = [t.strip() for t in tags.split(",")] + from sqlalchemy import or_ + tag_filters = [models.App.tags.ilike(f"%{tag}%") for tag in tag_list] + query = query.filter(or_(*tag_filters)) + + total = query.count() + tools = query.offset(offset).limit(page_size).all() + + return ( + [ + { + "id": t.id, + "mcp_server_id": t.mcp_server_id, + "name": t.name, + "description": 
    @staticmethod
    def create_tool(mcp_server_id: int, name: str, description: str = None, parameters: str = None) -> Dict:
        """Create a new tool and return it as a dict (including its new id)."""
        if USE_SQLITE:
            db = DatabaseAdapter._get_session()
            try:
                tool = models.Tool(
                    mcp_server_id=mcp_server_id,
                    name=name,
                    description=description,
                    parameters=parameters,
                )
                db.add(tool)
                db.commit()
                db.refresh(tool)
                return {
                    "id": tool.id,
                    "mcp_server_id": tool.mcp_server_id,
                    "name": tool.name,
                    "description": tool.description,
                    "parameters": tool.parameters,
                }
            except Exception:
                db.rollback()
                raise
            finally:
                db.close()
        else:
            from app.db_warehouse import WarehouseDB
            return WarehouseDB.create_tool(mcp_server_id=mcp_server_id, name=name, description=description, parameters=parameters)

    # ==================== COLLECTION ITEMS ====================

    @staticmethod
    def list_collection_items(collection_id: int) -> List[Dict]:
        """List items in a collection (no pagination)."""
        if USE_SQLITE:
            db = DatabaseAdapter._get_session()
            try:
                items = db.query(models.CollectionItem).filter(
                    models.CollectionItem.collection_id == collection_id
                ).all()

                result = []
                for item in items:
                    # Exactly one of app_id / mcp_server_id / tool_id is
                    # presumably set per item — confirm against the model.
                    item_dict = {
                        "id": item.id,
                        "collection_id": item.collection_id,
                        "app_id": item.app_id,
                        "mcp_server_id": item.mcp_server_id,
                        "tool_id": item.tool_id,
                    }
                    result.append(item_dict)
                return result
            finally:
                db.close()
        else:
            from app.db_warehouse import WarehouseDB
            return WarehouseDB.list_collection_items(collection_id)

    @staticmethod
    def add_collection_item(collection_id: int, app_id: int = None, mcp_server_id: int = None, tool_id: int = None) -> Dict:
        """Add an item to a collection and return it as a dict."""
        if USE_SQLITE:
            db = DatabaseAdapter._get_session()
            try:
                item = models.CollectionItem(
                    collection_id=collection_id,
                    app_id=app_id,
                    mcp_server_id=mcp_server_id,
                    tool_id=tool_id,
                )
                db.add(item)
                db.commit()
                db.refresh(item)
                return {
                    "id": item.id,
                    "collection_id": item.collection_id,
                    "app_id": item.app_id,
                    "mcp_server_id": item.mcp_server_id,
                    "tool_id": item.tool_id,
                }
            except Exception:
                db.rollback()
                raise
            finally:
                db.close()
        else:
            from app.db_warehouse import WarehouseDB
            return WarehouseDB.add_collection_item(collection_id=collection_id, app_id=app_id, mcp_server_id=mcp_server_id, tool_id=tool_id)

    @staticmethod
    def get_collection_item(item_id: int) -> Optional[Dict]:
        """Get a collection item by ID, or None if not found."""
        if USE_SQLITE:
            db = DatabaseAdapter._get_session()
            try:
                item = db.query(models.CollectionItem).filter(models.CollectionItem.id == item_id).first()
                if not item:
                    return None
                return {
                    "id": item.id,
                    "collection_id": item.collection_id,
                    "app_id": item.app_id,
                    "mcp_server_id": item.mcp_server_id,
                    "tool_id": item.tool_id,
                }
            finally:
                db.close()
        else:
            from app.db_warehouse import WarehouseDB
            return WarehouseDB.get_collection_item(item_id)

    @staticmethod
    def delete_collection_item(item_id: int) -> None:
        """Delete a collection item; no-op if the id does not exist."""
        if USE_SQLITE:
            db = DatabaseAdapter._get_session()
            try:
                item = db.query(models.CollectionItem).filter(models.CollectionItem.id == item_id).first()
                if item:
                    db.delete(item)
                    db.commit()
            except Exception:
                db.rollback()
                raise
            finally:
                db.close()
        else:
            from app.db_warehouse import WarehouseDB
            WarehouseDB.delete_collection_item(item_id)
    @staticmethod
    def get_mcp_server(server_id: int) -> Optional[Dict]:
        """Get MCP server by ID, or None if not found.

        NOTE(review): this SQLite dict omits uc_connection/scopes, unlike
        get_mcp_server_by_url — callers relying on those fields must use
        the URL lookup; consider unifying the shapes.
        """
        if USE_SQLITE:
            db = DatabaseAdapter._get_session()
            try:
                server = db.query(models.MCPServer).filter(models.MCPServer.id == server_id).first()
                if not server:
                    return None
                return {
                    "id": server.id,
                    "app_id": server.app_id,
                    "server_url": server.server_url,
                    "kind": server.kind,
                }
            finally:
                db.close()
        else:
            from app.db_warehouse import WarehouseDB
            return WarehouseDB.get_mcp_server(server_id)

    @staticmethod
    def get_tool(tool_id: int) -> Optional[Dict]:
        """Get tool by ID, or None if not found."""
        if USE_SQLITE:
            db = DatabaseAdapter._get_session()
            try:
                tool = db.query(models.Tool).filter(models.Tool.id == tool_id).first()
                if not tool:
                    return None
                return {
                    "id": tool.id,
                    "mcp_server_id": tool.mcp_server_id,
                    "name": tool.name,
                    "description": tool.description,
                    "parameters": tool.parameters,
                }
            finally:
                db.close()
        else:
            from app.db_warehouse import WarehouseDB
            return WarehouseDB.get_tool(tool_id)

    # ==================== SUPERVISORS ====================
    # NOTE(review): the supervisor methods are SQLite-only; the warehouse
    # branches return empty/None sentinels rather than delegating.

    @staticmethod
    def list_supervisors() -> Tuple[List[Dict], int]:
        """List all generated supervisors, newest first.

        Returns ``(rows, total)``; ([], 0) on the warehouse backend.
        """
        if USE_SQLITE:
            db = DatabaseAdapter._get_session()
            try:
                supervisors = db.query(models.Supervisor).order_by(models.Supervisor.generated_at.desc()).all()
                return (
                    [
                        {
                            "id": s.id,
                            "collection_id": s.collection_id,
                            "app_name": s.app_name,
                            "generated_at": s.generated_at.isoformat() if s.generated_at else None,
                            "deployed_url": s.deployed_url,
                        }
                        for s in supervisors
                    ],
                    len(supervisors),
                )
            finally:
                db.close()
        else:
            return [], 0

    @staticmethod
    def create_supervisor(collection_id: int, app_name: str, deployed_url: str = None) -> Dict:
        """Create supervisor metadata record.

        Returns the new record as a dict, or {} on the warehouse backend.
        """
        if USE_SQLITE:
            db = DatabaseAdapter._get_session()
            try:
                from datetime import datetime
                supervisor = models.Supervisor(
                    collection_id=collection_id,
                    app_name=app_name,
                    # Naive UTC timestamp; datetime.utcnow() is deprecated in
                    # Python 3.12 — consider datetime.now(timezone.utc), but
                    # note that would change stored/serialized values.
                    generated_at=datetime.utcnow(),
                    deployed_url=deployed_url,
                )
                db.add(supervisor)
                db.commit()
                db.refresh(supervisor)
                return {
                    "id": supervisor.id,
                    "collection_id": supervisor.collection_id,
                    "app_name": supervisor.app_name,
                    "generated_at": supervisor.generated_at.isoformat() if supervisor.generated_at else None,
                    "deployed_url": supervisor.deployed_url,
                }
            except Exception:
                db.rollback()
                raise
            finally:
                db.close()
        else:
            return {}

    @staticmethod
    def get_supervisor(supervisor_id: int) -> Optional[Dict]:
        """Get supervisor by ID; always None on the warehouse backend."""
        if USE_SQLITE:
            db = DatabaseAdapter._get_session()
            try:
                s = db.query(models.Supervisor).filter(models.Supervisor.id == supervisor_id).first()
                if not s:
                    return None
                return {
                    "id": s.id,
                    "collection_id": s.collection_id,
                    "app_name": s.app_name,
                    "generated_at": s.generated_at.isoformat() if s.generated_at else None,
                    "deployed_url": s.deployed_url,
                }
            finally:
                db.close()
        else:
            return None
Returns True if found and deleted.""" + if USE_SQLITE: + db = DatabaseAdapter._get_session() + try: + s = db.query(models.Supervisor).filter(models.Supervisor.id == supervisor_id).first() + if not s: + return False + db.delete(s) + db.commit() + return True + except Exception: + db.rollback() + raise + finally: + db.close() + else: + return False + + + # ==================== AGENTS ==================== + + @staticmethod + def get_agent_by_name(name: str) -> Optional[Dict]: + """Get agent by name.""" + if USE_SQLITE: + db = DatabaseAdapter._get_session() + try: + agent = db.query(models.Agent).filter(models.Agent.name == name).first() + if not agent: + return None + return DatabaseAdapter._agent_to_dict(agent) + finally: + db.close() + else: + from app.db_warehouse import WarehouseDB as WDB + agents, _ = WDB.list_agents(page=1, page_size=1000) + for agent in agents: + if agent.get("name") == name: + return agent + return None + + @staticmethod + def upsert_agent_by_name(name: str, **kwargs) -> Dict: + """ + Upsert agent by name. Returns agent dict with ID. + If agent exists, updates factual fields only (preserves manual fields). + If not, creates new agent with status="discovered". 
"""
        existing = DatabaseAdapter.get_agent_by_name(name)
        if existing:
            # Update factual (discovery-derived) fields only; None values are
            # dropped so an absent probe result never clears stored data.
            update_kwargs = {}
            for key in ["endpoint_url", "description", "capabilities", "a2a_capabilities",
                        "skills", "protocol_version", "app_id"]:
                if key in kwargs and kwargs[key] is not None:
                    update_kwargs[key] = kwargs[key]

            if update_kwargs:
                updated = DatabaseAdapter.update_agent(existing["id"], **update_kwargs)
                # Fall back to the pre-update record if the update found no row.
                return updated if updated else existing
            return existing
        else:
            # Create new agent; default to "discovered" status unless given.
            create_kwargs = {k: v for k, v in kwargs.items() if v is not None}
            if "status" not in create_kwargs:
                create_kwargs["status"] = "discovered"
            return DatabaseAdapter.create_agent(name=name, **create_kwargs)

    @staticmethod
    def _agent_to_dict(a) -> Dict:
        """Convert Agent ORM object to dict."""
        return {
            "id": a.id,
            "name": a.name,
            "description": a.description,
            "capabilities": a.capabilities,
            "status": a.status,
            "collection_id": a.collection_id,
            "endpoint_url": a.endpoint_url,
            "auth_token": a.auth_token,
            "a2a_capabilities": a.a2a_capabilities,
            "skills": a.skills,
            "protocol_version": a.protocol_version,
            "system_prompt": a.system_prompt,
            "created_at": safe_timestamp(a, 'created_at'),
            "updated_at": safe_timestamp(a, 'updated_at'),
        }

    @staticmethod
    def list_agents(page: int = 1, page_size: int = 50) -> Tuple[List[Dict], int]:
        """List agents with pagination. Returns (items, total)."""
        if USE_SQLITE:
            db = DatabaseAdapter._get_session()
            try:
                offset = (page - 1) * page_size
                agents = db.query(models.Agent).offset(offset).limit(page_size).all()
                total = db.query(models.Agent).count()
                return ([DatabaseAdapter._agent_to_dict(a) for a in agents], total)
            finally:
                db.close()
        else:
            from app.db_warehouse import WarehouseDB as WDB
            return WDB.list_agents(page, page_size)

    @staticmethod
    def get_agent(agent_id: int) -> Optional[Dict]:
        """Get agent by ID. Returns a dict, or None when absent."""
        if USE_SQLITE:
            db = DatabaseAdapter._get_session()
            try:
                agent = db.query(models.Agent).filter(models.Agent.id == agent_id).first()
                if not agent:
                    return None
                return DatabaseAdapter._agent_to_dict(agent)
            finally:
                db.close()
        else:
            from app.db_warehouse import WarehouseDB as WDB
            return WDB.get_agent(agent_id)

    @staticmethod
    def create_agent(name: str, description: str = None, capabilities: str = None,
                     status: str = "draft", collection_id: int = None,
                     endpoint_url: str = None, auth_token: str = None,
                     a2a_capabilities: str = None, skills: str = None,
                     protocol_version: str = None, system_prompt: str = None) -> Dict:
        """Create a new agent and return it as a dict."""
        if USE_SQLITE:
            db = DatabaseAdapter._get_session()
            try:
                agent = models.Agent(
                    name=name,
                    description=description,
                    capabilities=capabilities,
                    status=status or "draft",  # guard against explicit None/empty status
                    collection_id=collection_id,
                    endpoint_url=endpoint_url,
                    auth_token=auth_token,
                    a2a_capabilities=a2a_capabilities,
                    skills=skills,
                    protocol_version=protocol_version,
                    system_prompt=system_prompt,
                )
                db.add(agent)
                db.commit()
                db.refresh(agent)
                return DatabaseAdapter._agent_to_dict(agent)
            except Exception:
                db.rollback()
                raise
            finally:
                db.close()
        else:
            # Warehouse backend persists only a reduced field set; the A2A
            # fields (auth_token, skills, etc.) are SQLite-only.
            from app.db_warehouse import WarehouseDB as WDB
            return WDB.create_agent(name=name, description=description,
                                    capabilities=capabilities, status=status,
                                    collection_id=collection_id, endpoint_url=endpoint_url)

    @staticmethod
    def update_agent(agent_id: int, **kwargs) -> Optional[Dict]:
        """Update an agent. Returns the updated dict, or None when absent."""
        if USE_SQLITE:
            db = DatabaseAdapter._get_session()
            try:
                agent = db.query(models.Agent).filter(models.Agent.id == agent_id).first()
                if not agent:
                    return None
                for key, value in kwargs.items():
                    # None values are skipped, so fields cannot be cleared here;
                    # unknown keys are silently ignored via hasattr().
                    if value is not None and hasattr(agent, key):
                        setattr(agent, key, value)
                db.commit()
                db.refresh(agent)
                return DatabaseAdapter._agent_to_dict(agent)
            except Exception:
                db.rollback()
                raise
            finally:
                db.close()
        else:
            from app.db_warehouse import WarehouseDB as WDB
            return WDB.update_agent(agent_id, **kwargs)

    @staticmethod
    def delete_agent(agent_id: int) -> None:
        """Delete an agent. Silently a no-op when the row is absent."""
        if USE_SQLITE:
            db = DatabaseAdapter._get_session()
            try:
                agent = db.query(models.Agent).filter(models.Agent.id == agent_id).first()
                if agent:
                    db.delete(agent)
                    db.commit()
            except Exception:
                db.rollback()
                raise
            finally:
                db.close()
        else:
            from app.db_warehouse import WarehouseDB as WDB
            WDB.delete_agent(agent_id)

    @staticmethod
    def list_active_a2a_agents() -> List[Dict]:
        """List agents that are active and have an endpoint_url (A2A-capable)."""
        if USE_SQLITE:
            db = DatabaseAdapter._get_session()
            try:
                agents = db.query(models.Agent).filter(
                    models.Agent.status == "active",
                    models.Agent.endpoint_url.isnot(None),
                ).all()
                return [DatabaseAdapter._agent_to_dict(a) for a in agents]
            finally:
                db.close()
        else:
            # A2A routing is only supported on the SQLite backend.
            return []

    # ==================== A2A TASKS ====================

    @staticmethod
    def _a2a_task_to_dict(t) -> Dict:
        """Convert A2ATask ORM object to dict."""
        return {
            "id": t.id,
            "agent_id": t.agent_id,
            "context_id": t.context_id,
            "status": t.status,
            "messages": t.messages,
            "artifacts": t.artifacts,
            "metadata_json": t.metadata_json,
            "webhook_url": t.webhook_url,
            "webhook_token": t.webhook_token,
            "created_at": safe_timestamp(t, 'created_at'),
            "updated_at": safe_timestamp(t, 'updated_at'),
        }

    @staticmethod
    def create_a2a_task(task_id: str, agent_id: int, context_id: str = None,
                        status: str = "submitted", messages: str = None,
                        metadata_json: str = None) -> Dict:
        """Create a new A2A task. Note: task IDs are caller-supplied strings."""
        if USE_SQLITE:
            db = DatabaseAdapter._get_session()
            try:
                task = models.A2ATask(
                    id=task_id,
                    agent_id=agent_id,
                    context_id=context_id,
                    status=status,
                    messages=messages,
                    metadata_json=metadata_json,
                )
                db.add(task)
                db.commit()
                db.refresh(task)
                return DatabaseAdapter._a2a_task_to_dict(task)
except Exception:
                db.rollback()
                raise
            finally:
                db.close()
        else:
            # A2A tasks are only persisted on the SQLite backend.
            return {}

    @staticmethod
    def get_a2a_task(task_id: str) -> Optional[Dict]:
        """Get A2A task by ID. Returns a dict, or None when absent."""
        if USE_SQLITE:
            db = DatabaseAdapter._get_session()
            try:
                task = db.query(models.A2ATask).filter(models.A2ATask.id == task_id).first()
                if not task:
                    return None
                return DatabaseAdapter._a2a_task_to_dict(task)
            finally:
                db.close()
        else:
            return None

    @staticmethod
    def update_a2a_task(task_id: str, **kwargs) -> Optional[Dict]:
        """Update an A2A task. Returns the updated dict, or None when absent."""
        if USE_SQLITE:
            db = DatabaseAdapter._get_session()
            try:
                task = db.query(models.A2ATask).filter(models.A2ATask.id == task_id).first()
                if not task:
                    return None
                for key, value in kwargs.items():
                    # None values are skipped (fields cannot be cleared here).
                    if value is not None and hasattr(task, key):
                        setattr(task, key, value)
                db.commit()
                db.refresh(task)
                return DatabaseAdapter._a2a_task_to_dict(task)
            except Exception:
                db.rollback()
                raise
            finally:
                db.close()
        else:
            return None

    @staticmethod
    def list_a2a_tasks(agent_id: int = None, context_id: str = None,
                       status: str = None, page: int = 1,
                       page_size: int = 50) -> Tuple[List[Dict], int]:
        """List A2A tasks with optional filters and pagination.

        Returns (items, total) ordered newest-first by created_at.
        """
        if USE_SQLITE:
            db = DatabaseAdapter._get_session()
            try:
                query = db.query(models.A2ATask)
                # NOTE(review): truthiness checks mean a filter value of 0/""
                # is treated as "no filter"; fine for positive IDs and
                # non-empty strings — confirm that assumption holds.
                if agent_id:
                    query = query.filter(models.A2ATask.agent_id == agent_id)
                if context_id:
                    query = query.filter(models.A2ATask.context_id == context_id)
                if status:
                    query = query.filter(models.A2ATask.status == status)

                total = query.count()
                offset = (page - 1) * page_size
                tasks = query.order_by(models.A2ATask.created_at.desc()).offset(offset).limit(page_size).all()
                return ([DatabaseAdapter._a2a_task_to_dict(t) for t in tasks], total)
            finally:
                db.close()
        else:
            return [], 0

    @staticmethod
    def delete_a2a_task(task_id: str) -> None:
        """Delete an A2A task. Silently a no-op when the row is absent."""
        if USE_SQLITE:
            db = DatabaseAdapter._get_session()
            try:
task = db.query(models.A2ATask).filter(models.A2ATask.id == task_id).first()
                if task:
                    db.delete(task)
                    db.commit()
            except Exception:
                db.rollback()
                raise
            finally:
                db.close()

    # ==================== CATALOG ASSETS ====================
    # Catalog/workspace asset methods below do not branch on USE_SQLITE:
    # they always use the local session, and import their models lazily.

    @staticmethod
    def _catalog_asset_to_dict(a) -> Dict:
        """Convert CatalogAsset ORM object to dict."""
        return {
            "id": a.id,
            "asset_type": a.asset_type,
            "catalog": a.catalog,
            "schema_name": a.schema_name,
            "name": a.name,
            "full_name": a.full_name,
            "owner": a.owner,
            "comment": a.comment,
            "columns_json": a.columns_json,
            "tags_json": a.tags_json,
            "properties_json": a.properties_json,
            "data_source_format": a.data_source_format,
            "table_type": a.table_type,
            "row_count": a.row_count,
            "created_at": safe_timestamp(a, "created_at"),
            "updated_at": safe_timestamp(a, "updated_at"),
            "last_indexed_at": safe_timestamp(a, "last_indexed_at"),
        }

    @staticmethod
    def list_catalog_assets(
        page: int = 1,
        page_size: int = 50,
        asset_type: str = None,
        catalog: str = None,
        schema_name: str = None,
        search: str = None,
        owner: str = None,
    ) -> Tuple[List[Dict], int]:
        """List catalog assets with optional filters and pagination.

        `search` does a case-insensitive substring match across name,
        comment, full_name, and the serialized column metadata.
        Returns (items, total) ordered by full_name.
        """
        from app.models.catalog_asset import CatalogAsset

        db = DatabaseAdapter._get_session()
        try:
            query = db.query(CatalogAsset)
            if asset_type:
                query = query.filter(CatalogAsset.asset_type == asset_type)
            if catalog:
                query = query.filter(CatalogAsset.catalog == catalog)
            if schema_name:
                query = query.filter(CatalogAsset.schema_name == schema_name)
            if owner:
                query = query.filter(CatalogAsset.owner == owner)
            if search:
                pattern = f"%{search}%"
                query = query.filter(
                    (CatalogAsset.name.ilike(pattern))
                    | (CatalogAsset.comment.ilike(pattern))
                    | (CatalogAsset.full_name.ilike(pattern))
                    | (CatalogAsset.columns_json.ilike(pattern))
                )

            total = query.count()
            offset = (page - 1) * page_size
            assets = query.order_by(CatalogAsset.full_name).offset(offset).limit(page_size).all()
            return ([DatabaseAdapter._catalog_asset_to_dict(a) for a in assets], total)
        finally:
            db.close()

    @staticmethod
    def get_catalog_asset(asset_id: int) -> Optional[Dict]:
        """Get catalog asset by ID. Returns a dict, or None when absent."""
        from app.models.catalog_asset import CatalogAsset

        db = DatabaseAdapter._get_session()
        try:
            asset = db.query(CatalogAsset).filter(CatalogAsset.id == asset_id).first()
            if not asset:
                return None
            return DatabaseAdapter._catalog_asset_to_dict(asset)
        finally:
            db.close()

    @staticmethod
    def get_catalog_asset_by_full_name(full_name: str) -> Optional[Dict]:
        """Get catalog asset by three-level namespace (catalog.schema.name)."""
        from app.models.catalog_asset import CatalogAsset

        db = DatabaseAdapter._get_session()
        try:
            asset = db.query(CatalogAsset).filter(CatalogAsset.full_name == full_name).first()
            if not asset:
                return None
            return DatabaseAdapter._catalog_asset_to_dict(asset)
        finally:
            db.close()

    @staticmethod
    def create_catalog_asset(**kwargs) -> Dict:
        """Create a new catalog asset from keyword column values."""
        from app.models.catalog_asset import CatalogAsset

        db = DatabaseAdapter._get_session()
        try:
            # Convert last_indexed_at string to datetime if provided;
            # trailing "Z" is normalized for fromisoformat (pre-3.11 compat).
            if "last_indexed_at" in kwargs and isinstance(kwargs["last_indexed_at"], str):
                from datetime import datetime as dt
                kwargs["last_indexed_at"] = dt.fromisoformat(kwargs["last_indexed_at"].replace("Z", "+00:00"))

            asset = CatalogAsset(**kwargs)
            db.add(asset)
            db.commit()
            db.refresh(asset)
            return DatabaseAdapter._catalog_asset_to_dict(asset)
        except Exception:
            db.rollback()
            raise
        finally:
            db.close()

    @staticmethod
    def update_catalog_asset(asset_id: int, **kwargs) -> Optional[Dict]:
        """Update a catalog asset. Returns the updated dict, or None.

        Unlike update_agent(), None values are applied here, so fields CAN
        be cleared explicitly.
        """
        from app.models.catalog_asset import CatalogAsset

        db = DatabaseAdapter._get_session()
        try:
            asset = db.query(CatalogAsset).filter(CatalogAsset.id == asset_id).first()
            if not asset:
                return None

            # Convert last_indexed_at string to datetime if provided
            if "last_indexed_at" in kwargs and isinstance(kwargs["last_indexed_at"], str):
                from datetime import datetime as dt
                kwargs["last_indexed_at"] = dt.fromisoformat(kwargs["last_indexed_at"].replace("Z", "+00:00"))

            for key, value in kwargs.items():
                if hasattr(asset, key):
                    setattr(asset, key, value)
            db.commit()
            db.refresh(asset)
            return DatabaseAdapter._catalog_asset_to_dict(asset)
        except Exception:
            db.rollback()
            raise
        finally:
            db.close()

    @staticmethod
    def clear_catalog_assets() -> None:
        """Delete all catalog assets (used before a full re-index)."""
        from app.models.catalog_asset import CatalogAsset

        db = DatabaseAdapter._get_session()
        try:
            db.query(CatalogAsset).delete()
            db.commit()
        except Exception:
            db.rollback()
            raise
        finally:
            db.close()

    # ==================== WORKSPACE ASSETS ====================

    @staticmethod
    def _workspace_asset_to_dict(a) -> Dict:
        """Convert WorkspaceAsset ORM object to dict."""
        return {
            "id": a.id,
            "asset_type": a.asset_type,
            "workspace_host": a.workspace_host,
            "path": a.path,
            "name": a.name,
            "owner": a.owner,
            "description": a.description,
            "language": a.language,
            "tags_json": a.tags_json,
            "metadata_json": a.metadata_json,
            "content_preview": a.content_preview,
            "resource_id": a.resource_id,
            "created_at": safe_timestamp(a, "created_at"),
            "updated_at": safe_timestamp(a, "updated_at"),
            "last_indexed_at": safe_timestamp(a, "last_indexed_at"),
        }

    @staticmethod
    def list_workspace_assets(
        page: int = 1,
        page_size: int = 50,
        asset_type: str = None,
        search: str = None,
        owner: str = None,
        workspace_host: str = None,
    ) -> Tuple[List[Dict], int]:
        """List workspace assets with optional filters and pagination.

        `search` matches name, description, content preview, and path
        case-insensitively. Returns (items, total) ordered by name.
        """
        from app.models.workspace_asset import WorkspaceAsset

        db = DatabaseAdapter._get_session()
        try:
            query = db.query(WorkspaceAsset)
            if asset_type:
                query = query.filter(WorkspaceAsset.asset_type == asset_type)
            if workspace_host:
                query = query.filter(WorkspaceAsset.workspace_host == workspace_host)
            if owner:
                query = query.filter(WorkspaceAsset.owner == owner)
            if search:
                pattern = f"%{search}%"
                query = query.filter(
                    (WorkspaceAsset.name.ilike(pattern))
                    | (WorkspaceAsset.description.ilike(pattern))
                    | (WorkspaceAsset.content_preview.ilike(pattern))
                    | (WorkspaceAsset.path.ilike(pattern))
                )

            total = query.count()
            offset = (page - 1) * page_size
            assets = query.order_by(WorkspaceAsset.name).offset(offset).limit(page_size).all()
            return ([DatabaseAdapter._workspace_asset_to_dict(a) for a in assets], total)
        finally:
            db.close()

    @staticmethod
    def get_workspace_asset(asset_id: int) -> Optional[Dict]:
        """Get workspace asset by ID. Returns a dict, or None when absent."""
        from app.models.workspace_asset import WorkspaceAsset

        db = DatabaseAdapter._get_session()
        try:
            asset = db.query(WorkspaceAsset).filter(WorkspaceAsset.id == asset_id).first()
            if not asset:
                return None
            return DatabaseAdapter._workspace_asset_to_dict(asset)
        finally:
            db.close()

    @staticmethod
    def get_workspace_asset_by_path(workspace_host: str, path: str) -> Optional[Dict]:
        """Get workspace asset by host + path (the asset's natural key)."""
        from app.models.workspace_asset import WorkspaceAsset

        db = DatabaseAdapter._get_session()
        try:
            asset = db.query(WorkspaceAsset).filter(
                WorkspaceAsset.workspace_host == workspace_host,
                WorkspaceAsset.path == path,
            ).first()
            if not asset:
                return None
            return DatabaseAdapter._workspace_asset_to_dict(asset)
        finally:
            db.close()

    @staticmethod
    def create_workspace_asset(**kwargs) -> Dict:
        """Create a new workspace asset from keyword column values."""
        from app.models.workspace_asset import WorkspaceAsset

        db = DatabaseAdapter._get_session()
        try:
            # Normalize an ISO-string last_indexed_at ("Z" suffix included)
            # into a datetime before handing kwargs to the ORM.
            if "last_indexed_at" in kwargs and isinstance(kwargs["last_indexed_at"], str):
                from datetime import datetime as dt
                kwargs["last_indexed_at"] = dt.fromisoformat(kwargs["last_indexed_at"].replace("Z", "+00:00"))

            asset = WorkspaceAsset(**kwargs)
            db.add(asset)
            db.commit()
            db.refresh(asset)
            return DatabaseAdapter._workspace_asset_to_dict(asset)
        except Exception:
            db.rollback()
            raise
        finally:
            db.close()

    @staticmethod
    def update_workspace_asset(asset_id: int, **kwargs) -> Optional[Dict]:
        """Update a workspace asset. None values are applied (fields clearable)."""
        from app.models.workspace_asset import WorkspaceAsset

        db = DatabaseAdapter._get_session()
        try:
            asset = db.query(WorkspaceAsset).filter(WorkspaceAsset.id == asset_id).first()
            if not asset:
                return None

            if "last_indexed_at" in kwargs and isinstance(kwargs["last_indexed_at"], str):
                from datetime import datetime as dt
                kwargs["last_indexed_at"] = dt.fromisoformat(kwargs["last_indexed_at"].replace("Z", "+00:00"))

            for key, value in kwargs.items():
                if hasattr(asset, key):
                    setattr(asset, key, value)
            db.commit()
            db.refresh(asset)
            return DatabaseAdapter._workspace_asset_to_dict(asset)
        except Exception:
            db.rollback()
            raise
        finally:
            db.close()

    @staticmethod
    def clear_workspace_assets() -> None:
        """Delete all workspace assets (used before a full re-index)."""
        from app.models.workspace_asset import WorkspaceAsset

        db = DatabaseAdapter._get_session()
        try:
            db.query(WorkspaceAsset).delete()
            db.commit()
        except Exception:
            db.rollback()
            raise
        finally:
            db.close()

    # ==================== ASSET EMBEDDINGS ====================

    @staticmethod
    def _embedding_to_dict(e) -> Dict:
        """Convert AssetEmbedding ORM object to dict."""
        return {
            "id": e.id,
            "asset_type": e.asset_type,
            "asset_id": e.asset_id,
            "text_content": e.text_content,
            "embedding_json": e.embedding_json,
            "embedding_model": e.embedding_model,
            "dimension": e.dimension,
            "created_at": safe_timestamp(e, "created_at"),
            "updated_at": safe_timestamp(e, "updated_at"),
        }

    @staticmethod
    def get_asset_embedding(asset_type: str, asset_id: int) -> Optional[Dict]:
"""Get embedding for a specific asset.

        Looks up by (asset_type, asset_id) pair; returns a dict or None.
        """
        from app.models.asset_embedding import AssetEmbedding

        db = DatabaseAdapter._get_session()
        try:
            emb = db.query(AssetEmbedding).filter(
                AssetEmbedding.asset_type == asset_type,
                AssetEmbedding.asset_id == asset_id,
            ).first()
            if not emb:
                return None
            return DatabaseAdapter._embedding_to_dict(emb)
        finally:
            db.close()

    @staticmethod
    def create_asset_embedding(**kwargs) -> Dict:
        """Create a new asset embedding from keyword column values."""
        from app.models.asset_embedding import AssetEmbedding

        db = DatabaseAdapter._get_session()
        try:
            emb = AssetEmbedding(**kwargs)
            db.add(emb)
            db.commit()
            db.refresh(emb)
            return DatabaseAdapter._embedding_to_dict(emb)
        except Exception:
            db.rollback()
            raise
        finally:
            db.close()

    @staticmethod
    def update_asset_embedding(embedding_id: int, **kwargs) -> Optional[Dict]:
        """Update an existing embedding. None values are applied as-is."""
        from app.models.asset_embedding import AssetEmbedding

        db = DatabaseAdapter._get_session()
        try:
            emb = db.query(AssetEmbedding).filter(AssetEmbedding.id == embedding_id).first()
            if not emb:
                return None
            for key, value in kwargs.items():
                if hasattr(emb, key):
                    setattr(emb, key, value)
            db.commit()
            db.refresh(emb)
            return DatabaseAdapter._embedding_to_dict(emb)
        except Exception:
            db.rollback()
            raise
        finally:
            db.close()

    @staticmethod
    def list_all_asset_embeddings() -> List[Dict]:
        """List all embeddings (for in-memory similarity search).

        Loads every row into memory; callers perform similarity scoring
        themselves.
        """
        from app.models.asset_embedding import AssetEmbedding

        db = DatabaseAdapter._get_session()
        try:
            embeddings = db.query(AssetEmbedding).all()
            return [DatabaseAdapter._embedding_to_dict(e) for e in embeddings]
        finally:
            db.close()

    @staticmethod
    def get_embedding_stats() -> Dict:
        """Get embedding coverage statistics.

        total_assets is the sum over all embeddable asset tables; pending is
        clamped at zero in case stale embeddings outnumber current assets.
        """
        from app.models.asset_embedding import AssetEmbedding
        from app.models.catalog_asset import CatalogAsset
        from app.models.workspace_asset import WorkspaceAsset

        db = DatabaseAdapter._get_session()
        try:
            embedded_count = db.query(AssetEmbedding).count()
            catalog_count = db.query(CatalogAsset).count()
            workspace_count = db.query(WorkspaceAsset).count()
            app_count = db.query(models.App).count()
            tool_count = db.query(models.Tool).count()
            agent_count = db.query(models.Agent).count()

            total_assets = catalog_count + workspace_count + app_count + tool_count + agent_count

            return {
                "total_assets": total_assets,
                "embedded_assets": embedded_count,
                "pending_assets": max(0, total_assets - embedded_count),
            }
        finally:
            db.close()

    @staticmethod
    def clear_asset_embeddings() -> None:
        """Delete all embeddings (used before a full re-embed)."""
        from app.models.asset_embedding import AssetEmbedding

        db = DatabaseAdapter._get_session()
        try:
            db.query(AssetEmbedding).delete()
            db.commit()
        except Exception:
            db.rollback()
            raise
        finally:
            db.close()

    # ==================== ASSET RELATIONSHIPS ====================

    @staticmethod
    def _relationship_to_dict(r) -> Dict:
        """Convert AssetRelationship ORM object to dict."""
        return {
            "id": r.id,
            "source_type": r.source_type,
            "source_id": r.source_id,
            "source_name": r.source_name,
            "target_type": r.target_type,
            "target_id": r.target_id,
            "target_name": r.target_name,
            "relationship_type": r.relationship_type,
            "metadata_json": r.metadata_json,
            "discovered_at": safe_timestamp(r, "discovered_at"),
            "updated_at": safe_timestamp(r, "updated_at"),
        }

    @staticmethod
    def get_asset_relationship(
        source_type: str, source_id: int,
        target_type: str, target_id: int,
        relationship_type: str,
    ) -> Optional[Dict]:
        """Find a specific relationship edge by its full identifying tuple."""
        from app.models.asset_relationship import AssetRelationship

        db = DatabaseAdapter._get_session()
        try:
            rel = db.query(AssetRelationship).filter(
                AssetRelationship.source_type == source_type,
                AssetRelationship.source_id == source_id,
                AssetRelationship.target_type == target_type,
                AssetRelationship.target_id == target_id,
                AssetRelationship.relationship_type == relationship_type,
            ).first()
            if not rel:
                return None
            return DatabaseAdapter._relationship_to_dict(rel)
        finally:
            db.close()

    @staticmethod
    def create_asset_relationship(**kwargs) -> Dict:
        """Create a new relationship edge from keyword column values."""
        from app.models.asset_relationship import AssetRelationship

        db = DatabaseAdapter._get_session()
        try:
            rel = AssetRelationship(**kwargs)
            db.add(rel)
            db.commit()
            db.refresh(rel)
            return DatabaseAdapter._relationship_to_dict(rel)
        except Exception:
            db.rollback()
            raise
        finally:
            db.close()

    @staticmethod
    def update_asset_relationship(rel_id: int, **kwargs) -> Optional[Dict]:
        """Update an existing relationship. None values are applied as-is."""
        from app.models.asset_relationship import AssetRelationship

        db = DatabaseAdapter._get_session()
        try:
            rel = db.query(AssetRelationship).filter(AssetRelationship.id == rel_id).first()
            if not rel:
                return None
            for key, value in kwargs.items():
                if hasattr(rel, key):
                    setattr(rel, key, value)
            db.commit()
            db.refresh(rel)
            return DatabaseAdapter._relationship_to_dict(rel)
        except Exception:
            db.rollback()
            raise
        finally:
            db.close()

    @staticmethod
    def get_relationships_by_source(source_type: str, source_id: int) -> List[Dict]:
        """Get all relationships where this asset is the source."""
        from app.models.asset_relationship import AssetRelationship

        db = DatabaseAdapter._get_session()
        try:
            rels = db.query(AssetRelationship).filter(
                AssetRelationship.source_type == source_type,
                AssetRelationship.source_id == source_id,
            ).all()
            return [DatabaseAdapter._relationship_to_dict(r) for r in rels]
        finally:
            db.close()

    @staticmethod
    def get_relationships_by_target(target_type: str, target_id: int) -> List[Dict]:
        """Get all relationships where this asset is the target."""
        from app.models.asset_relationship import AssetRelationship

        db = DatabaseAdapter._get_session()
        try:
            rels = db.query(AssetRelationship).filter(
                AssetRelationship.target_type == target_type,
                AssetRelationship.target_id == target_id,
            ).all()
            return [DatabaseAdapter._relationship_to_dict(r) for r in rels]
        finally:
            db.close()

    @staticmethod
    def list_asset_relationships(
        source_type: str = None,
        target_type: str = None,
        relationship_type: str = None,
        page: int = 1,
        page_size: int = 100,
    ) -> Tuple[List[Dict], int]:
        """List relationships with optional filters.

        Returns (items, total) ordered newest-first by discovered_at.
        """
        from app.models.asset_relationship import AssetRelationship

        db = DatabaseAdapter._get_session()
        try:
            query = db.query(AssetRelationship)
            if source_type:
                query = query.filter(AssetRelationship.source_type == source_type)
            if target_type:
                query = query.filter(AssetRelationship.target_type == target_type)
            if relationship_type:
                query = query.filter(AssetRelationship.relationship_type == relationship_type)

            total = query.count()
            offset = (page - 1) * page_size
            rels = query.order_by(AssetRelationship.discovered_at.desc()).offset(offset).limit(page_size).all()
            return ([DatabaseAdapter._relationship_to_dict(r) for r in rels], total)
        finally:
            db.close()

    @staticmethod
    def clear_asset_relationships() -> None:
        """Delete all relationships (used before a full re-discovery)."""
        from app.models.asset_relationship import AssetRelationship

        db = DatabaseAdapter._get_session()
        try:
            db.query(AssetRelationship).delete()
            db.commit()
        except Exception:
            db.rollback()
            raise
        finally:
            db.close()

    # ==================== AUDIT LOG ====================

    @staticmethod
    def _audit_log_to_dict(entry) -> Dict:
        """Convert AuditLog ORM object to dict."""
        return {
            "id": entry.id,
            "timestamp": entry.timestamp.isoformat() if entry.timestamp else None,
            "user_email": entry.user_email,
            "action": entry.action,
            "resource_type": entry.resource_type,
            "resource_id": entry.resource_id,
            "resource_name": entry.resource_name,
            "details": entry.details,
            "ip_address": entry.ip_address,
        }

    @staticmethod
    def create_audit_log(
        user_email: str,
        action: str,
        resource_type: str,
        resource_id: str = None,
        resource_name: str = None,
        details: str = None,
        ip_address: str = None,
    ) -> Dict:
        """Create an audit log entry and return it as a dict."""
        from app.models.audit_log import AuditLog

        db = DatabaseAdapter._get_session()
        try:
            entry = AuditLog(
                user_email=user_email,
                action=action,
                resource_type=resource_type,
                resource_id=resource_id,
                resource_name=resource_name,
                details=details,
                ip_address=ip_address,
            )
            db.add(entry)
            db.commit()
            db.refresh(entry)
            return DatabaseAdapter._audit_log_to_dict(entry)
        except Exception:
            db.rollback()
            raise
        finally:
            db.close()

    @staticmethod
    def list_audit_logs(
        page: int = 1,
        page_size: int = 50,
        user_email: str = None,
        action: str = None,
        resource_type: str = None,
        date_from: str = None,
        date_to: str = None,
    ) -> Tuple[List[Dict], int]:
        """List audit log entries with optional filters and pagination.

        date_from/date_to are ISO-8601 strings (inclusive bounds).
        Returns (items, total) ordered newest-first by timestamp.
        """
        from app.models.audit_log import AuditLog

        db = DatabaseAdapter._get_session()
        try:
            query = db.query(AuditLog)
            if user_email:
                query = query.filter(AuditLog.user_email == user_email)
            if action:
                query = query.filter(AuditLog.action == action)
            if resource_type:
                query = query.filter(AuditLog.resource_type == resource_type)
            if date_from:
                from datetime import datetime as dt
                query = query.filter(AuditLog.timestamp >= dt.fromisoformat(date_from))
            if date_to:
                from datetime import datetime as dt
                query = query.filter(AuditLog.timestamp <= dt.fromisoformat(date_to))

            total = query.count()
            offset = (page - 1) * page_size
            entries = query.order_by(AuditLog.timestamp.desc()).offset(offset).limit(page_size).all()
            return ([DatabaseAdapter._audit_log_to_dict(e) for e in entries], total)
        finally:
            db.close()


    # ==================== CONVERSATIONS ====================

    @staticmethod
def _conversation_to_dict(c) -> Dict:
        """Convert Conversation ORM object to dict."""
        return {
            "id": c.id,
            "title": c.title,
            "user_email": c.user_email,
            "collection_id": c.collection_id,
            "created_at": safe_timestamp(c, "created_at"),
            "updated_at": safe_timestamp(c, "updated_at"),
        }

    @staticmethod
    def _conversation_message_to_dict(m) -> Dict:
        """Convert ConversationMessage ORM object to dict."""
        return {
            "id": m.id,
            "conversation_id": m.conversation_id,
            "role": m.role,
            "content": m.content,
            "trace_id": m.trace_id,
            "created_at": safe_timestamp(m, "created_at"),
        }

    @staticmethod
    def create_conversation(id: str, title: str, user_email: str = None,
                            collection_id: int = None) -> Dict:
        """Create a new conversation with a caller-supplied string ID.

        NOTE(review): the `id` parameter shadows the builtin; kept because it
        is part of the public keyword interface.
        """
        from app.models.conversation import Conversation

        db = DatabaseAdapter._get_session()
        try:
            conv = Conversation(
                id=id,
                title=title,
                user_email=user_email,
                collection_id=collection_id,
            )
            db.add(conv)
            db.commit()
            db.refresh(conv)
            return DatabaseAdapter._conversation_to_dict(conv)
        except Exception:
            db.rollback()
            raise
        finally:
            db.close()

    @staticmethod
    def get_conversation(conversation_id: str) -> Optional[Dict]:
        """Get conversation with its messages.

        Messages are returned oldest-first under "messages", with a
        "message_count" convenience field.
        """
        from app.models.conversation import Conversation, ConversationMessage

        db = DatabaseAdapter._get_session()
        try:
            conv = db.query(Conversation).filter(Conversation.id == conversation_id).first()
            if not conv:
                return None

            messages = (
                db.query(ConversationMessage)
                .filter(ConversationMessage.conversation_id == conversation_id)
                .order_by(ConversationMessage.created_at)
                .all()
            )

            result = DatabaseAdapter._conversation_to_dict(conv)
            result["messages"] = [DatabaseAdapter._conversation_message_to_dict(m) for m in messages]
            result["message_count"] = len(messages)
            return result
        finally:
            db.close()

    @staticmethod
    def list_conversations(user_email: str = None, page: int = 1,
                           page_size: int = 50) -> Tuple[List[Dict], int]:
        """List conversations, newest first.

        Adds a per-conversation "message_count" via one count query per row
        (N+1 pattern — acceptable at the default page size of 50).
        """
        from app.models.conversation import Conversation, ConversationMessage
        from sqlalchemy import func

        db = DatabaseAdapter._get_session()
        try:
            query = db.query(Conversation)
            if user_email:
                query = query.filter(Conversation.user_email == user_email)

            total = query.count()
            offset = (page - 1) * page_size
            convs = query.order_by(Conversation.updated_at.desc()).offset(offset).limit(page_size).all()

            results = []
            for conv in convs:
                d = DatabaseAdapter._conversation_to_dict(conv)
                d["message_count"] = (
                    db.query(func.count(ConversationMessage.id))
                    .filter(ConversationMessage.conversation_id == conv.id)
                    .scalar()
                )
                results.append(d)

            return results, total
        finally:
            db.close()

    @staticmethod
    def delete_conversation(conversation_id: str) -> bool:
        """Delete a conversation and its messages. Returns True if found.

        Messages are bulk-deleted first to avoid orphan rows.
        """
        from app.models.conversation import Conversation, ConversationMessage

        db = DatabaseAdapter._get_session()
        try:
            conv = db.query(Conversation).filter(Conversation.id == conversation_id).first()
            if not conv:
                return False
            db.query(ConversationMessage).filter(
                ConversationMessage.conversation_id == conversation_id
            ).delete()
            db.delete(conv)
            db.commit()
            return True
        except Exception:
            db.rollback()
            raise
        finally:
            db.close()

    @staticmethod
    def update_conversation_title(conversation_id: str, title: str) -> Optional[Dict]:
        """Rename a conversation. Returns the updated dict, or None."""
        from app.models.conversation import Conversation

        db = DatabaseAdapter._get_session()
        try:
            conv = db.query(Conversation).filter(Conversation.id == conversation_id).first()
            if not conv:
                return None
            conv.title = title
            db.commit()
            db.refresh(conv)
            return DatabaseAdapter._conversation_to_dict(conv)
        except Exception:
            db.rollback()
            raise
        finally:
            db.close()

    @staticmethod
    def create_conversation_message(conversation_id: str, role:
@staticmethod
def create_conversation_message(conversation_id: str, role: str,
                                content: str, trace_id: str = None) -> Dict:
    """Add a message to a conversation and touch the conversation's updated_at."""
    from app.models.conversation import Conversation, ConversationMessage
    from datetime import datetime, timezone

    db = DatabaseAdapter._get_session()
    try:
        msg = ConversationMessage(
            conversation_id=conversation_id,
            role=role,
            content=content,
            trace_id=trace_id,
        )
        db.add(msg)

        # Touch conversation updated_at. datetime.utcnow() is deprecated
        # (Python 3.12) and returns a naive value; use an aware UTC
        # timestamp for the same instant. NOTE(review): if the column is
        # timezone-naive the driver strips tzinfo on write — verify.
        conv = db.query(Conversation).filter(Conversation.id == conversation_id).first()
        if conv:
            conv.updated_at = datetime.now(timezone.utc)

        db.commit()
        db.refresh(msg)
        return DatabaseAdapter._conversation_message_to_dict(msg)
    except Exception:
        db.rollback()
        raise
    finally:
        db.close()


# ==================== AGENT ANALYTICS ====================

@staticmethod
def _agent_analytic_to_dict(a) -> Dict:
    """Convert an AgentAnalytics ORM object to a plain dict."""
    return {
        "id": a.id,
        "agent_id": a.agent_id,
        "task_description": a.task_description,
        "success": a.success,
        "latency_ms": a.latency_ms,
        "quality_score": a.quality_score,
        "error_message": a.error_message,
        "created_at": safe_timestamp(a, "created_at"),
    }

@staticmethod
def create_agent_analytic(
    agent_id: int,
    task_description: str = None,
    success: int = 1,
    latency_ms: int = None,
    quality_score: int = None,
    error_message: str = None,
) -> Dict:
    """Record an analytics entry for an agent invocation.

    ``success`` is an int flag (1 = success, 0 = failure), matching the
    filter in get_agent_summary_stats.
    """
    from app.models.agent_analytics import AgentAnalytics

    db = DatabaseAdapter._get_session()
    try:
        entry = AgentAnalytics(
            agent_id=agent_id,
            task_description=task_description,
            success=success,
            latency_ms=latency_ms,
            quality_score=quality_score,
            error_message=error_message,
        )
        db.add(entry)
        db.commit()
        db.refresh(entry)
        return DatabaseAdapter._agent_analytic_to_dict(entry)
    except Exception:
        db.rollback()
        raise
    finally:
        db.close()

@staticmethod
def list_agent_analytics(agent_id: int, limit: int = 50) -> List[Dict]:
    """List recent analytics entries for a specific agent, newest first."""
    from app.models.agent_analytics import AgentAnalytics

    db = DatabaseAdapter._get_session()
    try:
        entries = (
            db.query(AgentAnalytics)
            .filter(AgentAnalytics.agent_id == agent_id)
            .order_by(AgentAnalytics.created_at.desc())
            .limit(limit)
            .all()
        )
        return [DatabaseAdapter._agent_analytic_to_dict(e) for e in entries]
    finally:
        db.close()

@staticmethod
def get_agent_summary_stats(agent_id: int) -> Dict:
    """Get aggregated stats for a specific agent.

    Averages ignore NULL values; success_rate is None when there are no
    invocations (avoids division by zero).
    """
    from app.models.agent_analytics import AgentAnalytics
    from sqlalchemy import func

    db = DatabaseAdapter._get_session()
    try:
        total = db.query(func.count(AgentAnalytics.id)).filter(
            AgentAnalytics.agent_id == agent_id
        ).scalar() or 0

        successes = db.query(func.count(AgentAnalytics.id)).filter(
            AgentAnalytics.agent_id == agent_id,
            AgentAnalytics.success == 1,
        ).scalar() or 0

        avg_latency = db.query(func.avg(AgentAnalytics.latency_ms)).filter(
            AgentAnalytics.agent_id == agent_id,
            AgentAnalytics.latency_ms.isnot(None),
        ).scalar()

        avg_quality = db.query(func.avg(AgentAnalytics.quality_score)).filter(
            AgentAnalytics.agent_id == agent_id,
            AgentAnalytics.quality_score.isnot(None),
        ).scalar()

        return {
            "agent_id": agent_id,
            "total_invocations": total,
            "success_count": successes,
            "failure_count": total - successes,
            "success_rate": round(successes / total, 4) if total > 0 else None,
            "avg_latency_ms": round(avg_latency) if avg_latency is not None else None,
            "avg_quality_score": round(float(avg_quality), 2) if avg_quality is not None else None,
        }
    finally:
        db.close()


# Alias for backward compatibility
WarehouseDB = DatabaseAdapter
"""
Databricks SQL Warehouse database layer.

This module provides database access via Databricks SDK Statement Execution API,
replacing SQLAlchemy/SQLite for production use with Unity Catalog tables.

All queries use parameterized statements to prevent SQL injection.
"""

import os
import time
import logging
from typing import List, Dict, Any, Optional, Tuple
from databricks.sdk import WorkspaceClient
from databricks.sdk.service.sql import StatementState, StatementParameterListItem

logger = logging.getLogger(__name__)

# Configuration - require explicit values, no hardcoded defaults
CATALOG = os.getenv("DB_CATALOG")
SCHEMA = os.getenv("DB_SCHEMA")
WAREHOUSE_ID = os.getenv("DATABRICKS_WAREHOUSE_ID")

if not CATALOG or not SCHEMA or not WAREHOUSE_ID:
    logger.warning(
        "DB_CATALOG, DB_SCHEMA, or DATABRICKS_WAREHOUSE_ID not set. "
        "Warehouse backend will fail if used."
    )
    CATALOG = CATALOG or "default"
    SCHEMA = SCHEMA or "default"
    WAREHOUSE_ID = WAREHOUSE_ID or ""

# Fully-qualified table names
APPS_TABLE = f"{CATALOG}.{SCHEMA}.apps"
AGENTS_TABLE = f"{CATALOG}.{SCHEMA}.agents"
MCP_SERVERS_TABLE = f"{CATALOG}.{SCHEMA}.mcp_servers"
TOOLS_TABLE = f"{CATALOG}.{SCHEMA}.tools"
COLLECTIONS_TABLE = f"{CATALOG}.{SCHEMA}.collections"
COLLECTION_ITEMS_TABLE = f"{CATALOG}.{SCHEMA}.collection_items"

# Global workspace client (reused across requests)
_workspace_client = None


def get_workspace_client() -> WorkspaceClient:
    """Get or create the module-level workspace client (lazy singleton)."""
    global _workspace_client
    if _workspace_client is None:
        _workspace_client = WorkspaceClient()
    return _workspace_client


def _param(name: str, value: Any) -> StatementParameterListItem:
    """Create a statement parameter.

    Integers keep type="INT" so LIMIT/OFFSET bind correctly; everything
    else is passed as a string.
    """
    if value is None:
        return StatementParameterListItem(name=name, value=None)
    # bool must be excluded explicitly: isinstance(True, int) is True in
    # Python, and str(True) == "True" is not a valid INT literal.
    if isinstance(value, int) and not isinstance(value, bool):
        return StatementParameterListItem(name=name, value=str(value), type="INT")
    return StatementParameterListItem(name=name, value=str(value))


def execute_sql(
    sql: str,
    parameters: Optional[List[StatementParameterListItem]] = None,
    wait_timeout: str = "30s",
) -> List[Dict[str, Any]]:
    """
    Execute a SQL statement using the Statement Execution API.
    Returns list of dicts with column names as keys.

    Uses parameterized queries for safety against SQL injection.
    NOTE(review): the Statement Execution API returns cell values as
    strings in data_array; callers that need ints must convert — verify.

    Raises:
        RuntimeError: if the statement fails or is canceled/closed.
        TimeoutError: if the statement does not finish within the poll budget.
    """
    w = get_workspace_client()

    response = w.statement_execution.execute_statement(
        warehouse_id=WAREHOUSE_ID,
        statement=sql,
        parameters=parameters,
        wait_timeout=wait_timeout,
        catalog=CATALOG,
        schema=SCHEMA,
    )

    if response.status.state == StatementState.FAILED:
        error_msg = response.status.error.message if response.status.error else "Unknown error"
        raise RuntimeError(f"SQL execution failed: {error_msg}")

    # If still running, poll until terminal state or deadline.
    if response.status.state in (StatementState.PENDING, StatementState.RUNNING):
        statement_id = response.statement_id
        max_wait = 60  # seconds of additional polling
        deadline = time.time() + max_wait
        while time.time() < deadline:
            response = w.statement_execution.get_statement(statement_id)
            state = response.status.state
            if state == StatementState.SUCCEEDED:
                break
            # CANCELED/CLOSED are terminal too — previously they were
            # silently ignored and stale results could be returned.
            if state in (StatementState.FAILED, StatementState.CANCELED,
                         StatementState.CLOSED):
                error_msg = response.status.error.message if response.status.error else "Unknown error"
                raise RuntimeError(f"SQL execution failed ({state}): {error_msg}")
            time.sleep(0.5)
        if response.status.state != StatementState.SUCCEEDED:
            # Previously this fell through and returned partial/empty
            # results on timeout; fail loudly instead.
            raise TimeoutError(
                f"SQL statement {statement_id} did not finish within {max_wait}s"
            )

    # Parse results
    if not response.result or not response.manifest:
        return []

    columns = [col.name for col in response.manifest.schema.columns]
    rows = []
    if response.result.data_array:
        for row_data in response.result.data_array:
            rows.append(dict(zip(columns, row_data)))

    return rows


def execute_sql_scalar(
    sql: str,
    parameters: Optional[List[StatementParameterListItem]] = None,
) -> Any:
    """Execute SQL and return the first column of the first row, or None."""
    results = execute_sql(sql, parameters)
    if results:
        first_row = results[0]
        if first_row:
            return list(first_row.values())[0]
    return None


class WarehouseDB:
    """Database operations using Databricks SQL Warehouse."""

    # ==================== APPS ====================

    @staticmethod
    def list_apps(page: int = 1, page_size: int = 50) -> Tuple[List[Dict], int]:
        """List apps with pagination. Returns (rows, total)."""
        page = max(1, int(page))
        page_size = max(1, min(100, int(page_size)))
        offset = (page - 1) * page_size

        total = execute_sql_scalar(f"SELECT COUNT(*) as cnt FROM {APPS_TABLE}") or 0

        rows = execute_sql(
            f"""
            SELECT id, name, owner, url, tags, manifest_url, created_at
            FROM {APPS_TABLE}
            ORDER BY id
            LIMIT :page_size OFFSET :offset
            """,
            parameters=[
                _param("page_size", page_size),
                _param("offset", offset),
            ],
        )

        return rows, int(total)

    @staticmethod
    def get_app(app_id: int) -> Optional[Dict]:
        """Get a single app by ID, or None if not found."""
        rows = execute_sql(
            f"""
            SELECT id, name, owner, url, tags, manifest_url, created_at
            FROM {APPS_TABLE}
            WHERE id = :app_id
            """,
            parameters=[_param("app_id", int(app_id))],
        )
        return rows[0] if rows else None

    @staticmethod
    def create_app(name: str, owner: str = None, url: str = None,
                   tags: str = None, manifest_url: str = None) -> Dict:
        """Create a new app and return the inserted row.

        NOTE(review): the insert and the re-select by name are separate
        statements; under concurrent inserts with the same name this may
        return another writer's row — verify acceptable.
        """
        execute_sql(
            f"""
            INSERT INTO {APPS_TABLE} (name, owner, url, tags, manifest_url, created_at)
            VALUES (:name, :owner, :url, :tags, :manifest_url, CURRENT_TIMESTAMP())
            """,
            parameters=[
                _param("name", name),
                _param("owner", owner),
                _param("url", url),
                _param("tags", tags),
                _param("manifest_url", manifest_url),
            ],
        )

        rows = execute_sql(
            f"""
            SELECT id, name, owner, url, tags, manifest_url, created_at
            FROM {APPS_TABLE}
            WHERE name = :name
            ORDER BY id DESC
            LIMIT 1
            """,
            parameters=[_param("name", name)],
        )
        return rows[0] if rows else {}
@staticmethod
def update_app(app_id: int, **kwargs) -> Optional[Dict]:
    """Update an app; None-valued kwargs are skipped.

    Column names are interpolated into the SQL text, so they are
    validated against a fixed allow-list to prevent SQL injection via
    kwargs keys.
    """
    if not kwargs:
        return WarehouseDB.get_app(app_id)

    allowed = {"name", "owner", "url", "tags", "manifest_url"}
    set_clauses = []
    params = [_param("app_id", int(app_id))]
    for i, (key, value) in enumerate(kwargs.items()):
        if value is None:
            continue
        if key not in allowed:
            raise ValueError(f"Invalid app column: {key}")
        param_name = f"val_{i}"
        set_clauses.append(f"{key} = :{param_name}")
        params.append(_param(param_name, value))

    if not set_clauses:
        return WarehouseDB.get_app(app_id)

    execute_sql(
        f"""
        UPDATE {APPS_TABLE}
        SET {', '.join(set_clauses)}
        WHERE id = :app_id
        """,
        parameters=params,
    )

    return WarehouseDB.get_app(app_id)

@staticmethod
def delete_app(app_id: int) -> bool:
    """Delete an app. Always returns True (no existence check)."""
    execute_sql(
        f"DELETE FROM {APPS_TABLE} WHERE id = :app_id",
        parameters=[_param("app_id", int(app_id))],
    )
    return True

# ==================== AGENTS ====================

@staticmethod
def list_agents(page: int = 1, page_size: int = 50) -> Tuple[List[Dict], int]:
    """List agents with pagination, newest first. Returns (rows, total)."""
    page = max(1, int(page))
    page_size = max(1, min(100, int(page_size)))
    offset = (page - 1) * page_size

    total = execute_sql_scalar(f"SELECT COUNT(*) as cnt FROM {AGENTS_TABLE}") or 0

    rows = execute_sql(
        f"""
        SELECT id, name, description, capabilities, status, collection_id, app_id,
               endpoint_url, auth_token, a2a_capabilities, skills, protocol_version,
               system_prompt, created_at, updated_at
        FROM {AGENTS_TABLE}
        ORDER BY created_at DESC
        LIMIT :page_size OFFSET :offset
        """,
        parameters=[_param("page_size", page_size), _param("offset", offset)],
    )
    # int() for consistency with the other list_* methods — the
    # Statement API returns the COUNT as a string.
    return rows, int(total)

@staticmethod
def get_agent(agent_id: int) -> Optional[Dict]:
    """Get a single agent by ID, or None if not found."""
    rows = execute_sql(
        f"""
        SELECT id, name, description, capabilities, status, collection_id, app_id,
               endpoint_url, auth_token, a2a_capabilities, skills, protocol_version,
               system_prompt, created_at, updated_at
        FROM {AGENTS_TABLE}
        WHERE id = :agent_id
        """,
        parameters=[_param("agent_id", int(agent_id))],
    )
    return rows[0] if rows else None

@staticmethod
def create_agent(name: str, description: str = None, capabilities: str = None,
                 status: str = "draft", collection_id: int = None,
                 endpoint_url: str = None, **kwargs) -> Dict:
    """Create a new agent and return the inserted row.

    Extra optional columns (auth_token, a2a_capabilities, skills,
    protocol_version, system_prompt, app_id) are taken from kwargs;
    unknown kwargs are ignored.
    """
    auth_token = kwargs.get("auth_token")
    a2a_capabilities = kwargs.get("a2a_capabilities")
    skills = kwargs.get("skills")
    protocol_version = kwargs.get("protocol_version")
    system_prompt = kwargs.get("system_prompt")
    app_id = kwargs.get("app_id")

    execute_sql(
        f"""
        INSERT INTO {AGENTS_TABLE} (
            name, description, capabilities, status, collection_id, app_id,
            endpoint_url, auth_token, a2a_capabilities, skills,
            protocol_version, system_prompt, created_at
        )
        VALUES (
            :name, :description, :capabilities, :status, :collection_id, :app_id,
            :endpoint_url, :auth_token, :a2a_capabilities, :skills,
            :protocol_version, :system_prompt, CURRENT_TIMESTAMP()
        )
        """,
        parameters=[
            _param("name", name),
            _param("description", description),
            _param("capabilities", capabilities),
            _param("status", status or "draft"),
            _param("collection_id", collection_id),
            _param("app_id", app_id),
            _param("endpoint_url", endpoint_url),
            _param("auth_token", auth_token),
            _param("a2a_capabilities", a2a_capabilities),
            _param("skills", skills),
            _param("protocol_version", protocol_version),
            _param("system_prompt", system_prompt),
        ],
    )

    # Re-select by name; see create_app note about the race window.
    rows = execute_sql(
        f"""
        SELECT id, name, description, capabilities, status, collection_id, app_id,
               endpoint_url, auth_token, a2a_capabilities, skills, protocol_version,
               system_prompt, created_at
        FROM {AGENTS_TABLE}
        WHERE name = :name
        ORDER BY id DESC LIMIT 1
        """,
        parameters=[_param("name", name)],
    )
    return rows[0] if rows else {}

@staticmethod
def update_agent(agent_id: int, **kwargs) -> Optional[Dict]:
    """Update an agent; column names validated against an allow-list
    because they are interpolated into the SQL text."""
    if not kwargs:
        return WarehouseDB.get_agent(agent_id)

    allowed = {
        "name", "description", "capabilities", "status", "collection_id",
        "app_id", "endpoint_url", "auth_token", "a2a_capabilities",
        "skills", "protocol_version", "system_prompt",
    }
    set_clauses = []
    params = [_param("agent_id", int(agent_id))]
    for i, (key, value) in enumerate(kwargs.items()):
        if value is None:
            continue
        if key not in allowed:
            raise ValueError(f"Invalid agent column: {key}")
        param_name = f"val_{i}"
        set_clauses.append(f"{key} = :{param_name}")
        params.append(_param(param_name, value))

    if not set_clauses:
        return WarehouseDB.get_agent(agent_id)

    execute_sql(
        f"""
        UPDATE {AGENTS_TABLE}
        SET {', '.join(set_clauses)}, updated_at = CURRENT_TIMESTAMP()
        WHERE id = :agent_id
        """,
        parameters=params,
    )
    return WarehouseDB.get_agent(agent_id)

@staticmethod
def delete_agent(agent_id: int) -> bool:
    """Delete an agent. Always returns True (no existence check)."""
    execute_sql(
        f"DELETE FROM {AGENTS_TABLE} WHERE id = :agent_id",
        parameters=[_param("agent_id", int(agent_id))],
    )
    return True

# ==================== MCP SERVERS ====================

@staticmethod
def list_mcp_servers(page: int = 1, page_size: int = 50, app_id: int = None) -> Tuple[List[Dict], int]:
    """List MCP servers with pagination, optionally filtered by app_id."""
    page = max(1, int(page))
    page_size = max(1, min(100, int(page_size)))
    offset = (page - 1) * page_size

    params = [
        _param("page_size", page_size),
        _param("offset", offset),
    ]

    where_clause = ""
    if app_id is not None:
        where_clause = "WHERE app_id = :app_id"
        params.append(_param("app_id", int(app_id)))

    # "is not None" (not truthiness): app_id == 0 must still bind the
    # :app_id parameter for the COUNT query.
    total = execute_sql_scalar(
        f"SELECT COUNT(*) as cnt FROM {MCP_SERVERS_TABLE} {where_clause}",
        parameters=params[2:] if app_id is not None else None,
    ) or 0

    rows = execute_sql(
        f"""
        SELECT id, app_id, server_url, kind, uc_connection, scopes, created_at
        FROM {MCP_SERVERS_TABLE}
        {where_clause}
        ORDER BY id
        LIMIT :page_size OFFSET :offset
        """,
        parameters=params,
    )

    return rows, int(total)
@staticmethod
def get_mcp_server(server_id: int) -> Optional[Dict]:
    """Get a single MCP server by ID, or None if not found."""
    rows = execute_sql(
        f"""
        SELECT id, app_id, server_url, kind, uc_connection, scopes, created_at
        FROM {MCP_SERVERS_TABLE}
        WHERE id = :server_id
        """,
        parameters=[_param("server_id", int(server_id))],
    )
    return rows[0] if rows else None

@staticmethod
def create_mcp_server(server_url: str, kind: str = 'managed',
                      app_id: int = None, uc_connection: str = None,
                      scopes: str = None) -> Dict:
    """Create a new MCP server and return the inserted row."""
    execute_sql(
        f"""
        INSERT INTO {MCP_SERVERS_TABLE} (server_url, kind, app_id, uc_connection, scopes, created_at)
        VALUES (:server_url, :kind, :app_id, :uc_connection, :scopes, CURRENT_TIMESTAMP())
        """,
        parameters=[
            _param("server_url", server_url),
            _param("kind", kind),
            _param("app_id", app_id),
            _param("uc_connection", uc_connection),
            _param("scopes", scopes),
        ],
    )

    # Re-select by server_url; racy under concurrent identical inserts.
    rows = execute_sql(
        f"""
        SELECT id, app_id, server_url, kind, uc_connection, scopes, created_at
        FROM {MCP_SERVERS_TABLE}
        WHERE server_url = :server_url
        ORDER BY id DESC
        LIMIT 1
        """,
        parameters=[_param("server_url", server_url)],
    )
    return rows[0] if rows else {}

@staticmethod
def update_mcp_server(server_id: int, **kwargs) -> Optional[Dict]:
    """Update an MCP server; column names validated against an
    allow-list because they are interpolated into the SQL text."""
    if not kwargs:
        return WarehouseDB.get_mcp_server(server_id)

    allowed = {"app_id", "server_url", "kind", "uc_connection", "scopes"}
    set_clauses = []
    params = [_param("server_id", int(server_id))]
    for i, (key, value) in enumerate(kwargs.items()):
        if value is None:
            continue
        if key not in allowed:
            raise ValueError(f"Invalid MCP server column: {key}")
        param_name = f"val_{i}"
        set_clauses.append(f"{key} = :{param_name}")
        params.append(_param(param_name, value))

    if not set_clauses:
        return WarehouseDB.get_mcp_server(server_id)

    execute_sql(
        f"""
        UPDATE {MCP_SERVERS_TABLE}
        SET {', '.join(set_clauses)}
        WHERE id = :server_id
        """,
        parameters=params,
    )

    return WarehouseDB.get_mcp_server(server_id)

@staticmethod
def delete_mcp_server(server_id: int) -> bool:
    """Delete an MCP server. Always returns True (no existence check)."""
    execute_sql(
        f"DELETE FROM {MCP_SERVERS_TABLE} WHERE id = :server_id",
        parameters=[_param("server_id", int(server_id))],
    )
    return True

# ==================== TOOLS ====================

@staticmethod
def list_tools(page: int = 1, page_size: int = 50,
               mcp_server_id: int = None) -> Tuple[List[Dict], int]:
    """List tools with pagination, optionally filtered by mcp_server_id."""
    page = max(1, int(page))
    page_size = max(1, min(100, int(page_size)))
    offset = (page - 1) * page_size

    params = [
        _param("page_size", page_size),
        _param("offset", offset),
    ]

    where_clause = ""
    if mcp_server_id is not None:
        where_clause = "WHERE mcp_server_id = :mcp_server_id"
        params.append(_param("mcp_server_id", int(mcp_server_id)))

    # "is not None" (not truthiness): id 0 must still bind the parameter.
    total = execute_sql_scalar(
        f"SELECT COUNT(*) as cnt FROM {TOOLS_TABLE} {where_clause}",
        parameters=params[2:] if mcp_server_id is not None else None,
    ) or 0

    rows = execute_sql(
        f"""
        SELECT id, mcp_server_id, name, description, parameters, created_at
        FROM {TOOLS_TABLE}
        {where_clause}
        ORDER BY id
        LIMIT :page_size OFFSET :offset
        """,
        parameters=params,
    )

    return rows, int(total)

@staticmethod
def get_tool(tool_id: int) -> Optional[Dict]:
    """Get a single tool by ID, or None if not found."""
    rows = execute_sql(
        f"""
        SELECT id, mcp_server_id, name, description, parameters, created_at
        FROM {TOOLS_TABLE}
        WHERE id = :tool_id
        """,
        parameters=[_param("tool_id", int(tool_id))],
    )
    return rows[0] if rows else None

@staticmethod
def create_tool(mcp_server_id: int, name: str, description: str = None,
                parameters: str = None) -> Dict:
    """Create a new tool and return the inserted row."""
    execute_sql(
        f"""
        INSERT INTO {TOOLS_TABLE} (mcp_server_id, name, description, parameters, created_at)
        VALUES (:mcp_server_id, :name, :description, :parameters, CURRENT_TIMESTAMP())
        """,
        parameters=[
            _param("mcp_server_id", int(mcp_server_id)),
            _param("name", name),
            _param("description", description),
            _param("parameters", parameters),
        ],
    )

    rows = execute_sql(
        f"""
        SELECT id, mcp_server_id, name, description, parameters, created_at
        FROM {TOOLS_TABLE}
        WHERE mcp_server_id = :mcp_server_id AND name = :name
        ORDER BY id DESC
        LIMIT 1
        """,
        parameters=[
            _param("mcp_server_id", int(mcp_server_id)),
            _param("name", name),
        ],
    )
    return rows[0] if rows else {}

# ==================== COLLECTIONS ====================

@staticmethod
def list_collections(page: int = 1, page_size: int = 50) -> Tuple[List[Dict], int]:
    """List collections with pagination. Returns (rows, total)."""
    page = max(1, int(page))
    page_size = max(1, min(100, int(page_size)))
    offset = (page - 1) * page_size

    total = execute_sql_scalar(f"SELECT COUNT(*) as cnt FROM {COLLECTIONS_TABLE}") or 0

    rows = execute_sql(
        f"""
        SELECT id, name, description, created_at
        FROM {COLLECTIONS_TABLE}
        ORDER BY id
        LIMIT :page_size OFFSET :offset
        """,
        parameters=[
            _param("page_size", page_size),
            _param("offset", offset),
        ],
    )

    return rows, int(total)

@staticmethod
def get_collection(collection_id: int) -> Optional[Dict]:
    """Get a single collection by ID, or None if not found."""
    rows = execute_sql(
        f"""
        SELECT id, name, description, created_at
        FROM {COLLECTIONS_TABLE}
        WHERE id = :collection_id
        """,
        parameters=[_param("collection_id", int(collection_id))],
    )
    return rows[0] if rows else None

@staticmethod
def create_collection(name: str, description: str = None) -> Dict:
    """Create a new collection and return the inserted row."""
    execute_sql(
        f"""
        INSERT INTO {COLLECTIONS_TABLE} (name, description, created_at)
        VALUES (:name, :description, CURRENT_TIMESTAMP())
        """,
        parameters=[
            _param("name", name),
            _param("description", description),
        ],
    )

    rows = execute_sql(
        f"""
        SELECT id, name, description, created_at
        FROM {COLLECTIONS_TABLE}
        WHERE name = :name
        ORDER BY id DESC
        LIMIT 1
        """,
        parameters=[_param("name", name)],
    )
    return rows[0] if rows else {}
@staticmethod
def update_collection(collection_id: int, **kwargs) -> Optional[Dict]:
    """Update a collection; column names validated against an
    allow-list because they are interpolated into the SQL text."""
    if not kwargs:
        return WarehouseDB.get_collection(collection_id)

    allowed = {"name", "description"}
    set_clauses = []
    params = [_param("collection_id", int(collection_id))]
    for i, (key, value) in enumerate(kwargs.items()):
        if value is None:
            continue
        if key not in allowed:
            raise ValueError(f"Invalid collection column: {key}")
        param_name = f"val_{i}"
        set_clauses.append(f"{key} = :{param_name}")
        params.append(_param(param_name, value))

    if not set_clauses:
        return WarehouseDB.get_collection(collection_id)

    execute_sql(
        f"""
        UPDATE {COLLECTIONS_TABLE}
        SET {', '.join(set_clauses)}
        WHERE id = :collection_id
        """,
        parameters=params,
    )

    return WarehouseDB.get_collection(collection_id)

@staticmethod
def delete_collection(collection_id: int) -> bool:
    """Delete a collection and its items.

    NOTE(review): these are two separate statements with no
    transaction; a failure between them leaves the collection without
    items — verify acceptable for this workload.
    """
    cid_param = [_param("collection_id", int(collection_id))]
    execute_sql(
        f"DELETE FROM {COLLECTION_ITEMS_TABLE} WHERE collection_id = :collection_id",
        parameters=cid_param,
    )
    execute_sql(
        f"DELETE FROM {COLLECTIONS_TABLE} WHERE id = :collection_id",
        parameters=cid_param,
    )
    return True

# ==================== COLLECTION ITEMS ====================

@staticmethod
def list_collection_items(collection_id: int) -> List[Dict]:
    """List items in a collection with joined app/server/tool data.

    Returns the nested structure the API expects: each item dict gains
    an 'app'/'server'/'tool' sub-dict when the corresponding FK is set.
    """
    rows = execute_sql(
        f"""
        SELECT
            ci.id,
            ci.collection_id,
            ci.app_id,
            ci.mcp_server_id,
            ci.tool_id,
            a.name as app_name,
            a.owner as app_owner,
            a.url as app_url,
            s.server_url,
            s.kind as server_kind,
            t.name as tool_name,
            t.description as tool_description
        FROM {COLLECTION_ITEMS_TABLE} ci
        LEFT JOIN {APPS_TABLE} a ON ci.app_id = a.id
        LEFT JOIN {MCP_SERVERS_TABLE} s ON ci.mcp_server_id = s.id
        LEFT JOIN {TOOLS_TABLE} t ON ci.tool_id = t.id
        WHERE ci.collection_id = :collection_id
        """,
        parameters=[_param("collection_id", int(collection_id))],
    )

    items = []
    for row_dict in rows:
        item = {
            'id': row_dict['id'],
            'collection_id': row_dict['collection_id'],
            'app_id': row_dict['app_id'],
            'mcp_server_id': row_dict['mcp_server_id'],
            'tool_id': row_dict['tool_id'],
        }

        if row_dict.get('app_id'):
            item['app'] = {
                'id': row_dict['app_id'],
                'name': row_dict.get('app_name'),
                'owner': row_dict.get('app_owner'),
                'url': row_dict.get('app_url'),
            }
        if row_dict.get('mcp_server_id'):
            item['server'] = {
                'id': row_dict['mcp_server_id'],
                'server_url': row_dict.get('server_url'),
                'kind': row_dict.get('server_kind'),
            }
        if row_dict.get('tool_id'):
            item['tool'] = {
                'id': row_dict['tool_id'],
                'name': row_dict.get('tool_name'),
                'description': row_dict.get('tool_description'),
            }

        items.append(item)

    return items

@staticmethod
def add_collection_item(collection_id: int, app_id: int = None,
                        mcp_server_id: int = None, tool_id: int = None) -> Dict:
    """Add an item to a collection and return the inserted row.

    NOTE(review): the re-select of "latest item for this collection" is
    racy under concurrent adds to the same collection — verify.
    """
    execute_sql(
        f"""
        INSERT INTO {COLLECTION_ITEMS_TABLE} (collection_id, app_id, mcp_server_id, tool_id, created_at)
        VALUES (:collection_id, :app_id, :mcp_server_id, :tool_id, CURRENT_TIMESTAMP())
        """,
        parameters=[
            _param("collection_id", int(collection_id)),
            _param("app_id", int(app_id) if app_id is not None else None),
            _param("mcp_server_id", int(mcp_server_id) if mcp_server_id is not None else None),
            _param("tool_id", int(tool_id) if tool_id is not None else None),
        ],
    )

    rows = execute_sql(
        f"""
        SELECT id, collection_id, app_id, mcp_server_id, tool_id
        FROM {COLLECTION_ITEMS_TABLE}
        WHERE collection_id = :collection_id
        ORDER BY id DESC
        LIMIT 1
        """,
        parameters=[_param("collection_id", int(collection_id))],
    )
    return rows[0] if rows else {}
@staticmethod
def get_collection_item(item_id: int) -> Optional[Dict]:
    """Get a collection item by ID, or None if not found."""
    rows = execute_sql(
        f"""
        SELECT id, collection_id, app_id, mcp_server_id, tool_id
        FROM {COLLECTION_ITEMS_TABLE}
        WHERE id = :item_id
        """,
        parameters=[_param("item_id", int(item_id))],
    )
    return rows[0] if rows else None

@staticmethod
def delete_collection_item(item_id: int) -> bool:
    """Delete a collection item. Always returns True (no existence check)."""
    execute_sql(
        f"DELETE FROM {COLLECTION_ITEMS_TABLE} WHERE id = :item_id",
        parameters=[_param("item_id", int(item_id))],
    )
    return True
"""
Initialize warehouse schema if using Databricks SQL Warehouse.

This module provides a function to create tables in the warehouse
before the app starts up and attempts to query them.
"""

import logging
from app.db_warehouse import execute_sql, CATALOG, SCHEMA

logger = logging.getLogger(__name__)


def init_warehouse_tables():
    """
    Create every registry table in the warehouse if it does not exist yet.

    Runs once on app startup when the warehouse backend is configured.
    Raises on the first DDL statement that fails, after logging it.
    """
    ddl_statements = [
        # Apps table
        f"""
        CREATE TABLE IF NOT EXISTS {CATALOG}.{SCHEMA}.apps (
            id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
            name STRING NOT NULL UNIQUE,
            owner STRING,
            url STRING,
            tags STRING,
            manifest_url STRING
        )
        """,
        # Agents table
        f"""
        CREATE TABLE IF NOT EXISTS {CATALOG}.{SCHEMA}.agents (
            id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
            name STRING NOT NULL UNIQUE,
            description STRING,
            capabilities STRING,
            status STRING NOT NULL DEFAULT 'draft',
            collection_id INT,
            app_id INT,
            endpoint_url STRING,
            auth_token STRING,
            a2a_capabilities STRING,
            skills STRING,
            protocol_version STRING DEFAULT '0.3.0',
            system_prompt STRING,
            created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP(),
            updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP()
        )
        """,
        # Collections table
        f"""
        CREATE TABLE IF NOT EXISTS {CATALOG}.{SCHEMA}.collections (
            id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
            name STRING NOT NULL UNIQUE,
            description STRING
        )
        """,
        # MCP Servers table
        f"""
        CREATE TABLE IF NOT EXISTS {CATALOG}.{SCHEMA}.mcp_servers (
            id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
            app_id INT,
            server_url STRING NOT NULL,
            kind STRING NOT NULL,
            uc_connection STRING,
            scopes STRING
        )
        """,
        # Tools table
        f"""
        CREATE TABLE IF NOT EXISTS {CATALOG}.{SCHEMA}.tools (
            id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
            mcp_server_id INT NOT NULL,
            name STRING NOT NULL,
            description STRING,
            parameters STRING
        )
        """,
        # Collection Items table
        f"""
        CREATE TABLE IF NOT EXISTS {CATALOG}.{SCHEMA}.collection_items (
            id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
            collection_id INT NOT NULL,
            app_id INT,
            mcp_server_id INT,
            tool_id INT
        )
        """,
        # Discovery State table (singleton row, hence the plain INT key)
        f"""
        CREATE TABLE IF NOT EXISTS {CATALOG}.{SCHEMA}.discovery_state (
            id INT PRIMARY KEY,
            is_running BOOLEAN NOT NULL DEFAULT FALSE,
            last_run_timestamp STRING,
            last_run_status STRING,
            last_run_message STRING
        )
        """,
        # Supervisors table
        f"""
        CREATE TABLE IF NOT EXISTS {CATALOG}.{SCHEMA}.supervisors (
            id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
            collection_id INT NOT NULL,
            app_name STRING NOT NULL,
            generated_at TIMESTAMP NOT NULL,
            deployed_url STRING
        )
        """,
    ]

    logger.info(f"[WAREHOUSE-INIT] Initializing warehouse schema: {CATALOG}.{SCHEMA}")

    total = len(ddl_statements)
    for seq, statement in enumerate(ddl_statements, 1):
        try:
            execute_sql(statement)
        except Exception as exc:
            # Surface which statement broke, then abort startup-time init.
            logger.error(f"[WAREHOUSE-INIT] Failed to create table {seq}: {exc}")
            raise
        else:
            logger.info(f"[WAREHOUSE-INIT] Table {seq}/{total} initialized")

    logger.info("[WAREHOUSE-INIT] All tables initialized successfully")
# Export Databricks env vars so the SDK's WorkspaceClient() can find them.
# pydantic-settings reads .env into the Settings object but doesn't set os.environ.
for _attr, _env in [
    ("databricks_host", "DATABRICKS_HOST"),
    ("databricks_token", "DATABRICKS_TOKEN"),
    ("databricks_config_profile", "DATABRICKS_CONFIG_PROFILE"),
]:
    _val = getattr(settings, _attr, None)
    # Only export when the setting has a value and the variable isn't already
    # set, so explicit environment configuration always wins over .env values.
    if _val and _env not in os.environ:
        os.environ[_env] = _val

# Configure structured logging
logging.basicConfig(
    level=logging.DEBUG if settings.debug else logging.INFO,
    format="%(asctime)s [%(levelname)s] %(name)s: %(message)s",
)
logger = logging.getLogger(__name__)
# Route modules are imported after logging is configured so import-time log
# records use the format above.
from app.routes import health, apps, mcp_servers, tools, collections, discovery, supervisors, agents, admin, chat, supervisor_runtime, agent_chat, traces, a2a, catalog_assets, workspace_assets, search, lineage, audit_log, conversations


@asynccontextmanager
async def lifespan(app: FastAPI):
    """
    Lifespan context manager for startup/shutdown events.

    On startup:
    - Creates database tables (SQLite dev backend) via init_db().
    - Initializes MLflow tracing (best-effort; logs a warning on failure).
    - Creates warehouse tables when DATABASE_URL is a databricks:// URL
      (best-effort; startup continues on failure).
    - Runs auto-discovery to populate the registry (best-effort).

    No explicit cleanup is performed on shutdown.
    """
    from app.database import init_db  # get_db was imported but never used here
    init_db()

    # Initialize MLflow tracing (graceful degradation if unavailable)
    try:
        import mlflow
        mlflow.set_tracking_uri(settings.mlflow_tracking_uri)
        mlflow.set_experiment(settings.mlflow_experiment_name)
        logger.info("MLflow tracing initialized: uri=%s experiment=%s",
                    settings.mlflow_tracking_uri, settings.mlflow_experiment_name)
    except Exception as e:
        logger.warning("MLflow initialization failed (tracing disabled): %s", e)

    # Initialize warehouse schema if using warehouse backend
    logger.info(f"[STARTUP] DATABASE_URL={settings.database_url}")
    if settings.database_url.startswith("databricks://"):
        logger.info("[STARTUP] Initializing warehouse schema")
        try:
            from app.init_warehouse_schema import init_warehouse_tables
            init_warehouse_tables()
            logger.info("[STARTUP] Warehouse schema initialization complete")
        except Exception as e:
            logger.error("[STARTUP] Warehouse schema initialization failed: %s", e, exc_info=True)
            # Don't fail startup, but discovery will likely fail without tables
    else:
        logger.info(f"[STARTUP] Skipping warehouse init (using {settings.database_url})")

    # Run auto-discovery on startup to populate database
    logger.info("[STARTUP] Running auto-discovery to populate database")
    try:
        from app.services.discovery import DiscoveryService

        service = DiscoveryService()

        # Run workspace and agent discovery
        discovery_result = await service.discover_all(
            custom_urls=None,
            profile=None,  # Will use default profile or service principal auth
        )
        agent_result = await service.discover_agents_all(profile=None)

        # Persist results. The upsert return values were previously bound to
        # unused locals; the calls are kept purely for their DB side effects.
        apps_discovered = len(getattr(service, "_pending_apps", []))
        service.upsert_discovery_results(discovery_result)
        if agent_result:
            service.upsert_agent_discovery_results(agent_result)

        logger.info(
            "[STARTUP] Auto-discovery completed: %d apps, %d servers, %d tools, %d agents",
            apps_discovered,
            discovery_result.servers_discovered,
            discovery_result.tools_discovered,
            len(agent_result.agents) if agent_result else 0,
        )

    except Exception as e:
        logger.error("[STARTUP] Auto-discovery failed: %s", e, exc_info=True)
        # Don't fail the app startup if discovery fails

    yield
    # Shutdown: Clean up resources (if needed)
# Create FastAPI application
app = FastAPI(
    title=settings.api_title,
    version=settings.api_version,
    description="""
    Multi-Agent Registry API for managing Databricks Apps, MCP servers, and tools.

    ## Features

    - **Apps**: Manage Databricks Apps metadata
    - **MCP Servers**: Manage MCP server configurations
    - **Tools**: Browse available tools from MCP servers
    - **Collections**: Create curated collections of apps, servers, and tools
    - **Discovery**: Automatic discovery of apps and tools from Databricks workspace

    ## Authentication

    This API uses On-Behalf-Of (OBO) authentication via Databricks Unity Catalog.
    Each request is executed with the caller's identity for proper governance.
    """,
    lifespan=lifespan,
    docs_url="/docs",
    redoc_url="/redoc",
    openapi_url="/openapi.json",
)

# CORS Middleware (must be added LAST so it runs FIRST and adds headers to all responses)
# Origins come from configuration; the deployed webapp URL is always included.
cors_origins_list = settings.cors_origins.split(",")
webapp_url = "https://multi-agent-registry-webapp-7474660127789418.aws.databricksapps.com"
if webapp_url not in cors_origins_list:
    cors_origins_list.append(webapp_url)

logger.info(f"CORS configured with origins: {cors_origins_list}")

app.add_middleware(
    CORSMiddleware,
    allow_origins=cors_origins_list,
    allow_credentials=settings.cors_credentials,
    allow_methods=settings.cors_methods.split(","),
    allow_headers=settings.cors_headers.split(","),
    expose_headers=["*"],  # Expose all headers to the client
)

# Authentication Middleware (added after CORS so it runs second)
from app.middleware.auth import DatabricksAuthMiddleware
if settings.auth_enabled:
    app.add_middleware(DatabricksAuthMiddleware)


# Exception Handlers


@app.exception_handler(RequestValidationError)
async def validation_exception_handler(request: Request, exc: RequestValidationError):
    """Return a 422 response carrying Pydantic's error list and the offending body."""
    payload = {
        "detail": exc.errors(),
        "body": exc.body,
    }
    return JSONResponse(
        status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
        content=payload,
    )
@app.exception_handler(Exception)
async def general_exception_handler(request: Request, exc: Exception):
    """
    Catch-all 500 handler that distinguishes database-layer errors.

    An exception whose defining module name contains "databricks" or "sql"
    is reported as a database error; anything else is a generic 500. Raw
    exception text is exposed only when DEBUG is enabled.
    """
    exc_module = type(exc).__module__
    is_db_error = "databricks" in exc_module or "sql" in exc_module

    if is_db_error:
        logger.error("Database error on %s %s: %s", request.method, request.url.path, exc)
        return JSONResponse(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            content={
                "detail": "Database error occurred",
                "error": str(exc) if settings.debug else "Internal server error",
            },
        )

    logger.error("Unhandled error on %s %s: %s", request.method, request.url.path, exc, exc_info=True)
    return JSONResponse(
        status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
        content={
            "detail": "Internal server error",
            "error": str(exc) if settings.debug else "An unexpected error occurred",
        },
    )


# Include Routers

# Health check endpoints (no prefix)
app.include_router(health.router)

# Standard API routers, all mounted under the configured API prefix
# (registration order preserved from the original explicit calls).
for _module in (apps, mcp_servers, tools, collections, discovery, supervisors, supervisor_runtime):
    app.include_router(_module.router, prefix=settings.api_prefix)

# Also mount at /supervisor for webapp compatibility (webapp expects /supervisor/chat)
app.include_router(supervisor_runtime.router, prefix="/supervisor")

for _module in (agents, admin, chat, agent_chat, traces, a2a, catalog_assets, workspace_assets):
    app.include_router(_module.router, prefix=settings.api_prefix)
app.include_router(search.router, prefix=settings.api_prefix)
app.include_router(lineage.router, prefix=settings.api_prefix)
app.include_router(audit_log.router, prefix=settings.api_prefix)
app.include_router(conversations.router, prefix=settings.api_prefix)


# Static files for React frontend
WEBAPP_DIST = Path(__file__).parent.parent / "webapp_dist"
if WEBAPP_DIST.exists():
    # Bundled assets carry hashed filenames (index-xyz123.js), so serving
    # them from a mounted static directory is safe to cache indefinitely.
    app.mount("/assets", StaticFiles(directory=WEBAPP_DIST / "assets"), name="assets")

    async def serve_react_app():
        """
        Serve the React index.html for client-side routed paths.

        The HTML itself is sent with no-cache headers so browsers always
        fetch the latest deployed bundle.
        """
        response = FileResponse(WEBAPP_DIST / "index.html")
        no_cache_headers = {
            "Cache-Control": "no-cache, no-store, must-revalidate",
            "Pragma": "no-cache",
            "Expires": "0",
        }
        for name, value in no_cache_headers.items():
            response.headers[name] = value
        return response

    # Register the SPA handler for every known React Router path.
    for _spa_path in (
        "/",
        "/discover",
        "/collections",
        "/chat",
        "/agents",
        "/agent-chat",
        "/lineage",
        "/audit-log",
    ):
        app.get(_spa_path, include_in_schema=False)(serve_react_app)
else:
    # Webapp not built, show API info at root
    @app.get("/", tags=["Root"])
    def root():
        """API information."""
        return {
            "name": settings.api_title,
            "version": settings.api_version,
            "docs": "/docs",
            "health": "/health",
            "api": settings.api_prefix,
        }


if __name__ == "__main__":
    import uvicorn

    uvicorn.run(
        "app.main:app",
        host=settings.host,
        port=settings.port,
        reload=settings.debug,
    )
import logging

from fastapi import Request, HTTPException, status as http_status
from fastapi.responses import JSONResponse
from starlette.middleware.base import BaseHTTPMiddleware

from app.config import settings

logger = logging.getLogger(__name__)

# Path prefixes that never require authentication: health/docs/schema plus
# read-only registry endpoints the frontend queries anonymously.
OPEN_PATHS = {
    "/health", "/ready", "/debug-db", "/docs", "/redoc", "/openapi.json",
    # Discovery/registry read-only endpoints - public for frontend access
    "/api/apps", "/api/agents", "/api/collections", "/api/tools",
    "/api/mcp-servers", "/api/catalog-assets", "/api/workspace-assets",
    "/api/discovery", "/api/search",
}


class DatabricksAuthMiddleware(BaseHTTPMiddleware):
    """Validates Databricks Apps platform headers on non-open endpoints."""

    async def dispatch(self, request: Request, call_next):
        """Pass through open/read-only/dev traffic; otherwise require identity headers."""
        path = request.url.path

        # Health, docs, schema, and whitelisted read-only prefixes are open.
        is_open_path = any(path.startswith(prefix) for prefix in OPEN_PATHS)
        # All GETs under /api/ count as read-only discovery traffic.
        is_readonly_api_get = path.startswith("/api/") and request.method == "GET"

        # Local dev mode (DEBUG=true) bypasses authentication entirely.
        if is_open_path or is_readonly_api_get or settings.debug:
            return await call_next(request)

        # Require at least one Databricks Apps identity header.
        user_email = request.headers.get("X-Forwarded-Email")
        access_token = request.headers.get("X-Forwarded-Access-Token")

        if not (user_email or access_token):
            logger.warning(
                "Unauthenticated request blocked: %s %s",
                request.method,
                path,
            )
            return JSONResponse(
                status_code=401,
                content={"detail": "Authentication required"},
            )

        # Expose the caller identity to route handlers via request.state.
        request.state.user_email = user_email or ""
        request.state.access_token = access_token or ""
        return await call_next(request)
b/dbx-agent-app/app/backend/app/models/__init__.py @@ -0,0 +1,48 @@ +""" +SQLAlchemy models for the Multi-Agent Registry. + +This package contains all database models: +- App: Databricks Apps metadata +- MCPServer: MCP server configurations +- Tool: Individual tools/functions from MCP servers +- Collection: Curated collections of tools +- CollectionItem: Many-to-many join table for collection membership +""" + +from app.models.app import App +from app.models.mcp_server import MCPServer, MCPServerKind +from app.models.tool import Tool +from app.models.collection import Collection +from app.models.collection_item import CollectionItem +from app.models.supervisor import Supervisor +from app.models.agent import Agent +from app.models.a2a_task import A2ATask +from app.models.discovery_state import DiscoveryState +from app.models.catalog_asset import CatalogAsset +from app.models.workspace_asset import WorkspaceAsset +from app.models.asset_embedding import AssetEmbedding +from app.models.asset_relationship import AssetRelationship +from app.models.audit_log import AuditLog +from app.models.conversation import Conversation, ConversationMessage +from app.models.agent_analytics import AgentAnalytics + +__all__ = [ + "App", + "MCPServer", + "MCPServerKind", + "Tool", + "Collection", + "CollectionItem", + "Supervisor", + "Agent", + "A2ATask", + "DiscoveryState", + "CatalogAsset", + "WorkspaceAsset", + "AssetEmbedding", + "AssetRelationship", + "AuditLog", + "Conversation", + "ConversationMessage", + "AgentAnalytics", +] diff --git a/dbx-agent-app/app/backend/app/models/a2a_task.py b/dbx-agent-app/app/backend/app/models/a2a_task.py new file mode 100644 index 00000000..53199c61 --- /dev/null +++ b/dbx-agent-app/app/backend/app/models/a2a_task.py @@ -0,0 +1,39 @@ +from sqlalchemy import Column, Integer, String, Text, DateTime, ForeignKey, Index +from datetime import datetime +from app.database import Base + + +class A2ATask(Base): + """ + A2A Task entity — tracks agent-to-agent 
class A2ATask(Base):
    """
    A2A Task entity — tracks agent-to-agent task lifecycle.

    Each task corresponds to a message/send call and tracks its state
    through submitted → working → completed/failed/canceled.
    """

    __tablename__ = "a2a_tasks"

    # A2A task IDs are UUID-sized strings supplied by the runtime.
    id = Column(String(36), primary_key=True)
    agent_id = Column(
        Integer,
        ForeignKey("agents.id", ondelete="CASCADE"),
        nullable=False,
    )
    context_id = Column(String(36), nullable=True)
    status = Column(String(32), nullable=False, default="submitted")
    messages = Column(Text, nullable=True)  # JSON array of A2A Message objects
    artifacts = Column(Text, nullable=True)  # JSON array of A2A Artifact objects
    metadata_json = Column(Text, nullable=True)  # Task-level metadata
    webhook_url = Column(Text, nullable=True)  # Push notification URL
    webhook_token = Column(Text, nullable=True)  # Push notification auth token
    # NOTE(review): datetime.utcnow is deprecated in Python 3.12; kept to
    # avoid changing stored values (naive UTC) — migrate deliberately.
    created_at = Column(DateTime, nullable=False, default=datetime.utcnow)
    updated_at = Column(DateTime, nullable=True, default=datetime.utcnow, onupdate=datetime.utcnow)

    __table_args__ = (
        Index("idx_a2a_task_agent_id", "agent_id"),
        Index("idx_a2a_task_context_id", "context_id"),
        Index("idx_a2a_task_status", "status"),
    )

    def __repr__(self) -> str:
        # BUG FIX: previously returned an empty string, which defeats the
        # purpose of defining __repr__ for debuggability.
        return f"<A2ATask id={self.id!r} agent_id={self.agent_id} status={self.status!r}>"
class Agent(Base):
    """
    First-class agent entity in the registry.

    Represents a discoverable, manageable agent that links to a Collection
    (its tool set) and carries its own metadata: name, description,
    capability tags, status, and endpoint URL.
    """

    __tablename__ = "agents"

    id = Column(Integer, primary_key=True, index=True, autoincrement=True)
    name = Column(String(255), unique=True, nullable=False)
    description = Column(Text, nullable=True)
    capabilities = Column(Text, nullable=True)
    status = Column(String(50), nullable=False, default="draft")
    collection_id = Column(
        Integer,
        ForeignKey("collections.id", ondelete="SET NULL"),
        nullable=True,
    )
    app_id = Column(
        Integer,
        ForeignKey("apps.id", ondelete="CASCADE"),
        nullable=True,
        comment="Link to the backing Databricks App (if this agent is app-based)",
    )
    endpoint_url = Column(Text, nullable=True)

    # A2A Protocol fields
    auth_token = Column(Text, nullable=True)
    a2a_capabilities = Column(Text, nullable=True)  # JSON: {"streaming": true, "pushNotifications": false}
    skills = Column(Text, nullable=True)  # JSON array: [{"id":"search","name":"Search","description":"...","tags":["rag"]}]
    protocol_version = Column(String(20), nullable=True, default="0.3.0")
    system_prompt = Column(Text, nullable=True)  # Rich persona / instructions for LLM

    created_at = Column(DateTime, nullable=False, default=datetime.utcnow)
    updated_at = Column(DateTime, nullable=True, default=datetime.utcnow, onupdate=datetime.utcnow)

    # Relationships
    collection = relationship("Collection", backref="agents")
    app = relationship("App", backref="agent", uselist=False)

    # Indexes for performance
    __table_args__ = (
        Index("idx_agent_name", "name"),
        Index("idx_agent_collection_id", "collection_id"),
    )

    def __repr__(self) -> str:
        # BUG FIX: previously returned an empty string, giving no debug info.
        return f"<Agent id={self.id} name={self.name!r} status={self.status!r}>"
class AgentAnalytics(Base):
    """Tracks per-invocation performance metrics for agents.

    One row is written per agent invocation, capturing outcome,
    latency, and an optional LLM-judged quality score.
    """

    __tablename__ = "agent_analytics"

    id = Column(Integer, primary_key=True, autoincrement=True)
    agent_id = Column(Integer, ForeignKey("agents.id", ondelete="CASCADE"), nullable=False)
    task_description = Column(Text, nullable=True)
    success = Column(Integer, default=1)  # 0 or 1
    latency_ms = Column(Integer, nullable=True)
    quality_score = Column(Integer, nullable=True)  # 1-5, from LLM evaluation
    error_message = Column(Text, nullable=True)
    created_at = Column(DateTime, nullable=False, default=datetime.utcnow)

    __table_args__ = (
        Index("idx_analytics_agent_id", "agent_id"),
        Index("idx_analytics_created_at", "created_at"),
    )

    def __repr__(self) -> str:
        # Fixed: was an empty f-string (repr content lost).
        return f"<AgentAnalytics(id={self.id}, agent_id={self.agent_id}, success={self.success})>"
class App(Base):
    """
    Databricks Apps metadata.

    Represents a Databricks App that may host one or more MCP servers.
    Apps are discovered from the workspace and registered in the catalog.

    Attributes:
        id: Primary key
        name: App name (e.g., "sgp-research-app")
        owner: App owner (username or service principal)
        url: Deployed app URL
        tags: Comma-separated tags for filtering
        manifest_url: URL to app.yaml or manifest file
    """

    __tablename__ = "apps"

    id = Column(Integer, primary_key=True, index=True, autoincrement=True)
    name = Column(String(255), nullable=False, unique=True)
    owner = Column(String(255), nullable=True)
    url = Column(Text, nullable=True)
    tags = Column(Text, nullable=True)
    manifest_url = Column(Text, nullable=True)

    # Relationships
    mcp_servers = relationship(
        "MCPServer",
        back_populates="app",
        cascade="all, delete-orphan",
    )
    collection_items = relationship(
        "CollectionItem",
        back_populates="app",
        cascade="all, delete-orphan",
    )

    # Indexes for performance
    __table_args__ = (
        Index("idx_app_name", "name"),
        Index("idx_app_owner", "owner"),
    )

    def __repr__(self) -> str:
        # Fixed: was an empty f-string (repr content lost).
        return f"<App(id={self.id}, name={self.name!r})>"
"table", "notebook", "app" + asset_id = Column(Integer, nullable=False) + text_content = Column(Text, nullable=False) # the text that was embedded + embedding_json = Column(Text, nullable=False) # JSON-serialized float array + embedding_model = Column(String(100), nullable=False) + dimension = Column(Integer, nullable=False) + created_at = Column(DateTime, server_default=func.now()) + updated_at = Column(DateTime, server_default=func.now(), onupdate=func.now()) + + __table_args__ = ( + Index("ix_asset_embedding_asset", "asset_type", "asset_id", unique=True), + Index("ix_asset_embedding_model", "embedding_model"), + ) diff --git a/dbx-agent-app/app/backend/app/models/asset_relationship.py b/dbx-agent-app/app/backend/app/models/asset_relationship.py new file mode 100644 index 00000000..1e3d26a0 --- /dev/null +++ b/dbx-agent-app/app/backend/app/models/asset_relationship.py @@ -0,0 +1,60 @@ +""" +AssetRelationship model — directed edges in the knowledge graph. + +Each row represents a relationship between two assets: + source (type + id) --[relationship_type]--> target (type + id) + +Examples: + - table A reads_from table B + - job X writes_to table Y + - notebook N depends_on table T + - dashboard D reads_from table T +""" + +from sqlalchemy import Column, Integer, String, Text, DateTime, Index +from sqlalchemy.sql import func +from app.database import Base + + +class AssetRelationship(Base): + __tablename__ = "asset_relationships" + + id = Column(Integer, primary_key=True, autoincrement=True) + + # Source node + source_type = Column(String(50), nullable=False) # e.g. 
"table", "job", "notebook" + source_id = Column(Integer, nullable=False) + source_name = Column(String(767), nullable=True) # denormalized for fast graph queries + + # Target node + target_type = Column(String(50), nullable=False) + target_id = Column(Integer, nullable=False) + target_name = Column(String(767), nullable=True) # denormalized + + # Edge metadata + relationship_type = Column(String(50), nullable=False) + # reads_from, writes_to, depends_on, created_by, + # uses_model, derived_from, scheduled_by, consumes + metadata_json = Column(Text, nullable=True) # extra context (column mappings, etc.) + + discovered_at = Column(DateTime, server_default=func.now()) + updated_at = Column(DateTime, server_default=func.now(), onupdate=func.now()) + + __table_args__ = ( + Index("ix_rel_source", "source_type", "source_id"), + Index("ix_rel_target", "target_type", "target_id"), + Index("ix_rel_type", "relationship_type"), + Index( + "ix_rel_unique_edge", + "source_type", "source_id", "target_type", "target_id", "relationship_type", + unique=True, + ), + ) + + def __repr__(self) -> str: + return ( + f" " + f"{self.target_type}:{self.target_id})>" + ) diff --git a/dbx-agent-app/app/backend/app/models/audit_log.py b/dbx-agent-app/app/backend/app/models/audit_log.py new file mode 100644 index 00000000..f6f2611f --- /dev/null +++ b/dbx-agent-app/app/backend/app/models/audit_log.py @@ -0,0 +1,34 @@ +from sqlalchemy import Column, Integer, String, Text, DateTime, Index +from datetime import datetime +from app.database import Base + + +class AuditLog(Base): + """ + Append-only audit log for tracking mutating API actions. + + Records who did what, when, to which resource — supporting + enterprise compliance and operational visibility. 
+ """ + + __tablename__ = "audit_logs" + + id = Column(Integer, primary_key=True, index=True, autoincrement=True) + timestamp = Column(DateTime, nullable=False, default=datetime.utcnow) + user_email = Column(String(255), nullable=False) + action = Column(String(50), nullable=False) # create, update, delete, crawl, clear + resource_type = Column(String(50), nullable=False) # agent, collection, catalog_asset, etc. + resource_id = Column(String(100), nullable=True) # String to support both int IDs and UUIDs + resource_name = Column(String(255), nullable=True) + details = Column(Text, nullable=True) # JSON text for extra context + ip_address = Column(String(45), nullable=True) # IPv4 or IPv6 + + __table_args__ = ( + Index("idx_audit_timestamp", "timestamp"), + Index("idx_audit_user_email", "user_email"), + Index("idx_audit_action", "action"), + Index("idx_audit_resource_type", "resource_type"), + ) + + def __repr__(self) -> str: + return f"" diff --git a/dbx-agent-app/app/backend/app/models/catalog_asset.py b/dbx-agent-app/app/backend/app/models/catalog_asset.py new file mode 100644 index 00000000..712df67f --- /dev/null +++ b/dbx-agent-app/app/backend/app/models/catalog_asset.py @@ -0,0 +1,62 @@ +from sqlalchemy import Column, Integer, String, Text, DateTime, Index +from datetime import datetime +from app.database import Base + + +class CatalogAsset(Base): + """ + Unity Catalog asset metadata. + + Represents a table, view, function, model, or volume discovered + from a Databricks Unity Catalog. Assets are indexed by the + CatalogCrawlerService and searchable via the Discover UI. 
class CatalogAsset(Base):
    """
    Unity Catalog asset metadata.

    Represents a table, view, function, model, or volume discovered
    from a Databricks Unity Catalog. Assets are indexed by the
    CatalogCrawlerService and searchable via the Discover UI.

    Attributes:
        id: Primary key
        asset_type: One of table, view, function, model, volume
        catalog: UC catalog name
        schema_name: UC schema name
        name: Asset name (table/view/function/model/volume name)
        full_name: Three-level namespace (catalog.schema.name)
        owner: Asset owner
        comment: Asset description/comment from UC
        columns_json: JSON blob of column definitions [{name, type, comment, nullable}]
        tags_json: JSON blob of UC tags
        properties_json: JSON blob of UC properties/metadata
        data_source_format: Storage format (DELTA, PARQUET, CSV, etc.)
        table_type: MANAGED, EXTERNAL, VIEW
        row_count: Approximate row count, when known
        created_at: When indexed
        updated_at: Last index update
        last_indexed_at: Timestamp of most recent crawl that touched this row
    """

    __tablename__ = "catalog_assets"

    id = Column(Integer, primary_key=True, index=True, autoincrement=True)
    asset_type = Column(String(50), nullable=False)
    catalog = Column(String(255), nullable=False)
    schema_name = Column(String(255), nullable=False)
    name = Column(String(255), nullable=False)
    full_name = Column(String(767), nullable=False, unique=True)
    owner = Column(String(255), nullable=True)
    comment = Column(Text, nullable=True)
    columns_json = Column(Text, nullable=True)
    tags_json = Column(Text, nullable=True)
    properties_json = Column(Text, nullable=True)
    data_source_format = Column(String(50), nullable=True)
    table_type = Column(String(50), nullable=True)
    row_count = Column(Integer, nullable=True)

    created_at = Column(DateTime, nullable=False, default=datetime.utcnow)
    updated_at = Column(DateTime, nullable=True, default=datetime.utcnow, onupdate=datetime.utcnow)
    last_indexed_at = Column(DateTime, nullable=True)

    __table_args__ = (
        Index("idx_catalog_asset_type", "asset_type"),
        Index("idx_catalog_asset_catalog_schema", "catalog", "schema_name"),
        Index("idx_catalog_asset_full_name", "full_name"),
        Index("idx_catalog_asset_owner", "owner"),
    )

    def __repr__(self) -> str:
        # Fixed: was an empty f-string (repr content lost).
        return f"<CatalogAsset(id={self.id}, type={self.asset_type}, full_name={self.full_name!r})>"
class Collection(Base):
    """
    Curated collections of tools/agents/servers.

    Collections are user-defined groupings of MCP resources (apps, servers, tools).
    They serve as the basis for generating supervisors and orchestrating multi-agent
    workflows. A collection can contain any combination of apps, servers, or individual tools.

    Attributes:
        id: Primary key
        name: Collection name (e.g., "Expert Research Toolkit")
        description: Human-readable description of the collection's purpose
    """

    __tablename__ = "collections"

    id = Column(Integer, primary_key=True, index=True, autoincrement=True)
    name = Column(String(255), nullable=False, unique=True)
    description = Column(Text, nullable=True)

    # Relationships
    items = relationship(
        "CollectionItem",
        back_populates="collection",
        cascade="all, delete-orphan",
    )

    # Indexes for performance
    __table_args__ = (Index("idx_collection_name", "name"),)

    def __repr__(self) -> str:
        # Fixed: was an empty f-string (repr content lost).
        return f"<Collection(id={self.id}, name={self.name!r})>"
class CollectionItem(Base):
    """
    Many-to-many join table for collection membership.

    Represents an item in a collection. Each item can be one of:
    - An App (app_id set, others null)
    - An MCP Server (mcp_server_id set, others null)
    - A Tool (tool_id set, others null)

    Exactly one of the foreign keys must be non-null (enforced by check constraint).

    Attributes:
        id: Primary key
        collection_id: Foreign key to parent collection
        app_id: Foreign key to App (nullable)
        mcp_server_id: Foreign key to MCP Server (nullable)
        tool_id: Foreign key to Tool (nullable)
    """

    __tablename__ = "collection_items"

    id = Column(Integer, primary_key=True, index=True, autoincrement=True)
    collection_id = Column(
        Integer,
        ForeignKey("collections.id", ondelete="CASCADE"),
        nullable=False,
    )
    app_id = Column(
        Integer,
        ForeignKey("apps.id", ondelete="CASCADE"),
        nullable=True,
    )
    mcp_server_id = Column(
        Integer,
        ForeignKey("mcp_servers.id", ondelete="CASCADE"),
        nullable=True,
    )
    tool_id = Column(
        Integer,
        ForeignKey("tools.id", ondelete="CASCADE"),
        nullable=True,
    )

    # Relationships
    collection = relationship("Collection", back_populates="items")
    app = relationship("App", back_populates="collection_items")
    mcp_server = relationship("MCPServer", back_populates="collection_items")
    tool = relationship("Tool", back_populates="collection_items")

    # Indexes and constraints
    __table_args__ = (
        Index("idx_collection_item_collection_id", "collection_id"),
        Index("idx_collection_item_app_id", "app_id"),
        Index("idx_collection_item_mcp_server_id", "mcp_server_id"),
        Index("idx_collection_item_tool_id", "tool_id"),
        # Ensure exactly one of the foreign keys is set
        CheckConstraint(
            "(app_id IS NOT NULL AND mcp_server_id IS NULL AND tool_id IS NULL) OR "
            "(app_id IS NULL AND mcp_server_id IS NOT NULL AND tool_id IS NULL) OR "
            "(app_id IS NULL AND mcp_server_id IS NULL AND tool_id IS NOT NULL)",
            name="chk_collection_item_exactly_one_ref",
        ),
    )

    def __repr__(self) -> str:
        # Fixed: repr tail was an empty f-string. Also use explicit
        # None-checks instead of truthiness so an id of 0 is not skipped.
        if self.app_id is not None:
            ref = f"app_id={self.app_id}"
        elif self.mcp_server_id is not None:
            ref = f"server_id={self.mcp_server_id}"
        else:
            ref = f"tool_id={self.tool_id}"
        return f"<CollectionItem(id={self.id}, collection_id={self.collection_id}, {ref})>"
f"server_id={self.mcp_server_id}" + if self.mcp_server_id + else f"tool_id={self.tool_id}" + ) + return f"" diff --git a/dbx-agent-app/app/backend/app/models/conversation.py b/dbx-agent-app/app/backend/app/models/conversation.py new file mode 100644 index 00000000..25503446 --- /dev/null +++ b/dbx-agent-app/app/backend/app/models/conversation.py @@ -0,0 +1,72 @@ +from sqlalchemy import Column, Integer, String, Text, DateTime, ForeignKey, Index +from datetime import datetime +from app.database import Base + + +class Conversation(Base): + """ + Persistent chat conversation. + + Stores conversation metadata for the chat interface. Each conversation + contains an ordered list of messages and is optionally scoped to a + tool collection. + + Attributes: + id: UUID string primary key (generated client- or server-side) + title: Auto-generated from first user message, renamable + user_email: Owner of the conversation + collection_id: Optional FK to the tool collection used + created_at: When the conversation started + updated_at: Last message timestamp + """ + + __tablename__ = "conversations" + + id = Column(String(36), primary_key=True) + title = Column(String(255), nullable=False, default="New conversation") + user_email = Column(String(255), nullable=True) + collection_id = Column(Integer, nullable=True) + created_at = Column(DateTime, nullable=False, default=datetime.utcnow) + updated_at = Column(DateTime, nullable=True, default=datetime.utcnow, onupdate=datetime.utcnow) + + __table_args__ = ( + Index("idx_conversation_user", "user_email"), + Index("idx_conversation_updated", "updated_at"), + ) + + def __repr__(self) -> str: + return f"" + + +class ConversationMessage(Base): + """ + Individual message within a conversation. + + Stores each user/assistant message with optional trace linkage + for the inspector panel. 
class ConversationMessage(Base):
    """
    Individual message within a conversation.

    Stores each user/assistant message with optional trace linkage
    for the inspector panel.

    Attributes:
        id: Auto-increment primary key
        conversation_id: FK to parent conversation
        role: 'user' or 'assistant'
        content: Message text
        trace_id: Optional link to MLflow trace
        created_at: Message timestamp
    """

    __tablename__ = "conversation_messages"

    id = Column(Integer, primary_key=True, index=True, autoincrement=True)
    conversation_id = Column(String(36), ForeignKey("conversations.id", ondelete="CASCADE"), nullable=False)
    role = Column(String(20), nullable=False)
    content = Column(Text, nullable=False)
    trace_id = Column(String(36), nullable=True)
    created_at = Column(DateTime, nullable=False, default=datetime.utcnow)

    __table_args__ = (
        Index("idx_conv_msg_conversation", "conversation_id"),
        Index("idx_conv_msg_created", "created_at"),
    )

    def __repr__(self) -> str:
        # Fixed: was an empty f-string (repr content lost).
        return f"<ConversationMessage(id={self.id}, conversation_id={self.conversation_id!r}, role={self.role})>"
+ """ + + __tablename__ = "discovery_state" + + id = Column(Integer, primary_key=True, default=1) + is_running = Column(Boolean, default=False, nullable=False) + last_run_timestamp = Column(String(64), nullable=True) + last_run_status = Column(String(32), nullable=True) + last_run_message = Column(Text, nullable=True) + + def __repr__(self) -> str: + return f"" diff --git a/dbx-agent-app/app/backend/app/models/mcp_server.py b/dbx-agent-app/app/backend/app/models/mcp_server.py new file mode 100644 index 00000000..90dc2571 --- /dev/null +++ b/dbx-agent-app/app/backend/app/models/mcp_server.py @@ -0,0 +1,75 @@ +from sqlalchemy import Column, Integer, String, Text, ForeignKey, Index, Enum as SQLEnum +from sqlalchemy.orm import relationship +from app.database import Base +import enum + + +class MCPServerKind(str, enum.Enum): + """ + MCP Server types. + + - managed: Servers from Databricks MCP catalog (official) + - external: Third-party MCP servers (GitHub, npm, etc.) + - custom: User-deployed MCP servers (private) + """ + + MANAGED = "managed" + EXTERNAL = "external" + CUSTOM = "custom" + + +class MCPServer(Base): + """ + MCP (Model Context Protocol) server configurations. + + Represents an MCP server that provides tools/functions to agents. + Servers can be managed (Databricks catalog), external (third-party), + or custom (user-deployed). 
class MCPServer(Base):
    """
    MCP (Model Context Protocol) server configurations.

    Represents an MCP server that provides tools/functions to agents.
    Servers can be managed (Databricks catalog), external (third-party),
    or custom (user-deployed).

    Attributes:
        id: Primary key
        app_id: Foreign key to parent App (nullable for standalone servers)
        server_url: MCP server endpoint URL
        kind: Server type (managed/external/custom)
        uc_connection: Unity Catalog connection name (for governance)
        scopes: Comma-separated OAuth scopes required
    """

    __tablename__ = "mcp_servers"

    id = Column(Integer, primary_key=True, index=True, autoincrement=True)
    app_id = Column(
        Integer,
        ForeignKey("apps.id", ondelete="CASCADE"),
        nullable=True,
    )
    server_url = Column(Text, nullable=False)
    kind = Column(
        SQLEnum(MCPServerKind),
        nullable=False,
        default=MCPServerKind.CUSTOM,
    )
    uc_connection = Column(String(255), nullable=True)
    scopes = Column(Text, nullable=True)

    # Relationships
    app = relationship("App", back_populates="mcp_servers")
    tools = relationship(
        "Tool",
        back_populates="mcp_server",
        cascade="all, delete-orphan",
    )
    collection_items = relationship(
        "CollectionItem",
        back_populates="mcp_server",
        cascade="all, delete-orphan",
    )

    # Indexes for performance
    __table_args__ = (
        Index("idx_mcp_server_app_id", "app_id"),
        Index("idx_mcp_server_kind", "kind"),
    )

    def __repr__(self) -> str:
        # Fixed: was an empty f-string (repr content lost).
        return f"<MCPServer(id={self.id}, kind={self.kind}, url={self.server_url!r})>"
class Supervisor(Base):
    """
    Generated supervisors metadata tracking.

    Tracks supervisors that have been generated from collections,
    including when they were generated and where they are deployed.

    Attributes:
        id: Primary key
        collection_id: Reference to the collection used for generation
        app_name: Generated app name
        generated_at: Timestamp when supervisor was generated
        deployed_url: URL where the supervisor is deployed (optional)
    """

    __tablename__ = "supervisors"

    id = Column(Integer, primary_key=True, index=True, autoincrement=True)
    collection_id = Column(
        Integer,
        ForeignKey("collections.id", ondelete="CASCADE"),
        nullable=False,
    )
    app_name = Column(String(255), nullable=False)
    generated_at = Column(DateTime, nullable=False, default=datetime.utcnow)
    deployed_url = Column(Text, nullable=True)

    # Relationships
    collection = relationship("Collection", backref="supervisors")

    # Indexes for performance
    __table_args__ = (
        Index("idx_supervisor_collection_id", "collection_id"),
        Index("idx_supervisor_app_name", "app_name"),
    )

    def __repr__(self) -> str:
        # Fixed: was an empty f-string (repr content lost).
        return f"<Supervisor(id={self.id}, app_name={self.app_name!r}, collection_id={self.collection_id})>"
class Tool(Base):
    """
    Individual tools/functions from MCP servers.

    Represents a single tool exposed by an MCP server. Tools are discovered
    by querying the MCP server's tool listing endpoint. Each tool has a name,
    description, and parameter schema (JSON Schema format).

    Attributes:
        id: Primary key
        mcp_server_id: Foreign key to parent MCP server
        name: Tool name (e.g., "search_transcripts")
        description: Human-readable description
        parameters: JSON Schema for tool parameters (stored as text)
    """

    __tablename__ = "tools"

    id = Column(Integer, primary_key=True, index=True, autoincrement=True)
    mcp_server_id = Column(
        Integer,
        ForeignKey("mcp_servers.id", ondelete="CASCADE"),
        nullable=False,
    )
    name = Column(String(255), nullable=False)
    description = Column(Text, nullable=True)
    parameters = Column(Text, nullable=True)  # JSON Schema as text

    # Relationships
    mcp_server = relationship("MCPServer", back_populates="tools")
    collection_items = relationship(
        "CollectionItem",
        back_populates="tool",
        cascade="all, delete-orphan",
    )

    # Indexes for performance
    __table_args__ = (
        Index("idx_tool_mcp_server_id", "mcp_server_id"),
        Index("idx_tool_name", "name"),
    )

    def __repr__(self) -> str:
        # Fixed: was an empty f-string (repr content lost).
        return f"<Tool(id={self.id}, name={self.name!r}, mcp_server_id={self.mcp_server_id})>"
class WorkspaceAsset(Base):
    """
    Databricks workspace object metadata.

    Represents a notebook, job, dashboard, SQL query, pipeline, cluster,
    or experiment discovered from a Databricks workspace. Assets are indexed
    by the WorkspaceCrawlerService and searchable via the Discover UI.

    Attributes:
        id: Primary key
        asset_type: One of notebook, job, dashboard, sql_query, pipeline, cluster, experiment
        workspace_host: Databricks workspace URL
        path: Workspace path or resource identifier
        name: Human-readable name
        owner: Asset owner/creator
        description: Asset description
        language: For notebooks: python, sql, r, scala
        tags_json: JSON blob of tags/labels
        metadata_json: Type-specific metadata (job schedule, cluster config, etc.)
        content_preview: First 500 chars of content for search matching
        resource_id: Workspace-native identifier, when available
        created_at: When indexed
        updated_at: Last index update
        last_indexed_at: Timestamp of most recent crawl
    """

    __tablename__ = "workspace_assets"

    id = Column(Integer, primary_key=True, index=True, autoincrement=True)
    asset_type = Column(String(50), nullable=False)
    workspace_host = Column(String(512), nullable=False)
    path = Column(Text, nullable=False)
    name = Column(String(255), nullable=False)
    owner = Column(String(255), nullable=True)
    description = Column(Text, nullable=True)
    language = Column(String(50), nullable=True)
    tags_json = Column(Text, nullable=True)
    metadata_json = Column(Text, nullable=True)
    content_preview = Column(Text, nullable=True)
    resource_id = Column(String(255), nullable=True)

    created_at = Column(DateTime, nullable=False, default=datetime.utcnow)
    updated_at = Column(DateTime, nullable=True, default=datetime.utcnow, onupdate=datetime.utcnow)
    last_indexed_at = Column(DateTime, nullable=True)

    __table_args__ = (
        Index("idx_workspace_asset_type", "asset_type"),
        Index("idx_workspace_asset_host", "workspace_host"),
        Index("idx_workspace_asset_owner", "owner"),
        Index("idx_workspace_asset_name", "name"),
    )

    def __repr__(self) -> str:
        # Fixed: was an empty f-string (repr content lost).
        return f"<WorkspaceAsset(id={self.id}, type={self.asset_type}, name={self.name!r})>"
-0,0 +1,38 @@ +""" +API route modules. + +This package contains all API route handlers: +- health: Health and readiness checks +- apps: App CRUD operations +- mcp_servers: MCP Server CRUD operations +- tools: Tool listing (read-only) +- collections: Collection and CollectionItem CRUD operations +- discovery: MCP catalog discovery (Phase 2.2) +- supervisors: Supervisor generation (Phase 3.3) +- admin: Admin operations (database management) +- agents: Agent CRUD operations +- chat: Chat interface for testing agents and tools +""" + +from . import health, apps, mcp_servers, tools, collections, discovery, supervisors, agents, admin, chat, traces, a2a, catalog_assets, workspace_assets, search, lineage, audit_log, conversations + +__all__ = [ + "health", + "apps", + "mcp_servers", + "tools", + "collections", + "discovery", + "supervisors", + "agents", + "admin", + "chat", + "traces", + "a2a", + "catalog_assets", + "workspace_assets", + "search", + "lineage", + "audit_log", + "conversations", +] diff --git a/dbx-agent-app/app/backend/app/routes/a2a.py b/dbx-agent-app/app/backend/app/routes/a2a.py new file mode 100644 index 00000000..fc2c5cd0 --- /dev/null +++ b/dbx-agent-app/app/backend/app/routes/a2a.py @@ -0,0 +1,515 @@ +""" +A2A Protocol endpoint — JSON-RPC 2.0 handler for agent-to-agent communication. 
def _validate_bearer_token(request: Request, agent: Dict) -> None:
    """Validate the request's Bearer token when the agent has auth_token set.

    No-op for agents without an auth_token. Raises HTTP 401 when the
    Authorization header is missing, malformed, or does not match.

    Security: the token comparison uses hmac.compare_digest so the
    check runs in constant time, avoiding a timing side channel that
    a plain `!=` comparison would expose.
    """
    from hmac import compare_digest  # local import: only needed on the auth path

    expected = agent.get("auth_token")
    if not expected:
        return
    auth_header = request.headers.get("authorization", "")
    if not auth_header.startswith("Bearer "):
        raise HTTPException(status_code=401, detail="Missing Bearer token")
    if not compare_digest(auth_header[7:], expected):
        raise HTTPException(status_code=401, detail="Invalid Bearer token")
if task_dict.get("artifacts"): + try: + artifacts = json.loads(task_dict["artifacts"]) + except (json.JSONDecodeError, TypeError): + pass + + metadata = None + if task_dict.get("metadata_json"): + try: + metadata = json.loads(task_dict["metadata_json"]) + except (json.JSONDecodeError, TypeError): + pass + + return { + "id": task_dict["id"], + "contextId": task_dict.get("context_id"), + "status": {"state": task_dict["status"]}, + "messages": messages, + "artifacts": artifacts, + "metadata": metadata, + } + + +def _jsonrpc_success(rpc_id: Any, result: Any) -> Dict[str, Any]: + return {"jsonrpc": "2.0", "id": rpc_id, "result": result} + + +def _jsonrpc_error(rpc_id: Any, code: int, message: str) -> Dict[str, Any]: + return {"jsonrpc": "2.0", "id": rpc_id, "error": {"code": code, "message": message}} + + +# ────────────────────── LLM processing (reuses chat.py patterns) ────────────────────── + +async def _process_agent_task(agent: Dict, message_text: str) -> str: + """ + Process a task through the agent's tool collection and LLM. + + Reuses the same Foundation Model + MCP tool-calling pattern from chat.py. + """ + from app.routes.chat import ( + get_available_tools, + format_tools_for_llm, + call_foundation_model, + call_mcp_tool, + ) + + collection_id = agent.get("collection_id") + tools = get_available_tools(collection_id=collection_id) + llm_tools = format_tools_for_llm(tools) if tools else None + + if agent.get('system_prompt'): + system_prompt = agent['system_prompt'] + else: + system_prompt = ( + f"You are {agent['name']}" + + (f" — {agent['description']}" if agent.get('description') else "") + + ". Respond helpfully using your available tools." 
async def _process_agent_task(agent: Dict, message_text: str) -> str:
    """Run one A2A task through the agent's tool collection and LLM.

    Mirrors the Foundation Model + MCP tool-calling flow in chat.py:
    a first LLM call, at most one round of tool execution, then a final
    LLM call to compose the answer from the tool results.
    """
    # Imported lazily to avoid a circular import with the chat route.
    from app.routes.chat import (
        get_available_tools,
        format_tools_for_llm,
        call_foundation_model,
        call_mcp_tool,
    )

    tools = get_available_tools(collection_id=agent.get("collection_id"))
    llm_tools = format_tools_for_llm(tools) if tools else None

    # Prefer the agent's configured persona; otherwise synthesize one.
    system_prompt = agent.get('system_prompt')
    if not system_prompt:
        system_prompt = f"You are {agent['name']}"
        if agent.get('description'):
            system_prompt += f" — {agent['description']}"
        system_prompt += ". Respond helpfully using your available tools."

    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": message_text},
    ]

    # First LLM pass.
    first = await call_foundation_model(messages, llm_tools)
    reply = first.get("choices", [{}])[0].get("message", {})

    # Single tool-execution round, matching chat.py's dual-pass pattern.
    if reply.get("tool_calls"):
        for tool_call in reply["tool_calls"]:
            func = tool_call.get("function", {})
            tool_name = func.get("name", "")
            try:
                arguments = json.loads(func.get("arguments", "{}"))
            except json.JSONDecodeError:
                arguments = {}

            tool_info = next((t for t in tools if t.get('name') == tool_name), None)
            if not tool_info:
                continue
            server = WarehouseDB.get_mcp_server(tool_info.get('mcp_server_id'))
            if not server:
                continue

            result = await call_mcp_tool(server.get('server_url'), tool_name, arguments)
            messages.append({
                "role": "assistant",
                "content": None,
                "tool_calls": [tool_call],
            })
            messages.append({
                "role": "tool",
                "tool_call_id": tool_call.get("id"),
                "content": result,
            })

        # Final LLM pass over the augmented transcript.
        final = await call_foundation_model(messages, llm_tools)
        reply = final.get("choices", [{}])[0].get("message", {})

    return reply.get("content", "I could not generate a response.")
async def _handle_send_message(
    agent: Dict, params: Dict[str, Any]
) -> Dict[str, Any]:
    """Handle message/send — create task, process, return completed task."""
    msg_data = params.get("message", {})
    # Concatenate every text part into one prompt string.
    message_text = "".join(
        part["text"] for part in msg_data.get("parts", []) if part.get("text")
    )
    if not message_text:
        return _jsonrpc_error(None, -32602, "Message must contain at least one text part")

    task_id = str(uuid.uuid4())
    context_id = msg_data.get("contextId") or str(uuid.uuid4())

    # Record the task as submitted, then immediately mark it working.
    WarehouseDB.create_a2a_task(
        task_id=task_id,
        agent_id=agent["id"],
        context_id=context_id,
        status=TaskState.SUBMITTED.value,
        messages=json.dumps([msg_data]),
    )
    WarehouseDB.update_a2a_task(task_id, status=TaskState.WORKING.value)

    try:
        response_text = await _process_agent_task(agent, message_text)

        # Persist the agent reply both as a message and as an artifact.
        response_msg = {
            "messageId": str(uuid.uuid4()),
            "role": "agent",
            "parts": [{"text": response_text}],
            "contextId": context_id,
        }
        artifact = {
            "artifactId": str(uuid.uuid4()),
            "name": "response",
            "parts": [{"text": response_text}],
        }
        WarehouseDB.update_a2a_task(
            task_id,
            status=TaskState.COMPLETED.value,
            messages=json.dumps([msg_data, response_msg]),
            artifacts=json.dumps([artifact]),
        )
    except Exception as e:
        logger.error("A2A task %s failed: %s", task_id, e, exc_info=True)
        WarehouseDB.update_a2a_task(task_id, status=TaskState.FAILED.value)
        return _task_to_a2a_response(WarehouseDB.get_a2a_task(task_id))

    task_dict = WarehouseDB.get_a2a_task(task_id)

    # Best-effort push notification when a webhook is registered.
    if task_dict.get("webhook_url"):
        _fire_push_notification(task_dict)

    return _task_to_a2a_response(task_dict)


async def _handle_get_task(params: Dict[str, Any]) -> Dict[str, Any]:
    """Handle tasks/get."""
    task_id = params.get("id")
    if not task_id:
        return _jsonrpc_error(None, -32602, "Missing required parameter: id")

    task_dict = WarehouseDB.get_a2a_task(task_id)
    if task_dict is None:
        return _jsonrpc_error(None, -32001, f"Task {task_id} not found")
    return _task_to_a2a_response(task_dict)
# Strong references to in-flight push-notification tasks. asyncio keeps only
# weak references to Tasks, so without this set a notification task could be
# garbage-collected before it finishes sending.
_push_notification_tasks: set = set()


async def _handle_cancel_task(params: Dict[str, Any]) -> Dict[str, Any]:
    """Handle tasks/cancel — transition to canceled if not terminal."""
    task_id = params.get("id")
    if not task_id:
        return _jsonrpc_error(None, -32602, "Missing required parameter: id")

    task_dict = WarehouseDB.get_a2a_task(task_id)
    if not task_dict:
        return _jsonrpc_error(None, -32001, f"Task {task_id} not found")

    # Fix: a corrupt/unknown status stored in the warehouse would raise
    # ValueError here; treat unknown values as non-terminal so the task
    # can still be canceled instead of the RPC crashing.
    try:
        current_state = TaskState(task_dict["status"])
    except ValueError:
        current_state = None
    if current_state is not None and current_state in TERMINAL_STATES:
        return _jsonrpc_error(None, -32003, f"Cannot cancel task in terminal state: {current_state.value}")

    WarehouseDB.update_a2a_task(task_id, status=TaskState.CANCELED.value)
    return _task_to_a2a_response(WarehouseDB.get_a2a_task(task_id))


async def _handle_list_tasks(
    agent_id: int, params: Dict[str, Any]
) -> Dict[str, Any]:
    """Handle tasks/list — paginated listing scoped to one agent."""
    context_id = params.get("contextId")
    task_status = params.get("status")
    page = params.get("page", 1)
    page_size = params.get("pageSize", 50)

    tasks, total = WarehouseDB.list_a2a_tasks(
        agent_id=agent_id,
        context_id=context_id,
        status=task_status,
        page=page,
        page_size=page_size,
    )

    return {
        "tasks": [_task_to_a2a_response(t) for t in tasks],
        "total": total,
    }


# ────────────────────── Push notification helper ──────────────────────

def _fire_push_notification(task_dict: Dict) -> None:
    """Fire async push notification (best-effort, non-blocking)."""
    from app.services.a2a_notifications import send_push_notification
    import asyncio

    try:
        # Fix: asyncio.get_event_loop() is deprecated outside a running loop
        # (Python 3.10+) and no longer creates one in 3.12; get_running_loop()
        # is the supported way to reach the loop we are already inside.
        loop = asyncio.get_running_loop()
    except RuntimeError:
        logger.warning(
            "No running event loop; push notification for task %s dropped",
            task_dict.get("id"),
        )
        return

    try:
        task = loop.create_task(send_push_notification(
            webhook_url=task_dict["webhook_url"],
            task_id=task_dict["id"],
            status=task_dict["status"],
            webhook_token=task_dict.get("webhook_token"),
            artifacts=task_dict.get("artifacts"),
        ))
        # Keep a strong reference until the task completes (see note above).
        _push_notification_tasks.add(task)
        task.add_done_callback(_push_notification_tasks.discard)
    except Exception as e:
        logger.warning("Failed to fire push notification: %s", e)
@router.post(
    "/{agent_id}",
    status_code=status.HTTP_200_OK,
    summary="A2A JSON-RPC Endpoint",
    description="Handle A2A protocol JSON-RPC methods: message/send, tasks/get, tasks/cancel, tasks/list",
)
async def a2a_jsonrpc(agent_id: int, rpc: JsonRpcRequest, request: Request) -> Dict[str, Any]:
    """Single A2A JSON-RPC 2.0 dispatch endpoint."""
    agent = WarehouseDB.get_agent(agent_id)
    if agent is None:
        return _jsonrpc_error(rpc.id, -32001, f"Agent {agent_id} not found")

    _validate_bearer_token(request, agent)

    params = rpc.params or {}
    method = rpc.method

    # Explicit dispatch — each supported method awaits its handler directly.
    if method == "message/send":
        result = await _handle_send_message(agent, params)
    elif method == "tasks/get":
        result = await _handle_get_task(params)
    elif method == "tasks/cancel":
        result = await _handle_cancel_task(params)
    elif method == "tasks/list":
        result = await _handle_list_tasks(agent_id, params)
    else:
        return _jsonrpc_error(rpc.id, -32601, f"Method not found: {method}")

    # Handlers may return a pre-built error envelope; stamp the request id on it.
    if isinstance(result, dict) and "error" in result:
        return {**result, "id": rpc.id}

    return _jsonrpc_success(rpc.id, result)
@router.post(
    "/{agent_id}/stream",
    status_code=status.HTTP_200_OK,
    summary="A2A Streaming Message",
    description="Send a message and receive SSE stream of task events",
)
async def a2a_stream(agent_id: int, rpc: JsonRpcRequest, request: Request):
    """SSE streaming endpoint for message/stream."""
    agent = WarehouseDB.get_agent(agent_id)
    if agent is None:
        raise HTTPException(status_code=404, detail=f"Agent {agent_id} not found")

    _validate_bearer_token(request, agent)

    params = rpc.params or {}
    msg_data = params.get("message", {})
    message_text = "".join(
        part["text"] for part in msg_data.get("parts", []) if part.get("text")
    )
    if not message_text:
        raise HTTPException(status_code=400, detail="Message must contain text")

    task_id = str(uuid.uuid4())
    context_id = msg_data.get("contextId") or str(uuid.uuid4())

    def _sse(event: str, payload: dict) -> str:
        # Serialize one Server-Sent Events frame.
        return f"event: {event}\ndata: {json.dumps(payload)}\n\n"

    async def event_generator():
        # Create the task and announce "submitted".
        WarehouseDB.create_a2a_task(
            task_id=task_id,
            agent_id=agent["id"],
            context_id=context_id,
            status=TaskState.SUBMITTED.value,
            messages=json.dumps([msg_data]),
        )
        yield _sse("task_status", {"taskId": task_id, "status": "submitted"})

        # Transition to "working".
        WarehouseDB.update_a2a_task(task_id, status=TaskState.WORKING.value)
        yield _sse("task_status", {"taskId": task_id, "status": "working", "stateReason": "Processing request..."})

        try:
            response_text = await _process_agent_task(agent, message_text)

            # Emit the response artifact.
            artifact = {
                "artifactId": str(uuid.uuid4()),
                "name": "response",
                "parts": [{"text": response_text}],
            }
            yield _sse("task_artifact", {"taskId": task_id, "artifact": artifact})

            # Persist the completed task and announce it.
            response_msg = {
                "messageId": str(uuid.uuid4()),
                "role": "agent",
                "parts": [{"text": response_text}],
                "contextId": context_id,
            }
            WarehouseDB.update_a2a_task(
                task_id,
                status=TaskState.COMPLETED.value,
                messages=json.dumps([msg_data, response_msg]),
                artifacts=json.dumps([artifact]),
            )
            yield _sse("task_status", {"taskId": task_id, "status": "completed"})

        except Exception as e:
            logger.error("Streaming task %s failed: %s", task_id, e, exc_info=True)
            WarehouseDB.update_a2a_task(task_id, status=TaskState.FAILED.value)
            yield _sse("task_status", {"taskId": task_id, "status": "failed", "stateReason": str(e)})

    return StreamingResponse(event_generator(), media_type="text/event-stream")
@router.get(
    "/{agent_id}/tasks/{task_id}/subscribe",
    status_code=status.HTTP_200_OK,
    summary="Subscribe to Task Updates",
    description="SSE stream for existing task — polls DB and emits events until terminal state",
)
async def a2a_subscribe(agent_id: int, task_id: str, request: Request):
    """Subscribe to task status updates via SSE.

    Polls the warehouse once per second; emits a ``task_status`` event on
    every change, then (at a terminal state) the task's artifacts, and closes.
    """
    agent = WarehouseDB.get_agent(agent_id)
    if not agent:
        raise HTTPException(status_code=404, detail=f"Agent {agent_id} not found")

    # Security/consistency fix: every other A2A endpoint validates the agent's
    # bearer token; subscribing to task data must not be the unauthenticated
    # exception.
    _validate_bearer_token(request, agent)

    task = WarehouseDB.get_a2a_task(task_id)
    if not task:
        raise HTTPException(status_code=404, detail=f"Task {task_id} not found")

    async def event_generator():
        last_status = None
        while True:
            task_dict = WarehouseDB.get_a2a_task(task_id)
            if not task_dict:
                break

            current_status = task_dict["status"]
            if current_status != last_status:
                yield f"event: task_status\ndata: {json.dumps({'taskId': task_id, 'status': current_status})}\n\n"
                last_status = current_status

            # Fix: an unknown/corrupt status value would raise ValueError and
            # kill the stream; treat it as non-terminal and keep polling.
            try:
                is_terminal = TaskState(current_status) in TERMINAL_STATES
            except ValueError:
                is_terminal = False

            if is_terminal:
                # Emit artifacts once, then close the stream.
                if task_dict.get("artifacts"):
                    try:
                        artifacts = json.loads(task_dict["artifacts"])
                        for artifact in artifacts:
                            yield f"event: task_artifact\ndata: {json.dumps({'taskId': task_id, 'artifact': artifact})}\n\n"
                    except (json.JSONDecodeError, TypeError):
                        pass
                break

            await asyncio.sleep(1)

    return StreamingResponse(event_generator(), media_type="text/event-stream")


# ────────────────────── Webhook Registration (Phase 3) ──────────────────────

class WebhookRegistration(BaseModel):
    # Target URL for push notifications; optional bearer token sent back
    # with each delivery.
    url: str
    token: Optional[str] = None


@router.post(
    "/{agent_id}/tasks/{task_id}/webhook",
    status_code=status.HTTP_200_OK,
    summary="Register Webhook",
    description="Register a push-notification webhook for task state transitions",
)
async def register_webhook(
    agent_id: int, task_id: str, webhook: WebhookRegistration, request: Request
) -> Dict[str, Any]:
    """Register webhook URL + token on an A2A task."""
    agent = WarehouseDB.get_agent(agent_id)
    if not agent:
        raise HTTPException(status_code=404, detail=f"Agent {agent_id} not found")

    _validate_bearer_token(request, agent)

    task = WarehouseDB.get_a2a_task(task_id)
    if not task:
        raise HTTPException(status_code=404, detail=f"Task {task_id} not found")

    WarehouseDB.update_a2a_task(
        task_id,
        webhook_url=webhook.url,
        webhook_token=webhook.token,
    )

    return {"status": "registered", "task_id": task_id, "webhook_url": webhook.url}
@router.post(
    "/initialize-demo-data",
    status_code=status.HTTP_200_OK,
    summary="Initialize Demo Data",
    description="Populate the registry with demo apps, MCP servers, and tools"
)
def initialize_demo_data() -> Dict[str, Any]:
    """
    Initialize the registry with demo data including:
    - Guidepoint workspace apps
    - MCP servers
    - Tools

    Returns counts of created items. ``success`` is True only when no
    per-item errors occurred; partial failures are listed in ``stats.errors``.
    """
    stats = {
        "apps_created": 0,
        "servers_created": 0,
        "tools_created": 0,
        "collections_created": 0,
        "errors": []
    }

    # Demo apps
    demo_apps = [
        {
            "name": "guidepoint-sgp-research",
            "owner": "Guidepoint",
            "url": "https://guidepoint-sgp-research-7474660127789418.aws.databricksapps.com",
            "tags": "research,transcripts,experts,sgp",
        },
        {
            "name": "guidepoint-agent-discovery",
            "owner": "Guidepoint",
            "url": "https://guidepoint-agent-discovery-7474660127789418.aws.databricksapps.com",
            "tags": "discovery,agents,tools",
        },
        {
            "name": "guidepoint-chat-ui",
            "owner": "Guidepoint",
            "url": "https://guidepoint-chat-ui-7474660127789418.aws.databricksapps.com",
            "tags": "ui,chat,interface",
        }
    ]

    # Create apps (best-effort: one failure does not abort the rest)
    for app_data in demo_apps:
        try:
            WarehouseDB.create_app(**app_data)
            stats["apps_created"] += 1
        except Exception as e:
            stats["errors"].append(f"Error creating app {app_data['name']}: {str(e)}")

    # Demo MCP server + its tools (tools depend on the server id)
    try:
        server = WarehouseDB.create_mcp_server(
            server_url="https://guidepoint-sgp-research-7474660127789418.aws.databricksapps.com/mcp",
            kind="managed"
        )
        stats["servers_created"] += 1
        server_id = server["id"]

        demo_tools = [
            {
                "mcp_server_id": server_id,
                "name": "search_transcripts",
                "description": "Search expert transcripts by keywords, topics, or expert criteria.",
                "parameters": json.dumps({
                    "type": "object",
                    "properties": {
                        "query": {"type": "string", "description": "Search query"},
                        "limit": {"type": "integer", "default": 10}
                    },
                    "required": ["query"]
                })
            },
            {
                "mcp_server_id": server_id,
                "name": "get_expert_profile",
                "description": "Get detailed profile information for a specific expert.",
                "parameters": json.dumps({
                    "type": "object",
                    "properties": {
                        "expert_id": {"type": "string", "description": "Expert ID"}
                    },
                    "required": ["expert_id"]
                })
            },
            {
                "mcp_server_id": server_id,
                "name": "find_experts",
                "description": "Find experts by industry, topic, or expertise area.",
                "parameters": json.dumps({
                    "type": "object",
                    "properties": {
                        "industry": {"type": "string"},
                        "expertise": {"type": "string"},
                        "limit": {"type": "integer", "default": 20}
                    }
                })
            }
        ]

        for tool_data in demo_tools:
            try:
                WarehouseDB.create_tool(**tool_data)
                stats["tools_created"] += 1
            except Exception as e:
                stats["errors"].append(f"Error creating tool {tool_data['name']}: {str(e)}")

    except Exception as e:
        stats["errors"].append(f"Error creating MCP server: {str(e)}")

    # Create a demo collection
    try:
        WarehouseDB.create_collection(
            name="Research Tools",
            description="Collection of tools for searching expert transcripts and profiles"
        )
        # Fix: use += for consistency with the other counters.
        stats["collections_created"] += 1
    except Exception as e:
        stats["errors"].append(f"Error creating demo collection: {str(e)}")

    # Fix: the original always reported success=True / "initialized
    # successfully" even when every step errored; reflect reality so
    # callers do not have to dig through stats.
    success = not stats["errors"]
    if success:
        message = "Demo data initialized successfully"
    else:
        message = f"Demo data initialized with {len(stats['errors'])} errors"

    return {
        "success": success,
        "message": message,
        "stats": stats
    }
@router.delete(
    "/clear-all-data",
    status_code=status.HTTP_200_OK,
    summary="Clear All Data",
    description="Delete all apps, servers, tools, and collections (for development only)"
)
def clear_all_data() -> Dict[str, Any]:
    """
    Clear all data from the registry.
    WARNING: This is destructive and cannot be undone!
    """
    # Not supported yet: WarehouseDB has no delete_all methods, so this
    # endpoint deliberately reports 501 rather than partially wiping data.
    raise HTTPException(
        status_code=status.HTTP_501_NOT_IMPLEMENTED,
        detail="Clear all data not yet implemented for warehouse backend"
    )
@router.post("/query", response_model=AgentChatResponse)
async def query_endpoint(request: AgentChatRequest) -> AgentChatResponse:
    """
    Query a Databricks serving endpoint.

    Sends the message to the specified endpoint and returns the response
    enriched with routing, slot filling, and pipeline metadata.

    Raises:
        HTTPException: 400 for invalid endpoint/message, 500 for any other
            failure while querying the serving endpoint.
    """
    try:
        service = create_agent_chat_service()
        return await service.query_endpoint(request.endpoint_name, request.message)
    except ValueError as e:
        # Fix: chain the cause (`from e`) so the original traceback is
        # preserved in logs and debugging (PEP 3134 / flake8-bugbear B904).
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=str(e),
        ) from e
    except Exception as e:
        # Fix: include the traceback in the error log.
        logger.error("Agent chat query failed: %s", e, exc_info=True)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Agent chat query failed: {str(e)}",
        ) from e


@router.get("/endpoints", response_model=AgentChatEndpointsResponse)
async def get_endpoints() -> AgentChatEndpointsResponse:
    """
    List available agent chat endpoints.

    Returns the hardcoded demo endpoint list. Can be enhanced later
    to pull from the database or workspace.
    """
    endpoints = [
        AgentChatEndpoint(
            name="guidepoint_research",
            displayName="Research Agent",
            description="Search and analyze expert interview transcripts",
            type="research",
        ),
        AgentChatEndpoint(
            name="guidepoint_supervisor",
            displayName="Supervisor Agent",
            description="Routes queries to specialized sub-agents",
            type="supervisor",
        ),
        AgentChatEndpoint(
            name="agents_users-roberto_sanchez-agent",
            displayName="Roberto's Routing Agent",
            description="Fast routing assistant for services data",
            type="research",
        ),
    ]

    return AgentChatEndpointsResponse(
        endpoints=endpoints,
        count=len(endpoints),
    )
def _safe_agent_response(agent_dict: dict) -> AgentResponse:
    """Build AgentResponse, stripping auth_token from output."""
    sanitized = dict(agent_dict)
    sanitized.pop("auth_token", None)  # never expose credentials to clients
    return AgentResponse(**sanitized)


@router.get(
    "",
    response_model=PaginatedResponse[AgentResponse],
    status_code=status.HTTP_200_OK,
    summary="List Agents",
    description="List all registered agents with pagination",
)
def list_agents(
    page: int = Query(1, ge=1, description="Page number"),
    page_size: int = Query(50, ge=1, le=100, description="Items per page"),
) -> PaginatedResponse[AgentResponse]:
    """List all agents with pagination."""
    agents, total = WarehouseDB.list_agents(page=page, page_size=page_size)
    # The warehouse may return the count as a string — normalize to int.
    total = int(total) if total else 0
    pages = math.ceil(total / page_size) if total > 0 else 1

    return PaginatedResponse(
        items=[_safe_agent_response(a) for a in agents],
        total=total,
        page=page,
        page_size=page_size,
        total_pages=pages,
    )
@router.post(
    "",
    response_model=AgentResponse,
    status_code=status.HTTP_201_CREATED,
    summary="Create Agent",
    description="Register a new agent",
)
def create_agent(agent_data: AgentCreate, request: Request) -> AgentResponse:
    """Create a new agent and record an audit entry."""
    try:
        agent = WarehouseDB.create_agent(
            name=agent_data.name,
            description=agent_data.description,
            capabilities=agent_data.capabilities,
            status=agent_data.status,
            collection_id=agent_data.collection_id,
            endpoint_url=agent_data.endpoint_url,
            auth_token=agent_data.auth_token,
            a2a_capabilities=agent_data.a2a_capabilities,
            skills=agent_data.skills,
            protocol_version=agent_data.protocol_version,
            system_prompt=agent_data.system_prompt,
        )
        record_audit(request, "create", "agent", str(agent["id"]), agent["name"])
        return _safe_agent_response(agent)
    except Exception as e:
        logger.error("Failed to create agent: %s", e)
        # Deliberately generic detail — do not leak internals to the client.
        raise HTTPException(
            status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
            detail="Failed to create agent",
        )


@router.get(
    "/{agent_id}",
    response_model=AgentResponse,
    status_code=status.HTTP_200_OK,
    summary="Get Agent",
    description="Get a specific agent by ID",
)
def get_agent(agent_id: int) -> AgentResponse:
    """Get a specific agent by ID.

    Raises:
        HTTPException: 404 when no agent has the given id.
    """
    agent = WarehouseDB.get_agent(agent_id)
    if not agent:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Agent with id {agent_id} not found",
        )
    return _safe_agent_response(agent)


@router.put(
    "/{agent_id}",
    response_model=AgentResponse,
    status_code=status.HTTP_200_OK,
    summary="Update Agent",
    description="Update an existing agent",
)
def update_agent(agent_id: int, agent_data: AgentUpdate, request: Request) -> AgentResponse:
    """Update an existing agent and record an audit entry.

    Raises:
        HTTPException: 404 when no agent has the given id.
    """
    existing = WarehouseDB.get_agent(agent_id)
    if not existing:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Agent with id {agent_id} not found",
        )

    update_dict = agent_data.model_dump(exclude_unset=True)
    # Fix: a request body with no set fields yields {}; avoid issuing a
    # column-less UPDATE to the warehouse — the current state is the result.
    if not update_dict:
        return _safe_agent_response(existing)

    agent = WarehouseDB.update_agent(agent_id, **update_dict)
    record_audit(request, "update", "agent", str(agent_id), agent["name"])
    return _safe_agent_response(agent)
@router.delete(
    "/{agent_id}",
    status_code=status.HTTP_204_NO_CONTENT,
    summary="Delete Agent",
    description="Delete an agent from the registry",
)
def delete_agent(agent_id: int, request: Request) -> None:
    """Delete an agent and record an audit entry."""
    existing = WarehouseDB.get_agent(agent_id)
    if not existing:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Agent with id {agent_id} not found",
        )
    WarehouseDB.delete_agent(agent_id)
    record_audit(request, "delete", "agent", str(agent_id), existing["name"])


@router.get(
    "/{agent_id}/card",
    response_model=AgentCardResponse,
    status_code=status.HTTP_200_OK,
    summary="Get Agent Card",
    description="Get A2A-compliant Agent Card for discovery",
)
def get_agent_card(agent_id: int, request: Request) -> AgentCardResponse:
    """Generate A2A-compliant Agent Card from agent model data."""
    agent = WarehouseDB.get_agent(agent_id)
    if not agent:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Agent with id {agent_id} not found",
        )

    # A2A URL: the agent's own endpoint wins; otherwise fall back to this
    # registry's A2A endpoint for the agent.
    base_url = settings.a2a_base_url or str(request.base_url).rstrip("/")
    a2a_url = agent.get("endpoint_url") or f"{base_url}/api/a2a/{agent_id}"

    # Capabilities: stored as JSON; malformed data falls back to defaults.
    caps = A2ACapabilities()
    raw_caps = agent.get("a2a_capabilities")
    if raw_caps:
        try:
            caps = A2ACapabilities(**json.loads(raw_caps))
        except (json.JSONDecodeError, TypeError):
            pass

    # Skills: stored as a JSON list; malformed data yields an empty list.
    skill_list = []
    raw_skills = agent.get("skills")
    if raw_skills:
        try:
            skill_list = [A2ASkill(**s) for s in json.loads(raw_skills)]
        except (json.JSONDecodeError, TypeError):
            pass

    # Advertise bearer auth only when the agent actually has a token.
    security_schemes = None
    security = None
    if agent.get("auth_token"):
        security_schemes = {
            "bearerAuth": {
                "type": "http",
                "scheme": "bearer",
            }
        }
        security = [{"bearerAuth": []}]

    return AgentCardResponse(
        name=agent["name"],
        description=agent.get("description"),
        version=settings.api_version,
        protocolVersion=agent.get("protocol_version") or settings.a2a_protocol_version,
        url=a2a_url,
        capabilities=caps,
        skills=skill_list,
        securitySchemes=security_schemes,
        security=security,
    )
@router.get(
    "/{agent_id}/analytics",
    status_code=status.HTTP_200_OK,
    summary="Get Agent Analytics",
    description="Get performance analytics for an agent (summary stats + recent history)",
)
def get_agent_analytics(
    agent_id: int,
    limit: int = Query(20, ge=1, le=100, description="Max recent entries to return"),
) -> Dict[str, Any]:
    """Get summary stats and recent analytics history for an agent."""
    agent = WarehouseDB.get_agent(agent_id)
    if not agent:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Agent with id {agent_id} not found",
        )

    return {
        "agent_id": agent_id,
        "agent_name": agent["name"],
        "summary": WarehouseDB.get_agent_summary_stats(agent_id),
        "recent": WarehouseDB.list_agent_analytics(agent_id, limit=limit),
    }
@router.get(
    "",
    response_model=PaginatedResponse[AppResponse],
    status_code=status.HTTP_200_OK,
    summary="List Apps",
    description="List all registered apps with pagination",
)
def list_apps(
    page: int = Query(1, ge=1, description="Page number"),
    page_size: int = Query(50, ge=1, le=100, description="Items per page"),
    owner: str | None = Query(None, description="Filter by owner"),
) -> PaginatedResponse[AppResponse]:
    """List all apps with pagination."""
    logger.info(f"[READ] Listing apps (page={page}, page_size={page_size}, owner={owner})")
    apps, total = WarehouseDB.list_apps(page=page, page_size=page_size, owner=owner)
    logger.info(f"[READ] Found {total} apps total, returning {len(apps)} on this page")
    # The warehouse may return the count as a string — normalize to int.
    total = int(total) if total else 0
    pages = math.ceil(total / page_size) if total > 0 else 1

    return PaginatedResponse(
        items=[AppResponse(**app) for app in apps],
        total=total,
        page=page,
        page_size=page_size,
        total_pages=pages,
    )


@router.post(
    "",
    response_model=AppResponse,
    status_code=status.HTTP_201_CREATED,
    summary="Create App",
    description="Register a new Databricks App",
)
def create_app(app_data: AppCreate) -> AppResponse:
    """Create a new app."""
    try:
        created = WarehouseDB.create_app(
            name=app_data.name,
            owner=app_data.owner,
            url=app_data.url,
            tags=app_data.tags,
            manifest_url=app_data.manifest_url,
        )
    except Exception as e:
        logger.error("Failed to create app: %s", e)
        raise HTTPException(
            status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
            detail="Failed to create app",
        )
    return AppResponse(**created)
@router.get(
    "/{app_id}",
    response_model=AppResponse,
    status_code=status.HTTP_200_OK,
    summary="Get App",
    description="Get a specific app by ID",
)
def get_app(app_id: int) -> AppResponse:
    """Get a specific app by ID."""
    app = WarehouseDB.get_app(app_id)
    if app is None:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"App with id {app_id} not found",
        )
    return AppResponse(**app)


@router.put(
    "/{app_id}",
    response_model=AppResponse,
    status_code=status.HTTP_200_OK,
    summary="Update App",
    description="Update an existing app",
)
def update_app(app_id: int, app_data: AppUpdate) -> AppResponse:
    """Update an existing app."""
    if WarehouseDB.get_app(app_id) is None:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"App with id {app_id} not found",
        )

    # Only fields explicitly set in the request body are updated.
    changes = app_data.model_dump(exclude_unset=True)
    updated = WarehouseDB.update_app(app_id, **changes)
    return AppResponse(**updated)


@router.delete(
    "/{app_id}",
    status_code=status.HTTP_204_NO_CONTENT,
    summary="Delete App",
    description="Delete an app from the registry",
)
def delete_app(app_id: int) -> None:
    """Delete an app."""
    if WarehouseDB.get_app(app_id) is None:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"App with id {app_id} not found",
        )
    WarehouseDB.delete_app(app_id)
@router.get(
    "",
    response_model=PaginatedResponse[AuditLogResponse],
    status_code=status.HTTP_200_OK,
    summary="List Audit Log Entries",
    description="Query audit log entries with optional filters and pagination",
)
def list_audit_log(
    page: int = Query(1, ge=1, description="Page number"),
    page_size: int = Query(50, ge=1, le=200, description="Items per page"),
    user_email: Optional[str] = Query(None, description="Filter by user email"),
    action: Optional[str] = Query(None, description="Filter by action type"),
    resource_type: Optional[str] = Query(None, description="Filter by resource type"),
    date_from: Optional[str] = Query(None, description="Start date (ISO format)"),
    date_to: Optional[str] = Query(None, description="End date (ISO format)"),
) -> PaginatedResponse[AuditLogResponse]:
    """List audit log entries with optional filters."""
    entries, total = WarehouseDB.list_audit_logs(
        page=page,
        page_size=page_size,
        user_email=user_email,
        action=action,
        resource_type=resource_type,
        date_from=date_from,
        date_to=date_to,
    )
    # The warehouse may return the count as a string — normalize to int.
    total = int(total) if total else 0
    pages = math.ceil(total / page_size) if total > 0 else 1

    return PaginatedResponse(
        items=[AuditLogResponse(**entry) for entry in entries],
        total=total,
        page=page,
        page_size=page_size,
        total_pages=pages,
    )
@router.get(
    "",
    response_model=PaginatedResponse[CatalogAssetResponse],
    status_code=status.HTTP_200_OK,
    summary="List Catalog Assets",
    description="List indexed Unity Catalog assets with filtering and pagination",
)
def list_catalog_assets(
    page: int = Query(1, ge=1, description="Page number"),
    page_size: int = Query(50, ge=1, le=200, description="Items per page"),
    # Fix: these optional filters were annotated plain `str` with a None
    # default; annotate `str | None` (the convention already used in apps.py).
    asset_type: str | None = Query(None, description="Filter by type (table, view, function, model, volume)"),
    catalog: str | None = Query(None, description="Filter by catalog name"),
    schema_name: str | None = Query(None, description="Filter by schema name"),
    search: str | None = Query(None, description="Search by name, comment, or column names"),
    owner: str | None = Query(None, description="Filter by owner"),
) -> PaginatedResponse[CatalogAssetResponse]:
    """List catalog assets with optional filters."""
    assets, total = WarehouseDB.list_catalog_assets(
        page=page,
        page_size=page_size,
        asset_type=asset_type,
        catalog=catalog,
        schema_name=schema_name,
        search=search,
        owner=owner,
    )
    total = int(total) if total else 0  # Convert string to int from warehouse
    total_pages = math.ceil(total / page_size) if total > 0 else 1

    return PaginatedResponse(
        items=[CatalogAssetResponse(**a) for a in assets],
        total=total,
        page=page,
        page_size=page_size,
        total_pages=total_pages,
    )
@router.get(
    "/{asset_id}",
    response_model=CatalogAssetResponse,
    status_code=status.HTTP_200_OK,
    summary="Get Catalog Asset",
    description="Get a specific catalog asset by ID",
)
def get_catalog_asset(asset_id: int) -> CatalogAssetResponse:
    """Return a single catalog asset, or 404 if the id is unknown."""
    asset = WarehouseDB.get_catalog_asset(asset_id)
    if not asset:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Catalog asset with id {asset_id} not found",
        )
    return CatalogAssetResponse(**asset)


@router.post(
    "/crawl",
    response_model=CatalogCrawlResponse,
    status_code=status.HTTP_200_OK,
    summary="Crawl Unity Catalog",
    description="Trigger a crawl of Unity Catalog to index tables, views, functions, models, and volumes",
)
def crawl_catalog(request: CatalogCrawlRequest = None, http_request: Request = None) -> CatalogCrawlResponse:
    """
    Trigger a Unity Catalog crawl.

    This synchronously crawls the UC hierarchy and indexes all discovered assets.
    For large catalogs, consider running via background task.
    """
    if request is None:
        request = CatalogCrawlRequest()

    try:
        # Imported lazily so the route module loads even if the crawler's
        # optional dependencies are missing.
        from app.services.catalog_crawler import CatalogCrawlerService

        service = CatalogCrawlerService(profile=request.databricks_profile)
        stats = service.crawl(
            catalogs=request.catalogs,
            include_columns=request.include_columns,
        )

        # Classify the outcome: errors with nothing discovered is a failure,
        # errors alongside discoveries is a partial success.
        if stats.errors and stats.assets_discovered == 0:
            result_status = "failed"
            message = f"Catalog crawl failed with {len(stats.errors)} errors"
        elif stats.errors:
            result_status = "partial"
            message = f"Catalog crawl completed with {len(stats.errors)} errors"
        else:
            result_status = "success"
            message = f"Crawled {stats.catalogs_crawled} catalogs, {stats.schemas_crawled} schemas, {stats.assets_discovered} assets"

        if http_request:
            record_audit(http_request, "crawl", "catalog_asset", details={
                "catalogs_crawled": stats.catalogs_crawled,
                "assets_discovered": stats.assets_discovered,
                "new_assets": stats.new_assets,
            })

        return CatalogCrawlResponse(
            status=result_status,
            message=message,
            catalogs_crawled=stats.catalogs_crawled,
            schemas_crawled=stats.schemas_crawled,
            assets_discovered=stats.assets_discovered,
            new_assets=stats.new_assets,
            updated_assets=stats.updated_assets,
            errors=stats.errors,
        )
    except Exception as e:
        # FIX: logger.error with just the message loses the traceback;
        # logger.exception records it. Chain the cause so debuggers can see it.
        logger.exception("Catalog crawl failed")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Catalog crawl failed: {e}",
        ) from e


@router.delete(
    "",
    status_code=status.HTTP_204_NO_CONTENT,
    summary="Clear Catalog Assets",
    description="Delete all indexed catalog assets (useful for re-indexing)",
)
def clear_catalog_assets(http_request: Request) -> None:
    """Delete all catalog assets and record the action in the audit log."""
    WarehouseDB.clear_catalog_assets()
    record_audit(http_request, "clear", "catalog_asset")
class BoundedDict(OrderedDict):
    """An OrderedDict with a hard size cap.

    Once the cap is exceeded, the oldest (least-recently-set) entries are
    evicted. Re-setting an existing key refreshes its recency.
    """

    def __init__(self, max_size: int = 1000, *args, **kwargs):
        # The cap must exist before super().__init__ seeds any initial
        # items, because seeding goes through our __setitem__.
        self._max_size = max_size
        super().__init__(*args, **kwargs)

    def __setitem__(self, key, value):
        was_present = key in self
        super().__setitem__(key, value)
        if was_present:
            # Updating an existing key keeps its slot in OrderedDict, so
            # explicitly push it to the fresh end of the eviction order.
            self.move_to_end(key)
        # Trim from the stale end until we are back under the cap.
        while len(self) > self._max_size:
            self.popitem(last=False)
def get_llm_client():
    """Return a Databricks workspace client used for Foundation Model calls."""
    # Imported lazily so this module can be imported without the SDK present.
    from databricks.sdk import WorkspaceClient

    client = WorkspaceClient()
    return client
def format_tools_for_llm(tools: List[Dict]) -> List[Dict]:
    """Convert registry tool rows into OpenAI-compatible function-call specs."""
    specs: List[Dict] = []
    for tool in tools:
        raw = tool.get('parameters')
        schema = {}
        if raw:
            try:
                schema = json.loads(raw)
            except json.JSONDecodeError:
                # Malformed parameter JSON degrades to "no parameters".
                schema = {}

        # Bare property maps must be wrapped in the JSON Schema
        # {"type": "object", ...} envelope expected by the LLM API.
        if schema and schema.get("type") != "object":
            schema = {
                "type": "object",
                "properties": schema,
                "required": [],
            }

        specs.append({
            "type": "function",
            "function": {
                "name": tool.get('name', ''),
                "description": tool.get('description', ''),
                "parameters": schema if schema else {"type": "object", "properties": {}},
            },
        })
    return specs
call_foundation_model( + messages: List[Dict], + tools: List[Dict] = None +) -> Dict: + """Call Databricks Foundation Model API (async).""" + from databricks.sdk import WorkspaceClient + w = WorkspaceClient() + + endpoint_url = f"{w.config.host}/serving-endpoints/{LLM_ENDPOINT}/invocations" + + request_body = { + "messages": messages, + "max_tokens": MAX_TOKENS, + } + + if tools: + request_body["tools"] = tools + request_body["tool_choice"] = "auto" + + headers = {"Content-Type": "application/json"} + headers.update(w.config.authenticate()) + + async with httpx.AsyncClient(timeout=120.0) as client: + response = await client.post(endpoint_url, json=request_body, headers=headers) + + if response.status_code != 200: + raise HTTPException( + status_code=response.status_code, + detail=f"LLM API error: {response.text}" + ) + + return response.json() + + +def _make_span(trace_id: str, name: str, start_s: float, end_s: float, + attributes: Dict[str, Any], span_status: str = "OK") -> Dict[str, Any]: + """Build a span dict matching the frontend Span interface.""" + return { + "id": str(uuid.uuid4()), + "trace_id": trace_id, + "name": name, + "start_time": int(start_s * 1000), + "end_time": int(end_s * 1000), + "attributes": attributes, + "status": span_status, + } + + +DATABRICKS_SYSTEM_PROMPT = ( + "You are an intelligent assistant for a Databricks workspace. You have access to tools " + "registered in the Multi-Agent Registry — MCP servers, catalog assets, notebooks, jobs, " + "and other workspace resources.\n\n" + "When answering questions:\n" + "- Use available tools to look up live data when possible\n" + "- Reference specific catalog assets (tables, views, functions) by their full name\n" + "- Explain your reasoning and tool usage clearly" +) + + +async def _run_chat(request: ChatRequest, root_span=None) -> ChatResponse: + """Core chat logic. 
async def _run_chat(request: ChatRequest, root_span=None) -> ChatResponse:
    """Core chat logic. If root_span is provided, child spans are logged to MLflow.

    Flow: resolve tools -> build prompt with history -> call the model ->
    execute any requested MCP tool calls -> ask the model for a final answer ->
    persist the exchange and return a ChatResponse with trace metadata.
    """
    start_time = time.time()
    query = request.effective_query
    use_mlflow = root_span is not None and _mlflow_available

    trace_id = root_span.request_id if use_mlflow else str(uuid.uuid4())

    if use_mlflow:
        root_span.set_inputs({"query": query, "collection_id": request.collection_id})

    # Initialize in-memory trace stores (fast path for SSE streaming)
    trace_events[trace_id] = []
    trace_spans[trace_id] = []

    trace_events[trace_id].append({
        "type": "request.started",
        "timestamp": time.time(),
        "data": {"query": query},
    })

    # Get or create conversation — persist to DB with in-memory fallback
    conversation_id = request.conversation_id or str(uuid.uuid4())
    db_messages = []

    try:
        existing = WarehouseDB.get_conversation(conversation_id)
        if existing:
            db_messages = existing.get("messages", [])
        else:
            # New conversation: derive a short title from the first query.
            title = query[:60] + ("..." if len(query) > 60 else "")
            WarehouseDB.create_conversation(
                id=conversation_id,
                title=title,
                collection_id=request.collection_id,
            )
    except Exception as e:
        logger.warning("DB conversation access failed, using in-memory fallback: %s", e)

    # In-memory fallback for conversation history
    if conversation_id not in conversations:
        conversations[conversation_id] = []

    # Get available tools
    tools = get_available_tools(
        collection_id=request.collection_id,
        mcp_server_id=int(request.agent_id) if request.agent_id else None
    )

    # Build system prompt with context injection
    from app.services.chat_context import enrich_system_prompt
    system_prompt = enrich_system_prompt(DATABRICKS_SYSTEM_PROMPT, query)

    messages = [{"role": "system", "content": system_prompt}]

    # Add conversation history — prefer DB messages, fall back to in-memory.
    # Only the last 10 turns are replayed to bound the prompt size.
    history = db_messages if db_messages else conversations[conversation_id]
    for msg in history[-10:]:
        messages.append({"role": msg.get("role"), "content": msg.get("content")})

    # Add current query
    messages.append({"role": "user", "content": query})

    # Format tools for LLM
    llm_tools = format_tools_for_llm(tools) if tools else None

    tool_calls_made = []

    # Call Foundation Model (with MLflow child span)
    llm_start = time.time()
    if use_mlflow:
        with mlflow.start_span(name="llm_call", span_type=SpanType.CHAT_MODEL) as llm_span:
            llm_span.set_inputs({"messages_count": len(messages), "tools_count": len(llm_tools or [])})
            response = await call_foundation_model(messages, llm_tools)
            llm_span.set_outputs({"model": response.get("model", ""), "usage": response.get("usage", {})})
    else:
        response = await call_foundation_model(messages, llm_tools)
    llm_end = time.time()

    trace_spans[trace_id].append(
        _make_span(trace_id, "llm_call", llm_start, llm_end,
                   {"messages_count": len(messages), "tools_count": len(llm_tools or [])})
    )

    # Extract response
    choice = response.get("choices", [{}])[0]
    message = choice.get("message", {})

    # Handle tool calls
    if message.get("tool_calls"):
        for tool_call in message["tool_calls"]:
            func = tool_call.get("function", {})
            tool_name = func.get("name", "")
            try:
                arguments = json.loads(func.get("arguments", "{}"))
            except json.JSONDecodeError:
                arguments = {}

            # Find the tool's MCP server; skip tool calls we can't resolve.
            tool_info = next((t for t in tools if t.get('name') == tool_name), None)
            if not tool_info:
                continue
            server = WarehouseDB.get_mcp_server(tool_info.get('mcp_server_id'))
            if not server:
                continue

            tool_start = time.time()
            trace_events[trace_id].append({
                "type": "tool.called",
                "timestamp": time.time(),
                "data": {"tool_name": tool_name, "arguments": arguments},
            })

            # Call MCP tool (with MLflow child span)
            if use_mlflow:
                with mlflow.start_span(name=f"tool:{tool_name}", span_type=SpanType.TOOL) as tool_span:
                    tool_span.set_inputs({"tool_name": tool_name, "arguments": arguments})
                    result = await call_mcp_tool(
                        server.get('server_url'), tool_name, arguments
                    )
                    tool_latency = (time.time() - tool_start) * 1000
                    tool_span.set_outputs({"latency_ms": tool_latency})
            else:
                result = await call_mcp_tool(
                    server.get('server_url'), tool_name, arguments
                )
                tool_latency = (time.time() - tool_start) * 1000

            tool_end = time.time()
            trace_events[trace_id].append({
                "type": "tool.output",
                "timestamp": time.time(),
                "data": {"tool_name": tool_name, "latency_ms": tool_latency},
            })
            trace_spans[trace_id].append(
                _make_span(trace_id, f"tool:{tool_name}", tool_start, tool_end,
                           {"tool_name": tool_name, "latency_ms": tool_latency})
            )

            tool_calls_made.append(ToolCall(
                tool_name=tool_name,
                arguments=arguments,
                result=result[:500] if result else None,
                latency_ms=tool_latency
            ))

            # Feed the tool exchange back into the message history
            messages.append({
                "role": "assistant",
                "content": None,
                "tool_calls": [tool_call]
            })
            messages.append({
                "role": "tool",
                "tool_call_id": tool_call.get("id"),
                "content": result
            })

        # FIX: issue ONE follow-up model call after ALL tool results are
        # appended (previously nested in the per-tool path, producing a
        # redundant model round-trip for every tool call). Guarded so we
        # only re-call when at least one tool actually executed.
        if tool_calls_made:
            llm_final_start = time.time()
            if use_mlflow:
                with mlflow.start_span(name="llm_final", span_type=SpanType.CHAT_MODEL) as final_span:
                    final_span.set_inputs({"messages_count": len(messages)})
                    response = await call_foundation_model(messages, llm_tools)
                    final_span.set_outputs({"model": response.get("model", ""), "usage": response.get("usage", {})})
            else:
                response = await call_foundation_model(messages, llm_tools)
            llm_final_end = time.time()

            trace_spans[trace_id].append(
                _make_span(trace_id, "llm_final", llm_final_start, llm_final_end,
                           {"messages_count": len(messages)})
            )

            choice = response.get("choices", [{}])[0]
            message = choice.get("message", {})

    # FIX: the model may return an explicit null content (e.g. a pure
    # tool-call turn); `.get(key, default)` does NOT substitute for an
    # explicit None, which previously let None propagate into len() and
    # the persisted history. `or` covers both missing and null.
    response_text = message.get("content") or "I'm sorry, I couldn't generate a response."

    # Save to conversation history — DB + in-memory fallback
    conversations[conversation_id].append({"role": "user", "content": query})
    conversations[conversation_id].append({"role": "assistant", "content": response_text})

    try:
        WarehouseDB.create_conversation_message(conversation_id, "user", query)
        WarehouseDB.create_conversation_message(conversation_id, "assistant", response_text, trace_id=trace_id)
    except Exception as e:
        logger.warning("Failed to persist messages to DB: %s", e)

    latency_ms = (time.time() - start_time) * 1000

    # Final trace event
    trace_events[trace_id].append({
        "type": "response.done",
        "timestamp": time.time(),
        "data": {"latency_ms": latency_ms},
    })
    end_time = time.time()
    trace_spans[trace_id].append(
        _make_span(trace_id, "chat", start_time, end_time,
                   {"latency_ms": latency_ms, "tools_called": len(tool_calls_made)})
    )

    if use_mlflow:
        root_span.set_outputs({"response_length": len(response_text), "tools_called": len(tool_calls_made)})

    return ChatResponse(
        query=query,
        response=response_text,
        text=response_text,
        trace_id=trace_id,
        conversation_id=conversation_id,
        routed_to=request.agent_id,
        agent_display_name=f"Collection {request.collection_id}" if request.collection_id else "All Tools",
        latency_ms=latency_ms,
        tool_calls=tool_calls_made,
        technical_context={
            "routing_strategy": "direct" if request.agent_id else "auto",
            "tools_available": len(tools),
            "tools_called": len(tool_calls_made),
            "llm_endpoint": LLM_ENDPOINT,
        }
    )
+ """ + try: + if _mlflow_available: + with mlflow.start_span(name="chat", span_type=SpanType.AGENT) as root_span: + return await _run_chat(request, root_span=root_span) + else: + return await _run_chat(request) + except HTTPException: + raise + except Exception as e: + logger.error("Chat request failed: %s", e, exc_info=True) + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=f"Chat request failed: {str(e)}", + ) + + +@router.get("/conversations/{conversation_id}") +async def get_conversation(conversation_id: str) -> Dict: + """Get conversation history.""" + if conversation_id not in conversations: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Conversation {conversation_id} not found" + ) + + return { + "conversation_id": conversation_id, + "messages": conversations[conversation_id], + "message_count": len(conversations[conversation_id]) + } + + +@router.post("/conversations/{conversation_id}/clear") +async def clear_conversation(conversation_id: str) -> Dict: + """Clear conversation history.""" + if conversation_id in conversations: + del conversations[conversation_id] + + return {"status": "cleared", "conversation_id": conversation_id} + + +@router.get("/tools/preview") +async def preview_available_tools( + collection_id: Optional[int] = None, + mcp_server_id: Optional[int] = None +) -> Dict: + """ + Preview which tools would be available for a chat request. + + Useful for debugging tool resolution from collections. 
+ """ + tools = get_available_tools(collection_id=collection_id, mcp_server_id=mcp_server_id) + llm_tools = format_tools_for_llm(tools) if tools else [] + + return { + "collection_id": collection_id, + "mcp_server_id": mcp_server_id, + "tool_count": len(tools), + "tools": [ + { + "id": t.get('id'), + "name": t.get('name'), + "description": t.get('description'), + "mcp_server_id": t.get('mcp_server_id'), + } + for t in tools + ], + "llm_formatted_tools": llm_tools, + } + + +@router.get("/collections") +async def list_chat_collections() -> Dict: + """ + List available collections for chat with tool counts. + + Use this to see which collections are available and how many tools each has. + """ + collections, total = WarehouseDB.list_collections() + + result = [] + for collection in collections: + collection_id = collection.get('id') + tools = get_available_tools(collection_id=collection_id) + result.append({ + "id": collection_id, + "name": collection.get('name'), + "description": collection.get('description'), + "tool_count": len(tools), + "tools": [t.get('name') for t in tools], + }) + + return { + "collections": result, + "total": total, + } diff --git a/dbx-agent-app/app/backend/app/routes/collections.py b/dbx-agent-app/app/backend/app/routes/collections.py new file mode 100644 index 00000000..4961085c --- /dev/null +++ b/dbx-agent-app/app/backend/app/routes/collections.py @@ -0,0 +1,271 @@ +""" +CRUD endpoints for Collections using Databricks SQL Warehouse. 
+""" + +import logging +from fastapi import APIRouter, HTTPException, Request, status, Query +from typing import List +import math + +logger = logging.getLogger(__name__) + +from app.db_adapter import WarehouseDB # Auto-switches between SQLite and Warehouse +from app.schemas.collection import ( + CollectionCreate, + CollectionUpdate, + CollectionResponse, + CollectionItemCreate, + CollectionItemResponse, +) +from app.schemas.common import PaginatedResponse +from app.services.audit import record_audit + +router = APIRouter(prefix="/collections", tags=["Collections"]) + + +@router.get( + "", + response_model=PaginatedResponse[CollectionResponse], + status_code=status.HTTP_200_OK, + summary="List Collections", + description="List all collections with pagination", +) +def list_collections( + page: int = Query(1, ge=1, description="Page number"), + page_size: int = Query(50, ge=1, le=100, description="Items per page"), +) -> PaginatedResponse[CollectionResponse]: + """List all collections with pagination.""" + collections, total = WarehouseDB.list_collections(page=page, page_size=page_size) + total = int(total) if total else 0 # Convert string to int from warehouse + total_pages = math.ceil(total / page_size) if total > 0 else 1 + + return PaginatedResponse( + items=[CollectionResponse(**c) for c in collections], + total=total, + page=page, + page_size=page_size, + total_pages=total_pages, + ) + + +@router.post( + "", + response_model=CollectionResponse, + status_code=status.HTTP_201_CREATED, + summary="Create Collection", + description="Create a new collection", +) +def create_collection(collection_data: CollectionCreate, request: Request) -> CollectionResponse: + """Create a new collection.""" + try: + collection = WarehouseDB.create_collection( + name=collection_data.name, + description=collection_data.description, + ) + record_audit(request, "create", "collection", str(collection["id"]), collection["name"]) + return CollectionResponse(**collection) + except 
@router.get(
    "/{collection_id}",
    response_model=CollectionResponse,
    status_code=status.HTTP_200_OK,
    summary="Get Collection",
    description="Get a specific collection by ID",
)
def get_collection(collection_id: int) -> CollectionResponse:
    """Return a single collection, or 404 if the id is unknown."""
    record = WarehouseDB.get_collection(collection_id)
    if record:
        return CollectionResponse(**record)
    raise HTTPException(
        status_code=status.HTTP_404_NOT_FOUND,
        detail=f"Collection with id {collection_id} not found",
    )


@router.put(
    "/{collection_id}",
    response_model=CollectionResponse,
    status_code=status.HTTP_200_OK,
    summary="Update Collection",
    description="Update an existing collection",
)
def update_collection(collection_id: int, collection_data: CollectionUpdate, request: Request) -> CollectionResponse:
    """Apply a partial update to a collection, rejecting duplicate names."""
    existing = WarehouseDB.get_collection(collection_id)
    if not existing:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Collection with id {collection_id} not found",
        )

    changes = collection_data.model_dump(exclude_unset=True)

    # Renames must not collide with another collection's name.
    if 'name' in changes and changes['name'] != existing.get('name'):
        peers, _ = WarehouseDB.list_collections(page=1, page_size=10000)
        clash = any(
            p['name'] == changes['name'] and p['id'] != collection_id
            for p in peers
        )
        if clash:
            raise HTTPException(
                status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
                detail="A collection with this name already exists",
            )

    updated = WarehouseDB.update_collection(collection_id, **changes)
    record_audit(request, "update", "collection", str(collection_id), updated["name"])
    return CollectionResponse(**updated)
"/{collection_id}", + status_code=status.HTTP_204_NO_CONTENT, + summary="Delete Collection", + description="Delete a collection (cascades to collection items)", +) +def delete_collection(collection_id: int, request: Request) -> None: + """Delete a collection.""" + existing = WarehouseDB.get_collection(collection_id) + if not existing: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Collection with id {collection_id} not found", + ) + WarehouseDB.delete_collection(collection_id) + record_audit(request, "delete", "collection", str(collection_id), existing["name"]) + + +# Collection Items endpoints + + +@router.get( + "/{collection_id}/items", + response_model=List[CollectionItemResponse], + status_code=status.HTTP_200_OK, + summary="List Collection Items", + description="List all items in a collection", +) +def list_collection_items(collection_id: int) -> List[CollectionItemResponse]: + """List all items in a specific collection.""" + # list_collection_items returns [] for non-existent collections, + # so check existence only when empty to distinguish "no items" from "not found" + items = WarehouseDB.list_collection_items(collection_id) + if not items: + collection = WarehouseDB.get_collection(collection_id) + if not collection: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Collection with id {collection_id} not found", + ) + return [CollectionItemResponse(**item) for item in items] + + +@router.post( + "/{collection_id}/items", + response_model=CollectionItemResponse, + status_code=status.HTTP_201_CREATED, + summary="Add Item to Collection", + description="Add an app, MCP server, or tool to a collection", +) +def add_collection_item(collection_id: int, item_data: CollectionItemCreate, request: Request) -> CollectionItemResponse: + """Add an item to a collection.""" + # Verify collection exists + collection = WarehouseDB.get_collection(collection_id) + if not collection: + raise HTTPException( + 
@router.post(
    "/{collection_id}/items",
    response_model=CollectionItemResponse,
    status_code=status.HTTP_201_CREATED,
    summary="Add Item to Collection",
    description="Add an app, MCP server, or tool to a collection",
)
def add_collection_item(collection_id: int, item_data: CollectionItemCreate, request: Request) -> CollectionItemResponse:
    """Add an item to a collection.

    Exactly one of app_id, mcp_server_id, or tool_id must be set; the
    referenced entity must exist and must not already be in the collection.
    """
    # Verify collection exists
    collection = WarehouseDB.get_collection(collection_id)
    if not collection:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Collection with id {collection_id} not found",
        )

    # Validate exactly one reference is set
    refs = [item_data.app_id, item_data.mcp_server_id, item_data.tool_id]
    if sum(r is not None for r in refs) != 1:
        raise HTTPException(
            status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
            detail="Exactly one of app_id, mcp_server_id, or tool_id must be set",
        )

    # Check for duplicate item in collection
    existing_items = WarehouseDB.list_collection_items(collection_id)
    for existing in existing_items:
        if (item_data.app_id and existing.get('app_id') == item_data.app_id) or \
           (item_data.mcp_server_id and existing.get('mcp_server_id') == item_data.mcp_server_id) or \
           (item_data.tool_id and existing.get('tool_id') == item_data.tool_id):
            raise HTTPException(
                status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
                detail="Item already exists in this collection",
            )

    # Validate referenced entity exists
    if item_data.app_id and not WarehouseDB.get_app(item_data.app_id):
        raise HTTPException(
            status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
            detail=f"App with id {item_data.app_id} does not exist",
        )
    if item_data.mcp_server_id and not WarehouseDB.get_mcp_server(item_data.mcp_server_id):
        raise HTTPException(
            status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
            detail=f"MCP Server with id {item_data.mcp_server_id} does not exist",
        )
    if item_data.tool_id and not WarehouseDB.get_tool(item_data.tool_id):
        raise HTTPException(
            status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
            detail=f"Tool with id {item_data.tool_id} does not exist",
        )

    try:
        item = WarehouseDB.add_collection_item(
            collection_id=collection_id,
            app_id=item_data.app_id,
            mcp_server_id=item_data.mcp_server_id,
            tool_id=item_data.tool_id,
        )
        record_audit(request, "add_item", "collection", str(collection_id), collection["name"],
                     details={"item_id": item["id"]})
        return CollectionItemResponse(**item)
    except HTTPException:
        # Never mask an HTTPException raised inside the try as a generic 422.
        raise
    except Exception as e:
        # FIX: logger.error with just the message loses the traceback;
        # logger.exception records it. Chain the cause for debugging.
        logger.exception("Failed to add collection item")
        raise HTTPException(
            status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
            detail="Failed to add item to collection",
        ) from e
@router.delete(
    "/{collection_id}/items/{item_id}",
    status_code=status.HTTP_204_NO_CONTENT,
    summary="Remove Item from Collection",
    description="Remove an item from a collection",
)
def remove_collection_item(collection_id: int, item_id: int, request: Request) -> None:
    """Remove an item from a collection after validating ownership."""
    # The collection itself must exist.
    collection = WarehouseDB.get_collection(collection_id)
    if not collection:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Collection with id {collection_id} not found",
        )

    # The item must exist...
    item = WarehouseDB.get_collection_item(item_id)
    if not item:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Item with id {item_id} not found",
        )
    # ...and must actually belong to the collection named in the path.
    if item['collection_id'] != collection_id:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Item {item_id} does not belong to collection {collection_id}",
        )

    WarehouseDB.delete_collection_item(item_id)
    record_audit(
        request, "remove_item", "collection", str(collection_id), collection["name"],
        details={"item_id": item_id},
    )
+""" + +import logging +from typing import Dict +from fastapi import APIRouter, HTTPException, Query, status + +from app.db_adapter import DatabaseAdapter +from app.schemas.conversation import ( + ConversationListItem, + ConversationResponse, + ConversationRenameRequest, +) + +logger = logging.getLogger(__name__) + +router = APIRouter(prefix="/conversations", tags=["Conversations"]) + + +@router.get("", response_model=Dict) +async def list_conversations( + user_email: str = Query(None, description="Filter by user email"), + page: int = Query(1, ge=1), + page_size: int = Query(50, ge=1, le=200), +): + """List conversations, newest first.""" + items, total = DatabaseAdapter.list_conversations( + user_email=user_email, page=page, page_size=page_size + ) + return { + "conversations": items, + "total": total, + "page": page, + "page_size": page_size, + } + + +@router.get("/{conversation_id}", response_model=ConversationResponse) +async def get_conversation(conversation_id: str): + """Get a conversation with all its messages.""" + conv = DatabaseAdapter.get_conversation(conversation_id) + if not conv: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Conversation {conversation_id} not found", + ) + return conv + + +@router.patch("/{conversation_id}", response_model=ConversationListItem) +async def rename_conversation(conversation_id: str, body: ConversationRenameRequest): + """Rename a conversation.""" + conv = DatabaseAdapter.update_conversation_title(conversation_id, body.title) + if not conv: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Conversation {conversation_id} not found", + ) + return conv + + +@router.delete("/{conversation_id}", status_code=status.HTTP_204_NO_CONTENT) +async def delete_conversation(conversation_id: str): + """Delete a conversation and all its messages.""" + deleted = DatabaseAdapter.delete_conversation(conversation_id) + if not deleted: + raise HTTPException( + 
status_code=status.HTTP_404_NOT_FOUND, + detail=f"Conversation {conversation_id} not found", + ) diff --git a/dbx-agent-app/app/backend/app/routes/discovery.py b/dbx-agent-app/app/backend/app/routes/discovery.py new file mode 100644 index 00000000..202c0466 --- /dev/null +++ b/dbx-agent-app/app/backend/app/routes/discovery.py @@ -0,0 +1,244 @@ +""" +Discovery endpoint for refreshing MCP server catalog and agents. +""" + +import asyncio +import logging +from fastapi import APIRouter, Body, Depends, HTTPException, status, BackgroundTasks +from sqlalchemy.orm import Session +from datetime import datetime, timezone + +from app.database import get_db +from app.models.discovery_state import DiscoveryState +from app.schemas.discovery import ( + DiscoveryRefreshRequest, + DiscoveryRefreshResponse, + DiscoveryStatusResponse, + WorkspaceProfileResponse, + WorkspaceProfilesResponse, +) +from app.services.discovery import DiscoveryService +from app.services.workspace_profiles import ( + discover_workspace_profiles, + DEFAULT_CONFIG_PATH, +) + +logger = logging.getLogger(__name__) + +router = APIRouter(prefix="/discovery", tags=["Discovery"]) + + +def _get_or_create_state(db: Session) -> DiscoveryState: + """Get the singleton discovery state row, creating it if needed.""" + state = db.query(DiscoveryState).filter(DiscoveryState.id == 1).first() + if not state: + state = DiscoveryState(id=1, is_running=False) + db.add(state) + db.commit() + db.refresh(state) + return state + + +@router.get( + "/workspaces", + response_model=WorkspaceProfilesResponse, + status_code=status.HTTP_200_OK, + summary="Discover Workspace Profiles", + description="Parse ~/.databrickscfg and validate auth for each workspace profile", +) +async def get_workspace_profiles() -> WorkspaceProfilesResponse: + """ + Discover Databricks workspace profiles from CLI config. + + Parses ~/.databrickscfg, validates authentication for each workspace + profile concurrently, and returns status for each. 
+ """ + try: + profiles = await discover_workspace_profiles() + profile_responses = [ + WorkspaceProfileResponse( + name=p.name, + host=p.host, + auth_type=p.auth_type, + is_account_profile=p.is_account_profile, + auth_valid=p.auth_valid, + auth_error=p.auth_error, + username=p.username, + ) + for p in profiles + ] + return WorkspaceProfilesResponse( + profiles=profile_responses, + config_path=DEFAULT_CONFIG_PATH, + total=len(profile_responses), + valid=sum(1 for p in profile_responses if p.auth_valid), + ) + except Exception as e: + logger.error("Failed to discover workspace profiles: %s", e) + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=f"Failed to discover workspace profiles: {e}", + ) + + +@router.post( + "/refresh", + response_model=DiscoveryRefreshResponse, + status_code=status.HTTP_200_OK, + summary="Refresh MCP Catalog", + description="Discover MCP servers and tools from custom URLs, workspace, or catalog", +) +async def refresh_discovery( + request: DiscoveryRefreshRequest = Body(default=None), + db: Session = Depends(get_db), +) -> DiscoveryRefreshResponse: + """ + Refresh the MCP catalog by discovering servers and tools. + + This endpoint: + 1. Discovers MCP servers from provided URLs (and optionally workspace/catalog) + 2. Queries each server for available tools + 3. Upserts discovered data into the registry database + 4. 
Returns summary of discovered/updated entities + """ + # Default to workspace discovery when no body provided + if request is None: + request = DiscoveryRefreshRequest(discover_workspace=True) + elif not request.server_urls and not request.discover_workspace and not request.discover_catalog: + # Explicit request with no sources specified — reject + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="At least one discovery source must be specified", + ) + + state = _get_or_create_state(db) + + # Check if discovery is already running + if state.is_running: + raise HTTPException( + status_code=status.HTTP_409_CONFLICT, + detail="Discovery is already running. Use GET /discovery/status to check progress.", + ) + + # Mark as running + state.is_running = True + db.commit() + + try: + # Create discovery service + service = DiscoveryService() + + # Convert Pydantic HttpUrl to string + server_urls = [str(url) for url in request.server_urls] + + # Run MCP discovery and agent discovery in parallel + parallel_tasks = [ + service.discover_all( + custom_urls=server_urls if server_urls else None, + profile=request.databricks_profile, + ) + ] + + run_agent_discovery = request.discover_agents or request.discover_workspace + if run_agent_discovery: + parallel_tasks.append( + service.discover_agents_all(profile=request.databricks_profile) + ) + + gather_results = await asyncio.gather(*parallel_tasks) + + discovery_result = gather_results[0] + agent_result = gather_results[1] if run_agent_discovery else None + + # Capture app count before upsert clears it + apps_discovered = len(getattr(service, "_pending_apps", [])) + + # Upsert MCP results into database + upsert_result = service.upsert_discovery_results(discovery_result) + + # Upsert agent results into database + agent_upsert = None + if agent_result: + agent_upsert = service.upsert_agent_discovery_results(agent_result) + + # Merge errors from both discovery sources + all_errors = list(discovery_result.errors) + 
if agent_result: + all_errors.extend(agent_result.errors) + + # Determine status + has_results = ( + discovery_result.servers_discovered > 0 + or (agent_result and len(agent_result.agents) > 0) + ) + if all_errors: + if has_results: + result_status = "partial" + message = f"Discovery completed with {len(all_errors)} errors" + else: + result_status = "failed" + message = "Discovery failed: all sources unreachable" + else: + result_status = "success" + message = "Discovery completed successfully" + + # Update state + state.is_running = False + state.last_run_timestamp = datetime.now(timezone.utc).isoformat() + state.last_run_status = result_status + state.last_run_message = message + db.commit() + + return DiscoveryRefreshResponse( + status=result_status, + message=message, + apps_discovered=apps_discovered, + servers_discovered=discovery_result.servers_discovered, + tools_discovered=discovery_result.tools_discovered, + new_servers=upsert_result.new_servers, + updated_servers=upsert_result.updated_servers, + new_tools=upsert_result.new_tools, + updated_tools=upsert_result.updated_tools, + agents_discovered=len(agent_result.agents) if agent_result else 0, + new_agents=agent_upsert.new_agents if agent_upsert else 0, + updated_agents=agent_upsert.updated_agents if agent_upsert else 0, + errors=all_errors, + ) + + except Exception as e: + # Ensure state is cleaned up on failure + state.is_running = False + state.last_run_timestamp = datetime.now(timezone.utc).isoformat() + state.last_run_status = "failed" + state.last_run_message = f"Discovery failed: {type(e).__name__}" + db.commit() + logger.error("Discovery failed: %s", e) + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail="Discovery failed", + ) + + +@router.get( + "/status", + response_model=DiscoveryStatusResponse, + status_code=status.HTTP_200_OK, + summary="Get Discovery Status", + description="Check if discovery is running and view last run results", +) +def 
get_discovery_status( + db: Session = Depends(get_db), +) -> DiscoveryStatusResponse: + """ + Get the current status of the discovery process. + + Returns: + DiscoveryStatusResponse with current status and last run info + """ + state = _get_or_create_state(db) + return DiscoveryStatusResponse( + is_running=state.is_running, + last_run_timestamp=state.last_run_timestamp, + last_run_status=state.last_run_status, + last_run_message=state.last_run_message, + ) diff --git a/dbx-agent-app/app/backend/app/routes/health.py b/dbx-agent-app/app/backend/app/routes/health.py new file mode 100644 index 00000000..8763bf18 --- /dev/null +++ b/dbx-agent-app/app/backend/app/routes/health.py @@ -0,0 +1,125 @@ +""" +Health check endpoints. +""" + +import os +from fastapi import APIRouter, Depends, status +from sqlalchemy.orm import Session +from sqlalchemy import text +from app.database import get_db +from app.schemas.common import HealthResponse, ReadyResponse +from app.config import settings + +router = APIRouter(tags=["Health"]) + + +@router.get( + "/health", + response_model=HealthResponse, + status_code=status.HTTP_200_OK, + summary="Health Check", + description="Check if the API is running", +) +def health_check() -> HealthResponse: + """ + Simple health check endpoint that always returns 200 if the API is running. + """ + return HealthResponse( + status="healthy", + version=settings.api_version, + ) + + +@router.get( + "/ready", + response_model=ReadyResponse, + status_code=status.HTTP_200_OK, + summary="Readiness Check", + description="Check if the API is ready to accept requests (database connection)", +) +def readiness_check(db: Session = Depends(get_db)) -> ReadyResponse: + """ + Readiness check that verifies database connectivity. + Returns 200 if ready, 503 if not ready. 
+ """ + try: + # Test database connection + db.execute(text("SELECT 1")) + return ReadyResponse( + ready=True, + database="connected", + ) + except Exception as e: + return ReadyResponse( + ready=False, + database=f"error: {str(e)}", + ) + + +@router.get( + "/auth-test", + status_code=status.HTTP_200_OK, + summary="Test Authentication", + description="Test if authentication is working (requires auth)", +) +def auth_test() -> dict: + """ + Test endpoint to verify authentication is working. + Returns 200 if authenticated, 401 if not. + """ + return { + "authenticated": True, + "message": "Authentication successful" + } + + +@router.get( + "/debug-db", + status_code=status.HTTP_200_OK, + summary="Debug Database Info", + description="Show database file location and record counts (debug only)", +) +def debug_database(db: Session = Depends(get_db)) -> dict: + """ + Debug endpoint to show database information. + Shows database URL, file existence, and record counts. + """ + try: + # Get database URL + db_url = str(db.bind.url) + + # Extract file path from SQLite URL + db_file = None + if db_url.startswith("sqlite:///"): + db_file = db_url.replace("sqlite:///", "") + db_file_exists = os.path.exists(db_file) + db_file_size = os.path.getsize(db_file) if db_file_exists else 0 + else: + db_file_exists = None + db_file_size = None + + # Get record counts + apps_count = db.execute(text("SELECT COUNT(*) FROM apps")).scalar() + mcp_servers_count = db.execute(text("SELECT COUNT(*) FROM mcp_servers")).scalar() + tools_count = db.execute(text("SELECT COUNT(*) FROM tools")).scalar() + agents_count = db.execute(text("SELECT COUNT(*) FROM agents")).scalar() + + return { + "database_url": db_url, + "database_file": db_file, + "file_exists": db_file_exists, + "file_size_bytes": db_file_size, + "record_counts": { + "apps": apps_count, + "mcp_servers": mcp_servers_count, + "tools": tools_count, + "agents": agents_count, + }, + "cwd": os.getcwd(), + "env_database_url": 
os.environ.get("DATABASE_URL", "not set"), + } + except Exception as e: + return { + "error": str(e), + "type": type(e).__name__, + } diff --git a/dbx-agent-app/app/backend/app/routes/lineage.py b/dbx-agent-app/app/backend/app/routes/lineage.py new file mode 100644 index 00000000..bdffbf07 --- /dev/null +++ b/dbx-agent-app/app/backend/app/routes/lineage.py @@ -0,0 +1,246 @@ +""" +Lineage & Knowledge Graph endpoints. + +Provides: + - GET /api/lineage/{asset_type}/{asset_id} — upstream/downstream lineage + - GET /api/lineage/{asset_type}/{asset_id}/impact — impact analysis + - POST /api/lineage/crawl — trigger lineage discovery + - GET /api/lineage/relationships — list all relationships +""" + +import logging +from typing import Optional +from fastapi import APIRouter, HTTPException, Query, Request + +from app.schemas.lineage import ( + LineageResponse, + LineageNode, + LineageEdge, + ImpactAnalysisResponse, + LineageCrawlRequest, + LineageCrawlResponse, + RelationshipResponse, +) +from app.services.lineage_crawler import LineageCrawlerService +from app.services.audit import record_audit +from app.db_adapter import DatabaseAdapter + +logger = logging.getLogger(__name__) +router = APIRouter(tags=["Lineage"]) + + +@router.get("/lineage/{asset_type}/{asset_id}", response_model=LineageResponse) +async def get_lineage( + asset_type: str, + asset_id: int, + direction: str = Query("both", enum=["upstream", "downstream", "both"]), + max_depth: int = Query(3, ge=1, le=10), +): + """ + Get upstream and/or downstream lineage for an asset. + + Returns a graph of nodes (assets) and edges (relationships) + traversed from the given starting asset. 
+ """ + # Resolve asset name + root_name = _resolve_asset_name(asset_type, asset_id) + if not root_name: + raise HTTPException(status_code=404, detail=f"Asset {asset_type}/{asset_id} not found") + + nodes = [LineageNode(asset_type=asset_type, asset_id=asset_id, name=root_name, depth=0)] + edges = [] + visited = {(asset_type, asset_id)} + + if direction in ("upstream", "both"): + _traverse(asset_type, asset_id, "upstream", max_depth, 1, nodes, edges, visited) + + if direction in ("downstream", "both"): + _traverse(asset_type, asset_id, "downstream", max_depth, 1, nodes, edges, visited) + + return LineageResponse( + root_type=asset_type, + root_id=asset_id, + root_name=root_name, + direction=direction, + nodes=nodes, + edges=edges, + ) + + +@router.get("/lineage/{asset_type}/{asset_id}/impact", response_model=ImpactAnalysisResponse) +async def get_impact_analysis( + asset_type: str, + asset_id: int, + max_depth: int = Query(5, ge=1, le=10), +): + """ + Impact analysis: what downstream assets would be affected + if this asset changes? + + Traverses downstream relationships recursively. 
+ """ + root_name = _resolve_asset_name(asset_type, asset_id) + if not root_name: + raise HTTPException(status_code=404, detail=f"Asset {asset_type}/{asset_id} not found") + + affected = [] + visited = {(asset_type, asset_id)} + _traverse_impact(asset_type, asset_id, max_depth, 1, affected, visited) + + return ImpactAnalysisResponse( + root_type=asset_type, + root_id=asset_id, + root_name=root_name, + affected_assets=affected, + total_affected=len(affected), + ) + + +@router.post("/lineage/crawl", response_model=LineageCrawlResponse) +async def crawl_lineage(request: LineageCrawlRequest, http_request: Request): + """Trigger lineage discovery across all data sources.""" + service = LineageCrawlerService(databricks_profile=request.databricks_profile) + stats = await service.crawl(include_column_lineage=request.include_column_lineage) + + result_status = "completed" if not stats.errors else "completed_with_errors" + + record_audit(http_request, "crawl", "lineage", details={ + "relationships_discovered": stats.relationships_discovered, + "new_relationships": stats.new_relationships, + }) + + return LineageCrawlResponse( + status=result_status, + message=f"Discovered {stats.relationships_discovered} relationships ({stats.new_relationships} new)", + relationships_discovered=stats.relationships_discovered, + new_relationships=stats.new_relationships, + errors=stats.errors, + ) + + +@router.get("/lineage/relationships", response_model=list[RelationshipResponse]) +async def list_relationships( + source_type: Optional[str] = None, + target_type: Optional[str] = None, + relationship_type: Optional[str] = None, + page: int = Query(1, ge=1), + page_size: int = Query(100, ge=1, le=500), +): + """List all relationships with optional filters.""" + rels, total = DatabaseAdapter.list_asset_relationships( + source_type=source_type, + target_type=target_type, + relationship_type=relationship_type, + page=page, + page_size=page_size, + ) + return rels + + +# --- Graph traversal helpers 
--- + +def _traverse( + asset_type: str, + asset_id: int, + direction: str, + max_depth: int, + current_depth: int, + nodes: list, + edges: list, + visited: set, +) -> None: + """BFS-style traversal along lineage edges.""" + if current_depth > max_depth: + return + + if direction == "upstream": + # Find relationships where this asset is the TARGET (something feeds into it) + rels = DatabaseAdapter.get_relationships_by_target(asset_type, asset_id) + for rel in rels: + neighbor = (rel["source_type"], rel["source_id"]) + edges.append(LineageEdge( + source_type=rel["source_type"], + source_id=rel["source_id"], + target_type=rel["target_type"], + target_id=rel["target_id"], + relationship_type=rel["relationship_type"], + )) + if neighbor not in visited: + visited.add(neighbor) + name = rel.get("source_name") or _resolve_asset_name(rel["source_type"], rel["source_id"]) or "" + nodes.append(LineageNode( + asset_type=rel["source_type"], + asset_id=rel["source_id"], + name=name, + depth=current_depth, + )) + _traverse(rel["source_type"], rel["source_id"], direction, max_depth, current_depth + 1, nodes, edges, visited) + else: + # Find relationships where this asset is the SOURCE (it feeds into something) + rels = DatabaseAdapter.get_relationships_by_source(asset_type, asset_id) + for rel in rels: + neighbor = (rel["target_type"], rel["target_id"]) + edges.append(LineageEdge( + source_type=rel["source_type"], + source_id=rel["source_id"], + target_type=rel["target_type"], + target_id=rel["target_id"], + relationship_type=rel["relationship_type"], + )) + if neighbor not in visited: + visited.add(neighbor) + name = rel.get("target_name") or _resolve_asset_name(rel["target_type"], rel["target_id"]) or "" + nodes.append(LineageNode( + asset_type=rel["target_type"], + asset_id=rel["target_id"], + name=name, + depth=current_depth, + )) + _traverse(rel["target_type"], rel["target_id"], direction, max_depth, current_depth + 1, nodes, edges, visited) + + +def _traverse_impact( + 
asset_type: str, + asset_id: int, + max_depth: int, + current_depth: int, + affected: list, + visited: set, +) -> None: + """Traverse downstream to find all affected assets.""" + if current_depth > max_depth: + return + + rels = DatabaseAdapter.get_relationships_by_source(asset_type, asset_id) + for rel in rels: + neighbor = (rel["target_type"], rel["target_id"]) + if neighbor not in visited: + visited.add(neighbor) + name = rel.get("target_name") or _resolve_asset_name(rel["target_type"], rel["target_id"]) or "" + affected.append(LineageNode( + asset_type=rel["target_type"], + asset_id=rel["target_id"], + name=name, + depth=current_depth, + )) + _traverse_impact(rel["target_type"], rel["target_id"], max_depth, current_depth + 1, affected, visited) + + +def _resolve_asset_name(asset_type: str, asset_id: int) -> Optional[str]: + """Resolve an asset's display name by type + id.""" + catalog_types = {"table", "view", "function", "model", "volume"} + workspace_types = {"notebook", "job", "dashboard", "pipeline", "cluster", "experiment"} + + if asset_type in catalog_types: + asset = DatabaseAdapter.get_catalog_asset(asset_id) + return asset["full_name"] if asset else None + elif asset_type in workspace_types: + asset = DatabaseAdapter.get_workspace_asset(asset_id) + return asset["name"] if asset else None + elif asset_type == "app": + asset = DatabaseAdapter.get_app(asset_id) + return asset["name"] if asset else None + elif asset_type == "tool": + asset = DatabaseAdapter.get_tool(asset_id) + return asset["name"] if asset else None + return None diff --git a/dbx-agent-app/app/backend/app/routes/mcp_servers.py b/dbx-agent-app/app/backend/app/routes/mcp_servers.py new file mode 100644 index 00000000..841a2cbc --- /dev/null +++ b/dbx-agent-app/app/backend/app/routes/mcp_servers.py @@ -0,0 +1,126 @@ +""" +CRUD endpoints for MCP Servers using Databricks SQL Warehouse. 
+""" + +import logging +from fastapi import APIRouter, HTTPException, status, Query +import math + +logger = logging.getLogger(__name__) + +from app.db_adapter import WarehouseDB # Auto-switches between SQLite and Warehouse +from app.schemas.mcp_server import MCPServerCreate, MCPServerUpdate, MCPServerResponse +from app.schemas.common import PaginatedResponse + +router = APIRouter(prefix="/mcp_servers", tags=["MCP Servers"]) + + +@router.get( + "", + response_model=PaginatedResponse[MCPServerResponse], + status_code=status.HTTP_200_OK, + summary="List MCP Servers", + description="List all MCP servers with pagination", +) +def list_mcp_servers( + page: int = Query(1, ge=1, description="Page number"), + page_size: int = Query(50, ge=1, le=100, description="Items per page"), + kind: str | None = Query(None, description="Filter by server kind (managed, external)"), + app_id: int | None = Query(None, description="Filter by parent app ID"), +) -> PaginatedResponse[MCPServerResponse]: + """List all MCP servers with pagination.""" + servers, total = WarehouseDB.list_mcp_servers(page=page, page_size=page_size, app_id=app_id, kind=kind) + total = int(total) if total else 0 # Convert string to int from warehouse + total_pages = math.ceil(total / page_size) if total > 0 else 1 + + return PaginatedResponse( + items=[MCPServerResponse(**server) for server in servers], + total=total, + page=page, + page_size=page_size, + total_pages=total_pages, + ) + + +@router.post( + "", + response_model=MCPServerResponse, + status_code=status.HTTP_201_CREATED, + summary="Create MCP Server", + description="Create a new MCP server", +) +def create_mcp_server(server_data: MCPServerCreate) -> MCPServerResponse: + """Create a new MCP server.""" + try: + server = WarehouseDB.create_mcp_server( + server_url=server_data.server_url, + kind=server_data.kind.value if server_data.kind else 'managed', + app_id=server_data.app_id, + uc_connection=server_data.uc_connection, + scopes=server_data.scopes, + ) 
+ return MCPServerResponse(**server) + except Exception as e: + logger.error("Failed to create MCP server: %s", e) + raise HTTPException( + status_code=status.HTTP_422_UNPROCESSABLE_ENTITY, + detail="Failed to create MCP server", + ) + + +@router.get( + "/{server_id}", + response_model=MCPServerResponse, + status_code=status.HTTP_200_OK, + summary="Get MCP Server", + description="Get a specific MCP server by ID", +) +def get_mcp_server(server_id: int) -> MCPServerResponse: + """Get a specific MCP server by ID.""" + server = WarehouseDB.get_mcp_server(server_id) + if not server: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"MCP Server with id {server_id} not found", + ) + return MCPServerResponse(**server) + + +@router.put( + "/{server_id}", + response_model=MCPServerResponse, + status_code=status.HTTP_200_OK, + summary="Update MCP Server", + description="Update an existing MCP server", +) +def update_mcp_server(server_id: int, server_data: MCPServerUpdate) -> MCPServerResponse: + """Update an existing MCP server.""" + existing = WarehouseDB.get_mcp_server(server_id) + if not existing: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"MCP Server with id {server_id} not found", + ) + + update_dict = server_data.model_dump(exclude_unset=True) + if 'kind' in update_dict and update_dict['kind'] is not None: + update_dict['kind'] = update_dict['kind'].value + server = WarehouseDB.update_mcp_server(server_id, **update_dict) + return MCPServerResponse(**server) + + +@router.delete( + "/{server_id}", + status_code=status.HTTP_204_NO_CONTENT, + summary="Delete MCP Server", + description="Delete an MCP server", +) +def delete_mcp_server(server_id: int) -> None: + """Delete an MCP server.""" + existing = WarehouseDB.get_mcp_server(server_id) + if not existing: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"MCP Server with id {server_id} not found", + ) + WarehouseDB.delete_mcp_server(server_id) 
diff --git a/dbx-agent-app/app/backend/app/routes/search.py b/dbx-agent-app/app/backend/app/routes/search.py new file mode 100644 index 00000000..5de122f3 --- /dev/null +++ b/dbx-agent-app/app/backend/app/routes/search.py @@ -0,0 +1,82 @@ +""" +Unified search endpoint — semantic + keyword search across all asset types. +""" + +import logging +from fastapi import APIRouter, HTTPException + +from app.schemas.search import ( + SearchRequest, + SearchResponse, + EmbedStatusResponse, +) +from app.services.search import SearchService +from app.services.embedding import EmbeddingService +from app.db_adapter import DatabaseAdapter + +logger = logging.getLogger(__name__) +router = APIRouter(tags=["Search"]) + + +@router.post("/search", response_model=SearchResponse) +async def search(request: SearchRequest): + """ + Unified semantic search across all indexed assets. + + Combines vector similarity with keyword matching for hybrid ranking. + """ + service = SearchService() + results, search_mode = await service.search( + query=request.query, + types=request.types, + catalogs=request.catalogs, + owner=request.owner, + limit=request.limit, + ) + + return SearchResponse( + query=request.query, + total=len(results), + results=results, + search_mode=search_mode, + ) + + +@router.post("/search/embed-all", response_model=EmbedStatusResponse) +async def embed_all_assets(): + """ + Generate embeddings for all un-embedded assets. + + This is typically called after a crawl to ensure all new assets + have embeddings for semantic search. 
+ """ + service = EmbeddingService() + counts = await service.embed_all_assets() + + total_embedded = sum(counts.values()) + logger.info("Embedded %d new assets: %s", total_embedded, counts) + + stats = DatabaseAdapter.get_embedding_stats() + + return EmbedStatusResponse( + total_assets=stats["total_assets"], + embedded_assets=stats["embedded_assets"], + pending_assets=stats["pending_assets"], + embedding_model=service._model if service._use_fmapi else "keyword-hash", + dimension=service._dimension if service._use_fmapi else 256, + ) + + +@router.get("/search/embed-status", response_model=EmbedStatusResponse) +async def embed_status(): + """Get the current embedding coverage status.""" + service = EmbeddingService() + stats = DatabaseAdapter.get_embedding_stats() + + return EmbedStatusResponse( + total_assets=stats["total_assets"], + embedded_assets=stats["embedded_assets"], + pending_assets=stats["pending_assets"], + embedding_model=service._model if service._use_fmapi else "keyword-hash", + dimension=service._dimension if service._use_fmapi else 256, + ) diff --git a/dbx-agent-app/app/backend/app/routes/supervisor_runtime.py b/dbx-agent-app/app/backend/app/routes/supervisor_runtime.py new file mode 100644 index 00000000..981fd9b2 --- /dev/null +++ b/dbx-agent-app/app/backend/app/routes/supervisor_runtime.py @@ -0,0 +1,819 @@ +""" +Supervisor runtime - Actually RUN supervisors, not just generate code. + +This provides a chat endpoint that executes the generated supervisor logic +so users can test supervisors immediately without deploying. 
+""" + +import logging +from fastapi import APIRouter, HTTPException, status +from pydantic import BaseModel, Field +from typing import Optional, Dict, Any, List, Union +import httpx +import json +from dataclasses import dataclass + +logger = logging.getLogger(__name__) + +from app.db_adapter import WarehouseDB +from app.config import settings + +router = APIRouter(prefix="/supervisor-runtime", tags=["Supervisor Runtime"]) + + +@dataclass +class ToolInfo: + """Tool discovered from MCP server.""" + name: str + description: str + spec: Dict[str, Any] + server_url: str + + +class SupervisorChatRequest(BaseModel): + """Chat request for supervisor runtime.""" + collection_id: int = Field(..., description="Collection ID to use for supervisor") + message: str = Field(..., description="User message") + conversation_id: Optional[str] = Field(None, description="Conversation ID") + mock_mode: bool = Field(False, description="Use mock responses (no real LLM/MCP calls). Set to true for demo mode.") + orchestration_mode: bool = Field( + True, + description="Multi-agent orchestration (plan, route, execute, evaluate). 
Set to false for legacy single-cycle mode.", + ) + + +class SupervisorChatResponse(BaseModel): + """Chat response from supervisor runtime.""" + response: str + conversation_id: str + tools_discovered: int + tools_called: int + mock: bool + + +async def fetch_tool_infos_mock(server_url: str) -> List[ToolInfo]: + """Mock tool discovery for demo purposes.""" + # Return mock tools based on collection + return [ + ToolInfo( + name="search_transcripts", + description="Search expert transcripts using RAG", + spec={ + "type": "function", + "function": { + "name": "search_transcripts", + "description": "Search expert transcripts using RAG", + "parameters": { + "type": "object", + "properties": { + "query": {"type": "string", "description": "Search query"} + }, + "required": ["query"] + } + } + }, + server_url=server_url + ), + ToolInfo( + name="get_expert_profile", + description="Retrieve expert profile by ID", + spec={ + "type": "function", + "function": { + "name": "get_expert_profile", + "description": "Retrieve expert profile by ID", + "parameters": { + "type": "object", + "properties": { + "expert_id": {"type": "string", "description": "Expert ID"} + }, + "required": ["expert_id"] + } + } + }, + server_url=server_url + ) + ] + + +def generate_mock_response(message: str, tools: List[ToolInfo]) -> Dict[str, Any]: + """Generate a mock supervisor response.""" + message_lower = message.lower() + + # Simple keyword matching for demo + if "search" in message_lower or "find" in message_lower: + return { + "response": f"""I found several experts based on your query: "{message}" + +**Top Matches:** + +1. **Dr. Sarah Chen** - Machine Learning Expert + - 15 years experience in ML research + - Specializes in neural networks and deep learning + - Expert ID: EXP-2847 + +2. **Prof. Michael Rodriguez** - AI Ethics Researcher + - Pioneer in responsible AI development + - Published 47 papers on AI ethics + - Expert ID: EXP-1923 + +3. **Dr. 
Aisha Patel** - Natural Language Processing + - Expert in transformer models and LLMs + - Built systems for 10+ companies + - Expert ID: EXP-3156 + +Would you like me to retrieve full profiles for any of these experts? + +*Note: This is a demo response. In production, this would call the real search_transcripts tool.*""", + "tools_called": 1, + "tool_name": "search_transcripts" + } + + elif "profile" in message_lower or "exp-" in message_lower: + return { + "response": """**Dr. Sarah Chen (EXP-2847)** + +**Background:** +- Ph.D. in Computer Science, Stanford University +- 15 years in Machine Learning Research +- Currently: Chief AI Scientist at TechCorp + +**Expertise:** +- Deep Learning & Neural Networks +- Computer Vision +- Reinforcement Learning +- MLOps and Production ML + +**Recent Work:** +- Led development of breakthrough transformer architecture +- Published in top-tier conferences (NeurIPS, ICML, CVPR) +- Advisory board for 3 AI startups + +**Publications:** 87 papers, 12,000+ citations +**Availability:** Open to consulting engagements + +*Note: This is a demo response. In production, this would call the real get_expert_profile tool.*""", + "tools_called": 1, + "tool_name": "get_expert_profile" + } + + elif any(word in message_lower for word in ["hello", "hi", "hey", "help"]): + return { + "response": f"""Hello! I'm the **Expert Research Toolkit** supervisor. + +I can help you with: + +🔍 **Search Expert Transcripts** - Find relevant expert conversations using vector search + Example: "Find experts in quantum computing" + +👤 **Retrieve Expert Profiles** - Get detailed information about specific experts + Example: "Get profile for EXP-2847" + +**Available Tools:** {len(tools)} tools discovered +- {', '.join([t.name for t in tools])} + +What would you like to explore? + +*Note: This is running in demo mode. 
In production, this would connect to real MCP servers and use Databricks Foundation Models.*""", + "tools_called": 0, + "tool_name": None + } + + else: + return { + "response": f"""I understand you're asking: "{message}" + +Based on your query, I can: +- Search for relevant experts using the **search_transcripts** tool +- Retrieve detailed profiles using the **get_expert_profile** tool + +Try asking: +- "Find experts in [your topic]" +- "Get profile for [expert ID]" + +**Current Capabilities:** +- {len(tools)} tools available +- Pattern 3 dynamic discovery active +- Ready to orchestrate agent interactions + +*Note: This is running in demo mode with simulated responses.*""", + "tools_called": 0, + "tool_name": None + } + + +@router.post( + "/chat", + response_model=None, + status_code=status.HTTP_200_OK, + summary="Chat with Supervisor Runtime", + description="Execute supervisor logic and chat - no deployment needed!", + responses={ + 200: { + "description": "Supervisor response (standard or orchestrated)", + "content": {"application/json": {}}, + } + }, +) +async def chat_with_supervisor(request: SupervisorChatRequest) -> SupervisorChatResponse: + """ + Chat with a supervisor runtime. + + This runs the supervisor logic locally so you can test it immediately + without deploying to Databricks Apps. 
+ + **Mock Mode (default):** + - Uses simulated responses + - No real LLM or MCP calls + - Instant responses for testing + + **Production Mode (mock_mode=false):** + - Requires DATABRICKS_TOKEN and MCP server access + - Real tool discovery and execution + - Actual LLM orchestration + """ + # Validate collection exists + collection = WarehouseDB.get_collection(request.collection_id) + if not collection: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Collection {request.collection_id} not found" + ) + + # Get collection items to find MCP servers + items = WarehouseDB.list_collection_items(request.collection_id) + + # Extract MCP server URLs + mcp_server_urls = set() + for item in items: + if item.get("mcp_server_id"): + server = WarehouseDB.get_mcp_server(item["mcp_server_id"]) + if server: + mcp_server_urls.add(server["server_url"]) + if item.get("app_id"): + app = WarehouseDB.get_app(item["app_id"]) + if app and app.get("url"): + mcp_server_urls.add(app["url"]) + + if request.mock_mode: + # MOCK MODE: Simulated responses for demo + tool_infos = [] + for server_url in mcp_server_urls: + tools = await fetch_tool_infos_mock(server_url) + tool_infos.extend(tools) + + # Generate mock response + mock_result = generate_mock_response(request.message, tool_infos) + + return SupervisorChatResponse( + response=mock_result["response"], + conversation_id=request.conversation_id or "demo-session", + tools_discovered=len(tool_infos), + tools_called=mock_result["tools_called"], + mock=True + ) + + elif request.orchestration_mode: + # ORCHESTRATION MODE: Multi-agent planning, routing, execution, evaluation + return await run_orchestrated_supervisor( + collection=collection, + mcp_server_urls=list(mcp_server_urls), + message=request.message, + conversation_id=request.conversation_id, + ) + + else: + # PRODUCTION MODE: Real LLM and MCP calls (single-cycle) + return await run_real_supervisor( + collection=collection, + mcp_server_urls=list(mcp_server_urls), 
+ message=request.message, + conversation_id=request.conversation_id + ) + + +async def fetch_tool_infos_real(server_url: str) -> List[ToolInfo]: + """Fetch tools from real MCP server via JSON-RPC.""" + tools: List[ToolInfo] = [] + + try: + async with httpx.AsyncClient(timeout=30.0) as client: + request_payload = { + "jsonrpc": "2.0", + "id": 1, + "method": "tools/list", + "params": {} + } + + response = await client.post( + server_url, + json=request_payload, + headers={"Content-Type": "application/json"} + ) + response.raise_for_status() + + result = response.json() + + if "error" in result: + logger.warning("MCP server error from %s: %s", server_url, result['error']) + return tools + + tools_data = result.get("result", {}).get("tools", []) + + for tool_data in tools_data: + tool_name = tool_data.get("name", "") + if not tool_name: + continue + + tool_spec = { + "type": "function", + "function": { + "name": tool_name, + "description": tool_data.get("description", ""), + "parameters": tool_data.get("inputSchema", {}) + } + } + + tools.append(ToolInfo( + name=tool_name, + description=tool_data.get("description", ""), + spec=tool_spec, + server_url=server_url + )) + + except Exception as e: + logger.error("Failed to fetch tools from %s: %s", server_url, e) + + return tools + + +async def call_tool_real(tool_name: str, tool_args: Dict[str, Any], server_url: str) -> Any: + """Execute tool on real MCP server.""" + async with httpx.AsyncClient(timeout=60.0) as client: + request_payload = { + "jsonrpc": "2.0", + "id": 2, + "method": "tools/call", + "params": { + "name": tool_name, + "arguments": tool_args + } + } + + response = await client.post( + server_url, + json=request_payload, + headers={"Content-Type": "application/json"} + ) + response.raise_for_status() + + result = response.json() + + if "error" in result: + error = result["error"] + raise Exception(f"Tool call error: {error.get('message', 'Unknown error')}") + + return result.get("result", {}) + + +def 
_get_databricks_llm_config(): + """Get Databricks LLM endpoint URL and auth headers via WorkspaceClient.""" + from databricks.sdk import WorkspaceClient + w = WorkspaceClient() + endpoint_url = f"{w.config.host}/serving-endpoints/{settings.llm_endpoint}/invocations" + headers = {"Content-Type": "application/json"} + headers.update(w.config.authenticate()) + return endpoint_url, headers + + +async def _delegate_to_agent(agent_id: int, task_description: str, base_url: str) -> str: + """ + Delegate a sub-task to a peer agent via A2A message/send. + + Returns the response text from the peer agent. + """ + from app.services.a2a_client import A2AClient, A2AClientError + + agent = WarehouseDB.get_agent(agent_id) + if not agent: + return f"Error: Agent {agent_id} not found" + + # Determine A2A URL: prefer agent's endpoint_url, fallback to registry's A2A endpoint + a2a_url = agent.get("endpoint_url") or f"{base_url}/api/a2a/{agent_id}" + + try: + async with A2AClient(timeout=120.0) as client: + result = await client.send_message( + agent_url=a2a_url, + message=task_description, + auth_token=agent.get("auth_token"), + ) + + # Extract response text from task artifacts or messages + artifacts = result.get("artifacts", []) + if artifacts: + for artifact in artifacts: + for part in artifact.get("parts", []): + if part.get("text"): + return part["text"] + + messages = result.get("messages", []) + for msg in reversed(messages): + if msg.get("role") == "agent": + for part in msg.get("parts", []): + if part.get("text"): + return part["text"] + + return json.dumps(result) + + except A2AClientError as e: + return f"Delegation error: {e}" + except Exception as e: + return f"Delegation failed: {e}" + + +async def run_orchestrated_supervisor( + collection: Dict[str, Any], + mcp_server_urls: List[str], + message: str, + conversation_id: Optional[str], +) -> SupervisorChatResponse: + """Run multi-agent orchestration: plan -> match -> execute -> evaluate.""" + from dataclasses import asdict 
+ from app.services.orchestrator import Orchestrator, MAX_RETRIES + from app.services.search import SearchService + from app.schemas.orchestrator import OrchestrationChatResponse, SubTaskResultItem + + # Step 1: Discover MCP tools (for tool count reporting) + tool_infos: List[ToolInfo] = [] + for server_url in mcp_server_urls: + if server_url and server_url.strip(): + server_tools = await fetch_tool_infos_real(server_url.strip()) + tool_infos.extend(server_tools) + + # Step 2: Get active A2A agents + peer_agents = WarehouseDB.list_active_a2a_agents() + + # Step 3: Match agents to query via embedding similarity + search_service = SearchService() + matched = await search_service.match_agents(message, limit=5) + + # Merge: start with matched agents, then add any active agents not already included + agent_ids_seen = {m["agent"]["id"] for m in matched} + available_agents = [m["agent"] for m in matched] + for pa in peer_agents: + if pa["id"] not in agent_ids_seen: + available_agents.append(pa) + agent_ids_seen.add(pa["id"]) + + if not available_agents: + return SupervisorChatResponse( + response="No active agents available for orchestration. 
Register and activate agents first.", + conversation_id=conversation_id or "error-session", + tools_discovered=len(tool_infos), + tools_called=0, + mock=False, + ) + + # Step 4: Plan + orchestrator = Orchestrator() + plan = await orchestrator.classify_and_plan(message, available_agents) + + if plan.complexity == "simple" and plan.sub_tasks: + # For simple queries, fall through to the standard single-cycle supervisor + return await run_real_supervisor( + collection=collection, + mcp_server_urls=mcp_server_urls, + message=message, + conversation_id=conversation_id, + ) + + # Step 5: Execute plan + base_url = settings.a2a_base_url or "http://localhost:8000" + results = await orchestrator.execute_plan(plan, base_url) + + # Step 6: Evaluate results + evaluation = await orchestrator.evaluate_results(message, plan, results) + + # Step 6b: One retry if evaluation says results are poor + retry_count = 0 + while evaluation.needs_retry and retry_count < MAX_RETRIES: + retry_count += 1 + logger.info("Orchestration retry %d: %s", retry_count, evaluation.retry_suggestions) + results = await orchestrator.execute_plan(plan, base_url) + evaluation = await orchestrator.evaluate_results(message, plan, results) + + # Step 7: Record analytics for each sub-task + for r in results: + try: + quality_int = round(evaluation.quality_score) if evaluation.quality_score else None + WarehouseDB.create_agent_analytic( + agent_id=r.agent_id, + task_description=r.description, + success=1 if r.success else 0, + latency_ms=r.latency_ms, + quality_score=quality_int, + error_message=r.error, + ) + except Exception as e: + logger.warning("Failed to record analytics for agent %d: %s", r.agent_id, e) + + # Step 8: Build response + plan_dict = { + "complexity": plan.complexity, + "reasoning": plan.reasoning, + "sub_tasks": [asdict(st) for st in plan.sub_tasks], + } + result_items = [ + SubTaskResultItem( + task_index=r.task_index, + agent_id=r.agent_id, + agent_name=r.agent_name, + 
description=r.description, + response=r.response, + latency_ms=r.latency_ms, + success=r.success, + error=r.error, + ) + for r in results + ] + + return OrchestrationChatResponse( + response=evaluation.final_response, + conversation_id=conversation_id or "orchestrated-session", + plan=plan_dict, + sub_task_results=result_items, + agents_used=len({r.agent_id for r in results}), + tools_discovered=len(tool_infos), + tools_called=0, + quality_score=evaluation.quality_score, + mock=False, + ) + + +async def run_real_supervisor( + collection: Dict[str, Any], + mcp_server_urls: List[str], + message: str, + conversation_id: Optional[str] +) -> SupervisorChatResponse: + """Run real supervisor with actual LLM and MCP calls + A2A peer agent delegation.""" + + try: + import mlflow + from mlflow.entities import SpanType + _mlflow_ok = True + except ImportError: + _mlflow_ok = False + + # Validate Databricks auth + try: + endpoint_url, auth_headers = _get_databricks_llm_config() + except Exception as e: + raise HTTPException( + status_code=status.HTTP_503_SERVICE_UNAVAILABLE, + detail=f"Databricks auth failed: {e}. Ensure CLI profile or env vars are configured." + ) + + # Step 1: Discover tools from real MCP servers + tool_infos: List[ToolInfo] = [] + for server_url in mcp_server_urls: + if server_url and server_url.strip(): + server_tools = await fetch_tool_infos_real(server_url.strip()) + tool_infos.extend(server_tools) + + # Step 1b: Discover peer agents for delegation + peer_agents = WarehouseDB.list_active_a2a_agents() + + if not tool_infos and not peer_agents: + return SupervisorChatResponse( + response="I couldn't discover any tools or peer agents. 
Please check that the MCP servers are running.", + conversation_id=conversation_id or "error-session", + tools_discovered=0, + tools_called=0, + mock=False + ) + + # Step 2: Create messages for LLM with peer agent info + peer_agent_desc = "" + if peer_agents: + lines = ["\n\nAvailable Peer Agents (delegate sub-tasks to these using the delegate_to_agent tool):"] + for pa in peer_agents: + caps = pa.get("capabilities", "") or "" + lines.append(f"- **{pa['name']}** (ID: {pa['id']}): {pa.get('description', 'No description')} [Capabilities: {caps}]") + peer_agent_desc = "\n".join(lines) + + system_prompt = f"""You are {collection['name']}, an AI supervisor that coordinates multiple specialized agents. + +Available tools: {len(tool_infos)} tools discovered from MCP servers.{peer_agent_desc} + +Your responsibilities: +1. Understand user requests +2. Select appropriate tools to fulfill requests +3. Call tools with correct parameters +4. Delegate sub-tasks to peer agents when their specialization matches the request +5. Synthesize results into helpful responses + +Always explain your reasoning and provide clear, actionable information.""" + + messages_list = [ + {"role": "system", "content": system_prompt}, + {"role": "user", "content": message} + ] + + # Step 3: Build tools list — MCP tools + delegation tool + tools_param = [ti.spec for ti in tool_infos] + + if peer_agents: + delegation_tool = { + "type": "function", + "function": { + "name": "delegate_to_agent", + "description": "Delegate a sub-task to a peer agent. 
Use this when a peer agent's specialization matches the request.", + "parameters": { + "type": "object", + "properties": { + "agent_id": {"type": "integer", "description": "ID of the peer agent to delegate to"}, + "task_description": {"type": "string", "description": "Description of the task to delegate"}, + }, + "required": ["agent_id", "task_description"], + } + } + } + tools_param.append(delegation_tool) + + try: + async with httpx.AsyncClient(timeout=120.0) as client: + request_body = { + "messages": messages_list, + "tools": tools_param, + "tool_choice": "auto", + "max_tokens": 4096, + "temperature": 0.1 + } + + response = await client.post( + endpoint_url, + headers=auth_headers, + json=request_body + ) + response.raise_for_status() + result = response.json() + + except httpx.HTTPError as e: + return SupervisorChatResponse( + response=f"Error calling LLM: {str(e)}. Please check your Databricks credentials and endpoint availability.", + conversation_id=conversation_id or "error-session", + tools_discovered=len(tool_infos), + tools_called=0, + mock=False + ) + + # Step 4: Handle tool calls (MCP tools or delegation) + message_content = result.get("choices", [{}])[0].get("message", {}) + tool_calls = message_content.get("tool_calls", []) + tools_called_count = len(tool_calls) + + if tool_calls: + tool_results = [] + + # Derive base_url for delegation calls + base_url = settings.a2a_base_url or "http://localhost:8000" + + for tool_call in tool_calls: + function = tool_call.get("function", {}) + tool_name = function.get("name") + tool_args = json.loads(function.get("arguments", "{}")) + + if tool_name == "delegate_to_agent": + # A2A delegation + target_agent_id = tool_args.get("agent_id") + task_desc = tool_args.get("task_description", "") + + if _mlflow_ok: + target_agent = WarehouseDB.get_agent(target_agent_id) if target_agent_id else None + agent_name = target_agent["name"] if target_agent else str(target_agent_id) + with 
mlflow.start_span(name=f"a2a_delegate:{agent_name}", span_type=SpanType.TOOL) as span: + span.set_inputs({"agent_id": target_agent_id, "task_description": task_desc}) + delegation_result = await _delegate_to_agent(target_agent_id, task_desc, base_url) + span.set_outputs({"response_length": len(delegation_result)}) + else: + delegation_result = await _delegate_to_agent(target_agent_id, task_desc, base_url) + + tool_results.append({ + "tool_call_id": tool_call.get("id"), + "role": "tool", + "name": tool_name, + "content": delegation_result + }) + else: + # Regular MCP tool call + tool_info = next((t for t in tool_infos if t.name == tool_name), None) + + if not tool_info: + tool_results.append({ + "tool_call_id": tool_call.get("id"), + "role": "tool", + "name": tool_name, + "content": json.dumps({"error": f"Tool {tool_name} not found"}) + }) + continue + + try: + tool_result = await call_tool_real(tool_name, tool_args, tool_info.server_url) + tool_results.append({ + "tool_call_id": tool_call.get("id"), + "role": "tool", + "name": tool_name, + "content": json.dumps(tool_result) + }) + except Exception as e: + tool_results.append({ + "tool_call_id": tool_call.get("id"), + "role": "tool", + "name": tool_name, + "content": json.dumps({"error": str(e)}) + }) + + # Step 5: Get final response from LLM with tool results + messages_list.append(message_content) + messages_list.extend(tool_results) + + try: + async with httpx.AsyncClient(timeout=120.0) as client: + response = await client.post( + endpoint_url, + headers=auth_headers, + json={ + "messages": messages_list, + "max_tokens": 4096, + "temperature": 0.1 + } + ) + response.raise_for_status() + result = response.json() + except Exception as e: + return SupervisorChatResponse( + response=f"Error getting final response: {str(e)}", + conversation_id=conversation_id or "error-session", + tools_discovered=len(tool_infos), + tools_called=tools_called_count, + mock=False + ) + + # Extract final response + final_content = 
result.get("choices", [{}])[0].get("message", {}).get("content", "") + + if not final_content: + final_content = "I apologize, but I couldn't generate a response. Please try again." + + return SupervisorChatResponse( + response=final_content, + conversation_id=conversation_id or "new", + tools_discovered=len(tool_infos) + len(peer_agents), + tools_called=tools_called_count, + mock=False + ) + + +@router.get( + "/status/{collection_id}", + status_code=status.HTTP_200_OK, + summary="Get Supervisor Runtime Status", + description="Check if supervisor runtime is available for a collection" +) +async def get_supervisor_status(collection_id: int) -> Dict[str, Any]: + """Get supervisor runtime status for a collection.""" + collection = WarehouseDB.get_collection(collection_id) + if not collection: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Collection {collection_id} not found" + ) + + # Get items to count tools + items = WarehouseDB.list_collection_items(collection_id) + + # Count MCP servers + mcp_servers = set() + for item in items: + if item.get("mcp_server_id"): + mcp_servers.add(item["mcp_server_id"]) + if item.get("app_id"): + app = WarehouseDB.get_app(item["app_id"]) + if app and app.get("url"): + mcp_servers.add(app["url"]) + + return { + "collection_id": collection_id, + "collection_name": collection["name"], + "runtime_available": True, + "mock_mode": True, + "production_mode": False, + "tools_available": len(items), + "mcp_servers": len(mcp_servers), + "endpoints": { + "chat": f"/api/supervisor-runtime/chat", + "status": f"/api/supervisor-runtime/status/{collection_id}" + } + } diff --git a/dbx-agent-app/app/backend/app/routes/supervisors.py b/dbx-agent-app/app/backend/app/routes/supervisors.py new file mode 100644 index 00000000..99e2ae9e --- /dev/null +++ b/dbx-agent-app/app/backend/app/routes/supervisors.py @@ -0,0 +1,275 @@ +""" +REST API endpoints for supervisor generation. 
+""" + +from fastapi import APIRouter, Depends, HTTPException, Request, status, Response +from fastapi.responses import StreamingResponse +from datetime import datetime +from typing import List +import io +import zipfile + +from app.db_adapter import WarehouseDB # Auto-switches between SQLite and Warehouse +from app.schemas.supervisor import ( + SupervisorGenerateRequest, + SupervisorGenerateResponse, + SupervisorPreviewResponse, + SupervisorMetadata, + SupervisorListResponse, +) +from app.services.generator import GeneratorService, GeneratorError, get_generator_service +from app.services.audit import record_audit + +router = APIRouter(prefix="/supervisors", tags=["Supervisors"]) + + +@router.post( + "/generate", + response_model=SupervisorGenerateResponse, + status_code=status.HTTP_201_CREATED, + summary="Generate Supervisor", + description="Generate a supervisor from a collection", +) +def generate_supervisor( + request: SupervisorGenerateRequest, + http_request: Request, + generator: GeneratorService = Depends(get_generator_service), +) -> SupervisorGenerateResponse: + """ + Generate a supervisor from a collection. + + Creates three files: + - supervisor.py: Main supervisor code with Pattern 3 + - requirements.txt: Python dependencies + - app.yaml: Databricks Apps deployment config + + The generated supervisor uses dynamic tool discovery at runtime. 
+ """ + try: + # Generate supervisor code + files = generator.generate_and_validate( + collection_id=request.collection_id, + llm_endpoint=request.llm_endpoint, + app_name=request.app_name, + ) + + # Fetch collection for metadata + collection = WarehouseDB.get_collection(request.collection_id) + if not collection: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Collection with id {request.collection_id} not found", + ) + + # Determine app name + app_name = request.app_name + if not app_name: + # Normalize collection name to valid app name + collection_name = collection.get('name', 'supervisor') + app_name = collection_name.lower().replace(" ", "-") + app_name = "".join(c for c in app_name if c.isalnum() or c == "-") + + # Persist supervisor metadata + WarehouseDB.create_supervisor( + collection_id=request.collection_id, + app_name=app_name, + ) + + record_audit(http_request, "generate", "supervisor", resource_name=app_name, + details={"collection_id": request.collection_id}) + + return SupervisorGenerateResponse( + collection_id=collection.get('id'), + collection_name=collection.get('name', ''), + app_name=app_name, + files=files, + generated_at=datetime.utcnow().isoformat() + "Z", + supervisor_url=f"/apps/{app_name}", + code=files.get("supervisor.py"), + ) + + except GeneratorError as e: + raise HTTPException( + status_code=status.HTTP_422_UNPROCESSABLE_ENTITY, + detail=str(e), + ) + + +@router.get( + "/{collection_id}/preview", + response_model=SupervisorPreviewResponse, + status_code=status.HTTP_200_OK, + summary="Preview Supervisor", + description="Preview generated files without full generation", +) +def preview_supervisor( + collection_id: int, + llm_endpoint: str = "databricks-meta-llama-3-1-70b-instruct", + app_name: str = None, + generator: GeneratorService = Depends(get_generator_service), +) -> SupervisorPreviewResponse: + """ + Preview generated files before download. 
+ + Returns metadata about what will be generated and a preview + of each file (first 500 characters). + """ + try: + # Fetch collection + collection = WarehouseDB.get_collection(collection_id) + if not collection: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Collection with id {collection_id} not found", + ) + + # Fetch collection items + _, items = generator.fetch_collection_items(collection_id) + + # Resolve MCP server URLs + mcp_server_urls = generator.resolve_mcp_server_urls(items) + + # Generate files + files = generator.generate_supervisor_code( + collection_id=collection_id, + llm_endpoint=llm_endpoint, + app_name=app_name, + ) + + # Determine app name + if not app_name: + collection_name = collection.get('name', 'supervisor') + app_name = collection_name.lower().replace(" ", "-") + app_name = "".join(c for c in app_name if c.isalnum() or c == "-") + + # Create preview (first 500 chars of each file) + preview = { + filename: content[:500] + ("..." if len(content) > 500 else "") + for filename, content in files.items() + } + + return SupervisorPreviewResponse( + collection_id=collection.get('id'), + collection_name=collection.get('name', ''), + app_name=app_name, + mcp_server_urls=mcp_server_urls, + tool_count=len(items), + preview=preview, + ) + + except GeneratorError as e: + raise HTTPException( + status_code=status.HTTP_422_UNPROCESSABLE_ENTITY, + detail=str(e), + ) + + +@router.post( + "/{collection_id}/download", + response_class=StreamingResponse, + status_code=status.HTTP_200_OK, + summary="Download Supervisor", + description="Download generated supervisor as zip archive", +) +def download_supervisor( + collection_id: int, + llm_endpoint: str = "databricks-meta-llama-3-1-70b-instruct", + app_name: str = None, + generator: GeneratorService = Depends(get_generator_service), +) -> StreamingResponse: + """ + Download generated supervisor as a zip archive. 
+ + The zip contains: + - supervisor.py + - requirements.txt + - app.yaml + + Can be deployed directly to Databricks Apps. + """ + try: + # Fetch collection + collection = WarehouseDB.get_collection(collection_id) + if not collection: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Collection with id {collection_id} not found", + ) + + # Generate files + files = generator.generate_and_validate( + collection_id=collection_id, + llm_endpoint=llm_endpoint, + app_name=app_name, + ) + + # Determine app name for filename + if not app_name: + collection_name = collection.get('name', 'supervisor') + app_name = collection_name.lower().replace(" ", "-") + app_name = "".join(c for c in app_name if c.isalnum() or c == "-") + + # Persist supervisor metadata + WarehouseDB.create_supervisor( + collection_id=collection_id, + app_name=app_name, + ) + + # Create zip file in memory + zip_buffer = io.BytesIO() + with zipfile.ZipFile(zip_buffer, mode="w", compression=zipfile.ZIP_DEFLATED) as zip_file: + for filename, content in files.items(): + zip_file.writestr(filename, content) + + zip_buffer.seek(0) + + return StreamingResponse( + zip_buffer, + media_type="application/zip", + headers={ + "Content-Disposition": f"attachment; filename={app_name}.zip" + }, + ) + + except GeneratorError as e: + raise HTTPException( + status_code=status.HTTP_422_UNPROCESSABLE_ENTITY, + detail=str(e), + ) + + +@router.get( + "", + response_model=SupervisorListResponse, + status_code=status.HTTP_200_OK, + summary="List Supervisors", + description="List generated supervisors with metadata", +) +def list_supervisors() -> SupervisorListResponse: + """ + List all generated supervisors with metadata tracking. 
+ """ + supervisors, total = WarehouseDB.list_supervisors() + return SupervisorListResponse( + supervisors=[SupervisorMetadata(**s) for s in supervisors], + total=total, + ) + + +@router.delete( + "/{supervisor_id}", + status_code=status.HTTP_204_NO_CONTENT, + summary="Delete Supervisor Metadata", + description="Delete supervisor metadata (does not undeploy the app)", +) +def delete_supervisor(supervisor_id: int, request: Request) -> None: + """ + Delete supervisor metadata. + """ + deleted = WarehouseDB.delete_supervisor(supervisor_id) + if not deleted: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Supervisor with id {supervisor_id} not found", + ) + record_audit(request, "delete", "supervisor", str(supervisor_id)) diff --git a/dbx-agent-app/app/backend/app/routes/tools.py b/dbx-agent-app/app/backend/app/routes/tools.py new file mode 100644 index 00000000..d02844c4 --- /dev/null +++ b/dbx-agent-app/app/backend/app/routes/tools.py @@ -0,0 +1,68 @@ +""" +Read-only endpoints for Tools using Databricks SQL Warehouse. 
+""" + +from fastapi import APIRouter, HTTPException, status, Query +import math + +from app.db_adapter import WarehouseDB # Auto-switches between SQLite and Warehouse +from app.schemas.tool import ToolResponse +from app.schemas.common import PaginatedResponse + +router = APIRouter(prefix="/tools", tags=["Tools"]) + + +@router.get( + "", + response_model=PaginatedResponse[ToolResponse], + status_code=status.HTTP_200_OK, + summary="List Tools", + description="List all tools with pagination", +) +def list_tools( + page: int = Query(1, ge=1, description="Page number"), + page_size: int = Query(50, ge=1, le=100, description="Items per page"), + mcp_server_id: int | None = Query(None, description="Filter by MCP server ID"), + name: str | None = Query(None, description="Filter by tool name (substring match)"), + search: str | None = Query(None, description="Full-text search on name and description"), + tags: str | None = Query(None, description="Filter by parent app tags (comma-separated)"), + owner: str | None = Query(None, description="Filter by parent app owner (substring match)"), +) -> PaginatedResponse[ToolResponse]: + """List all tools with optional filtering and pagination.""" + tools, total = WarehouseDB.list_tools( + page=page, + page_size=page_size, + mcp_server_id=mcp_server_id, + name=name, + search=search, + tags=tags, + owner=owner, + ) + total = int(total) if total else 0 # Convert string to int from warehouse + total_pages = math.ceil(total / page_size) if total > 0 else 1 + + return PaginatedResponse( + items=[ToolResponse(**tool) for tool in tools], + total=total, + page=page, + page_size=page_size, + total_pages=total_pages, + ) + + +@router.get( + "/{tool_id}", + response_model=ToolResponse, + status_code=status.HTTP_200_OK, + summary="Get Tool", + description="Get a specific tool by ID", +) +def get_tool(tool_id: int) -> ToolResponse: + """Get a specific tool by ID.""" + tool = WarehouseDB.get_tool(tool_id) + if not tool: + raise HTTPException( + 
status_code=status.HTTP_404_NOT_FOUND, + detail=f"Tool with id {tool_id} not found", + ) + return ToolResponse(**tool) diff --git a/dbx-agent-app/app/backend/app/routes/traces.py b/dbx-agent-app/app/backend/app/routes/traces.py new file mode 100644 index 00000000..18eb4c6f --- /dev/null +++ b/dbx-agent-app/app/backend/app/routes/traces.py @@ -0,0 +1,103 @@ +""" +SSE events and trace span endpoints for chat tracing. + +Provides: +- GET /events?trace_id=X — SSE stream of trace events for a chat request +- GET /traces/{trace_id} — JSON span data for a trace +""" + +import json +import logging +from typing import AsyncGenerator + +from fastapi import APIRouter, HTTPException, Query, status +from fastapi.responses import StreamingResponse + +logger = logging.getLogger(__name__) + +router = APIRouter(tags=["Traces"]) + + +async def _event_stream(trace_id: str) -> AsyncGenerator[str, None]: + """Generate SSE messages from stored trace events.""" + from app.routes.chat import trace_events + + events = trace_events.get(trace_id, []) + for event in events: + event_type = event.get("type", "message") + data = json.dumps(event.get("data", {})) + yield f"event: {event_type}\ndata: {data}\n\n" + + +@router.get("/events") +async def stream_events(trace_id: str = Query(..., description="Trace ID to stream events for")): + """ + SSE endpoint that replays stored trace events for a given trace_id. + + The chat endpoint is synchronous, so all events are already stored + by the time the frontend connects. Events are streamed and then the + connection closes. 
+ """ + from app.routes.chat import trace_events + + if trace_id not in trace_events: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"No events found for trace_id {trace_id}", + ) + + return StreamingResponse( + _event_stream(trace_id), + media_type="text/event-stream", + headers={ + "Cache-Control": "no-cache", + "Connection": "keep-alive", + "X-Accel-Buffering": "no", + }, + ) + + +@router.get("/traces/{trace_id}") +async def get_trace(trace_id: str): + """Return span data for a given trace. + + First checks in-memory store (fast path for current session). + Falls back to MLflow for traces that survived a server restart. + """ + from app.routes.chat import trace_spans + + # Fast path: in-memory data from current session + if trace_id in trace_spans: + return { + "trace_id": trace_id, + "spans": trace_spans[trace_id], + } + + # Fallback: fetch from MLflow persistence + try: + from mlflow.client import MlflowClient + client = MlflowClient() + trace = client.get_trace(trace_id) + if trace and trace.data and trace.data.spans: + spans = [] + for s in trace.data.spans: + spans.append({ + "id": s.span_id, + "trace_id": trace_id, + "name": s.name, + "start_time": s.start_time_ns // 1_000_000 if s.start_time_ns else 0, + "end_time": s.end_time_ns // 1_000_000 if s.end_time_ns else 0, + "attributes": dict(s.attributes) if s.attributes else {}, + "status": "ERROR" if s.status and str(s.status).upper() == "ERROR" else "OK", + }) + return { + "trace_id": trace_id, + "spans": spans, + } + except Exception as e: + logger.warning("MLflow trace fallback failed for %s: %s", trace_id, e) + + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"No trace data found for trace_id {trace_id}", + ) diff --git a/dbx-agent-app/app/backend/app/routes/workspace_assets.py b/dbx-agent-app/app/backend/app/routes/workspace_assets.py new file mode 100644 index 00000000..27ed21d6 --- /dev/null +++ 
b/dbx-agent-app/app/backend/app/routes/workspace_assets.py @@ -0,0 +1,144 @@ +""" +CRUD and search endpoints for Databricks workspace assets. +""" + +import logging +import math +from fastapi import APIRouter, HTTPException, Query, Request, status + +from app.db_adapter import WarehouseDB +from app.schemas.workspace_asset import ( + WorkspaceAssetResponse, + WorkspaceCrawlRequest, + WorkspaceCrawlResponse, +) +from app.schemas.common import PaginatedResponse +from app.services.audit import record_audit + +logger = logging.getLogger(__name__) + +router = APIRouter(prefix="/workspace-assets", tags=["Workspace Assets"]) + + +@router.get( + "", + response_model=PaginatedResponse[WorkspaceAssetResponse], + status_code=status.HTTP_200_OK, + summary="List Workspace Assets", + description="List indexed workspace assets with filtering and pagination", +) +def list_workspace_assets( + page: int = Query(1, ge=1, description="Page number"), + page_size: int = Query(50, ge=1, le=200, description="Items per page"), + asset_type: str = Query(None, description="Filter by type (notebook, job, dashboard, pipeline, cluster, experiment)"), + search: str = Query(None, description="Search by name, description, or content preview"), + owner: str = Query(None, description="Filter by owner"), + workspace_host: str = Query(None, description="Filter by workspace host"), +) -> PaginatedResponse[WorkspaceAssetResponse]: + """List workspace assets with optional filters.""" + assets, total = WarehouseDB.list_workspace_assets( + page=page, + page_size=page_size, + asset_type=asset_type, + search=search, + owner=owner, + workspace_host=workspace_host, + ) + total = int(total) if total else 0 # Convert string to int from warehouse + total_pages = math.ceil(total / page_size) if total > 0 else 1 + + return PaginatedResponse( + items=[WorkspaceAssetResponse(**a) for a in assets], + total=total, + page=page, + page_size=page_size, + total_pages=total_pages, + ) + + +@router.get( + "/{asset_id}", + 
response_model=WorkspaceAssetResponse, + status_code=status.HTTP_200_OK, + summary="Get Workspace Asset", + description="Get a specific workspace asset by ID", +) +def get_workspace_asset(asset_id: int) -> WorkspaceAssetResponse: + """Get a specific workspace asset.""" + asset = WarehouseDB.get_workspace_asset(asset_id) + if not asset: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Workspace asset with id {asset_id} not found", + ) + return WorkspaceAssetResponse(**asset) + + +@router.post( + "/crawl", + response_model=WorkspaceCrawlResponse, + status_code=status.HTTP_200_OK, + summary="Crawl Workspace", + description="Trigger a crawl of the Databricks workspace to index notebooks, jobs, dashboards, etc.", +) +def crawl_workspace(request: WorkspaceCrawlRequest = None, http_request: Request = None) -> WorkspaceCrawlResponse: + """ + Trigger a workspace crawl. + + Indexes notebooks, jobs, dashboards, pipelines, clusters, and experiments. + """ + if request is None: + request = WorkspaceCrawlRequest() + + try: + from app.services.workspace_crawler import WorkspaceCrawlerService + + service = WorkspaceCrawlerService(profile=request.databricks_profile) + stats = service.crawl( + asset_types=request.asset_types, + root_path=request.root_path or "/", + ) + + if stats.errors and stats.assets_discovered == 0: + result_status = "failed" + message = f"Workspace crawl failed with {len(stats.errors)} errors" + elif stats.errors: + result_status = "partial" + message = f"Workspace crawl completed with {len(stats.errors)} errors" + else: + result_status = "success" + message = f"Discovered {stats.assets_discovered} assets across {len(stats.by_type)} types" + + if http_request: + record_audit(http_request, "crawl", "workspace_asset", details={ + "assets_discovered": stats.assets_discovered, + "new_assets": stats.new_assets, + }) + + return WorkspaceCrawlResponse( + status=result_status, + message=message, + assets_discovered=stats.assets_discovered, + 
new_assets=stats.new_assets, + updated_assets=stats.updated_assets, + by_type=stats.by_type, + errors=stats.errors, + ) + except Exception as e: + logger.error("Workspace crawl failed: %s", e) + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=f"Workspace crawl failed: {e}", + ) + + +@router.delete( + "", + status_code=status.HTTP_204_NO_CONTENT, + summary="Clear Workspace Assets", + description="Delete all indexed workspace assets (useful for re-indexing)", +) +def clear_workspace_assets(http_request: Request) -> None: + """Delete all workspace assets.""" + WarehouseDB.clear_workspace_assets() + record_audit(http_request, "clear", "workspace_asset") diff --git a/dbx-agent-app/app/backend/app/schemas/__init__.py b/dbx-agent-app/app/backend/app/schemas/__init__.py new file mode 100644 index 00000000..c9c567a5 --- /dev/null +++ b/dbx-agent-app/app/backend/app/schemas/__init__.py @@ -0,0 +1,61 @@ +""" +Pydantic schemas for request/response validation. + +This package contains all API schemas: +- app: App creation, update, and response schemas +- mcp_server: MCP server schemas +- tool: Tool schemas and filtering +- collection: Collection and collection item schemas +- discovery: Discovery request and response schemas +- common: Shared schemas and base models +""" + +from app.schemas.app import AppCreate, AppUpdate, AppResponse +from app.schemas.mcp_server import MCPServerCreate, MCPServerUpdate, MCPServerResponse +from app.schemas.tool import ToolResponse, ToolFilter +from app.schemas.collection import ( + CollectionCreate, + CollectionUpdate, + CollectionResponse, + CollectionItemCreate, + CollectionItemResponse, +) +from app.schemas.discovery import ( + DiscoveryRefreshRequest, + DiscoveryRefreshResponse, + DiscoveryStatusResponse, +) +from app.schemas.supervisor import ( + SupervisorGenerateRequest, + SupervisorGenerateResponse, + SupervisorPreviewResponse, + SupervisorMetadata, + SupervisorListResponse, +) +from app.schemas.common 
import PaginatedResponse, HealthResponse + +__all__ = [ + "AppCreate", + "AppUpdate", + "AppResponse", + "MCPServerCreate", + "MCPServerUpdate", + "MCPServerResponse", + "ToolResponse", + "ToolFilter", + "CollectionCreate", + "CollectionUpdate", + "CollectionResponse", + "CollectionItemCreate", + "CollectionItemResponse", + "DiscoveryRefreshRequest", + "DiscoveryRefreshResponse", + "DiscoveryStatusResponse", + "SupervisorGenerateRequest", + "SupervisorGenerateResponse", + "SupervisorPreviewResponse", + "SupervisorMetadata", + "SupervisorListResponse", + "PaginatedResponse", + "HealthResponse", +] diff --git a/dbx-agent-app/app/backend/app/schemas/a2a.py b/dbx-agent-app/app/backend/app/schemas/a2a.py new file mode 100644 index 00000000..876c5ebb --- /dev/null +++ b/dbx-agent-app/app/backend/app/schemas/a2a.py @@ -0,0 +1,73 @@ +""" +A2A Protocol Pydantic schemas — JSON-RPC request/response, task states, messages, artifacts. +""" + +from enum import Enum +from pydantic import BaseModel, Field +from typing import Optional, List, Dict, Any + + +class TaskState(str, Enum): + SUBMITTED = "submitted" + WORKING = "working" + COMPLETED = "completed" + FAILED = "failed" + CANCELED = "canceled" + INPUT_REQUIRED = "input-required" + AUTH_REQUIRED = "auth-required" + REJECTED = "rejected" + + +TERMINAL_STATES = { + TaskState.COMPLETED, + TaskState.FAILED, + TaskState.CANCELED, + TaskState.REJECTED, +} + + +class MessagePart(BaseModel): + text: Optional[str] = None + mediaType: Optional[str] = None + + +class A2AMessage(BaseModel): + messageId: str + role: str # "user" or "agent" + parts: List[MessagePart] + contextId: Optional[str] = None + metadata: Optional[Dict[str, Any]] = None + + +class A2AArtifact(BaseModel): + artifactId: str + name: Optional[str] = None + parts: List[MessagePart] + + +class A2ATaskStatus(BaseModel): + state: TaskState + stateReason: Optional[str] = None + + +class A2ATaskResponse(BaseModel): + id: str + contextId: Optional[str] = None + status: 
A2ATaskStatus + messages: List[A2AMessage] = Field(default_factory=list) + artifacts: List[A2AArtifact] = Field(default_factory=list) + metadata: Optional[Dict[str, Any]] = None + + +class JsonRpcRequest(BaseModel): + jsonrpc: str = "2.0" + id: Any + method: str + params: Optional[Dict[str, Any]] = None + + +class JsonRpcResponse(BaseModel): + jsonrpc: str = "2.0" + id: Any + result: Optional[Any] = None + error: Optional[Dict[str, Any]] = None diff --git a/dbx-agent-app/app/backend/app/schemas/agent.py b/dbx-agent-app/app/backend/app/schemas/agent.py new file mode 100644 index 00000000..aa06da9c --- /dev/null +++ b/dbx-agent-app/app/backend/app/schemas/agent.py @@ -0,0 +1,129 @@ +""" +Pydantic schemas for Agent entities. +""" + +from pydantic import BaseModel, Field +from typing import Optional, List + + +class A2ASkill(BaseModel): + """A2A skill descriptor.""" + + id: str = Field(..., description="Skill identifier") + name: str = Field(..., description="Human-readable name") + description: Optional[str] = Field(None, description="What the skill does") + tags: Optional[List[str]] = Field(None, description="Skill tags") + + +class A2ACapabilities(BaseModel): + """A2A agent capabilities.""" + + streaming: bool = Field(False, description="Supports SSE streaming") + pushNotifications: bool = Field(False, description="Supports push notifications") + + +class AgentBase(BaseModel): + """Base schema for Agent with common fields.""" + + name: str = Field( + ..., + min_length=1, + max_length=255, + description="Agent name", + example="Research Agent", + ) + description: Optional[str] = Field( + None, + description="What the agent does", + example="Searches expert transcripts and profiles", + ) + capabilities: Optional[str] = Field( + None, + description="Comma-separated capability tags", + example="search,analysis,reporting", + ) + status: Optional[str] = Field( + "draft", + description="Agent status: draft, active, inactive, error", + example="active", + ) + collection_id: 
Optional[int] = Field( + None, + description="ID of the linked collection (tool set)", + example=1, + ) + endpoint_url: Optional[str] = Field( + None, + description="Serving endpoint or runtime URL", + example="https://my-workspace.cloud.databricks.com/serving-endpoints/agent-1", + ) + # A2A Protocol fields + a2a_capabilities: Optional[str] = Field( + None, + description='JSON: {"streaming": true, "pushNotifications": false}', + ) + skills: Optional[str] = Field( + None, + description='JSON array of A2A skill descriptors', + ) + protocol_version: Optional[str] = Field( + None, + description="A2A protocol version", + example="0.3.0", + ) + system_prompt: Optional[str] = Field( + None, + description="Rich persona / instructions for LLM when processing A2A tasks", + ) + + +class AgentCreate(AgentBase): + """Schema for creating a new Agent. Status defaults to 'draft'.""" + + auth_token: Optional[str] = Field(None, description="Bearer token for inbound A2A auth") + + +class AgentUpdate(BaseModel): + """Schema for updating an Agent. 
All fields are optional.""" + + name: Optional[str] = Field( + None, + min_length=1, + max_length=255, + description="Agent name", + ) + description: Optional[str] = Field(None, description="What the agent does") + capabilities: Optional[str] = Field(None, description="Comma-separated capability tags") + status: Optional[str] = Field(None, description="Agent status") + collection_id: Optional[int] = Field(None, description="Linked collection ID") + endpoint_url: Optional[str] = Field(None, description="Serving endpoint URL") + auth_token: Optional[str] = Field(None, description="Bearer token for inbound A2A auth") + a2a_capabilities: Optional[str] = Field(None, description="A2A capabilities JSON") + skills: Optional[str] = Field(None, description="A2A skills JSON array") + protocol_version: Optional[str] = Field(None, description="A2A protocol version") + system_prompt: Optional[str] = Field(None, description="Rich persona / instructions for LLM") + + +class AgentResponse(AgentBase): + """Schema for Agent response (excludes auth_token for security).""" + + id: int = Field(..., description="Agent ID", example=1) + created_at: Optional[str] = Field(None, description="Creation timestamp") + updated_at: Optional[str] = Field(None, description="Last update timestamp") + + class Config: + from_attributes = True + + +class AgentCardResponse(BaseModel): + """A2A-compliant Agent Card response.""" + + name: str + description: Optional[str] = None + version: Optional[str] = None + protocolVersion: str = "0.3.0" + url: str + capabilities: A2ACapabilities = Field(default_factory=A2ACapabilities) + skills: List[A2ASkill] = Field(default_factory=list) + securitySchemes: Optional[dict] = None + security: Optional[list] = None diff --git a/dbx-agent-app/app/backend/app/schemas/agent_chat.py b/dbx-agent-app/app/backend/app/schemas/agent_chat.py new file mode 100644 index 00000000..4142f675 --- /dev/null +++ b/dbx-agent-app/app/backend/app/schemas/agent_chat.py @@ -0,0 +1,168 @@ +""" 
+Agent Chat Schemas + +Pydantic models for the Agent Chat feature that proxies queries +to Databricks serving endpoints and enriches responses with +routing, slot filling, and pipeline metadata. +""" + +from typing import Optional, Dict, Any, List + +from pydantic import BaseModel, Field + + +# --- Request schemas --- + +class AgentChatRequest(BaseModel): + """Request to query a Databricks serving endpoint.""" + endpoint_name: str = Field(..., description="Name of the Databricks serving endpoint") + message: str = Field(..., min_length=1, description="User message to send to the agent") + + +# --- Routing schemas --- + +class ToolCall(BaseModel): + """A tool invocation by an agent.""" + tool: str = Field(..., description="Name of the tool invoked") + description: str = Field(..., description="Description of what the tool did") + input: Optional[Dict[str, Any]] = Field(None, description="Input parameters passed to the tool") + output: Optional[Dict[str, Any]] = Field(None, description="Output returned by the tool") + + +class ProcessingStep(BaseModel): + """A processing step in the agent routing flow.""" + step: int = Field(..., description="Step number in the processing sequence") + name: str = Field(..., description="Name of the processing step") + description: str = Field(..., description="Description of what this step does") + timestamp: int = Field(..., description="Timestamp in milliseconds") + details: Dict[str, Any] = Field(default_factory=dict, description="Additional step details") + + +class RoutingInfo(BaseModel): + """Agent routing information showing supervisor/sub-agent flow.""" + usedSupervisor: bool = Field(False, description="Whether a supervisor agent was used for routing") + subAgent: Optional[str] = Field(None, description="Name of the sub-agent that was routed to") + toolCalls: List[ToolCall] = Field(default_factory=list, description="List of tool invocations") + processingSteps: List[ProcessingStep] = Field(default_factory=list, 
description="Processing steps taken") + + +# --- Slot filling schemas --- + +class NLToSQLMapping(BaseModel): + """A natural language to SQL clause mapping.""" + naturalLanguage: str = Field(..., description="The natural language phrase") + sqlClause: str = Field(..., description="The corresponding SQL clause") + type: str = Field(..., description="Type of SQL clause (WHERE, MATCH, LIMIT)") + confidence: float = Field(..., ge=0.0, le=1.0, description="Confidence score of the mapping") + + +class SlotData(BaseModel): + """Extracted slot filling data from the user query.""" + entities: List[str] = Field(default_factory=list, description="Detected entities") + topics: List[str] = Field(default_factory=list, description="Detected topics") + filters: Dict[str, Any] = Field(default_factory=dict, description="Extracted filters") + searchTerms: List[str] = Field(default_factory=list, description="Search terms extracted from query") + + +class SlotFillingInfo(BaseModel): + """Complete slot filling information including query construction.""" + slots: SlotData = Field(default_factory=SlotData, description="Extracted slot data") + elasticQuery: Dict[str, Any] = Field(default_factory=dict, description="Generated Elasticsearch query") + nlToSql: List[NLToSQLMapping] = Field(default_factory=list, description="NL to SQL mappings") + + +# --- Pipeline schemas --- + +class PipelineStepCostBreakdown(BaseModel): + """Cost breakdown for a pipeline step.""" + input: float = Field(0.0, description="Input token cost") + output: float = Field(0.0, description="Output token cost") + + +class PipelineStepMetrics(BaseModel): + """Metrics for a single pipeline step.""" + tokensProcessed: Optional[int] = Field(None, description="Tokens processed in this step") + entitiesFound: Optional[int] = Field(None, description="Number of entities found") + inputTokens: Optional[int] = Field(None, description="Input tokens used") + outputTokens: Optional[int] = Field(None, description="Output tokens 
generated") + tokensPerSecond: Optional[int] = Field(None, description="Token generation speed") + estimatedCost: Optional[float] = Field(None, description="Estimated cost for this step") + costBreakdown: Optional[PipelineStepCostBreakdown] = Field(None, description="Cost breakdown") + latency: Optional[int] = Field(None, description="Step latency in milliseconds") + + +class PipelineStep(BaseModel): + """A step in the processing pipeline.""" + id: int = Field(..., description="Step ID") + name: str = Field(..., description="Step name") + status: str = Field("completed", description="Step status") + timestamp: int = Field(..., description="Step timestamp in milliseconds") + duration: int = Field(0, description="Step duration in milliseconds") + details: Dict[str, Any] = Field(default_factory=dict, description="Step details") + tools: List[str] = Field(default_factory=list, description="Tools used in this step") + metrics: Optional[PipelineStepMetrics] = Field(None, description="Step metrics") + + +class CostBreakdown(BaseModel): + """Overall cost breakdown with formatted strings.""" + input: str = Field(..., description="Input cost formatted as dollar amount") + output: str = Field(..., description="Output cost formatted as dollar amount") + total: str = Field(..., description="Total cost formatted as dollar amount") + + +class LatencyBreakdown(BaseModel): + """Latency breakdown by processing phase.""" + preprocessing: int = Field(0, description="Preprocessing latency in ms") + search: int = Field(0, description="Search latency in ms") + llm: int = Field(0, description="LLM generation latency in ms") + postprocessing: int = Field(0, description="Postprocessing latency in ms") + total: int = Field(0, description="Total latency in ms") + + +class PipelineMetrics(BaseModel): + """Aggregate metrics for the entire pipeline.""" + totalTokens: int = Field(0, description="Total tokens used") + inputTokens: int = Field(0, description="Total input tokens") + outputTokens: 
int = Field(0, description="Total output tokens") + estimatedCost: float = Field(0.0, description="Total estimated cost") + tokensPerSecond: int = Field(0, description="Overall token generation speed") + costBreakdown: CostBreakdown = Field(..., description="Cost breakdown") + latencyBreakdown: LatencyBreakdown = Field(..., description="Latency breakdown") + + +class PipelineInfo(BaseModel): + """Complete processing pipeline information.""" + steps: List[PipelineStep] = Field(default_factory=list, description="Pipeline steps") + totalDuration: int = Field(0, description="Total pipeline duration in ms") + totalSteps: int = Field(0, description="Number of steps in pipeline") + startTime: int = Field(0, description="Pipeline start time in ms") + endTime: int = Field(0, description="Pipeline end time in ms") + metrics: Optional[PipelineMetrics] = Field(None, description="Aggregate pipeline metrics") + + +# --- Response schemas --- + +class AgentChatResponse(BaseModel): + """Response from querying a Databricks serving endpoint.""" + content: str = Field(..., description="Response text from the agent") + requestId: Optional[str] = Field(None, description="Databricks request ID for tracing") + endpoint: str = Field(..., description="Name of the endpoint that was queried") + timestamp: str = Field(..., description="ISO format timestamp of the response") + routing: Optional[RoutingInfo] = Field(None, description="Agent routing information") + slotFilling: Optional[SlotFillingInfo] = Field(None, description="Slot filling and query construction info") + pipeline: Optional[PipelineInfo] = Field(None, description="Processing pipeline visualization data") + + +class AgentChatEndpoint(BaseModel): + """An available agent chat endpoint.""" + name: str = Field(..., description="Endpoint name") + displayName: str = Field(..., description="Human-readable display name") + description: str = Field(..., description="Description of the endpoint") + type: str = Field(..., 
description="Endpoint type (research, supervisor, etc.)") + endpointUrl: Optional[str] = Field(None, description="Direct URL of the endpoint") + + +class AgentChatEndpointsResponse(BaseModel): + """Response listing available agent chat endpoints.""" + endpoints: List[AgentChatEndpoint] = Field(..., description="List of available endpoints") + count: int = Field(..., description="Total number of endpoints") diff --git a/dbx-agent-app/app/backend/app/schemas/app.py b/dbx-agent-app/app/backend/app/schemas/app.py new file mode 100644 index 00000000..ef271c55 --- /dev/null +++ b/dbx-agent-app/app/backend/app/schemas/app.py @@ -0,0 +1,83 @@ +""" +Pydantic schemas for App entities. +""" + +from pydantic import BaseModel, Field, HttpUrl +from typing import Optional +from datetime import datetime + + +class AppBase(BaseModel): + """Base schema for App with common fields.""" + + name: str = Field( + ..., + min_length=1, + max_length=255, + description="App name", + example="sgp-research-app", + ) + owner: Optional[str] = Field( + None, + max_length=255, + description="App owner (username or service principal)", + example="stuart.gano@example.com", + ) + url: Optional[str] = Field( + None, + description="Deployed app URL", + example="https://my-workspace.cloud.databricks.com/apps/sgp-research-app", + ) + tags: Optional[str] = Field( + None, + description="Comma-separated tags", + example="research,guidepoint,mcp", + ) + manifest_url: Optional[str] = Field( + None, + description="URL to app.yaml or manifest file", + example="https://github.com/org/repo/blob/main/app.yaml", + ) + + +class AppCreate(AppBase): + """Schema for creating a new App.""" + + pass + + +class AppUpdate(BaseModel): + """Schema for updating an App. 
All fields are optional.""" + + name: Optional[str] = Field( + None, + min_length=1, + max_length=255, + description="App name", + ) + owner: Optional[str] = Field( + None, + max_length=255, + description="App owner", + ) + url: Optional[str] = Field( + None, + description="Deployed app URL", + ) + tags: Optional[str] = Field( + None, + description="Comma-separated tags", + ) + manifest_url: Optional[str] = Field( + None, + description="URL to app.yaml or manifest file", + ) + + +class AppResponse(AppBase): + """Schema for App response.""" + + id: int = Field(..., description="App ID", example=1) + + class Config: + from_attributes = True diff --git a/dbx-agent-app/app/backend/app/schemas/audit_log.py b/dbx-agent-app/app/backend/app/schemas/audit_log.py new file mode 100644 index 00000000..3045fabb --- /dev/null +++ b/dbx-agent-app/app/backend/app/schemas/audit_log.py @@ -0,0 +1,23 @@ +""" +Pydantic schemas for the audit log API. +""" + +from pydantic import BaseModel, Field +from typing import Optional + + +class AuditLogResponse(BaseModel): + """Single audit log entry.""" + + id: int + timestamp: str = Field(..., description="ISO timestamp of the action") + user_email: str = Field(..., description="Email of the user who performed the action") + action: str = Field(..., description="Action type (create, update, delete, crawl, clear)") + resource_type: str = Field(..., description="Type of resource affected") + resource_id: Optional[str] = Field(None, description="ID of the affected resource") + resource_name: Optional[str] = Field(None, description="Human-readable name of the resource") + details: Optional[str] = Field(None, description="JSON string with extra context") + ip_address: Optional[str] = Field(None, description="Client IP address") + + class Config: + from_attributes = True diff --git a/dbx-agent-app/app/backend/app/schemas/catalog_asset.py b/dbx-agent-app/app/backend/app/schemas/catalog_asset.py new file mode 100644 index 00000000..e6a0c9d9 --- 
/dev/null +++ b/dbx-agent-app/app/backend/app/schemas/catalog_asset.py @@ -0,0 +1,72 @@ +""" +Pydantic schemas for CatalogAsset entities. +""" + +from pydantic import BaseModel, Field +from typing import Optional, List +from datetime import datetime + + +class ColumnInfo(BaseModel): + """Schema for a single column in a UC table/view.""" + + name: str = Field(..., description="Column name") + type: str = Field(..., description="Column data type") + comment: Optional[str] = Field(None, description="Column description") + nullable: bool = Field(True, description="Whether column allows nulls") + position: Optional[int] = Field(None, description="Column ordinal position") + + +class CatalogAssetResponse(BaseModel): + """Response schema for a catalog asset.""" + + id: int = Field(..., description="Asset ID") + asset_type: str = Field(..., description="Asset type (table, view, function, model, volume)") + catalog: str = Field(..., description="UC catalog name") + schema_name: str = Field(..., description="UC schema name") + name: str = Field(..., description="Asset name") + full_name: str = Field(..., description="Three-level namespace (catalog.schema.name)") + owner: Optional[str] = Field(None, description="Asset owner") + comment: Optional[str] = Field(None, description="Asset description") + columns_json: Optional[str] = Field(None, description="JSON array of column definitions") + tags_json: Optional[str] = Field(None, description="JSON array of UC tags") + properties_json: Optional[str] = Field(None, description="JSON object of UC properties") + data_source_format: Optional[str] = Field(None, description="Storage format (DELTA, PARQUET, etc.)") + table_type: Optional[str] = Field(None, description="Table type (MANAGED, EXTERNAL, VIEW)") + row_count: Optional[int] = Field(None, description="Approximate row count") + created_at: Optional[str] = Field(None, description="When first indexed") + updated_at: Optional[str] = Field(None, description="Last index update") + 
last_indexed_at: Optional[str] = Field(None, description="Most recent crawl timestamp") + + class Config: + from_attributes = True + + +class CatalogCrawlRequest(BaseModel): + """Request to trigger a catalog crawl.""" + + catalogs: Optional[List[str]] = Field( + None, + description="Specific catalogs to crawl. If empty, crawls all accessible catalogs.", + ) + include_columns: bool = Field( + True, + description="Whether to fetch column metadata for tables/views", + ) + databricks_profile: Optional[str] = Field( + None, + description="Databricks CLI profile to use for authentication", + ) + + +class CatalogCrawlResponse(BaseModel): + """Response from catalog crawl.""" + + status: str = Field(..., description="Crawl status (success/partial/failed)") + message: str = Field(..., description="Human-readable status message") + catalogs_crawled: int = Field(0, description="Number of catalogs crawled") + schemas_crawled: int = Field(0, description="Number of schemas crawled") + assets_discovered: int = Field(0, description="Total assets discovered") + new_assets: int = Field(0, description="New assets added") + updated_assets: int = Field(0, description="Existing assets updated") + errors: List[str] = Field(default_factory=list, description="Errors encountered") diff --git a/dbx-agent-app/app/backend/app/schemas/collection.py b/dbx-agent-app/app/backend/app/schemas/collection.py new file mode 100644 index 00000000..c526c328 --- /dev/null +++ b/dbx-agent-app/app/backend/app/schemas/collection.py @@ -0,0 +1,130 @@ +""" +Pydantic schemas for Collection and CollectionItem entities. 
+""" + +from pydantic import BaseModel, Field, field_validator +from typing import Optional, List + + +class CollectionBase(BaseModel): + """Base schema for Collection with common fields.""" + + name: str = Field( + ..., + min_length=1, + max_length=255, + description="Collection name", + example="Expert Research Toolkit", + ) + description: Optional[str] = Field( + None, + description="Purpose description", + example="Tools for researching expert profiles and transcripts", + ) + + +class CollectionCreate(CollectionBase): + """Schema for creating a new Collection.""" + + pass + + +class CollectionUpdate(BaseModel): + """Schema for updating a Collection. All fields are optional.""" + + name: Optional[str] = Field( + None, + min_length=1, + max_length=255, + description="Collection name", + ) + description: Optional[str] = Field( + None, + description="Purpose description", + ) + + +class CollectionResponse(CollectionBase): + """Schema for Collection response.""" + + id: int = Field(..., description="Collection ID", example=1) + + class Config: + from_attributes = True + + +class CollectionItemCountsResponse(BaseModel): + """Schema for collection item counts.""" + + total: int = Field(..., description="Total number of items", example=5) + apps: int = Field(..., description="Number of apps", example=2) + servers: int = Field(..., description="Number of MCP servers", example=2) + tools: int = Field(..., description="Number of tools", example=1) + + +class CollectionWithItemsResponse(CollectionResponse): + """Schema for Collection response with nested items.""" + + items: List["CollectionItemResponse"] = Field( + default_factory=list, + description="Items in this collection", + ) + + class Config: + from_attributes = True + + +class CollectionItemCreate(BaseModel): + """ + Schema for adding an item to a collection via API. + Exactly one of app_id, mcp_server_id, or tool_id must be set. + collection_id comes from the URL path parameter. 
+ """ + + app_id: Optional[int] = Field( + None, + description="App ID (mutually exclusive with mcp_server_id and tool_id)", + example=1, + ) + mcp_server_id: Optional[int] = Field( + None, + description="MCP Server ID (mutually exclusive with app_id and tool_id)", + example=None, + ) + tool_id: Optional[int] = Field( + None, + description="Tool ID (mutually exclusive with app_id and mcp_server_id)", + example=None, + ) + + @field_validator("tool_id") + @classmethod + def validate_exactly_one_ref(cls, v, info): + """Validate that exactly one of app_id, mcp_server_id, or tool_id is set.""" + app_id = info.data.get("app_id") + mcp_server_id = info.data.get("mcp_server_id") + tool_id = v + + non_null_count = sum( + x is not None for x in [app_id, mcp_server_id, tool_id] + ) + + if non_null_count != 1: + raise ValueError( + "Exactly one of app_id, mcp_server_id, or tool_id must be set" + ) + + return v + + +class CollectionItemResponse(BaseModel): + """Schema for CollectionItem response.""" + + id: int = Field(..., description="Collection Item ID", example=1) + collection_id: int = Field(..., description="Collection ID", example=1) + app_id: Optional[int] = Field(None, description="App ID", example=1) + mcp_server_id: Optional[int] = Field(None, description="MCP Server ID", example=None) + tool_id: Optional[int] = Field(None, description="Tool ID", example=None) + + class Config: + from_attributes = True diff --git a/dbx-agent-app/app/backend/app/schemas/common.py b/dbx-agent-app/app/backend/app/schemas/common.py new file mode 100644 index 00000000..5e02f48f --- /dev/null +++ b/dbx-agent-app/app/backend/app/schemas/common.py @@ -0,0 +1,35 @@ +""" +Common schemas shared across the API. 
+""" + +from pydantic import BaseModel, Field +from typing import Generic, TypeVar, List + +T = TypeVar("T") + + +class HealthResponse(BaseModel): + """Health check response.""" + + status: str = Field(..., description="API status", example="healthy") + version: str = Field(..., description="API version", example="0.1.0") + + +class ReadyResponse(BaseModel): + """Readiness check response.""" + + ready: bool = Field(..., description="Whether the API is ready to accept requests") + database: str = Field(..., description="Database connection status") + + +class PaginatedResponse(BaseModel, Generic[T]): + """Generic paginated response.""" + + items: List[T] = Field(..., description="List of items") + total: int = Field(..., description="Total number of items", ge=0) + page: int = Field(..., description="Current page number", ge=1) + page_size: int = Field(..., description="Number of items per page", ge=1) + total_pages: int = Field(..., description="Total number of pages", ge=1) + + class Config: + from_attributes = True diff --git a/dbx-agent-app/app/backend/app/schemas/conversation.py b/dbx-agent-app/app/backend/app/schemas/conversation.py new file mode 100644 index 00000000..66eb9203 --- /dev/null +++ b/dbx-agent-app/app/backend/app/schemas/conversation.py @@ -0,0 +1,57 @@ +""" +Pydantic schemas for the conversations API. 
+""" + +from pydantic import BaseModel, Field +from typing import Optional, List + + +class ConversationMessageResponse(BaseModel): + """Single message within a conversation.""" + + id: int + conversation_id: str + role: str = Field(..., description="'user' or 'assistant'") + content: str + trace_id: Optional[str] = None + created_at: Optional[str] = None + + class Config: + from_attributes = True + + +class ConversationListItem(BaseModel): + """Conversation summary for list views.""" + + id: str + title: str + user_email: Optional[str] = None + collection_id: Optional[int] = None + message_count: int = 0 + created_at: Optional[str] = None + updated_at: Optional[str] = None + + class Config: + from_attributes = True + + +class ConversationResponse(BaseModel): + """Full conversation with messages.""" + + id: str + title: str + user_email: Optional[str] = None + collection_id: Optional[int] = None + message_count: int = 0 + created_at: Optional[str] = None + updated_at: Optional[str] = None + messages: List[ConversationMessageResponse] = [] + + class Config: + from_attributes = True + + +class ConversationRenameRequest(BaseModel): + """Request to rename a conversation.""" + + title: str = Field(..., min_length=1, max_length=255) diff --git a/dbx-agent-app/app/backend/app/schemas/discovery.py b/dbx-agent-app/app/backend/app/schemas/discovery.py new file mode 100644 index 00000000..fb4dcd83 --- /dev/null +++ b/dbx-agent-app/app/backend/app/schemas/discovery.py @@ -0,0 +1,154 @@ +""" +Pydantic schemas for Discovery entities. 
+""" + +from pydantic import BaseModel, Field, HttpUrl +from typing import List, Optional + + +class DiscoveryRefreshRequest(BaseModel): + """Request schema for discovery refresh endpoint.""" + + server_urls: List[HttpUrl] = Field( + default_factory=list, + description="List of custom MCP server URLs to discover", + example=["https://mcp.example.com/api", "https://another-mcp.example.com"], + ) + discover_workspace: bool = Field( + default=False, + description="Discover MCP servers deployed in workspace (stub)", + example=False, + ) + discover_catalog: bool = Field( + default=False, + description="Discover managed servers from MCP catalog (stub)", + example=False, + ) + discover_agents: bool = Field( + default=False, + description="Auto-discover agents from serving endpoints and workspace apps", + example=False, + ) + databricks_profile: Optional[str] = Field( + None, + description="Databricks CLI profile to use for workspace discovery", + example="fe-vm-serverless-dxukih", + ) + + +class DiscoveryRefreshResponse(BaseModel): + """Response from discovery refresh endpoint.""" + + status: str = Field( + ..., + description="Discovery status (success/partial/failed)", + example="success", + ) + message: str = Field( + ..., + description="Human-readable message", + example="Discovery completed successfully", + ) + apps_discovered: int = Field( + default=0, + description="Number of apps discovered", + example=0, + ) + servers_discovered: int = Field( + default=0, + description="Number of MCP servers discovered", + example=2, + ) + tools_discovered: int = Field( + default=0, + description="Number of tools discovered", + example=15, + ) + new_servers: int = Field( + default=0, + description="Number of new servers added", + example=1, + ) + updated_servers: int = Field( + default=0, + description="Number of existing servers updated", + example=1, + ) + new_tools: int = Field( + default=0, + description="Number of new tools added", + example=10, + ) + updated_tools: int = 
Field( + default=0, + description="Number of existing tools updated", + example=5, + ) + agents_discovered: int = Field( + default=0, + description="Number of agents discovered from serving endpoints and apps", + example=0, + ) + new_agents: int = Field( + default=0, + description="Number of new agents added", + example=0, + ) + updated_agents: int = Field( + default=0, + description="Number of existing agents updated", + example=0, + ) + errors: List[str] = Field( + default_factory=list, + description="List of errors encountered during discovery", + example=[], + ) + + +class WorkspaceProfileResponse(BaseModel): + """A single Databricks workspace profile with auth status.""" + + name: str = Field(..., description="Profile name from ~/.databrickscfg") + host: Optional[str] = Field(None, description="Workspace URL") + auth_type: Optional[str] = Field(None, description="Authentication type (pat, oauth-m2m, etc.)") + is_account_profile: bool = Field(False, description="Whether this is an account-level profile") + auth_valid: bool = Field(False, description="Whether authentication succeeded") + auth_error: Optional[str] = Field(None, description="Error message if auth failed") + username: Optional[str] = Field(None, description="Authenticated username") + + +class WorkspaceProfilesResponse(BaseModel): + """Response from workspace profiles discovery endpoint.""" + + profiles: List[WorkspaceProfileResponse] = Field( + default_factory=list, description="List of discovered workspace profiles" + ) + config_path: str = Field(..., description="Path to the databrickscfg file used") + total: int = Field(0, description="Total number of profiles found") + valid: int = Field(0, description="Number of profiles with valid auth") + + +class DiscoveryStatusResponse(BaseModel): + """Response from discovery status endpoint.""" + + is_running: bool = Field( + ..., + description="Whether discovery is currently running", + example=False, + ) + last_run_timestamp: Optional[str] = Field( + 
None, + description="ISO timestamp of last discovery run", + example="2026-02-10T10:30:00Z", + ) + last_run_status: Optional[str] = Field( + None, + description="Status of last run (success/partial/failed)", + example="success", + ) + last_run_message: Optional[str] = Field( + None, + description="Message from last run", + example="Discovery completed successfully", + ) diff --git a/dbx-agent-app/app/backend/app/schemas/lineage.py b/dbx-agent-app/app/backend/app/schemas/lineage.py new file mode 100644 index 00000000..6198ef53 --- /dev/null +++ b/dbx-agent-app/app/backend/app/schemas/lineage.py @@ -0,0 +1,65 @@ +""" +Schemas for the lineage / knowledge graph endpoints. +""" + +from pydantic import BaseModel, Field +from typing import Optional, List + + +class RelationshipResponse(BaseModel): + id: int + source_type: str + source_id: int + source_name: Optional[str] = None + target_type: str + target_id: int + target_name: Optional[str] = None + relationship_type: str + metadata_json: Optional[str] = None + discovered_at: Optional[str] = None + + +class LineageNode(BaseModel): + asset_type: str + asset_id: int + name: str + full_name: Optional[str] = None + depth: int = Field(0, description="Distance from the queried asset (0 = self)") + + +class LineageEdge(BaseModel): + source_type: str + source_id: int + target_type: str + target_id: int + relationship_type: str + + +class LineageResponse(BaseModel): + root_type: str + root_id: int + root_name: str + direction: str = Field(..., description="'upstream', 'downstream', or 'both'") + nodes: List[LineageNode] + edges: List[LineageEdge] + + +class ImpactAnalysisResponse(BaseModel): + root_type: str + root_id: int + root_name: str + affected_assets: List[LineageNode] + total_affected: int + + +class LineageCrawlRequest(BaseModel): + databricks_profile: Optional[str] = None + include_column_lineage: bool = Field(False, description="Also crawl column-level lineage") + + +class LineageCrawlResponse(BaseModel): + status: str 
+ message: str + relationships_discovered: int + new_relationships: int + errors: List[str] = [] diff --git a/dbx-agent-app/app/backend/app/schemas/mcp_server.py b/dbx-agent-app/app/backend/app/schemas/mcp_server.py new file mode 100644 index 00000000..720ed1ac --- /dev/null +++ b/dbx-agent-app/app/backend/app/schemas/mcp_server.py @@ -0,0 +1,87 @@ +""" +Pydantic schemas for MCPServer entities. +""" + +from pydantic import BaseModel, Field, HttpUrl +from typing import Optional +from enum import Enum + + +class MCPServerKindSchema(str, Enum): + """MCP Server types.""" + + MANAGED = "managed" + EXTERNAL = "external" + CUSTOM = "custom" + + +class MCPServerBase(BaseModel): + """Base schema for MCPServer with common fields.""" + + app_id: Optional[int] = Field( + None, + description="Foreign key to parent App (nullable for standalone servers)", + example=1, + ) + server_url: str = Field( + ..., + description="MCP server endpoint URL", + example="https://api.guidepoint.com/mcp", + ) + kind: MCPServerKindSchema = Field( + ..., + description="Server type (managed/external/custom)", + example="custom", + ) + uc_connection: Optional[str] = Field( + None, + max_length=255, + description="Unity Catalog connection name (for governance)", + example="guidepoint_connection", + ) + scopes: Optional[str] = Field( + None, + description="Comma-separated OAuth scopes required", + example="read:experts,read:transcripts", + ) + + +class MCPServerCreate(MCPServerBase): + """Schema for creating a new MCPServer.""" + + pass + + +class MCPServerUpdate(BaseModel): + """Schema for updating an MCPServer. 
All fields are optional.""" + + app_id: Optional[int] = Field( + None, + description="Foreign key to parent App", + ) + server_url: Optional[str] = Field( + None, + description="MCP server endpoint URL", + ) + kind: Optional[MCPServerKindSchema] = Field( + None, + description="Server type", + ) + uc_connection: Optional[str] = Field( + None, + max_length=255, + description="Unity Catalog connection name", + ) + scopes: Optional[str] = Field( + None, + description="Comma-separated OAuth scopes", + ) + + +class MCPServerResponse(MCPServerBase): + """Schema for MCPServer response.""" + + id: int = Field(..., description="MCP Server ID", example=1) + + class Config: + from_attributes = True diff --git a/dbx-agent-app/app/backend/app/schemas/orchestrator.py b/dbx-agent-app/app/backend/app/schemas/orchestrator.py new file mode 100644 index 00000000..c90c924d --- /dev/null +++ b/dbx-agent-app/app/backend/app/schemas/orchestrator.py @@ -0,0 +1,48 @@ +""" +Pydantic schemas for the multi-agent orchestration endpoint. 
+""" + +from pydantic import BaseModel, Field +from typing import Optional, List, Dict, Any + + +class OrchestrationChatRequest(BaseModel): + """Chat request with optional multi-agent orchestration.""" + + collection_id: int = Field(..., description="Collection ID to use for supervisor") + message: str = Field(..., description="User message") + conversation_id: Optional[str] = Field(None, description="Conversation ID") + orchestration_mode: bool = Field( + default=False, + description="Enable multi-agent orchestration (plan, route, execute, evaluate)", + ) + mock_mode: bool = Field(True, description="Use mock responses (no real LLM/MCP calls)") + + +class SubTaskResultItem(BaseModel): + """Result from a single orchestrated sub-task.""" + + task_index: int + agent_id: int + agent_name: str + description: str + response: str + latency_ms: int + success: bool + error: Optional[str] = None + + +class OrchestrationChatResponse(BaseModel): + """Chat response from orchestrated supervisor.""" + + response: str + conversation_id: str + plan: Optional[Dict[str, Any]] = Field( + None, description="The decomposition plan (complexity, reasoning, sub-tasks)" + ) + sub_task_results: Optional[List[SubTaskResultItem]] = None + agents_used: int = 0 + tools_discovered: int = 0 + tools_called: int = 0 + quality_score: Optional[float] = None + mock: bool = False diff --git a/dbx-agent-app/app/backend/app/schemas/search.py b/dbx-agent-app/app/backend/app/schemas/search.py new file mode 100644 index 00000000..67c55fb0 --- /dev/null +++ b/dbx-agent-app/app/backend/app/schemas/search.py @@ -0,0 +1,48 @@ +""" +Schemas for the unified semantic search endpoint. +""" + +from pydantic import BaseModel, Field +from typing import Optional, List + + +class SearchRequest(BaseModel): + query: str = Field(..., description="Natural language search query") + types: Optional[List[str]] = Field( + None, + description="Filter by asset types (e.g. ['table', 'notebook', 'app']). 
None = all types.", + ) + catalogs: Optional[List[str]] = Field( + None, + description="Filter catalog assets to specific catalogs", + ) + owner: Optional[str] = Field(None, description="Filter by owner") + limit: int = Field(20, ge=1, le=100, description="Max results to return") + + +class SearchResultItem(BaseModel): + asset_type: str = Field(..., description="Type of asset (table, notebook, app, etc.)") + asset_id: int = Field(..., description="ID of the asset in its source table") + name: str + description: Optional[str] = None + full_name: Optional[str] = None # for catalog assets + path: Optional[str] = None # for workspace assets + owner: Optional[str] = None + score: float = Field(..., description="Relevance score (0-1)") + match_type: str = Field(..., description="'semantic', 'keyword', or 'hybrid'") + snippet: Optional[str] = Field(None, description="Highlighted text snippet") + + +class SearchResponse(BaseModel): + query: str + total: int + results: List[SearchResultItem] + search_mode: str = Field(..., description="'semantic', 'keyword', or 'hybrid'") + + +class EmbedStatusResponse(BaseModel): + total_assets: int + embedded_assets: int + pending_assets: int + embedding_model: str + dimension: int diff --git a/dbx-agent-app/app/backend/app/schemas/supervisor.py b/dbx-agent-app/app/backend/app/schemas/supervisor.py new file mode 100644 index 00000000..af2d6961 --- /dev/null +++ b/dbx-agent-app/app/backend/app/schemas/supervisor.py @@ -0,0 +1,146 @@ +""" +Pydantic schemas for Supervisor generation endpoints. 
+""" + +from pydantic import BaseModel, Field +from typing import Dict, Optional +from datetime import datetime + + +class SupervisorGenerateRequest(BaseModel): + """Schema for generating a supervisor from a collection.""" + + collection_id: int = Field( + ..., + description="Collection ID to generate supervisor from", + example=1, + ) + llm_endpoint: str = Field( + default="databricks-meta-llama-3-1-70b-instruct", + description="Databricks Foundation Model endpoint name", + example="databricks-meta-llama-3-1-70b-instruct", + ) + app_name: Optional[str] = Field( + None, + description="Custom app name (defaults to normalized collection name)", + example="expert-research-toolkit", + ) + mode: Optional[str] = Field( + default="code-first", + description="Generation mode (accepted but used for frontend compatibility)", + example="code-first", + ) + + +class SupervisorGenerateResponse(BaseModel): + """Schema for supervisor generation response.""" + + collection_id: int = Field( + ..., + description="Collection ID used for generation", + example=1, + ) + collection_name: str = Field( + ..., + description="Collection name", + example="Expert Research Toolkit", + ) + app_name: str = Field( + ..., + description="Generated app name", + example="expert-research-toolkit", + ) + files: Dict[str, str] = Field( + ..., + description="Generated files (filename -> content)", + example={ + "supervisor.py": "# Generated supervisor code...", + "requirements.txt": "fastapi\nuvicorn\n", + "app.yaml": "name: expert-research-toolkit\n", + }, + ) + generated_at: str = Field( + ..., + description="ISO 8601 timestamp of generation", + example="2024-01-15T10:30:00Z", + ) + supervisor_url: Optional[str] = Field( + None, + description="Deployed supervisor URL (placeholder in local dev)", + example="https://example.databricks.com/apps/expert-research-toolkit", + ) + code: Optional[str] = Field( + None, + description="Generated supervisor.py content", + example="# Generated supervisor code...", + 
) + + +class SupervisorPreviewResponse(BaseModel): + """Schema for supervisor preview response.""" + + collection_id: int = Field( + ..., + description="Collection ID", + example=1, + ) + collection_name: str = Field( + ..., + description="Collection name", + example="Expert Research Toolkit", + ) + app_name: str = Field( + ..., + description="App name that would be generated", + example="expert-research-toolkit", + ) + mcp_server_urls: list[str] = Field( + ..., + description="MCP server URLs that will be included", + example=["https://mcp1.example.com", "https://mcp2.example.com"], + ) + tool_count: int = Field( + ..., + description="Number of tools/items in collection", + example=5, + ) + preview: Dict[str, str] = Field( + ..., + description="Preview of generated files (filename -> first 500 chars)", + example={ + "supervisor.py": "# Generated supervisor code...", + "requirements.txt": "fastapi\nuvicorn\n", + "app.yaml": "name: expert-research-toolkit\n", + }, + ) + + +class SupervisorMetadata(BaseModel): + """Schema for supervisor metadata tracking.""" + + id: int = Field(..., description="Supervisor ID", example=1) + collection_id: int = Field(..., description="Collection ID", example=1) + app_name: str = Field(..., description="App name", example="expert-research-toolkit") + generated_at: datetime = Field( + ..., + description="Generation timestamp", + example="2024-01-15T10:30:00", + ) + deployed_url: Optional[str] = Field( + None, + description="Deployed app URL (if deployed)", + example="https://example.databricks.com/apps/expert-research-toolkit", + ) + + class Config: + from_attributes = True + + +class SupervisorListResponse(BaseModel): + """Schema for listing generated supervisors.""" + + supervisors: list[SupervisorMetadata] = Field( + ..., + description="List of generated supervisors", + ) + total: int = Field(..., description="Total count", example=5) diff --git a/dbx-agent-app/app/backend/app/schemas/tool.py 
b/dbx-agent-app/app/backend/app/schemas/tool.py new file mode 100644 index 00000000..00ce48c3 --- /dev/null +++ b/dbx-agent-app/app/backend/app/schemas/tool.py @@ -0,0 +1,55 @@ +""" +Pydantic schemas for Tool entities. +""" + +from pydantic import BaseModel, Field +from typing import Optional + + +class ToolFilter(BaseModel): + """Schema for filtering tools.""" + + mcp_server_id: Optional[int] = Field( + None, + description="Filter by MCP server ID", + example=1, + ) + name: Optional[str] = Field( + None, + description="Filter by tool name (partial match)", + example="search", + ) + page: int = Field( + 1, + ge=1, + description="Page number", + example=1, + ) + page_size: int = Field( + 50, + ge=1, + le=100, + description="Number of items per page", + example=50, + ) + + +class ToolResponse(BaseModel): + """Schema for Tool response.""" + + id: int = Field(..., description="Tool ID", example=1) + mcp_server_id: int = Field(..., description="MCP Server ID", example=1) + name: str = Field(..., description="Tool name", example="search_transcripts") + description: Optional[str] = Field( + None, + description="Human-readable description", + example="Search expert call transcripts by keyword", + ) + parameters: Optional[str] = Field( + None, + description="JSON Schema for tool parameters", + example='{"type": "object", "properties": {"query": {"type": "string"}}}', + ) + + class Config: + from_attributes = True diff --git a/dbx-agent-app/app/backend/app/schemas/workspace_asset.py b/dbx-agent-app/app/backend/app/schemas/workspace_asset.py new file mode 100644 index 00000000..d0bfa93b --- /dev/null +++ b/dbx-agent-app/app/backend/app/schemas/workspace_asset.py @@ -0,0 +1,58 @@ +""" +Pydantic schemas for WorkspaceAsset entities. 
+""" + +from pydantic import BaseModel, Field +from typing import Optional, List + + +class WorkspaceAssetResponse(BaseModel): + """Response schema for a workspace asset.""" + + id: int = Field(..., description="Asset ID") + asset_type: str = Field(..., description="Asset type (notebook, job, dashboard, etc.)") + workspace_host: str = Field(..., description="Databricks workspace URL") + path: str = Field(..., description="Workspace path or resource identifier") + name: str = Field(..., description="Human-readable name") + owner: Optional[str] = Field(None, description="Asset owner/creator") + description: Optional[str] = Field(None, description="Asset description") + language: Optional[str] = Field(None, description="Language (for notebooks)") + tags_json: Optional[str] = Field(None, description="JSON array of tags") + metadata_json: Optional[str] = Field(None, description="Type-specific metadata JSON") + content_preview: Optional[str] = Field(None, description="Content preview for search") + resource_id: Optional[str] = Field(None, description="Databricks resource ID") + created_at: Optional[str] = Field(None, description="When first indexed") + updated_at: Optional[str] = Field(None, description="Last index update") + last_indexed_at: Optional[str] = Field(None, description="Most recent crawl timestamp") + + class Config: + from_attributes = True + + +class WorkspaceCrawlRequest(BaseModel): + """Request to trigger a workspace crawl.""" + + asset_types: Optional[List[str]] = Field( + None, + description="Specific types to crawl (notebook, job, dashboard, pipeline, cluster, experiment). 
If empty, crawls all.", + ) + root_path: Optional[str] = Field( + "/", + description="Root path to start notebook crawl from", + ) + databricks_profile: Optional[str] = Field( + None, + description="Databricks CLI profile to use for authentication", + ) + + +class WorkspaceCrawlResponse(BaseModel): + """Response from workspace crawl.""" + + status: str = Field(..., description="Crawl status (success/partial/failed)") + message: str = Field(..., description="Human-readable status message") + assets_discovered: int = Field(0, description="Total assets discovered") + new_assets: int = Field(0, description="New assets added") + updated_assets: int = Field(0, description="Existing assets updated") + by_type: dict = Field(default_factory=dict, description="Counts per asset type") + errors: List[str] = Field(default_factory=list, description="Errors encountered") diff --git a/dbx-agent-app/app/backend/app/services/__init__.py b/dbx-agent-app/app/backend/app/services/__init__.py new file mode 100644 index 00000000..7248f76d --- /dev/null +++ b/dbx-agent-app/app/backend/app/services/__init__.py @@ -0,0 +1,26 @@ +""" +Services for the Multi-Agent Registry API. 
+ +This package contains business logic services for: +- MCP client integration (mcp_client.py) +- Tool specification parsing (tool_parser.py) +- Discovery orchestration (discovery.py) +- Code generation (generator.py) +""" + +from app.services.mcp_client import MCPClient, MCPConnectionError, MCPTimeoutError +from app.services.tool_parser import ToolParser, normalize_tool_spec +from app.services.discovery import DiscoveryService +from app.services.generator import GeneratorService, GeneratorError, get_generator_service + +__all__ = [ + "MCPClient", + "MCPConnectionError", + "MCPTimeoutError", + "ToolParser", + "normalize_tool_spec", + "DiscoveryService", + "GeneratorService", + "GeneratorError", + "get_generator_service", +] diff --git a/dbx-agent-app/app/backend/app/services/a2a_client.py b/dbx-agent-app/app/backend/app/services/a2a_client.py new file mode 100644 index 00000000..4e882f7a --- /dev/null +++ b/dbx-agent-app/app/backend/app/services/a2a_client.py @@ -0,0 +1,213 @@ +""" +A2A Client — outbound calls to peer agents following the A2A protocol. + +Follows the same httpx async + context manager pattern as mcp_client.py. +VERSION: 2026-02-25-v2 (OAuth fix) +""" + +import json +import uuid +import logging +from typing import Dict, Any, Optional, AsyncIterator + +import httpx + +logger = logging.getLogger(__name__) +logger.info("[A2A-CLIENT] Module loaded - VERSION: 2026-02-25-v2 with OAuth redirect fix") + + +class A2AClientError(Exception): + """Raised when an A2A call fails.""" + pass + + +class A2AClient: + """ + Async client for sending A2A JSON-RPC requests to peer agents. 
+ + Usage: + async with A2AClient() as client: + result = await client.send_message(agent_url, "Search for AI experts") + """ + + def __init__(self, timeout: float = 60.0): + self.timeout = timeout + self._client: Optional[httpx.AsyncClient] = None + + async def __aenter__(self): + self._client = httpx.AsyncClient( + timeout=self.timeout, + follow_redirects=True, + ) + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + if self._client: + await self._client.aclose() + + def _auth_headers(self, auth_token: Optional[str] = None) -> Dict[str, str]: + headers = {"Content-Type": "application/json"} + if auth_token: + headers["Authorization"] = f"Bearer {auth_token}" + return headers + + async def _jsonrpc_call( + self, + url: str, + method: str, + params: Dict[str, Any], + auth_token: Optional[str] = None, + ) -> Dict[str, Any]: + """Send a JSON-RPC 2.0 request and return the result.""" + if not self._client: + raise A2AClientError("Client not initialized. Use async context manager.") + + payload = { + "jsonrpc": "2.0", + "id": str(uuid.uuid4()), + "method": method, + "params": params, + } + + try: + response = await self._client.post( + url, + json=payload, + headers=self._auth_headers(auth_token), + ) + response.raise_for_status() + result = response.json() + + if "error" in result: + error = result["error"] + raise A2AClientError( + f"A2A error: {error.get('message', 'Unknown')} (code: {error.get('code')})" + ) + + return result.get("result", {}) + + except httpx.TimeoutException as e: + raise A2AClientError(f"A2A request to {url} timed out: {e}") + except httpx.HTTPStatusError as e: + raise A2AClientError(f"A2A HTTP error from {url}: {e.response.status_code}") + except json.JSONDecodeError as e: + raise A2AClientError(f"Invalid JSON from {url}: {e}") + + async def send_message( + self, + agent_url: str, + message: str, + context_id: Optional[str] = None, + auth_token: Optional[str] = None, + ) -> Dict[str, Any]: + """Send a message/send request 
to a peer agent.""" + params: Dict[str, Any] = { + "message": { + "messageId": str(uuid.uuid4()), + "role": "user", + "parts": [{"text": message}], + } + } + if context_id: + params["message"]["contextId"] = context_id + + return await self._jsonrpc_call(agent_url, "message/send", params, auth_token) + + async def send_streaming_message( + self, + agent_url: str, + message: str, + auth_token: Optional[str] = None, + ) -> AsyncIterator[Dict[str, Any]]: + """Send a streaming message and yield SSE events.""" + if not self._client: + raise A2AClientError("Client not initialized. Use async context manager.") + + # Streaming endpoint is /stream relative to the agent's A2A URL + stream_url = agent_url.rstrip("/") + "/stream" + + payload = { + "jsonrpc": "2.0", + "id": str(uuid.uuid4()), + "method": "message/stream", + "params": { + "message": { + "messageId": str(uuid.uuid4()), + "role": "user", + "parts": [{"text": message}], + } + }, + } + + async with self._client.stream( + "POST", + stream_url, + json=payload, + headers=self._auth_headers(auth_token), + ) as response: + response.raise_for_status() + async for line in response.aiter_lines(): + if line.startswith("data: "): + try: + yield json.loads(line[6:]) + except json.JSONDecodeError: + continue + + async def get_task( + self, + agent_url: str, + task_id: str, + auth_token: Optional[str] = None, + ) -> Dict[str, Any]: + """Get a task by ID from a peer agent.""" + return await self._jsonrpc_call( + agent_url, "tasks/get", {"id": task_id}, auth_token + ) + + async def cancel_task( + self, + agent_url: str, + task_id: str, + auth_token: Optional[str] = None, + ) -> Dict[str, Any]: + """Cancel a task on a peer agent.""" + return await self._jsonrpc_call( + agent_url, "tasks/cancel", {"id": task_id}, auth_token + ) + + async def fetch_agent_card( + self, base_url: str, auth_token: Optional[str] = None, + ) -> Dict[str, Any]: + """Fetch an agent's Agent Card from /.well-known/agent.json or /card.""" + if not 
self._client: + raise A2AClientError("Client not initialized. Use async context manager.") + + headers = {} + if auth_token: + headers["Authorization"] = f"Bearer {auth_token}" + + # Try well-known path first, then /card + # Use a fresh client that does NOT follow redirects to avoid OAuth session contamination + async with httpx.AsyncClient(timeout=self.timeout, follow_redirects=False) as probe_client: + for path in ["/.well-known/agent.json", "/card"]: + try: + url = base_url.rstrip("/") + path + response = await probe_client.get(url, headers=headers) + + # Explicit handling of OAuth redirects (3xx status codes) + if response.status_code in (301, 302, 303, 307, 308): + # OAuth redirect detected - app doesn't support SP auth + logger.debug(f"OAuth redirect detected for {url} (status {response.status_code})") + continue + + if response.status_code == 200: + # Check if response body is empty + if not response.text or response.text.isspace(): + logger.debug(f"Empty response body for {url}") + continue + return response.json() + except Exception as e: + logger.warning(f"Agent card fetch failed for {url}: {type(e).__name__}: {e}") + continue + + raise A2AClientError(f"Could not fetch Agent Card from {base_url}") diff --git a/dbx-agent-app/app/backend/app/services/a2a_notifications.py b/dbx-agent-app/app/backend/app/services/a2a_notifications.py new file mode 100644 index 00000000..9dfaf0db --- /dev/null +++ b/dbx-agent-app/app/backend/app/services/a2a_notifications.py @@ -0,0 +1,49 @@ +""" +A2A Push Notification Service — fires HTTP POST to registered webhooks on task state transitions. +""" + +import json +import logging +from typing import Optional + +import httpx + +logger = logging.getLogger(__name__) + + +async def send_push_notification( + webhook_url: str, + task_id: str, + status: str, + webhook_token: Optional[str] = None, + artifacts: Optional[str] = None, +) -> bool: + """ + Fire a push notification to a registered webhook URL. 
+ + Returns True on success, False on failure (best-effort, never raises). + """ + headers = {"Content-Type": "application/json"} + if webhook_token: + headers["Authorization"] = f"Bearer {webhook_token}" + + payload = { + "taskId": task_id, + "status": status, + } + + if artifacts: + try: + payload["artifacts"] = json.loads(artifacts) if isinstance(artifacts, str) else artifacts + except (json.JSONDecodeError, TypeError): + pass + + try: + async with httpx.AsyncClient(timeout=10.0) as client: + response = await client.post(webhook_url, json=payload, headers=headers) + response.raise_for_status() + logger.info("Push notification sent: task=%s status=%s url=%s", task_id, status, webhook_url) + return True + except Exception as e: + logger.warning("Push notification failed: task=%s url=%s error=%s", task_id, webhook_url, e) + return False diff --git a/dbx-agent-app/app/backend/app/services/agent_chat.py b/dbx-agent-app/app/backend/app/services/agent_chat.py new file mode 100644 index 00000000..0aa41030 --- /dev/null +++ b/dbx-agent-app/app/backend/app/services/agent_chat.py @@ -0,0 +1,620 @@ +""" +Agent Chat Service + +Proxies chat messages to Databricks serving endpoints and enriches responses +with routing visualization, slot filling, and pipeline metadata. 
+""" + +import re +import math +import logging +from datetime import datetime +from typing import Optional, Dict, Any, List + +import httpx + +from app.schemas.agent_chat import ( + AgentChatResponse, + RoutingInfo, + ToolCall, + ProcessingStep, + SlotFillingInfo, + SlotData, + NLToSQLMapping, + PipelineInfo, + PipelineStep, + PipelineStepMetrics, + PipelineStepCostBreakdown, + PipelineMetrics, + CostBreakdown, + LatencyBreakdown, +) + +logger = logging.getLogger(__name__) + +# Token cost estimates (approximate for Claude Sonnet 4.5) +INPUT_COST_PER_1M = 3.00 +OUTPUT_COST_PER_1M = 15.00 + +STOP_WORDS = frozenset({ + 'what', 'do', 'say', 'about', 'the', 'are', 'is', 'in', + 'to', 'a', 'an', 'and', 'or', 'how', 'who', 'where', 'when', + 'which', 'that', 'this', 'with', 'for', 'from', 'can', 'will', +}) + +ENTITY_PATTERNS = [ + re.compile(r'experts?', re.I), + re.compile(r'healthcare', re.I), + re.compile(r'AI', re.I), + re.compile(r'supply chain', re.I), + re.compile(r'digital transformation', re.I), + re.compile(r'(\w+)\s+experts?', re.I), +] + + +class AgentChatService: + """Service for proxying chat to Databricks serving endpoints with enrichment.""" + + def __init__(self): + from databricks.sdk import WorkspaceClient + self._workspace_client = WorkspaceClient() + + async def query_endpoint( + self, + endpoint_name: str, + message: str, + ) -> AgentChatResponse: + """ + Query a Databricks serving endpoint and enrich the response. 
+ + Args: + endpoint_name: Name of the serving endpoint + message: User message to send + + Returns: + AgentChatResponse with content and enrichment metadata + """ + start_time = int(datetime.now().timestamp() * 1000) + + w = self._workspace_client + workspace_url = w.config.host + if not workspace_url: + raise ValueError("Could not determine Databricks workspace URL") + + # Ensure no trailing slash + workspace_url = workspace_url.rstrip("/") + + url = f"{workspace_url}/serving-endpoints/{endpoint_name}/invocations" + # Use authenticate() to support all SDK auth types (CLI, PAT, OAuth, etc.) + headers = {"Content-Type": "application/json"} + auth_headers = w.config.authenticate() + headers.update(auth_headers) + + payload = { + "input": [{"role": "user", "content": message}] + } + + logger.info( + "Querying endpoint %s at %s", endpoint_name, workspace_url + ) + + async with httpx.AsyncClient(timeout=60.0) as client: + resp = await client.post(url, json=payload, headers=headers) + if resp.status_code != 200: + error_text = resp.text + logger.error( + "Endpoint %s returned %d: %s", + endpoint_name, resp.status_code, error_text, + ) + raise ValueError( + f"Databricks API error ({resp.status_code}): {error_text}" + ) + data = resp.json() + + # Extract response text + response_text = self._extract_response_text(data) + request_id = ( + data.get("databricks_output", {}).get("databricks_request_id") + ) + + logger.info( + "Received response from %s (request_id=%s, length=%d)", + endpoint_name, request_id, len(response_text), + ) + + # Enrich response with metadata + routing = self._parse_routing_info(response_text, message) + slots = self._extract_slot_filling(message, response_text) + elastic_query = self._generate_elasticsearch_query(slots, message) + nl_to_sql = self._generate_nl_to_sql_mapping(message, slots) + pipeline = self._build_pipeline( + message, response_text, endpoint_name, start_time + ) + + return AgentChatResponse( + content=response_text, + 
requestId=request_id,
+            endpoint=endpoint_name,
+            timestamp=datetime.now().isoformat(),
+            routing=routing,
+            slotFilling=SlotFillingInfo(
+                slots=SlotData(**slots),
+                elasticQuery=elastic_query,
+                nlToSql=[NLToSQLMapping(**m) for m in nl_to_sql],
+            ),
+            pipeline=pipeline,
+        )
+
+    @staticmethod
+    def _extract_response_text(data: Dict[str, Any]) -> str:
+        """Extract text content from Databricks serving endpoint response."""
+        # Standard serving endpoint format: output[0].content[0].text
+        output = data.get("output")
+        if output and len(output) > 0:
+            content = output[0].get("content", [])
+            if content and len(content) > 0:
+                return content[0].get("text", "No response")  # first text part of first output message
+
+        # Databricks App /query format
+        if "response" in data:
+            return data["response"]
+
+        return "No response"  # payload matched neither known response shape
+
+    @staticmethod
+    def _parse_routing_info(
+        text: str, message: str
+    ) -> Optional[RoutingInfo]:
+        """Parse routing information from agent response text."""
+        if not text:
+            return None  # no response text -> no routing metadata to derive
+
+        used_supervisor = "[Demo Response from" in text
+        sub_agent = None
+        tool_calls: List[ToolCall] = []
+        processing_steps: List[ProcessingStep] = []
+
+        # Check for sub-agent routing
+        sub_agent_match = re.search(r'\[Demo Response from ([^\]]+)\]', text)
+        if sub_agent_match:
+            sub_agent = sub_agent_match.group(1)  # bracketed name identifies the sub-agent
+            used_supervisor = True
+            processing_steps.append(ProcessingStep(
+                step=1,
+                name="Supervisor Routing",
+                description="Analyzed query intent and selected appropriate sub-agent",
+                timestamp=int(datetime.now().timestamp() * 1000),
+                details={
+                    "decision": "Route to research agent",
+                    "reason": "Query requires expert transcript search",
+                },
+            ))
+
+        # Detect tool usage
+        tool_keywords = ["search_transcripts", "Found", "interviews", "expert"]
+        if any(kw in text for kw in tool_keywords):
+            interview_match = re.search(
+                r'(\d+)\s+(?:relevant\s+)?(?:expert\s+)?interviews?', text, re.I
+            )
+            count = int(interview_match.group(1)) if interview_match else 0  # 0 when no count appears in the text
+
+            tool_calls.append(ToolCall(
+                tool="search_transcripts",
description="Searched expert interview transcripts", + input={"query": message, "top_k": count or 10}, + output={ + "count": count, + "source": "main.guidepoint.expert_transcripts", + }, + )) + + # Add semantic search step + if not any(s.name == "Semantic Search" for s in processing_steps): + processing_steps.append(ProcessingStep( + step=len(processing_steps) + 1, + name="Semantic Search", + description="Executed vector similarity search on expert transcripts", + timestamp=int(datetime.now().timestamp() * 1000) + 100, + details={ + "backend": "Elasticsearch", + "searchType": "semantic + keyword", + "resultsFound": count, + }, + )) + + processing_steps.append(ProcessingStep( + step=len(processing_steps) + 1, + name="Result Ranking", + description="Ranked results by relevance score and recency", + timestamp=int(datetime.now().timestamp() * 1000) + 200, + details={ + "algorithm": "BM25 + semantic similarity", + "topResults": count or 10, + }, + )) + + processing_steps.append(ProcessingStep( + step=len(processing_steps) + 1, + name="Response Generation", + description="LLM synthesized insights from top results", + timestamp=int(datetime.now().timestamp() * 1000) + 300, + details={ + "model": "databricks-claude-sonnet-4-5", + "technique": "RAG (Retrieval Augmented Generation)", + }, + )) + + return RoutingInfo( + usedSupervisor=used_supervisor, + subAgent=sub_agent, + toolCalls=tool_calls, + processingSteps=processing_steps, + ) + + @staticmethod + def _extract_slot_filling( + message: str, response_text: str + ) -> Dict[str, Any]: + """Extract slot filling information from query and response.""" + entities: List[str] = [] + topics: List[str] = [] + filters: Dict[str, Any] = {} + + # Extract entities + for pattern in ENTITY_PATTERNS: + matches = pattern.findall(message) + for match in matches: + lower = match.lower() + if lower not in entities: + entities.append(lower) + + # Extract topics from response + if response_text: + topic_matches = 
re.findall(r'about\s+"([^"]+)"', response_text, re.I)
+            for topic in topic_matches:
+                if topic not in topics:
+                    topics.append(topic)
+
+        # Extract interview count
+        if response_text:
+            interview_match = re.search(
+                r'(\d+)\s+(?:relevant\s+)?(?:expert\s+)?interviews?',
+                response_text, re.I,
+            )
+            if interview_match:
+                filters["interviewCount"] = int(interview_match.group(1))
+
+        # Build search terms
+        words = message.lower().split()
+        search_terms = [
+            w for w in words if len(w) > 3 and w not in STOP_WORDS
+        ]
+
+        return {
+            "entities": entities,
+            "topics": topics,
+            "filters": filters,
+            "searchTerms": search_terms,
+        }
+
+    @staticmethod
+    def _generate_elasticsearch_query(
+        slots: Dict[str, Any], message: str
+    ) -> Dict[str, Any]:
+        """Generate Elasticsearch query structure from slot data."""
+        query: Dict[str, Any] = {
+            "query": {
+                "bool": {
+                    "must": [],
+                    "filter": [],
+                    "should": [],
+                }
+            },
+            "size": 10,
+            "_source": [
+                "expert_name", "interview_id", "content", "topics", "date"
+            ],
+        }
+
+        # Add text search
+        search_terms = slots.get("searchTerms", [])
+        if search_terms:
+            query["query"]["bool"]["must"].append({
+                "multi_match": {
+                    "query": message,
+                    "fields": ["content^2", "summary", "topics"],
+                    "type": "best_fields",
+                    "fuzziness": "AUTO",
+                }
+            })
+
+        # Add entity filters
+        entities = slots.get("entities", [])
+        if entities:
+            for entity in entities:
+                query["query"]["bool"]["should"].append({
+                    "match": {
+                        "topics": {"query": entity, "boost": 2}
+                    }
+                })
+            query["query"]["bool"]["minimum_should_match"] = 1
+
+        # Adjust size from filters
+        filters = slots.get("filters", {})
+        if filters.get("interviewCount"):
+            query["size"] = filters["interviewCount"]
+
+        return query
+
+    @staticmethod
+    def _generate_nl_to_sql_mapping(
+        message: str, slots: Dict[str, Any]
+    ) -> List[Dict[str, Any]]:
+        """Generate NL to SQL mapping visualization."""
+        mappings: List[Dict[str, Any]] = []
+
+        entities = slots.get("entities", [])
+        if entities:
+            for entity
in entities: + mappings.append({ + "naturalLanguage": entity, + "sqlClause": f"topics LIKE '%{entity}%'", + "type": "WHERE", + "confidence": 0.85, + }) + + search_terms = slots.get("searchTerms", []) + if search_terms: + mappings.append({ + "naturalLanguage": ", ".join(search_terms), + "sqlClause": ( + f"MATCH(content, summary) AGAINST(" + f"'{' '.join(search_terms)}' IN BOOLEAN MODE)" + ), + "type": "MATCH", + "confidence": 0.9, + }) + + filters = slots.get("filters", {}) + if filters.get("interviewCount"): + count = filters["interviewCount"] + mappings.append({ + "naturalLanguage": f"{count} interviews", + "sqlClause": f"LIMIT {count}", + "type": "LIMIT", + "confidence": 1.0, + }) + + return mappings + + def _build_pipeline( + self, + message: str, + response_text: str, + endpoint: str, + start_time: int, + ) -> PipelineInfo: + """Build processing pipeline visualization with metrics.""" + steps: List[PipelineStep] = [] + + # Token estimates + msg_tokens = math.ceil(len(message) / 4) + resp_tokens = math.ceil(len(response_text) / 4) + input_cost = (msg_tokens / 1_000_000) * INPUT_COST_PER_1M + output_cost = (resp_tokens / 1_000_000) * OUTPUT_COST_PER_1M + total_cost = input_cost + output_cost + + # Slot filling for entity count + slots = self._extract_slot_filling(message, response_text) + entity_count = len(slots.get("entities", [])) + + # Step 1: Request Received + steps.append(PipelineStep( + id=1, name="Request Received", status="completed", + timestamp=start_time, duration=0, + details={ + "endpoint": endpoint, + "messageLength": len(message), + "source": "Agent Chat", + }, + tools=[], + )) + + # Step 2: Authentication + steps.append(PipelineStep( + id=2, name="Authentication", status="completed", + timestamp=start_time + 50, duration=50, + details={"method": "OAuth", "validated": True}, + tools=["databricks_auth_token"], + )) + + # Step 3: Query Preprocessing + steps.append(PipelineStep( + id=3, name="Query Preprocessing", status="completed", + 
timestamp=start_time + 100, duration=50, + details={ + "entityExtraction": True, + "tokenization": True, + "intentClassification": True, + "inputTokens": msg_tokens, + "confidence": 0.92, + }, + tools=["entity_extractor", "tokenizer"], + metrics=PipelineStepMetrics( + tokensProcessed=msg_tokens, + entitiesFound=entity_count, + latency=50, + ), + )) + + # Step 4: Supervisor Routing (conditional) + if endpoint == "guidepoint_supervisor": + steps.append(PipelineStep( + id=4, name="Supervisor Routing", status="completed", + timestamp=start_time + 200, duration=100, + details={ + "analysisType": "Intent classification", + "selectedAgent": "guidepoint_research", + "confidence": 0.95, + "reasoning": "Query requires expert transcript search", + }, + tools=["intent_classifier", "agent_router"], + )) + + # Step 5: Slot Filling + steps.append(PipelineStep( + id=len(steps) + 1, name="Slot Filling (NL→SQL)", status="completed", + timestamp=start_time + 300, duration=100, + details={ + "entitiesExtracted": True, + "sqlGenerated": True, + "queryOptimized": True, + }, + tools=["entity_extractor", "sql_generator", "query_optimizer"], + metrics=PipelineStepMetrics(latency=100), + )) + + # Step 6: Search Execution + interview_match = re.search( + r'(\d+)\s+(?:relevant\s+)?(?:expert\s+)?interviews?', + response_text, re.I, + ) + results_found = int(interview_match.group(1)) if interview_match else 0 + + steps.append(PipelineStep( + id=len(steps) + 1, name="Search Execution", status="completed", + timestamp=start_time + 500, duration=200, + details={ + "backend": "Elasticsearch", + "index": "expert_transcripts", + "searchType": "hybrid (semantic + keyword)", + "resultsFound": results_found, + }, + tools=["search_transcripts", "elasticsearch_client"], + metrics=PipelineStepMetrics(latency=200), + )) + + # Step 7: Result Ranking + steps.append(PipelineStep( + id=len(steps) + 1, name="Result Ranking", status="completed", + timestamp=start_time + 750, duration=50, + details={ + 
"algorithm": "BM25 + Semantic Similarity", + "reranking": True, + "diversification": True, + }, + tools=["bm25_ranker", "semantic_reranker"], + )) + + # Step 8: Context Preparation + steps.append(PipelineStep( + id=len(steps) + 1, name="Context Preparation", status="completed", + timestamp=start_time + 850, duration=100, + details={ + "documentsSelected": results_found or 10, + "contextWindow": "4096 tokens", + "compressionUsed": False, + }, + tools=["context_builder"], + )) + + # Step 9: LLM Response Generation + steps.append(PipelineStep( + id=len(steps) + 1, name="LLM Response Generation", status="completed", + timestamp=start_time + 1000, duration=2000, + details={ + "model": "databricks-claude-sonnet-4-5", + "temperature": 0.7, + "technique": "RAG (Retrieval Augmented Generation)", + "inputTokens": msg_tokens + 450, + "outputTokens": resp_tokens, + "totalTokens": msg_tokens + 450 + resp_tokens, + }, + tools=["foundation_model_api", "claude_sonnet_4_5"], + metrics=PipelineStepMetrics( + inputTokens=msg_tokens + 450, + outputTokens=resp_tokens, + tokensPerSecond=resp_tokens // 2 if resp_tokens else 0, + estimatedCost=total_cost, + costBreakdown=PipelineStepCostBreakdown( + input=input_cost, output=output_cost + ), + latency=2000, + ), + )) + + # Step 10: Response Formatting + steps.append(PipelineStep( + id=len(steps) + 1, name="Response Formatting", status="completed", + timestamp=start_time + 3100, duration=50, + details={ + "markdown": True, + "citations": True, + "structuredOutput": True, + }, + tools=["markdown_formatter"], + )) + + # Step 11: MLflow Trace Logging + steps.append(PipelineStep( + id=len(steps) + 1, name="MLflow Trace Logging", status="completed", + timestamp=start_time + 3200, duration=100, + details={ + "experiment": "guidepoint-supervisor", + "metricsLogged": ["latency", "token_count", "cost"], + "traceId": "generated_trace_id", + }, + tools=["mlflow_client"], + )) + + # Calculate totals + total_duration = ( + steps[-1].timestamp + 
steps[-1].duration - start_time + ) + + # Latency breakdown + preprocessing_latency = sum(s.duration for s in steps[:4]) + search_latency = sum(s.duration for s in steps[4:8]) + llm_step = next( + (s for s in steps if s.name == "LLM Response Generation"), None + ) + llm_latency = llm_step.duration if llm_step else 0 + postprocessing_latency = sum(s.duration for s in steps[9:]) + + total_tokens = msg_tokens + 450 + resp_tokens + tokens_per_second = ( + resp_tokens // max(total_duration // 1000, 1) + if total_duration > 0 else 0 + ) + + return PipelineInfo( + steps=steps, + totalDuration=total_duration, + totalSteps=len(steps), + startTime=start_time, + endTime=start_time + total_duration, + metrics=PipelineMetrics( + totalTokens=total_tokens, + inputTokens=msg_tokens + 450, + outputTokens=resp_tokens, + estimatedCost=total_cost, + tokensPerSecond=tokens_per_second, + costBreakdown=CostBreakdown( + input=f"${input_cost:.6f}", + output=f"${output_cost:.6f}", + total=f"${total_cost:.6f}", + ), + latencyBreakdown=LatencyBreakdown( + preprocessing=preprocessing_latency, + search=search_latency, + llm=llm_latency, + postprocessing=postprocessing_latency, + total=total_duration, + ), + ), + ) + + +def create_agent_chat_service() -> AgentChatService: + """Factory function for AgentChatService.""" + return AgentChatService() diff --git a/dbx-agent-app/app/backend/app/services/audit.py b/dbx-agent-app/app/backend/app/services/audit.py new file mode 100644 index 00000000..a10c2fb6 --- /dev/null +++ b/dbx-agent-app/app/backend/app/services/audit.py @@ -0,0 +1,64 @@ +""" +Audit logging helper — fire-and-forget recording of mutating API actions. 
+
+Usage in route handlers:
+    from app.services.audit import record_audit
+
+    record_audit(
+        request=request,
+        action="create",
+        resource_type="agent",
+        resource_id=str(agent["id"]),
+        resource_name=agent["name"],
+    )
+"""
+
+import json
+import logging
+from typing import Optional, Any
+from fastapi import Request
+
+logger = logging.getLogger(__name__)
+
+
+def record_audit(
+    request: Request,
+    action: str,
+    resource_type: str,
+    resource_id: Optional[str] = None,
+    resource_name: Optional[str] = None,
+    details: Optional[Any] = None,
+) -> None:
+    """
+    Record an audit log entry. Fire-and-forget — failures are logged
+    as warnings but never raised to the caller.
+
+    Args:
+        request: The FastAPI Request object (used for user identity + IP).
+        action: The mutation type (create, update, delete, crawl, clear).
+        resource_type: The kind of resource (agent, collection, etc.).
+        resource_id: The ID of the affected resource (stringified).
+        resource_name: Human-readable name for the resource.
+        details: Extra context dict (will be JSON-serialized).
+    """
+    try:
+        from app.db_adapter import DatabaseAdapter
+
+        user_email = getattr(request.state, "user_email", None) or "local-dev"
+        ip_address = request.client.host if request.client else None
+
+        details_str = None
+        if details is not None:
+            details_str = json.dumps(details) if not isinstance(details, str) else details
+
+        DatabaseAdapter.create_audit_log(
+            user_email=user_email,
+            action=action,
+            resource_type=resource_type,
+            resource_id=resource_id,
+            resource_name=resource_name,
+            details=details_str,
+            ip_address=ip_address,
+        )
+    except Exception as e:
+        logger.warning("Failed to record audit log: %s", e)
diff --git a/dbx-agent-app/app/backend/app/services/catalog_crawler.py b/dbx-agent-app/app/backend/app/services/catalog_crawler.py
new file mode 100644
index 00000000..74cf6e84
--- /dev/null
+++ b/dbx-agent-app/app/backend/app/services/catalog_crawler.py
@@ -0,0 +1,278 @@
+"""
+Unity Catalog asset crawler service.
+
+Walks the UC hierarchy (catalogs → schemas → tables/views/functions/models/volumes)
+using the Databricks SDK and indexes metadata into the registry database.
+"""
+
+import json
+import logging
+from dataclasses import dataclass, field
+from datetime import datetime, timezone
+from typing import List, Optional
+
+from app.config import settings
+
+logger = logging.getLogger(__name__)
+
+
+@dataclass
+class CrawlStats:
+    """Tracks crawl progress and results."""
+
+    catalogs_crawled: int = 0
+    schemas_crawled: int = 0
+    assets_discovered: int = 0
+    new_assets: int = 0
+    updated_assets: int = 0
+    errors: List[str] = field(default_factory=list)
+
+
+class CatalogCrawlerService:
+    """
+    Crawls Unity Catalog to index tables, views, functions, models, and volumes.
+
+    Uses the Databricks SDK WorkspaceClient to walk the UC namespace hierarchy
+    and upserts discovered metadata into the CatalogAsset table.
+ """ + + def __init__(self, profile: Optional[str] = None): + self._profile = profile + + def _get_client(self): + """Get a Databricks WorkspaceClient.""" + from databricks.sdk import WorkspaceClient + + if self._profile: + return WorkspaceClient(profile=self._profile) + return WorkspaceClient() + + def crawl( + self, + catalogs: Optional[List[str]] = None, + include_columns: bool = True, + ) -> CrawlStats: + """ + Crawl Unity Catalog and upsert assets into the database. + + Args: + catalogs: Specific catalogs to crawl. If None, crawls all accessible. + include_columns: Whether to fetch column details for tables/views. + + Returns: + CrawlStats with crawl results. + """ + stats = CrawlStats() + client = self._get_client() + now = datetime.now(timezone.utc) + + # Get catalogs to crawl + try: + if catalogs: + catalog_list = catalogs + else: + catalog_list = [c.name for c in client.catalogs.list()] + except Exception as e: + stats.errors.append(f"Failed to list catalogs: {e}") + return stats + + for catalog_name in catalog_list: + try: + self._crawl_catalog(client, catalog_name, include_columns, now, stats) + stats.catalogs_crawled += 1 + except Exception as e: + stats.errors.append(f"Failed to crawl catalog '{catalog_name}': {e}") + logger.error("Catalog crawl error for '%s': %s", catalog_name, e) + + return stats + + def _crawl_catalog( + self, + client, + catalog_name: str, + include_columns: bool, + now: datetime, + stats: CrawlStats, + ): + """Crawl all schemas in a catalog.""" + try: + schemas = list(client.schemas.list(catalog_name=catalog_name)) + except Exception as e: + stats.errors.append(f"Failed to list schemas in '{catalog_name}': {e}") + return + + for schema in schemas: + schema_name = schema.name + if schema_name in ("information_schema",): + continue + + try: + self._crawl_schema(client, catalog_name, schema_name, include_columns, now, stats) + stats.schemas_crawled += 1 + except Exception as e: + stats.errors.append(f"Failed to crawl schema 
'{catalog_name}.{schema_name}': {e}") + logger.error("Schema crawl error for '%s.%s': %s", catalog_name, schema_name, e) + + def _crawl_schema( + self, + client, + catalog_name: str, + schema_name: str, + include_columns: bool, + now: datetime, + stats: CrawlStats, + ): + """Crawl all assets in a schema.""" + # Tables and views + try: + tables = list(client.tables.list( + catalog_name=catalog_name, + schema_name=schema_name, + )) + for table in tables: + asset_type = "view" if table.table_type and str(table.table_type) == "VIEW" else "table" + columns = None + if include_columns and hasattr(table, "columns") and table.columns: + columns = [ + { + "name": col.name, + "type": str(col.type_text) if col.type_text else str(col.type_name), + "comment": col.comment, + "nullable": col.nullable if hasattr(col, "nullable") else True, + "position": col.position if hasattr(col, "position") else None, + } + for col in table.columns + ] + + properties = None + if hasattr(table, "properties") and table.properties: + properties = dict(table.properties) + + self._upsert_asset( + asset_type=asset_type, + catalog=catalog_name, + schema_name=schema_name, + name=table.name, + full_name=f"{catalog_name}.{schema_name}.{table.name}", + owner=table.owner if hasattr(table, "owner") else None, + comment=table.comment if hasattr(table, "comment") else None, + columns_json=json.dumps(columns) if columns else None, + properties_json=json.dumps(properties) if properties else None, + data_source_format=str(table.data_source_format) if hasattr(table, "data_source_format") and table.data_source_format else None, + table_type=str(table.table_type) if hasattr(table, "table_type") and table.table_type else None, + now=now, + stats=stats, + ) + except Exception as e: + stats.errors.append(f"Failed to list tables in '{catalog_name}.{schema_name}': {e}") + + # Functions + try: + functions = list(client.functions.list( + catalog_name=catalog_name, + schema_name=schema_name, + )) + for func in functions: + 
params = None + if hasattr(func, "input_params") and func.input_params: + params_list = func.input_params.parameters if hasattr(func.input_params, "parameters") else [] + params = [ + { + "name": p.name, + "type": str(p.type_text) if hasattr(p, "type_text") else str(p.type_name), + "comment": p.comment if hasattr(p, "comment") else None, + } + for p in params_list + ] + + self._upsert_asset( + asset_type="function", + catalog=catalog_name, + schema_name=schema_name, + name=func.name, + full_name=f"{catalog_name}.{schema_name}.{func.name}", + owner=func.owner if hasattr(func, "owner") else None, + comment=func.comment if hasattr(func, "comment") else None, + columns_json=json.dumps(params) if params else None, + now=now, + stats=stats, + ) + except Exception as e: + # Functions API may not be available in all workspaces + logger.debug("Functions listing skipped for '%s.%s': %s", catalog_name, schema_name, e) + + # Volumes + try: + volumes = list(client.volumes.list( + catalog_name=catalog_name, + schema_name=schema_name, + )) + for vol in volumes: + self._upsert_asset( + asset_type="volume", + catalog=catalog_name, + schema_name=schema_name, + name=vol.name, + full_name=f"{catalog_name}.{schema_name}.{vol.name}", + owner=vol.owner if hasattr(vol, "owner") else None, + comment=vol.comment if hasattr(vol, "comment") else None, + now=now, + stats=stats, + ) + except Exception as e: + logger.debug("Volumes listing skipped for '%s.%s': %s", catalog_name, schema_name, e) + + def _upsert_asset( + self, + asset_type: str, + catalog: str, + schema_name: str, + name: str, + full_name: str, + now: datetime, + stats: CrawlStats, + owner: Optional[str] = None, + comment: Optional[str] = None, + columns_json: Optional[str] = None, + tags_json: Optional[str] = None, + properties_json: Optional[str] = None, + data_source_format: Optional[str] = None, + table_type: Optional[str] = None, + ): + """Upsert a single catalog asset into the database.""" + from app.db_adapter import 
WarehouseDB + + existing = WarehouseDB.get_catalog_asset_by_full_name(full_name) + + if existing: + WarehouseDB.update_catalog_asset( + existing["id"], + owner=owner, + comment=comment, + columns_json=columns_json, + tags_json=tags_json, + properties_json=properties_json, + data_source_format=data_source_format, + table_type=table_type, + last_indexed_at=now.isoformat(), + ) + stats.updated_assets += 1 + else: + WarehouseDB.create_catalog_asset( + asset_type=asset_type, + catalog=catalog, + schema_name=schema_name, + name=name, + full_name=full_name, + owner=owner, + comment=comment, + columns_json=columns_json, + tags_json=tags_json, + properties_json=properties_json, + data_source_format=data_source_format, + table_type=table_type, + last_indexed_at=now.isoformat(), + ) + stats.new_assets += 1 + + stats.assets_discovered += 1 diff --git a/dbx-agent-app/app/backend/app/services/chat_context.py b/dbx-agent-app/app/backend/app/services/chat_context.py new file mode 100644 index 00000000..a7431785 --- /dev/null +++ b/dbx-agent-app/app/backend/app/services/chat_context.py @@ -0,0 +1,282 @@ +""" +Context-aware chat service. + +Extracts entity references from user messages (e.g. catalog.schema.table), +resolves them against the asset database, and formats a workspace context +block to inject into the LLM system prompt. +""" + +import re +import json +import logging +from typing import List, Dict, Optional + +from app.db_adapter import DatabaseAdapter + +logger = logging.getLogger(__name__) + +# Max chars for the injected context block to avoid blowing up the context window +CONTEXT_CHAR_LIMIT = 4000 + +# --- Entity extraction patterns --- + +# Three-level: catalog.schema.table (with optional backtick quoting) +_THREE_LEVEL = re.compile( + r"`?(\w+)\.(\w+)\.(\w+)`?", +) + +# Two-level: schema.table — only if the parts look like identifiers (lowercase/snake) +_TWO_LEVEL = re.compile( + r"(? List[Dict]: + """ + Extract entity references from user text. 
+ + Returns a list of dicts with keys: + - match_type: 'three_level' | 'two_level' | 'keyword' | 'path' + - raw: the matched string + - catalog, schema_name, name (for catalog assets) + - asset_type (for keyword matches) + - path (for workspace path matches) + """ + entities: List[Dict] = [] + seen = set() + + # Three-level names (highest confidence) + for m in _THREE_LEVEL.finditer(text): + full = f"{m.group(1)}.{m.group(2)}.{m.group(3)}" + if full not in seen: + seen.add(full) + entities.append({ + "match_type": "three_level", + "raw": full, + "catalog": m.group(1), + "schema_name": m.group(2), + "name": m.group(3), + }) + + # Two-level names (only if not already matched as three-level substring) + for m in _TWO_LEVEL.finditer(text): + full = f"{m.group(1)}.{m.group(2)}" + if full not in seen and not any(full in e["raw"] for e in entities): + seen.add(full) + entities.append({ + "match_type": "two_level", + "raw": full, + "schema_name": m.group(1), + "name": m.group(2), + }) + + # Natural language keyword references + for m in _NL_KEYWORD.finditer(text): + asset_type = m.group(1).lower() + name = m.group(2).strip("`\"'") + key = f"{asset_type}:{name}" + if key not in seen and name not in seen: + seen.add(key) + entities.append({ + "match_type": "keyword", + "raw": name, + "asset_type": asset_type, + "name": name, + }) + + # Workspace paths + for m in _WORKSPACE_PATH.finditer(text): + path = m.group(1) + if path not in seen: + seen.add(path) + entities.append({ + "match_type": "path", + "raw": path, + "path": path, + }) + + return entities + + +def resolve_entities(entities: List[Dict]) -> List[Dict]: + """ + Resolve extracted entities against the CatalogAsset and WorkspaceAsset tables. + + Returns a list of resolved asset metadata dicts, or empty list if nothing found. 
+ """ + resolved: List[Dict] = [] + + for entity in entities: + match_type = entity.get("match_type") + + if match_type == "three_level": + full_name = entity["raw"] + asset = DatabaseAdapter.get_catalog_asset_by_full_name(full_name) + if asset: + resolved.append(asset) + continue + + if match_type == "two_level": + # Search by schema + name across all catalogs + schema_name = entity.get("schema_name", "") + name = entity.get("name", "") + assets, _ = DatabaseAdapter.list_catalog_assets( + schema_name=schema_name, search=name, page_size=3 + ) + for a in assets: + if a not in resolved: + resolved.append(a) + + if match_type == "keyword": + name = entity.get("name", "") + asset_type = entity.get("asset_type", "") + + # Check catalog assets first + catalog_types = {"table", "view", "function", "model", "volume"} + workspace_types = {"notebook", "job", "dashboard", "pipeline"} + + if asset_type in catalog_types: + assets, _ = DatabaseAdapter.list_catalog_assets( + asset_type=asset_type, search=name, page_size=3 + ) + for a in assets: + if a not in resolved: + resolved.append(a) + + if asset_type in workspace_types: + assets, _ = DatabaseAdapter.list_workspace_assets( + asset_type=asset_type, search=name, page_size=3 + ) + for a in assets: + if a not in resolved: + resolved.append(a) + + # If type is ambiguous, search both + if asset_type not in catalog_types and asset_type not in workspace_types: + cat_assets, _ = DatabaseAdapter.list_catalog_assets(search=name, page_size=2) + ws_assets, _ = DatabaseAdapter.list_workspace_assets(search=name, page_size=2) + for a in cat_assets + ws_assets: + if a not in resolved: + resolved.append(a) + + if match_type == "path": + path = entity.get("path", "") + assets, _ = DatabaseAdapter.list_workspace_assets(search=path, page_size=3) + for a in assets: + if a not in resolved: + resolved.append(a) + + return resolved + + +def _format_catalog_asset(asset: Dict) -> str: + """Format a single catalog asset for the context block.""" + 
parts = [f"**{asset.get('asset_type', 'asset').title()}** `{asset.get('full_name', asset.get('name', '?'))}`"] + + if asset.get("owner"): + parts.append(f"Owner: {asset['owner']}") + + if asset.get("comment"): + comment = asset["comment"][:120] + parts.append(f"Description: {comment}") + + if asset.get("columns_json"): + try: + columns = json.loads(asset["columns_json"]) + col_names = [c.get("name", "") for c in columns[:8]] + col_str = ", ".join(col_names) + if len(columns) > 8: + col_str += f", ... ({len(columns)} total)" + parts.append(f"Columns: {col_str}") + except (json.JSONDecodeError, TypeError): + pass + + if asset.get("row_count"): + parts.append(f"Rows: {asset['row_count']:,}") + + if asset.get("table_type"): + parts.append(f"Type: {asset['table_type']}") + + return " — ".join(parts) + + +def _format_workspace_asset(asset: Dict) -> str: + """Format a single workspace asset for the context block.""" + parts = [f"**{asset.get('asset_type', 'asset').title()}** `{asset.get('path', asset.get('name', '?'))}`"] + + if asset.get("owner"): + parts.append(f"Owner: {asset['owner']}") + + if asset.get("description"): + desc = asset["description"][:120] + parts.append(f"Description: {desc}") + + if asset.get("language"): + parts.append(f"Language: {asset['language']}") + + return " — ".join(parts) + + +def format_context_block(resolved: List[Dict]) -> str: + """ + Format resolved assets into a Markdown context block. + + Caps output at ~CONTEXT_CHAR_LIMIT chars. 
+ """ + if not resolved: + return "" + + lines = ["## Workspace Context", "The user is discussing these assets:"] + total_len = sum(len(l) for l in lines) + + for asset in resolved: + if "full_name" in asset: + line = "- " + _format_catalog_asset(asset) + elif "path" in asset: + line = "- " + _format_workspace_asset(asset) + else: + continue + + if total_len + len(line) > CONTEXT_CHAR_LIMIT: + lines.append(f"- _(+{len(resolved) - len(lines) + 2} more assets)_") + break + + lines.append(line) + total_len += len(line) + + return "\n".join(lines) + + +def enrich_system_prompt(base_prompt: str, user_message: str) -> str: + """ + Orchestrator: extract entities from user message, resolve against DB, + format context block, and append to the base system prompt. + """ + try: + entities = extract_entities(user_message) + if not entities: + return base_prompt + + resolved = resolve_entities(entities) + if not resolved: + return base_prompt + + context_block = format_context_block(resolved) + if not context_block: + return base_prompt + + return f"{base_prompt}\n\n{context_block}" + except Exception as e: + logger.warning("Context enrichment failed (using base prompt): %s", e) + return base_prompt diff --git a/dbx-agent-app/app/backend/app/services/collections.py b/dbx-agent-app/app/backend/app/services/collections.py new file mode 100644 index 00000000..7b458bf6 --- /dev/null +++ b/dbx-agent-app/app/backend/app/services/collections.py @@ -0,0 +1,204 @@ +""" +Business logic and validation for Collections and CollectionItems. 
from typing import Optional, Tuple
from sqlalchemy.orm import Session
from sqlalchemy.exc import IntegrityError

from app.models import App, MCPServer, Tool, Collection, CollectionItem


class CollectionValidationError(Exception):
    """Custom exception for collection validation errors."""

    pass


class CollectionService:
    """Service layer for collections business logic.

    Validates that referenced entities exist, that exactly one foreign key
    is supplied per item, and that items are not duplicated in a collection.
    """

    @staticmethod
    def _count_references(
        app_id: Optional[int],
        mcp_server_id: Optional[int],
        tool_id: Optional[int],
    ) -> int:
        """Return how many of the three item FKs are non-null."""
        return sum(x is not None for x in (app_id, mcp_server_id, tool_id))

    @staticmethod
    def validate_item_exists(
        db: Session,
        app_id: Optional[int] = None,
        mcp_server_id: Optional[int] = None,
        tool_id: Optional[int] = None,
    ) -> Tuple[bool, str]:
        """
        Validate that the referenced entity exists in the database.

        Args:
            db: Database session
            app_id: Optional app ID
            mcp_server_id: Optional MCP server ID
            tool_id: Optional tool ID

        Returns:
            Tuple of (is_valid, error_message):
            (True, "") if every supplied reference exists, otherwise
            (False, error_msg) for the first missing one.
        """
        if app_id is not None:
            app = db.query(App).filter(App.id == app_id).first()
            if not app:
                return False, f"App with id {app_id} does not exist"

        if mcp_server_id is not None:
            server = db.query(MCPServer).filter(MCPServer.id == mcp_server_id).first()
            if not server:
                return False, f"MCP Server with id {mcp_server_id} does not exist"

        if tool_id is not None:
            tool = db.query(Tool).filter(Tool.id == tool_id).first()
            if not tool:
                return False, f"Tool with id {tool_id} does not exist"

        return True, ""

    @staticmethod
    def check_duplicate_item(
        db: Session,
        collection_id: int,
        app_id: Optional[int] = None,
        mcp_server_id: Optional[int] = None,
        tool_id: Optional[int] = None,
    ) -> bool:
        """
        Check if an item already exists in the collection.

        Args:
            db: Database session
            collection_id: Collection ID
            app_id: Optional app ID
            mcp_server_id: Optional MCP server ID
            tool_id: Optional tool ID

        Returns:
            True if a duplicate exists, False otherwise.
        """
        # BUGFIX: previously, calling this with all three FKs None produced
        # an unfiltered query, so ANY item in the collection counted as a
        # "duplicate". With no reference there is nothing to duplicate.
        if CollectionService._count_references(app_id, mcp_server_id, tool_id) == 0:
            return False

        query = db.query(CollectionItem).filter(
            CollectionItem.collection_id == collection_id
        )

        if app_id is not None:
            query = query.filter(CollectionItem.app_id == app_id)
        elif mcp_server_id is not None:
            query = query.filter(CollectionItem.mcp_server_id == mcp_server_id)
        elif tool_id is not None:
            query = query.filter(CollectionItem.tool_id == tool_id)

        return query.first() is not None

    @staticmethod
    def validate_and_add_item(
        db: Session,
        collection_id: int,
        app_id: Optional[int] = None,
        mcp_server_id: Optional[int] = None,
        tool_id: Optional[int] = None,
    ) -> Tuple[bool, str, Optional[CollectionItem]]:
        """
        Validate and add an item to a collection.

        Performs all validation checks:
        1. Exactly one of app_id / mcp_server_id / tool_id is provided
        2. The collection exists
        3. The referenced entity exists
        4. The item is not already in the collection

        Args:
            db: Database session
            collection_id: Collection ID
            app_id: Optional app ID
            mcp_server_id: Optional MCP server ID
            tool_id: Optional tool ID

        Returns:
            Tuple of (success, message, item):
            (True, "", item) on success, (False, error_msg, None) on failure.
        """
        # Enforce the "exactly one FK non-null" constraint up front instead
        # of letting it surface as a raw IntegrityError at flush time.
        ref_count = CollectionService._count_references(app_id, mcp_server_id, tool_id)
        if ref_count != 1:
            return (
                False,
                "Exactly one of app_id, mcp_server_id, or tool_id must be provided",
                None,
            )

        # Verify collection exists
        collection = db.query(Collection).filter(Collection.id == collection_id).first()
        if not collection:
            return False, f"Collection with id {collection_id} does not exist", None

        # Verify referenced entity exists
        is_valid, error_msg = CollectionService.validate_item_exists(
            db, app_id=app_id, mcp_server_id=mcp_server_id, tool_id=tool_id
        )
        if not is_valid:
            return False, error_msg, None

        # Check for duplicates
        if CollectionService.check_duplicate_item(
            db,
            collection_id=collection_id,
            app_id=app_id,
            mcp_server_id=mcp_server_id,
            tool_id=tool_id,
        ):
            item_type = (
                "app"
                if app_id
                else "server" if mcp_server_id else "tool"
            )
            item_id = app_id or mcp_server_id or tool_id
            return (
                False,
                f"{item_type.capitalize()} with id {item_id} already exists in collection",
                None,
            )

        # Create the item
        try:
            item = CollectionItem(
                collection_id=collection_id,
                app_id=app_id,
                mcp_server_id=mcp_server_id,
                tool_id=tool_id,
            )
            db.add(item)
            db.flush()  # Flush to catch any DB constraints without committing
            return True, "", item
        except IntegrityError as e:
            db.rollback()
            return False, f"Database integrity error: {str(e)}", None

    @staticmethod
    def get_collection_item_counts(db: Session, collection_id: int) -> dict:
        """
        Get counts of different item types in a collection.

        Args:
            db: Database session
            collection_id: Collection ID

        Returns:
            Dictionary with counts: {"total", "apps", "servers", "tools"}.
        """
        items = (
            db.query(CollectionItem)
            .filter(CollectionItem.collection_id == collection_id)
            .all()
        )

        return {
            "total": len(items),
            "apps": sum(1 for item in items if item.app_id is not None),
            "servers": sum(1 for item in items if item.mcp_server_id is not None),
            "tools": sum(1 for item in items if item.tool_id is not None),
        }
@dataclass
class DiscoveredServer:
    """
    Represents a discovered MCP server.

    Attributes:
        server_url: MCP server endpoint URL
        kind: Server type (managed/external/custom)
        tools: List of discovered tools
        error: Error message if discovery failed (optional)
    """

    server_url: str
    kind: str
    tools: List[NormalizedTool]
    error: Optional[str] = None  # None means discovery of this server succeeded


@dataclass
class DiscoveryResult:
    """
    Results from a discovery operation.

    Attributes:
        servers_discovered: Number of servers discovered
        tools_discovered: Total number of tools discovered
        servers: List of discovered servers with their tools
        errors: List of error messages encountered
    """

    servers_discovered: int
    tools_discovered: int
    servers: List[DiscoveredServer]
    errors: List[str]


@dataclass
class UpsertResult:
    """
    Results from upserting discovery data into the database.

    Attributes:
        new_servers: Number of new servers created
        updated_servers: Number of existing servers updated
        new_tools: Number of new tools created
        updated_tools: Number of existing tools updated
    """

    new_servers: int = 0
    updated_servers: int = 0
    new_tools: int = 0
    updated_tools: int = 0


@dataclass
class DiscoveredAgent:
    """An agent found during auto-discovery."""

    name: str
    endpoint_url: str
    description: Optional[str] = None
    capabilities: Optional[str] = None  # comma-joined capability names (or tag list)
    a2a_capabilities: Optional[str] = None  # raw A2A capabilities as a JSON string
    skills: Optional[str] = None  # raw A2A skills as a JSON string
    protocol_version: Optional[str] = None
    source: str = "serving_endpoint"  # "serving_endpoint" | "app"
    app_id: Optional[int] = None  # Link to backing app (if source="app")


@dataclass
class AgentDiscoveryResult:
    """Results from agent auto-discovery."""

    agents: List[DiscoveredAgent]
    errors: List[str]


@dataclass
class AgentUpsertResult:
    """Results from upserting discovered agents into the database."""

    new_agents: int = 0
    updated_agents: int = 0


# Agent card probe paths and timeout (A2A convention: well-known path first)
AGENT_CARD_PATHS = ["/.well-known/agent.json", "/card"]
AGENT_CARD_PROBE_TIMEOUT = 5.0
# Foundation Model API endpoint prefixes — skip these during agent discovery
FMAPI_PREFIXES = (
    "databricks-claude-", "databricks-gpt-", "databricks-llama-",
    "databricks-meta-", "databricks-gemini-", "databricks-gemma-",
    "databricks-qwen", "databricks-gte-", "databricks-bge-",
)


class DiscoveryService:
    """
    Orchestrates discovery of MCP servers and tools.

    Coordinates scanning workspace, querying catalog, and discovering
    tools from custom server URLs.
    """

    def __init__(self, mcp_client: Optional[MCPClient] = None):
        """
        Initialize discovery service.

        Args:
            mcp_client: Optional MCP client instance (for testing)
        """
        self._mcp_client = mcp_client
        # App metadata staged by discover_from_workspace for the later upsert.
        self._pending_apps: List[Dict[str, Any]] = []

    async def discover_from_url(
        self,
        server_url: str,
        kind: str = "custom",
    ) -> DiscoveredServer:
        """
        Discover tools from a single MCP server URL.

        Args:
            server_url: MCP server endpoint URL
            kind: Server type (managed/external/custom)

        Returns:
            DiscoveredServer with tools, or with `error` set on failure.
        """
        tools: List[NormalizedTool] = []
        error: Optional[str] = None

        try:
            if self._mcp_client:
                # Use provided client (for testing)
                mcp_tools = await self._mcp_client.list_tools(server_url)
            else:
                # Create new client for this request
                async with MCPClient() as client:
                    mcp_tools = await client.list_tools(server_url)

            # Parse and normalize each tool
            for mcp_tool in mcp_tools:
                try:
                    tools.append(
                        ToolParser.parse_tool(
                            {
                                "name": mcp_tool.name,
                                "description": mcp_tool.description,
                                "inputSchema": mcp_tool.input_schema,
                            }
                        )
                    )
                except ValueError as e:
                    # Skip invalid tool but continue processing others.
                    # FIX: was silently dropped; now logged for diagnosis.
                    logger.debug(
                        "Skipping invalid tool from %s: %s", server_url, e,
                    )
                    continue

        except (MCPConnectionError, MCPTimeoutError) as e:
            error = str(e)
        except Exception as e:
            error = f"Unexpected error: {str(e)}"

        return DiscoveredServer(
            server_url=server_url,
            kind=kind,
            tools=tools,
            error=error,
        )

    async def discover_from_workspace(
        self, profile: Optional[str] = None,
    ) -> DiscoveryResult:
        """
        Discover MCP servers deployed as Databricks Apps in the workspace.

        Enumerates running apps via the Databricks SDK, then probes each for
        MCP endpoints at common paths (/mcp, /api/mcp).

        Args:
            profile: Databricks CLI profile name (falls back to settings/env)

        Returns:
            DiscoveryResult with any MCP-enabled apps found
        """
        errors: List[str] = []
        servers: List[DiscoveredServer] = []
        apps_metadata: List[Dict[str, Any]] = []

        # --- 1. List workspace apps via SDK (synchronous → thread pool) ---
        try:
            app_list = await self._list_workspace_apps(profile)
        except Exception as e:
            logger.error("Workspace app listing failed: %s", e)
            return DiscoveryResult(
                servers_discovered=0,
                tools_discovered=0,
                servers=[],
                errors=[f"Failed to list workspace apps: {e}"],
            )

        if not app_list:
            return DiscoveryResult(
                servers_discovered=0, tools_discovered=0, servers=[], errors=[],
            )

        # --- 2. Register ALL running apps (regardless of MCP support) ---
        for app_info in app_list:
            app_url = app_info.get("url")
            if not app_url:
                continue
            apps_metadata.append({
                "name": app_info["name"],
                "url": app_url,
                "owner": app_info.get("owner"),
                "mcp_url": None,  # Will be set if MCP endpoint found
            })

        # --- 3. Probe each running app for MCP endpoints ---
        probe_tasks = [
            self._probe_app_for_mcp(app_info)
            for app_info in app_list
            if app_info.get("url")
        ]

        if probe_tasks:
            probe_results = await asyncio.gather(
                *probe_tasks, return_exceptions=True,
            )
            for result in probe_results:
                if isinstance(result, Exception):
                    errors.append(str(result))
                elif result is not None:
                    discovered_server, mcp_meta = result
                    servers.append(discovered_server)
                    # Update the matching app entry with the MCP URL
                    for app_meta in apps_metadata:
                        if app_meta["name"] == mcp_meta["name"]:
                            app_meta["mcp_url"] = mcp_meta["mcp_url"]
                            break

        # Stash app metadata so upsert_discovery_results can create App rows
        self._pending_apps = apps_metadata

        total_tools = sum(len(s.tools) for s in servers)
        return DiscoveryResult(
            servers_discovered=len(servers),
            tools_discovered=total_tools,
            servers=servers,
            errors=errors,
        )

    async def discover_from_catalog(self) -> DiscoveryResult:
        """
        Discover managed MCP servers from Databricks MCP catalog.

        Queries the catalog URL (if configured) for a list of managed MCP
        servers, then discovers tools from each. Gracefully returns empty
        results when the catalog URL is not set or not responding.

        Returns:
            DiscoveryResult with managed servers (or empty if catalog unavailable)
        """
        catalog_url = settings.mcp_catalog_url
        if not catalog_url:
            logger.debug("mcp_catalog_url not configured — skipping catalog discovery")
            return DiscoveryResult(
                servers_discovered=0, tools_discovered=0, servers=[], errors=[],
            )

        errors: List[str] = []
        servers: List[DiscoveredServer] = []

        try:
            import httpx

            async with httpx.AsyncClient(timeout=10.0) as http:
                resp = await http.get(catalog_url)
                resp.raise_for_status()
                catalog_data = resp.json()

            # Expected shape: { "servers": [ { "url": "...", ... }, ... ] }
            server_entries = catalog_data.get("servers", [])

            discover_tasks = [
                self.discover_from_url(
                    entry["url"], kind="managed",
                )
                for entry in server_entries
                if entry.get("url")
            ]

            if discover_tasks:
                results = await asyncio.gather(*discover_tasks)
                servers = list(results)

        except Exception as e:
            # Catalog is an optional source — degrade quietly but record why.
            logger.warning("Catalog discovery unavailable (%s): %s", catalog_url, e)
            errors.append(f"Catalog endpoint not available: {e}")

        successful = [s for s in servers if s.error is None]
        total_tools = sum(len(s.tools) for s in successful)
        errs_from_servers = [
            f"{s.server_url}: {s.error}" for s in servers if s.error
        ]

        return DiscoveryResult(
            servers_discovered=len(successful),
            tools_discovered=total_tools,
            servers=servers,
            errors=errors + errs_from_servers,
        )
+ + Args: + server_urls: List of MCP server endpoint URLs + kind: Server type for all URLs (default: custom) + + Returns: + DiscoveryResult with aggregated findings + + Example: + >>> service = DiscoveryService() + >>> urls = ["https://mcp1.example.com", "https://mcp2.example.com"] + >>> result = await service.discover_from_urls(urls) + >>> print(f"Discovered {result.tools_discovered} tools from {result.servers_discovered} servers") + """ + if not server_urls: + return DiscoveryResult( + servers_discovered=0, + tools_discovered=0, + servers=[], + errors=[], + ) + + # Discover from all URLs in parallel + tasks = [ + self.discover_from_url(url, kind=kind) + for url in server_urls + ] + servers = await asyncio.gather(*tasks) + + # Aggregate results + total_tools = sum(len(server.tools) for server in servers) + successful_servers = sum(1 for server in servers if server.error is None) + errors = [ + f"{server.server_url}: {server.error}" + for server in servers + if server.error + ] + + return DiscoveryResult( + servers_discovered=successful_servers, + tools_discovered=total_tools, + servers=list(servers), + errors=errors, + ) + + async def discover_all( + self, + custom_urls: Optional[List[str]] = None, + profile: Optional[str] = None, + ) -> DiscoveryResult: + """ + Run full discovery from all sources. + + Discovers from: + 1. Workspace-deployed apps (Databricks Apps with MCP endpoints) + 2. MCP catalog managed servers + 3. 
Custom URLs (if provided) + + Args: + custom_urls: Optional list of custom MCP server URLs + profile: Databricks CLI profile for workspace discovery + + Returns: + Aggregated DiscoveryResult from all sources + """ + # Run all discovery sources in parallel + tasks = [ + self.discover_from_workspace(profile=profile), + self.discover_from_catalog(), + ] + + if custom_urls: + tasks.append(self.discover_from_urls(custom_urls)) + + results = await asyncio.gather(*tasks) + + # Aggregate results + all_servers = [] + all_errors = [] + total_servers = 0 + total_tools = 0 + + for result in results: + all_servers.extend(result.servers) + all_errors.extend(result.errors) + total_servers += result.servers_discovered + total_tools += result.tools_discovered + + return DiscoveryResult( + servers_discovered=total_servers, + tools_discovered=total_tools, + servers=all_servers, + errors=all_errors, + ) + + + # ---- Agent auto-discovery methods ---- + + async def discover_agents_all( + self, profile: Optional[str] = None, + ) -> AgentDiscoveryResult: + """ + Discover agents from serving endpoints and workspace apps in parallel. + + De-duplicates by name; serving endpoint wins on collision. 
+ """ + tasks = [ + self._discover_agents_from_serving_endpoints(profile), + self._discover_agents_from_apps(profile), + ] + results = await asyncio.gather(*tasks, return_exceptions=True) + + all_agents: List[DiscoveredAgent] = [] + all_errors: List[str] = [] + + for result in results: + if isinstance(result, Exception): + all_errors.append(f"Agent discovery error: {result}") + else: + all_agents.extend(result.agents) + all_errors.extend(result.errors) + + # De-duplicate by name — serving_endpoint source wins over app + seen: Dict[str, DiscoveredAgent] = {} + for agent in all_agents: + existing = seen.get(agent.name) + if existing is None: + seen[agent.name] = agent + elif agent.source == "serving_endpoint": + seen[agent.name] = agent + + return AgentDiscoveryResult( + agents=list(seen.values()), + errors=all_errors, + ) + + async def _discover_agents_from_serving_endpoints( + self, profile: Optional[str] = None, + ) -> AgentDiscoveryResult: + """ + Discover agents from Databricks Model Serving endpoints. + + Filters to READY endpoints, constructs invocation URLs, and + optionally fetches A2A Agent Cards for richer metadata. 
+ """ + agents: List[DiscoveredAgent] = [] + errors: List[str] = [] + + def _list_endpoints_sync() -> tuple: + from databricks.sdk import WorkspaceClient + + client = WorkspaceClient(profile=profile) if profile else WorkspaceClient() + workspace_host = client.config.host.rstrip("/") + + # Extract auth token for cross-service probes + auth_headers = client.config.authenticate() + auth_val = auth_headers.get("Authorization", "") + token = auth_val[7:] if auth_val.startswith("Bearer ") else None + + all_eps = list(client.serving_endpoints.list()) + + results = [] + skipped = 0 + for ep in all_eps: + ep_name = getattr(ep, "name", None) + if not ep_name: + continue + + # Skip Foundation Model API endpoints (not agents) + if ep_name.startswith(FMAPI_PREFIXES): + skipped += 1 + continue + + state_obj = getattr(ep, "state", None) + ready_val = getattr(state_obj, "ready", None) if state_obj else None + # SDK returns an enum (EndpointStateReady.READY) — compare as string + if not ready_val or "READY" not in str(ready_val): + continue + + # Extract task type + config = getattr(ep, "config", None) + served_entities = getattr(config, "served_entities", None) or [] + task = None + for entity in served_entities: + t = getattr(entity, "task", None) + if t: + task = str(t) + break + + # Extract tags + tags = {} + for tag in getattr(ep, "tags", None) or []: + key = getattr(tag, "key", None) + value = getattr(tag, "value", None) + if key: + tags[key] = value + + results.append({ + "name": ep_name, + "url": f"{workspace_host}/serving-endpoints/{ep_name}/invocations", + "task": task, + "tags": tags, + "workspace_host": workspace_host, + }) + return results, token + + try: + loop = asyncio.get_event_loop() + result_tuple = await loop.run_in_executor(None, _list_endpoints_sync) + endpoints, ws_token = result_tuple + if ws_token: + self._workspace_token = ws_token + except Exception as e: + logger.error("Failed to list serving endpoints: %s", e) + return AgentDiscoveryResult( + agents=[], 
+ errors=[f"Failed to list serving endpoints: {e}"], + ) + + logger.info( + "Serving endpoints: %d candidates (from listing)", + len(endpoints), + ) + + if not endpoints: + return AgentDiscoveryResult(agents=[], errors=[]) + + # Probe each endpoint for agent card in parallel + probe_tasks = [ + self._probe_endpoint_for_agent(ep_info) for ep_info in endpoints + ] + probe_results = await asyncio.gather(*probe_tasks, return_exceptions=True) + + for result in probe_results: + if isinstance(result, Exception): + errors.append(str(result)) + elif result is not None: + agents.append(result) + + logger.info( + "Serving endpoint agent discovery: %d endpoints checked, %d agents found", + len(endpoints), len(agents), + ) + return AgentDiscoveryResult(agents=agents, errors=errors) + + async def _probe_endpoint_for_agent( + self, ep_info: Dict[str, Any], + ) -> Optional[DiscoveredAgent]: + """ + Check if a serving endpoint is an agent. + + An endpoint qualifies if it has task=llm/v1/chat, an 'agent' tag, + or responds to an Agent Card request. 
+ """ + name = ep_info["name"] + url = ep_info["url"] + task = ep_info.get("task") + tags = ep_info.get("tags", {}) + workspace_host = ep_info.get("workspace_host", "") + + tag_keys_lower = {k.lower() for k in tags} + is_agent_tagged = "agent" in tag_keys_lower + is_chat_task = task and "chat" in task.lower() + # MLflow-deployed agents have a MONITOR_EXPERIMENT_ID tag + is_mlflow_agent = "monitor_experiment_id" in tag_keys_lower + + # Try fetching A2A Agent Card from the workspace app URL (if app-backed) + token = getattr(self, "_workspace_token", None) + agent_card = None + try: + async with A2AClient(timeout=AGENT_CARD_PROBE_TIMEOUT) as client: + agent_card = await client.fetch_agent_card( + workspace_host + f"/serving-endpoints/{name}", + auth_token=token, + ) + except A2AClientError: + pass + except Exception: + pass + + if agent_card: + return DiscoveredAgent( + name=agent_card.get("name", name), + endpoint_url=url, + description=agent_card.get("description"), + capabilities=",".join(agent_card.get("capabilities", {}).keys()) + if isinstance(agent_card.get("capabilities"), dict) + else None, + a2a_capabilities=json.dumps(agent_card.get("capabilities")) + if agent_card.get("capabilities") + else None, + skills=json.dumps(agent_card.get("skills")) + if agent_card.get("skills") + else None, + protocol_version=agent_card.get("protocolVersion"), + source="serving_endpoint", + ) + + # No card, but if it looks like an agent based on task/tags, still register + if is_agent_tagged or is_chat_task or is_mlflow_agent: + tag_list = [f"{k}={v}" for k, v in tags.items() if v] if tags else [] + return DiscoveredAgent( + name=name, + endpoint_url=url, + description=f"Serving endpoint ({task or 'unknown task'})", + capabilities=",".join(tag_list) if tag_list else task, + source="serving_endpoint", + ) + + return None + + async def _discover_agents_from_apps( + self, profile: Optional[str] = None, + ) -> AgentDiscoveryResult: + """ + Discover agents from Databricks Apps that 
expose A2A Agent Cards. + + Reuses _list_workspace_apps() and probes each running app at + /.well-known/agent.json and /card. + """ + agents: List[DiscoveredAgent] = [] + errors: List[str] = [] + + try: + app_list = await self._list_workspace_apps(profile) + except Exception as e: + logger.error("Workspace app listing failed for agent discovery: %s", e) + return AgentDiscoveryResult( + agents=[], + errors=[f"Failed to list workspace apps for agent discovery: {e}"], + ) + + if not app_list: + return AgentDiscoveryResult(agents=[], errors=[]) + + probe_tasks = [] + for app_info in app_list: + app_url = app_info.get("url") + if not app_url: + continue + probe_tasks.append(self._probe_app_for_agent(app_info)) + + logger.info(f"[AGENT-DISCOVERY] Built {len(probe_tasks)} probe tasks from {len(app_list)} apps") + + if probe_tasks: + logger.info(f"[AGENT-DISCOVERY] Probing {len(probe_tasks)} apps for agent cards") + probe_results = await asyncio.gather( + *probe_tasks, return_exceptions=True, + ) + none_count = 0 + for i, result in enumerate(probe_results): + if isinstance(result, Exception): + errors.append(str(result)) + logger.warning(f"[AGENT-DISCOVERY] App {i}: Exception - {result}") + elif result is not None: + agents.append(result) + logger.info(f"[AGENT-DISCOVERY] App {i}: Found agent '{result.name}'") + else: + none_count += 1 + logger.info(f"[AGENT-DISCOVERY] Results: {len(agents)} agents, {len(errors)} errors, {none_count} None") + + logger.info( + "App agent discovery: %d apps checked, %d agents found", + len(app_list), len(agents), + ) + return AgentDiscoveryResult(agents=agents, errors=errors) + + async def _probe_app_for_agent( + self, app_info: Dict[str, Any], + ) -> Optional[DiscoveredAgent]: + """ + Probe a single Databricks App for an A2A Agent Card. + + Tries /.well-known/agent.json and /card with a short timeout. 
+ """ + app_url = app_info["url"] + app_name = app_info["name"] + + token = getattr(self, "_workspace_token", None) + agent_card = None + try: + logger.info(f"[AGENT] Probing app '{app_name}' at {app_url}") + async with A2AClient(timeout=AGENT_CARD_PROBE_TIMEOUT) as client: + agent_card = await client.fetch_agent_card(app_url, auth_token=token) + logger.info(f"[AGENT] ✓ Found agent card for '{app_name}': name={agent_card.get('name', app_name)}") + except A2AClientError as e: + logger.info(f"[AGENT] ✗ No agent card for '{app_name}' (A2AClientError): {e}") + return None + except Exception as e: + logger.error(f"[AGENT] ✗ Probe failed for '{app_name}': {type(e).__name__}: {e}", exc_info=True) + return None + + if not agent_card: + logger.warning(f"[AGENT] ✗ Agent card is None for '{app_name}'") + return None + + discovered = DiscoveredAgent( + name=agent_card.get("name", app_name), + endpoint_url=app_url, + description=agent_card.get("description"), + capabilities=",".join(agent_card.get("capabilities", {}).keys()) + if isinstance(agent_card.get("capabilities"), dict) + else None, + a2a_capabilities=json.dumps(agent_card.get("capabilities")) + if agent_card.get("capabilities") + else None, + skills=json.dumps(agent_card.get("skills")) + if agent_card.get("skills") + else None, + protocol_version=agent_card.get("protocolVersion"), + source="app", + ) + logger.info(f"[AGENT] Created DiscoveredAgent for '{discovered.name}' from app '{app_name}'") + return discovered + + def upsert_agent_discovery_results( + self, + result: AgentDiscoveryResult, + ) -> AgentUpsertResult: + """ + Upsert discovered agents into the database. + + Matches by unique agent name. New agents get status="discovered". + Existing agents: factual fields are updated, but manual fields + (auth_token, system_prompt, collection_id, status) are preserved. + + For agents from apps (source="app"), links them to their backing app via app_id. 
+ """ + upsert = AgentUpsertResult() + + for discovered in result.agents: + # Look up app_id if this agent is from an app + app_id = None + if discovered.source == "app" and discovered.endpoint_url: + app = DatabaseAdapter.get_app_by_url(discovered.endpoint_url) + if app: + app_id = app["id"] + logger.info(f"[AGENT] Linking agent '{discovered.name}' to app '{app['name']}' (id={app_id})") + + # Check if agent already exists + existing = DatabaseAdapter.get_agent_by_name(discovered.name) + + if existing: + # Update factual fields only + DatabaseAdapter.upsert_agent_by_name( + name=discovered.name, + endpoint_url=discovered.endpoint_url, + description=discovered.description, + capabilities=discovered.capabilities, + a2a_capabilities=discovered.a2a_capabilities, + skills=discovered.skills, + protocol_version=discovered.protocol_version, + app_id=app_id, + ) + upsert.updated_agents += 1 + else: + # Create new agent + DatabaseAdapter.upsert_agent_by_name( + name=discovered.name, + endpoint_url=discovered.endpoint_url, + description=discovered.description, + capabilities=discovered.capabilities, + a2a_capabilities=discovered.a2a_capabilities, + skills=discovered.skills, + protocol_version=discovered.protocol_version, + status="discovered", + app_id=app_id, + ) + upsert.new_agents += 1 + + return upsert + + def upsert_discovery_results( + self, + discovery_result: DiscoveryResult, + ) -> UpsertResult: + """ + Upsert discovery results into the database. + + Creates or updates Apps, MCP servers, and tools based on discovery. + Handles duplicates gracefully by updating existing records. 
+ + Args: + discovery_result: Results from discovery operation + + Returns: + UpsertResult with counts of new/updated entities + """ + upsert_result = UpsertResult() + + pending_apps = getattr(self, "_pending_apps", []) + logger.info(f"[UPSERT] Starting upsert with {len(pending_apps)} pending apps") + + # Build a url→app_id map from pending app metadata (set during workspace discovery) + url_to_app_id: Dict[str, int] = {} + for app_meta in pending_apps: + logger.info(f"[UPSERT] Processing app: {app_meta['name']}") + app_id = self._upsert_app( + name=app_meta["name"], + url=app_meta.get("url"), + owner=app_meta.get("owner"), + ) + logger.info(f"[UPSERT] App '{app_meta['name']}' upserted with ID: {app_id}") + # Map the MCP endpoint URL back to this App + if app_meta.get("mcp_url"): + url_to_app_id[app_meta["mcp_url"]] = app_id + upsert_result.updated_servers += 1 # Count apps as updated + + for discovered_server in discovery_result.servers: + # Skip servers with errors + if discovered_server.error: + continue + + app_id = url_to_app_id.get(discovered_server.server_url) + + # Upsert MCP server + server_dict = self._upsert_mcp_server( + discovered_server.server_url, + discovered_server.kind, + app_id=app_id, + ) + logger.info(f"[UPSERT] MCP server upserted: {discovered_server.server_url} (id={server_dict['id']})") + upsert_result.updated_servers += 1 + + # Get the server ID + mcp_server_id = server_dict["id"] + + # Upsert tools + for normalized_tool in discovered_server.tools: + tool_dict = self._upsert_tool( + mcp_server_id, + normalized_tool, + ) + logger.info(f"[UPSERT] Tool upserted: {normalized_tool.name} (id={tool_dict['id']})") + upsert_result.updated_tools += 1 + + logger.info(f"[UPSERT] Upsert complete. 
Apps: {len(pending_apps)}, Servers: {upsert_result.updated_servers}, Tools: {upsert_result.updated_tools}") + self._pending_apps = [] # Clear after upsert + return upsert_result + + def _upsert_app( + self, + name: str, + url: Optional[str] = None, + owner: Optional[str] = None, + ) -> int: + """ + Upsert a Databricks App and return its ID. + + Matches by unique app name. Creates a new row if not found, + otherwise updates the existing one. + + Args: + name: App name (unique key) + url: Deployed app URL + owner: App owner + + Returns: + The App's primary key ID + """ + logger.info(f"[DB] Upserting app '{name}' with url={url}, owner={owner}") + app_dict = DatabaseAdapter.upsert_app_by_name( + name=name, url=url, owner=owner + ) + logger.info(f"[DB] App '{name}' upserted with id={app_dict['id']}") + return app_dict["id"] + + def _upsert_mcp_server( + self, + server_url: str, + kind: str, + app_id: Optional[int] = None, + ) -> Dict: + """ + Upsert an MCP server into the database. + + Args: + server_url: MCP server endpoint URL + kind: Server type (managed/external/custom) + app_id: Optional FK to parent App + + Returns: + Server dict with ID + """ + return DatabaseAdapter.upsert_mcp_server_by_url( + server_url=server_url, kind=kind, app_id=app_id + ) + + def _upsert_tool( + self, + mcp_server_id: int, + normalized_tool: NormalizedTool, + ) -> Dict: + """ + Upsert a tool into the database. + + Args: + mcp_server_id: ID of the parent MCP server + normalized_tool: Normalized tool specification + + Returns: + Tool dict with ID + """ + return DatabaseAdapter.upsert_tool_by_server_and_name( + mcp_server_id=mcp_server_id, + name=normalized_tool.name, + description=normalized_tool.description, + parameters=normalized_tool.parameters, + ) + + + # ----- Private helpers for workspace discovery ----- + + async def _list_workspace_apps( + self, profile: Optional[str] = None, + ) -> List[Dict[str, Any]]: + """ + Enumerate Databricks Apps in the workspace via the SDK. 
+ + Runs the synchronous SDK call in a thread pool. Returns a list of + dicts with keys: name, url, owner, status. + """ + + def _list_sync() -> tuple: + from databricks.sdk import WorkspaceClient + + if profile: + client = WorkspaceClient(profile=profile) + else: + client = WorkspaceClient() + + # Extract the SP's auth token for cross-app requests + auth_headers = client.config.authenticate() + token = None + auth_val = auth_headers.get("Authorization", "") + if auth_val.startswith("Bearer "): + token = auth_val[7:] + + results = [] + for app in client.apps.list(): + # Determine if app is running via compute_status (preferred) + # or fall back to active_deployment status + compute_state = None + cs = getattr(app, "compute_status", None) + if cs: + compute_state = str(getattr(cs, "state", "")) + + deploy_state = None + dep = getattr(app, "active_deployment", None) + if dep: + dep_status = getattr(dep, "status", None) + if dep_status: + deploy_state = str(getattr(dep_status, "state", "")) + + app_url = getattr(app, "url", None) or "" + app_url = app_url.rstrip("/") if app_url else "" + + results.append({ + "name": app.name, + "url": app_url, + "owner": getattr(app, "creator", None) + or getattr(app, "updater", None), + "compute_state": compute_state, + "deploy_state": deploy_state, + }) + return results, token + + loop = asyncio.get_event_loop() + result_tuple = await loop.run_in_executor(None, _list_sync) + all_apps, workspace_token = result_tuple + + # Store token for cross-app probe requests + self._workspace_token = workspace_token + + # Filter to running apps: compute is ACTIVE or deployment SUCCEEDED + running = [ + a for a in all_apps + if a.get("url") and ( + "ACTIVE" in (a.get("compute_state") or "") + or "SUCCEEDED" in (a.get("deploy_state") or "") + ) + ] + + logger.info( + "Workspace apps: %d total, %d running", + len(all_apps), len(running), + ) + return running + + async def _probe_app_for_mcp( + self, app_info: Dict[str, Any], + ) -> Optional[tuple]: 
+ """ + Probe a single Databricks App for an MCP endpoint. + + Tries common MCP paths with a short timeout. Returns a tuple of + (DiscoveredServer, app_metadata_dict) if MCP is found, else None. + """ + app_url = app_info["url"] + app_name = app_info["name"] + + # Build candidate URLs to probe + candidate_urls = [f"{app_url}{path}" for path in MCP_PROBE_PATHS] + + token = getattr(self, "_workspace_token", None) + async with MCPClient(timeout=MCP_PROBE_TIMEOUT) as client: + for probe_url in candidate_urls: + try: + mcp_tools = await client.list_tools(probe_url, auth_token=token) + + # Parse tools + tools: List[NormalizedTool] = [] + for mcp_tool in mcp_tools: + try: + tools.append(ToolParser.parse_tool({ + "name": mcp_tool.name, + "description": mcp_tool.description, + "inputSchema": mcp_tool.input_schema, + })) + except ValueError: + continue + + logger.info( + "MCP endpoint found: %s (%d tools)", + probe_url, len(tools), + ) + + server = DiscoveredServer( + server_url=probe_url, + kind="managed", + tools=tools, + ) + app_meta = { + "name": app_name, + "url": app_url, + "owner": app_info.get("owner"), + "mcp_url": probe_url, + } + return (server, app_meta) + + except (MCPConnectionError, MCPTimeoutError): + # This path didn't respond as MCP — try next + continue + except Exception as e: + logger.debug( + "Probe %s failed unexpectedly: %s", probe_url, e, + ) + continue + + # No MCP endpoint found — not an error, just a non-MCP app + return None + + +async def create_discovery_service() -> DiscoveryService: + """ + Factory function to create a DiscoveryService instance. 
+ + Returns: + Configured DiscoveryService instance + """ + return DiscoveryService() diff --git a/dbx-agent-app/app/backend/app/services/embedding.py b/dbx-agent-app/app/backend/app/services/embedding.py new file mode 100644 index 00000000..c1f851e2 --- /dev/null +++ b/dbx-agent-app/app/backend/app/services/embedding.py @@ -0,0 +1,239 @@ +""" +Embedding Service — generates vector embeddings for indexed assets. + +Dual-path: + - Production: Databricks Foundation Model API (databricks-bge-large-en) + - Dev/local: Simple keyword-based pseudo-embeddings using TF-IDF-style hashing + +Embeddings are stored in the AssetEmbedding table and used by SearchService +for cosine-similarity ranking. +""" + +import json +import logging +import math +import hashlib +from typing import List, Optional, Dict, Any + +import httpx + +from app.config import settings +from app.db_adapter import DatabaseAdapter + +logger = logging.getLogger(__name__) + +# Default embedding dimension for the keyword fallback +KEYWORD_EMBED_DIM = 256 + + +class EmbeddingService: + """Generates and stores embeddings for asset text content.""" + + def __init__(self): + self._use_fmapi = bool(settings.databricks_host) + self._model = settings.embedding_model + self._dimension = settings.embedding_dimension + + def _build_text(self, asset_type: str, asset: Dict[str, Any]) -> str: + """Build searchable text from an asset dict (type-specific).""" + parts: List[str] = [] + + # Name is always primary + if asset.get("name"): + parts.append(asset["name"]) + + # Full name for catalog assets + if asset.get("full_name"): + parts.append(asset["full_name"]) + + # Description / comment + for key in ("comment", "description"): + if asset.get(key): + parts.append(asset[key]) + + # Owner + if asset.get("owner"): + parts.append(f"owner: {asset['owner']}") + + # Column names for tables/views + if asset.get("columns_json"): + try: + columns = json.loads(asset["columns_json"]) + col_names = [c.get("name", "") for c in columns if 
c.get("name")] + if col_names: + parts.append("columns: " + ", ".join(col_names[:50])) + except (json.JSONDecodeError, TypeError): + pass + + # Workspace asset path + if asset.get("path"): + parts.append(asset["path"]) + + # Content preview + if asset.get("content_preview"): + parts.append(asset["content_preview"][:500]) + + # Agent-specific fields + if asset.get("capabilities"): + parts.append(f"capabilities: {asset['capabilities']}") + if asset.get("skills"): + try: + skills = json.loads(asset["skills"]) + skill_names = [s.get("name", "") for s in skills if s.get("name")] + if skill_names: + parts.append("skills: " + ", ".join(skill_names)) + except (json.JSONDecodeError, TypeError): + pass + + # Type context + parts.append(f"type: {asset_type}") + + return " ".join(parts) + + async def embed_text(self, text: str) -> List[float]: + """Generate embedding vector for a single text string.""" + if self._use_fmapi: + return await self._embed_via_fmapi(text) + return self._embed_keyword(text) + + async def embed_texts(self, texts: List[str]) -> List[List[float]]: + """Batch-embed multiple text strings.""" + if self._use_fmapi: + return await self._embed_batch_fmapi(texts) + return [self._embed_keyword(t) for t in texts] + + async def embed_asset(self, asset_type: str, asset_id: int, asset: Dict[str, Any]) -> None: + """Generate and store embedding for a single asset.""" + text = self._build_text(asset_type, asset) + if not text.strip(): + return + + embedding = await self.embed_text(text) + dim = len(embedding) + model = self._model if self._use_fmapi else "keyword-hash" + + existing = DatabaseAdapter.get_asset_embedding(asset_type, asset_id) + if existing: + DatabaseAdapter.update_asset_embedding( + existing["id"], + text_content=text, + embedding_json=json.dumps(embedding), + embedding_model=model, + dimension=dim, + ) + else: + DatabaseAdapter.create_asset_embedding( + asset_type=asset_type, + asset_id=asset_id, + text_content=text, + 
embedding_json=json.dumps(embedding), + embedding_model=model, + dimension=dim, + ) + + async def embed_all_assets(self) -> Dict[str, int]: + """Embed all un-embedded assets. Returns counts by type.""" + counts: Dict[str, int] = {} + + # Catalog assets + catalog_assets, total = DatabaseAdapter.list_catalog_assets(page=1, page_size=5000) + for asset in catalog_assets: + existing = DatabaseAdapter.get_asset_embedding(asset["asset_type"], asset["id"]) + if not existing: + await self.embed_asset(asset["asset_type"], asset["id"], asset) + counts[asset["asset_type"]] = counts.get(asset["asset_type"], 0) + 1 + + # Workspace assets + workspace_assets, total = DatabaseAdapter.list_workspace_assets(page=1, page_size=5000) + for asset in workspace_assets: + existing = DatabaseAdapter.get_asset_embedding(asset["asset_type"], asset["id"]) + if not existing: + await self.embed_asset(asset["asset_type"], asset["id"], asset) + counts[asset["asset_type"]] = counts.get(asset["asset_type"], 0) + 1 + + # Apps + apps, total = DatabaseAdapter.list_apps(page=1, page_size=500) + for app in apps: + existing = DatabaseAdapter.get_asset_embedding("app", app["id"]) + if not existing: + await self.embed_asset("app", app["id"], app) + counts["app"] = counts.get("app", 0) + 1 + + # Tools + tools, total = DatabaseAdapter.list_tools(page=1, page_size=500) + for tool in tools: + existing = DatabaseAdapter.get_asset_embedding("tool", tool["id"]) + if not existing: + await self.embed_asset("tool", tool["id"], tool) + counts["tool"] = counts.get("tool", 0) + 1 + + # Agents + agents, total = DatabaseAdapter.list_agents(page=1, page_size=500) + for agent in agents: + existing = DatabaseAdapter.get_asset_embedding("agent", agent["id"]) + if not existing: + await self.embed_asset("agent", agent["id"], agent) + counts["agent"] = counts.get("agent", 0) + 1 + + return counts + + # --- Databricks FMAPI --- + + async def _embed_via_fmapi(self, text: str) -> List[float]: + """Call Databricks Foundation Model 
API for a single embedding.""" + results = await self._embed_batch_fmapi([text]) + return results[0] + + async def _embed_batch_fmapi(self, texts: List[str]) -> List[List[float]]: + """Batch call to Databricks FMAPI embedding endpoint.""" + from databricks.sdk import WorkspaceClient + + w = WorkspaceClient() + workspace_url = w.config.host.rstrip("/") + url = f"{workspace_url}/serving-endpoints/{self._model}/invocations" + + headers = {"Content-Type": "application/json"} + headers.update(w.config.authenticate()) + + payload = {"input": texts} + + async with httpx.AsyncClient(timeout=60.0) as client: + resp = await client.post(url, json=payload, headers=headers) + if resp.status_code != 200: + logger.error("FMAPI embedding error %d: %s", resp.status_code, resp.text) + raise ValueError(f"Embedding API error ({resp.status_code}): {resp.text}") + data = resp.json() + + # Standard OpenAI-compatible response format + embeddings = [] + for item in sorted(data.get("data", []), key=lambda x: x.get("index", 0)): + embeddings.append(item["embedding"]) + + return embeddings + + # --- Keyword fallback --- + + def _embed_keyword(self, text: str) -> List[float]: + """ + Generate a pseudo-embedding using deterministic hash-based feature extraction. + Produces a fixed-size vector from word-level hashing — good enough for + keyword-overlap similarity without any ML dependencies. 
+ """ + dim = KEYWORD_EMBED_DIM + vector = [0.0] * dim + words = text.lower().split() + + for word in words: + # Hash each word to a bucket + h = int(hashlib.md5(word.encode()).hexdigest(), 16) + idx = h % dim + # Use a second hash for sign (simulates random projections) + sign = 1.0 if (h >> 16) % 2 == 0 else -1.0 + vector[idx] += sign + + # L2 normalize + norm = math.sqrt(sum(v * v for v in vector)) + if norm > 0: + vector = [v / norm for v in vector] + + return vector diff --git a/dbx-agent-app/app/backend/app/services/generator.py b/dbx-agent-app/app/backend/app/services/generator.py new file mode 100644 index 00000000..e2ea230c --- /dev/null +++ b/dbx-agent-app/app/backend/app/services/generator.py @@ -0,0 +1,273 @@ +""" +Code generation service for supervisors. + +This service generates code-first supervisors from collections using +Jinja2 templates. Generated supervisors use Pattern 3 (dynamic tool +discovery at runtime). +""" + +from datetime import datetime +from typing import Dict, List, Optional, Any +from pathlib import Path +from jinja2 import Environment, FileSystemLoader, TemplateNotFound + +from app.db_adapter import WarehouseDB # Auto-switches between SQLite and Warehouse +from app.config import settings + + +class GeneratorError(Exception): + """Raised when code generation fails.""" + pass + + +class GeneratorService: + """Service for generating supervisor code from collections.""" + + def __init__(self): + """Initialize generator with Jinja2 environment.""" + template_dir = Path(__file__).parent.parent / "templates" + self.env = Environment( + loader=FileSystemLoader(str(template_dir)), + autoescape=False, # Don't escape Python code + trim_blocks=True, + lstrip_blocks=True, + ) + + def fetch_collection_items( + self, collection_id: int + ) -> tuple[Dict, List[Dict]]: + """ + Fetch collection and its items with full details. 
+ + Args: + collection_id: Collection ID + + Returns: + Tuple of (collection, items_list) + items_list contains dicts with type, name, description, server_url + + Raises: + GeneratorError: If collection not found or has no items + """ + collection = WarehouseDB.get_collection(collection_id) + if not collection: + raise GeneratorError(f"Collection with id {collection_id} not found") + + items_data = [] + + # Get all collection items with joined data + items = WarehouseDB.list_collection_items(collection_id) + + if not items: + # Allow empty collections - they can still generate supervisors + # with no tools initially + pass + + # Process each item + for item in items: + if item.get('tool_id'): + # Individual tool + tool = WarehouseDB.get_tool(item['tool_id']) + if tool: + mcp_server = WarehouseDB.get_mcp_server(tool.get('mcp_server_id')) if tool.get('mcp_server_id') else None + items_data.append({ + "type": "tool", + "name": tool.get('name', ''), + "description": tool.get('description') or "", + "server_url": mcp_server.get('server_url', '') if mcp_server else "", + }) + + elif item.get('mcp_server_id'): + # Entire MCP server (all its tools) + mcp_server = WarehouseDB.get_mcp_server(item['mcp_server_id']) + if mcp_server: + items_data.append({ + "type": "mcp_server", + "name": "MCP Server", + "description": f"All tools from {mcp_server.get('server_url', '')}", + "server_url": mcp_server.get('server_url', ''), + }) + + elif item.get('app_id'): + # Databricks App (all its MCP servers) + app = WarehouseDB.get_app(item['app_id']) + if app: + # For now, just add the app as an item + # TODO: Get MCP servers for this app + items_data.append({ + "type": "app", + "name": app.get('name', ''), + "description": f"Tools from {app.get('name', '')}", + "server_url": app.get('url', ''), + }) + + return collection, items_data + + def resolve_mcp_server_urls(self, items: List[Dict]) -> List[str]: + """ + Extract unique MCP server URLs from collection items. 
+ + Args: + items: List of item dictionaries from fetch_collection_items + + Returns: + List of unique MCP server URLs + """ + urls = set() + for item in items: + server_url = item.get("server_url", "").strip() + if server_url: + urls.add(server_url) + return sorted(list(urls)) + + def generate_supervisor_code( + self, + collection_id: int, + llm_endpoint: str = "databricks-meta-llama-3-1-70b-instruct", + app_name: Optional[str] = None, + ) -> Dict[str, str]: + """ + Generate supervisor code from a collection. + + Creates three files: + - supervisor.py: Main supervisor code with Pattern 3 + - requirements.txt: Python dependencies + - app.yaml: Databricks Apps deployment config + + Args: + collection_id: Collection ID + llm_endpoint: Databricks Foundation Model endpoint name + app_name: Optional custom app name (defaults to collection name) + + Returns: + Dictionary mapping filenames to file contents: + { + "supervisor.py": "...", + "requirements.txt": "...", + "app.yaml": "..." + } + + Raises: + GeneratorError: If generation fails + """ + # Fetch collection data + collection, items = self.fetch_collection_items(collection_id) + + # Extract MCP server URLs + mcp_server_urls = self.resolve_mcp_server_urls(items) + + # Generate app name + if not app_name: + # Convert collection name to valid app name + # e.g., "Expert Research Toolkit" -> "expert-research-toolkit" + collection_name = collection.get('name', 'supervisor') + app_name = collection_name.lower().replace(" ", "-") + app_name = "".join(c for c in app_name if c.isalnum() or c == "-") + + # Template context + context = { + "collection_id": collection_id, + "collection_name": collection.get('name', ''), + "collection_description": collection.get('description') or "", + "generated_at": datetime.utcnow().isoformat() + "Z", + "tool_list": items, + "mcp_server_urls": ",".join(mcp_server_urls), + "llm_endpoint": llm_endpoint, + "app_name": app_name, + "databricks_host": settings.databricks_host or 
"${DATABRICKS_HOST}", + } + + # Generate files + files = {} + + try: + # Generate supervisor.py + template = self.env.get_template("supervisor_code_first.py.jinja2") + files["supervisor.py"] = template.render(**context) + + # Generate requirements.txt + template = self.env.get_template("requirements.txt.jinja2") + files["requirements.txt"] = template.render(**context) + + # Generate app.yaml + template = self.env.get_template("app.yaml.jinja2") + files["app.yaml"] = template.render(**context) + + except TemplateNotFound as e: + raise GeneratorError(f"Template not found: {e.name}") + except Exception as e: + raise GeneratorError(f"Template rendering failed: {str(e)}") + + return files + + def validate_python_syntax(self, code: str) -> tuple[bool, Optional[str]]: + """ + Validate that generated Python code is syntactically correct. + + Args: + code: Python source code + + Returns: + Tuple of (is_valid, error_message) + - (True, None) if valid + - (False, error_msg) if invalid + """ + try: + compile(code, "", "exec") + return True, None + except SyntaxError as e: + return False, f"Syntax error at line {e.lineno}: {e.msg}" + except Exception as e: + return False, f"Validation error: {str(e)}" + + def generate_and_validate( + self, + collection_id: int, + llm_endpoint: str = "databricks-meta-llama-3-1-70b-instruct", + app_name: Optional[str] = None, + ) -> Dict[str, str]: + """ + Generate supervisor code and validate Python syntax. + + This is a convenience method that combines generation and validation. 
# Process-wide singleton, created lazily by get_generator_service().
_generator_service: Optional[GeneratorService] = None


def get_generator_service() -> GeneratorService:
    """
    Return the process-wide GeneratorService, creating it on first use.

    Returns:
        The lazily-created singleton GeneratorService instance.
    """
    global _generator_service
    service = _generator_service
    if service is None:
        # First call: build and memoize the shared instance.
        service = _generator_service = GeneratorService()
    return service
# Regex for extracting table references from SQL.
# Matches two- or three-part names after common SQL keywords:
#   catalog.schema.table  or  schema.table
# The leading \b keeps keyword substrings inside longer identifiers
# (e.g. "bulkupdate", "subjoin") from being misread as SQL keywords.
TABLE_REF_PATTERN = re.compile(
    r'\b(?:FROM|JOIN|INTO|UPDATE|TABLE|MERGE\s+INTO)\s+'
    r'(?:`?(\w+)`?\.)?(?:`?(\w+)`?\.)`?(\w+)`?',
    re.IGNORECASE,
)
Notebook → table references (from indexed content) + self._crawl_notebook_references(stats) + + logger.info( + "Lineage crawl complete: %d discovered, %d new, %d errors", + stats.relationships_discovered, stats.new_relationships, len(stats.errors), + ) + return stats + + async def _crawl_table_lineage(self, stats: LineageCrawlStats) -> None: + """Query UC system.access.table_lineage for table-to-table data flow.""" + try: + w = self._get_client() + + # Build the lookup of catalog assets by full_name for ID resolution + asset_lookup = self._build_catalog_asset_lookup() + if not asset_lookup: + logger.info("No catalog assets indexed — skipping UC lineage crawl") + return + + # Query system table via SQL statement execution + warehouse_id = settings.databricks_warehouse_id + if not warehouse_id: + logger.warning("No warehouse_id configured — cannot query system tables for lineage") + stats.errors.append("No warehouse_id configured for system table queries") + return + + sql = """ + SELECT + source_table_full_name, + target_table_full_name, + source_type, + target_type + FROM system.access.table_lineage + WHERE source_table_full_name IS NOT NULL + AND target_table_full_name IS NOT NULL + GROUP BY 1, 2, 3, 4 + """ + + rows = self._execute_sql(w, warehouse_id, sql) + + for row in rows: + source_full = row.get("source_table_full_name", "") + target_full = row.get("target_table_full_name", "") + + source_asset = asset_lookup.get(source_full) + target_asset = asset_lookup.get(target_full) + + if not source_asset or not target_asset: + continue + + rel_type = "reads_from" + source_uc_type = row.get("source_type", "") + if source_uc_type and "WRITE" in source_uc_type.upper(): + rel_type = "writes_to" + + self._upsert_relationship( + source_type=source_asset["asset_type"], + source_id=source_asset["id"], + source_name=source_full, + target_type=target_asset["asset_type"], + target_id=target_asset["id"], + target_name=target_full, + relationship_type=rel_type, + stats=stats, + 
) + + except Exception as e: + msg = f"UC lineage crawl error: {e}" + logger.error(msg) + stats.errors.append(msg) + + async def _crawl_job_dependencies(self, stats: LineageCrawlStats) -> None: + """Parse job task configs to find job→table scheduling relationships.""" + try: + # Get all indexed jobs + jobs, _ = DatabaseAdapter.list_workspace_assets( + page=1, page_size=1000, asset_type="job" + ) + if not jobs: + return + + asset_lookup = self._build_catalog_asset_lookup() + + for job_asset in jobs: + metadata = {} + if job_asset.get("metadata_json"): + try: + metadata = json.loads(job_asset["metadata_json"]) + except (json.JSONDecodeError, TypeError): + pass + + # Extract table refs from task types + task_types = metadata.get("task_types", "") + if not task_types: + continue + + # If we have content_preview with SQL, extract table refs + content = job_asset.get("content_preview", "") or "" + if not content: + continue + + table_refs = self._extract_table_refs(content) + for ref in table_refs: + target = asset_lookup.get(ref) + if target: + self._upsert_relationship( + source_type="job", + source_id=job_asset["id"], + source_name=job_asset["name"], + target_type=target["asset_type"], + target_id=target["id"], + target_name=ref, + relationship_type="scheduled_by", + stats=stats, + ) + + except Exception as e: + msg = f"Job dependency crawl error: {e}" + logger.error(msg) + stats.errors.append(msg) + + def _crawl_notebook_references(self, stats: LineageCrawlStats) -> None: + """Parse notebook content previews for SQL table references.""" + try: + notebooks, _ = DatabaseAdapter.list_workspace_assets( + page=1, page_size=2000, asset_type="notebook" + ) + if not notebooks: + return + + asset_lookup = self._build_catalog_asset_lookup() + if not asset_lookup: + return + + for nb in notebooks: + content = nb.get("content_preview", "") or "" + if not content: + continue + + table_refs = self._extract_table_refs(content) + for ref in table_refs: + target = 
asset_lookup.get(ref) + if target: + self._upsert_relationship( + source_type="notebook", + source_id=nb["id"], + source_name=nb["name"], + target_type=target["asset_type"], + target_id=target["id"], + target_name=ref, + relationship_type="reads_from", + stats=stats, + ) + + except Exception as e: + msg = f"Notebook reference crawl error: {e}" + logger.error(msg) + stats.errors.append(msg) + + # --- Helpers --- + + def _build_catalog_asset_lookup(self) -> Dict[str, Dict]: + """Build {full_name: asset_dict} lookup for all catalog assets.""" + assets, _ = DatabaseAdapter.list_catalog_assets(page=1, page_size=5000) + return {a["full_name"]: a for a in assets if a.get("full_name")} + + def _extract_table_refs(self, sql_text: str) -> List[str]: + """Extract three-part table references from SQL text.""" + refs = set() + for match in TABLE_REF_PATTERN.finditer(sql_text): + catalog, schema, table = match.group(1), match.group(2), match.group(3) + if catalog and schema and table: + refs.add(f"{catalog}.{schema}.{table}") + elif schema and table: + refs.add(f"{schema}.{table}") + return list(refs) + + def _execute_sql(self, client, warehouse_id: str, sql: str) -> List[Dict]: + """Execute SQL via Databricks Statement Execution API.""" + try: + result = client.statement_execution.execute_statement( + warehouse_id=warehouse_id, + statement=sql, + wait_timeout="120s", + ) + + if not result.result or not result.result.data_array: + return [] + + columns = [col.name for col in result.manifest.schema.columns] + rows = [] + for row_data in result.result.data_array: + row = dict(zip(columns, row_data)) + rows.append(row) + return rows + + except Exception as e: + logger.error("SQL execution error: %s", e) + return [] + + def _upsert_relationship( + self, + source_type: str, + source_id: int, + source_name: str, + target_type: str, + target_id: int, + target_name: str, + relationship_type: str, + stats: LineageCrawlStats, + metadata: Optional[Dict] = None, + ) -> None: + """Create or 
@dataclass
class MCPTool:
    """
    Represents a tool discovered from an MCP server.

    Mirrors one entry of a JSON-RPC ``tools/list`` response as parsed by
    MCPClient.list_tools.

    Attributes:
        name: Tool identifier (e.g., "search_transcripts")
        description: Human-readable description, or None when the server
            omitted one
        input_schema: JSON Schema for tool parameters (the server's
            ``inputSchema`` field; may be an empty dict)
    """

    name: str
    description: Optional[str]
    input_schema: Dict[str, Any]
+ + Args: + server_url: MCP server endpoint URL + method: JSON-RPC method name + params: Optional method parameters + + Returns: + JSON-RPC response result + + Raises: + MCPConnectionError: If connection fails + MCPTimeoutError: If request times out + """ + if not self._client: + raise MCPConnectionError("Client not initialized. Use async context manager.") + + request_payload = { + "jsonrpc": "2.0", + "id": self._next_request_id(), + "method": method, + } + + if params: + request_payload["params"] = params + + try: + headers = {"Content-Type": "application/json"} + if auth_token: + headers["Authorization"] = f"Bearer {auth_token}" + + response = await self._client.post( + server_url, + json=request_payload, + headers=headers, + ) + response.raise_for_status() + + result = response.json() + + if "error" in result: + error = result["error"] + raise MCPConnectionError( + f"MCP server error: {error.get('message', 'Unknown error')} " + f"(code: {error.get('code', 'unknown')})" + ) + + return result.get("result", {}) + + except httpx.TimeoutException as e: + raise MCPTimeoutError(f"Request to {server_url} timed out: {str(e)}") + except httpx.HTTPError as e: + raise MCPConnectionError(f"HTTP error connecting to {server_url}: {str(e)}") + except json.JSONDecodeError as e: + raise MCPConnectionError(f"Invalid JSON response from {server_url}: {str(e)}") + + async def list_tools(self, server_url: str, auth_token: Optional[str] = None) -> List[MCPTool]: + """ + List all tools available from an MCP server. + + Sends a 'tools/list' JSON-RPC request to the MCP server and + parses the response into MCPTool objects. + + Args: + server_url: MCP server endpoint URL + + Returns: + List of MCPTool objects + + Raises: + MCPConnectionError: If connection or parsing fails + MCPTimeoutError: If request times out + + Example: + >>> async with MCPClient() as client: + ... tools = await client.list_tools("https://mcp.example.com") + ... for tool in tools: + ... 
print(f"{tool.name}: {tool.description}") + """ + try: + result = await self._send_jsonrpc_request( + server_url=server_url, + method="tools/list", + params={}, + auth_token=auth_token, + ) + + tools_data = result.get("tools", []) + tools = [] + + for tool_data in tools_data: + try: + tool = MCPTool( + name=tool_data.get("name", ""), + description=tool_data.get("description"), + input_schema=tool_data.get("inputSchema", {}), + ) + + if not tool.name: + continue + + tools.append(tool) + + except (KeyError, TypeError) as e: + # Log but don't fail entire operation for one bad tool + continue + + return tools + + except (MCPConnectionError, MCPTimeoutError): + raise + except Exception as e: + raise MCPConnectionError(f"Unexpected error listing tools: {str(e)}") + + async def get_server_info(self, server_url: str) -> Dict[str, Any]: + """ + Get server information from an MCP server. + + Sends an 'initialize' JSON-RPC request to get server capabilities + and metadata. + + Args: + server_url: MCP server endpoint URL + + Returns: + Server information dictionary + + Raises: + MCPConnectionError: If connection fails + MCPTimeoutError: If request times out + """ + try: + result = await self._send_jsonrpc_request( + server_url=server_url, + method="initialize", + params={ + "protocolVersion": "2024-11-05", + "clientInfo": { + "name": settings.api_title, + "version": settings.api_version, + }, + }, + ) + + return result + + except (MCPConnectionError, MCPTimeoutError): + raise + except Exception as e: + raise MCPConnectionError(f"Unexpected error getting server info: {str(e)}") + + async def ping(self, server_url: str) -> bool: + """ + Ping an MCP server to check if it's alive. + + Sends a 'ping' JSON-RPC request (or attempts connection). 
+ + Args: + server_url: MCP server endpoint URL + + Returns: + True if server is reachable, False otherwise + """ + try: + await self._send_jsonrpc_request( + server_url=server_url, + method="ping", + params={}, + ) + return True + except (MCPConnectionError, MCPTimeoutError): + return False + except Exception: + return False + + +async def create_mcp_client() -> MCPClient: + """ + Factory function to create an MCP client instance. + + Returns: + Configured MCPClient instance + """ + return MCPClient( + timeout=30.0, + max_connections=10, + max_keepalive_connections=5, + ) diff --git a/dbx-agent-app/app/backend/app/services/orchestrator.py b/dbx-agent-app/app/backend/app/services/orchestrator.py new file mode 100644 index 00000000..b12bff80 --- /dev/null +++ b/dbx-agent-app/app/backend/app/services/orchestrator.py @@ -0,0 +1,365 @@ +""" +Orchestrator Service — multi-agent planning, execution, and evaluation. + +Transforms a user message into an orchestration plan (simple or complex), +executes sub-tasks across agents in parallel where possible, +and evaluates the combined results for quality. 
+""" + +import asyncio +import json +import logging +import time +from dataclasses import dataclass, field, asdict +from typing import List, Dict, Any, Optional + +import httpx + +from app.config import settings + +logger = logging.getLogger(__name__) + +MAX_RETRIES = 1 + + +# ── Data structures ────────────────────────────────────────────────── + +@dataclass +class SubTask: + description: str + agent_id: int + agent_name: str + depends_on: List[int] = field(default_factory=list) # indices into sub_tasks list + + +@dataclass +class OrchestrationPlan: + complexity: str # "simple" | "complex" + reasoning: str + sub_tasks: List[SubTask] = field(default_factory=list) + + +@dataclass +class SubTaskResult: + task_index: int + agent_id: int + agent_name: str + description: str + response: str + latency_ms: int + success: bool + error: Optional[str] = None + + +@dataclass +class OrchestrationResult: + final_response: str + quality_score: float # 1-5 + needs_retry: bool + retry_suggestions: Optional[str] = None + + +# ── Helper ──────────────────────────────────────────────────────────── + +def _get_llm_config(): + """Get Databricks LLM endpoint URL and auth headers.""" + from databricks.sdk import WorkspaceClient + w = WorkspaceClient() + endpoint_url = f"{w.config.host}/serving-endpoints/{settings.llm_endpoint}/invocations" + headers = {"Content-Type": "application/json"} + headers.update(w.config.authenticate()) + return endpoint_url, headers + + +async def _llm_call(endpoint_url: str, headers: dict, messages: list, max_tokens: int = 4096) -> str: + """Make a single LLM call and return the content string.""" + async with httpx.AsyncClient(timeout=120.0) as client: + resp = await client.post( + endpoint_url, + headers=headers, + json={ + "messages": messages, + "max_tokens": max_tokens, + "temperature": 0.1, + }, + ) + resp.raise_for_status() + data = resp.json() + return data.get("choices", [{}])[0].get("message", {}).get("content", "") + + +def 
_parse_json_from_llm(text: str) -> dict: + """Extract the first JSON object from LLM output, handling markdown fences.""" + cleaned = text.strip() + if cleaned.startswith("```"): + lines = cleaned.split("\n") + # Drop first and last fence lines + json_lines = [] + inside = False + for line in lines: + if line.strip().startswith("```") and not inside: + inside = True + continue + if line.strip().startswith("```") and inside: + break + if inside: + json_lines.append(line) + cleaned = "\n".join(json_lines) + return json.loads(cleaned) + + +# ── Orchestrator class ──────────────────────────────────────────────── + +class Orchestrator: + """Plans, executes, and evaluates multi-agent orchestrations.""" + + async def classify_and_plan( + self, + message: str, + available_agents: List[Dict[str, Any]], + ) -> OrchestrationPlan: + """Classify the query and produce an orchestration plan. + + Args: + message: The user's natural-language query. + available_agents: List of agent dicts (id, name, description, capabilities). + + Returns: + OrchestrationPlan with complexity and sub-tasks. + """ + endpoint_url, headers = _get_llm_config() + + agent_descriptions = "\n".join( + f"- Agent ID {a['id']}: {a['name']} — {a.get('description', 'no description')} " + f"[capabilities: {a.get('capabilities', 'general')}]" + for a in available_agents + ) + + system_prompt = """You are an orchestration planner. Given a user query and a list of available agents, +decide whether the query is "simple" (one agent can handle it) or "complex" (needs multiple agents). + +For complex queries, decompose into sub-tasks with agent assignments. 
+ +Respond with ONLY a JSON object (no markdown fencing, no explanation): +{ + "complexity": "simple" or "complex", + "reasoning": "brief explanation of your decision", + "sub_tasks": [ + { + "description": "what this sub-task should accomplish", + "agent_id": , + "agent_name": "", + "depends_on": [] + } + ] +} + +Rules: +- depends_on contains indices (0-based) of sub_tasks this task depends on. +- For simple queries, return exactly one sub-task. +- Only assign agents from the available list. +- Prefer parallel execution: minimize dependencies.""" + + user_prompt = f"""Available agents: +{agent_descriptions} + +User query: {message}""" + + raw = await _llm_call(endpoint_url, headers, [ + {"role": "system", "content": system_prompt}, + {"role": "user", "content": user_prompt}, + ]) + + try: + parsed = _parse_json_from_llm(raw) + except (json.JSONDecodeError, ValueError): + logger.warning("Failed to parse plan JSON, falling back to simple plan. Raw: %s", raw[:500]) + if available_agents: + first = available_agents[0] + return OrchestrationPlan( + complexity="simple", + reasoning="Failed to parse LLM plan; delegating to best-match agent.", + sub_tasks=[SubTask( + description=message, + agent_id=first["id"], + agent_name=first["name"], + )], + ) + return OrchestrationPlan(complexity="simple", reasoning="No agents available.", sub_tasks=[]) + + sub_tasks = [] + for st in parsed.get("sub_tasks", []): + sub_tasks.append(SubTask( + description=st.get("description", message), + agent_id=st.get("agent_id", 0), + agent_name=st.get("agent_name", "unknown"), + depends_on=st.get("depends_on", []), + )) + + return OrchestrationPlan( + complexity=parsed.get("complexity", "simple"), + reasoning=parsed.get("reasoning", ""), + sub_tasks=sub_tasks, + ) + + async def execute_plan( + self, + plan: OrchestrationPlan, + base_url: str, + ) -> List[SubTaskResult]: + """Execute all sub-tasks, respecting dependency order. 
+ + Tasks are grouped into levels: level 0 has no deps, level 1 depends + only on level-0 tasks, etc. Tasks within a level run concurrently. + """ + from app.routes.supervisor_runtime import _delegate_to_agent + + results: List[Optional[SubTaskResult]] = [None] * len(plan.sub_tasks) + completed_indices: set = set() + + # Build levels + levels = self._build_execution_levels(plan.sub_tasks) + + for level in levels: + coros = [] + for idx in level: + st = plan.sub_tasks[idx] + coros.append(self._execute_single(idx, st, base_url, _delegate_to_agent)) + + level_results = await asyncio.gather(*coros, return_exceptions=True) + for i, res in enumerate(level_results): + idx = level[i] + if isinstance(res, Exception): + results[idx] = SubTaskResult( + task_index=idx, + agent_id=plan.sub_tasks[idx].agent_id, + agent_name=plan.sub_tasks[idx].agent_name, + description=plan.sub_tasks[idx].description, + response="", + latency_ms=0, + success=False, + error=str(res), + ) + else: + results[idx] = res + completed_indices.add(idx) + + return [r for r in results if r is not None] + + async def evaluate_results( + self, + message: str, + plan: OrchestrationPlan, + results: List[SubTaskResult], + ) -> OrchestrationResult: + """Evaluate whether the combined results adequately answer the query.""" + endpoint_url, headers = _get_llm_config() + + results_summary = "\n\n".join( + f"Sub-task {r.task_index} ({r.agent_name}): {'SUCCESS' if r.success else 'FAILED'}\n" + f"Task: {r.description}\n" + f"Response: {r.response[:1000]}" + for r in results + ) + + system_prompt = """You are an evaluation agent. Given the original user query and sub-task results, +do TWO things: + +1. Synthesize a clear, helpful final response for the user that combines all sub-task results. +2. Rate the overall quality. 
+ +Respond with ONLY a JSON object: +{ + "final_response": "The complete, user-facing answer synthesizing all results.", + "quality_score": , + "needs_retry": , + "retry_suggestions": "" +} + +A quality_score below 2.5 should set needs_retry to true.""" + + user_prompt = f"""Original query: {message} + +Plan reasoning: {plan.reasoning} + +Sub-task results: +{results_summary}""" + + raw = await _llm_call(endpoint_url, headers, [ + {"role": "system", "content": system_prompt}, + {"role": "user", "content": user_prompt}, + ]) + + try: + parsed = _parse_json_from_llm(raw) + except (json.JSONDecodeError, ValueError): + logger.warning("Failed to parse evaluation JSON, using raw response.") + return OrchestrationResult( + final_response=raw or "Unable to synthesize results.", + quality_score=3.0, + needs_retry=False, + ) + + return OrchestrationResult( + final_response=parsed.get("final_response", raw), + quality_score=float(parsed.get("quality_score", 3.0)), + needs_retry=bool(parsed.get("needs_retry", False)), + retry_suggestions=parsed.get("retry_suggestions"), + ) + + # ── Internals ───────────────────────────────────────────────────── + + @staticmethod + def _build_execution_levels(sub_tasks: List[SubTask]) -> List[List[int]]: + """Group sub-task indices into execution levels by dependency depth.""" + n = len(sub_tasks) + if n == 0: + return [] + + depth = [0] * n + for i, st in enumerate(sub_tasks): + for dep_idx in st.depends_on: + if 0 <= dep_idx < n: + depth[i] = max(depth[i], depth[dep_idx] + 1) + + max_depth = max(depth) if depth else 0 + levels: List[List[int]] = [[] for _ in range(max_depth + 1)] + for i, d in enumerate(depth): + levels[d].append(i) + return levels + + @staticmethod + async def _execute_single( + idx: int, + sub_task: SubTask, + base_url: str, + delegate_fn, + ) -> SubTaskResult: + """Execute a single sub-task via A2A delegation.""" + start = time.monotonic() + try: + response = await delegate_fn(sub_task.agent_id, sub_task.description, 
base_url) + elapsed = int((time.monotonic() - start) * 1000) + is_error = response.startswith("Error:") or response.startswith("Delegation error:") or response.startswith("Delegation failed:") + return SubTaskResult( + task_index=idx, + agent_id=sub_task.agent_id, + agent_name=sub_task.agent_name, + description=sub_task.description, + response=response, + latency_ms=elapsed, + success=not is_error, + error=response if is_error else None, + ) + except Exception as e: + elapsed = int((time.monotonic() - start) * 1000) + return SubTaskResult( + task_index=idx, + agent_id=sub_task.agent_id, + agent_name=sub_task.agent_name, + description=sub_task.description, + response="", + latency_ms=elapsed, + success=False, + error=str(e), + ) diff --git a/dbx-agent-app/app/backend/app/services/search.py b/dbx-agent-app/app/backend/app/services/search.py new file mode 100644 index 00000000..b05ac46c --- /dev/null +++ b/dbx-agent-app/app/backend/app/services/search.py @@ -0,0 +1,390 @@ +""" +Search Service — unified semantic + keyword search across all asset types. + +Combines: + 1. Vector similarity (cosine) from stored embeddings + 2. Keyword matching (ILIKE) from the database + 3. Relevance ranking with quality/recency signals + +Returns a single ranked list of SearchResultItem objects. 
+""" + +import json +import logging +import math +from typing import List, Dict, Any, Optional, Tuple + +from app.db_adapter import DatabaseAdapter +from app.services.embedding import EmbeddingService +from app.schemas.search import SearchResultItem + +logger = logging.getLogger(__name__) + + +def _cosine_similarity(a: List[float], b: List[float]) -> float: + """Compute cosine similarity between two vectors.""" + dot = sum(x * y for x, y in zip(a, b)) + norm_a = math.sqrt(sum(x * x for x in a)) + norm_b = math.sqrt(sum(x * x for x in b)) + if norm_a == 0 or norm_b == 0: + return 0.0 + return dot / (norm_a * norm_b) + + +class SearchService: + """Unified search across all asset types.""" + + def __init__(self): + self._embedding_service = EmbeddingService() + + async def search( + self, + query: str, + types: Optional[List[str]] = None, + catalogs: Optional[List[str]] = None, + owner: Optional[str] = None, + limit: int = 20, + ) -> Tuple[List[SearchResultItem], str]: + """ + Execute a unified search. + + Returns (results, search_mode) where search_mode is + 'semantic', 'keyword', or 'hybrid'. 
+ """ + # Try semantic search first + semantic_results = await self._semantic_search(query, types, limit * 2) + + # Also run keyword search + keyword_results = self._keyword_search(query, types, catalogs, owner, limit * 2) + + if semantic_results and keyword_results: + merged = self._merge_results(semantic_results, keyword_results, limit) + return merged, "hybrid" + elif semantic_results: + return semantic_results[:limit], "semantic" + else: + return keyword_results[:limit], "keyword" + + async def _semantic_search( + self, + query: str, + types: Optional[List[str]], + limit: int, + ) -> List[SearchResultItem]: + """Search by embedding similarity.""" + # Get all embeddings (for small-scale, load into memory) + embeddings = DatabaseAdapter.list_all_asset_embeddings() + if not embeddings: + return [] + + # Embed the query + query_vec = await self._embedding_service.embed_text(query) + + # Compute similarities + scored: List[Tuple[float, Dict]] = [] + for emb in embeddings: + if types and emb["asset_type"] not in types: + continue + + asset_vec = json.loads(emb["embedding_json"]) + sim = _cosine_similarity(query_vec, asset_vec) + if sim > 0.05: # threshold to cut noise + scored.append((sim, emb)) + + # Sort by similarity descending + scored.sort(key=lambda x: x[0], reverse=True) + + results: List[SearchResultItem] = [] + for sim, emb in scored[:limit]: + asset = self._load_asset(emb["asset_type"], emb["asset_id"]) + if not asset: + continue + + results.append(SearchResultItem( + asset_type=emb["asset_type"], + asset_id=emb["asset_id"], + name=asset.get("name", ""), + description=asset.get("comment") or asset.get("description"), + full_name=asset.get("full_name"), + path=asset.get("path"), + owner=asset.get("owner"), + score=round(sim, 4), + match_type="semantic", + snippet=self._make_snippet(emb.get("text_content", ""), query), + )) + + return results + + def _keyword_search( + self, + query: str, + types: Optional[List[str]], + catalogs: Optional[List[str]] = None, + 
owner: Optional[str] = None, + limit: int = 40, + ) -> List[SearchResultItem]: + """Search by keyword matching across all asset tables.""" + results: List[SearchResultItem] = [] + per_type_limit = max(limit // 3, 10) + + # Catalog assets + if not types or any(t in ("table", "view", "function", "model", "volume") for t in types): + asset_type_filter = None + if types: + catalog_types = [t for t in types if t in ("table", "view", "function", "model", "volume")] + if len(catalog_types) == 1: + asset_type_filter = catalog_types[0] + + catalog_filter = catalogs[0] if catalogs and len(catalogs) == 1 else None + + assets, _ = DatabaseAdapter.list_catalog_assets( + page=1, page_size=per_type_limit, + asset_type=asset_type_filter, + catalog=catalog_filter, + search=query, + owner=owner, + ) + for asset in assets: + score = self._keyword_score(query, asset) + results.append(SearchResultItem( + asset_type=asset["asset_type"], + asset_id=asset["id"], + name=asset["name"], + description=asset.get("comment"), + full_name=asset.get("full_name"), + owner=asset.get("owner"), + score=round(score, 4), + match_type="keyword", + snippet=asset.get("comment", "")[:200] if asset.get("comment") else None, + )) + + # Workspace assets + if not types or any(t in ("notebook", "job", "dashboard", "pipeline", "cluster", "experiment") for t in types): + ws_type_filter = None + if types: + ws_types = [t for t in types if t in ("notebook", "job", "dashboard", "pipeline", "cluster", "experiment")] + if len(ws_types) == 1: + ws_type_filter = ws_types[0] + + assets, _ = DatabaseAdapter.list_workspace_assets( + page=1, page_size=per_type_limit, + asset_type=ws_type_filter, + search=query, + owner=owner, + ) + for asset in assets: + score = self._keyword_score(query, asset) + results.append(SearchResultItem( + asset_type=asset["asset_type"], + asset_id=asset["id"], + name=asset["name"], + description=asset.get("description"), + path=asset.get("path"), + owner=asset.get("owner"), + score=round(score, 4), 
+ match_type="keyword", + snippet=asset.get("description", "")[:200] if asset.get("description") else None, + )) + + # Apps + if not types or "app" in types: + apps, _ = DatabaseAdapter.list_apps(page=1, page_size=per_type_limit) + for app in apps: + if query.lower() in (app.get("name", "") or "").lower(): + score = self._keyword_score(query, app) + results.append(SearchResultItem( + asset_type="app", + asset_id=app["id"], + name=app["name"], + description=None, + owner=app.get("owner"), + score=round(score, 4), + match_type="keyword", + )) + + # Tools + if not types or "tool" in types: + tools, _ = DatabaseAdapter.list_tools(page=1, page_size=per_type_limit) + for tool in tools: + searchable = f"{tool.get('name', '')} {tool.get('description', '')}".lower() + if query.lower() in searchable: + score = self._keyword_score(query, tool) + results.append(SearchResultItem( + asset_type="tool", + asset_id=tool["id"], + name=tool["name"], + description=tool.get("description"), + score=round(score, 4), + match_type="keyword", + )) + + # Agents + if not types or "agent" in types: + agents, _ = DatabaseAdapter.list_agents(page=1, page_size=per_type_limit) + for agent in agents: + searchable = f"{agent.get('name', '')} {agent.get('description', '')} {agent.get('capabilities', '')}".lower() + if query.lower() in searchable: + score = self._keyword_score(query, agent) + results.append(SearchResultItem( + asset_type="agent", + asset_id=agent["id"], + name=agent["name"], + description=agent.get("description"), + score=round(score, 4), + match_type="keyword", + )) + + # Sort by score + results.sort(key=lambda r: r.score, reverse=True) + return results[:limit] + + def _keyword_score(self, query: str, asset: Dict[str, Any]) -> float: + """Simple keyword relevance score (0-1).""" + query_lower = query.lower() + query_words = query_lower.split() + score = 0.0 + + name = (asset.get("name", "") or "").lower() + full_name = (asset.get("full_name", "") or "").lower() + desc = 
(asset.get("comment") or asset.get("description") or "").lower() + + # Exact name match — highest signal + if query_lower == name: + score += 1.0 + elif query_lower in name: + score += 0.7 + elif query_lower in full_name: + score += 0.5 + + # Word-level matches in name + for word in query_words: + if len(word) < 3: + continue + if word in name: + score += 0.3 + if word in desc: + score += 0.1 + + # Quality boost: assets with descriptions rank higher + if desc: + score += 0.05 + + # Owner match + if asset.get("owner") and query_lower in (asset["owner"] or "").lower(): + score += 0.2 + + return min(score, 1.0) + + def _merge_results( + self, + semantic: List[SearchResultItem], + keyword: List[SearchResultItem], + limit: int, + ) -> List[SearchResultItem]: + """Merge semantic and keyword results with deduplication.""" + seen = set() + merged: List[SearchResultItem] = [] + + # Build lookup of keyword scores + kw_scores: Dict[str, float] = {} + for r in keyword: + key = f"{r.asset_type}:{r.asset_id}" + kw_scores[key] = r.score + + # Semantic results get boosted if they also match keywords + for r in semantic: + key = f"{r.asset_type}:{r.asset_id}" + if key in seen: + continue + seen.add(key) + + kw_boost = kw_scores.get(key, 0.0) * 0.3 + merged.append(SearchResultItem( + asset_type=r.asset_type, + asset_id=r.asset_id, + name=r.name, + description=r.description, + full_name=r.full_name, + path=r.path, + owner=r.owner, + score=round(min(r.score + kw_boost, 1.0), 4), + match_type="hybrid" if kw_boost > 0 else "semantic", + snippet=r.snippet, + )) + + # Add keyword-only results not covered by semantic + for r in keyword: + key = f"{r.asset_type}:{r.asset_id}" + if key in seen: + continue + seen.add(key) + merged.append(r) + + merged.sort(key=lambda r: r.score, reverse=True) + return merged[:limit] + + def _load_asset(self, asset_type: str, asset_id: int) -> Optional[Dict]: + """Load full asset dict by type + id.""" + catalog_types = {"table", "view", "function", "model", 
"volume"} + workspace_types = {"notebook", "job", "dashboard", "pipeline", "cluster", "experiment"} + + if asset_type in catalog_types: + return DatabaseAdapter.get_catalog_asset(asset_id) + elif asset_type in workspace_types: + return DatabaseAdapter.get_workspace_asset(asset_id) + elif asset_type == "app": + return DatabaseAdapter.get_app(asset_id) + elif asset_type == "tool": + return DatabaseAdapter.get_tool(asset_id) + elif asset_type == "agent": + return DatabaseAdapter.get_agent(asset_id) + elif asset_type == "server": + return DatabaseAdapter.get_mcp_server(asset_id) + return None + + async def match_agents( + self, task_description: str, limit: int = 5, + ) -> List[Dict[str, Any]]: + """Find agents whose capabilities match a task description. + + Returns a list of {"agent": {...}, "score": float} dicts, + sorted by cosine similarity descending. + """ + query_vec = await self._embedding_service.embed_text(task_description) + + embeddings = DatabaseAdapter.list_all_asset_embeddings() + agent_embeddings = [e for e in embeddings if e["asset_type"] == "agent"] + + scored: List[Dict[str, Any]] = [] + for emb in agent_embeddings: + asset_vec = json.loads(emb["embedding_json"]) + sim = _cosine_similarity(query_vec, asset_vec) + if sim > 0.1: + agent = DatabaseAdapter.get_agent(emb["asset_id"]) + if agent and agent.get("status") == "active" and agent.get("endpoint_url"): + scored.append({"agent": agent, "score": sim}) + + scored.sort(key=lambda x: x["score"], reverse=True) + return scored[:limit] + + def _make_snippet(self, text: str, query: str, max_len: int = 200) -> Optional[str]: + """Extract a relevant snippet around the query match.""" + if not text: + return None + + query_lower = query.lower() + text_lower = text.lower() + idx = text_lower.find(query_lower) + + if idx >= 0: + start = max(0, idx - 50) + end = min(len(text), idx + len(query) + 150) + snippet = text[start:end] + if start > 0: + snippet = "..." 
+ snippet + if end < len(text): + snippet = snippet + "..." + return snippet + + # No exact match — return beginning + return text[:max_len] + ("..." if len(text) > max_len else "") diff --git a/dbx-agent-app/app/backend/app/services/tool_parser.py b/dbx-agent-app/app/backend/app/services/tool_parser.py new file mode 100644 index 00000000..3c5685f2 --- /dev/null +++ b/dbx-agent-app/app/backend/app/services/tool_parser.py @@ -0,0 +1,208 @@ +""" +Tool specification parser and normalizer. + +This module handles parsing tool specifications from MCP servers +and normalizing them to the registry's schema format. +""" + +import json +from typing import Dict, Any, Optional +from dataclasses import dataclass + + +@dataclass +class NormalizedTool: + """ + Normalized tool specification for registry storage. + + Attributes: + name: Tool identifier (required) + description: Human-readable description (optional) + parameters: JSON Schema string for parameters (optional) + """ + + name: str + description: Optional[str] + parameters: Optional[str] + + +class ToolParser: + """ + Parser for tool specifications from MCP servers. + + Handles extracting and normalizing tool metadata from various + MCP server response formats. + """ + + @staticmethod + def extract_parameters_schema(input_schema: Dict[str, Any]) -> str: + """ + Extract parameters schema from MCP input schema. + + Converts the input schema to a JSON string for storage. + Handles missing or invalid schemas gracefully. 
+ + Args: + input_schema: MCP tool input schema (JSON Schema format) + + Returns: + JSON string of parameters schema + + Example: + >>> schema = {"type": "object", "properties": {"query": {"type": "string"}}} + >>> result = ToolParser.extract_parameters_schema(schema) + >>> print(result) + '{"type": "object", "properties": {"query": {"type": "string"}}}' + """ + if not input_schema or not isinstance(input_schema, dict): + return "{}" + + try: + return json.dumps(input_schema, separators=(",", ":")) + except (TypeError, ValueError): + return "{}" + + @staticmethod + def normalize_description(description: Optional[str]) -> Optional[str]: + """ + Normalize tool description. + + Handles missing descriptions, trims whitespace, and limits length. + + Args: + description: Raw description from MCP server + + Returns: + Normalized description or None + """ + if not description: + return None + + normalized = description.strip() + + if not normalized: + return None + + # Limit description length to prevent database issues + max_length = 5000 + if len(normalized) > max_length: + normalized = normalized[:max_length] + "..." + + return normalized + + @staticmethod + def normalize_name(name: str) -> str: + """ + Normalize tool name. + + Ensures name is valid and consistent. + + Args: + name: Raw tool name from MCP server + + Returns: + Normalized tool name + + Raises: + ValueError: If name is empty or invalid + """ + if not name or not isinstance(name, str): + raise ValueError("Tool name is required and must be a string") + + normalized = name.strip() + + if not normalized: + raise ValueError("Tool name cannot be empty") + + # Limit name length + max_length = 255 + if len(normalized) > max_length: + normalized = normalized[:max_length] + + return normalized + + @staticmethod + def parse_tool(tool_data: Dict[str, Any]) -> NormalizedTool: + """ + Parse and normalize a tool specification. + + Converts raw MCP tool data into a normalized format for registry storage. 
+ + Args: + tool_data: Raw tool data from MCP server + + Returns: + NormalizedTool with validated and normalized fields + + Raises: + ValueError: If tool data is invalid or missing required fields + + Example: + >>> tool_data = { + ... "name": "search_experts", + ... "description": "Search for experts by keyword", + ... "inputSchema": { + ... "type": "object", + ... "properties": {"query": {"type": "string"}} + ... } + ... } + >>> tool = ToolParser.parse_tool(tool_data) + >>> print(tool.name) + search_experts + """ + if not isinstance(tool_data, dict): + raise ValueError("Tool data must be a dictionary") + + # Extract and normalize name (required) + raw_name = tool_data.get("name") + name = ToolParser.normalize_name(raw_name) + + # Extract and normalize description (optional) + raw_description = tool_data.get("description") + description = ToolParser.normalize_description(raw_description) + + # Extract and normalize parameters (optional) + input_schema = tool_data.get("inputSchema", {}) + parameters = ToolParser.extract_parameters_schema(input_schema) + + return NormalizedTool( + name=name, + description=description, + parameters=parameters, + ) + + +def normalize_tool_spec( + name: str, + description: Optional[str] = None, + input_schema: Optional[Dict[str, Any]] = None, +) -> NormalizedTool: + """ + Convenience function to normalize a tool specification. + + Args: + name: Tool name + description: Tool description (optional) + input_schema: Tool parameters schema (optional) + + Returns: + NormalizedTool instance + + Example: + >>> tool = normalize_tool_spec( + ... name="my_tool", + ... description="Does something useful", + ... input_schema={"type": "object"} + ... 
) + >>> print(tool.name) + my_tool + """ + tool_data = {"name": name} + + if description: + tool_data["description"] = description + + if input_schema: + tool_data["inputSchema"] = input_schema + + return ToolParser.parse_tool(tool_data) diff --git a/dbx-agent-app/app/backend/app/services/workspace_crawler.py b/dbx-agent-app/app/backend/app/services/workspace_crawler.py new file mode 100644 index 00000000..94a0abca --- /dev/null +++ b/dbx-agent-app/app/backend/app/services/workspace_crawler.py @@ -0,0 +1,340 @@ +""" +Databricks workspace object crawler service. + +Indexes notebooks, jobs, dashboards, pipelines, clusters, and experiments +using the Databricks SDK into the registry database. +""" + +import json +import logging +from dataclasses import dataclass, field +from datetime import datetime, timezone +from typing import Dict, List, Optional + +from app.config import settings + +logger = logging.getLogger(__name__) + +ALL_ASSET_TYPES = ["notebook", "job", "dashboard", "pipeline", "cluster", "experiment"] + + +@dataclass +class WorkspaceCrawlStats: + """Tracks workspace crawl progress and results.""" + + assets_discovered: int = 0 + new_assets: int = 0 + updated_assets: int = 0 + by_type: Dict[str, int] = field(default_factory=dict) + errors: List[str] = field(default_factory=list) + + +class WorkspaceCrawlerService: + """ + Crawls a Databricks workspace to index notebooks, jobs, dashboards, + pipelines, clusters, and experiments. + + Uses the Databricks SDK WorkspaceClient for each asset type. 
+ """ + + def __init__(self, profile: Optional[str] = None): + self._profile = profile + self._workspace_host: Optional[str] = None + + def _get_client(self): + """Get a Databricks WorkspaceClient.""" + from databricks.sdk import WorkspaceClient + + if self._profile: + client = WorkspaceClient(profile=self._profile) + else: + client = WorkspaceClient() + self._workspace_host = str(client.config.host).rstrip("/") + return client + + def crawl( + self, + asset_types: Optional[List[str]] = None, + root_path: str = "/", + ) -> WorkspaceCrawlStats: + """ + Crawl workspace and upsert assets into the database. + + Args: + asset_types: Specific types to crawl. If None, crawls all. + root_path: Root path for notebook crawl. + + Returns: + WorkspaceCrawlStats with crawl results. + """ + stats = WorkspaceCrawlStats() + client = self._get_client() + now = datetime.now(timezone.utc) + types_to_crawl = asset_types or ALL_ASSET_TYPES + + for asset_type in types_to_crawl: + try: + crawler = getattr(self, f"_crawl_{asset_type}s", None) + if crawler: + crawler(client, now, stats, root_path=root_path) + else: + stats.errors.append(f"Unknown asset type: {asset_type}") + except Exception as e: + stats.errors.append(f"Failed to crawl {asset_type}s: {e}") + logger.error("Workspace crawl error for %ss: %s", asset_type, e) + + return stats + + def _crawl_notebooks(self, client, now: datetime, stats: WorkspaceCrawlStats, root_path: str = "/"): + """Crawl workspace notebooks recursively.""" + from databricks.sdk.service.workspace import ObjectType + + count = 0 + try: + objects = client.workspace.list(root_path, recursive=True) + for obj in objects: + if obj.object_type != ObjectType.NOTEBOOK: + continue + + name = obj.path.rsplit("/", 1)[-1] if obj.path else "unknown" + language = str(obj.language).lower() if obj.language else None + + self._upsert_workspace_asset( + asset_type="notebook", + path=obj.path, + name=name, + language=language, + resource_id=str(obj.object_id) if obj.object_id 
    def _crawl_jobs(self, client, now: datetime, stats: WorkspaceCrawlStats, **kwargs):
        """Crawl Databricks jobs into the registry.

        Iterates ``client.jobs.list()`` and upserts one ``job`` asset per
        entry, storing job id, schedule, and a task summary as JSON
        metadata. Listing failures are appended to ``stats.errors`` and
        never raised.

        Args:
            client: Databricks ``WorkspaceClient``.
            now: Crawl timestamp applied to every upserted row.
            stats: Mutated in place (``by_type['job']`` and ``errors``).
            **kwargs: Ignored; keeps the per-type crawler signatures
                uniform so ``crawl()`` can pass ``root_path`` to all of them.
        """
        count = 0
        try:
            jobs = client.jobs.list()
            for job in jobs:
                # SDK model shapes vary by version — hence the defensive hasattr checks.
                job_settings = job.settings if hasattr(job, "settings") else None
                name = job_settings.name if job_settings and hasattr(job_settings, "name") else f"job-{job.job_id}"

                metadata = {
                    "job_id": job.job_id,
                }
                if job_settings:
                    if hasattr(job_settings, "schedule") and job_settings.schedule:
                        metadata["schedule"] = str(job_settings.schedule.quartz_cron_expression) if hasattr(job_settings.schedule, "quartz_cron_expression") else None
                    if hasattr(job_settings, "tasks") and job_settings.tasks:
                        metadata["task_count"] = len(job_settings.tasks)
                        # NOTE(review): despite the key name, this collects task *keys*
                        # (identifiers), not task types — confirm consumers before renaming.
                        # set() also makes the stored list order nondeterministic.
                        metadata["task_types"] = list(set(
                            t.task_key for t in job_settings.tasks if hasattr(t, "task_key")
                        ))

                owner = None
                if hasattr(job, "creator_user_name"):
                    owner = job.creator_user_name

                self._upsert_workspace_asset(
                    asset_type="job",
                    path=f"/jobs/{job.job_id}",
                    name=name,
                    owner=owner,
                    metadata_json=json.dumps(metadata),
                    resource_id=str(job.job_id),
                    now=now,
                    stats=stats,
                )
                count += 1
        except Exception as e:
            stats.errors.append(f"Jobs listing error: {e}")

        stats.by_type["job"] = count
metadata["warehouse_id"] = dashboard.warehouse_id + + owner = None + if hasattr(dashboard, "creator_user_name"): + owner = dashboard.creator_user_name + + self._upsert_workspace_asset( + asset_type="dashboard", + path=path, + name=name, + owner=owner, + metadata_json=json.dumps(metadata), + resource_id=dashboard.dashboard_id if hasattr(dashboard, "dashboard_id") else None, + now=now, + stats=stats, + ) + count += 1 + except Exception as e: + stats.errors.append(f"Dashboard listing error: {e}") + + stats.by_type["dashboard"] = count + + def _crawl_pipelines(self, client, now: datetime, stats: WorkspaceCrawlStats, **kwargs): + """Crawl DLT/SDP pipelines.""" + count = 0 + try: + pipelines = client.pipelines.list_pipelines() + for pipeline in pipelines: + name = pipeline.name if hasattr(pipeline, "name") else f"pipeline-{pipeline.pipeline_id}" + + metadata = { + "pipeline_id": pipeline.pipeline_id, + "state": str(pipeline.state) if hasattr(pipeline, "state") and pipeline.state else None, + } + + owner = None + if hasattr(pipeline, "creator_user_name"): + owner = pipeline.creator_user_name + + self._upsert_workspace_asset( + asset_type="pipeline", + path=f"/pipelines/{pipeline.pipeline_id}", + name=name, + owner=owner, + metadata_json=json.dumps(metadata), + resource_id=pipeline.pipeline_id, + now=now, + stats=stats, + ) + count += 1 + except Exception as e: + stats.errors.append(f"Pipeline listing error: {e}") + + stats.by_type["pipeline"] = count + + def _crawl_clusters(self, client, now: datetime, stats: WorkspaceCrawlStats, **kwargs): + """Crawl compute clusters.""" + count = 0 + try: + clusters = client.clusters.list() + for cluster in clusters: + name = cluster.cluster_name if hasattr(cluster, "cluster_name") else f"cluster-{cluster.cluster_id}" + + metadata = { + "cluster_id": cluster.cluster_id, + "state": str(cluster.state) if hasattr(cluster, "state") and cluster.state else None, + "spark_version": cluster.spark_version if hasattr(cluster, "spark_version") 
else None, + "node_type_id": cluster.node_type_id if hasattr(cluster, "node_type_id") else None, + } + + owner = None + if hasattr(cluster, "creator_user_name"): + owner = cluster.creator_user_name + + self._upsert_workspace_asset( + asset_type="cluster", + path=f"/clusters/{cluster.cluster_id}", + name=name, + owner=owner, + metadata_json=json.dumps(metadata), + resource_id=cluster.cluster_id, + now=now, + stats=stats, + ) + count += 1 + except Exception as e: + stats.errors.append(f"Cluster listing error: {e}") + + stats.by_type["cluster"] = count + + def _crawl_experiments(self, client, now: datetime, stats: WorkspaceCrawlStats, **kwargs): + """Crawl MLflow experiments.""" + count = 0 + try: + experiments = client.experiments.list_experiments() + for exp in experiments: + name = exp.name if hasattr(exp, "name") else f"experiment-{exp.experiment_id}" + + metadata = { + "experiment_id": exp.experiment_id, + "lifecycle_stage": str(exp.lifecycle_stage) if hasattr(exp, "lifecycle_stage") and exp.lifecycle_stage else None, + } + + self._upsert_workspace_asset( + asset_type="experiment", + path=exp.name if exp.name and exp.name.startswith("/") else f"/experiments/{exp.experiment_id}", + name=name.rsplit("/", 1)[-1] if "/" in name else name, + metadata_json=json.dumps(metadata), + resource_id=exp.experiment_id, + now=now, + stats=stats, + ) + count += 1 + except Exception as e: + stats.errors.append(f"Experiment listing error: {e}") + + stats.by_type["experiment"] = count + + def _upsert_workspace_asset( + self, + asset_type: str, + path: str, + name: str, + now: datetime, + stats: WorkspaceCrawlStats, + owner: Optional[str] = None, + description: Optional[str] = None, + language: Optional[str] = None, + tags_json: Optional[str] = None, + metadata_json: Optional[str] = None, + content_preview: Optional[str] = None, + resource_id: Optional[str] = None, + ): + """Upsert a single workspace asset into the database.""" + from app.db_adapter import WarehouseDB + + existing = 
WarehouseDB.get_workspace_asset_by_path(self._workspace_host, path) + + if existing: + WarehouseDB.update_workspace_asset( + existing["id"], + name=name, + owner=owner, + description=description, + language=language, + tags_json=tags_json, + metadata_json=metadata_json, + content_preview=content_preview, + resource_id=resource_id, + last_indexed_at=now.isoformat(), + ) + stats.updated_assets += 1 + else: + WarehouseDB.create_workspace_asset( + asset_type=asset_type, + workspace_host=self._workspace_host, + path=path, + name=name, + owner=owner, + description=description, + language=language, + tags_json=tags_json, + metadata_json=metadata_json, + content_preview=content_preview, + resource_id=resource_id, + last_indexed_at=now.isoformat(), + ) + stats.new_assets += 1 + + stats.assets_discovered += 1 diff --git a/dbx-agent-app/app/backend/app/services/workspace_profiles.py b/dbx-agent-app/app/backend/app/services/workspace_profiles.py new file mode 100644 index 00000000..1ac78a06 --- /dev/null +++ b/dbx-agent-app/app/backend/app/services/workspace_profiles.py @@ -0,0 +1,140 @@ +""" +Service for discovering Databricks workspace profiles from ~/.databrickscfg. + +Parses the CLI config file and validates authentication for each profile +by calling the Databricks SDK's current_user.me() endpoint. 
+""" + +import asyncio +import configparser +import logging +import os +from dataclasses import dataclass, field +from pathlib import Path +from typing import List, Optional + +logger = logging.getLogger(__name__) + +DEFAULT_CONFIG_PATH = os.path.expanduser("~/.databrickscfg") + + +@dataclass +class WorkspaceProfile: + name: str + host: Optional[str] = None + auth_type: Optional[str] = None + is_account_profile: bool = False + auth_valid: bool = False + auth_error: Optional[str] = None + username: Optional[str] = None + + +def parse_databricks_config( + config_path: str = DEFAULT_CONFIG_PATH, +) -> List[dict]: + """Parse ~/.databrickscfg and return raw profile dicts.""" + path = Path(config_path) + if not path.exists(): + logger.warning("Databricks config not found at %s", config_path) + return [] + + parser = configparser.ConfigParser() + parser.read(str(path)) + + profiles = [] + for section in parser.sections(): + profile = {"name": section} + profile.update(dict(parser[section])) + profiles.append(profile) + + return profiles + + +def _detect_auth_type(profile: dict) -> str: + """Infer auth type from profile fields.""" + if profile.get("token"): + return "pat" + if profile.get("client_id") and profile.get("client_secret"): + return "oauth-m2m" + if profile.get("azure_client_id"): + return "azure-service-principal" + if profile.get("google_service_account"): + return "google-service-account" + return "default" + + +def _validate_profile_sync(profile_name: str, host: Optional[str]) -> WorkspaceProfile: + """Synchronously validate a single profile's auth (runs in thread pool).""" + try: + from databricks.sdk import WorkspaceClient + + w = WorkspaceClient(profile=profile_name) + me = w.current_user.me() + return WorkspaceProfile( + name=profile_name, + host=host, + auth_valid=True, + username=me.user_name or me.display_name, + ) + except Exception as e: + return WorkspaceProfile( + name=profile_name, + host=host, + auth_valid=False, + auth_error=str(e), + ) + + 
+async def validate_profile_auth(profile_name: str, host: Optional[str]) -> WorkspaceProfile: + """Validate a profile's auth by calling current_user.me() in a thread pool.""" + loop = asyncio.get_event_loop() + return await loop.run_in_executor(None, _validate_profile_sync, profile_name, host) + + +async def discover_workspace_profiles( + config_path: str = DEFAULT_CONFIG_PATH, +) -> List[WorkspaceProfile]: + """Parse config and validate all workspace profiles concurrently.""" + raw_profiles = parse_databricks_config(config_path) + + if not raw_profiles: + return [] + + results: List[WorkspaceProfile] = [] + validation_tasks = [] + + for raw in raw_profiles: + name = raw["name"] + host = raw.get("host") + is_account = bool(raw.get("account_id")) + auth_type = _detect_auth_type(raw) + + if is_account: + # Flag account-level profiles but skip workspace auth validation + results.append( + WorkspaceProfile( + name=name, + host=host, + auth_type=auth_type, + is_account_profile=True, + auth_valid=False, + auth_error="Account-level profile (not a workspace)", + ) + ) + else: + validation_tasks.append((name, host, auth_type)) + + # Validate workspace profiles concurrently + if validation_tasks: + validated = await asyncio.gather( + *[validate_profile_auth(name, host) for name, host, _ in validation_tasks] + ) + # Patch in auth_type from parsed config + for profile, (_, _, auth_type) in zip(validated, validation_tasks): + profile.auth_type = auth_type + results.extend(validated) + + # Sort: valid profiles first, then by name + results.sort(key=lambda p: (not p.auth_valid, p.is_account_profile, p.name)) + + return results diff --git a/dbx-agent-app/app/backend/app/static_files.py b/dbx-agent-app/app/backend/app/static_files.py new file mode 100644 index 00000000..065cf7ed --- /dev/null +++ b/dbx-agent-app/app/backend/app/static_files.py @@ -0,0 +1,66 @@ +""" +Static file serving for React frontend. +Serves the built React app from the dist directory. 
+""" +from fastapi import APIRouter +from fastapi.responses import FileResponse, HTMLResponse +from fastapi.staticfiles import StaticFiles +from pathlib import Path +import os + +router = APIRouter() + +# Path to the React build directory +STATIC_DIR = Path(__file__).parent.parent / "webapp_dist" + +def setup_static_files(app): + """ + Mount static files and setup catch-all route for React Router. + Call this from main.py after setting up all API routes. + """ + # Check if webapp_dist exists + if STATIC_DIR.exists(): + # Mount static assets (JS, CSS, images) + app.mount("/assets", StaticFiles(directory=STATIC_DIR / "assets"), name="assets") + + # Catch-all route for React Router - must be last + @app.get("/{full_path:path}") + async def serve_react(full_path: str): + """ + Serve index.html for all non-API routes. + This allows React Router to handle client-side routing. + """ + # If it's an API route, let it pass through to API handlers + if full_path.startswith("api/") or full_path == "health" or full_path == "docs" or full_path == "openapi.json": + return None # Let FastAPI handle these + + # For root or any other path, serve index.html + index_path = STATIC_DIR / "index.html" + if index_path.exists(): + return FileResponse(index_path) + else: + return HTMLResponse( + content="

Frontend not built

Run: cd webapp && npm run build && cp -r dist ../registry-api/webapp_dist

", + status_code=404 + ) + else: + # Webapp not built yet, show instructions + @app.get("/") + async def root(): + return HTMLResponse( + content=""" +

Multi-Agent Registry API

+

API is running! Frontend not deployed yet.

+ +

To deploy frontend:

+
+cd webapp
+npm run build
+cp -r dist ../registry-api/webapp_dist
+                
+ """, + status_code=200 + ) diff --git a/dbx-agent-app/app/backend/app/templates/app.yaml.jinja2 b/dbx-agent-app/app/backend/app/templates/app.yaml.jinja2 new file mode 100644 index 00000000..258a02f1 --- /dev/null +++ b/dbx-agent-app/app/backend/app/templates/app.yaml.jinja2 @@ -0,0 +1,52 @@ +# Databricks App Configuration +# Supervisor: {{ collection_name }} +# Generated: {{ generated_at }} + +# Application metadata +name: {{ app_name }} +description: "Code-first supervisor for {{ collection_name }} collection (Pattern 3)" + +# Runtime configuration +command: + - "uvicorn" + - "supervisor:app" + - "--host" + - "0.0.0.0" + - "--port" + - "8000" + +# Environment variables +env: + # Databricks Foundation Model API + - name: DATABRICKS_HOST + value: "{{ databricks_host }}" + - name: DATABRICKS_TOKEN + value: "{{ '{{secrets/databricks/token}}' }}" + + # LLM endpoint + - name: LLM_ENDPOINT + value: "{{ llm_endpoint }}" + + # MCP Server URLs (comma-separated) + - name: MCP_SERVER_URLS + value: "{{ mcp_server_urls }}" + + # MLflow tracking + - name: MLFLOW_TRACKING_URI + value: "databricks" + - name: MLFLOW_EXPERIMENT_NAME + value: "/Shared/supervisors/{{ collection_name }}" + +# Resource limits +resources: + cpu: "2" + memory: "4Gi" + +# Health check +health_check: + path: "/health" + interval: 30 + timeout: 10 + +# Port configuration +port: 8000 diff --git a/dbx-agent-app/app/backend/app/templates/requirements.txt.jinja2 b/dbx-agent-app/app/backend/app/templates/requirements.txt.jinja2 new file mode 100644 index 00000000..04375d07 --- /dev/null +++ b/dbx-agent-app/app/backend/app/templates/requirements.txt.jinja2 @@ -0,0 +1,19 @@ +# Generated Supervisor Requirements +# Collection: {{ collection_name }} +# Generated: {{ generated_at }} + +# FastAPI and ASGI server +fastapi==0.115.0 +uvicorn[standard]==0.32.0 + +# HTTP client for MCP communication +httpx==0.27.2 + +# MLflow for tracing +mlflow==2.18.0 + +# Data validation +pydantic==2.9.2 + +# Environment variable 
loading (optional) +python-dotenv==1.0.1 diff --git a/dbx-agent-app/app/backend/app/templates/supervisor_code_first.py.jinja2 b/dbx-agent-app/app/backend/app/templates/supervisor_code_first.py.jinja2 new file mode 100644 index 00000000..04a4aec4 --- /dev/null +++ b/dbx-agent-app/app/backend/app/templates/supervisor_code_first.py.jinja2 @@ -0,0 +1,421 @@ +""" +Code-First Supervisor: {{ collection_name }} + +Generated supervisor with dynamic tool discovery at runtime (Pattern 3). +This supervisor discovers tools from MCP servers at runtime and uses +OpenAI-style tool calling to orchestrate agent interactions. + +Collection ID: {{ collection_id }} +Generated: {{ generated_at }} +""" + +import os +import json +from typing import List, Dict, Any, Optional +from dataclasses import dataclass +import httpx +import mlflow +from fastapi import FastAPI, HTTPException +from pydantic import BaseModel, Field + + +# Configuration from environment variables +LLM_ENDPOINT = os.environ.get("LLM_ENDPOINT", "databricks-meta-llama-3-1-70b-instruct") +DATABRICKS_HOST = os.environ.get("DATABRICKS_HOST") +DATABRICKS_TOKEN = os.environ.get("DATABRICKS_TOKEN") +MCP_SERVER_URLS = os.environ.get("MCP_SERVER_URLS", "").split(",") + + +# Data models +@dataclass +class ToolInfo: + """Represents a tool discovered from an MCP server.""" + name: str + description: str + spec: Dict[str, Any] + server_url: str + + +class ChatRequest(BaseModel): + """Request schema for chat endpoint.""" + message: str = Field(..., description="User message") + conversation_id: Optional[str] = Field(None, description="Conversation ID for continuity") + + +class ChatResponse(BaseModel): + """Response schema for chat endpoint.""" + response: str = Field(..., description="Supervisor response") + conversation_id: str = Field(..., description="Conversation ID") + trace_info: Optional[Dict[str, Any]] = Field(None, description="MLflow trace information") + + +# MCP Client Functions +async def fetch_tool_infos(server_url: str) 
-> List[ToolInfo]: + """ + Fetch available tools from an MCP server. + + This implements Pattern 3: Dynamic tool discovery at runtime. + No hard-coded function catalogs - tools are discovered on-demand. + + Args: + server_url: MCP server endpoint URL + + Returns: + List of ToolInfo objects representing available tools + + Raises: + httpx.HTTPError: If MCP server is unreachable + """ + tools: List[ToolInfo] = [] + + try: + async with httpx.AsyncClient(timeout=30.0) as client: + # Send JSON-RPC request to list tools + request_payload = { + "jsonrpc": "2.0", + "id": 1, + "method": "tools/list", + "params": {} + } + + response = await client.post( + server_url, + json=request_payload, + headers={"Content-Type": "application/json"} + ) + response.raise_for_status() + + result = response.json() + + if "error" in result: + print(f"Warning: MCP server error from {server_url}: {result['error']}") + return tools + + tools_data = result.get("result", {}).get("tools", []) + + # Convert MCP tool format to OpenAI tool spec + for tool_data in tools_data: + tool_name = tool_data.get("name", "") + if not tool_name: + continue + + tool_spec = { + "type": "function", + "function": { + "name": tool_name, + "description": tool_data.get("description", ""), + "parameters": tool_data.get("inputSchema", {}) + } + } + + tools.append(ToolInfo( + name=tool_name, + description=tool_data.get("description", ""), + spec=tool_spec, + server_url=server_url + )) + + except httpx.HTTPError as e: + print(f"Warning: Failed to fetch tools from {server_url}: {str(e)}") + except Exception as e: + print(f"Warning: Unexpected error fetching tools from {server_url}: {str(e)}") + + return tools + + +async def call_tool(tool_name: str, tool_args: Dict[str, Any], server_url: str) -> Any: + """ + Call a tool on an MCP server. 
+ + Args: + tool_name: Name of the tool to call + tool_args: Arguments to pass to the tool + server_url: MCP server endpoint URL + + Returns: + Tool execution result + + Raises: + httpx.HTTPError: If tool call fails + """ + async with httpx.AsyncClient(timeout=60.0) as client: + request_payload = { + "jsonrpc": "2.0", + "id": 2, + "method": "tools/call", + "params": { + "name": tool_name, + "arguments": tool_args + } + } + + response = await client.post( + server_url, + json=request_payload, + headers={"Content-Type": "application/json"} + ) + response.raise_for_status() + + result = response.json() + + if "error" in result: + error = result["error"] + raise Exception(f"Tool call error: {error.get('message', 'Unknown error')}") + + return result.get("result", {}) + + +# Supervisor Logic +async def run_supervisor(user_text: str, conversation_id: Optional[str] = None) -> Dict[str, Any]: + """ + Run the supervisor agent with dynamic tool discovery. + + This implements Pattern 3: + 1. Discover tools from MCP servers at runtime + 2. Pass all available tools to LLM + 3. Execute tool calls as requested by LLM + 4. 
Return final response + + Args: + user_text: User input message + conversation_id: Optional conversation ID for continuity + + Returns: + Dictionary with response, conversation_id, and trace info + """ + # Start MLflow trace + with mlflow.start_span(name="supervisor") as span: + span.set_inputs({"user_text": user_text, "conversation_id": conversation_id}) + + # Step 1: Dynamically discover tools from all MCP servers + tool_infos: List[ToolInfo] = [] + + with mlflow.start_span(name="discover_tools") as discover_span: + for server_url in MCP_SERVER_URLS: + if not server_url or server_url.strip() == "": + continue + server_tools = await fetch_tool_infos(server_url.strip()) + tool_infos.extend(server_tools) + + discover_span.set_outputs({ + "tool_count": len(tool_infos), + "tools": [t.name for t in tool_infos] + }) + + # Step 2: Create messages for LLM + messages = [ + { + "role": "system", + "content": """You are {{ collection_name }}, an AI supervisor that coordinates multiple specialized agents. + +Available tools: {{ tool_list|length }} tools from {{ collection_name }} collection. + +Your responsibilities: +1. Understand user requests +2. Select appropriate tools to fulfill requests +3. Call tools with correct parameters +4. 
Synthesize results into helpful responses + +Always explain your reasoning and provide clear, actionable information.""" + }, + { + "role": "user", + "content": user_text + } + ] + + # Step 3: Call LLM with available tools + with mlflow.start_span(name="llm_call") as llm_span: + # Prepare tools for API call + tools_param = [ti.spec for ti in tool_infos] if tool_infos else None + + # Call Databricks Foundation Model API + async with httpx.AsyncClient(timeout=120.0) as client: + headers = { + "Authorization": f"Bearer {DATABRICKS_TOKEN}", + "Content-Type": "application/json" + } + + request_body = { + "messages": messages, + "max_tokens": 4096, + "temperature": 0.1 + } + + if tools_param: + request_body["tools"] = tools_param + request_body["tool_choice"] = "auto" + + llm_span.set_inputs(request_body) + + response = await client.post( + f"{DATABRICKS_HOST}/serving-endpoints/{LLM_ENDPOINT}/invocations", + headers=headers, + json=request_body + ) + response.raise_for_status() + + result = response.json() + llm_span.set_outputs(result) + + # Step 4: Handle tool calls if requested + message = result.get("choices", [{}])[0].get("message", {}) + tool_calls = message.get("tool_calls", []) + + if tool_calls: + with mlflow.start_span(name="execute_tools") as tools_span: + tool_results = [] + + for tool_call in tool_calls: + function = tool_call.get("function", {}) + tool_name = function.get("name") + tool_args = json.loads(function.get("arguments", "{}")) + + # Find the server URL for this tool + tool_info = next((t for t in tool_infos if t.name == tool_name), None) + + if not tool_info: + tool_results.append({ + "tool_call_id": tool_call.get("id"), + "error": f"Tool {tool_name} not found" + }) + continue + + with mlflow.start_span(name=f"call_{tool_name}") as tool_span: + tool_span.set_inputs({"tool_name": tool_name, "arguments": tool_args}) + + try: + tool_result = await call_tool(tool_name, tool_args, tool_info.server_url) + tool_span.set_outputs(tool_result) + + 
tool_results.append({ + "tool_call_id": tool_call.get("id"), + "role": "tool", + "name": tool_name, + "content": json.dumps(tool_result) + }) + except Exception as e: + tool_span.set_attribute("error", str(e)) + tool_results.append({ + "tool_call_id": tool_call.get("id"), + "role": "tool", + "name": tool_name, + "content": json.dumps({"error": str(e)}) + }) + + tools_span.set_outputs({"results": tool_results}) + + # Step 5: Get final response from LLM + with mlflow.start_span(name="llm_final") as final_span: + messages.append(message) + messages.extend(tool_results) + + async with httpx.AsyncClient(timeout=120.0) as client: + headers = { + "Authorization": f"Bearer {DATABRICKS_TOKEN}", + "Content-Type": "application/json" + } + + request_body = { + "messages": messages, + "max_tokens": 4096, + "temperature": 0.1 + } + + final_span.set_inputs(request_body) + + response = await client.post( + f"{DATABRICKS_HOST}/serving-endpoints/{LLM_ENDPOINT}/invocations", + headers=headers, + json=request_body + ) + response.raise_for_status() + + result = response.json() + final_span.set_outputs(result) + + # Extract final response + final_message = result.get("choices", [{}])[0].get("message", {}) + final_content = final_message.get("content", "I apologize, but I couldn't generate a response.") + + # Get trace information + trace_info = { + "trace_id": span.span_id if hasattr(span, 'span_id') else None, + "tools_discovered": len(tool_infos), + "tools_called": len(tool_calls) if tool_calls else 0 + } + + span.set_outputs({ + "response": final_content, + "trace_info": trace_info + }) + + return { + "response": final_content, + "conversation_id": conversation_id or "new", + "trace_info": trace_info + } + + +# FastAPI Application +app = FastAPI( + title="{{ collection_name }} Supervisor", + description="Code-first supervisor with dynamic tool discovery (Pattern 3)", + version="1.0.0" +) + + +@app.get("/health") +async def health(): + """Health check endpoint.""" + return {"status": 
"healthy", "supervisor": "{{ collection_name }}"} + + +@app.post("/chat", response_model=ChatResponse) +async def chat(request: ChatRequest): + """ + Chat endpoint for interacting with the supervisor. + + The supervisor dynamically discovers tools from MCP servers and + orchestrates agent interactions using OpenAI-style tool calling. + """ + try: + result = await run_supervisor(request.message, request.conversation_id) + return ChatResponse(**result) + except httpx.HTTPError as e: + raise HTTPException(status_code=503, detail=f"Service error: {str(e)}") + except Exception as e: + raise HTTPException(status_code=500, detail=f"Internal error: {str(e)}") + + +@app.get("/tools") +async def list_tools(): + """ + List all currently available tools from MCP servers. + + This endpoint shows which tools the supervisor can currently access. + """ + tool_infos: List[ToolInfo] = [] + + for server_url in MCP_SERVER_URLS: + if not server_url or server_url.strip() == "": + continue + server_tools = await fetch_tool_infos(server_url.strip()) + tool_infos.extend(server_tools) + + return { + "tools": [ + { + "name": t.name, + "description": t.description, + "server_url": t.server_url + } + for t in tool_infos + ], + "total": len(tool_infos) + } + + +if __name__ == "__main__": + import uvicorn + uvicorn.run(app, host="0.0.0.0", port=8000) diff --git a/dbx-agent-app/app/backend/data/.gitkeep b/dbx-agent-app/app/backend/data/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/dbx-agent-app/app/backend/init_warehouse_schema.sql b/dbx-agent-app/app/backend/init_warehouse_schema.sql new file mode 100644 index 00000000..04d6b763 --- /dev/null +++ b/dbx-agent-app/app/backend/init_warehouse_schema.sql @@ -0,0 +1,84 @@ +-- Initialize warehouse schema for registry API +-- This creates the core tables needed for the multi-agent registry + +-- Apps table +CREATE TABLE IF NOT EXISTS serverless_dxukih_catalog.registry.apps ( + id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, + name 
STRING NOT NULL UNIQUE, + owner STRING, + url STRING, + tags STRING, + manifest_url STRING +); + +-- Agents table +CREATE TABLE IF NOT EXISTS serverless_dxukih_catalog.registry.agents ( + id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, + name STRING NOT NULL UNIQUE, + description STRING, + capabilities STRING, + status STRING NOT NULL DEFAULT 'draft', + collection_id INT, + app_id INT, + endpoint_url STRING, + auth_token STRING, + a2a_capabilities STRING, + skills STRING, + protocol_version STRING DEFAULT '0.3.0', + system_prompt STRING, + created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP(), + updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP() +); + +-- Collections table +CREATE TABLE IF NOT EXISTS serverless_dxukih_catalog.registry.collections ( + id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, + name STRING NOT NULL UNIQUE, + description STRING +); + +-- MCP Servers table +CREATE TABLE IF NOT EXISTS serverless_dxukih_catalog.registry.mcp_servers ( + id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, + app_id INT, + server_url STRING NOT NULL, + kind STRING NOT NULL, + uc_connection STRING, + scopes STRING +); + +-- Tools table +CREATE TABLE IF NOT EXISTS serverless_dxukih_catalog.registry.tools ( + id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, + mcp_server_id INT NOT NULL, + name STRING NOT NULL, + description STRING, + parameters STRING +); + +-- Collection Items table +CREATE TABLE IF NOT EXISTS serverless_dxukih_catalog.registry.collection_items ( + id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, + collection_id INT NOT NULL, + app_id INT, + mcp_server_id INT, + tool_id INT +); + +-- Discovery State table +CREATE TABLE IF NOT EXISTS serverless_dxukih_catalog.registry.discovery_state ( + id INT PRIMARY KEY, + is_running BOOLEAN NOT NULL, + last_run_timestamp STRING, + last_run_status STRING, + last_run_message STRING +); + +-- Supervisors table +CREATE TABLE IF NOT EXISTS serverless_dxukih_catalog.registry.supervisors ( + id INT GENERATED ALWAYS AS 
IDENTITY PRIMARY KEY, + collection_id INT NOT NULL, + app_name STRING NOT NULL, + generated_at TIMESTAMP NOT NULL, + deployed_url STRING +); diff --git a/dbx-agent-app/app/backend/pytest.ini b/dbx-agent-app/app/backend/pytest.ini new file mode 100644 index 00000000..dfe1b62b --- /dev/null +++ b/dbx-agent-app/app/backend/pytest.ini @@ -0,0 +1,17 @@ +[pytest] +testpaths = tests +python_files = test_*.py +python_classes = Test* +python_functions = test_* +asyncio_mode = auto +asyncio_default_fixture_loop_scope = function +addopts = + -v + --strict-markers + --tb=short + --cov=app + --cov-report=term-missing + --cov-report=html +markers = + slow: marks tests as slow (deselect with '-m "not slow"') + integration: marks tests as integration tests diff --git a/dbx-agent-app/app/backend/requirements.txt b/dbx-agent-app/app/backend/requirements.txt new file mode 100644 index 00000000..b9e938e1 --- /dev/null +++ b/dbx-agent-app/app/backend/requirements.txt @@ -0,0 +1,38 @@ +# FastAPI Framework +fastapi>=0.111.0 +uvicorn[standard]>=0.30.0 + +# Database +sqlalchemy>=2.0.0 +psycopg[binary]>=3.2.0 +alembic>=1.13.0 +aiosqlite>=0.19.0 # Async SQLite support + +# Data Validation +pydantic>=2.6.0 +pydantic-settings>=2.0.0 + +# Environment +python-dotenv>=1.0.0 + +# Databricks SDK +databricks-sdk>=0.31.0 +databricks-sql-connector>=3.0.0 + +# MCP Integration +databricks-mcp>=0.1.0 + +# HTTP Client +httpx>=0.27.0 + +# Template Engine (for code generation) +jinja2>=3.1.0 + +# MLflow (for tracing) +mlflow>=3.0.0 + +# Testing +pytest>=8.0.0 +pytest-asyncio>=0.23.0 +pytest-cov>=4.1.0 +httpx>=0.27.0 # Required by TestClient diff --git a/dbx-agent-app/app/backend/tests/__init__.py b/dbx-agent-app/app/backend/tests/__init__.py new file mode 100644 index 00000000..5c98f1c1 --- /dev/null +++ b/dbx-agent-app/app/backend/tests/__init__.py @@ -0,0 +1,3 @@ +""" +Test suite for Multi-Agent Registry API. 
"""
diff --git a/dbx-agent-app/app/backend/tests/conftest.py b/dbx-agent-app/app/backend/tests/conftest.py
new file mode 100644
index 00000000..80534379
--- /dev/null
+++ b/dbx-agent-app/app/backend/tests/conftest.py
@@ -0,0 +1,136 @@
"""
Pytest fixtures for testing the Multi-Agent Registry API.
"""

import pytest
from fastapi.testclient import TestClient
from sqlalchemy import create_engine, event
from sqlalchemy.orm import sessionmaker
from sqlalchemy.pool import StaticPool

from app.main import app
from app.database import Base, get_db
from app.models import App, MCPServer, Tool, Collection, CollectionItem
import app.database as _database_module


# Create in-memory SQLite database for testing
SQLALCHEMY_DATABASE_URL = "sqlite:///:memory:"

engine = create_engine(
    SQLALCHEMY_DATABASE_URL,
    # SQLite binds connections to the creating thread by default; TestClient
    # may use the session from another thread, so disable that check.
    connect_args={"check_same_thread": False},
    # StaticPool reuses a single connection so the in-memory database
    # persists across sessions (a new connection would see an empty DB).
    poolclass=StaticPool,
)

# Enable foreign key constraints in SQLite (off by default per connection).
@event.listens_for(engine, "connect")
def set_sqlite_pragma(dbapi_conn, connection_record):
    cursor = dbapi_conn.cursor()
    cursor.execute("PRAGMA foreign_keys=ON")
    cursor.close()

TestingSessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)


@pytest.fixture
def db():
    """
    Create a fresh database for each test.
    Patches SessionLocal so the WarehouseDB adapter also uses the test engine.
    """
    Base.metadata.create_all(bind=engine)
    # Swap the application's session factory for the test one so code that
    # opens its own sessions (e.g. the DB adapter) hits the test database;
    # restored in the finally block below.
    original_session_local = _database_module.SessionLocal
    _database_module.SessionLocal = TestingSessionLocal
    db = TestingSessionLocal()
    try:
        yield db
    finally:
        db.close()
        _database_module.SessionLocal = original_session_local
        # Drop all tables so no state leaks between tests.
        Base.metadata.drop_all(bind=engine)


@pytest.fixture
def client(db):
    """
    Create a test client with dependency override.

    Routes resolved through get_db receive the same session as the `db`
    fixture, so test setup and request handling share one transaction view.
    """

    def override_get_db():
        try:
            yield db
        finally:
            pass  # session lifecycle is owned by the `db` fixture

    app.dependency_overrides[get_db] = override_get_db
    yield TestClient(app)
    app.dependency_overrides.clear()


@pytest.fixture
def sample_app(db):
    """
    Create a sample app for testing.
    """
    app = App(
        name="test-app",
        owner="test@example.com",
        url="https://example.com/app",
        tags="test,sample",
    )
    db.add(app)
    db.commit()
    db.refresh(app)
    return app


@pytest.fixture
def sample_mcp_server(db, sample_app):
    """
    Create a sample MCP server for testing, attached to ``sample_app``.
    """
    from app.models.mcp_server import MCPServerKind

    server = MCPServer(
        app_id=sample_app.id,
        server_url="https://example.com/mcp",
        kind=MCPServerKind.CUSTOM,
        scopes="read,write",
    )
    db.add(server)
    db.commit()
    db.refresh(server)
    return server


@pytest.fixture
def sample_tool(db, sample_mcp_server):
    """
    Create a sample tool for testing, attached to ``sample_mcp_server``.
    """
    tool = Tool(
        mcp_server_id=sample_mcp_server.id,
        name="test_tool",
        description="A test tool",
        parameters='{"type": "object"}',
    )
    db.add(tool)
    db.commit()
    db.refresh(tool)
    return tool


@pytest.fixture
def sample_collection(db):
    """
    Create a sample collection for testing.
    """
    collection = Collection(
        name="Test Collection",
        description="A test collection",
    )
    db.add(collection)
    db.commit()
    db.refresh(collection)
    return collection
diff --git a/dbx-agent-app/app/backend/tests/test_agent_analytics.py b/dbx-agent-app/app/backend/tests/test_agent_analytics.py
new file mode 100644
index 00000000..21dd10c7
--- /dev/null
+++ b/dbx-agent-app/app/backend/tests/test_agent_analytics.py
@@ -0,0 +1,151 @@
"""
Tests for agent analytics CRUD and the GET /agents/{id}/analytics endpoint.
"""

import pytest
from app.models.agent import Agent
from app.models.agent_analytics import AgentAnalytics
from app.db_adapter import DatabaseAdapter


# ── Fixtures ──────────────────────────────────────────────────────────


@pytest.fixture
def sample_agent(db):
    """Create a sample agent for analytics tests."""
    agent = Agent(
        name="analytics-test-agent",
        description="Agent for analytics tests",
        capabilities="search,summarize",
        status="active",
        endpoint_url="https://example.com/agent",
    )
    db.add(agent)
    db.commit()
    db.refresh(agent)
    return agent


@pytest.fixture
def sample_analytics(db, sample_agent):
    """Create sample analytics entries: two successes, one failure."""
    entries = [
        AgentAnalytics(agent_id=sample_agent.id, task_description="task 1", success=1, latency_ms=100, quality_score=4),
        AgentAnalytics(agent_id=sample_agent.id, task_description="task 2", success=1, latency_ms=200, quality_score=5),
        AgentAnalytics(agent_id=sample_agent.id, task_description="task 3", success=0, latency_ms=50, quality_score=2, error_message="timeout"),
    ]
    for e in entries:
        db.add(e)
    db.commit()
    return entries


# ── DatabaseAdapter CRUD tests ────────────────────────────────────────
# NOTE: the `db` fixture parameter is unused directly in several tests but
# is required so the test schema exists and SessionLocal is patched before
# DatabaseAdapter opens its own session.


class TestAnalyticsCrud:
    """Test analytics CRUD via DatabaseAdapter."""

    def test_create_agent_analytic(self, db, sample_agent):
        # Full create: every field round-trips in the returned dict.
        result = DatabaseAdapter.create_agent_analytic(
            agent_id=sample_agent.id,
            task_description="test task",
            success=1,
            latency_ms=150,
            quality_score=4,
        )
        assert result["agent_id"] == sample_agent.id
        assert result["task_description"] == "test task"
        assert result["success"] == 1
        assert result["latency_ms"] == 150
        assert result["quality_score"] == 4
        assert result["error_message"] is None
        assert "id" in result
        assert "created_at" in result

    def test_create_agent_analytic_failure(self, db, sample_agent):
        # Failure records carry success=0 plus an error_message.
        result = DatabaseAdapter.create_agent_analytic(
            agent_id=sample_agent.id,
            task_description="failing task",
            success=0,
            latency_ms=30,
            quality_score=1,
            error_message="connection refused",
        )
        assert result["success"] == 0
        assert result["error_message"] == "connection refused"

    def test_create_agent_analytic_minimal(self, db, sample_agent):
        """Create with only required field (agent_id)."""
        result = DatabaseAdapter.create_agent_analytic(agent_id=sample_agent.id)
        assert result["agent_id"] == sample_agent.id
        assert result["success"] == 1  # default
        assert result["task_description"] is None

    def test_list_agent_analytics(self, db, sample_agent, sample_analytics):
        results = DatabaseAdapter.list_agent_analytics(sample_agent.id)
        assert len(results) == 3

    def test_list_agent_analytics_limit(self, db, sample_agent, sample_analytics):
        # `limit` caps the number of returned rows.
        results = DatabaseAdapter.list_agent_analytics(sample_agent.id, limit=2)
        assert len(results) == 2

    def test_list_agent_analytics_empty(self, db, sample_agent):
        results = DatabaseAdapter.list_agent_analytics(sample_agent.id)
        assert results == []

    def test_get_agent_summary_stats(self, db, sample_agent, sample_analytics):
        # Aggregates over the 3 fixture entries (2 successes, 1 failure).
        stats = DatabaseAdapter.get_agent_summary_stats(sample_agent.id)
        assert stats["agent_id"] == sample_agent.id
        assert stats["total_invocations"] == 3
        assert stats["success_count"] == 2
        assert stats["failure_count"] == 1
        assert stats["success_rate"] == pytest.approx(2 / 3, abs=0.001)
        assert stats["avg_latency_ms"] is not None
        # avg of 100, 200, 50 = 116.67
        assert stats["avg_latency_ms"] == pytest.approx(117, abs=1)
        # avg of 4, 5, 2 = 3.67
        assert stats["avg_quality_score"] == pytest.approx(3.67, abs=0.01)

    def test_get_agent_summary_stats_empty(self, db, sample_agent):
        # With no entries the averages/rate are None, not 0.
        stats = DatabaseAdapter.get_agent_summary_stats(sample_agent.id)
        assert stats["total_invocations"] == 0
        assert stats["success_rate"] is None
        assert stats["avg_latency_ms"] is None
        assert stats["avg_quality_score"] is None
# ── API endpoint tests ────────────────────────────────────────────────


class TestAnalyticsEndpoint:
    """Test GET /agents/{id}/analytics endpoint."""

    def test_get_analytics(self, client, db, sample_agent, sample_analytics):
        """Endpoint returns summary + recent entries for a known agent."""
        url = f"/api/agents/{sample_agent.id}/analytics"
        resp = client.get(url)
        assert resp.status_code == 200
        payload = resp.json()
        assert "summary" in payload
        assert "recent" in payload
        assert payload["agent_id"] == sample_agent.id
        assert payload["agent_name"] == "analytics-test-agent"
        assert len(payload["recent"]) == 3
        assert payload["summary"]["total_invocations"] == 3

    def test_get_analytics_with_limit(self, client, db, sample_agent, sample_analytics):
        """`limit` query parameter caps the `recent` list."""
        resp = client.get(f"/api/agents/{sample_agent.id}/analytics?limit=1")
        assert resp.status_code == 200
        assert len(resp.json()["recent"]) == 1

    def test_get_analytics_not_found(self, client, db):
        """Unknown agent id yields 404."""
        resp = client.get("/api/agents/9999/analytics")
        assert resp.status_code == 404

    def test_get_analytics_empty(self, client, db, sample_agent):
        """Agent with no analytics returns zeroed summary and empty list."""
        resp = client.get(f"/api/agents/{sample_agent.id}/analytics")
        assert resp.status_code == 200
        payload = resp.json()
        assert payload["summary"]["total_invocations"] == 0
        assert payload["recent"] == []
"""

import pytest


def test_list_apps_empty(client):
    """Test listing apps when none exist."""
    response = client.get("/api/apps")
    assert response.status_code == 200
    data = response.json()
    assert data["total"] == 0
    assert data["items"] == []
    assert data["page"] == 1


def test_create_app(client):
    """Test creating a new app."""
    response = client.post(
        "/api/apps",
        json={
            "name": "test-app",
            "owner": "test@example.com",
            "url": "https://example.com/app",
            "tags": "test,sample",
        },
    )
    assert response.status_code == 201
    data = response.json()
    assert data["name"] == "test-app"
    assert data["owner"] == "test@example.com"
    assert "id" in data


def test_create_app_duplicate_name(client, sample_app):
    """Test creating an app with duplicate name fails."""
    # NOTE(review): API reports duplicates as 422 (validation), not 409 —
    # assumed intentional and consistent with the collections endpoints.
    response = client.post(
        "/api/apps",
        json={
            "name": sample_app.name,
            "owner": "another@example.com",
        },
    )
    assert response.status_code == 422


def test_get_app(client, sample_app):
    """Test getting a specific app."""
    response = client.get(f"/api/apps/{sample_app.id}")
    assert response.status_code == 200
    data = response.json()
    assert data["id"] == sample_app.id
    assert data["name"] == sample_app.name


def test_get_app_not_found(client):
    """Test getting non-existent app returns 404."""
    response = client.get("/api/apps/9999")
    assert response.status_code == 404


def test_update_app(client, sample_app):
    """Test updating an app (partial update leaves other fields intact)."""
    response = client.put(
        f"/api/apps/{sample_app.id}",
        json={"owner": "updated@example.com"},
    )
    assert response.status_code == 200
    data = response.json()
    assert data["owner"] == "updated@example.com"
    assert data["name"] == sample_app.name  # Unchanged


def test_delete_app(client, sample_app):
    """Test deleting an app."""
    response = client.delete(f"/api/apps/{sample_app.id}")
    assert response.status_code == 204

    # Verify it's gone
    response = client.get(f"/api/apps/{sample_app.id}")
    assert response.status_code == 404


def test_list_apps_with_pagination(client):
    """Test listing apps with pagination."""
    # Create multiple apps
    for i in range(5):
        client.post(
            "/api/apps",
            json={"name": f"app-{i}", "owner": "test@example.com"},
        )

    # Test pagination: 5 items / page_size 2 -> 3 pages.
    response = client.get("/api/apps?page=1&page_size=2")
    assert response.status_code == 200
    data = response.json()
    assert data["total"] == 5
    assert len(data["items"]) == 2
    assert data["page"] == 1
    assert data["total_pages"] == 3


def test_list_apps_filter_by_owner(client):
    """Test filtering apps by owner."""
    client.post("/api/apps", json={"name": "app-1", "owner": "user1@example.com"})
    client.post("/api/apps", json={"name": "app-2", "owner": "user2@example.com"})

    response = client.get("/api/apps?owner=user1@example.com")
    assert response.status_code == 200
    data = response.json()
    assert data["total"] == 1
    assert data["items"][0]["owner"] == "user1@example.com"
"""

import pytest


def test_list_collections_empty(client):
    """Test listing collections when none exist."""
    response = client.get("/api/collections")
    assert response.status_code == 200
    data = response.json()
    assert data["total"] == 0
    assert data["items"] == []


def test_create_collection(client):
    """Test creating a new collection."""
    response = client.post(
        "/api/collections",
        json={
            "name": "Test Collection",
            "description": "A test collection",
        },
    )
    assert response.status_code == 201
    data = response.json()
    assert data["name"] == "Test Collection"
    assert "id" in data


def test_create_collection_duplicate_name(client, sample_collection):
    """Test creating collection with duplicate name fails."""
    response = client.post(
        "/api/collections",
        json={
            "name": sample_collection.name,
            "description": "Different description",
        },
    )
    assert response.status_code == 422


def test_get_collection(client, sample_collection):
    """Test getting a specific collection."""
    response = client.get(f"/api/collections/{sample_collection.id}")
    assert response.status_code == 200
    data = response.json()
    assert data["id"] == sample_collection.id
    assert data["name"] == sample_collection.name


def test_get_collection_not_found(client):
    """Test getting non-existent collection returns 404."""
    response = client.get("/api/collections/9999")
    assert response.status_code == 404


def test_update_collection(client, sample_collection):
    """Test updating a collection."""
    response = client.put(
        f"/api/collections/{sample_collection.id}",
        json={"description": "Updated description"},
    )
    assert response.status_code == 200
    data = response.json()
    assert data["description"] == "Updated description"


def test_delete_collection(client, sample_collection):
    """Test deleting a collection."""
    response = client.delete(f"/api/collections/{sample_collection.id}")
    assert response.status_code == 204

    # Verify it's gone
    response = client.get(f"/api/collections/{sample_collection.id}")
    assert response.status_code == 404


# Collection Items tests


def test_list_collection_items_empty(client, sample_collection):
    """Test listing items in an empty collection."""
    response = client.get(f"/api/collections/{sample_collection.id}/items")
    assert response.status_code == 200
    assert response.json() == []


def test_add_app_to_collection(client, sample_collection, sample_app):
    """Test adding an app to a collection."""
    response = client.post(
        f"/api/collections/{sample_collection.id}/items",
        json={
            "collection_id": sample_collection.id,
            "app_id": sample_app.id,
            "mcp_server_id": None,
            "tool_id": None,
        },
    )
    assert response.status_code == 201
    data = response.json()
    assert data["collection_id"] == sample_collection.id
    assert data["app_id"] == sample_app.id
    assert data["mcp_server_id"] is None
    assert data["tool_id"] is None


def test_add_mcp_server_to_collection(client, sample_collection, sample_mcp_server):
    """Test adding an MCP server to a collection."""
    response = client.post(
        f"/api/collections/{sample_collection.id}/items",
        json={
            "collection_id": sample_collection.id,
            "app_id": None,
            "mcp_server_id": sample_mcp_server.id,
            "tool_id": None,
        },
    )
    assert response.status_code == 201
    data = response.json()
    assert data["mcp_server_id"] == sample_mcp_server.id


def test_add_tool_to_collection(client, sample_collection, sample_tool):
    """Test adding a tool to a collection."""
    response = client.post(
        f"/api/collections/{sample_collection.id}/items",
        json={
            "collection_id": sample_collection.id,
            "app_id": None,
            "mcp_server_id": None,
            "tool_id": sample_tool.id,
        },
    )
    assert response.status_code == 201
    data = response.json()
    assert data["tool_id"] == sample_tool.id


def test_add_item_multiple_refs_fails(client, sample_collection, sample_app, sample_tool):
    """Test adding item with multiple references fails validation."""
    # This should fail at Pydantic validation level
    # Using raise_server_exceptions=False to handle validation errors gracefully
    # NOTE(review): this assumes TestClient re-raises the validator's
    # ValueError; if the app converts it to a 422 RequestValidationError
    # response instead, no exception reaches pytest.raises — confirm
    # against the route's validation path.
    with pytest.raises(Exception):
        # Pydantic field validator will raise ValueError before route handler
        response = client.post(
            f"/api/collections/{sample_collection.id}/items",
            json={
                "collection_id": sample_collection.id,
                "app_id": sample_app.id,
                "mcp_server_id": None,
                "tool_id": sample_tool.id,
            },
        )


def test_add_item_no_refs_fails(client, sample_collection):
    """Test adding item with no references fails validation."""
    # This should fail at Pydantic validation level
    # Using raise_server_exceptions=False to handle validation errors gracefully
    # NOTE(review): same caveat as test_add_item_multiple_refs_fails —
    # relies on the validation error propagating as a raised exception.
    with pytest.raises(Exception):
        # Pydantic field validator will raise ValueError before route handler
        response = client.post(
            f"/api/collections/{sample_collection.id}/items",
            json={
                "collection_id": sample_collection.id,
                "app_id": None,
                "mcp_server_id": None,
                "tool_id": None,
            },
        )


def test_remove_item_from_collection(client, sample_collection, sample_app, db):
    """Test removing an item from a collection."""
    from app.models import CollectionItem

    # Add item to collection
    item = CollectionItem(
        collection_id=sample_collection.id,
        app_id=sample_app.id,
    )
    db.add(item)
    db.commit()
    db.refresh(item)

    # Remove item
    response = client.delete(
        f"/api/collections/{sample_collection.id}/items/{item.id}"
    )
    assert response.status_code == 204

    # Verify it's gone
    response = client.get(f"/api/collections/{sample_collection.id}/items")
    assert response.status_code == 200
    assert response.json() == []


def test_remove_item_from_wrong_collection_fails(client, sample_app, db):
    """Test removing item from wrong collection fails."""
    from app.models import Collection, CollectionItem

    # Create two collections
    collection1 = Collection(name="Collection 1")
    collection2 = Collection(name="Collection 2")
    db.add_all([collection1, collection2])
    db.commit()

    # Add item to collection1
    item = CollectionItem(collection_id=collection1.id, app_id=sample_app.id)
    db.add(item)
    db.commit()
    db.refresh(item)

    # Try to remove from collection2
    response = client.delete(f"/api/collections/{collection2.id}/items/{item.id}")
    assert response.status_code == 404


# Validation tests


def test_add_nonexistent_app_fails(client, sample_collection):
    """Test adding non-existent app to collection fails."""
    response = client.post(
        f"/api/collections/{sample_collection.id}/items",
        json={
            "collection_id": sample_collection.id,
            "app_id": 9999,
            "mcp_server_id": None,
            "tool_id": None,
        },
    )
    assert response.status_code == 422
    assert "does not exist" in response.json()["detail"]


def test_add_nonexistent_server_fails(client, sample_collection):
    """Test adding non-existent MCP server to collection fails."""
    response = client.post(
        f"/api/collections/{sample_collection.id}/items",
        json={
            "collection_id": sample_collection.id,
            "app_id": None,
            "mcp_server_id": 9999,
            "tool_id": None,
        },
    )
    assert response.status_code == 422
    assert "does not exist" in response.json()["detail"]


def test_add_nonexistent_tool_fails(client, sample_collection):
    """Test adding non-existent tool to collection fails."""
    response = client.post(
        f"/api/collections/{sample_collection.id}/items",
        json={
            "collection_id": sample_collection.id,
            "app_id": None,
            "mcp_server_id": None,
            "tool_id": 9999,
        },
    )
    assert response.status_code == 422
    assert "does not exist" in response.json()["detail"]


def test_add_duplicate_app_fails(client, sample_collection, sample_app, db):
    """Test adding duplicate app to collection fails."""
    from app.models import CollectionItem

    # Add app to collection first time
    item = CollectionItem(collection_id=sample_collection.id, app_id=sample_app.id)
    db.add(item)
    db.commit()

    # Try to add same app again
    response = client.post(
        f"/api/collections/{sample_collection.id}/items",
        json={
            "collection_id": sample_collection.id,
            "app_id": sample_app.id,
            "mcp_server_id": None,
            "tool_id": None,
        },
    )
    assert response.status_code == 422
    assert "already exists" in response.json()["detail"]


def test_add_duplicate_server_fails(client, sample_collection, sample_mcp_server, db):
    """Test adding duplicate MCP server to collection fails."""
    from app.models import CollectionItem

    # Add server to collection first time
    item = CollectionItem(
        collection_id=sample_collection.id, mcp_server_id=sample_mcp_server.id
    )
    db.add(item)
    db.commit()

    # Try to add same server again
    response = client.post(
        f"/api/collections/{sample_collection.id}/items",
        json={
            "collection_id": sample_collection.id,
            "app_id": None,
            "mcp_server_id": sample_mcp_server.id,
            "tool_id": None,
        },
    )
    assert response.status_code == 422
    assert "already exists" in response.json()["detail"]


def test_add_duplicate_tool_fails(client, sample_collection, sample_tool, db):
    """Test adding duplicate tool to collection fails."""
    from app.models import CollectionItem

    # Add tool to collection first time
    item = CollectionItem(collection_id=sample_collection.id, tool_id=sample_tool.id)
    db.add(item)
    db.commit()

    # Try to add same tool again
    response = client.post(
        f"/api/collections/{sample_collection.id}/items",
        json={
            "collection_id": sample_collection.id,
            "app_id": None,
            "mcp_server_id": None,
            "tool_id": sample_tool.id,
        },
    )
    assert response.status_code == 422
    assert "already exists" in response.json()["detail"]


# Pagination and listing tests


def test_list_collections_with_pagination(client, db):
    """Test listing collections with pagination."""
    from app.models import Collection

    # Create multiple collections
    for i in range(5):
        collection = Collection(
            name=f"Collection {i}",
            description=f"Description {i}",
        )
        db.add(collection)
    db.commit()

    # Test pagination: 5 items / page_size 2 -> 3 pages.
    response = client.get("/api/collections?page=1&page_size=2")
    assert response.status_code == 200
    data = response.json()
    assert data["total"] == 5
    assert len(data["items"]) == 2
    assert data["page"] == 1
    assert data["page_size"] == 2
    assert data["total_pages"] == 3


def test_collection_cascade_delete(client, sample_collection, sample_app, db):
    """Test deleting collection cascades to items."""
    from app.models import CollectionItem

    # Add items to collection
    item1 = CollectionItem(collection_id=sample_collection.id, app_id=sample_app.id)
    db.add(item1)
    db.commit()

    # Delete collection
    response = client.delete(f"/api/collections/{sample_collection.id}")
    assert response.status_code == 204

    # Verify items are deleted (relies on the FK pragma enabled in conftest)
    items = db.query(CollectionItem).filter(
        CollectionItem.collection_id == sample_collection.id
    ).all()
    assert len(items) == 0


def test_list_items_with_multiple_types(
    client, sample_collection, sample_app, sample_mcp_server, sample_tool, db
):
    """Test listing items shows all types in collection."""
    from app.models import CollectionItem

    # Add different types of items
    item1 = CollectionItem(collection_id=sample_collection.id, app_id=sample_app.id)
    item2 = CollectionItem(
        collection_id=sample_collection.id, mcp_server_id=sample_mcp_server.id
    )
    item3 = CollectionItem(collection_id=sample_collection.id, tool_id=sample_tool.id)
    db.add_all([item1, item2, item3])
    db.commit()

    # List items
    response = client.get(f"/api/collections/{sample_collection.id}/items")
    assert response.status_code == 200
    items = response.json()
    assert len(items) == 3

    # Verify item types: exactly one of each kind
    app_items = [i for i in items if i["app_id"] is not None]
    server_items = [i for i in items if i["mcp_server_id"] is not None]
    tool_items = [i for i in items if i["tool_id"] is not None]
    assert len(app_items) == 1
    assert len(server_items) == 1
    assert len(tool_items) == 1


def test_update_collection_name_only(client, sample_collection):
    """Test updating only the collection name."""
    response = client.put(
        f"/api/collections/{sample_collection.id}",
        json={"name": "Updated Name"},
    )
    assert response.status_code == 200
    data = response.json()
    assert data["name"] == "Updated Name"
    assert data["description"] == sample_collection.description


def test_update_collection_description_only(client, sample_collection):
    """Test updating only the collection description."""
    response = client.put(
        f"/api/collections/{sample_collection.id}",
        json={"description": "New description"},
    )
    assert response.status_code == 200
    data = response.json()
    assert data["name"] == sample_collection.name
    assert data["description"] == "New description"


def test_update_collection_duplicate_name_fails(client, sample_collection, db):
    """Test updating collection to duplicate name fails."""
    from app.models import Collection

    # Create another collection
    other_collection = Collection(name="Other Collection")
    db.add(other_collection)
    db.commit()

    # Try to rename to existing name
    response = client.put(
        f"/api/collections/{sample_collection.id}",
        json={"name": other_collection.name},
    )
    assert response.status_code == 422
"""

import pytest
from app.services.collections import CollectionService


def test_validate_item_exists_app(db, sample_app):
    """Test validating that an app exists."""
    is_valid, error = CollectionService.validate_item_exists(
        db, app_id=sample_app.id
    )
    assert is_valid
    assert error == ""


def test_validate_item_exists_server(db, sample_mcp_server):
    """Test validating that a server exists."""
    is_valid, error = CollectionService.validate_item_exists(
        db, mcp_server_id=sample_mcp_server.id
    )
    assert is_valid
    assert error == ""


def test_validate_item_exists_tool(db, sample_tool):
    """Test validating that a tool exists."""
    is_valid, error = CollectionService.validate_item_exists(
        db, tool_id=sample_tool.id
    )
    assert is_valid
    assert error == ""


def test_validate_item_nonexistent_app(db):
    """Test validating non-existent app returns error."""
    is_valid, error = CollectionService.validate_item_exists(db, app_id=9999)
    assert not is_valid
    assert "does not exist" in error


def test_validate_item_nonexistent_server(db):
    """Test validating non-existent server returns error."""
    is_valid, error = CollectionService.validate_item_exists(db, mcp_server_id=9999)
    assert not is_valid
    assert "does not exist" in error


def test_validate_item_nonexistent_tool(db):
    """Test validating non-existent tool returns error."""
    is_valid, error = CollectionService.validate_item_exists(db, tool_id=9999)
    assert not is_valid
    assert "does not exist" in error


def test_check_duplicate_item_app(db, sample_collection, sample_app):
    """Test checking for duplicate app in collection."""
    from app.models import CollectionItem

    # No duplicate initially
    is_duplicate = CollectionService.check_duplicate_item(
        db, collection_id=sample_collection.id, app_id=sample_app.id
    )
    assert not is_duplicate

    # Add item
    item = CollectionItem(collection_id=sample_collection.id, app_id=sample_app.id)
    db.add(item)
    db.commit()

    # Now it's a duplicate
    is_duplicate = CollectionService.check_duplicate_item(
        db, collection_id=sample_collection.id, app_id=sample_app.id
    )
    assert is_duplicate


def test_check_duplicate_item_server(db, sample_collection, sample_mcp_server):
    """Test checking for duplicate server in collection."""
    from app.models import CollectionItem

    # No duplicate initially
    is_duplicate = CollectionService.check_duplicate_item(
        db, collection_id=sample_collection.id, mcp_server_id=sample_mcp_server.id
    )
    assert not is_duplicate

    # Add item
    item = CollectionItem(
        collection_id=sample_collection.id, mcp_server_id=sample_mcp_server.id
    )
    db.add(item)
    db.commit()

    # Now it's a duplicate
    is_duplicate = CollectionService.check_duplicate_item(
        db, collection_id=sample_collection.id, mcp_server_id=sample_mcp_server.id
    )
    assert is_duplicate


def test_check_duplicate_item_tool(db, sample_collection, sample_tool):
    """Test checking for duplicate tool in collection."""
    from app.models import CollectionItem

    # No duplicate initially
    is_duplicate = CollectionService.check_duplicate_item(
        db, collection_id=sample_collection.id, tool_id=sample_tool.id
    )
    assert not is_duplicate

    # Add item
    item = CollectionItem(collection_id=sample_collection.id, tool_id=sample_tool.id)
    db.add(item)
    db.commit()

    # Now it's a duplicate
    is_duplicate = CollectionService.check_duplicate_item(
        db, collection_id=sample_collection.id, tool_id=sample_tool.id
    )
    assert is_duplicate


def test_validate_and_add_item_success_app(db, sample_collection, sample_app):
    """Test successfully adding an app to collection."""
    success, message, item = CollectionService.validate_and_add_item(
        db, collection_id=sample_collection.id, app_id=sample_app.id
    )
    assert success
    assert message == ""
    assert item is not None
    assert item.app_id == sample_app.id


def test_validate_and_add_item_success_server(db, sample_collection, sample_mcp_server):
    """Test successfully adding a server to collection."""
    success, message, item = CollectionService.validate_and_add_item(
        db, collection_id=sample_collection.id, mcp_server_id=sample_mcp_server.id
    )
    assert success
    assert message == ""
    assert item is not None
    assert item.mcp_server_id == sample_mcp_server.id


def test_validate_and_add_item_success_tool(db, sample_collection, sample_tool):
    """Test successfully adding a tool to collection."""
    success, message, item = CollectionService.validate_and_add_item(
        db, collection_id=sample_collection.id, tool_id=sample_tool.id
    )
    assert success
    assert message == ""
    assert item is not None
    assert item.tool_id == sample_tool.id


def test_validate_and_add_item_nonexistent_collection(db, sample_app):
    """Test adding item to non-existent collection fails."""
    success, message, item = CollectionService.validate_and_add_item(
        db, collection_id=9999, app_id=sample_app.id
    )
    assert not success
    assert "does not exist" in message
    assert item is None


def test_validate_and_add_item_nonexistent_app(db, sample_collection):
    """Test adding non-existent app fails."""
    success, message, item = CollectionService.validate_and_add_item(
        db, collection_id=sample_collection.id, app_id=9999
    )
    assert not success
    assert "does not exist" in message
    assert item is None


def test_validate_and_add_item_duplicate_app(db, sample_collection, sample_app):
    """Test adding duplicate app fails."""
    from app.models import CollectionItem

    # Add app first time
    item = CollectionItem(collection_id=sample_collection.id, app_id=sample_app.id)
    db.add(item)
    db.commit()

    # Try to add again
    success, message, item = CollectionService.validate_and_add_item(
        db, collection_id=sample_collection.id, app_id=sample_app.id
    )
    assert not success
    assert "already exists" in message
    assert item is None


def test_get_collection_item_counts_empty(db, sample_collection):
    """Test getting counts for empty collection."""
    counts = CollectionService.get_collection_item_counts(db, sample_collection.id)
    assert counts["total"] == 0
    assert counts["apps"] == 0
    assert counts["servers"] == 0
    assert counts["tools"] == 0


def test_get_collection_item_counts_mixed(
    db, sample_collection, sample_app, sample_mcp_server, sample_tool
):
    """Test getting counts for collection with mixed items."""
    from app.models import CollectionItem

    # Add items
    item1 = CollectionItem(collection_id=sample_collection.id, app_id=sample_app.id)
    item2 = CollectionItem(
        collection_id=sample_collection.id, mcp_server_id=sample_mcp_server.id
    )
    item3 = CollectionItem(collection_id=sample_collection.id, tool_id=sample_tool.id)
    db.add_all([item1, item2, item3])
    db.commit()

    # Get counts
    counts = CollectionService.get_collection_item_counts(db, sample_collection.id)
    assert counts["total"] == 3
    assert counts["apps"] == 1
    assert counts["servers"] == 1
    assert counts["tools"] == 1


def test_get_collection_item_counts_multiple_same_type(
    db, sample_collection, sample_app
):
    """Test getting counts with multiple items of same type."""
    from app.models import CollectionItem, App

    # Create another app
    app2 = App(name="test-app-2", owner="test@example.com")
    db.add(app2)
    db.commit()

    # Add both apps
    item1 = CollectionItem(collection_id=sample_collection.id, app_id=sample_app.id)
    item2 = CollectionItem(collection_id=sample_collection.id, app_id=app2.id)
    db.add_all([item1, item2])
    db.commit()

    # Get counts
    counts = CollectionService.get_collection_item_counts(db, sample_collection.id)
    assert counts["total"] == 2
    assert counts["apps"] == 2
    assert counts["servers"] == 0
    assert counts["tools"] == 0
b/dbx-agent-app/app/backend/tests/test_discovery.py @@ -0,0 +1,449 @@ +""" +Tests for Discovery endpoint. +""" + +import pytest +from unittest.mock import AsyncMock, patch, MagicMock +from app.services.discovery import DiscoveryResult, DiscoveredServer +from app.services.tool_parser import NormalizedTool +from app.models import MCPServer, Tool +from app.models.mcp_server import MCPServerKind + + +@pytest.fixture(autouse=True) +def reset_discovery_state(): + """Reset discovery state before each test.""" + from app.routes import discovery + discovery._discovery_state = { + "is_running": False, + "last_run_timestamp": None, + "last_run_status": None, + "last_run_message": None, + } + yield + + +@pytest.fixture +def mock_discovery_result(): + """Create a mock discovery result with sample data.""" + return DiscoveryResult( + servers_discovered=2, + tools_discovered=5, + servers=[ + DiscoveredServer( + server_url="https://mcp1.example.com", + kind="custom", + tools=[ + NormalizedTool( + name="tool1", + description="Tool 1 description", + parameters='{"type": "object"}', + ), + NormalizedTool( + name="tool2", + description="Tool 2 description", + parameters='{"type": "object"}', + ), + ], + ), + DiscoveredServer( + server_url="https://mcp2.example.com", + kind="custom", + tools=[ + NormalizedTool( + name="tool3", + description="Tool 3 description", + parameters='{"type": "object"}', + ), + NormalizedTool( + name="tool4", + description="Tool 4 description", + parameters='{"type": "object"}', + ), + NormalizedTool( + name="tool5", + description="Tool 5 description", + parameters='{"type": "object"}', + ), + ], + ), + ], + errors=[], + ) + + +@pytest.mark.asyncio +async def test_discovery_refresh_success(client, db, mock_discovery_result): + """Test discovery refresh endpoint with successful discovery.""" + with patch("app.routes.discovery.DiscoveryService") as mock_service_class: + # Setup mock + mock_service = MagicMock() + mock_service_class.return_value = mock_service + 
mock_service.discover_all = AsyncMock(return_value=mock_discovery_result) + mock_service.upsert_discovery_results = MagicMock() + + # Mock upsert result + from app.services.discovery import UpsertResult + mock_service.upsert_discovery_results.return_value = UpsertResult( + new_servers=2, + updated_servers=0, + new_tools=5, + updated_tools=0, + ) + + # Make request + response = client.post( + "/api/discovery/refresh", + json={ + "server_urls": ["https://mcp1.example.com", "https://mcp2.example.com"], + }, + ) + + # Assert response + assert response.status_code == 200 + data = response.json() + assert data["status"] == "success" + assert data["servers_discovered"] == 2 + assert data["tools_discovered"] == 5 + assert data["new_servers"] == 2 + assert data["new_tools"] == 5 + assert len(data["errors"]) == 0 + + +@pytest.mark.asyncio +async def test_discovery_refresh_with_errors(client, db): + """Test discovery refresh endpoint with partial failures.""" + result_with_errors = DiscoveryResult( + servers_discovered=1, + tools_discovered=2, + servers=[ + DiscoveredServer( + server_url="https://mcp1.example.com", + kind="custom", + tools=[ + NormalizedTool( + name="tool1", + description="Tool 1", + parameters="{}", + ), + NormalizedTool( + name="tool2", + description="Tool 2", + parameters="{}", + ), + ], + ), + DiscoveredServer( + server_url="https://mcp2.example.com", + kind="custom", + tools=[], + error="Connection timeout", + ), + ], + errors=["https://mcp2.example.com: Connection timeout"], + ) + + with patch("app.routes.discovery.DiscoveryService") as mock_service_class: + mock_service = MagicMock() + mock_service_class.return_value = mock_service + mock_service.discover_all = AsyncMock(return_value=result_with_errors) + + from app.services.discovery import UpsertResult + mock_service.upsert_discovery_results.return_value = UpsertResult( + new_servers=1, + updated_servers=0, + new_tools=2, + updated_tools=0, + ) + + response = client.post( + "/api/discovery/refresh", 
+ json={ + "server_urls": ["https://mcp1.example.com", "https://mcp2.example.com"], + }, + ) + + assert response.status_code == 200 + data = response.json() + assert data["status"] == "partial" + assert data["servers_discovered"] == 1 + assert data["tools_discovered"] == 2 + assert len(data["errors"]) == 1 + assert "Connection timeout" in data["errors"][0] + + +@pytest.mark.asyncio +async def test_discovery_refresh_no_sources(client, db): + """Test discovery refresh endpoint with no sources specified.""" + response = client.post( + "/api/discovery/refresh", + json={ + "server_urls": [], + "discover_workspace": False, + "discover_catalog": False, + }, + ) + + assert response.status_code == 400 + assert "at least one discovery source" in response.json()["detail"].lower() + + +def test_discovery_status_initial(client, db): + """Test discovery status endpoint before any runs.""" + response = client.get("/api/discovery/status") + + assert response.status_code == 200 + data = response.json() + assert data["is_running"] is False + assert data["last_run_timestamp"] is None + assert data["last_run_status"] is None + + +@pytest.mark.asyncio +async def test_discovery_status_after_run(client, db, mock_discovery_result): + """Test discovery status endpoint after a successful run.""" + with patch("app.routes.discovery.DiscoveryService") as mock_service_class: + mock_service = MagicMock() + mock_service_class.return_value = mock_service + mock_service.discover_all = AsyncMock(return_value=mock_discovery_result) + + from app.services.discovery import UpsertResult + mock_service.upsert_discovery_results.return_value = UpsertResult( + new_servers=2, + updated_servers=0, + new_tools=5, + updated_tools=0, + ) + + # Run discovery + client.post( + "/api/discovery/refresh", + json={"server_urls": ["https://mcp.example.com"]}, + ) + + # Check status + response = client.get("/api/discovery/status") + assert response.status_code == 200 + data = response.json() + assert data["is_running"] is 
False + assert data["last_run_timestamp"] is not None + assert data["last_run_status"] == "success" + + +def test_upsert_discovery_creates_new_server(db): + """Test upserting discovery results creates new servers.""" + from app.services.discovery import DiscoveryService + + service = DiscoveryService() + + discovery_result = DiscoveryResult( + servers_discovered=1, + tools_discovered=2, + servers=[ + DiscoveredServer( + server_url="https://new-server.example.com", + kind="custom", + tools=[ + NormalizedTool( + name="new_tool1", + description="New tool 1", + parameters="{}", + ), + NormalizedTool( + name="new_tool2", + description="New tool 2", + parameters="{}", + ), + ], + ), + ], + errors=[], + ) + + upsert_result = service.upsert_discovery_results(db, discovery_result) + + assert upsert_result.new_servers == 1 + assert upsert_result.updated_servers == 0 + assert upsert_result.new_tools == 2 + assert upsert_result.updated_tools == 0 + + # Verify database + server = db.query(MCPServer).filter_by(server_url="https://new-server.example.com").first() + assert server is not None + assert server.kind == MCPServerKind.CUSTOM + + tools = db.query(Tool).filter_by(mcp_server_id=server.id).all() + assert len(tools) == 2 + assert {t.name for t in tools} == {"new_tool1", "new_tool2"} + + +def test_upsert_discovery_updates_existing_server(db, sample_mcp_server): + """Test upserting discovery results updates existing servers.""" + from app.services.discovery import DiscoveryService + + service = DiscoveryService() + + # Update existing server with new tool + discovery_result = DiscoveryResult( + servers_discovered=1, + tools_discovered=1, + servers=[ + DiscoveredServer( + server_url=sample_mcp_server.server_url, + kind="custom", + tools=[ + NormalizedTool( + name="new_tool", + description="New tool", + parameters="{}", + ), + ], + ), + ], + errors=[], + ) + + upsert_result = service.upsert_discovery_results(db, discovery_result) + + assert upsert_result.new_servers == 0 + 
assert upsert_result.updated_servers == 1 + assert upsert_result.new_tools == 1 + assert upsert_result.updated_tools == 0 + + # Verify tool was added + tools = db.query(Tool).filter_by(mcp_server_id=sample_mcp_server.id).all() + assert len(tools) == 1 + assert tools[0].name == "new_tool" + + +def test_upsert_discovery_updates_existing_tool(db, sample_mcp_server, sample_tool): + """Test upserting discovery results updates existing tools.""" + from app.services.discovery import DiscoveryService + + service = DiscoveryService() + + # Update existing tool + discovery_result = DiscoveryResult( + servers_discovered=1, + tools_discovered=1, + servers=[ + DiscoveredServer( + server_url=sample_mcp_server.server_url, + kind="custom", + tools=[ + NormalizedTool( + name=sample_tool.name, + description="Updated description", + parameters='{"type": "object", "properties": {"new": {"type": "string"}}}', + ), + ], + ), + ], + errors=[], + ) + + upsert_result = service.upsert_discovery_results(db, discovery_result) + + assert upsert_result.new_servers == 0 + assert upsert_result.updated_servers == 1 + assert upsert_result.new_tools == 0 + assert upsert_result.updated_tools == 1 + + # Verify tool was updated + db.refresh(sample_tool) + assert sample_tool.description == "Updated description" + assert "new" in sample_tool.parameters + + +def test_upsert_discovery_handles_duplicates(db): + """Test upserting discovery results handles duplicate tools gracefully.""" + from app.services.discovery import DiscoveryService + + service = DiscoveryService() + + # First discovery + discovery_result1 = DiscoveryResult( + servers_discovered=1, + tools_discovered=2, + servers=[ + DiscoveredServer( + server_url="https://server.example.com", + kind="custom", + tools=[ + NormalizedTool(name="tool1", description="Tool 1", parameters="{}"), + NormalizedTool(name="tool2", description="Tool 2", parameters="{}"), + ], + ), + ], + errors=[], + ) + + result1 = service.upsert_discovery_results(db, 
discovery_result1) + assert result1.new_servers == 1 + assert result1.new_tools == 2 + + # Second discovery with same tools (should update, not create) + discovery_result2 = DiscoveryResult( + servers_discovered=1, + tools_discovered=2, + servers=[ + DiscoveredServer( + server_url="https://server.example.com", + kind="custom", + tools=[ + NormalizedTool(name="tool1", description="Updated Tool 1", parameters="{}"), + NormalizedTool(name="tool2", description="Updated Tool 2", parameters="{}"), + ], + ), + ], + errors=[], + ) + + result2 = service.upsert_discovery_results(db, discovery_result2) + assert result2.new_servers == 0 + assert result2.updated_servers == 1 + assert result2.new_tools == 0 + assert result2.updated_tools == 2 + + # Verify only 2 tools exist (no duplicates) + server = db.query(MCPServer).filter_by(server_url="https://server.example.com").first() + tools = db.query(Tool).filter_by(mcp_server_id=server.id).all() + assert len(tools) == 2 + + +def test_upsert_discovery_skips_servers_with_errors(db): + """Test upserting discovery results skips servers with errors.""" + from app.services.discovery import DiscoveryService + + service = DiscoveryService() + + discovery_result = DiscoveryResult( + servers_discovered=1, + tools_discovered=1, + servers=[ + DiscoveredServer( + server_url="https://good-server.example.com", + kind="custom", + tools=[ + NormalizedTool(name="tool1", description="Tool 1", parameters="{}"), + ], + ), + DiscoveredServer( + server_url="https://bad-server.example.com", + kind="custom", + tools=[], + error="Connection failed", + ), + ], + errors=["https://bad-server.example.com: Connection failed"], + ) + + upsert_result = service.upsert_discovery_results(db, discovery_result) + + assert upsert_result.new_servers == 1 + assert upsert_result.new_tools == 1 + + # Verify only good server was created + servers = db.query(MCPServer).all() + assert len(servers) == 1 + assert servers[0].server_url == "https://good-server.example.com" diff 
--git a/dbx-agent-app/app/backend/tests/test_discovery_service.py b/dbx-agent-app/app/backend/tests/test_discovery_service.py new file mode 100644 index 00000000..48b2a65c --- /dev/null +++ b/dbx-agent-app/app/backend/tests/test_discovery_service.py @@ -0,0 +1,326 @@ +""" +Unit tests for discovery service. +""" + +import pytest +from unittest.mock import AsyncMock, MagicMock + +from app.services.discovery import ( + DiscoveryService, + DiscoveredServer, + DiscoveryResult, + create_discovery_service, +) +from app.services.mcp_client import ( + MCPClient, + MCPTool, + MCPConnectionError, + MCPTimeoutError, +) +from app.services.tool_parser import NormalizedTool + + +class TestDiscoveryService: + """Tests for DiscoveryService class.""" + + @pytest.mark.asyncio + async def test_discover_from_url_success(self): + """Test successful discovery from a single URL.""" + # Mock MCP client + mock_client = AsyncMock(spec=MCPClient) + mock_tools = [ + MCPTool( + name="tool1", + description="First tool", + input_schema={"type": "object"}, + ), + MCPTool( + name="tool2", + description="Second tool", + input_schema={"type": "string"}, + ), + ] + mock_client.list_tools.return_value = mock_tools + + service = DiscoveryService(mcp_client=mock_client) + result = await service.discover_from_url( + "https://mcp.example.com", + kind="custom", + ) + + assert isinstance(result, DiscoveredServer) + assert result.server_url == "https://mcp.example.com" + assert result.kind == "custom" + assert len(result.tools) == 2 + assert result.error is None + assert result.tools[0].name == "tool1" + assert result.tools[1].name == "tool2" + + @pytest.mark.asyncio + async def test_discover_from_url_connection_error(self): + """Test discovery handles connection errors gracefully.""" + mock_client = AsyncMock(spec=MCPClient) + mock_client.list_tools.side_effect = MCPConnectionError("Connection failed") + + service = DiscoveryService(mcp_client=mock_client) + result = await 
service.discover_from_url("https://mcp.example.com") + + assert isinstance(result, DiscoveredServer) + assert result.server_url == "https://mcp.example.com" + assert len(result.tools) == 0 + assert result.error is not None + assert "Connection failed" in result.error + + @pytest.mark.asyncio + async def test_discover_from_url_timeout_error(self): + """Test discovery handles timeout errors gracefully.""" + mock_client = AsyncMock(spec=MCPClient) + mock_client.list_tools.side_effect = MCPTimeoutError("Request timed out") + + service = DiscoveryService(mcp_client=mock_client) + result = await service.discover_from_url("https://mcp.example.com") + + assert result.error is not None + assert "timed out" in result.error + + @pytest.mark.asyncio + async def test_discover_from_url_skip_invalid_tools(self): + """Test discovery skips invalid tools without failing.""" + mock_client = AsyncMock(spec=MCPClient) + mock_tools = [ + MCPTool( + name="valid_tool", + description="Valid", + input_schema={"type": "object"}, + ), + MCPTool( + name="", # Invalid - empty name + description="Invalid", + input_schema={}, + ), + ] + mock_client.list_tools.return_value = mock_tools + + service = DiscoveryService(mcp_client=mock_client) + result = await service.discover_from_url("https://mcp.example.com") + + # Should only include valid tool + assert len(result.tools) == 1 + assert result.tools[0].name == "valid_tool" + + @pytest.mark.asyncio + async def test_discover_from_url_without_provided_client(self): + """Test discovery creates its own client when none provided.""" + service = DiscoveryService() # No client provided + + # We can't easily test the actual HTTP call without a real server + # But we can verify the method doesn't crash + # In a real scenario, this would require mocking at a lower level + # For now, we'll test the error handling + result = await service.discover_from_url("https://invalid-mcp-server-xyz.example") + + # Should return a result with an error + assert 
isinstance(result, DiscoveredServer) + # Error will vary based on network conditions, but should be present + # (Could be connection error, DNS error, etc.) + + @pytest.mark.asyncio + async def test_discover_from_workspace_returns_result(self): + """Test workspace discovery returns a valid DiscoveryResult.""" + service = DiscoveryService() + result = await service.discover_from_workspace() + + assert isinstance(result, DiscoveryResult) + # May find servers if workspace apps exist, or return empty + assert result.servers_discovered >= 0 + assert result.tools_discovered >= 0 + + @pytest.mark.asyncio + async def test_discover_from_catalog_no_url(self): + """Test catalog discovery returns empty when catalog URL not configured.""" + service = DiscoveryService() + result = await service.discover_from_catalog() + + assert isinstance(result, DiscoveryResult) + # Without mcp_catalog_url configured, returns empty results + assert result.servers_discovered == 0 + assert result.tools_discovered == 0 + assert len(result.servers) == 0 + + @pytest.mark.asyncio + async def test_discover_from_urls_multiple_servers(self): + """Test discovery from multiple URLs.""" + mock_client = AsyncMock(spec=MCPClient) + + # First server returns 2 tools + first_tools = [ + MCPTool(name="tool1", description="Tool 1", input_schema={}), + MCPTool(name="tool2", description="Tool 2", input_schema={}), + ] + + # Second server returns 1 tool + second_tools = [ + MCPTool(name="tool3", description="Tool 3", input_schema={}), + ] + + # Configure mock to return different results based on call count + mock_client.list_tools.side_effect = [first_tools, second_tools] + + service = DiscoveryService(mcp_client=mock_client) + result = await service.discover_from_urls( + ["https://mcp1.example.com", "https://mcp2.example.com"], + kind="external", + ) + + assert result.servers_discovered == 2 + assert result.tools_discovered == 3 + assert len(result.servers) == 2 + assert len(result.errors) == 0 + + 
@pytest.mark.asyncio + async def test_discover_from_urls_empty_list(self): + """Test discovery from empty URL list.""" + service = DiscoveryService() + result = await service.discover_from_urls([]) + + assert result.servers_discovered == 0 + assert result.tools_discovered == 0 + assert len(result.servers) == 0 + assert len(result.errors) == 0 + + @pytest.mark.asyncio + async def test_discover_from_urls_partial_failures(self): + """Test discovery handles partial failures across multiple servers.""" + mock_client = AsyncMock(spec=MCPClient) + + # First server succeeds + first_tools = [ + MCPTool(name="tool1", description="Tool 1", input_schema={}), + ] + + # Second server fails + mock_client.list_tools.side_effect = [ + first_tools, + MCPConnectionError("Connection failed"), + ] + + service = DiscoveryService(mcp_client=mock_client) + result = await service.discover_from_urls( + ["https://mcp1.example.com", "https://mcp2.example.com"] + ) + + assert result.servers_discovered == 1 # Only first succeeded + assert result.tools_discovered == 1 + assert len(result.servers) == 2 # Both servers returned + assert len(result.errors) == 1 # One error + assert "mcp2.example.com" in result.errors[0] + + @pytest.mark.asyncio + async def test_discover_all_without_custom_urls(self): + """Test full discovery without custom URLs returns valid result.""" + service = DiscoveryService() + result = await service.discover_all() + + # Runs workspace + catalog discovery; may or may not find servers + assert isinstance(result, DiscoveryResult) + assert result.servers_discovered >= 0 + assert result.tools_discovered >= 0 + + @pytest.mark.asyncio + async def test_discover_all_with_custom_urls(self): + """Test full discovery with custom URLs.""" + mock_client = AsyncMock(spec=MCPClient) + mock_tools = [ + MCPTool(name="tool1", description="Tool 1", input_schema={}), + ] + mock_client.list_tools.return_value = mock_tools + + service = DiscoveryService(mcp_client=mock_client) + result = await 
service.discover_all( + custom_urls=["https://mcp.example.com"] + ) + + # Should include stub results + custom URL results + assert result.servers_discovered >= 1 + assert result.tools_discovered >= 1 + + @pytest.mark.asyncio + async def test_create_discovery_service_factory(self): + """Test factory function creates service.""" + service = await create_discovery_service() + + assert isinstance(service, DiscoveryService) + + +class TestDiscoveredServer: + """Tests for DiscoveredServer dataclass.""" + + def test_discovered_server_creation(self): + """Test creating DiscoveredServer instance.""" + tools = [ + NormalizedTool(name="tool1", description="Tool 1", parameters="{}"), + ] + + server = DiscoveredServer( + server_url="https://mcp.example.com", + kind="custom", + tools=tools, + ) + + assert server.server_url == "https://mcp.example.com" + assert server.kind == "custom" + assert len(server.tools) == 1 + assert server.error is None + + def test_discovered_server_with_error(self): + """Test DiscoveredServer with error.""" + server = DiscoveredServer( + server_url="https://mcp.example.com", + kind="custom", + tools=[], + error="Connection failed", + ) + + assert server.error == "Connection failed" + assert len(server.tools) == 0 + + +class TestDiscoveryResult: + """Tests for DiscoveryResult dataclass.""" + + def test_discovery_result_creation(self): + """Test creating DiscoveryResult instance.""" + tools = [ + NormalizedTool(name="tool1", description="Tool 1", parameters="{}"), + ] + servers = [ + DiscoveredServer( + server_url="https://mcp.example.com", + kind="custom", + tools=tools, + ) + ] + + result = DiscoveryResult( + servers_discovered=1, + tools_discovered=1, + servers=servers, + errors=[], + ) + + assert result.servers_discovered == 1 + assert result.tools_discovered == 1 + assert len(result.servers) == 1 + assert len(result.errors) == 0 + + def test_discovery_result_with_errors(self): + """Test DiscoveryResult with errors.""" + result = DiscoveryResult( + 
servers_discovered=0, + tools_discovered=0, + servers=[], + errors=["Error 1", "Error 2"], + ) + + assert len(result.errors) == 2 + assert result.servers_discovered == 0 diff --git a/dbx-agent-app/app/backend/tests/test_generator.py b/dbx-agent-app/app/backend/tests/test_generator.py new file mode 100644 index 00000000..ff1a6c0b --- /dev/null +++ b/dbx-agent-app/app/backend/tests/test_generator.py @@ -0,0 +1,514 @@ +""" +Tests for code generation service. + +Tests the GeneratorService for creating code-first supervisors +from collections using Pattern 3 (dynamic tool discovery). +""" + +import pytest +from sqlalchemy.orm import Session + +from app.services.generator import GeneratorService, GeneratorError, get_generator_service +from app.models import Collection, CollectionItem, App, MCPServer, Tool, MCPServerKind + + +class TestGeneratorService: + """Test suite for GeneratorService.""" + + def test_singleton_instance(self): + """Test that get_generator_service returns singleton.""" + service1 = get_generator_service() + service2 = get_generator_service() + assert service1 is service2 + + def test_fetch_collection_items_not_found(self, db: Session): + """Test fetch_collection_items with non-existent collection.""" + service = GeneratorService() + + with pytest.raises(GeneratorError, match="Collection with id 999 not found"): + service.fetch_collection_items(999) + + def test_fetch_collection_items_empty(self, db: Session): + """Test fetch_collection_items with empty collection.""" + # Create empty collection + collection = Collection(name="Empty Collection", description="No items") + db.add(collection) + db.commit() + + service = GeneratorService() + result_collection, items = service.fetch_collection_items( + collection.id + ) + + assert result_collection['id'] == collection.id + assert len(items) == 0 + + def test_fetch_collection_items_with_tools(self, db: Session): + """Test fetch_collection_items with individual tools.""" + # Create collection with tools + 
collection = Collection(name="Test Collection", description="Test") + db.add(collection) + db.commit() + + # Create MCP server + mcp_server = MCPServer( + server_url="https://mcp.example.com", + kind=MCPServerKind.CUSTOM, + ) + db.add(mcp_server) + db.commit() + + # Create tools + tool1 = Tool( + mcp_server_id=mcp_server.id, + name="search_tool", + description="Search for things", + parameters='{"type": "object"}', + ) + tool2 = Tool( + mcp_server_id=mcp_server.id, + name="analysis_tool", + description="Analyze data", + parameters='{"type": "object"}', + ) + db.add_all([tool1, tool2]) + db.commit() + + # Add tools to collection + item1 = CollectionItem(collection_id=collection.id, tool_id=tool1.id) + item2 = CollectionItem(collection_id=collection.id, tool_id=tool2.id) + db.add_all([item1, item2]) + db.commit() + + service = GeneratorService() + result_collection, items = service.fetch_collection_items( + collection.id + ) + + assert result_collection['id'] == collection.id + assert len(items) == 2 + assert items[0]["type"] == "tool" + assert items[0]["name"] == "search_tool" + assert items[0]["server_url"] == "https://mcp.example.com" + assert items[1]["name"] == "analysis_tool" + + def test_fetch_collection_items_with_mcp_server(self, db: Session): + """Test fetch_collection_items with entire MCP server.""" + # Create collection + collection = Collection(name="Server Collection", description="Test") + db.add(collection) + db.commit() + + # Create MCP server + mcp_server = MCPServer( + server_url="https://mcp.example.com", + kind=MCPServerKind.MANAGED, + ) + db.add(mcp_server) + db.commit() + + # Add server to collection + item = CollectionItem(collection_id=collection.id, mcp_server_id=mcp_server.id) + db.add(item) + db.commit() + + service = GeneratorService() + result_collection, items = service.fetch_collection_items( + collection.id + ) + + assert len(items) == 1 + assert items[0]["type"] == "mcp_server" + assert items[0]["server_url"] == 
"https://mcp.example.com" + + def test_fetch_collection_items_with_app(self, db: Session): + """Test fetch_collection_items with Databricks App.""" + # Create collection + collection = Collection(name="App Collection", description="Test") + db.add(collection) + db.commit() + + # Create app + app = App(name="test-app", owner="user@example.com", url="https://app.example.com") + db.add(app) + db.commit() + + # Create MCP servers for app + server1 = MCPServer( + app_id=app.id, + server_url="https://mcp1.example.com", + kind=MCPServerKind.MANAGED, + ) + server2 = MCPServer( + app_id=app.id, + server_url="https://mcp2.example.com", + kind=MCPServerKind.MANAGED, + ) + db.add_all([server1, server2]) + db.commit() + + # Add app to collection + item = CollectionItem(collection_id=collection.id, app_id=app.id) + db.add(item) + db.commit() + + service = GeneratorService() + result_collection, items = service.fetch_collection_items( + collection.id + ) + + assert len(items) == 1 # App appears as single item + assert items[0]["type"] == "app" + assert items[0]["name"] == "test-app" + + def test_resolve_mcp_server_urls_empty(self): + """Test resolve_mcp_server_urls with empty items.""" + service = GeneratorService() + urls = service.resolve_mcp_server_urls([]) + assert urls == [] + + def test_resolve_mcp_server_urls_unique(self): + """Test resolve_mcp_server_urls removes duplicates.""" + service = GeneratorService() + items = [ + {"server_url": "https://mcp1.example.com"}, + {"server_url": "https://mcp2.example.com"}, + {"server_url": "https://mcp1.example.com"}, # Duplicate + {"server_url": ""}, # Empty + ] + urls = service.resolve_mcp_server_urls(items) + + assert len(urls) == 2 + assert "https://mcp1.example.com" in urls + assert "https://mcp2.example.com" in urls + assert urls == sorted(urls) # Should be sorted + + def test_generate_supervisor_code_empty_collection(self, db: Session): + """Test generate_supervisor_code with empty collection.""" + # Create empty collection + 
collection = Collection(name="Empty Collection", description="No tools") + db.add(collection) + db.commit() + + service = GeneratorService() + files = service.generate_supervisor_code(collection.id) + + assert "supervisor.py" in files + assert "requirements.txt" in files + assert "app.yaml" in files + + # Verify supervisor.py contains expected content + supervisor_code = files["supervisor.py"] + assert "Empty Collection" in supervisor_code + assert "Pattern 3" in supervisor_code + assert "fetch_tool_infos" in supervisor_code + assert "run_supervisor" in supervisor_code + + def test_generate_supervisor_code_with_one_tool(self, db: Session): + """Test generate_supervisor_code with single tool.""" + # Create collection with one tool + collection = Collection(name="Single Tool", description="Test") + db.add(collection) + db.commit() + + mcp_server = MCPServer( + server_url="https://mcp.example.com", + kind=MCPServerKind.CUSTOM, + ) + db.add(mcp_server) + db.commit() + + tool = Tool( + mcp_server_id=mcp_server.id, + name="test_tool", + description="Test tool", + parameters='{"type": "object"}', + ) + db.add(tool) + db.commit() + + item = CollectionItem(collection_id=collection.id, tool_id=tool.id) + db.add(item) + db.commit() + + service = GeneratorService() + files = service.generate_supervisor_code(collection.id) + + # Verify MCP server URL is included + supervisor_code = files["supervisor.py"] + assert "https://mcp.example.com" in supervisor_code or "MCP_SERVER_URLS" in supervisor_code + + # Verify app.yaml includes MCP server URL + app_yaml = files["app.yaml"] + assert "https://mcp.example.com" in app_yaml + + def test_generate_supervisor_code_with_many_tools(self, db: Session): + """Test generate_supervisor_code with 10+ tools.""" + # Create collection + collection = Collection(name="Many Tools", description="Lots of tools") + db.add(collection) + db.commit() + + # Create multiple MCP servers + servers = [] + for i in range(3): + server = MCPServer( + 
server_url=f"https://mcp{i}.example.com", + kind=MCPServerKind.CUSTOM, + ) + db.add(server) + servers.append(server) + db.commit() + + # Create 12 tools across servers + for i in range(12): + server = servers[i % 3] + tool = Tool( + mcp_server_id=server.id, + name=f"tool_{i}", + description=f"Tool number {i}", + parameters='{"type": "object"}', + ) + db.add(tool) + db.commit() + + item = CollectionItem(collection_id=collection.id, tool_id=tool.id) + db.add(item) + + db.commit() + + service = GeneratorService() + files = service.generate_supervisor_code(collection.id) + + # Verify all server URLs are included + app_yaml = files["app.yaml"] + assert "mcp0.example.com" in app_yaml + assert "mcp1.example.com" in app_yaml + assert "mcp2.example.com" in app_yaml + + def test_generate_supervisor_code_custom_llm_endpoint(self, db: Session): + """Test generate_supervisor_code with custom LLM endpoint.""" + # Create collection + collection = Collection(name="Custom LLM", description="Test") + db.add(collection) + db.commit() + + service = GeneratorService() + files = service.generate_supervisor_code( + collection.id, + llm_endpoint="databricks-dbrx-instruct", + ) + + # Verify custom LLM endpoint + app_yaml = files["app.yaml"] + assert "databricks-dbrx-instruct" in app_yaml + + def test_generate_supervisor_code_custom_app_name(self, db: Session): + """Test generate_supervisor_code with custom app name.""" + # Create collection + collection = Collection(name="Test Collection", description="Test") + db.add(collection) + db.commit() + + service = GeneratorService() + files = service.generate_supervisor_code( + collection.id, + app_name="my-custom-supervisor", + ) + + # Verify custom app name + app_yaml = files["app.yaml"] + assert "my-custom-supervisor" in app_yaml + + def test_generate_supervisor_code_app_name_normalization(self, db: Session): + """Test that collection name is normalized to valid app name.""" + # Create collection with special characters + collection = 
Collection( + name="Expert Research Toolkit!!", + description="Special chars", + ) + db.add(collection) + db.commit() + + service = GeneratorService() + files = service.generate_supervisor_code(collection.id) + + # Verify app name is normalized (in the name: field, not in comments/descriptions) + app_yaml = files["app.yaml"] + assert "name: expert-research-toolkit" in app_yaml + # Original name may appear in comments/descriptions, but normalized name should be in the name field + + def test_validate_python_syntax_valid(self): + """Test validate_python_syntax with valid code.""" + service = GeneratorService() + code = """ +def hello(): + return "world" + +x = 42 + """ + is_valid, error = service.validate_python_syntax(code) + assert is_valid is True + assert error is None + + def test_validate_python_syntax_invalid(self): + """Test validate_python_syntax with invalid code.""" + service = GeneratorService() + code = """ +def hello(): + return "world" + invalid syntax here!!! + """ + is_valid, error = service.validate_python_syntax(code) + assert is_valid is False + assert error is not None + assert "Syntax error" in error or "Validation error" in error + + def test_generate_and_validate_success(self, db: Session): + """Test generate_and_validate with valid generation.""" + # Create collection + collection = Collection(name="Valid Collection", description="Test") + db.add(collection) + db.commit() + + service = GeneratorService() + files = service.generate_and_validate(collection.id) + + assert "supervisor.py" in files + assert "requirements.txt" in files + assert "app.yaml" in files + + # Should not raise error + assert len(files["supervisor.py"]) > 0 + + def test_generated_code_is_valid_python(self, db: Session): + """Test that generated supervisor code is syntactically valid Python.""" + # Create collection with tools + collection = Collection(name="Syntax Test", description="Test") + db.add(collection) + db.commit() + + mcp_server = MCPServer( + 
server_url="https://mcp.example.com", + kind=MCPServerKind.CUSTOM, + ) + db.add(mcp_server) + db.commit() + + tool = Tool( + mcp_server_id=mcp_server.id, + name="test_tool", + description="Test", + parameters='{"type": "object"}', + ) + db.add(tool) + db.commit() + + item = CollectionItem(collection_id=collection.id, tool_id=tool.id) + db.add(item) + db.commit() + + service = GeneratorService() + files = service.generate_supervisor_code(collection.id) + + # Validate generated code + is_valid, error = service.validate_python_syntax(files["supervisor.py"]) + assert is_valid is True, f"Generated code has syntax error: {error}" + + def test_generated_code_contains_pattern3_elements(self, db: Session): + """Test that generated code includes Pattern 3 elements.""" + # Create collection + collection = Collection(name="Pattern 3 Test", description="Test") + db.add(collection) + db.commit() + + service = GeneratorService() + files = service.generate_supervisor_code(collection.id) + + supervisor_code = files["supervisor.py"] + + # Verify Pattern 3 elements are present + assert "fetch_tool_infos" in supervisor_code + assert "async def run_supervisor" in supervisor_code + assert "tool_infos: List[ToolInfo]" in supervisor_code + assert "for server_url in MCP_SERVER_URLS" in supervisor_code + assert "tools/list" in supervisor_code + assert "Dynamic tool discovery" in supervisor_code or "Pattern 3" in supervisor_code + + def test_generated_code_has_mlflow_tracing(self, db: Session): + """Test that generated code includes MLflow tracing.""" + # Create collection + collection = Collection(name="Tracing Test", description="Test") + db.add(collection) + db.commit() + + service = GeneratorService() + files = service.generate_supervisor_code(collection.id) + + supervisor_code = files["supervisor.py"] + + # Verify MLflow tracing elements + assert "import mlflow" in supervisor_code + assert "mlflow.start_span" in supervisor_code + assert "set_inputs" in supervisor_code + assert 
"set_outputs" in supervisor_code + + def test_generated_code_has_error_handling(self, db: Session): + """Test that generated code includes error handling.""" + # Create collection + collection = Collection(name="Error Handling Test", description="Test") + db.add(collection) + db.commit() + + service = GeneratorService() + files = service.generate_supervisor_code(collection.id) + + supervisor_code = files["supervisor.py"] + + # Verify error handling + assert "try:" in supervisor_code + assert "except" in supervisor_code + assert "HTTPException" in supervisor_code + + def test_generated_requirements_has_dependencies(self, db: Session): + """Test that generated requirements.txt includes all dependencies.""" + # Create collection + collection = Collection(name="Dependencies Test", description="Test") + db.add(collection) + db.commit() + + service = GeneratorService() + files = service.generate_supervisor_code(collection.id) + + requirements = files["requirements.txt"] + + # Verify dependencies + assert "fastapi" in requirements + assert "uvicorn" in requirements + assert "httpx" in requirements + assert "mlflow" in requirements + assert "pydantic" in requirements + + def test_generated_app_yaml_has_required_fields(self, db: Session): + """Test that generated app.yaml has required Databricks Apps fields.""" + # Create collection + collection = Collection(name="App YAML Test", description="Test") + db.add(collection) + db.commit() + + service = GeneratorService() + files = service.generate_supervisor_code(collection.id) + + app_yaml = files["app.yaml"] + + # Verify required fields + assert "name:" in app_yaml + assert "command:" in app_yaml + assert "env:" in app_yaml + assert "DATABRICKS_HOST" in app_yaml + assert "DATABRICKS_TOKEN" in app_yaml + assert "LLM_ENDPOINT" in app_yaml + assert "MCP_SERVER_URLS" in app_yaml + assert "resources:" in app_yaml + assert "health_check:" in app_yaml + assert "port:" in app_yaml diff --git 
a/dbx-agent-app/app/backend/tests/test_health.py b/dbx-agent-app/app/backend/tests/test_health.py new file mode 100644 index 00000000..7c9c261d --- /dev/null +++ b/dbx-agent-app/app/backend/tests/test_health.py @@ -0,0 +1,21 @@ +""" +Tests for health check endpoints. +""" + + +def test_health_check(client): + """Test GET /health returns 200.""" + response = client.get("/health") + assert response.status_code == 200 + data = response.json() + assert data["status"] == "healthy" + assert "version" in data + + +def test_readiness_check(client): + """Test GET /ready returns 200 when database is connected.""" + response = client.get("/ready") + assert response.status_code == 200 + data = response.json() + assert data["ready"] is True + assert data["database"] == "connected" diff --git a/dbx-agent-app/app/backend/tests/test_integration.py b/dbx-agent-app/app/backend/tests/test_integration.py new file mode 100644 index 00000000..63dce827 --- /dev/null +++ b/dbx-agent-app/app/backend/tests/test_integration.py @@ -0,0 +1,345 @@ +""" +Integration tests for end-to-end workflows in the Multi-Agent Registry API. + +These tests verify complete workflows across multiple endpoints and services. +""" + +import pytest +from fastapi import status + +from app.models.mcp_server import MCPServerKind + + +class TestDiscoveryWorkflow: + """Test the complete discovery workflow.""" + + def test_full_discovery_workflow(self, client, sample_app): + """ + Test complete discovery workflow: register app → discover tools → query results. + """ + # 1. Create an MCP server + server_response = client.post( + "/api/mcp_servers", + json={ + "app_id": sample_app.id, + "server_url": "https://example.com/mcp", + "kind": "custom", + "scopes": "read,write", + }, + ) + assert server_response.status_code == status.HTTP_201_CREATED + server = server_response.json() + + # 2. 
List all MCP servers + list_response = client.get("/api/mcp_servers") + assert list_response.status_code == status.HTTP_200_OK + servers = list_response.json() + assert servers["total"] >= 1 + assert any(s["id"] == server["id"] for s in servers["items"]) + + # 3. Get tools (should be empty initially) + tools_response = client.get("/api/tools") + assert tools_response.status_code == status.HTTP_200_OK + + # 4. Check discovery status + status_response = client.get("/api/discovery/status") + assert status_response.status_code == status.HTTP_200_OK + discovery_status = status_response.json() + assert "is_running" in discovery_status + assert "last_run_status" in discovery_status + + +class TestCollectionWorkflow: + """Test the complete collection management workflow.""" + + def test_full_collection_workflow( + self, client, sample_app, sample_mcp_server, sample_tool + ): + """ + Test complete collection workflow: create → add items → list → generate. + """ + # 1. Create a collection + create_response = client.post( + "/api/collections", + json={ + "name": "Integration Test Collection", + "description": "Collection for integration testing", + }, + ) + assert create_response.status_code == status.HTTP_201_CREATED + collection = create_response.json() + collection_id = collection["id"] + + # 2. Add server to collection (skip app since app_id is optional) + # Note: Apps are typically not added directly to collections + + # 3. Add server to collection + server_response = client.post( + f"/api/collections/{collection_id}/items", + json={"collection_id": collection_id, "mcp_server_id": sample_mcp_server.id}, + ) + assert server_response.status_code == status.HTTP_201_CREATED + + # 4. Add tool to collection + tool_response = client.post( + f"/api/collections/{collection_id}/items", + json={"collection_id": collection_id, "tool_id": sample_tool.id}, + ) + assert tool_response.status_code == status.HTTP_201_CREATED + + # 5. 
List collection items + items_response = client.get(f"/api/collections/{collection_id}/items") + assert items_response.status_code == status.HTTP_200_OK + items = items_response.json() + assert len(items) == 2 # server + tool + + # 6. Get collection details + get_response = client.get(f"/api/collections/{collection_id}") + assert get_response.status_code == status.HTTP_200_OK + collection_details = get_response.json() + assert collection_details["name"] == "Integration Test Collection" + + # 7. Update collection + update_response = client.put( + f"/api/collections/{collection_id}", + json={"description": "Updated description"}, + ) + assert update_response.status_code == status.HTTP_200_OK + updated = update_response.json() + assert updated["description"] == "Updated description" + + +class TestSupervisorGenerationWorkflow: + """Test the complete supervisor generation workflow.""" + + def test_full_supervisor_generation_workflow( + self, client, sample_app, sample_mcp_server, sample_tool + ): + """ + Test complete supervisor workflow: create collection → generate → download. + """ + # 1. Create a collection with tools + collection_response = client.post( + "/api/collections", + json={ + "name": "Supervisor Test Collection", + "description": "Collection for supervisor generation", + }, + ) + assert collection_response.status_code == status.HTTP_201_CREATED + collection = collection_response.json() + collection_id = collection["id"] + + # 2. Add tool to collection + item_response = client.post( + f"/api/collections/{collection_id}/items", + json={"collection_id": collection_id, "tool_id": sample_tool.id}, + ) + assert item_response.status_code == status.HTTP_201_CREATED + + # 3. 
Generate supervisor code + generate_response = client.post( + "/api/supervisors/generate", + json={ + "collection_id": collection_id, + "app_name": "test-supervisor", + "llm_endpoint": "databricks-meta-llama-3-1-70b-instruct", + }, + ) + assert generate_response.status_code == status.HTTP_201_CREATED + generated = generate_response.json() + assert "files" in generated + assert generated["collection_id"] == collection_id + assert "supervisor.py" in generated["files"] + assert "requirements.txt" in generated["files"] + assert "app.yaml" in generated["files"] + + # 4. Validate generated code contains expected elements + code = generated["files"]["supervisor.py"] + assert "class" in code or "def" in code + assert "mlflow" in code.lower() + # Tool name might be in the code or discovered at runtime + + # 5. Download supervisor artifacts (download endpoint expects POST, not GET) + download_response = client.post( + f"/api/supervisors/{collection_id}/download", + json={ + "app_name": "test-supervisor", + "llm_endpoint": "databricks-meta-llama-3-1-70b-instruct", + }, + ) + assert download_response.status_code == status.HTTP_200_OK + assert "application/zip" in download_response.headers.get("content-type", "") + + +class TestErrorHandlingWorkflow: + """Test error handling across multiple endpoints.""" + + def test_cascade_delete_workflow(self, client, sample_app, sample_mcp_server): + """ + Test that deleting an app cascades to servers and tools. + """ + # 1. Verify app and server exist + app_response = client.get(f"/api/apps/{sample_app.id}") + assert app_response.status_code == status.HTTP_200_OK + + server_response = client.get(f"/api/mcp_servers/{sample_mcp_server.id}") + assert server_response.status_code == status.HTTP_200_OK + + # 2. Delete the app + delete_response = client.delete(f"/api/apps/{sample_app.id}") + assert delete_response.status_code == status.HTTP_204_NO_CONTENT + + # 3. 
Verify app is gone + app_check = client.get(f"/api/apps/{sample_app.id}") + assert app_check.status_code == status.HTTP_404_NOT_FOUND + + # 4. Verify server is gone (cascade delete) + server_check = client.get(f"/api/mcp_servers/{sample_mcp_server.id}") + assert server_check.status_code == status.HTTP_404_NOT_FOUND + + def test_foreign_key_constraint_workflow(self, client): + """ + Test that foreign key constraints are enforced. + """ + # Try to create MCP server with invalid app_id + response = client.post( + "/api/mcp_servers", + json={ + "app_id": 99999, + "server_url": "https://example.com/mcp", + "kind": "custom", + }, + ) + assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY + + def test_duplicate_name_workflow(self, client): + """ + Test that duplicate names are prevented. + """ + # 1. Create an app + app_response = client.post( + "/api/apps", + json={"name": "unique-app", "owner": "test@example.com"}, + ) + assert app_response.status_code == status.HTTP_201_CREATED + + # 2. Try to create another app with same name + duplicate_response = client.post( + "/api/apps", + json={"name": "unique-app", "owner": "other@example.com"}, + ) + assert duplicate_response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY + + def test_not_found_workflow(self, client): + """ + Test 404 responses for non-existent resources. + """ + # Test all endpoints + endpoints = [ + "/api/apps/99999", + "/api/mcp_servers/99999", + "/api/tools/99999", + "/api/collections/99999", + ] + + for endpoint in endpoints: + response = client.get(endpoint) + assert ( + response.status_code == status.HTTP_404_NOT_FOUND + ), f"Expected 404 for {endpoint}" + + +class TestPaginationWorkflow: + """Test pagination across all list endpoints.""" + + def test_pagination_workflow(self, client, db): + """ + Test pagination works consistently across all endpoints. 
+ """ + # Create multiple apps + from app.models import App + + apps = [App(name=f"app-{i}", owner="test@example.com") for i in range(15)] + db.add_all(apps) + db.commit() + + # Test first page + page1 = client.get("/api/apps?page=1&page_size=10") + assert page1.status_code == status.HTTP_200_OK + data1 = page1.json() + assert len(data1["items"]) == 10 + assert data1["page"] == 1 + assert data1["total"] == 15 + assert data1["total_pages"] == 2 + + # Test second page + page2 = client.get("/api/apps?page=2&page_size=10") + assert page2.status_code == status.HTTP_200_OK + data2 = page2.json() + assert len(data2["items"]) == 5 + assert data2["page"] == 2 + + # Test custom page size + page_custom = client.get("/api/apps?page=1&page_size=5") + assert page_custom.status_code == status.HTTP_200_OK + data_custom = page_custom.json() + assert len(data_custom["items"]) == 5 + assert data_custom["total_pages"] == 3 + + +class TestFilteringWorkflow: + """Test filtering across all list endpoints.""" + + def test_filtering_workflow(self, client, db): + """ + Test filtering works correctly on list endpoints. 
+ """ + from app.models import App, MCPServer + + # Create apps with different owners + app1 = App(name="app-1", owner="alice@example.com") + app2 = App(name="app-2", owner="bob@example.com") + app3 = App(name="app-3", owner="alice@example.com") + db.add_all([app1, app2, app3]) + db.commit() + + # Create servers with different kinds + server1 = MCPServer( + app_id=app1.id, + server_url="https://example.com/1", + kind=MCPServerKind.MANAGED, + ) + server2 = MCPServer( + app_id=app2.id, + server_url="https://example.com/2", + kind=MCPServerKind.EXTERNAL, + ) + server3 = MCPServer( + app_id=app3.id, + server_url="https://example.com/3", + kind=MCPServerKind.MANAGED, + ) + db.add_all([server1, server2, server3]) + db.commit() + + # Test filter apps by owner + alice_apps = client.get("/api/apps?owner=alice@example.com") + assert alice_apps.status_code == status.HTTP_200_OK + alice_data = alice_apps.json() + assert alice_data["total"] == 2 + assert all(item["owner"] == "alice@example.com" for item in alice_data["items"]) + + # Test filter servers by kind + managed_servers = client.get("/api/mcp_servers?kind=managed") + assert managed_servers.status_code == status.HTTP_200_OK + managed_data = managed_servers.json() + assert managed_data["total"] == 2 + assert all(item["kind"] == "managed" for item in managed_data["items"]) + + # Test filter servers by app_id + app_servers = client.get(f"/api/mcp_servers?app_id={app1.id}") + assert app_servers.status_code == status.HTTP_200_OK + app_data = app_servers.json() + assert app_data["total"] == 1 + assert app_data["items"][0]["app_id"] == app1.id diff --git a/dbx-agent-app/app/backend/tests/test_mcp_client.py b/dbx-agent-app/app/backend/tests/test_mcp_client.py new file mode 100644 index 00000000..7cd2df8b --- /dev/null +++ b/dbx-agent-app/app/backend/tests/test_mcp_client.py @@ -0,0 +1,349 @@ +""" +Unit tests for MCP client. 
+""" + +import pytest +import httpx +from unittest.mock import AsyncMock, patch, MagicMock + +from app.services.mcp_client import ( + MCPClient, + MCPTool, + MCPConnectionError, + MCPTimeoutError, + create_mcp_client, +) + + +class TestMCPClient: + """Tests for MCPClient class.""" + + @pytest.mark.asyncio + async def test_list_tools_success(self): + """Test successful tool listing from MCP server.""" + mock_response = { + "jsonrpc": "2.0", + "id": 1, + "result": { + "tools": [ + { + "name": "search_experts", + "description": "Search for experts by keyword", + "inputSchema": { + "type": "object", + "properties": { + "query": {"type": "string"}, + }, + "required": ["query"], + }, + }, + { + "name": "get_transcript", + "description": "Get transcript by ID", + "inputSchema": { + "type": "object", + "properties": { + "transcript_id": {"type": "string"}, + }, + }, + }, + ] + }, + } + + async with MCPClient() as client: + with patch.object(client._client, "post") as mock_post: + mock_response_obj = MagicMock() + mock_response_obj.json.return_value = mock_response + mock_response_obj.raise_for_status = MagicMock() + mock_post.return_value = mock_response_obj + + tools = await client.list_tools("https://mcp.example.com") + + assert len(tools) == 2 + assert tools[0].name == "search_experts" + assert tools[0].description == "Search for experts by keyword" + assert tools[0].input_schema["type"] == "object" + assert tools[1].name == "get_transcript" + + @pytest.mark.asyncio + async def test_list_tools_empty_response(self): + """Test handling of empty tool list response.""" + mock_response = { + "jsonrpc": "2.0", + "id": 1, + "result": {"tools": []}, + } + + async with MCPClient() as client: + with patch.object(client._client, "post") as mock_post: + mock_response_obj = MagicMock() + mock_response_obj.json.return_value = mock_response + mock_response_obj.raise_for_status = MagicMock() + mock_post.return_value = mock_response_obj + + tools = await 
client.list_tools("https://mcp.example.com") + + assert len(tools) == 0 + + @pytest.mark.asyncio + async def test_list_tools_missing_tools_key(self): + """Test handling of response without tools key.""" + mock_response = { + "jsonrpc": "2.0", + "id": 1, + "result": {}, + } + + async with MCPClient() as client: + with patch.object(client._client, "post") as mock_post: + mock_response_obj = MagicMock() + mock_response_obj.json.return_value = mock_response + mock_response_obj.raise_for_status = MagicMock() + mock_post.return_value = mock_response_obj + + tools = await client.list_tools("https://mcp.example.com") + + assert len(tools) == 0 + + @pytest.mark.asyncio + async def test_list_tools_skip_invalid_tool(self): + """Test that invalid tools are skipped without failing entire operation.""" + mock_response = { + "jsonrpc": "2.0", + "id": 1, + "result": { + "tools": [ + { + "name": "valid_tool", + "description": "A valid tool", + "inputSchema": {"type": "object"}, + }, + { + # Missing name - should be skipped + "description": "Invalid tool", + "inputSchema": {"type": "object"}, + }, + { + "name": "", # Empty name - should be skipped + "description": "Another invalid tool", + "inputSchema": {"type": "object"}, + }, + ] + }, + } + + async with MCPClient() as client: + with patch.object(client._client, "post") as mock_post: + mock_response_obj = MagicMock() + mock_response_obj.json.return_value = mock_response + mock_response_obj.raise_for_status = MagicMock() + mock_post.return_value = mock_response_obj + + tools = await client.list_tools("https://mcp.example.com") + + # Only the valid tool should be returned + assert len(tools) == 1 + assert tools[0].name == "valid_tool" + + @pytest.mark.asyncio + async def test_list_tools_connection_error(self): + """Test handling of connection errors.""" + async with MCPClient() as client: + with patch.object( + client._client, "post", side_effect=httpx.ConnectError("Connection failed") + ): + with pytest.raises(MCPConnectionError) 
as exc_info: + await client.list_tools("https://mcp.example.com") + + assert "HTTP error" in str(exc_info.value) + + @pytest.mark.asyncio + async def test_list_tools_timeout_error(self): + """Test handling of timeout errors.""" + async with MCPClient() as client: + with patch.object( + client._client, "post", side_effect=httpx.TimeoutException("Request timed out") + ): + with pytest.raises(MCPTimeoutError) as exc_info: + await client.list_tools("https://mcp.example.com") + + assert "timed out" in str(exc_info.value) + + @pytest.mark.asyncio + async def test_list_tools_json_rpc_error(self): + """Test handling of JSON-RPC error response.""" + mock_response = { + "jsonrpc": "2.0", + "id": 1, + "error": { + "code": -32600, + "message": "Invalid Request", + }, + } + + async with MCPClient() as client: + with patch.object(client._client, "post") as mock_post: + mock_response_obj = MagicMock() + mock_response_obj.json.return_value = mock_response + mock_response_obj.raise_for_status = MagicMock() + mock_post.return_value = mock_response_obj + + with pytest.raises(MCPConnectionError) as exc_info: + await client.list_tools("https://mcp.example.com") + + assert "Invalid Request" in str(exc_info.value) + assert "-32600" in str(exc_info.value) + + @pytest.mark.asyncio + async def test_list_tools_invalid_json_response(self): + """Test handling of invalid JSON response.""" + async with MCPClient() as client: + with patch.object(client._client, "post") as mock_post: + mock_response_obj = MagicMock() + mock_response_obj.json.side_effect = ValueError("Invalid JSON") + mock_response_obj.raise_for_status = MagicMock() + mock_post.return_value = mock_response_obj + + with pytest.raises(MCPConnectionError) as exc_info: + await client.list_tools("https://mcp.example.com") + + assert "Invalid JSON" in str(exc_info.value) + + @pytest.mark.asyncio + async def test_get_server_info_success(self): + """Test successful server info retrieval.""" + mock_response = { + "jsonrpc": "2.0", + "id": 
1, + "result": { + "protocolVersion": "2024-11-05", + "serverInfo": { + "name": "Example MCP Server", + "version": "1.0.0", + }, + "capabilities": { + "tools": {}, + }, + }, + } + + async with MCPClient() as client: + with patch.object(client._client, "post") as mock_post: + mock_response_obj = MagicMock() + mock_response_obj.json.return_value = mock_response + mock_response_obj.raise_for_status = MagicMock() + mock_post.return_value = mock_response_obj + + info = await client.get_server_info("https://mcp.example.com") + + assert info["protocolVersion"] == "2024-11-05" + assert info["serverInfo"]["name"] == "Example MCP Server" + + @pytest.mark.asyncio + async def test_ping_success(self): + """Test successful ping.""" + mock_response = { + "jsonrpc": "2.0", + "id": 1, + "result": {}, + } + + async with MCPClient() as client: + with patch.object(client._client, "post") as mock_post: + mock_response_obj = MagicMock() + mock_response_obj.json.return_value = mock_response + mock_response_obj.raise_for_status = MagicMock() + mock_post.return_value = mock_response_obj + + result = await client.ping("https://mcp.example.com") + + assert result is True + + @pytest.mark.asyncio + async def test_ping_connection_failure(self): + """Test ping returns False on connection failure.""" + async with MCPClient() as client: + with patch.object( + client._client, "post", side_effect=httpx.ConnectError("Connection failed") + ): + result = await client.ping("https://mcp.example.com") + + assert result is False + + @pytest.mark.asyncio + async def test_ping_timeout(self): + """Test ping returns False on timeout.""" + async with MCPClient() as client: + with patch.object( + client._client, "post", side_effect=httpx.TimeoutException("Timeout") + ): + result = await client.ping("https://mcp.example.com") + + assert result is False + + @pytest.mark.asyncio + async def test_context_manager(self): + """Test client works as async context manager.""" + async with MCPClient() as client: + assert 
client._client is not None + assert isinstance(client._client, httpx.AsyncClient) + + # Client should be closed after context exit + # (We can't directly test this without accessing internals) + + @pytest.mark.asyncio + async def test_request_id_increments(self): + """Test that JSON-RPC request IDs increment.""" + async with MCPClient() as client: + id1 = client._next_request_id() + id2 = client._next_request_id() + id3 = client._next_request_id() + + assert id2 == id1 + 1 + assert id3 == id2 + 1 + + @pytest.mark.asyncio + async def test_client_not_initialized_error(self): + """Test that using client without context manager raises error.""" + client = MCPClient() + + with pytest.raises(MCPConnectionError) as exc_info: + await client.list_tools("https://mcp.example.com") + + assert "not initialized" in str(exc_info.value).lower() + + @pytest.mark.asyncio + async def test_create_mcp_client_factory(self): + """Test factory function creates client.""" + client = await create_mcp_client() + + assert isinstance(client, MCPClient) + assert client.timeout == 30.0 + + +class TestMCPTool: + """Tests for MCPTool dataclass.""" + + def test_mcp_tool_creation(self): + """Test creating MCPTool instance.""" + tool = MCPTool( + name="test_tool", + description="A test tool", + input_schema={"type": "object"}, + ) + + assert tool.name == "test_tool" + assert tool.description == "A test tool" + assert tool.input_schema == {"type": "object"} + + def test_mcp_tool_optional_description(self): + """Test MCPTool with None description.""" + tool = MCPTool( + name="test_tool", + description=None, + input_schema={}, + ) + + assert tool.name == "test_tool" + assert tool.description is None diff --git a/dbx-agent-app/app/backend/tests/test_mcp_servers.py b/dbx-agent-app/app/backend/tests/test_mcp_servers.py new file mode 100644 index 00000000..f6da5783 --- /dev/null +++ b/dbx-agent-app/app/backend/tests/test_mcp_servers.py @@ -0,0 +1,99 @@ +""" +Tests for MCP Server CRUD endpoints. 
+""" + +import pytest + + +def test_list_mcp_servers_empty(client): + """Test listing MCP servers when none exist.""" + response = client.get("/api/mcp_servers") + assert response.status_code == 200 + data = response.json() + assert data["total"] == 0 + assert data["items"] == [] + + +def test_create_mcp_server(client, sample_app): + """Test creating a new MCP server.""" + response = client.post( + "/api/mcp_servers", + json={ + "app_id": sample_app.id, + "server_url": "https://example.com/mcp", + "kind": "custom", + "scopes": "read,write", + }, + ) + assert response.status_code == 201 + data = response.json() + assert data["server_url"] == "https://example.com/mcp" + assert data["kind"] == "custom" + assert "id" in data + + +def test_create_mcp_server_invalid_app_id(client): + """Test creating MCP server with non-existent app fails.""" + response = client.post( + "/api/mcp_servers", + json={ + "app_id": 9999, + "server_url": "https://example.com/mcp", + "kind": "custom", + }, + ) + assert response.status_code == 422 + + +def test_get_mcp_server(client, sample_mcp_server): + """Test getting a specific MCP server.""" + response = client.get(f"/api/mcp_servers/{sample_mcp_server.id}") + assert response.status_code == 200 + data = response.json() + assert data["id"] == sample_mcp_server.id + assert data["server_url"] == sample_mcp_server.server_url + + +def test_get_mcp_server_not_found(client): + """Test getting non-existent MCP server returns 404.""" + response = client.get("/api/mcp_servers/9999") + assert response.status_code == 404 + + +def test_update_mcp_server(client, sample_mcp_server): + """Test updating an MCP server.""" + response = client.put( + f"/api/mcp_servers/{sample_mcp_server.id}", + json={"scopes": "read,write,admin"}, + ) + assert response.status_code == 200 + data = response.json() + assert data["scopes"] == "read,write,admin" + + +def test_delete_mcp_server(client, sample_mcp_server): + """Test deleting an MCP server.""" + response = 
client.delete(f"/api/mcp_servers/{sample_mcp_server.id}") + assert response.status_code == 204 + + # Verify it's gone + response = client.get(f"/api/mcp_servers/{sample_mcp_server.id}") + assert response.status_code == 404 + + +def test_list_mcp_servers_filter_by_app_id(client, sample_app, sample_mcp_server): + """Test filtering MCP servers by app_id.""" + response = client.get(f"/api/mcp_servers?app_id={sample_app.id}") + assert response.status_code == 200 + data = response.json() + assert data["total"] == 1 + assert data["items"][0]["app_id"] == sample_app.id + + +def test_list_mcp_servers_filter_by_kind(client, sample_mcp_server): + """Test filtering MCP servers by kind.""" + response = client.get("/api/mcp_servers?kind=custom") + assert response.status_code == 200 + data = response.json() + assert data["total"] == 1 + assert data["items"][0]["kind"] == "custom" diff --git a/dbx-agent-app/app/backend/tests/test_orchestrator.py b/dbx-agent-app/app/backend/tests/test_orchestrator.py new file mode 100644 index 00000000..8215e311 --- /dev/null +++ b/dbx-agent-app/app/backend/tests/test_orchestrator.py @@ -0,0 +1,198 @@ +""" +Tests for the orchestrator service — planning, execution levels, JSON parsing. 
+""" + +import json +import pytest +from app.services.orchestrator import ( + Orchestrator, + SubTask, + OrchestrationPlan, + SubTaskResult, + _parse_json_from_llm, +) + + +# ── _parse_json_from_llm ───────────────────────────────────────────── + + +class TestParseJsonFromLlm: + """Tests for extracting JSON from LLM output.""" + + def test_plain_json(self): + raw = '{"complexity": "simple", "reasoning": "test"}' + result = _parse_json_from_llm(raw) + assert result["complexity"] == "simple" + assert result["reasoning"] == "test" + + def test_json_with_markdown_fences(self): + raw = '```json\n{"complexity": "complex", "reasoning": "multi-step"}\n```' + result = _parse_json_from_llm(raw) + assert result["complexity"] == "complex" + + def test_json_with_bare_fences(self): + raw = '```\n{"key": "value"}\n```' + result = _parse_json_from_llm(raw) + assert result["key"] == "value" + + def test_json_with_whitespace(self): + raw = ' \n {"a": 1} \n ' + result = _parse_json_from_llm(raw) + assert result["a"] == 1 + + def test_invalid_json_raises(self): + with pytest.raises(json.JSONDecodeError): + _parse_json_from_llm("not json at all") + + def test_multiline_json_in_fences(self): + raw = '```json\n{\n "complexity": "simple",\n "sub_tasks": []\n}\n```' + result = _parse_json_from_llm(raw) + assert result["complexity"] == "simple" + assert result["sub_tasks"] == [] + + +# ── _build_execution_levels ─────────────────────────────────────────── + + +class TestBuildExecutionLevels: + """Tests for DAG-based execution level grouping.""" + + def test_empty_tasks(self): + levels = Orchestrator._build_execution_levels([]) + assert levels == [] + + def test_single_task_no_deps(self): + tasks = [SubTask(description="task0", agent_id=1, agent_name="a")] + levels = Orchestrator._build_execution_levels(tasks) + assert levels == [[0]] + + def test_two_independent_tasks(self): + tasks = [ + SubTask(description="task0", agent_id=1, agent_name="a"), + SubTask(description="task1", agent_id=2, 
agent_name="b"), + ] + levels = Orchestrator._build_execution_levels(tasks) + assert levels == [[0, 1]] + + def test_sequential_dependency(self): + tasks = [ + SubTask(description="task0", agent_id=1, agent_name="a"), + SubTask(description="task1", agent_id=2, agent_name="b", depends_on=[0]), + ] + levels = Orchestrator._build_execution_levels(tasks) + assert levels == [[0], [1]] + + def test_diamond_dependency(self): + """A depends on nothing, B and C depend on A, D depends on B and C.""" + tasks = [ + SubTask(description="A", agent_id=1, agent_name="a"), + SubTask(description="B", agent_id=2, agent_name="b", depends_on=[0]), + SubTask(description="C", agent_id=3, agent_name="c", depends_on=[0]), + SubTask(description="D", agent_id=4, agent_name="d", depends_on=[1, 2]), + ] + levels = Orchestrator._build_execution_levels(tasks) + assert len(levels) == 3 + assert levels[0] == [0] + assert set(levels[1]) == {1, 2} + assert levels[2] == [3] + + def test_out_of_range_dependency_ignored(self): + """Dependencies pointing to invalid indices should not crash.""" + tasks = [ + SubTask(description="task0", agent_id=1, agent_name="a", depends_on=[99]), + ] + levels = Orchestrator._build_execution_levels(tasks) + assert levels == [[0]] + + def test_three_levels(self): + tasks = [ + SubTask(description="t0", agent_id=1, agent_name="a"), + SubTask(description="t1", agent_id=2, agent_name="b", depends_on=[0]), + SubTask(description="t2", agent_id=3, agent_name="c", depends_on=[1]), + ] + levels = Orchestrator._build_execution_levels(tasks) + assert levels == [[0], [1], [2]] + + +# ── Plan parsing / fallback ────────────────────────────────────────── + + +class TestClassifyAndPlanParsing: + """Tests for plan construction from parsed JSON (unit-level, no LLM call).""" + + def test_plan_from_valid_json(self): + """Verify OrchestrationPlan construction from well-formed parsed data.""" + parsed = { + "complexity": "complex", + "reasoning": "needs two agents", + "sub_tasks": [ + 
{"description": "search", "agent_id": 1, "agent_name": "search-agent", "depends_on": []}, + {"description": "summarize", "agent_id": 2, "agent_name": "summary-agent", "depends_on": [0]}, + ], + } + sub_tasks = [ + SubTask( + description=st["description"], + agent_id=st["agent_id"], + agent_name=st["agent_name"], + depends_on=st.get("depends_on", []), + ) + for st in parsed["sub_tasks"] + ] + plan = OrchestrationPlan( + complexity=parsed["complexity"], + reasoning=parsed["reasoning"], + sub_tasks=sub_tasks, + ) + + assert plan.complexity == "complex" + assert len(plan.sub_tasks) == 2 + assert plan.sub_tasks[0].agent_name == "search-agent" + assert plan.sub_tasks[1].depends_on == [0] + + def test_plan_missing_fields_defaults(self): + """Sub-task with missing optional fields uses defaults.""" + parsed_st = {"description": "do something"} + st = SubTask( + description=parsed_st.get("description", "unknown"), + agent_id=parsed_st.get("agent_id", 0), + agent_name=parsed_st.get("agent_name", "unknown"), + depends_on=parsed_st.get("depends_on", []), + ) + assert st.agent_id == 0 + assert st.agent_name == "unknown" + assert st.depends_on == [] + + +# ── SubTaskResult ──────────────────────────────────────────────────── + + +class TestSubTaskResult: + """Tests for SubTaskResult dataclass.""" + + def test_success_result(self): + r = SubTaskResult( + task_index=0, + agent_id=1, + agent_name="test", + description="do thing", + response="done", + latency_ms=150, + success=True, + ) + assert r.success is True + assert r.error is None + + def test_failure_result(self): + r = SubTaskResult( + task_index=0, + agent_id=1, + agent_name="test", + description="do thing", + response="", + latency_ms=50, + success=False, + error="Connection refused", + ) + assert r.success is False + assert r.error == "Connection refused" diff --git a/dbx-agent-app/app/backend/tests/test_search_agents.py b/dbx-agent-app/app/backend/tests/test_search_agents.py new file mode 100644 index 00000000..063ceb23 
"""
Tests for agent search functionality — keyword search and embedding pipeline.
"""

import json
import pytest
from app.models.agent import Agent
from app.db_adapter import DatabaseAdapter
from app.services.search import SearchService
from app.services.embedding import EmbeddingService


# ── Fixtures ────────────────────────────────────────────────────────


@pytest.fixture
def sample_agents(db):
    """Seed three agents: two active (searchable) and one inactive."""
    seeded = [
        Agent(
            name="transcript-search-agent",
            description="Searches expert transcripts using RAG",
            capabilities="search,rag,transcripts",
            status="active",
            endpoint_url="https://example.com/search-agent",
            skills='[{"name": "search_transcripts", "description": "RAG search"}]',
        ),
        Agent(
            name="profile-agent",
            description="Retrieves and summarizes expert profiles",
            capabilities="profiles,summarization",
            status="active",
            endpoint_url="https://example.com/profile-agent",
        ),
        Agent(
            name="inactive-agent",
            description="An inactive agent that should not match",
            capabilities="search",
            status="inactive",
            endpoint_url="https://example.com/inactive",
        ),
    ]
    db.add_all(seeded)
    db.commit()
    for agent in seeded:
        db.refresh(agent)
    return seeded


# ── Agent keyword search tests ──────────────────────────────────────


class TestAgentKeywordSearch:
    """Agents should surface in keyword search results."""

    def test_search_finds_agent_by_name(self, db, sample_agents, client):
        """Matching on the agent's name via the search endpoint."""
        resp = client.post(
            "/api/search",
            json={"query": "transcript-search", "types": ["agent"], "mode": "keyword"},
        )
        assert resp.status_code == 200
        body = resp.json()
        hits = [r for r in body["results"] if r["asset_type"] == "agent"]
        assert len(hits) >= 1
        assert any("transcript" in r["name"].lower() for r in hits)

    def test_search_finds_agent_by_description(self, db, sample_agents, client):
        """Matching on keywords from the agent's description."""
        resp = client.post(
            "/api/search",
            json={"query": "expert profiles", "types": ["agent"], "mode": "keyword"},
        )
        assert resp.status_code == 200
        body = resp.json()
        hits = [r for r in body["results"] if r["asset_type"] == "agent"]
        assert len(hits) >= 1

    def test_search_finds_agent_by_capabilities(self, db, sample_agents, client):
        """Matching on the capabilities text."""
        resp = client.post(
            "/api/search",
            json={"query": "rag", "types": ["agent"], "mode": "keyword"},
        )
        assert resp.status_code == 200
        body = resp.json()
        hits = [r for r in body["results"] if r["asset_type"] == "agent"]
        assert len(hits) >= 1

    def test_search_no_match(self, db, sample_agents, client):
        """A query matching nothing yields zero agent results."""
        resp = client.post(
            "/api/search",
            json={"query": "zzz-nonexistent-xyz", "types": ["agent"], "mode": "keyword"},
        )
        assert resp.status_code == 200
        body = resp.json()
        hits = [r for r in body["results"] if r["asset_type"] == "agent"]
        assert len(hits) == 0


# ── Agent CRUD endpoint tests ───────────────────────────────────────


class TestAgentCrud:
    """Sanity-check agent CRUD endpoints against the fixture data."""

    def test_list_agents(self, client, db, sample_agents):
        resp = client.get("/api/agents")
        assert resp.status_code == 200
        body = resp.json()
        assert body["total"] >= 3

    def test_get_agent(self, client, db, sample_agents):
        first = sample_agents[0]
        resp = client.get(f"/api/agents/{first.id}")
        assert resp.status_code == 200
        body = resp.json()
        assert body["name"] == "transcript-search-agent"

    def test_create_agent(self, client, db):
        resp = client.post(
            "/api/agents",
            json={
                "name": "new-test-agent",
                "description": "A brand new agent",
                "capabilities": "testing",
                "status": "draft",
            },
        )
        assert resp.status_code == 201
        body = resp.json()
        assert body["name"] == "new-test-agent"
        assert "id" in body


# ── Embedding pipeline tests ────────────────────────────────────────


class TestAgentEmbeddingText:
    """_build_text should emit searchable text for agent assets."""

    def _build(self, asset_type, asset):
        # Bypass __init__ (and any client setup it performs) to test
        # _build_text in isolation.
        service = EmbeddingService.__new__(EmbeddingService)
        return service._build_text(asset_type, asset)

    def test_build_text_with_capabilities(self):
        asset = {
            "name": "my-agent",
            "description": "Does cool things",
            "capabilities": "search,summarize",
        }
        rendered = self._build("agent", asset)
        assert "my-agent" in rendered
        assert "Does cool things" in rendered
        assert "capabilities: search,summarize" in rendered

    def test_build_text_with_skills_json(self):
        asset = {
            "name": "skilled-agent",
            "description": "Multi-skilled",
            "skills": '[{"name": "search_docs"}, {"name": "summarize_text"}]',
        }
        rendered = self._build("agent", asset)
        assert "search_docs" in rendered
        assert "summarize_text" in rendered

    def test_build_text_with_invalid_skills_json(self):
        """Invalid skills JSON should not crash, just skip."""
        asset = {
            "name": "broken-skills-agent",
            "description": "Has bad skills JSON",
            "skills": "not valid json",
        }
        rendered = self._build("agent", asset)
        assert "broken-skills-agent" in rendered

    def test_build_text_minimal_agent(self):
        rendered = self._build("agent", {"name": "minimal-agent"})
        assert "minimal-agent" in rendered
"""
Tests for supervisor generation API endpoints.

Tests the REST API for generating supervisors from collections.
"""

import io
import zipfile

import pytest
from fastapi.testclient import TestClient
from sqlalchemy.orm import Session

from app.models import Collection, CollectionItem, App, MCPServer, Tool, Supervisor, MCPServerKind


def _seed_collection_with_tool(db, *, name, description, tool_description):
    """Create a collection containing one tool on one custom MCP server.

    Shared setup for the "with tools" tests below; returns the Collection row.
    """
    collection = Collection(name=name, description=description)
    db.add(collection)
    db.commit()

    mcp_server = MCPServer(
        server_url="https://mcp.example.com",
        kind=MCPServerKind.CUSTOM,
    )
    db.add(mcp_server)
    db.commit()

    tool = Tool(
        mcp_server_id=mcp_server.id,
        name="test_tool",
        description=tool_description,
        parameters='{"type": "object"}',
    )
    db.add(tool)
    db.commit()

    db.add(CollectionItem(collection_id=collection.id, tool_id=tool.id))
    db.commit()
    return collection


class TestSupervisorGenerate:
    """Test suite for POST /api/supervisors/generate endpoint."""

    def test_generate_supervisor_success(
        self, client: TestClient, sample_collection: Collection
    ):
        """Generating a supervisor from a valid collection returns the file set."""
        resp = client.post(
            "/api/supervisors/generate",
            json={
                "collection_id": sample_collection.id,
                "llm_endpoint": "databricks-meta-llama-3-1-70b-instruct",
            },
        )

        assert resp.status_code == 201
        payload = resp.json()

        assert payload["collection_id"] == sample_collection.id
        assert payload["collection_name"] == sample_collection.name
        assert payload["app_name"] == "test-collection"
        assert "files" in payload
        assert "supervisor.py" in payload["files"]
        assert "requirements.txt" in payload["files"]
        assert "app.yaml" in payload["files"]
        assert "generated_at" in payload

        # The generated code must contain the Pattern 3 entry points.
        supervisor_code = payload["files"]["supervisor.py"]
        assert "fetch_tool_infos" in supervisor_code
        assert "run_supervisor" in supervisor_code

    def test_generate_supervisor_with_tools(
        self, client: TestClient, db: Session
    ):
        """Generating from a collection that actually contains a tool."""
        collection = _seed_collection_with_tool(
            db, name="Tool Collection", description="Test", tool_description="Test tool"
        )

        resp = client.post(
            "/api/supervisors/generate",
            json={"collection_id": collection.id},
        )

        assert resp.status_code == 201
        payload = resp.json()

        # The tool's MCP server URL must be wired into the generated config.
        app_yaml = payload["files"]["app.yaml"]
        assert "https://mcp.example.com" in app_yaml

    def test_generate_supervisor_custom_app_name(
        self, client: TestClient, sample_collection: Collection
    ):
        """A caller-supplied app name flows through to the response and config."""
        resp = client.post(
            "/api/supervisors/generate",
            json={
                "collection_id": sample_collection.id,
                "app_name": "my-custom-supervisor",
            },
        )

        assert resp.status_code == 201
        payload = resp.json()
        assert payload["app_name"] == "my-custom-supervisor"

        app_yaml = payload["files"]["app.yaml"]
        assert "my-custom-supervisor" in app_yaml

    def test_generate_supervisor_custom_llm_endpoint(
        self, client: TestClient, sample_collection: Collection
    ):
        """A caller-supplied LLM endpoint shows up in app.yaml."""
        resp = client.post(
            "/api/supervisors/generate",
            json={
                "collection_id": sample_collection.id,
                "llm_endpoint": "databricks-dbrx-instruct",
            },
        )

        assert resp.status_code == 201
        payload = resp.json()

        app_yaml = payload["files"]["app.yaml"]
        assert "databricks-dbrx-instruct" in app_yaml

    def test_generate_supervisor_collection_not_found(self, client: TestClient):
        """Generating against a non-existent collection is rejected."""
        resp = client.post(
            "/api/supervisors/generate",
            json={"collection_id": 9999},
        )

        assert resp.status_code == 422
        assert "Collection with id 9999 not found" in resp.json()["detail"]

    def test_generate_supervisor_empty_collection(
        self, client: TestClient, db: Session
    ):
        """An empty collection still yields a (toolless) supervisor."""
        collection = Collection(name="Empty Collection", description="No items")
        db.add(collection)
        db.commit()

        resp = client.post(
            "/api/supervisors/generate",
            json={"collection_id": collection.id},
        )

        assert resp.status_code == 201
        payload = resp.json()
        assert "files" in payload
        assert "supervisor.py" in payload["files"]

    def test_generate_supervisor_creates_metadata(
        self, client: TestClient, sample_collection: Collection, db: Session
    ):
        """Generation persists a Supervisor metadata row."""
        resp = client.post(
            "/api/supervisors/generate",
            json={"collection_id": sample_collection.id},
        )

        assert resp.status_code == 201

        record = db.query(Supervisor).filter(
            Supervisor.collection_id == sample_collection.id
        ).first()
        assert record is not None
        assert record.app_name == "test-collection"
        assert record.generated_at is not None

    def test_generate_supervisor_validation_error(self, client: TestClient):
        """A body missing the required collection_id fails validation."""
        resp = client.post(
            "/api/supervisors/generate",
            json={},  # Missing required collection_id
        )

        assert resp.status_code == 422


class TestSupervisorPreview:
    """Test suite for GET /api/supervisors/{collection_id}/preview endpoint."""

    def test_preview_supervisor_success(
        self, client: TestClient, sample_collection: Collection
    ):
        """Previewing returns truncated file contents plus summary fields."""
        resp = client.get(
            f"/api/supervisors/{sample_collection.id}/preview"
        )

        assert resp.status_code == 200
        payload = resp.json()

        assert payload["collection_id"] == sample_collection.id
        assert payload["collection_name"] == sample_collection.name
        assert payload["app_name"] == "test-collection"
        assert "mcp_server_urls" in payload
        assert "tool_count" in payload
        assert "preview" in payload
        assert "supervisor.py" in payload["preview"]
        assert "requirements.txt" in payload["preview"]
        assert "app.yaml" in payload["preview"]

        # Preview content is truncated server-side.
        snippet = payload["preview"]["supervisor.py"]
        assert len(snippet) <= 503  # 500 + "..."

    def test_preview_supervisor_with_tools(
        self, client: TestClient, db: Session
    ):
        """Preview reports the tool count and MCP server URLs."""
        collection = _seed_collection_with_tool(
            db, name="Tool Collection", description="Test", tool_description="Test tool"
        )

        resp = client.get(f"/api/supervisors/{collection.id}/preview")

        assert resp.status_code == 200
        payload = resp.json()

        assert payload["tool_count"] == 1
        assert "https://mcp.example.com" in payload["mcp_server_urls"]

    def test_preview_supervisor_custom_params(
        self, client: TestClient, sample_collection: Collection
    ):
        """Query-string overrides are honored in the preview."""
        resp = client.get(
            f"/api/supervisors/{sample_collection.id}/preview",
            params={
                "llm_endpoint": "databricks-dbrx-instruct",
                "app_name": "custom-name",
            },
        )

        assert resp.status_code == 200
        payload = resp.json()
        assert payload["app_name"] == "custom-name"

    def test_preview_supervisor_collection_not_found(self, client: TestClient):
        """Previewing a non-existent collection 404s."""
        resp = client.get("/api/supervisors/9999/preview")

        assert resp.status_code == 404
        assert "Collection with id 9999 not found" in resp.json()["detail"]

    def test_preview_supervisor_empty_collection(
        self, client: TestClient, db: Session
    ):
        """An empty collection previews with zero tools and no server URLs."""
        collection = Collection(name="Empty Collection", description="No items")
        db.add(collection)
        db.commit()

        resp = client.get(f"/api/supervisors/{collection.id}/preview")

        assert resp.status_code == 200
        payload = resp.json()
        assert payload["tool_count"] == 0
        assert len(payload["mcp_server_urls"]) == 0


class TestSupervisorDownload:
    """Test suite for POST /api/supervisors/{collection_id}/download endpoint."""

    def test_download_supervisor_success(
        self, client: TestClient, sample_collection: Collection
    ):
        """Downloading yields a zip archive containing the generated files."""
        resp = client.post(
            f"/api/supervisors/{sample_collection.id}/download"
        )

        assert resp.status_code == 200
        assert resp.headers["content-type"] == "application/zip"
        assert "attachment" in resp.headers["content-disposition"]
        assert "test-collection.zip" in resp.headers["content-disposition"]

        # Inspect the archive structure.
        buffer = io.BytesIO(resp.content)
        with zipfile.ZipFile(buffer, mode="r") as archive:
            members = archive.namelist()
            assert "supervisor.py" in members
            assert "requirements.txt" in members
            assert "app.yaml" in members

            code = archive.read("supervisor.py").decode("utf-8")
            assert len(code) > 0
            assert "fetch_tool_infos" in code

    def test_download_supervisor_custom_app_name(
        self, client: TestClient, sample_collection: Collection
    ):
        """A custom app name is used for the downloaded zip filename."""
        resp = client.post(
            f"/api/supervisors/{sample_collection.id}/download",
            params={"app_name": "my-custom-app"},
        )

        assert resp.status_code == 200
        assert "my-custom-app.zip" in resp.headers["content-disposition"]

    def test_download_supervisor_creates_metadata(
        self, client: TestClient, sample_collection: Collection, db: Session
    ):
        """Downloading also persists a Supervisor metadata row."""
        resp = client.post(
            f"/api/supervisors/{sample_collection.id}/download"
        )

        assert resp.status_code == 200

        record = db.query(Supervisor).filter(
            Supervisor.collection_id == sample_collection.id
        ).first()
        assert record is not None
        assert record.app_name == "test-collection"

    def test_download_supervisor_collection_not_found(self, client: TestClient):
        """Downloading against a non-existent collection 404s."""
        resp = client.post("/api/supervisors/9999/download")

        assert resp.status_code == 404
        assert "Collection with id 9999 not found" in resp.json()["detail"]

    def test_download_supervisor_validation_passes(
        self, client: TestClient, sample_collection: Collection
    ):
        """The downloaded supervisor.py must be compilable Python."""
        resp = client.post(
            f"/api/supervisors/{sample_collection.id}/download"
        )

        assert resp.status_code == 200

        # A 200 means generate_and_validate succeeded server-side;
        # double-check by compiling the extracted source ourselves.
        buffer = io.BytesIO(resp.content)
        with zipfile.ZipFile(buffer, mode="r") as archive:
            code = archive.read("supervisor.py").decode("utf-8")

        compile(code, "", "exec")


class TestSupervisorList:
    """Test suite for GET /api/supervisors endpoint."""

    def test_list_supervisors_empty(self, client: TestClient):
        """Listing with no supervisors returns an empty page."""
        resp = client.get("/api/supervisors")

        assert resp.status_code == 200
        payload = resp.json()
        assert payload["supervisors"] == []
        assert payload["total"] == 0

    def test_list_supervisors_with_data(
        self, client: TestClient, db: Session, sample_collection: Collection
    ):
        """Listing returns each generated supervisor with its metadata fields."""
        client.post(
            "/api/supervisors/generate",
            json={"collection_id": sample_collection.id},
        )
        client.post(
            "/api/supervisors/generate",
            json={
                "collection_id": sample_collection.id,
                "app_name": "another-supervisor",
            },
        )

        resp = client.get("/api/supervisors")

        assert resp.status_code == 200
        payload = resp.json()
        assert payload["total"] == 2
        assert len(payload["supervisors"]) == 2

        entry = payload["supervisors"][0]
        for key in ("id", "collection_id", "app_name", "generated_at", "deployed_url"):
            assert key in entry

    def test_list_supervisors_ordered_by_date(
        self, client: TestClient, db: Session, sample_collection: Collection
    ):
        """Supervisors come back newest-first."""
        client.post(
            "/api/supervisors/generate",
            json={"collection_id": sample_collection.id, "app_name": "first"},
        )
        client.post(
            "/api/supervisors/generate",
            json={"collection_id": sample_collection.id, "app_name": "second"},
        )

        resp = client.get("/api/supervisors")

        assert resp.status_code == 200
        payload = resp.json()

        # The most recently generated supervisor is listed first.
        assert payload["supervisors"][0]["app_name"] == "second"
        assert payload["supervisors"][1]["app_name"] == "first"


class TestSupervisorDelete:
    """Test suite for DELETE /api/supervisors/{supervisor_id} endpoint."""

    def test_delete_supervisor_success(
        self, client: TestClient, db: Session, sample_collection: Collection
    ):
        """Deleting removes the supervisor metadata row."""
        client.post(
            "/api/supervisors/generate",
            json={"collection_id": sample_collection.id},
        )

        record = db.query(Supervisor).first()
        assert record is not None

        resp = client.delete(f"/api/supervisors/{record.id}")

        assert resp.status_code == 204

        assert db.query(Supervisor).filter(Supervisor.id == record.id).first() is None

    def test_delete_supervisor_not_found(self, client: TestClient):
        """Deleting a non-existent supervisor 404s."""
        resp = client.delete("/api/supervisors/9999")

        assert resp.status_code == 404
        assert "Supervisor with id 9999 not found" in resp.json()["detail"]


class TestSupervisorIntegration:
    """Integration tests for supervisor generation workflow."""

    def test_full_workflow(
        self, client: TestClient, db: Session
    ):
        """End to end: preview -> generate -> list -> download -> delete."""
        collection = _seed_collection_with_tool(
            db, name="Integration Test", description="Test", tool_description="Test"
        )

        # Step 1: Preview
        preview = client.get(f"/api/supervisors/{collection.id}/preview")
        assert preview.status_code == 200
        assert preview.json()["tool_count"] == 1

        # Step 2: Generate
        generated = client.post(
            "/api/supervisors/generate",
            json={"collection_id": collection.id},
        )
        assert generated.status_code == 201
        assert "files" in generated.json()

        # Step 3: List
        listing = client.get("/api/supervisors")
        assert listing.status_code == 200
        assert listing.json()["total"] >= 1

        # Step 4: Download
        download = client.post(
            f"/api/supervisors/{collection.id}/download"
        )
        assert download.status_code == 200
        assert download.headers["content-type"] == "application/zip"

        # Step 5: Delete
        record = db.query(Supervisor).first()
        removal = client.delete(f"/api/supervisors/{record.id}")
        assert removal.status_code == 204

    def test_multiple_collections(
        self, client: TestClient, db: Session
    ):
        """Supervisors can be generated independently for several collections."""
        collection1 = Collection(name="Collection 1", description="First")
        collection2 = Collection(name="Collection 2", description="Second")
        db.add_all([collection1, collection2])
        db.commit()

        resp1 = client.post(
            "/api/supervisors/generate",
            json={"collection_id": collection1.id},
        )
        resp2 = client.post(
            "/api/supervisors/generate",
            json={"collection_id": collection2.id},
        )

        assert resp1.status_code == 201
        assert resp2.status_code == 201

        listing = client.get("/api/supervisors")
        assert listing.status_code == 200
        assert listing.json()["total"] == 2

    def test_regenerate_supervisor(
        self, client: TestClient, sample_collection: Collection
    ):
        """Regenerating from the same collection creates a new metadata record."""
        resp1 = client.post(
            "/api/supervisors/generate",
            json={"collection_id": sample_collection.id},
        )
        resp2 = client.post(
            "/api/supervisors/generate",
            json={"collection_id": sample_collection.id},
        )

        assert resp1.status_code == 201
        assert resp2.status_code == 201

        listing = client.get("/api/supervisors")
        assert listing.json()["total"] == 2
"""
Unit tests for tool parser.
"""

import json

import pytest

from app.services.tool_parser import (
    ToolParser,
    NormalizedTool,
    normalize_tool_spec,
)


class TestToolParser:
    """Tests for ToolParser class."""

    def test_extract_parameters_schema_valid(self):
        """A well-formed schema round-trips through JSON serialization."""
        schema = {
            "type": "object",
            "properties": {
                "query": {"type": "string"},
                "limit": {"type": "integer"},
            },
            "required": ["query"],
        }

        encoded = ToolParser.extract_parameters_schema(schema)

        assert isinstance(encoded, str)
        decoded = json.loads(encoded)
        assert decoded["type"] == "object"
        assert "query" in decoded["properties"]

    def test_extract_parameters_schema_empty(self):
        """An empty dict serializes to the empty-object literal."""
        assert ToolParser.extract_parameters_schema({}) == "{}"

    def test_extract_parameters_schema_none(self):
        """None collapses to the empty-object literal."""
        assert ToolParser.extract_parameters_schema(None) == "{}"

    def test_extract_parameters_schema_invalid_type(self):
        """Non-dict input collapses to the empty-object literal."""
        assert ToolParser.extract_parameters_schema("not a dict") == "{}"

    def test_normalize_description_valid(self):
        """Surrounding whitespace is stripped."""
        cleaned = ToolParser.normalize_description(" Search for experts by keyword ")
        assert cleaned == "Search for experts by keyword"

    def test_normalize_description_none(self):
        """None passes through as None."""
        assert ToolParser.normalize_description(None) is None

    def test_normalize_description_empty(self):
        """An empty string normalizes to None."""
        assert ToolParser.normalize_description("") is None

    def test_normalize_description_whitespace_only(self):
        """Whitespace-only input normalizes to None."""
        assert ToolParser.normalize_description(" \n\t ") is None

    def test_normalize_description_max_length(self):
        """Over-long descriptions are truncated with an ellipsis."""
        cleaned = ToolParser.normalize_description("x" * 6000)

        assert len(cleaned) <= 5003  # 5000 + "..."
        assert cleaned.endswith("...")

    def test_normalize_name_valid(self):
        """Surrounding whitespace is stripped from names."""
        assert ToolParser.normalize_name(" search_experts ") == "search_experts"

    def test_normalize_name_empty_raises_error(self):
        """An empty name is rejected."""
        with pytest.raises(ValueError) as err:
            ToolParser.normalize_name("")

        assert "required" in str(err.value).lower()

    def test_normalize_name_none_raises_error(self):
        """A None name is rejected."""
        with pytest.raises(ValueError) as err:
            ToolParser.normalize_name(None)

        assert "required" in str(err.value)

    def test_normalize_name_whitespace_only_raises_error(self):
        """A whitespace-only name is rejected."""
        with pytest.raises(ValueError) as err:
            ToolParser.normalize_name(" \n\t ")

        assert "cannot be empty" in str(err.value)

    def test_normalize_name_max_length(self):
        """Over-long names are clamped to 255 characters."""
        clamped = ToolParser.normalize_name("x" * 300)
        assert len(clamped) == 255

    def test_normalize_name_invalid_type_raises_error(self):
        """A non-string name is rejected."""
        with pytest.raises(ValueError) as err:
            ToolParser.normalize_name(123)

        assert "must be a string" in str(err.value)

    def test_parse_tool_full_data(self):
        """All fields populated: everything is normalized and preserved."""
        spec = {
            "name": "search_experts",
            "description": "Search for experts by keyword",
            "inputSchema": {
                "type": "object",
                "properties": {"query": {"type": "string"}},
            },
        }

        tool = ToolParser.parse_tool(spec)

        assert isinstance(tool, NormalizedTool)
        assert tool.name == "search_experts"
        assert tool.description == "Search for experts by keyword"
        assert isinstance(tool.parameters, str)
        assert json.loads(tool.parameters)["type"] == "object"

    def test_parse_tool_minimal_data(self):
        """Only the name: optional fields fall back to their defaults."""
        tool = ToolParser.parse_tool({"name": "minimal_tool"})

        assert tool.name == "minimal_tool"
        assert tool.description is None
        assert tool.parameters == "{}"

    def test_parse_tool_missing_name_raises_error(self):
        """A spec without a name is rejected."""
        spec = {
            "description": "Missing name",
            "inputSchema": {"type": "object"},
        }

        with pytest.raises(ValueError) as err:
            ToolParser.parse_tool(spec)

        assert "required" in str(err.value).lower()

    def test_parse_tool_invalid_data_type_raises_error(self):
        """A non-dict spec is rejected."""
        with pytest.raises(ValueError) as err:
            ToolParser.parse_tool("not a dict")

        assert "must be a dictionary" in str(err.value)

    def test_parse_tool_empty_name_raises_error(self):
        """An empty-string name is rejected."""
        with pytest.raises(ValueError) as err:
            ToolParser.parse_tool({"name": ""})

        assert "required" in str(err.value).lower()

    def test_parse_tool_with_empty_description(self):
        """An empty description normalizes to None."""
        spec = {
            "name": "test_tool",
            "description": "",
            "inputSchema": {},
        }

        tool = ToolParser.parse_tool(spec)

        assert tool.name == "test_tool"
        assert tool.description is None  # Empty description becomes None

    def test_parse_tool_with_whitespace_trimming(self):
        """Both name and description are whitespace-trimmed."""
        spec = {
            "name": " test_tool ",
            "description": " Test description ",
        }

        tool = ToolParser.parse_tool(spec)

        assert tool.name == "test_tool"
        assert tool.description == "Test description"


class TestNormalizeToolSpec:
    """Tests for normalize_tool_spec convenience function."""

    def test_normalize_tool_spec_all_fields(self):
        """Name, description, and schema all provided."""
        tool = normalize_tool_spec(
            name="my_tool",
            description="Does something useful",
            input_schema={"type": "object"},
        )

        assert isinstance(tool, NormalizedTool)
        assert tool.name == "my_tool"
        assert tool.description == "Does something useful"
        assert json.loads(tool.parameters)["type"] == "object"

    def test_normalize_tool_spec_minimal(self):
        """Name only: optional fields default."""
        tool = normalize_tool_spec(name="minimal_tool")

        assert tool.name == "minimal_tool"
        assert tool.description is None
        assert tool.parameters == "{}"

    def test_normalize_tool_spec_with_description_only(self):
        """Name plus description, no schema."""
        tool = normalize_tool_spec(
            name="my_tool",
            description="Has a description",
        )

        assert tool.name == "my_tool"
        assert tool.description == "Has a description"
        assert tool.parameters == "{}"

    def test_normalize_tool_spec_with_schema_only(self):
        """Name plus schema, no description."""
        tool = normalize_tool_spec(
            name="my_tool",
            input_schema={"type": "string"},
        )

        assert tool.name == "my_tool"
        assert tool.description is None
        assert json.loads(tool.parameters)["type"] == "string"

    def test_normalize_tool_spec_empty_name_raises_error(self):
        """An empty name is rejected."""
        with pytest.raises(ValueError):
            normalize_tool_spec(name="")


class TestNormalizedTool:
    """Tests for NormalizedTool dataclass."""

    def test_normalized_tool_creation(self):
        """Fields are stored as given."""
        tool = NormalizedTool(
            name="test_tool",
            description="A test",
            parameters='{"type": "object"}',
        )

        assert tool.name == "test_tool"
        assert tool.description == "A test"
        assert tool.parameters == '{"type": "object"}'

    def test_normalized_tool_optional_fields(self):
        """Optional fields accept None."""
        tool = NormalizedTool(
            name="test_tool",
            description=None,
            parameters=None,
        )

        assert tool.name == "test_tool"
        assert tool.description is None
        assert tool.parameters is None
+""" + +import pytest +from app.models import App, MCPServer, Tool +from app.models.mcp_server import MCPServerKind + + +def test_list_tools_empty(client, db): + """Test listing tools when none exist.""" + response = client.get("/api/tools") + assert response.status_code == 200 + data = response.json() + assert data["total"] == 0 + assert data["items"] == [] + assert data["page"] == 1 + assert data["total_pages"] == 1 + + +def test_list_tools_with_data(client, db, sample_tool): + """Test listing tools with sample data.""" + response = client.get("/api/tools") + assert response.status_code == 200 + data = response.json() + assert data["total"] == 1 + assert len(data["items"]) == 1 + assert data["items"][0]["name"] == sample_tool.name + + +def test_list_tools_filter_by_server(client, db, sample_tool): + """Test filtering tools by MCP server ID.""" + response = client.get(f"/api/tools?mcp_server_id={sample_tool.mcp_server_id}") + assert response.status_code == 200 + data = response.json() + assert data["total"] == 1 + assert data["items"][0]["id"] == sample_tool.id + + +def test_list_tools_filter_by_name(client, db, sample_tool): + """Test filtering tools by name.""" + response = client.get("/api/tools?name=test") + assert response.status_code == 200 + data = response.json() + assert data["total"] == 1 + + +def test_list_tools_search(client, db): + """Test full-text search on tool name and description.""" + # Create app and server + app = App(name="test-app", owner="owner@example.com") + db.add(app) + db.commit() + db.refresh(app) + + server = MCPServer( + app_id=app.id, + server_url="https://example.com/mcp", + kind=MCPServerKind.CUSTOM, + ) + db.add(server) + db.commit() + db.refresh(server) + + # Create tools with different names and descriptions + tool1 = Tool( + mcp_server_id=server.id, + name="search_transcripts", + description="Search through expert call transcripts", + ) + tool2 = Tool( + mcp_server_id=server.id, + name="find_experts", + description="Find 
experts in the database", + ) + tool3 = Tool( + mcp_server_id=server.id, + name="analyze_data", + description="Analyze research data", + ) + db.add_all([tool1, tool2, tool3]) + db.commit() + + # Search for "expert" (matches tool1 description and tool2 name) + response = client.get("/api/tools?search=expert") + assert response.status_code == 200 + data = response.json() + assert data["total"] == 2 + names = {item["name"] for item in data["items"]} + assert names == {"search_transcripts", "find_experts"} + + # Search for "research" (matches tool3 description) + response = client.get("/api/tools?search=research") + assert response.status_code == 200 + data = response.json() + assert data["total"] == 1 + assert data["items"][0]["name"] == "analyze_data" + + +def test_list_tools_filter_by_tags(client, db): + """Test filtering tools by app tags.""" + # Create apps with tags + app1 = App(name="app1", owner="owner1@example.com", tags="research,analysis") + app2 = App(name="app2", owner="owner2@example.com", tags="automation,testing") + db.add_all([app1, app2]) + db.commit() + + # Create servers + server1 = MCPServer( + app_id=app1.id, + server_url="https://app1.example.com/mcp", + kind=MCPServerKind.CUSTOM, + ) + server2 = MCPServer( + app_id=app2.id, + server_url="https://app2.example.com/mcp", + kind=MCPServerKind.CUSTOM, + ) + db.add_all([server1, server2]) + db.commit() + + # Create tools + tool1 = Tool(mcp_server_id=server1.id, name="tool1", description="Research tool") + tool2 = Tool(mcp_server_id=server2.id, name="tool2", description="Automation tool") + db.add_all([tool1, tool2]) + db.commit() + + # Filter by "research" tag + response = client.get("/api/tools?tags=research") + assert response.status_code == 200 + data = response.json() + assert data["total"] == 1 + assert data["items"][0]["name"] == "tool1" + + # Filter by "automation" tag + response = client.get("/api/tools?tags=automation") + assert response.status_code == 200 + data = response.json() + assert 
data["total"] == 1 + assert data["items"][0]["name"] == "tool2" + + # Filter by multiple tags (comma-separated) + response = client.get("/api/tools?tags=research,automation") + assert response.status_code == 200 + data = response.json() + assert data["total"] == 2 + + +def test_list_tools_filter_by_owner(client, db): + """Test filtering tools by app owner.""" + # Create apps with different owners + app1 = App(name="app1", owner="alice@example.com") + app2 = App(name="app2", owner="bob@example.com") + db.add_all([app1, app2]) + db.commit() + + # Create servers + server1 = MCPServer( + app_id=app1.id, + server_url="https://app1.example.com/mcp", + kind=MCPServerKind.CUSTOM, + ) + server2 = MCPServer( + app_id=app2.id, + server_url="https://app2.example.com/mcp", + kind=MCPServerKind.CUSTOM, + ) + db.add_all([server1, server2]) + db.commit() + + # Create tools + tool1 = Tool(mcp_server_id=server1.id, name="alice_tool", description="Alice's tool") + tool2 = Tool(mcp_server_id=server2.id, name="bob_tool", description="Bob's tool") + db.add_all([tool1, tool2]) + db.commit() + + # Filter by Alice's tools + response = client.get("/api/tools?owner=alice") + assert response.status_code == 200 + data = response.json() + assert data["total"] == 1 + assert data["items"][0]["name"] == "alice_tool" + + # Filter by Bob's tools + response = client.get("/api/tools?owner=bob") + assert response.status_code == 200 + data = response.json() + assert data["total"] == 1 + assert data["items"][0]["name"] == "bob_tool" + + +def test_list_tools_combined_filters(client, db): + """Test combining multiple filters.""" + # Create app + app = App(name="app", owner="owner@example.com", tags="research") + db.add(app) + db.commit() + + # Create server + server = MCPServer( + app_id=app.id, + server_url="https://app.example.com/mcp", + kind=MCPServerKind.CUSTOM, + ) + db.add(server) + db.commit() + + # Create tools + tool1 = Tool( + mcp_server_id=server.id, + name="search_experts", + 
description="Search for experts", + ) + tool2 = Tool( + mcp_server_id=server.id, + name="analyze_data", + description="Analyze data", + ) + db.add_all([tool1, tool2]) + db.commit() + + # Combine search + tags + owner + response = client.get("/api/tools?search=expert&tags=research&owner=owner") + assert response.status_code == 200 + data = response.json() + assert data["total"] == 1 + assert data["items"][0]["name"] == "search_experts" + + +def test_list_tools_pagination(client, db): + """Test pagination with filtering.""" + # Create app and server + app = App(name="app", owner="owner@example.com") + db.add(app) + db.commit() + + server = MCPServer( + app_id=app.id, + server_url="https://app.example.com/mcp", + kind=MCPServerKind.CUSTOM, + ) + db.add(server) + db.commit() + + # Create multiple tools + for i in range(15): + tool = Tool( + mcp_server_id=server.id, + name=f"tool_{i}", + description=f"Tool {i}", + ) + db.add(tool) + db.commit() + + # Get first page + response = client.get("/api/tools?page=1&page_size=10") + assert response.status_code == 200 + data = response.json() + assert data["total"] == 15 + assert len(data["items"]) == 10 + assert data["page"] == 1 + assert data["total_pages"] == 2 + + # Get second page + response = client.get("/api/tools?page=2&page_size=10") + assert response.status_code == 200 + data = response.json() + assert data["total"] == 15 + assert len(data["items"]) == 5 + assert data["page"] == 2 + + +def test_get_tool_success(client, db, sample_tool): + """Test getting a specific tool.""" + response = client.get(f"/api/tools/{sample_tool.id}") + assert response.status_code == 200 + data = response.json() + assert data["id"] == sample_tool.id + assert data["name"] == sample_tool.name + + +def test_get_tool_not_found(client, db): + """Test getting a non-existent tool.""" + response = client.get("/api/tools/99999") + assert response.status_code == 404 + assert "not found" in response.json()["detail"].lower() diff --git 
a/dbx-agent-app/app/webapp/.dockerignore b/dbx-agent-app/app/webapp/.dockerignore new file mode 100644 index 00000000..d386a2d2 --- /dev/null +++ b/dbx-agent-app/app/webapp/.dockerignore @@ -0,0 +1,16 @@ +node_modules +dist +.git +.gitignore +.env +.env.local +.env.*.local +npm-debug.log* +yarn-debug.log* +yarn-error.log* +*.md +.vscode +.idea +.DS_Store +coverage +*.log diff --git a/dbx-agent-app/app/webapp/.gitignore b/dbx-agent-app/app/webapp/.gitignore new file mode 100644 index 00000000..3216211a --- /dev/null +++ b/dbx-agent-app/app/webapp/.gitignore @@ -0,0 +1,29 @@ +# Logs +logs +*.log +npm-debug.log* +yarn-debug.log* +yarn-error.log* +pnpm-debug.log* +lerna-debug.log* + +node_modules +dist +dist-ssr +*.local + +# Editor directories and files +.vscode/* +!.vscode/extensions.json +.idea +.DS_Store +*.suo +*.ntvs* +*.njsproj +*.sln +*.sw? + +# Environment files +.env +.env.local +.env.production diff --git a/dbx-agent-app/app/webapp/Dockerfile b/dbx-agent-app/app/webapp/Dockerfile new file mode 100644 index 00000000..168b733d --- /dev/null +++ b/dbx-agent-app/app/webapp/Dockerfile @@ -0,0 +1,37 @@ +# Multi-stage build for Multi-Agent Registry Web App + +# Build stage +FROM node:20-alpine AS builder + +WORKDIR /app + +# Copy package files +COPY package*.json ./ + +# Install dependencies +RUN npm ci + +# Copy source code +COPY . . 
+ +# Build the application +RUN npm run build + +# Production stage with nginx +FROM nginx:alpine + +# Copy build output to nginx +COPY --from=builder /app/dist /usr/share/nginx/html + +# Copy nginx configuration +COPY nginx.conf /etc/nginx/conf.d/default.conf + +# Expose port 80 +EXPOSE 80 + +# Health check +HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \ + CMD wget --quiet --tries=1 --spider http://localhost/ || exit 1 + +# Start nginx +CMD ["nginx", "-g", "daemon off;"] diff --git a/dbx-agent-app/app/webapp/README.md b/dbx-agent-app/app/webapp/README.md new file mode 100644 index 00000000..aaf21939 --- /dev/null +++ b/dbx-agent-app/app/webapp/README.md @@ -0,0 +1,177 @@ +# Multi-Agent Registry - React Webapp + +Production-ready React + Vite application for the Multi-Agent Registry UI. + +## Features + +- **React 19.2** with TypeScript for type safety +- **React Router** for client-side navigation +- **Axios** for API communication +- **Vite** for fast development and optimized builds +- Three main pages: Discover, Collections, and Chat +- API client with error handling and interceptors +- TypeScript types matching backend Pydantic schemas + +## Prerequisites + +- Node.js 20+ +- npm or yarn + +## Installation + +```bash +npm install +``` + +## Development + +Start the development server with hot reload: + +```bash +npm run dev +``` + +The app will be available at http://localhost:3000 + +## Build + +Create production build: + +```bash +npm run build +``` + +Build output will be in the `dist/` directory. 
+ +## Preview Production Build + +Preview the production build locally: + +```bash +npm run preview +``` + +## Project Structure + +``` +webapp/ +├── src/ +│ ├── api/ +│ │ ├── client.ts # Axios clients with interceptors +│ │ ├── registry.ts # Registry API endpoints +│ │ └── supervisor.ts # Supervisor API endpoints +│ ├── components/ +│ │ └── layout/ +│ │ ├── Layout.tsx # Main layout with navigation +│ │ └── Layout.css +│ ├── pages/ +│ │ ├── DiscoverPage.tsx # Browse tools/servers +│ │ ├── CollectionsPage.tsx # Manage collections +│ │ └── ChatPage.tsx # Chat interface +│ ├── types/ +│ │ └── index.ts # TypeScript interfaces +│ ├── App.tsx # Root component with routing +│ ├── App.css # Global styles +│ └── main.tsx # Entry point +├── public/ # Static assets +├── index.html # HTML entry point +├── vite.config.ts # Vite configuration +├── tsconfig.json # TypeScript configuration +└── package.json +``` + +## Environment Configuration + +Create a `.env` file based on `.env.example`: + +```bash +VITE_REGISTRY_API_URL=/api +VITE_SUPERVISOR_URL=/supervisor +VITE_DEBUG=true +``` + +For production deployment: + +```bash +VITE_REGISTRY_API_URL=https:///api/registry-api +VITE_SUPERVISOR_URL=https:///api/supervisor +VITE_DEBUG=false +``` + +## API Client + +The app includes two Axios clients: + +### Registry Client + +Connects to the Registry API for: +- Discovering apps, servers, and tools +- Managing collections +- Generating supervisors + +### Supervisor Client + +Connects to the Supervisor API for: +- Chat interactions +- Trace retrieval +- Real-time event streaming (future) + +## TypeScript Types + +All types in `src/types/index.ts` match the backend Pydantic schemas: + +- `App` - Databricks App metadata +- `MCPServer` - MCP server configuration +- `Tool` - Individual tool definition +- `Collection` - User collection +- `CollectionItem` - Collection membership +- `Message` - Chat message +- `TraceEvent` - Trace event +- `Span` - MLflow span + +## Development Proxy + +Vite 
dev server proxies API requests: + +- `/api/*` → `http://localhost:8000` +- `/supervisor/*` → `http://localhost:8001` + +This avoids CORS issues during development. + +## Next Steps + +### Phase 4.2: Discover Page Enhancement + +- Add search and filter functionality +- Implement card components +- Add detail modals +- Quick actions (add to collection) + +### Phase 4.3: Collections Page Enhancement + +- Collection editor component +- Item selector with search +- Supervisor generation UI +- Drag-and-drop item organization + +### Phase 4.4: Chat Page - Three-Panel Layout + +- Implement three-panel layout +- Add trace timeline with SSE +- Inspector panel for event details +- Real-time streaming support + +## Testing + +```bash +npm run lint +``` + +## Architecture + +Built according to specifications in: +- `docs/architecture/ARCHITECTURE.md` (Part 3: React Webapp) + +## Support + +Part of the Guidepoint Multi-Agent Registry project. diff --git a/dbx-agent-app/app/webapp/app.yaml b/dbx-agent-app/app/webapp/app.yaml new file mode 100644 index 00000000..faf22915 --- /dev/null +++ b/dbx-agent-app/app/webapp/app.yaml @@ -0,0 +1,25 @@ +# Databricks App Configuration for Multi-Agent Registry Web App +# Deploy this app using: databricks apps create + +command: + - "sh" + - "-c" + - "npm run build && node server.js" + +env: + - name: PORT + value: "8000" + - name: REGISTRY_API_URL + value: "https://registry-api-7474660127789418.aws.databricksapps.com" + +resources: + cpu: "1" + memory: "2Gi" + +# Health check endpoint +# The app must respond to HTTP requests on port 8000 +port: 8000 + +# App metadata +name: "multi-agent-registry-ui" +description: "React web interface for multi-agent registry and supervisor management" diff --git a/dbx-agent-app/app/webapp/index.html b/dbx-agent-app/app/webapp/index.html new file mode 100644 index 00000000..25b7d8e2 --- /dev/null +++ b/dbx-agent-app/app/webapp/index.html @@ -0,0 +1,16 @@ + + + + + + + + + + Multi-Agent Registry + + +
+ + + diff --git a/dbx-agent-app/app/webapp/nginx.conf b/dbx-agent-app/app/webapp/nginx.conf new file mode 100644 index 00000000..bfe2b5ee --- /dev/null +++ b/dbx-agent-app/app/webapp/nginx.conf @@ -0,0 +1,63 @@ +server { + listen 80; + server_name _; + + root /usr/share/nginx/html; + index index.html; + + # Enable gzip compression + gzip on; + gzip_vary on; + gzip_min_length 1024; + gzip_types text/plain text/css text/xml text/javascript application/javascript application/json application/xml+rss; + + # SPA fallback - serve index.html for all routes + location / { + try_files $uri $uri/ /index.html; + } + + # API proxy to registry API + location /api/registry-api/ { + proxy_pass http://registry-api:8000/; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + } + + # API proxy to supervisor + location /api/supervisor/ { + proxy_pass http://supervisor:8001/; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + + # SSE support + proxy_set_header Connection ''; + proxy_http_version 1.1; + chunked_transfer_encoding off; + proxy_buffering off; + proxy_cache off; + proxy_read_timeout 86400s; + } + + # Security headers + add_header X-Frame-Options "SAMEORIGIN" always; + add_header X-Content-Type-Options "nosniff" always; + add_header X-XSS-Protection "1; mode=block" always; + add_header Referrer-Policy "strict-origin-when-cross-origin" always; + + # Cache static assets + location ~* \.(jpg|jpeg|png|gif|ico|css|js|svg|woff|woff2|ttf|eot)$ { + expires 1y; + add_header Cache-Control "public, immutable"; + } + + # Health check endpoint + location /health { + access_log off; + return 200 "healthy\n"; + add_header Content-Type text/plain; + } +} diff --git a/dbx-agent-app/app/webapp/package-lock.json 
b/dbx-agent-app/app/webapp/package-lock.json new file mode 100644 index 00000000..3bd583d5 --- /dev/null +++ b/dbx-agent-app/app/webapp/package-lock.json @@ -0,0 +1,6174 @@ +{ + "name": "multi-agent-registry-webapp", + "version": "0.1.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "multi-agent-registry-webapp", + "version": "0.1.0", + "dependencies": { + "@databricks/sql": "^1.12.0", + "@xyflow/react": "^12.10.1", + "axios": "^1.6.0", + "express": "^5.2.1", + "http-proxy-middleware": "^3.0.5", + "react": "^19.2.0", + "react-dom": "^19.2.0", + "react-router-dom": "^6.22.0", + "serve": "^14.2.1" + }, + "devDependencies": { + "@types/react": "^19.2.5", + "@types/react-dom": "^19.2.3", + "@vitejs/plugin-react": "^5.1.1", + "eslint": "^9.39.1", + "eslint-plugin-react-hooks": "^7.0.1", + "typescript": "~5.9.3", + "vite": "^7.2.4" + } + }, + "node_modules/@75lb/deep-merge": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@75lb/deep-merge/-/deep-merge-1.1.2.tgz", + "integrity": "sha512-08K9ou5VNbheZFxM5tDWoqjA3ImC50DiuuJ2tj1yEPRfkp8lLLg6XAaJ4On+a0yAXor/8ay5gHnAIshRM44Kpw==", + "license": "MIT", + "dependencies": { + "lodash": "^4.17.21", + "typical": "^7.1.1" + }, + "engines": { + "node": ">=12.17" + } + }, + "node_modules/@75lb/deep-merge/node_modules/typical": { + "version": "7.3.0", + "resolved": "https://registry.npmjs.org/typical/-/typical-7.3.0.tgz", + "integrity": "sha512-ya4mg/30vm+DOWfBg4YK3j2WD6TWtRkCbasOJr40CseYENzCUby/7rIvXA99JGsQHeNxLbnXdyLLxKSv3tauFw==", + "license": "MIT", + "engines": { + "node": ">=12.17" + } + }, + "node_modules/@babel/code-frame": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.29.0.tgz", + "integrity": "sha512-9NhCeYjq9+3uxgdtp20LSiJXJvN0FeCtNGpJxuMFZ1Kv3cWUNb6DOhJwUvcVCzKGR66cw4njwM6hrJLqgOwbcw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-validator-identifier": "^7.28.5", + "js-tokens": "^4.0.0", + 
"picocolors": "^1.1.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/compat-data": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.29.0.tgz", + "integrity": "sha512-T1NCJqT/j9+cn8fvkt7jtwbLBfLC/1y1c7NtCeXFRgzGTsafi68MRv8yzkYSapBnFA6L3U2VSc02ciDzoAJhJg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.29.0.tgz", + "integrity": "sha512-CGOfOJqWjg2qW/Mb6zNsDm+u5vFQ8DxXfbM09z69p5Z6+mE1ikP2jUXw+j42Pf1XTYED2Rni5f95npYeuwMDQA==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "@babel/code-frame": "^7.29.0", + "@babel/generator": "^7.29.0", + "@babel/helper-compilation-targets": "^7.28.6", + "@babel/helper-module-transforms": "^7.28.6", + "@babel/helpers": "^7.28.6", + "@babel/parser": "^7.29.0", + "@babel/template": "^7.28.6", + "@babel/traverse": "^7.29.0", + "@babel/types": "^7.29.0", + "@jridgewell/remapping": "^2.3.5", + "convert-source-map": "^2.0.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.3", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/babel" + } + }, + "node_modules/@babel/generator": { + "version": "7.29.1", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.29.1.tgz", + "integrity": "sha512-qsaF+9Qcm2Qv8SRIMMscAvG4O3lJ0F1GuMo5HR/Bp02LopNgnZBC/EkbevHFeGs4ls/oPz9v+Bsmzbkbe+0dUw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.29.0", + "@babel/types": "^7.29.0", + "@jridgewell/gen-mapping": "^0.3.12", + "@jridgewell/trace-mapping": "^0.3.28", + "jsesc": "^3.0.2" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets": { + "version": "7.28.6", + "resolved": 
"https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.28.6.tgz", + "integrity": "sha512-JYtls3hqi15fcx5GaSNL7SCTJ2MNmjrkHXg4FSpOA/grxK8KwyZ5bubHsCq8FXCkua6xhuaaBit+3b7+VZRfcA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/compat-data": "^7.28.6", + "@babel/helper-validator-option": "^7.27.1", + "browserslist": "^4.24.0", + "lru-cache": "^5.1.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-globals": { + "version": "7.28.0", + "resolved": "https://registry.npmjs.org/@babel/helper-globals/-/helper-globals-7.28.0.tgz", + "integrity": "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-imports": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.28.6.tgz", + "integrity": "sha512-l5XkZK7r7wa9LucGw9LwZyyCUscb4x37JWTPz7swwFE/0FMQAGpiWUZn8u9DzkSBWEcK25jmvubfpw2dnAMdbw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/traverse": "^7.28.6", + "@babel/types": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-transforms": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.28.6.tgz", + "integrity": "sha512-67oXFAYr2cDLDVGLXTEABjdBJZ6drElUSI7WKp70NrpyISso3plG9SAGEF6y7zbha/wOzUByWWTJvEDVNIUGcA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-module-imports": "^7.28.6", + "@babel/helper-validator-identifier": "^7.28.5", + "@babel/traverse": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-plugin-utils": { + "version": "7.28.6", + "resolved": 
"https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.28.6.tgz", + "integrity": "sha512-S9gzZ/bz83GRysI7gAD4wPT/AI3uCnY+9xn+Mx/KPs2JwHJIz1W8PZkg2cqyt3RNOBM8ejcXhV6y8Og7ly/Dug==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-string-parser": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz", + "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz", + "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-option": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.27.1.tgz", + "integrity": "sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helpers": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.28.6.tgz", + "integrity": "sha512-xOBvwq86HHdB7WUDTfKfT/Vuxh7gElQ+Sfti2Cy6yIWNW05P8iUslOVcZ4/sKbE+/jQaukQAdz/gf3724kYdqw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/template": "^7.28.6", + "@babel/types": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/parser": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.29.0.tgz", + "integrity": 
"sha512-IyDgFV5GeDUVX4YdF/3CPULtVGSXXMLh1xVIgdCgxApktqnQV0r7/8Nqthg+8YLGaAtdyIlo2qIdZrbCv4+7ww==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.29.0" + }, + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx-self": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.27.1.tgz", + "integrity": "sha512-6UzkCs+ejGdZ5mFFC/OCUrv028ab2fp1znZmCZjAOBKiBK2jXD1O+BPSfX8X2qjJ75fZBMSnQn3Rq2mrBJK2mw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx-source": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.27.1.tgz", + "integrity": "sha512-zbwoTsBruTeKB9hSq73ha66iFeJHuaFkUbwvqElnygoNbj/jHRsSeokowZFN3CZ64IvEqcmmkVe89OPXc7ldAw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/template": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.28.6.tgz", + "integrity": "sha512-YA6Ma2KsCdGb+WC6UpBVFJGXL58MDA6oyONbjyF/+5sBgxY/dwkhLogbMT2GXXyU84/IhRw/2D1Os1B/giz+BQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.28.6", + "@babel/parser": "^7.28.6", + "@babel/types": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/traverse": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.29.0.tgz", + "integrity": 
"sha512-4HPiQr0X7+waHfyXPZpWPfWL/J7dcN1mx9gL6WdQVMbPnF3+ZhSMs8tCxN7oHddJE9fhNE7+lxdnlyemKfJRuA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.29.0", + "@babel/generator": "^7.29.0", + "@babel/helper-globals": "^7.28.0", + "@babel/parser": "^7.29.0", + "@babel/template": "^7.28.6", + "@babel/types": "^7.29.0", + "debug": "^4.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/types": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.29.0.tgz", + "integrity": "sha512-LwdZHpScM4Qz8Xw2iKSzS+cfglZzJGvofQICy7W7v4caru4EaAmyUuO6BGrbyQ2mYV11W0U8j5mBhd14dd3B0A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-string-parser": "^7.27.1", + "@babel/helper-validator-identifier": "^7.28.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@colors/colors": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/@colors/colors/-/colors-1.6.0.tgz", + "integrity": "sha512-Ir+AOibqzrIsL6ajt3Rz3LskB7OiMVHqltZmspbW/TJuTVuyOMirVqAkjfY6JISiLHgyNqicAC8AyHHGzNd/dA==", + "license": "MIT", + "engines": { + "node": ">=0.1.90" + } + }, + "node_modules/@dabh/diagnostics": { + "version": "2.0.8", + "resolved": "https://registry.npmjs.org/@dabh/diagnostics/-/diagnostics-2.0.8.tgz", + "integrity": "sha512-R4MSXTVnuMzGD7bzHdW2ZhhdPC/igELENcq5IjEverBvq5hn1SXCWcsi6eSsdWP0/Ur+SItRRjAktmdoX/8R/Q==", + "license": "MIT", + "dependencies": { + "@so-ric/colorspace": "^1.1.6", + "enabled": "2.0.x", + "kuler": "^2.0.0" + } + }, + "node_modules/@databricks/sql": { + "version": "1.12.0", + "resolved": "https://registry.npmjs.org/@databricks/sql/-/sql-1.12.0.tgz", + "integrity": "sha512-bCUoHg2/mNgOXkYTooF1m0wyO57yPJPo44wOTXOlA9CREbXCzP1RMuUVa3GfXKzGifd52nNEPHyKeZvrbJBLnA==", + "license": "Apache 2.0", + "dependencies": { + "apache-arrow": "^13.0.0", + "commander": "^9.3.0", + "node-fetch": "^2.6.12", + "node-int64": "^0.4.0", + "open": "^8.4.2", + 
"openid-client": "^5.4.2", + "proxy-agent": "^6.3.1", + "thrift": "^0.16.0", + "uuid": "^9.0.0", + "winston": "^3.8.2" + }, + "engines": { + "node": ">=14.0.0" + }, + "optionalDependencies": { + "lz4": "^0.6.5" + } + }, + "node_modules/@esbuild/aix-ppc64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.27.3.tgz", + "integrity": "sha512-9fJMTNFTWZMh5qwrBItuziu834eOCUcEqymSH7pY+zoMVEZg3gcPuBNxH1EvfVYe9h0x/Ptw8KBzv7qxb7l8dg==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.27.3.tgz", + "integrity": "sha512-i5D1hPY7GIQmXlXhs2w8AWHhenb00+GxjxRncS2ZM7YNVGNfaMxgzSGuO8o8SJzRc/oZwU2bcScvVERk03QhzA==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.27.3.tgz", + "integrity": "sha512-YdghPYUmj/FX2SYKJ0OZxf+iaKgMsKHVPF1MAq/P8WirnSpCStzKJFjOjzsW0QQ7oIAiccHdcqjbHmJxRb/dmg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-x64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.27.3.tgz", + "integrity": "sha512-IN/0BNTkHtk8lkOM8JWAYFg4ORxBkZQf9zXiEOfERX/CzxW3Vg1ewAhU7QSWQpVIzTW+b8Xy+lGzdYXV6UZObQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.27.3", + "resolved": 
"https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.27.3.tgz", + "integrity": "sha512-Re491k7ByTVRy0t3EKWajdLIr0gz2kKKfzafkth4Q8A5n1xTHrkqZgLLjFEHVD+AXdUGgQMq+Godfq45mGpCKg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-x64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.27.3.tgz", + "integrity": "sha512-vHk/hA7/1AckjGzRqi6wbo+jaShzRowYip6rt6q7VYEDX4LEy1pZfDpdxCBnGtl+A5zq8iXDcyuxwtv3hNtHFg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.27.3.tgz", + "integrity": "sha512-ipTYM2fjt3kQAYOvo6vcxJx3nBYAzPjgTCk7QEgZG8AUO3ydUhvelmhrbOheMnGOlaSFUoHXB6un+A7q4ygY9w==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.27.3.tgz", + "integrity": "sha512-dDk0X87T7mI6U3K9VjWtHOXqwAMJBNN2r7bejDsc+j03SEjtD9HrOl8gVFByeM0aJksoUuUVU9TBaZa2rgj0oA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.27.3.tgz", + "integrity": "sha512-s6nPv2QkSupJwLYyfS+gwdirm0ukyTFNl3KTgZEAiJDd+iHZcbTPPcWCcRYH+WlNbwChgH2QkE9NSlNrMT8Gfw==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + 
"node_modules/@esbuild/linux-arm64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.27.3.tgz", + "integrity": "sha512-sZOuFz/xWnZ4KH3YfFrKCf1WyPZHakVzTiqji3WDc0BCl2kBwiJLCXpzLzUBLgmp4veFZdvN5ChW4Eq/8Fc2Fg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.27.3.tgz", + "integrity": "sha512-yGlQYjdxtLdh0a3jHjuwOrxQjOZYD/C9PfdbgJJF3TIZWnm/tMd/RcNiLngiu4iwcBAOezdnSLAwQDPqTmtTYg==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.27.3.tgz", + "integrity": "sha512-WO60Sn8ly3gtzhyjATDgieJNet/KqsDlX5nRC5Y3oTFcS1l0KWba+SEa9Ja1GfDqSF1z6hif/SkpQJbL63cgOA==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.27.3.tgz", + "integrity": "sha512-APsymYA6sGcZ4pD6k+UxbDjOFSvPWyZhjaiPyl/f79xKxwTnrn5QUnXR5prvetuaSMsb4jgeHewIDCIWljrSxw==", + "cpu": [ + "mips64el" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.27.3.tgz", + "integrity": "sha512-eizBnTeBefojtDb9nSh4vvVQ3V9Qf9Df01PfawPcRzJH4gFSgrObw+LveUyDoKU3kxi5+9RJTCWlj4FjYXVPEA==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": 
"MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.27.3.tgz", + "integrity": "sha512-3Emwh0r5wmfm3ssTWRQSyVhbOHvqegUDRd0WhmXKX2mkHJe1SFCMJhagUleMq+Uci34wLSipf8Lagt4LlpRFWQ==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.27.3.tgz", + "integrity": "sha512-pBHUx9LzXWBc7MFIEEL0yD/ZVtNgLytvx60gES28GcWMqil8ElCYR4kvbV2BDqsHOvVDRrOxGySBM9Fcv744hw==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-x64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.27.3.tgz", + "integrity": "sha512-Czi8yzXUWIQYAtL/2y6vogER8pvcsOsk5cpwL4Gk5nJqH5UZiVByIY8Eorm5R13gq+DQKYg0+JyQoytLQas4dA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-arm64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.27.3.tgz", + "integrity": "sha512-sDpk0RgmTCR/5HguIZa9n9u+HVKf40fbEUt+iTzSnCaGvY9kFP0YKBWZtJaraonFnqef5SlJ8/TiPAxzyS+UoA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.27.3.tgz", + "integrity": 
"sha512-P14lFKJl/DdaE00LItAukUdZO5iqNH7+PjoBm+fLQjtxfcfFE20Xf5CrLsmZdq5LFFZzb5JMZ9grUwvtVYzjiA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-arm64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.27.3.tgz", + "integrity": "sha512-AIcMP77AvirGbRl/UZFTq5hjXK+2wC7qFRGoHSDrZ5v5b8DK/GYpXW3CPRL53NkvDqb9D+alBiC/dV0Fb7eJcw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.27.3.tgz", + "integrity": "sha512-DnW2sRrBzA+YnE70LKqnM3P+z8vehfJWHXECbwBmH/CU51z6FiqTQTHFenPlHmo3a8UgpLyH3PT+87OViOh1AQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openharmony-arm64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.27.3.tgz", + "integrity": "sha512-NinAEgr/etERPTsZJ7aEZQvvg/A6IsZG/LgZy+81wON2huV7SrK3e63dU0XhyZP4RKGyTm7aOgmQk0bGp0fy2g==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.27.3.tgz", + "integrity": "sha512-PanZ+nEz+eWoBJ8/f8HKxTTD172SKwdXebZ0ndd953gt1HRBbhMsaNqjTyYLGLPdoWHy4zLU7bDVJztF5f3BHA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.27.3", + 
"resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.27.3.tgz", + "integrity": "sha512-B2t59lWWYrbRDw/tjiWOuzSsFh1Y/E95ofKz7rIVYSQkUYBjfSgf6oeYPNWHToFRr2zx52JKApIcAS/D5TUBnA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.27.3.tgz", + "integrity": "sha512-QLKSFeXNS8+tHW7tZpMtjlNb7HKau0QDpwm49u0vUp9y1WOF+PEzkU84y9GqYaAVW8aH8f3GcBck26jh54cX4Q==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.27.3.tgz", + "integrity": "sha512-4uJGhsxuptu3OcpVAzli+/gWusVGwZZHTlS63hh++ehExkVT8SgiEf7/uC/PclrPPkLhZqGgCTjd0VWLo6xMqA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@eslint-community/eslint-utils": { + "version": "4.9.1", + "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.9.1.tgz", + "integrity": "sha512-phrYmNiYppR7znFEdqgfWHXR6NCkZEK7hwWDHZUjit/2/U0r6XvkDl0SYnoM51Hq7FhCGdLDT6zxCCOY1hexsQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "eslint-visitor-keys": "^3.4.3" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" + } + }, + "node_modules/@eslint-community/eslint-utils/node_modules/eslint-visitor-keys": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", + "integrity": 
"sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@eslint-community/regexpp": { + "version": "4.12.2", + "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.12.2.tgz", + "integrity": "sha512-EriSTlt5OC9/7SXkRSCAhfSxxoSUgBm33OH+IkwbdpgoqsSsUg7y3uh+IICI/Qg4BBWr3U2i39RpmycbxMq4ew==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.0.0 || ^14.0.0 || >=16.0.0" + } + }, + "node_modules/@eslint/config-array": { + "version": "0.21.1", + "resolved": "https://registry.npmjs.org/@eslint/config-array/-/config-array-0.21.1.tgz", + "integrity": "sha512-aw1gNayWpdI/jSYVgzN5pL0cfzU02GT3NBpeT/DXbx1/1x7ZKxFPd9bwrzygx/qiwIQiJ1sw/zD8qY/kRvlGHA==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@eslint/object-schema": "^2.1.7", + "debug": "^4.3.1", + "minimatch": "^3.1.2" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/config-helpers": { + "version": "0.4.2", + "resolved": "https://registry.npmjs.org/@eslint/config-helpers/-/config-helpers-0.4.2.tgz", + "integrity": "sha512-gBrxN88gOIf3R7ja5K9slwNayVcZgK6SOUORm2uBzTeIEfeVaIhOpCtTox3P6R7o2jLFwLFTLnC7kU/RGcYEgw==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@eslint/core": "^0.17.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/core": { + "version": "0.17.0", + "resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.17.0.tgz", + "integrity": "sha512-yL/sLrpmtDaFEiUj1osRP4TI2MDz1AddJL+jZ7KSqvBuliN4xqYY54IfdN8qD8Toa6g1iloph1fxQNkjOxrrpQ==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@types/json-schema": "^7.0.15" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + 
"node_modules/@eslint/eslintrc": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-3.3.3.tgz", + "integrity": "sha512-Kr+LPIUVKz2qkx1HAMH8q1q6azbqBAsXJUxBl/ODDuVPX45Z9DfwB8tPjTi6nNZ8BuM3nbJxC5zCAg5elnBUTQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ajv": "^6.12.4", + "debug": "^4.3.2", + "espree": "^10.0.1", + "globals": "^14.0.0", + "ignore": "^5.2.0", + "import-fresh": "^3.2.1", + "js-yaml": "^4.1.1", + "minimatch": "^3.1.2", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@eslint/js": { + "version": "9.39.2", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.39.2.tgz", + "integrity": "sha512-q1mjIoW1VX4IvSocvM/vbTiveKC4k9eLrajNEuSsmjymSDEbpGddtpfOoN7YGAqBK3NG+uqo8ia4PDTt8buCYA==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://eslint.org/donate" + } + }, + "node_modules/@eslint/object-schema": { + "version": "2.1.7", + "resolved": "https://registry.npmjs.org/@eslint/object-schema/-/object-schema-2.1.7.tgz", + "integrity": "sha512-VtAOaymWVfZcmZbp6E2mympDIHvyjXs/12LqWYjVw6qjrfF+VK+fyG33kChz3nnK+SU5/NeHOqrTEHS8sXO3OA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/plugin-kit": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/@eslint/plugin-kit/-/plugin-kit-0.4.1.tgz", + "integrity": "sha512-43/qtrDUokr7LJqoF2c3+RInu/t4zfrpYdoSDfYyhg52rwLV6TnOvdG4fXm7IkSB3wErkcmJS9iEhjVtOSEjjA==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@eslint/core": "^0.17.0", + "levn": "^0.4.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@humanfs/core": { + "version": "0.19.1", + "resolved": 
"https://registry.npmjs.org/@humanfs/core/-/core-0.19.1.tgz", + "integrity": "sha512-5DyQ4+1JEUzejeK1JGICcideyfUbGixgS9jNgex5nqkW+cY7WZhxBigmieN5Qnw9ZosSNVC9KQKyb+GUaGyKUA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=18.18.0" + } + }, + "node_modules/@humanfs/node": { + "version": "0.16.7", + "resolved": "https://registry.npmjs.org/@humanfs/node/-/node-0.16.7.tgz", + "integrity": "sha512-/zUx+yOsIrG4Y43Eh2peDeKCxlRt/gET6aHfaKpuq267qXdYDFViVHfMaLyygZOnl0kGWxFIgsBy8QFuTLUXEQ==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@humanfs/core": "^0.19.1", + "@humanwhocodes/retry": "^0.4.0" + }, + "engines": { + "node": ">=18.18.0" + } + }, + "node_modules/@humanwhocodes/module-importer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz", + "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=12.22" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, + "node_modules/@humanwhocodes/retry": { + "version": "0.4.3", + "resolved": "https://registry.npmjs.org/@humanwhocodes/retry/-/retry-0.4.3.tgz", + "integrity": "sha512-bV0Tgo9K4hfPCek+aMAn81RppFKv2ySDQeMoSZuvTASywNTnVJCArCZE2FWqpvIatKu7VMRLWlR1EazvVhDyhQ==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=18.18" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.13", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz", + "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.0", + "@jridgewell/trace-mapping": "^0.3.24" + } 
+ }, + "node_modules/@jridgewell/remapping": { + "version": "2.3.5", + "resolved": "https://registry.npmjs.org/@jridgewell/remapping/-/remapping-2.3.5.tgz", + "integrity": "sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", + "dev": true, + "license": "MIT" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.31", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz", + "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@remix-run/router": { + "version": "1.23.2", + "resolved": "https://registry.npmjs.org/@remix-run/router/-/router-1.23.2.tgz", + "integrity": "sha512-Ic6m2U/rMjTkhERIa/0ZtXJP17QUi2CbWE7cqx4J58M8aA3QTfW+2UlQ4psvTX9IO1RfNVhK3pcpdjej7L+t2w==", + "license": "MIT", + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/@rolldown/pluginutils": { + "version": "1.0.0-rc.3", + "resolved": 
"https://registry.npmjs.org/@rolldown/pluginutils/-/pluginutils-1.0.0-rc.3.tgz", + "integrity": "sha512-eybk3TjzzzV97Dlj5c+XrBFW57eTNhzod66y9HrBlzJ6NsCrWCp/2kaPS3K9wJmurBC0Tdw4yPjXKZqlznim3Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/@rollup/rollup-android-arm-eabi": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.57.1.tgz", + "integrity": "sha512-A6ehUVSiSaaliTxai040ZpZ2zTevHYbvu/lDoeAteHI8QnaosIzm4qwtezfRg1jOYaUmnzLX1AOD6Z+UJjtifg==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-android-arm64": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.57.1.tgz", + "integrity": "sha512-dQaAddCY9YgkFHZcFNS/606Exo8vcLHwArFZ7vxXq4rigo2bb494/xKMMwRRQW6ug7Js6yXmBZhSBRuBvCCQ3w==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-darwin-arm64": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.57.1.tgz", + "integrity": "sha512-crNPrwJOrRxagUYeMn/DZwqN88SDmwaJ8Cvi/TN1HnWBU7GwknckyosC2gd0IqYRsHDEnXf328o9/HC6OkPgOg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-darwin-x64": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.57.1.tgz", + "integrity": "sha512-Ji8g8ChVbKrhFtig5QBV7iMaJrGtpHelkB3lsaKzadFBe58gmjfGXAOfI5FV0lYMH8wiqsxKQ1C9B0YTRXVy4w==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-freebsd-arm64": { + "version": "4.57.1", + "resolved": 
"https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.57.1.tgz", + "integrity": "sha512-R+/WwhsjmwodAcz65guCGFRkMb4gKWTcIeLy60JJQbXrJ97BOXHxnkPFrP+YwFlaS0m+uWJTstrUA9o+UchFug==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-freebsd-x64": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.57.1.tgz", + "integrity": "sha512-IEQTCHeiTOnAUC3IDQdzRAGj3jOAYNr9kBguI7MQAAZK3caezRrg0GxAb6Hchg4lxdZEI5Oq3iov/w/hnFWY9Q==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-linux-arm-gnueabihf": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.57.1.tgz", + "integrity": "sha512-F8sWbhZ7tyuEfsmOxwc2giKDQzN3+kuBLPwwZGyVkLlKGdV1nvnNwYD0fKQ8+XS6hp9nY7B+ZeK01EBUE7aHaw==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm-musleabihf": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.57.1.tgz", + "integrity": "sha512-rGfNUfn0GIeXtBP1wL5MnzSj98+PZe/AXaGBCRmT0ts80lU5CATYGxXukeTX39XBKsxzFpEeK+Mrp9faXOlmrw==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-gnu": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.57.1.tgz", + "integrity": "sha512-MMtej3YHWeg/0klK2Qodf3yrNzz6CGjo2UntLvk2RSPlhzgLvYEB3frRvbEF2wRKh1Z2fDIg9KRPe1fawv7C+g==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + 
"node_modules/@rollup/rollup-linux-arm64-musl": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.57.1.tgz", + "integrity": "sha512-1a/qhaaOXhqXGpMFMET9VqwZakkljWHLmZOX48R0I/YLbhdxr1m4gtG1Hq7++VhVUmf+L3sTAf9op4JlhQ5u1Q==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loong64-gnu": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.57.1.tgz", + "integrity": "sha512-QWO6RQTZ/cqYtJMtxhkRkidoNGXc7ERPbZN7dVW5SdURuLeVU7lwKMpo18XdcmpWYd0qsP1bwKPf7DNSUinhvA==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loong64-musl": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.57.1.tgz", + "integrity": "sha512-xpObYIf+8gprgWaPP32xiN5RVTi/s5FCR+XMXSKmhfoJjrpRAjCuuqQXyxUa/eJTdAE6eJ+KDKaoEqjZQxh3Gw==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-ppc64-gnu": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.57.1.tgz", + "integrity": "sha512-4BrCgrpZo4hvzMDKRqEaW1zeecScDCR+2nZ86ATLhAoJ5FQ+lbHVD3ttKe74/c7tNT9c6F2viwB3ufwp01Oh2w==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-ppc64-musl": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.57.1.tgz", + "integrity": "sha512-NOlUuzesGauESAyEYFSe3QTUguL+lvrN1HtwEEsU2rOwdUDeTMJdO5dUYl/2hKf9jWydJrO9OL/XSSf65R5+Xw==", + "cpu": [ + "ppc64" + ], + "dev": true, 
+ "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-gnu": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.57.1.tgz", + "integrity": "sha512-ptA88htVp0AwUUqhVghwDIKlvJMD/fmL/wrQj99PRHFRAG6Z5nbWoWG4o81Nt9FT+IuqUQi+L31ZKAFeJ5Is+A==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-musl": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.57.1.tgz", + "integrity": "sha512-S51t7aMMTNdmAMPpBg7OOsTdn4tySRQvklmL3RpDRyknk87+Sp3xaumlatU+ppQ+5raY7sSTcC2beGgvhENfuw==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-s390x-gnu": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.57.1.tgz", + "integrity": "sha512-Bl00OFnVFkL82FHbEqy3k5CUCKH6OEJL54KCyx2oqsmZnFTR8IoNqBF+mjQVcRCT5sB6yOvK8A37LNm/kPJiZg==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-gnu": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.57.1.tgz", + "integrity": "sha512-ABca4ceT4N+Tv/GtotnWAeXZUZuM/9AQyCyKYyKnpk4yoA7QIAuBt6Hkgpw8kActYlew2mvckXkvx0FfoInnLg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-musl": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.57.1.tgz", + "integrity": 
"sha512-HFps0JeGtuOR2convgRRkHCekD7j+gdAuXM+/i6kGzQtFhlCtQkpwtNzkNj6QhCDp7DRJ7+qC/1Vg2jt5iSOFw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-openbsd-x64": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.57.1.tgz", + "integrity": "sha512-H+hXEv9gdVQuDTgnqD+SQffoWoc0Of59AStSzTEj/feWTBAnSfSD3+Dql1ZruJQxmykT/JVY0dE8Ka7z0DH1hw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ] + }, + "node_modules/@rollup/rollup-openharmony-arm64": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.57.1.tgz", + "integrity": "sha512-4wYoDpNg6o/oPximyc/NG+mYUejZrCU2q+2w6YZqrAs2UcNUChIZXjtafAiiZSUc7On8v5NyNj34Kzj/Ltk6dQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ] + }, + "node_modules/@rollup/rollup-win32-arm64-msvc": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.57.1.tgz", + "integrity": "sha512-O54mtsV/6LW3P8qdTcamQmuC990HDfR71lo44oZMZlXU4tzLrbvTii87Ni9opq60ds0YzuAlEr/GNwuNluZyMQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-ia32-msvc": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.57.1.tgz", + "integrity": "sha512-P3dLS+IerxCT/7D2q2FYcRdWRl22dNbrbBEtxdWhXrfIMPP9lQhb5h4Du04mdl5Woq05jVCDPCMF7Ub0NAjIew==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-gnu": { + "version": "4.57.1", + "resolved": 
"https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.57.1.tgz", + "integrity": "sha512-VMBH2eOOaKGtIJYleXsi2B8CPVADrh+TyNxJ4mWPnKfLB/DBUmzW+5m1xUrcwWoMfSLagIRpjUFeW5CO5hyciQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-msvc": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.57.1.tgz", + "integrity": "sha512-mxRFDdHIWRxg3UfIIAwCm6NzvxG0jDX/wBN6KsQFTvKFqqg9vTrWUE68qEjHt19A5wwx5X5aUi2zuZT7YR0jrA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@so-ric/colorspace": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/@so-ric/colorspace/-/colorspace-1.1.6.tgz", + "integrity": "sha512-/KiKkpHNOBgkFJwu9sh48LkHSMYGyuTcSFK/qMBdnOAlrRJzRSXAOFB5qwzaVQuDl8wAvHVMkaASQDReTahxuw==", + "license": "MIT", + "dependencies": { + "color": "^5.0.2", + "text-hex": "1.0.x" + } + }, + "node_modules/@tootallnate/quickjs-emscripten": { + "version": "0.23.0", + "resolved": "https://registry.npmjs.org/@tootallnate/quickjs-emscripten/-/quickjs-emscripten-0.23.0.tgz", + "integrity": "sha512-C5Mc6rdnsaJDjO3UpGW/CQTHtCKaYlScZTly4JIu97Jxo/odCiH0ITnDXSJPTOrEKk/ycSZ0AOgTmkDtkOsvIA==", + "license": "MIT" + }, + "node_modules/@types/babel__core": { + "version": "7.20.5", + "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.5.tgz", + "integrity": "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.20.7", + "@babel/types": "^7.20.7", + "@types/babel__generator": "*", + "@types/babel__template": "*", + "@types/babel__traverse": "*" + } + }, + "node_modules/@types/babel__generator": { + "version": "7.27.0", + "resolved": 
"https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.27.0.tgz", + "integrity": "sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__template": { + "version": "7.4.4", + "resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.4.tgz", + "integrity": "sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.1.0", + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__traverse": { + "version": "7.28.0", + "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.28.0.tgz", + "integrity": "sha512-8PvcXf70gTDZBgt9ptxJ8elBeBjcLOAcOtoO/mPJjtji1+CdGbHgm77om1GrsPxsiE+uXIpNSK64UYaIwQXd4Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.28.2" + } + }, + "node_modules/@types/command-line-args": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/@types/command-line-args/-/command-line-args-5.2.0.tgz", + "integrity": "sha512-UuKzKpJJ/Ief6ufIaIzr3A/0XnluX7RvFgwkV89Yzvm77wCh1kFaFmqN8XEnGcN62EuHdedQjEMb8mYxFLGPyA==", + "license": "MIT" + }, + "node_modules/@types/command-line-usage": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/@types/command-line-usage/-/command-line-usage-5.0.2.tgz", + "integrity": "sha512-n7RlEEJ+4x4TS7ZQddTmNSxP+zziEG0TNsMfiRIxcIVXt71ENJ9ojeXmGO3wPoTdn7pJcU2xc3CJYMktNT6DPg==", + "license": "MIT" + }, + "node_modules/@types/d3-color": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/@types/d3-color/-/d3-color-3.1.3.tgz", + "integrity": "sha512-iO90scth9WAbmgv7ogoq57O9YpKmFBbmoEoCHDB2xMBY0+/KVrqAaCDyCE16dUspeOvIxFFRI+0sEtqDqy2b4A==", + "license": "MIT" + }, + "node_modules/@types/d3-drag": { + "version": "3.0.7", + 
"resolved": "https://registry.npmjs.org/@types/d3-drag/-/d3-drag-3.0.7.tgz", + "integrity": "sha512-HE3jVKlzU9AaMazNufooRJ5ZpWmLIoc90A37WU2JMmeq28w1FQqCZswHZ3xR+SuxYftzHq6WU6KJHvqxKzTxxQ==", + "license": "MIT", + "dependencies": { + "@types/d3-selection": "*" + } + }, + "node_modules/@types/d3-interpolate": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/d3-interpolate/-/d3-interpolate-3.0.4.tgz", + "integrity": "sha512-mgLPETlrpVV1YRJIglr4Ez47g7Yxjl1lj7YKsiMCb27VJH9W8NVM6Bb9d8kkpG/uAQS5AmbA48q2IAolKKo1MA==", + "license": "MIT", + "dependencies": { + "@types/d3-color": "*" + } + }, + "node_modules/@types/d3-selection": { + "version": "3.0.11", + "resolved": "https://registry.npmjs.org/@types/d3-selection/-/d3-selection-3.0.11.tgz", + "integrity": "sha512-bhAXu23DJWsrI45xafYpkQ4NtcKMwWnAC/vKrd2l+nxMFuvOT3XMYTIj2opv8vq8AO5Yh7Qac/nSeP/3zjTK0w==", + "license": "MIT" + }, + "node_modules/@types/d3-transition": { + "version": "3.0.9", + "resolved": "https://registry.npmjs.org/@types/d3-transition/-/d3-transition-3.0.9.tgz", + "integrity": "sha512-uZS5shfxzO3rGlu0cC3bjmMFKsXv+SmZZcgp0KD22ts4uGXp5EVYGzu/0YdwZeKmddhcAccYtREJKkPfXkZuCg==", + "license": "MIT", + "dependencies": { + "@types/d3-selection": "*" + } + }, + "node_modules/@types/d3-zoom": { + "version": "3.0.8", + "resolved": "https://registry.npmjs.org/@types/d3-zoom/-/d3-zoom-3.0.8.tgz", + "integrity": "sha512-iqMC4/YlFCSlO8+2Ii1GGGliCAY4XdeG748w5vQUbevlbDu0zSjH/+jojorQVBK/se0j6DUFNPBGSqD3YWYnDw==", + "license": "MIT", + "dependencies": { + "@types/d3-interpolate": "*", + "@types/d3-selection": "*" + } + }, + "node_modules/@types/estree": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", + "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/http-proxy": { + "version": "1.17.17", + "resolved": 
"https://registry.npmjs.org/@types/http-proxy/-/http-proxy-1.17.17.tgz", + "integrity": "sha512-ED6LB+Z1AVylNTu7hdzuBqOgMnvG/ld6wGCG8wFnAzKX5uyW2K3WD52v0gnLCTK/VLpXtKckgWuyScYK6cSPaw==", + "license": "MIT", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/json-schema": { + "version": "7.0.15", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", + "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/node": { + "version": "25.3.0", + "resolved": "https://registry.npmjs.org/@types/node/-/node-25.3.0.tgz", + "integrity": "sha512-4K3bqJpXpqfg2XKGK9bpDTc6xO/xoUP/RBWS7AtRMug6zZFaRekiLzjVtAoZMquxoAbzBvy5nxQ7veS5eYzf8A==", + "license": "MIT", + "dependencies": { + "undici-types": "~7.18.0" + } + }, + "node_modules/@types/pad-left": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/@types/pad-left/-/pad-left-2.1.1.tgz", + "integrity": "sha512-Xd22WCRBydkGSApl5Bw0PhAOHKSVjNL3E3AwzKaps96IMraPqy5BvZIsBVK6JLwdybUzjHnuWVwpDd0JjTfHXA==", + "license": "MIT" + }, + "node_modules/@types/react": { + "version": "19.2.13", + "resolved": "https://registry.npmjs.org/@types/react/-/react-19.2.13.tgz", + "integrity": "sha512-KkiJeU6VbYbUOp5ITMIc7kBfqlYkKA5KhEHVrGMmUUMt7NeaZg65ojdPk+FtNrBAOXNVM5QM72jnADjM+XVRAQ==", + "devOptional": true, + "license": "MIT", + "peer": true, + "dependencies": { + "csstype": "^3.2.2" + } + }, + "node_modules/@types/react-dom": { + "version": "19.2.3", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-19.2.3.tgz", + "integrity": "sha512-jp2L/eY6fn+KgVVQAOqYItbF0VY/YApe5Mz2F0aykSO8gx31bYCZyvSeYxCHKvzHG5eZjc+zyaS5BrBWya2+kQ==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "@types/react": "^19.2.0" + } + }, + "node_modules/@types/triple-beam": { + "version": "1.3.5", + "resolved": 
"https://registry.npmjs.org/@types/triple-beam/-/triple-beam-1.3.5.tgz", + "integrity": "sha512-6WaYesThRMCl19iryMYP7/x2OVgCtbIVflDGFpWnb9irXI3UjYE4AzmYuiUKY1AJstGijoY+MgUszMgRxIYTYw==", + "license": "MIT" + }, + "node_modules/@vitejs/plugin-react": { + "version": "5.1.4", + "resolved": "https://registry.npmjs.org/@vitejs/plugin-react/-/plugin-react-5.1.4.tgz", + "integrity": "sha512-VIcFLdRi/VYRU8OL/puL7QXMYafHmqOnwTZY50U1JPlCNj30PxCMx65c494b1K9be9hX83KVt0+gTEwTWLqToA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/core": "^7.29.0", + "@babel/plugin-transform-react-jsx-self": "^7.27.1", + "@babel/plugin-transform-react-jsx-source": "^7.27.1", + "@rolldown/pluginutils": "1.0.0-rc.3", + "@types/babel__core": "^7.20.5", + "react-refresh": "^0.18.0" + }, + "engines": { + "node": "^20.19.0 || >=22.12.0" + }, + "peerDependencies": { + "vite": "^4.2.0 || ^5.0.0 || ^6.0.0 || ^7.0.0" + } + }, + "node_modules/@xyflow/react": { + "version": "12.10.1", + "resolved": "https://registry.npmjs.org/@xyflow/react/-/react-12.10.1.tgz", + "integrity": "sha512-5eSWtIK/+rkldOuFbOOz44CRgQRjtS9v5nufk77DV+XBnfCGL9HAQ8PG00o2ZYKqkEU/Ak6wrKC95Tu+2zuK3Q==", + "license": "MIT", + "dependencies": { + "@xyflow/system": "0.0.75", + "classcat": "^5.0.3", + "zustand": "^4.4.0" + }, + "peerDependencies": { + "react": ">=17", + "react-dom": ">=17" + } + }, + "node_modules/@xyflow/system": { + "version": "0.0.75", + "resolved": "https://registry.npmjs.org/@xyflow/system/-/system-0.0.75.tgz", + "integrity": "sha512-iXs+AGFLi8w/VlAoc/iSxk+CxfT6o64Uw/k0CKASOPqjqz6E0rb5jFZgJtXGZCpfQI6OQpu5EnumP5fGxQheaQ==", + "license": "MIT", + "dependencies": { + "@types/d3-drag": "^3.0.7", + "@types/d3-interpolate": "^3.0.4", + "@types/d3-selection": "^3.0.10", + "@types/d3-transition": "^3.0.8", + "@types/d3-zoom": "^3.0.8", + "d3-drag": "^3.0.0", + "d3-interpolate": "^3.0.1", + "d3-selection": "^3.0.0", + "d3-zoom": "^3.0.0" + } + }, + "node_modules/@zeit/schemas": { + "version": "2.36.0", + 
"resolved": "https://registry.npmjs.org/@zeit/schemas/-/schemas-2.36.0.tgz", + "integrity": "sha512-7kjMwcChYEzMKjeex9ZFXkt1AyNov9R5HZtjBKVsmVpw7pa7ZtlCGvCBC2vnnXctaYN+aRI61HjIqeetZW5ROg==", + "license": "MIT" + }, + "node_modules/accepts": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/accepts/-/accepts-2.0.0.tgz", + "integrity": "sha512-5cvg6CtKwfgdmVqY1WIiXKc3Q1bkRqGLi+2W/6ao+6Y7gu/RCwRuAhGEzh5B4KlszSuTLgZYuqFqo5bImjNKng==", + "license": "MIT", + "dependencies": { + "mime-types": "^3.0.0", + "negotiator": "^1.0.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/accepts/node_modules/mime-db": { + "version": "1.54.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.54.0.tgz", + "integrity": "sha512-aU5EJuIN2WDemCcAp2vFBfp/m4EAhWJnUNSSw0ixs7/kXbd6Pg64EmwJkNdFhB8aWt1sH2CTXrLxo/iAGV3oPQ==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/accepts/node_modules/mime-types": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-3.0.2.tgz", + "integrity": "sha512-Lbgzdk0h4juoQ9fCKXW4by0UJqj+nOOrI9MJ1sSj4nI8aI2eo1qmvQEie4VD1glsS250n15LsWsYtCugiStS5A==", + "license": "MIT", + "dependencies": { + "mime-db": "^1.54.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/accepts/node_modules/negotiator": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-1.0.0.tgz", + "integrity": "sha512-8Ofs/AUQh8MaEcrlq5xOX0CQ9ypTF5dl78mjlMNfOK08fzpgTHQRQPBxcPlEtIw0yRpws+Zo/3r+5WRby7u3Gg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/acorn": { + "version": "8.15.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz", + "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", + "dev": true, + "license": "MIT", + "peer": true, + 
"bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-jsx": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", + "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/agent-base": { + "version": "7.1.4", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.4.tgz", + "integrity": "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==", + "license": "MIT", + "engines": { + "node": ">= 14" + } + }, + "node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dev": true, + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/ansi-align": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/ansi-align/-/ansi-align-3.0.1.tgz", + "integrity": "sha512-IOfwwBF5iczOjp/WeY4YxyjqAFMQoZufdQWDd19SEExbVLNXqvpzSJ/M7Za4/sCPmQ0+GRquoA7bGcINcxew6w==", + "license": "ISC", + "dependencies": { + "string-width": "^4.1.0" + } + }, + "node_modules/ansi-align/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-align/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": 
"https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "license": "MIT" + }, + "node_modules/ansi-align/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-align/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-regex": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz", + "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/apache-arrow": { + "version": "13.0.0", + "resolved": "https://registry.npmjs.org/apache-arrow/-/apache-arrow-13.0.0.tgz", + 
"integrity": "sha512-3gvCX0GDawWz6KFNC28p65U+zGh/LZ6ZNKWNu74N6CQlKzxeoWHpi4CgEQsgRSEMuyrIIXi1Ea2syja7dwcHvw==", + "license": "Apache-2.0", + "dependencies": { + "@types/command-line-args": "5.2.0", + "@types/command-line-usage": "5.0.2", + "@types/node": "20.3.0", + "@types/pad-left": "2.1.1", + "command-line-args": "5.2.1", + "command-line-usage": "7.0.1", + "flatbuffers": "23.5.26", + "json-bignum": "^0.0.3", + "pad-left": "^2.1.0", + "tslib": "^2.5.3" + }, + "bin": { + "arrow2csv": "bin/arrow2csv.js" + } + }, + "node_modules/apache-arrow/node_modules/@types/node": { + "version": "20.3.0", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.3.0.tgz", + "integrity": "sha512-cumHmIAf6On83X7yP+LrsEyUOf/YlociZelmpRYaGFydoaPdxdt80MAbu6vWerQT2COCp2nPvHdsbD7tHn/YlQ==", + "license": "MIT" + }, + "node_modules/arch": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/arch/-/arch-2.2.0.tgz", + "integrity": "sha512-Of/R0wqp83cgHozfIYLbBMnej79U/SVGOOyuB3VVFv1NRM/PSFMK12x9KVtiYzJqmnU5WR2qp0Z5rHb7sWGnFQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/arg": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/arg/-/arg-5.0.2.tgz", + "integrity": "sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==", + "license": "MIT" + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "dev": true, + "license": "Python-2.0" + }, + "node_modules/array-back": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/array-back/-/array-back-3.1.0.tgz", + "integrity": 
"sha512-TkuxA4UCOvxuDK6NZYXCalszEzj+TLszyASooky+i742l9TqsOdYCMJJupxRic61hwquNtppB3hgcuq9SVSH1Q==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/ast-types": { + "version": "0.13.4", + "resolved": "https://registry.npmjs.org/ast-types/-/ast-types-0.13.4.tgz", + "integrity": "sha512-x1FCFnFifvYDDzTaLII71vG5uvDwgtmDTEVWAxrgeiR8VjMONcCXJx7E+USjDtHlwFmt9MysbqgF9b9Vjr6w+w==", + "license": "MIT", + "dependencies": { + "tslib": "^2.0.1" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/async": { + "version": "3.2.6", + "resolved": "https://registry.npmjs.org/async/-/async-3.2.6.tgz", + "integrity": "sha512-htCUDlxyyCLMgaM3xXg0C0LW2xqfuQ6p05pCEIsXuyQ+a1koYKTuBMzRNwmybfLgvJDMd0r1LTn4+E0Ti6C2AA==", + "license": "MIT" + }, + "node_modules/async-limiter": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/async-limiter/-/async-limiter-1.0.1.tgz", + "integrity": "sha512-csOlWGAcRFJaI6m+F2WKdnMKr4HhdhFVBk0H/QbJFMCr+uO2kwohwXQPxw/9OCxp05r5ghVBFSyioixx3gfkNQ==", + "license": "MIT" + }, + "node_modules/asynckit": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", + "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==", + "license": "MIT" + }, + "node_modules/axios": { + "version": "1.13.5", + "resolved": "https://registry.npmjs.org/axios/-/axios-1.13.5.tgz", + "integrity": "sha512-cz4ur7Vb0xS4/KUN0tPWe44eqxrIu31me+fbang3ijiNscE129POzipJJA6zniq2C/Z6sJCjMimjS8Lc/GAs8Q==", + "license": "MIT", + "dependencies": { + "follow-redirects": "^1.15.11", + "form-data": "^4.0.5", + "proxy-from-env": "^1.1.0" + } + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "license": "MIT" + }, + "node_modules/base64-js": { + "version": 
"1.5.1", + "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", + "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "optional": true + }, + "node_modules/baseline-browser-mapping": { + "version": "2.9.19", + "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.9.19.tgz", + "integrity": "sha512-ipDqC8FrAl/76p2SSWKSI+H9tFwm7vYqXQrItCuiVPt26Km0jS+NzSsBWAaBusvSbQcfJG+JitdMm+wZAgTYqg==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "baseline-browser-mapping": "dist/cli.js" + } + }, + "node_modules/basic-ftp": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/basic-ftp/-/basic-ftp-5.2.0.tgz", + "integrity": "sha512-VoMINM2rqJwJgfdHq6RiUudKt2BV+FY5ZFezP/ypmwayk68+NzzAQy4XXLlqsGD4MCzq3DrmNFD/uUmBJuGoXw==", + "license": "MIT", + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/body-parser": { + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-2.2.2.tgz", + "integrity": "sha512-oP5VkATKlNwcgvxi0vM0p/D3n2C3EReYVX+DNYs5TjZFn/oQt2j+4sVJtSMr18pdRr8wjTcBl6LoV+FUwzPmNA==", + "license": "MIT", + "dependencies": { + "bytes": "^3.1.2", + "content-type": "^1.0.5", + "debug": "^4.4.3", + "http-errors": "^2.0.0", + "iconv-lite": "^0.7.0", + "on-finished": "^2.4.1", + "qs": "^6.14.1", + "raw-body": "^3.0.1", + "type-is": "^2.0.1" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/boxen": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/boxen/-/boxen-7.0.0.tgz", + "integrity": 
"sha512-j//dBVuyacJbvW+tvZ9HuH03fZ46QcaKvvhZickZqtB271DxJ7SNRSNxrV/dZX0085m7hISRZWbzWlJvx/rHSg==", + "license": "MIT", + "dependencies": { + "ansi-align": "^3.0.1", + "camelcase": "^7.0.0", + "chalk": "^5.0.1", + "cli-boxes": "^3.0.0", + "string-width": "^5.1.2", + "type-fest": "^2.13.0", + "widest-line": "^4.0.1", + "wrap-ansi": "^8.0.1" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/boxen/node_modules/chalk": { + "version": "5.6.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-5.6.2.tgz", + "integrity": "sha512-7NzBL0rN6fMUW+f7A6Io4h40qQlG+xGmtMxfbnH/K7TAtt8JQWVQK+6g0UXKMeVJoyV5EkkNsErQ8pVD3bLHbA==", + "license": "MIT", + "engines": { + "node": "^12.17.0 || ^14.13 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/braces": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "license": "MIT", + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/browser-or-node": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/browser-or-node/-/browser-or-node-1.3.0.tgz", + "integrity": "sha512-0F2z/VSnLbmEeBcUrSuDH5l0HxTXdQQzLjkmBR4cYfvg1zJrKSlmIZFqyFR8oX0NrwPhy3c3HQ6i3OxMbew4Tg==", + "license": "MIT" + }, + "node_modules/browserslist": { + "version": "4.28.1", + "resolved": 
"https://registry.npmjs.org/browserslist/-/browserslist-4.28.1.tgz", + "integrity": "sha512-ZC5Bd0LgJXgwGqUknZY/vkUQ04r8NXnJZ3yYi4vDmSiZmC/pdSN0NbNRPxZpbtO4uAfDUAFffO8IZoM3Gj8IkA==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "peer": true, + "dependencies": { + "baseline-browser-mapping": "^2.9.0", + "caniuse-lite": "^1.0.30001759", + "electron-to-chromium": "^1.5.263", + "node-releases": "^2.0.27", + "update-browserslist-db": "^1.2.0" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/buffer": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz", + "integrity": "sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "optional": true, + "dependencies": { + "base64-js": "^1.3.1", + "ieee754": "^1.1.13" + } + }, + "node_modules/bytes": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", + "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/call-bind-apply-helpers": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", + "integrity": 
"sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/call-bound": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.4.tgz", + "integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "get-intrinsic": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/camelcase": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-7.0.1.tgz", + "integrity": "sha512-xlx1yCK2Oc1APsPXDL2LdlNP6+uu8OCDdhOBSVT279M/S+y75O30C2VuD8T2ogdePBBl7PfPF4504tnLgX3zfw==", + "license": "MIT", + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001769", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001769.tgz", + "integrity": "sha512-BCfFL1sHijQlBGWBMuJyhZUhzo7wer5sVj9hqekB/7xn0Ypy+pER/edCYQm4exbXj4WiySGp40P8UuTh6w1srg==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "CC-BY-4.0" + }, + 
"node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/chalk-template": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/chalk-template/-/chalk-template-0.4.0.tgz", + "integrity": "sha512-/ghrgmhfY8RaSdeo43hNXxpoHAtxdbskUHjPpfqUWGttFgycUhYPGx3YZBCnUCvOa7Doivn1IZec3DEGFoMgLg==", + "license": "MIT", + "dependencies": { + "chalk": "^4.1.2" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/chalk-template?sponsor=1" + } + }, + "node_modules/classcat": { + "version": "5.0.5", + "resolved": "https://registry.npmjs.org/classcat/-/classcat-5.0.5.tgz", + "integrity": "sha512-JhZUT7JFcQy/EzW605k/ktHtncoo9vnyW/2GspNYwFlN1C/WmjuV/xtS04e9SOkL2sTdw0VAZ2UGCcQ9lR6p6w==", + "license": "MIT" + }, + "node_modules/cli-boxes": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-3.0.0.tgz", + "integrity": "sha512-/lzGpEWL/8PfI0BmBOPRwp0c/wFNX1RdUML3jK/RcSBA9T8mZDdQpqYBKtCFTOfQbwPqWEOpjqW+Fnayc0969g==", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/clipboardy": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/clipboardy/-/clipboardy-3.0.0.tgz", + "integrity": "sha512-Su+uU5sr1jkUy1sGRpLKjKrvEOVXgSgiSInwa/qeID6aJ07yh+5NWc3h2QfjHjBnfX4LhtFcuAWKUsJ3r+fjbg==", + "license": "MIT", + "dependencies": { + "arch": "^2.2.0", + "execa": "^5.1.1", + "is-wsl": "^2.2.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": 
"https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/color": { + "version": "5.0.3", + "resolved": "https://registry.npmjs.org/color/-/color-5.0.3.tgz", + "integrity": "sha512-ezmVcLR3xAVp8kYOm4GS45ZLLgIE6SPAFoduLr6hTDajwb3KZ2F46gulK3XpcwRFb5KKGCSezCBAY4Dw4HsyXA==", + "license": "MIT", + "dependencies": { + "color-convert": "^3.1.3", + "color-string": "^2.1.3" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "license": "MIT", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "license": "MIT" + }, + "node_modules/color-string": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/color-string/-/color-string-2.1.4.tgz", + "integrity": "sha512-Bb6Cq8oq0IjDOe8wJmi4JeNn763Xs9cfrBcaylK1tPypWzyoy2G3l90v9k64kjphl/ZJjPIShFztenRomi8WTg==", + "license": "MIT", + "dependencies": { + "color-name": "^2.0.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/color-string/node_modules/color-name": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-2.1.0.tgz", + "integrity": "sha512-1bPaDNFm0axzE4MEAzKPuqKWeRaT43U/hyxKPBdqTfmPF+d6n7FSoTFxLVULUJOmiLp01KjhIPPH+HrXZJN4Rg==", + "license": "MIT", + "engines": { + "node": ">=12.20" + } + }, + "node_modules/color/node_modules/color-convert": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-3.1.3.tgz", + "integrity": 
"sha512-fasDH2ont2GqF5HpyO4w0+BcewlhHEZOFn9c1ckZdHpJ56Qb7MHhH/IcJZbBGgvdtwdwNbLvxiBEdg336iA9Sg==", + "license": "MIT", + "dependencies": { + "color-name": "^2.0.0" + }, + "engines": { + "node": ">=14.6" + } + }, + "node_modules/color/node_modules/color-name": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-2.1.0.tgz", + "integrity": "sha512-1bPaDNFm0axzE4MEAzKPuqKWeRaT43U/hyxKPBdqTfmPF+d6n7FSoTFxLVULUJOmiLp01KjhIPPH+HrXZJN4Rg==", + "license": "MIT", + "engines": { + "node": ">=12.20" + } + }, + "node_modules/combined-stream": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", + "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", + "license": "MIT", + "dependencies": { + "delayed-stream": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/command-line-args": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/command-line-args/-/command-line-args-5.2.1.tgz", + "integrity": "sha512-H4UfQhZyakIjC74I9d34fGYDwk3XpSr17QhEd0Q3I9Xq1CETHo4Hcuo87WyWHpAF1aSLjLRf5lD9ZGX2qStUvg==", + "license": "MIT", + "dependencies": { + "array-back": "^3.1.0", + "find-replace": "^3.0.0", + "lodash.camelcase": "^4.3.0", + "typical": "^4.0.0" + }, + "engines": { + "node": ">=4.0.0" + } + }, + "node_modules/command-line-usage": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/command-line-usage/-/command-line-usage-7.0.1.tgz", + "integrity": "sha512-NCyznE//MuTjwi3y84QVUGEOT+P5oto1e1Pk/jFPVdPPfsG03qpTIl3yw6etR+v73d0lXsoojRpvbru2sqePxQ==", + "license": "MIT", + "dependencies": { + "array-back": "^6.2.2", + "chalk-template": "^0.4.0", + "table-layout": "^3.0.0", + "typical": "^7.1.1" + }, + "engines": { + "node": ">=12.20.0" + } + }, + "node_modules/command-line-usage/node_modules/array-back": { + "version": "6.2.2", + "resolved": 
"https://registry.npmjs.org/array-back/-/array-back-6.2.2.tgz", + "integrity": "sha512-gUAZ7HPyb4SJczXAMUXMGAvI976JoK3qEx9v1FTmeYuJj0IBiaKttG1ydtGKdkfqWkIkouke7nG8ufGy77+Cvw==", + "license": "MIT", + "engines": { + "node": ">=12.17" + } + }, + "node_modules/command-line-usage/node_modules/typical": { + "version": "7.3.0", + "resolved": "https://registry.npmjs.org/typical/-/typical-7.3.0.tgz", + "integrity": "sha512-ya4mg/30vm+DOWfBg4YK3j2WD6TWtRkCbasOJr40CseYENzCUby/7rIvXA99JGsQHeNxLbnXdyLLxKSv3tauFw==", + "license": "MIT", + "engines": { + "node": ">=12.17" + } + }, + "node_modules/commander": { + "version": "9.5.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-9.5.0.tgz", + "integrity": "sha512-KRs7WVDKg86PWiuAqhDrAQnTXZKraVcCc6vFdL14qrZ/DcWwuRo7VoiYXalXO7S5GKpqYiVEwCbgFDfxNHKJBQ==", + "license": "MIT", + "engines": { + "node": "^12.20.0 || >=14" + } + }, + "node_modules/compressible": { + "version": "2.0.18", + "resolved": "https://registry.npmjs.org/compressible/-/compressible-2.0.18.tgz", + "integrity": "sha512-AF3r7P5dWxL8MxyITRMlORQNaOA2IkAFaTr4k7BUumjPtRpGDTZpl0Pb1XCO6JeDCBdp126Cgs9sMxqSjgYyRg==", + "license": "MIT", + "dependencies": { + "mime-db": ">= 1.43.0 < 2" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/compression": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/compression/-/compression-1.8.1.tgz", + "integrity": "sha512-9mAqGPHLakhCLeNyxPkK4xVo746zQ/czLH1Ky+vkitMnWfWZps8r0qXuwhwizagCRttsL4lfG4pIOvaWLpAP0w==", + "license": "MIT", + "dependencies": { + "bytes": "3.1.2", + "compressible": "~2.0.18", + "debug": "2.6.9", + "negotiator": "~0.6.4", + "on-headers": "~1.1.0", + "safe-buffer": "5.2.1", + "vary": "~1.1.2" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/compression/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": 
"sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "license": "MIT", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/compression/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", + "license": "MIT" + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + "license": "MIT" + }, + "node_modules/content-disposition": { + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.2.tgz", + "integrity": "sha512-kRGRZw3bLlFISDBgwTSA1TMBFN6J6GWDeubmDE3AF+3+yXL8hTWv8r5rkLbqYXY4RjPk/EzHnClI3zQf1cFmHA==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/content-type": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz", + "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/convert-source-map": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", + "dev": true, + "license": "MIT" + }, + "node_modules/cookie": { + "version": "0.7.2", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.2.tgz", + "integrity": "sha512-yki5XnKuf750l50uGTllt6kKILY4nQ1eNIQatoXEByZ5dWgnKqbnqmTrBE5B4N7lrMJKQ2ytWMiTO2o0v6Ew/w==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/cookie-signature": { + 
"version": "1.2.2", + "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.2.2.tgz", + "integrity": "sha512-D76uU73ulSXrD1UXF4KE2TMxVVwhsnCgfAyTg9k8P6KGZjlXKrOLe4dJQKI3Bxi5wjesZoFXJWElNWBjPZMbhg==", + "license": "MIT", + "engines": { + "node": ">=6.6.0" + } + }, + "node_modules/cross-spawn": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", + "license": "MIT", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/csstype": { + "version": "3.2.3", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.2.3.tgz", + "integrity": "sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==", + "devOptional": true, + "license": "MIT" + }, + "node_modules/cuint": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/cuint/-/cuint-0.2.2.tgz", + "integrity": "sha512-d4ZVpCW31eWwCMe1YT3ur7mUDnTXbgwyzaL320DrcRT45rfjYxkt5QWLrmOJ+/UEAI2+fQgKe/fCjR8l4TpRgw==", + "license": "MIT", + "optional": true + }, + "node_modules/d3-color": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/d3-color/-/d3-color-3.1.0.tgz", + "integrity": "sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-dispatch": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-dispatch/-/d3-dispatch-3.0.1.tgz", + "integrity": "sha512-rzUyPU/S7rwUflMyLc1ETDeBj0NRuHKKAcvukozwhshr6g6c5d8zh4c2gQjY2bZ0dXeGLWc1PF174P2tVvKhfg==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-drag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/d3-drag/-/d3-drag-3.0.0.tgz", + "integrity": 
"sha512-pWbUJLdETVA8lQNJecMxoXfH6x+mO2UQo8rSmZ+QqxcbyA3hfeprFgIT//HW2nlHChWeIIMwS2Fq+gEARkhTkg==", + "license": "ISC", + "dependencies": { + "d3-dispatch": "1 - 3", + "d3-selection": "3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-ease": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-ease/-/d3-ease-3.0.1.tgz", + "integrity": "sha512-wR/XK3D3XcLIZwpbvQwQ5fK+8Ykds1ip7A2Txe0yxncXSdq1L9skcG7blcedkOX+ZcgxGAmLX1FrRGbADwzi0w==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-interpolate": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-interpolate/-/d3-interpolate-3.0.1.tgz", + "integrity": "sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g==", + "license": "ISC", + "dependencies": { + "d3-color": "1 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-selection": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/d3-selection/-/d3-selection-3.0.0.tgz", + "integrity": "sha512-fmTRWbNMmsmWq6xJV8D19U/gw/bwrHfNXxrIN+HfZgnzqTHp9jOmKMhsTUjXOJnZOdZY9Q28y4yebKzqDKlxlQ==", + "license": "ISC", + "peer": true, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-timer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-timer/-/d3-timer-3.0.1.tgz", + "integrity": "sha512-ndfJ/JxxMd3nw31uyKoY2naivF+r29V+Lc0svZxe1JvvIRmi8hUsrMvdOwgS1o6uBHmiz91geQ0ylPP0aj1VUA==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-transition": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-transition/-/d3-transition-3.0.1.tgz", + "integrity": "sha512-ApKvfjsSR6tg06xrL434C0WydLr7JewBB3V+/39RMHsaXTOG0zmt/OAXeng5M5LBm0ojmxJrpomQVZ1aPvBL4w==", + "license": "ISC", + "dependencies": { + "d3-color": "1 - 3", + "d3-dispatch": "1 - 3", + "d3-ease": "1 - 3", + "d3-interpolate": "1 - 3", + "d3-timer": "1 - 3" + }, + "engines": { + "node": ">=12" + }, + 
"peerDependencies": { + "d3-selection": "2 - 3" + } + }, + "node_modules/d3-zoom": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/d3-zoom/-/d3-zoom-3.0.0.tgz", + "integrity": "sha512-b8AmV3kfQaqWAuacbPuNbL6vahnOJflOhexLzMMNLga62+/nh0JzvJ0aO/5a5MVgUFGS7Hu1P9P03o3fJkDCyw==", + "license": "ISC", + "dependencies": { + "d3-dispatch": "1 - 3", + "d3-drag": "2 - 3", + "d3-interpolate": "1 - 3", + "d3-selection": "2 - 3", + "d3-transition": "2 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/data-uri-to-buffer": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/data-uri-to-buffer/-/data-uri-to-buffer-6.0.2.tgz", + "integrity": "sha512-7hvf7/GW8e86rW0ptuwS3OcBGDjIi6SZva7hCyWC0yYry2cOPmLIjXAUHI6DK2HsnwJd9ifmt57i8eV2n4YNpw==", + "license": "MIT", + "engines": { + "node": ">= 14" + } + }, + "node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/deep-extend": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz", + "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==", + "license": "MIT", + "engines": { + "node": ">=4.0.0" + } + }, + "node_modules/deep-is": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", + "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/define-lazy-prop": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-2.0.0.tgz", + 
"integrity": "sha512-Ds09qNh8yw3khSjiJjiUInaGX9xlqZDY7JVryGxdxV7NPeuqQfplOpQ66yJFZut3jLa5zOwkXw1g9EI2uKh4Og==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/degenerator": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/degenerator/-/degenerator-5.0.1.tgz", + "integrity": "sha512-TllpMR/t0M5sqCXfj85i4XaAzxmS5tVA16dqvdkMwGmzI+dXLXnw3J+3Vdv7VKw+ThlTMboK6i9rnZ6Nntj5CQ==", + "license": "MIT", + "dependencies": { + "ast-types": "^0.13.4", + "escodegen": "^2.1.0", + "esprima": "^4.0.1" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/delayed-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", + "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", + "license": "MIT", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/depd": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", + "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/dunder-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", + "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.1", + "es-errors": "^1.3.0", + "gopd": "^1.2.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/eastasianwidth": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", + "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==", + "license": "MIT" + }, + "node_modules/ee-first": { + "version": "1.1.1", + "resolved": 
"https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", + "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==", + "license": "MIT" + }, + "node_modules/electron-to-chromium": { + "version": "1.5.286", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.286.tgz", + "integrity": "sha512-9tfDXhJ4RKFNerfjdCcZfufu49vg620741MNs26a9+bhLThdB+plgMeou98CAaHu/WATj2iHOOHTp1hWtABj2A==", + "dev": true, + "license": "ISC" + }, + "node_modules/emoji-regex": { + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", + "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", + "license": "MIT" + }, + "node_modules/enabled": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/enabled/-/enabled-2.0.0.tgz", + "integrity": "sha512-AKrN98kuwOzMIdAizXGI86UFBoo26CL21UM763y1h/GMSJ4/OHU9k2YlsmBpyScFo/wbLzWQJBMCW4+IO3/+OQ==", + "license": "MIT" + }, + "node_modules/encodeurl": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz", + "integrity": "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/es-define-property": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", + "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + 
"node_modules/es-object-atoms": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", + "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-set-tostringtag": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz", + "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.6", + "has-tostringtag": "^1.0.2", + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/esbuild": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.27.3.tgz", + "integrity": "sha512-8VwMnyGCONIs6cWue2IdpHxHnAjzxnw2Zr7MkVxB2vjmQ2ivqGFb4LEG3SMnv0Gb2F/G/2yA8zUaiL1gywDCCg==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=18" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.27.3", + "@esbuild/android-arm": "0.27.3", + "@esbuild/android-arm64": "0.27.3", + "@esbuild/android-x64": "0.27.3", + "@esbuild/darwin-arm64": "0.27.3", + "@esbuild/darwin-x64": "0.27.3", + "@esbuild/freebsd-arm64": "0.27.3", + "@esbuild/freebsd-x64": "0.27.3", + "@esbuild/linux-arm": "0.27.3", + "@esbuild/linux-arm64": "0.27.3", + "@esbuild/linux-ia32": "0.27.3", + "@esbuild/linux-loong64": "0.27.3", + "@esbuild/linux-mips64el": "0.27.3", + "@esbuild/linux-ppc64": "0.27.3", + "@esbuild/linux-riscv64": "0.27.3", + "@esbuild/linux-s390x": "0.27.3", + "@esbuild/linux-x64": "0.27.3", + "@esbuild/netbsd-arm64": "0.27.3", + "@esbuild/netbsd-x64": "0.27.3", + "@esbuild/openbsd-arm64": "0.27.3", + "@esbuild/openbsd-x64": 
"0.27.3", + "@esbuild/openharmony-arm64": "0.27.3", + "@esbuild/sunos-x64": "0.27.3", + "@esbuild/win32-arm64": "0.27.3", + "@esbuild/win32-ia32": "0.27.3", + "@esbuild/win32-x64": "0.27.3" + } + }, + "node_modules/escalade": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-html": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", + "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==", + "license": "MIT" + }, + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/escodegen": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/escodegen/-/escodegen-2.1.0.tgz", + "integrity": "sha512-2NlIDTwUWJN0mRPQOdtQBzbUHvdGY2P1VXSyU83Q3xKxM7WHX2Ql8dKq782Q9TgQUNOLEzEYu9bzLNj1q88I5w==", + "license": "BSD-2-Clause", + "dependencies": { + "esprima": "^4.0.1", + "estraverse": "^5.2.0", + "esutils": "^2.0.2" + }, + "bin": { + "escodegen": "bin/escodegen.js", + "esgenerate": "bin/esgenerate.js" + }, + "engines": { + "node": ">=6.0" + }, + "optionalDependencies": { + "source-map": "~0.6.1" + } + }, + "node_modules/eslint": { + "version": "9.39.2", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.39.2.tgz", + "integrity": "sha512-LEyamqS7W5HB3ujJyvi0HQK/dtVINZvd5mAAp9eT5S/ujByGjiZLCzPcHVzuXbpJDJF/cxwHlfceVUDZ2lnSTw==", + 
"dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "@eslint-community/eslint-utils": "^4.8.0", + "@eslint-community/regexpp": "^4.12.1", + "@eslint/config-array": "^0.21.1", + "@eslint/config-helpers": "^0.4.2", + "@eslint/core": "^0.17.0", + "@eslint/eslintrc": "^3.3.1", + "@eslint/js": "9.39.2", + "@eslint/plugin-kit": "^0.4.1", + "@humanfs/node": "^0.16.6", + "@humanwhocodes/module-importer": "^1.0.1", + "@humanwhocodes/retry": "^0.4.2", + "@types/estree": "^1.0.6", + "ajv": "^6.12.4", + "chalk": "^4.0.0", + "cross-spawn": "^7.0.6", + "debug": "^4.3.2", + "escape-string-regexp": "^4.0.0", + "eslint-scope": "^8.4.0", + "eslint-visitor-keys": "^4.2.1", + "espree": "^10.4.0", + "esquery": "^1.5.0", + "esutils": "^2.0.2", + "fast-deep-equal": "^3.1.3", + "file-entry-cache": "^8.0.0", + "find-up": "^5.0.0", + "glob-parent": "^6.0.2", + "ignore": "^5.2.0", + "imurmurhash": "^0.1.4", + "is-glob": "^4.0.0", + "json-stable-stringify-without-jsonify": "^1.0.1", + "lodash.merge": "^4.6.2", + "minimatch": "^3.1.2", + "natural-compare": "^1.4.0", + "optionator": "^0.9.3" + }, + "bin": { + "eslint": "bin/eslint.js" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://eslint.org/donate" + }, + "peerDependencies": { + "jiti": "*" + }, + "peerDependenciesMeta": { + "jiti": { + "optional": true + } + } + }, + "node_modules/eslint-plugin-react-hooks": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-7.0.1.tgz", + "integrity": "sha512-O0d0m04evaNzEPoSW+59Mezf8Qt0InfgGIBJnpC0h3NH/WjUAR7BIKUfysC6todmtiZ/A0oUVS8Gce0WhBrHsA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/core": "^7.24.4", + "@babel/parser": "^7.24.4", + "hermes-parser": "^0.25.1", + "zod": "^3.25.0 || ^4.0.0", + "zod-validation-error": "^3.5.0 || ^4.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "eslint": "^3.0.0 || ^4.0.0 || ^5.0.0 || 
^6.0.0 || ^7.0.0 || ^8.0.0-0 || ^9.0.0" + } + }, + "node_modules/eslint-scope": { + "version": "8.4.0", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-8.4.0.tgz", + "integrity": "sha512-sNXOfKCn74rt8RICKMvJS7XKV/Xk9kA7DyJr8mJik3S7Cwgy3qlkkmyS2uQB3jiJg6VNdZd/pDBJu0nvG2NlTg==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^5.2.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint-visitor-keys": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.1.tgz", + "integrity": "sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/espree": { + "version": "10.4.0", + "resolved": "https://registry.npmjs.org/espree/-/espree-10.4.0.tgz", + "integrity": "sha512-j6PAQ2uUr79PZhBjP5C5fhl8e39FmRnOjsD5lGnWrFU8i2G776tBK7+nP8KuQUTTyAZUwfQqXAgrVH5MbH9CYQ==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "acorn": "^8.15.0", + "acorn-jsx": "^5.3.2", + "eslint-visitor-keys": "^4.2.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/esprima": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", + "license": "BSD-2-Clause", + "bin": { + "esparse": "bin/esparse.js", + "esvalidate": "bin/esvalidate.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/esquery": { + "version": "1.7.0", + "resolved": 
"https://registry.npmjs.org/esquery/-/esquery-1.7.0.tgz", + "integrity": "sha512-Ap6G0WQwcU/LHsvLwON1fAQX9Zp0A2Y6Y/cJBl9r/JbW90Zyg4/zbG6zzKa2OTALELarYHmKu0GhpM5EO+7T0g==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "estraverse": "^5.1.0" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/esrecurse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "estraverse": "^5.2.0" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "license": "BSD-2-Clause", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/etag": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", + "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/eventemitter3": { + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz", + "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==", + "license": "MIT" + }, + "node_modules/execa": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz", + 
"integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", + "license": "MIT", + "dependencies": { + "cross-spawn": "^7.0.3", + "get-stream": "^6.0.0", + "human-signals": "^2.1.0", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.1", + "onetime": "^5.1.2", + "signal-exit": "^3.0.3", + "strip-final-newline": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/express": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/express/-/express-5.2.1.tgz", + "integrity": "sha512-hIS4idWWai69NezIdRt2xFVofaF4j+6INOpJlVOLDO8zXGpUVEVzIYk12UUi2JzjEzWL3IOAxcTubgz9Po0yXw==", + "license": "MIT", + "dependencies": { + "accepts": "^2.0.0", + "body-parser": "^2.2.1", + "content-disposition": "^1.0.0", + "content-type": "^1.0.5", + "cookie": "^0.7.1", + "cookie-signature": "^1.2.1", + "debug": "^4.4.0", + "depd": "^2.0.0", + "encodeurl": "^2.0.0", + "escape-html": "^1.0.3", + "etag": "^1.8.1", + "finalhandler": "^2.1.0", + "fresh": "^2.0.0", + "http-errors": "^2.0.0", + "merge-descriptors": "^2.0.0", + "mime-types": "^3.0.0", + "on-finished": "^2.4.1", + "once": "^1.4.0", + "parseurl": "^1.3.3", + "proxy-addr": "^2.0.7", + "qs": "^6.14.0", + "range-parser": "^1.2.1", + "router": "^2.2.0", + "send": "^1.1.0", + "serve-static": "^2.2.0", + "statuses": "^2.0.1", + "type-is": "^2.0.1", + "vary": "^1.1.2" + }, + "engines": { + "node": ">= 18" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/express/node_modules/content-disposition": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-1.0.1.tgz", + "integrity": "sha512-oIXISMynqSqm241k6kcQ5UwttDILMK4BiurCfGEREw6+X9jkkpEe5T9FZaApyLGGOnFuyMWZpdolTXMtvEJ08Q==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": 
{ + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/express/node_modules/mime-db": { + "version": "1.54.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.54.0.tgz", + "integrity": "sha512-aU5EJuIN2WDemCcAp2vFBfp/m4EAhWJnUNSSw0ixs7/kXbd6Pg64EmwJkNdFhB8aWt1sH2CTXrLxo/iAGV3oPQ==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/express/node_modules/mime-types": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-3.0.2.tgz", + "integrity": "sha512-Lbgzdk0h4juoQ9fCKXW4by0UJqj+nOOrI9MJ1sSj4nI8aI2eo1qmvQEie4VD1glsS250n15LsWsYtCugiStS5A==", + "license": "MIT", + "dependencies": { + "mime-db": "^1.54.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/express/node_modules/range-parser": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", + "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", + "license": "MIT" + }, + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", + "dev": true, + "license": "MIT" + }, + "node_modules/fast-levenshtein": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", + "integrity": 
"sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==", + "dev": true, + "license": "MIT" + }, + "node_modules/fdir": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", + "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "picomatch": "^3 || ^4" + }, + "peerDependenciesMeta": { + "picomatch": { + "optional": true + } + } + }, + "node_modules/fecha": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/fecha/-/fecha-4.2.3.tgz", + "integrity": "sha512-OP2IUU6HeYKJi3i0z4A19kHMQoLVs4Hc+DPqqxI2h/DPZHTm/vjsfC6P0b4jCMy14XizLBqvndQ+UilD7707Jw==", + "license": "MIT" + }, + "node_modules/file-entry-cache": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-8.0.0.tgz", + "integrity": "sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "flat-cache": "^4.0.0" + }, + "engines": { + "node": ">=16.0.0" + } + }, + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "license": "MIT", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/finalhandler": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-2.1.1.tgz", + "integrity": "sha512-S8KoZgRZN+a5rNwqTxlZZePjT/4cnm0ROV70LedRHZ0p8u9fRID0hJUZQpkKLzro8LfmC8sx23bY6tVNxv8pQA==", + "license": "MIT", + "dependencies": { + "debug": "^4.4.0", + "encodeurl": "^2.0.0", + "escape-html": "^1.0.3", + "on-finished": "^2.4.1", + "parseurl": "^1.3.3", + 
"statuses": "^2.0.1" + }, + "engines": { + "node": ">= 18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/find-replace": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/find-replace/-/find-replace-3.0.0.tgz", + "integrity": "sha512-6Tb2myMioCAgv5kfvP5/PkZZ/ntTpVK39fHY7WkWBgvbeE+VHd/tZuZ4mrC+bxh4cfOZeYKVPaJIZtZXV7GNCQ==", + "license": "MIT", + "dependencies": { + "array-back": "^3.0.1" + }, + "engines": { + "node": ">=4.0.0" + } + }, + "node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dev": true, + "license": "MIT", + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/flat-cache": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-4.0.1.tgz", + "integrity": "sha512-f7ccFPK3SXFHpx15UIGyRJ/FJQctuKZ0zVuN3frBo4HnK3cay9VEW0R6yPYFHC0AgqhukPzKjq22t5DmAyqGyw==", + "dev": true, + "license": "MIT", + "dependencies": { + "flatted": "^3.2.9", + "keyv": "^4.5.4" + }, + "engines": { + "node": ">=16" + } + }, + "node_modules/flatbuffers": { + "version": "23.5.26", + "resolved": "https://registry.npmjs.org/flatbuffers/-/flatbuffers-23.5.26.tgz", + "integrity": "sha512-vE+SI9vrJDwi1oETtTIFldC/o9GsVKRM+s6EL0nQgxXlYV1Vc4Tk30hj4xGICftInKQKj1F3up2n8UbIVobISQ==", + "license": "SEE LICENSE IN LICENSE" + }, + "node_modules/flatted": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.3.tgz", + "integrity": "sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==", + "dev": true, + "license": "ISC" + }, + "node_modules/fn.name": { + "version": 
"1.1.0", + "resolved": "https://registry.npmjs.org/fn.name/-/fn.name-1.1.0.tgz", + "integrity": "sha512-GRnmB5gPyJpAhTQdSZTSp9uaPSvl09KoYcMQtsB9rQoOmzs9dH6ffeccH+Z+cv6P68Hu5bC6JjRh4Ah/mHSNRw==", + "license": "MIT" + }, + "node_modules/follow-redirects": { + "version": "1.15.11", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.11.tgz", + "integrity": "sha512-deG2P0JfjrTxl50XGCDyfI97ZGVCxIpfKYmfyrQ54n5FO/0gfIES8C/Psl6kWVDolizcaaxZJnTS0QSMxvnsBQ==", + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/RubenVerborgh" + } + ], + "license": "MIT", + "engines": { + "node": ">=4.0" + }, + "peerDependenciesMeta": { + "debug": { + "optional": true + } + } + }, + "node_modules/form-data": { + "version": "4.0.5", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.5.tgz", + "integrity": "sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==", + "license": "MIT", + "dependencies": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.8", + "es-set-tostringtag": "^2.1.0", + "hasown": "^2.0.2", + "mime-types": "^2.1.12" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/forwarded": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz", + "integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/fresh": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/fresh/-/fresh-2.0.0.tgz", + "integrity": "sha512-Rx/WycZ60HOaqLKAi6cHRKKI7zxWbJ31MhntmtwMoaTeF7XFH9hhBp8vITaMidfljRQ6eYWCKkaTK+ykVJHP2A==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": 
"sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/gensync": { + "version": "1.0.0-beta.2", + "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/get-intrinsic": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", + "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "es-define-property": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.1.1", + "function-bind": "^1.1.2", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-symbols": "^1.1.0", + "hasown": "^2.0.2", + "math-intrinsics": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", + "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", + "license": "MIT", + "dependencies": { + "dunder-proto": "^1.0.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + 
"node": ">= 0.4" + } + }, + "node_modules/get-stream": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", + "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/get-uri": { + "version": "6.0.5", + "resolved": "https://registry.npmjs.org/get-uri/-/get-uri-6.0.5.tgz", + "integrity": "sha512-b1O07XYq8eRuVzBNgJLstU6FYc1tS6wnMtF1I1D9lE8LxZSOGZ7LhxN54yPP6mGw5f2CkXY2BQUL9Fx41qvcIg==", + "license": "MIT", + "dependencies": { + "basic-ftp": "^5.0.2", + "data-uri-to-buffer": "^6.0.2", + "debug": "^4.3.4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/glob-parent": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/globals": { + "version": "14.0.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-14.0.0.tgz", + "integrity": "sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/gopd": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", + "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": 
"https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/has-symbols": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", + "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-tostringtag": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz", + "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", + "license": "MIT", + "dependencies": { + "has-symbols": "^1.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/hermes-estree": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/hermes-estree/-/hermes-estree-0.25.1.tgz", + "integrity": "sha512-0wUoCcLp+5Ev5pDW2OriHC2MJCbwLwuRx+gAqMTOkGKJJiBCLjtrvy4PWUGn6MIVefecRpzoOZ/UV6iGdOr+Cw==", + "dev": true, + "license": "MIT" + }, + "node_modules/hermes-parser": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/hermes-parser/-/hermes-parser-0.25.1.tgz", + "integrity": "sha512-6pEjquH3rqaI6cYAXYPcz9MS4rY6R4ngRgrgfDshRptUZIc3lw0MCIJIGDj9++mfySOuPTHB4nrSW99BCvOPIA==", + "dev": true, + "license": "MIT", + 
"dependencies": { + "hermes-estree": "0.25.1" + } + }, + "node_modules/http-errors": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.1.tgz", + "integrity": "sha512-4FbRdAX+bSdmo4AUFuS0WNiPz8NgFt+r8ThgNWmlrjQjt1Q7ZR9+zTlce2859x4KSXrwIsaeTqDoKQmtP8pLmQ==", + "license": "MIT", + "dependencies": { + "depd": "~2.0.0", + "inherits": "~2.0.4", + "setprototypeof": "~1.2.0", + "statuses": "~2.0.2", + "toidentifier": "~1.0.1" + }, + "engines": { + "node": ">= 0.8" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/http-proxy": { + "version": "1.18.1", + "resolved": "https://registry.npmjs.org/http-proxy/-/http-proxy-1.18.1.tgz", + "integrity": "sha512-7mz/721AbnJwIVbnaSv1Cz3Am0ZLT/UBwkC92VlxhXv/k/BBQfM2fXElQNC27BVGr0uwUpplYPQM9LnaBMR5NQ==", + "license": "MIT", + "dependencies": { + "eventemitter3": "^4.0.0", + "follow-redirects": "^1.0.0", + "requires-port": "^1.0.0" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/http-proxy-agent": { + "version": "7.0.2", + "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-7.0.2.tgz", + "integrity": "sha512-T1gkAiYYDWYx3V5Bmyu7HcfcvL7mUrTWiM6yOfa3PIphViJ/gFPbvidQ+veqSOHci/PxBcDabeUNCzpOODJZig==", + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.0", + "debug": "^4.3.4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/http-proxy-middleware": { + "version": "3.0.5", + "resolved": "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-3.0.5.tgz", + "integrity": "sha512-GLZZm1X38BPY4lkXA01jhwxvDoOkkXqjgVyUzVxiEK4iuRu03PZoYHhHRwxnfhQMDuaxi3vVri0YgSro/1oWqg==", + "license": "MIT", + "dependencies": { + "@types/http-proxy": "^1.17.15", + "debug": "^4.3.6", + "http-proxy": "^1.18.1", + "is-glob": "^4.0.3", + "is-plain-object": "^5.0.0", + "micromatch": "^4.0.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + 
"node_modules/https-proxy-agent": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz", + "integrity": "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==", + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.2", + "debug": "4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/human-signals": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", + "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==", + "license": "Apache-2.0", + "engines": { + "node": ">=10.17.0" + } + }, + "node_modules/iconv-lite": { + "version": "0.7.2", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.7.2.tgz", + "integrity": "sha512-im9DjEDQ55s9fL4EYzOAv0yMqmMBSZp6G0VvFyTMPKWxiSBHUj9NW/qqLmXUwXrrM7AvqSlTCfvqRb0cM8yYqw==", + "license": "MIT", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3.0.0" + }, + "engines": { + "node": ">=0.10.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/ieee754": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", + "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "BSD-3-Clause", + "optional": true + }, + "node_modules/ignore": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz", + "integrity": "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==", + "dev": true, + "license": "MIT", + "engines": { + 
"node": ">= 4" + } + }, + "node_modules/import-fresh": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.1.tgz", + "integrity": "sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "license": "ISC" + }, + "node_modules/ini": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", + "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==", + "license": "ISC" + }, + "node_modules/ip-address": { + "version": "10.1.0", + "resolved": "https://registry.npmjs.org/ip-address/-/ip-address-10.1.0.tgz", + "integrity": "sha512-XXADHxXmvT9+CRxhXg56LJovE+bmWnEWB78LB83VZTprKTmaC5QfruXocxzTZ2Kl0DNwKuBdlIhjL8LeY8Sf8Q==", + "license": "MIT", + "engines": { + "node": ">= 12" + } + }, + "node_modules/ipaddr.js": { + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz", + "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==", + "license": "MIT", + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/is-docker": { + "version": 
"2.2.1", + "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-2.2.1.tgz", + "integrity": "sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==", + "license": "MIT", + "bin": { + "is-docker": "cli.js" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "license": "MIT", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-plain-object": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-5.0.0.tgz", + "integrity": "sha512-VRSzKkbMm5jMDoKLbltAkFQ5Qr7VDiTFGXxYFXXowVj387GeGNOCsOH6Msy00SGZ3Fp84b1Naa1psqgcCIEP5Q==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" 
+ } + }, + "node_modules/is-port-reachable": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/is-port-reachable/-/is-port-reachable-4.0.0.tgz", + "integrity": "sha512-9UoipoxYmSk6Xy7QFgRv2HDyaysmgSG75TFQs6S+3pDM7ZhKTF/bskZV+0UlABHzKjNVhPjYCLfeZUEg1wXxig==", + "license": "MIT", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-promise": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/is-promise/-/is-promise-4.0.0.tgz", + "integrity": "sha512-hvpoI6korhJMnej285dSg6nu1+e6uxs7zG3BYAm5byqDsgJNWwxzM6z6iZiAgQR4TJ30JmBTOwqZUw3WlyH3AQ==", + "license": "MIT" + }, + "node_modules/is-stream": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", + "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-wsl": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-2.2.0.tgz", + "integrity": "sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==", + "license": "MIT", + "dependencies": { + "is-docker": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "license": "ISC" + }, + "node_modules/isomorphic-ws": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/isomorphic-ws/-/isomorphic-ws-4.0.1.tgz", + "integrity": "sha512-BhBvN2MBpWTaSHdWRb/bwdZJ1WaehQ2L1KngkCkfLUGF0mAWAT1sQUQacEmQ0jXkFw/czDXPNQSL5u2/Krsz1w==", + "license": "MIT", + "peerDependencies": { + "ws": "*" + } + }, + 
"node_modules/jose": { + "version": "4.15.9", + "resolved": "https://registry.npmjs.org/jose/-/jose-4.15.9.tgz", + "integrity": "sha512-1vUQX+IdDMVPj4k8kOxgUqlcK518yluMuGZwqlr44FS1ppZB/5GWh4rZG89erpOBOJjU/OBsnCVFfapsRz6nEA==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/panva" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/js-yaml": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", + "dev": true, + "license": "MIT", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/jsesc": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz", + "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==", + "dev": true, + "license": "MIT", + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/json-bignum": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/json-bignum/-/json-bignum-0.0.3.tgz", + "integrity": "sha512-2WHyXj3OfHSgNyuzDbSxI1w2jgw5gkWSWhS7Qg4bWXx1nLk3jnbwfUeS0PSba3IzpTUWdHxBieELUzXRjQB2zg==", + "engines": { + "node": ">=0.8" + } + }, + "node_modules/json-buffer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", + "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": 
"https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true, + "license": "MIT" + }, + "node_modules/json-stable-stringify-without-jsonify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", + "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", + "dev": true, + "license": "MIT" + }, + "node_modules/json5": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "dev": true, + "license": "MIT", + "bin": { + "json5": "lib/cli.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/keyv": { + "version": "4.5.4", + "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", + "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", + "dev": true, + "license": "MIT", + "dependencies": { + "json-buffer": "3.0.1" + } + }, + "node_modules/kuler": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/kuler/-/kuler-2.0.0.tgz", + "integrity": "sha512-Xq9nH7KlWZmXAtodXDDRE7vs6DU1gTU8zYDHDiWLSip45Egwq3plLHzPn27NgvzL2r1LMPC1vdqh98sQxtqj4A==", + "license": "MIT" + }, + "node_modules/levn": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", + "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "prelude-ls": "^1.2.1", + "type-check": "~0.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/locate-path": { + "version": "6.0.0", + "resolved": 
"https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lodash": { + "version": "4.17.23", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.23.tgz", + "integrity": "sha512-LgVTMpQtIopCi79SJeDiP0TfWi5CNEc/L/aRdTh3yIvmZXTnheWpKjSZhnvMl8iXbC1tFg9gdHHDMLoV7CnG+w==", + "license": "MIT" + }, + "node_modules/lodash.camelcase": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/lodash.camelcase/-/lodash.camelcase-4.3.0.tgz", + "integrity": "sha512-TwuEnCnxbc3rAvhf/LbG7tJUDzhqXyFnv3dtzLOPgCG/hODL7WFnsbwktkD7yUV0RrreP/l1PALq/YSg6VvjlA==", + "license": "MIT" + }, + "node_modules/lodash.merge": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", + "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/logform": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/logform/-/logform-2.7.0.tgz", + "integrity": "sha512-TFYA4jnP7PVbmlBIfhlSe+WKxs9dklXMTEGcBCIvLhE/Tn3H6Gk1norupVW7m5Cnd4bLcr08AytbyV/xj7f/kQ==", + "license": "MIT", + "dependencies": { + "@colors/colors": "1.6.0", + "@types/triple-beam": "^1.3.2", + "fecha": "^4.2.0", + "ms": "^2.1.1", + "safe-stable-stringify": "^2.3.1", + "triple-beam": "^1.3.0" + }, + "engines": { + "node": ">= 12.0.0" + } + }, + "node_modules/lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "dev": true, + "license": "ISC", + 
"dependencies": { + "yallist": "^3.0.2" + } + }, + "node_modules/lz4": { + "version": "0.6.5", + "resolved": "https://registry.npmjs.org/lz4/-/lz4-0.6.5.tgz", + "integrity": "sha512-KSZcJU49QZOlJSItaeIU3p8WoAvkTmD9fJqeahQXNu1iQ/kR0/mQLdbrK8JY9MY8f6AhJoMrihp1nu1xDbscSQ==", + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "dependencies": { + "buffer": "^5.2.1", + "cuint": "^0.2.2", + "nan": "^2.13.2", + "xxhashjs": "^0.2.2" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/math-intrinsics": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", + "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/media-typer": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-1.1.0.tgz", + "integrity": "sha512-aisnrDP4GNe06UcKFnV5bfMNPBUw4jsLGaWwWfnH3v02GnBuXX2MCVn5RbrWo0j3pczUilYblq7fQ7Nw2t5XKw==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/merge-descriptors": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-2.0.0.tgz", + "integrity": "sha512-Snk314V5ayFLhp3fkUREub6WtjBfPdCPY1Ln8/8munuLuiYhsABgBVWsozAG+MWMbVEvcdcpbi9R7ww22l9Q3g==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/merge-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", + "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==", + "license": "MIT" + }, + "node_modules/micromatch": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", + "integrity": 
"sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", + "license": "MIT", + "dependencies": { + "braces": "^3.0.3", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/micromatch/node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "license": "MIT", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mimic-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", + "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/minimist": { + "version": "1.2.8", + 
"resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", + "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, + "node_modules/nan": { + "version": "2.25.0", + "resolved": "https://registry.npmjs.org/nan/-/nan-2.25.0.tgz", + "integrity": "sha512-0M90Ag7Xn5KMLLZ7zliPWP3rT90P6PN+IzVFS0VqmnPktBk3700xUVv8Ikm9EUaUE5SDWdp/BIxdENzVznpm1g==", + "license": "MIT", + "optional": true + }, + "node_modules/nanoid": { + "version": "3.3.11", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", + "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/natural-compare": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", + "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", + "dev": true, + "license": "MIT" + }, + "node_modules/negotiator": { + "version": "0.6.4", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.4.tgz", + "integrity": "sha512-myRT3DiWPHqho5PrJaIRyaMv2kgYf0mUVgBNOYMuCH5Ki1yEiQaf/ZJuQ62nvpc44wL5WDbTX7yGJi1Neevw8w==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/netmask": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/netmask/-/netmask-2.0.2.tgz", 
+ "integrity": "sha512-dBpDMdxv9Irdq66304OLfEmQ9tbNRFnFTuZiLo+bD+r332bBmMJ8GBLXklIXXgxd3+v9+KUnZaUR5PJMa75Gsg==", + "license": "MIT", + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/node-fetch": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz", + "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", + "license": "MIT", + "dependencies": { + "whatwg-url": "^5.0.0" + }, + "engines": { + "node": "4.x || >=6.0.0" + }, + "peerDependencies": { + "encoding": "^0.1.0" + }, + "peerDependenciesMeta": { + "encoding": { + "optional": true + } + } + }, + "node_modules/node-int64": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/node-int64/-/node-int64-0.4.0.tgz", + "integrity": "sha512-O5lz91xSOeoXP6DulyHfllpq+Eg00MWitZIbtPfoSEvqIHdl5gfcY6hYzDWnj0qD5tz52PI08u9qUvSVeUBeHw==", + "license": "MIT" + }, + "node_modules/node-releases": { + "version": "2.0.27", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.27.tgz", + "integrity": "sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/npm-run-path": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", + "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", + "license": "MIT", + "dependencies": { + "path-key": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/object-hash": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/object-hash/-/object-hash-2.2.0.tgz", + "integrity": "sha512-gScRMn0bS5fH+IuwyIFgnh9zBdo4DV+6GhygmWM9HyNJSgS0hScp1f5vjtm7oIIOiT9trXrShAkLFSc2IqKNgw==", + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/object-inspect": { + "version": "1.13.4", + "resolved": 
"https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.4.tgz", + "integrity": "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/oidc-token-hash": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/oidc-token-hash/-/oidc-token-hash-5.2.0.tgz", + "integrity": "sha512-6gj2m8cJZ+iSW8bm0FXdGF0YhIQbKrfP4yWTNzxc31U6MOjfEmB1rHvlYvxI1B7t7BCi1F2vYTT6YhtQRG4hxw==", + "license": "MIT", + "engines": { + "node": "^10.13.0 || >=12.0.0" + } + }, + "node_modules/on-finished": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz", + "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==", + "license": "MIT", + "dependencies": { + "ee-first": "1.1.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/on-headers": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.1.0.tgz", + "integrity": "sha512-737ZY3yNnXy37FHkQxPzt4UZ2UWPWiCZWLvFZ4fu5cueciegX0zGPnrlY6bwRg4FdQOe9YU8MkmJwGhoMybl8A==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "license": "ISC", + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/one-time": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/one-time/-/one-time-1.0.0.tgz", + "integrity": "sha512-5DXOiRKwuSEcQ/l0kGCF6Q3jcADFv5tSmRaJck/OqkVFcOzutB134KRSfF0xDrL39MNnqxbHBbUUcjZIhTgb2g==", + "license": "MIT", + "dependencies": { + "fn.name": "1.x.x" + } + }, + "node_modules/onetime": { + "version": "5.1.2", + "resolved": 
"https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", + "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", + "license": "MIT", + "dependencies": { + "mimic-fn": "^2.1.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/open": { + "version": "8.4.2", + "resolved": "https://registry.npmjs.org/open/-/open-8.4.2.tgz", + "integrity": "sha512-7x81NCL719oNbsq/3mh+hVrAWmFuEYUqrq/Iw3kUzH8ReypT9QQ0BLoJS7/G9k6N81XjW4qHWtjWwe/9eLy1EQ==", + "license": "MIT", + "dependencies": { + "define-lazy-prop": "^2.0.0", + "is-docker": "^2.1.1", + "is-wsl": "^2.2.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/openid-client": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/openid-client/-/openid-client-5.7.1.tgz", + "integrity": "sha512-jDBPgSVfTnkIh71Hg9pRvtJc6wTwqjRkN88+gCFtYWrlP4Yx2Dsrow8uPi3qLr/aeymPF3o2+dS+wOpglK04ew==", + "license": "MIT", + "dependencies": { + "jose": "^4.15.9", + "lru-cache": "^6.0.0", + "object-hash": "^2.2.0", + "oidc-token-hash": "^5.0.3" + }, + "funding": { + "url": "https://github.com/sponsors/panva" + } + }, + "node_modules/openid-client/node_modules/lru-cache": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", + "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "license": "ISC", + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/openid-client/node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", + "license": "ISC" + }, + "node_modules/optionator": { + "version": 
"0.9.4", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.4.tgz", + "integrity": "sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==", + "dev": true, + "license": "MIT", + "dependencies": { + "deep-is": "^0.1.3", + "fast-levenshtein": "^2.0.6", + "levn": "^0.4.1", + "prelude-ls": "^1.2.1", + "type-check": "^0.4.0", + "word-wrap": "^1.2.5" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/pac-proxy-agent": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/pac-proxy-agent/-/pac-proxy-agent-7.2.0.tgz", + "integrity": "sha512-TEB8ESquiLMc0lV8vcd5Ql/JAKAoyzHFXaStwjkzpOpC5Yv+pIzLfHvjTSdf3vpa2bMiUQrg9i6276yn8666aA==", + "license": "MIT", + "dependencies": { + "@tootallnate/quickjs-emscripten": "^0.23.0", + "agent-base": "^7.1.2", + "debug": "^4.3.4", + "get-uri": "^6.0.1", + "http-proxy-agent": "^7.0.0", + "https-proxy-agent": "^7.0.6", + "pac-resolver": "^7.0.1", + "socks-proxy-agent": "^8.0.5" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/pac-resolver": { + "version": "7.0.1", + "resolved": 
"https://registry.npmjs.org/pac-resolver/-/pac-resolver-7.0.1.tgz", + "integrity": "sha512-5NPgf87AT2STgwa2ntRMr45jTKrYBGkVU36yT0ig/n/GMAa3oPqhZfIQ2kMEimReg0+t9kZViDVZ83qfVUlckg==", + "license": "MIT", + "dependencies": { + "degenerator": "^5.0.0", + "netmask": "^2.0.2" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/pad-left": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/pad-left/-/pad-left-2.1.0.tgz", + "integrity": "sha512-HJxs9K9AztdIQIAIa/OIazRAUW/L6B9hbQDxO4X07roW3eo9XqZc2ur9bn1StH9CnbbI9EgvejHQX7CBpCF1QA==", + "license": "MIT", + "dependencies": { + "repeat-string": "^1.5.4" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/parent-module": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", + "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "dev": true, + "license": "MIT", + "dependencies": { + "callsites": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/parseurl": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", + "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-is-inside": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/path-is-inside/-/path-is-inside-1.0.2.tgz", + "integrity": "sha512-DUWJr3+ULp4zXmol/SZkFf3JGsS9/SIv+Y3Rt93/UjPpDpklB5f1er4O3POIbUuUJ3FXgqte2Q7SrU6zAqwk8w==", + "license": "(WTFPL OR MIT)" + }, + "node_modules/path-key": { 
+ "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-to-regexp": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-3.3.0.tgz", + "integrity": "sha512-qyCH421YQPS2WFDxDjftfc1ZR5WKQzVzqsp4n9M2kQhVOo/ByahFoUNJfl58kOcEGfQ//7weFTDhm+ss8Ecxgw==", + "license": "MIT" + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "dev": true, + "license": "ISC" + }, + "node_modules/picomatch": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", + "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", + "dev": true, + "license": "MIT", + "peer": true, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/postcss": { + "version": "8.5.6", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.6.tgz", + "integrity": "sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.11", + "picocolors": "^1.1.1", + "source-map-js": "^1.2.1" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/prelude-ls": { + "version": "1.2.1", + 
"resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", + "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/proxy-addr": { + "version": "2.0.7", + "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz", + "integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==", + "license": "MIT", + "dependencies": { + "forwarded": "0.2.0", + "ipaddr.js": "1.9.1" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/proxy-agent": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/proxy-agent/-/proxy-agent-6.5.0.tgz", + "integrity": "sha512-TmatMXdr2KlRiA2CyDu8GqR8EjahTG3aY3nXjdzFyoZbmB8hrBsTyMezhULIXKnC0jpfjlmiZ3+EaCzoInSu/A==", + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.2", + "debug": "^4.3.4", + "http-proxy-agent": "^7.0.1", + "https-proxy-agent": "^7.0.6", + "lru-cache": "^7.14.1", + "pac-proxy-agent": "^7.1.0", + "proxy-from-env": "^1.1.0", + "socks-proxy-agent": "^8.0.5" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/proxy-agent/node_modules/lru-cache": { + "version": "7.18.3", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-7.18.3.tgz", + "integrity": "sha512-jumlc0BIUrS3qJGgIkWZsyfAM7NCWiBcCDhnd+3NNM5KbBmLTgHVfWBcg6W+rLUsIpzpERPsvwUP7CckAQSOoA==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/proxy-from-env": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", + "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==", + "license": "MIT" + }, + "node_modules/punycode": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", + "integrity": 
"sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/q": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/q/-/q-1.5.1.tgz", + "integrity": "sha512-kV/CThkXo6xyFEZUugw/+pIOywXcDbFYgSct5cT3gqlbkBE1SJdwy6UQoZvodiWF/ckQLZyDE/Bu1M6gVu5lVw==", + "deprecated": "You or someone you depend on is using Q, the JavaScript Promise library that gave JavaScript developers strong feelings about promises. They can almost certainly migrate to the native JavaScript promise now. Thank you literally everyone for joining me in this bet against the odds. Be excellent to each other.\n\n(For a CapTP with native promises, see @endo/eventual-send and @endo/captp)", + "license": "MIT", + "engines": { + "node": ">=0.6.0", + "teleport": ">=0.2.0" + } + }, + "node_modules/qs": { + "version": "6.15.0", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.15.0.tgz", + "integrity": "sha512-mAZTtNCeetKMH+pSjrb76NAM8V9a05I9aBZOHztWy/UqcJdQYNsf59vrRKWnojAT9Y+GbIvoTBC++CPHqpDBhQ==", + "license": "BSD-3-Clause", + "dependencies": { + "side-channel": "^1.1.0" + }, + "engines": { + "node": ">=0.6" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/range-parser": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.0.tgz", + "integrity": "sha512-kA5WQoNVo4t9lNx2kQNFCxKeBl5IbbSNBl1M/tLkw9WCn+hxNBAW5Qh8gdhs63CJnhjJ2zQWFoqPJP2sK1AV5A==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/raw-body": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-3.0.2.tgz", + "integrity": "sha512-K5zQjDllxWkf7Z5xJdV0/B0WTNqx6vxG70zJE4N0kBs4LovmEYWJzQGxC9bS9RAKu3bgM40lrd5zoLJ12MQ5BA==", + "license": "MIT", + "dependencies": { + "bytes": "~3.1.2", + "http-errors": "~2.0.1", + "iconv-lite": "~0.7.0", + "unpipe": "~1.0.0" + }, + "engines": { + 
"node": ">= 0.10" + } + }, + "node_modules/rc": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz", + "integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==", + "license": "(BSD-2-Clause OR MIT OR Apache-2.0)", + "dependencies": { + "deep-extend": "^0.6.0", + "ini": "~1.3.0", + "minimist": "^1.2.0", + "strip-json-comments": "~2.0.1" + }, + "bin": { + "rc": "cli.js" + } + }, + "node_modules/rc/node_modules/strip-json-comments": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", + "integrity": "sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react": { + "version": "19.2.4", + "resolved": "https://registry.npmjs.org/react/-/react-19.2.4.tgz", + "integrity": "sha512-9nfp2hYpCwOjAN+8TZFGhtWEwgvWHXqESH8qT89AT/lWklpLON22Lc8pEtnpsZz7VmawabSU0gCjnj8aC0euHQ==", + "license": "MIT", + "peer": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-dom": { + "version": "19.2.4", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.2.4.tgz", + "integrity": "sha512-AXJdLo8kgMbimY95O2aKQqsz2iWi9jMgKJhRBAxECE4IFxfcazB2LmzloIoibJI3C12IlY20+KFaLv+71bUJeQ==", + "license": "MIT", + "peer": true, + "dependencies": { + "scheduler": "^0.27.0" + }, + "peerDependencies": { + "react": "^19.2.4" + } + }, + "node_modules/react-refresh": { + "version": "0.18.0", + "resolved": "https://registry.npmjs.org/react-refresh/-/react-refresh-0.18.0.tgz", + "integrity": "sha512-QgT5//D3jfjJb6Gsjxv0Slpj23ip+HtOpnNgnb2S5zU3CB26G/IDPGoy4RJB42wzFE46DRsstbW6tKHoKbhAxw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-router": { + "version": "6.30.3", + "resolved": "https://registry.npmjs.org/react-router/-/react-router-6.30.3.tgz", + 
"integrity": "sha512-XRnlbKMTmktBkjCLE8/XcZFlnHvr2Ltdr1eJX4idL55/9BbORzyZEaIkBFDhFGCEWBBItsVrDxwx3gnisMitdw==", + "license": "MIT", + "dependencies": { + "@remix-run/router": "1.23.2" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "react": ">=16.8" + } + }, + "node_modules/react-router-dom": { + "version": "6.30.3", + "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-6.30.3.tgz", + "integrity": "sha512-pxPcv1AczD4vso7G4Z3TKcvlxK7g7TNt3/FNGMhfqyntocvYKj+GCatfigGDjbLozC4baguJ0ReCigoDJXb0ag==", + "license": "MIT", + "dependencies": { + "@remix-run/router": "1.23.2", + "react-router": "6.30.3" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "react": ">=16.8", + "react-dom": ">=16.8" + } + }, + "node_modules/readable-stream": { + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", + "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", + "license": "MIT", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/registry-auth-token": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/registry-auth-token/-/registry-auth-token-3.3.2.tgz", + "integrity": "sha512-JL39c60XlzCVgNrO+qq68FoNb56w/m7JYvGR2jT5iR1xBrUA3Mfx5Twk5rqTThPmQKMWydGmq8oFtDlxfrmxnQ==", + "license": "MIT", + "dependencies": { + "rc": "^1.1.6", + "safe-buffer": "^5.0.1" + } + }, + "node_modules/registry-url": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/registry-url/-/registry-url-3.1.0.tgz", + "integrity": "sha512-ZbgR5aZEdf4UKZVBPYIgaglBmSF2Hi94s2PcIHhRGFjKYu+chjJdYfHn4rt3hB6eCKLJ8giVIIfgMa1ehDfZKA==", + "license": "MIT", + "dependencies": { + "rc": "^1.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/repeat-string": { + "version": "1.6.1", + "resolved": 
"https://registry.npmjs.org/repeat-string/-/repeat-string-1.6.1.tgz", + "integrity": "sha512-PV0dzCYDNfRi1jCDbJzpW7jNNDRuCOG/jI5ctQcGKt/clZD+YcPS3yIlWuTJMmESC8aevCFmWJy5wjAFgNqN6w==", + "license": "MIT", + "engines": { + "node": ">=0.10" + } + }, + "node_modules/require-from-string": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", + "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/requires-port": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", + "integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==", + "license": "MIT" + }, + "node_modules/resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/rollup": { + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.57.1.tgz", + "integrity": "sha512-oQL6lgK3e2QZeQ7gcgIkS2YZPg5slw37hYufJ3edKlfQSGGm8ICoxswK15ntSzF/a8+h7ekRy7k7oWc3BQ7y8A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "1.0.8" + }, + "bin": { + "rollup": "dist/bin/rollup" + }, + "engines": { + "node": ">=18.0.0", + "npm": ">=8.0.0" + }, + "optionalDependencies": { + "@rollup/rollup-android-arm-eabi": "4.57.1", + "@rollup/rollup-android-arm64": "4.57.1", + "@rollup/rollup-darwin-arm64": "4.57.1", + "@rollup/rollup-darwin-x64": "4.57.1", + "@rollup/rollup-freebsd-arm64": "4.57.1", + "@rollup/rollup-freebsd-x64": "4.57.1", + "@rollup/rollup-linux-arm-gnueabihf": "4.57.1", + "@rollup/rollup-linux-arm-musleabihf": 
"4.57.1", + "@rollup/rollup-linux-arm64-gnu": "4.57.1", + "@rollup/rollup-linux-arm64-musl": "4.57.1", + "@rollup/rollup-linux-loong64-gnu": "4.57.1", + "@rollup/rollup-linux-loong64-musl": "4.57.1", + "@rollup/rollup-linux-ppc64-gnu": "4.57.1", + "@rollup/rollup-linux-ppc64-musl": "4.57.1", + "@rollup/rollup-linux-riscv64-gnu": "4.57.1", + "@rollup/rollup-linux-riscv64-musl": "4.57.1", + "@rollup/rollup-linux-s390x-gnu": "4.57.1", + "@rollup/rollup-linux-x64-gnu": "4.57.1", + "@rollup/rollup-linux-x64-musl": "4.57.1", + "@rollup/rollup-openbsd-x64": "4.57.1", + "@rollup/rollup-openharmony-arm64": "4.57.1", + "@rollup/rollup-win32-arm64-msvc": "4.57.1", + "@rollup/rollup-win32-ia32-msvc": "4.57.1", + "@rollup/rollup-win32-x64-gnu": "4.57.1", + "@rollup/rollup-win32-x64-msvc": "4.57.1", + "fsevents": "~2.3.2" + } + }, + "node_modules/router": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/router/-/router-2.2.0.tgz", + "integrity": "sha512-nLTrUKm2UyiL7rlhapu/Zl45FwNgkZGaCpZbIHajDYgwlJCOzLSk+cIPAnsEqV955GjILJnKbdQC1nVPz+gAYQ==", + "license": "MIT", + "dependencies": { + "debug": "^4.4.0", + "depd": "^2.0.0", + "is-promise": "^4.0.0", + "parseurl": "^1.3.3", + "path-to-regexp": "^8.0.0" + }, + "engines": { + "node": ">= 18" + } + }, + "node_modules/router/node_modules/path-to-regexp": { + "version": "8.3.0", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-8.3.0.tgz", + "integrity": "sha512-7jdwVIRtsP8MYpdXSwOS0YdD0Du+qOoF/AEPIt88PcCFrZCzx41oxku1jD88hZBwbNUIEfpqvuhjFaMAqMTWnA==", + "license": "MIT", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + 
}, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/safe-stable-stringify": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/safe-stable-stringify/-/safe-stable-stringify-2.5.0.tgz", + "integrity": "sha512-b3rppTKm9T+PsVCBEOUR46GWI7fdOs00VKZ1+9c1EWDaDMvjQc6tUwuFyIprgGgTcWoVHSKrU8H31ZHA2e0RHA==", + "license": "MIT", + "engines": { + "node": ">=10" + } + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", + "license": "MIT" + }, + "node_modules/scheduler": { + "version": "0.27.0", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.27.0.tgz", + "integrity": "sha512-eNv+WrVbKu1f3vbYJT/xtiF5syA5HPIMtf9IgY/nKg0sWqzAUEvqY/xm7OcZc/qafLx/iO9FgOmeSAp4v5ti/Q==", + "license": "MIT" + }, + "node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/send": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/send/-/send-1.2.1.tgz", + "integrity": "sha512-1gnZf7DFcoIcajTjTwjwuDjzuz4PPcY2StKPlsGAQ1+YH20IRVrBaXSWmdjowTJ6u8Rc01PoYOGHXfP1mYcZNQ==", + "license": "MIT", + "dependencies": { + "debug": "^4.4.3", + "encodeurl": "^2.0.0", + "escape-html": "^1.0.3", + "etag": "^1.8.1", + "fresh": "^2.0.0", + "http-errors": "^2.0.1", + "mime-types": "^3.0.2", + "ms": "^2.1.3", + "on-finished": "^2.4.1", + "range-parser": "^1.2.1", + "statuses": "^2.0.2" + }, + "engines": { + "node": ">= 18" + }, + "funding": { + "type": "opencollective", + "url": 
"https://opencollective.com/express" + } + }, + "node_modules/send/node_modules/mime-db": { + "version": "1.54.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.54.0.tgz", + "integrity": "sha512-aU5EJuIN2WDemCcAp2vFBfp/m4EAhWJnUNSSw0ixs7/kXbd6Pg64EmwJkNdFhB8aWt1sH2CTXrLxo/iAGV3oPQ==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/send/node_modules/mime-types": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-3.0.2.tgz", + "integrity": "sha512-Lbgzdk0h4juoQ9fCKXW4by0UJqj+nOOrI9MJ1sSj4nI8aI2eo1qmvQEie4VD1glsS250n15LsWsYtCugiStS5A==", + "license": "MIT", + "dependencies": { + "mime-db": "^1.54.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/send/node_modules/range-parser": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", + "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/serve": { + "version": "14.2.5", + "resolved": "https://registry.npmjs.org/serve/-/serve-14.2.5.tgz", + "integrity": "sha512-Qn/qMkzCcMFVPb60E/hQy+iRLpiU8PamOfOSYoAHmmF+fFFmpPpqa6Oci2iWYpTdOUM3VF+TINud7CfbQnsZbA==", + "license": "MIT", + "dependencies": { + "@zeit/schemas": "2.36.0", + "ajv": "8.12.0", + "arg": "5.0.2", + "boxen": "7.0.0", + "chalk": "5.0.1", + "chalk-template": "0.4.0", + "clipboardy": "3.0.0", + "compression": "1.8.1", + "is-port-reachable": "4.0.0", + "serve-handler": "6.1.6", + "update-check": "1.5.4" + }, + "bin": { + "serve": "build/main.js" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/serve-handler": { + "version": "6.1.6", + "resolved": "https://registry.npmjs.org/serve-handler/-/serve-handler-6.1.6.tgz", + "integrity": 
"sha512-x5RL9Y2p5+Sh3D38Fh9i/iQ5ZK+e4xuXRd/pGbM4D13tgo/MGwbttUk8emytcr1YYzBYs+apnUngBDFYfpjPuQ==", + "license": "MIT", + "dependencies": { + "bytes": "3.0.0", + "content-disposition": "0.5.2", + "mime-types": "2.1.18", + "minimatch": "3.1.2", + "path-is-inside": "1.0.2", + "path-to-regexp": "3.3.0", + "range-parser": "1.2.0" + } + }, + "node_modules/serve-handler/node_modules/bytes": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.0.0.tgz", + "integrity": "sha512-pMhOfFDPiv9t5jjIXkHosWmkSyQbvsgEVNkz0ERHbuLh2T/7j4Mqqpz523Fe8MVY89KC6Sh/QfS2sM+SjgFDcw==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/serve-handler/node_modules/mime-db": { + "version": "1.33.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.33.0.tgz", + "integrity": "sha512-BHJ/EKruNIqJf/QahvxwQZXKygOQ256myeN/Ew+THcAa5q+PjyTTMMeNQC4DZw5AwfvelsUrA6B67NKMqXDbzQ==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/serve-handler/node_modules/mime-types": { + "version": "2.1.18", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.18.tgz", + "integrity": "sha512-lc/aahn+t4/SWV/qcmumYjymLsWfN3ELhpmVuUFjgsORruuZPVSwAQryq+HHGvO/SI2KVX26bx+En+zhM8g8hQ==", + "license": "MIT", + "dependencies": { + "mime-db": "~1.33.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/serve-static": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-2.2.1.tgz", + "integrity": "sha512-xRXBn0pPqQTVQiC8wyQrKs2MOlX24zQ0POGaj0kultvoOCstBQM5yvOhAVSUwOMjQtTvsPWoNCHfPGwaaQJhTw==", + "license": "MIT", + "dependencies": { + "encodeurl": "^2.0.0", + "escape-html": "^1.0.3", + "parseurl": "^1.3.3", + "send": "^1.2.0" + }, + "engines": { + "node": ">= 18" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/serve/node_modules/ajv": { + "version": "8.12.0", + "resolved": 
"https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz", + "integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==", + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/serve/node_modules/chalk": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-5.0.1.tgz", + "integrity": "sha512-Fo07WOYGqMfCWHOzSXOt2CxDbC6skS/jO9ynEcmpANMoPrD+W1r1K6Vx7iNm+AQmETU1Xr2t+n8nzkV9t6xh3w==", + "license": "MIT", + "engines": { + "node": "^12.17.0 || ^14.13 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/serve/node_modules/json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", + "license": "MIT" + }, + "node_modules/setprototypeof": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", + "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==", + "license": "ISC" + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "license": "MIT", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": 
"sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/side-channel": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.1.0.tgz", + "integrity": "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.3", + "side-channel-list": "^1.0.0", + "side-channel-map": "^1.0.1", + "side-channel-weakmap": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-list": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/side-channel-list/-/side-channel-list-1.0.0.tgz", + "integrity": "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-map": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/side-channel-map/-/side-channel-map-1.0.1.tgz", + "integrity": "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==", + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-weakmap": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz", + "integrity": "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==", + 
"license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3", + "side-channel-map": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/signal-exit": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", + "license": "ISC" + }, + "node_modules/smart-buffer": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/smart-buffer/-/smart-buffer-4.2.0.tgz", + "integrity": "sha512-94hK0Hh8rPqQl2xXc3HsaBoOXKV20MToPkcXvwbISWLEs+64sBq5kFgn2kJDHb1Pry9yrP0dxrCI9RRci7RXKg==", + "license": "MIT", + "engines": { + "node": ">= 6.0.0", + "npm": ">= 3.0.0" + } + }, + "node_modules/socks": { + "version": "2.8.7", + "resolved": "https://registry.npmjs.org/socks/-/socks-2.8.7.tgz", + "integrity": "sha512-HLpt+uLy/pxB+bum/9DzAgiKS8CX1EvbWxI4zlmgGCExImLdiad2iCwXT5Z4c9c3Eq8rP2318mPW2c+QbtjK8A==", + "license": "MIT", + "dependencies": { + "ip-address": "^10.0.1", + "smart-buffer": "^4.2.0" + }, + "engines": { + "node": ">= 10.0.0", + "npm": ">= 3.0.0" + } + }, + "node_modules/socks-proxy-agent": { + "version": "8.0.5", + "resolved": "https://registry.npmjs.org/socks-proxy-agent/-/socks-proxy-agent-8.0.5.tgz", + "integrity": "sha512-HehCEsotFqbPW9sJ8WVYB6UbmIMv7kUUORIF2Nncq4VQvBfNBLibW9YZR5dlYCSUhwcD628pRllm7n+E+YTzJw==", + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.2", + "debug": "^4.3.4", + "socks": "^2.8.3" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "license": "BSD-3-Clause", + 
"optional": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/source-map-js": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/stack-trace": { + "version": "0.0.10", + "resolved": "https://registry.npmjs.org/stack-trace/-/stack-trace-0.0.10.tgz", + "integrity": "sha512-KGzahc7puUKkzyMt+IqAep+TVNbKP+k2Lmwhub39m1AsTSkaDutx56aDCo+HLDzf/D26BIHTJWNiTG1KAJiQCg==", + "license": "MIT", + "engines": { + "node": "*" + } + }, + "node_modules/statuses": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.2.tgz", + "integrity": "sha512-DvEy55V3DB7uknRo+4iOGT5fP1slR8wQohVdknigZPMpMstaKJQWhwiYBACJE3Ul2pTnATihhBYnRhZQHGBiRw==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/stream-read-all": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/stream-read-all/-/stream-read-all-3.0.1.tgz", + "integrity": "sha512-EWZT9XOceBPlVJRrYcykW8jyRSZYbkb/0ZK36uLEmoWVO5gxBOnntNTseNzfREsqxqdfEGQrD8SXQ3QWbBmq8A==", + "license": "MIT", + "engines": { + "node": ">=10" + } + }, + "node_modules/string_decoder": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", + "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", + "license": "MIT", + "dependencies": { + "safe-buffer": "~5.2.0" + } + }, + "node_modules/string-width": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", + "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", + "license": "MIT", + "dependencies": { + "eastasianwidth": "^0.2.0", + 
"emoji-regex": "^9.2.2", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/strip-ansi": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz", + "integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==", + "license": "MIT", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/strip-final-newline": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", + "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/table-layout": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/table-layout/-/table-layout-3.0.2.tgz", + "integrity": "sha512-rpyNZYRw+/C+dYkcQ3Pr+rLxW4CfHpXjPDnG7lYhdRoUcZTUt+KEsX+94RGp/aVp/MQU35JCITv2T/beY4m+hw==", + "license": "MIT", + 
"dependencies": { + "@75lb/deep-merge": "^1.1.1", + "array-back": "^6.2.2", + "command-line-args": "^5.2.1", + "command-line-usage": "^7.0.0", + "stream-read-all": "^3.0.1", + "typical": "^7.1.1", + "wordwrapjs": "^5.1.0" + }, + "bin": { + "table-layout": "bin/cli.js" + }, + "engines": { + "node": ">=12.17" + } + }, + "node_modules/table-layout/node_modules/array-back": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/array-back/-/array-back-6.2.2.tgz", + "integrity": "sha512-gUAZ7HPyb4SJczXAMUXMGAvI976JoK3qEx9v1FTmeYuJj0IBiaKttG1ydtGKdkfqWkIkouke7nG8ufGy77+Cvw==", + "license": "MIT", + "engines": { + "node": ">=12.17" + } + }, + "node_modules/table-layout/node_modules/typical": { + "version": "7.3.0", + "resolved": "https://registry.npmjs.org/typical/-/typical-7.3.0.tgz", + "integrity": "sha512-ya4mg/30vm+DOWfBg4YK3j2WD6TWtRkCbasOJr40CseYENzCUby/7rIvXA99JGsQHeNxLbnXdyLLxKSv3tauFw==", + "license": "MIT", + "engines": { + "node": ">=12.17" + } + }, + "node_modules/text-hex": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/text-hex/-/text-hex-1.0.0.tgz", + "integrity": "sha512-uuVGNWzgJ4yhRaNSiubPY7OjISw4sw4E5Uv0wbjp+OzcbmVU/rsT8ujgcXJhn9ypzsgr5vlzpPqP+MBBKcGvbg==", + "license": "MIT" + }, + "node_modules/thrift": { + "version": "0.16.0", + "resolved": "https://registry.npmjs.org/thrift/-/thrift-0.16.0.tgz", + "integrity": "sha512-W8DpGyTPlIaK3f+e1XOCLxefaUWXtrOXAaVIDbfYhmVyriYeAKgsBVFNJUV1F9SQ2SPt2sG44AZQxSGwGj/3VA==", + "license": "Apache-2.0", + "dependencies": { + "browser-or-node": "^1.2.1", + "isomorphic-ws": "^4.0.1", + "node-int64": "^0.4.0", + "q": "^1.5.0", + "ws": "^5.2.3" + }, + "engines": { + "node": ">= 10.18.0" + } + }, + "node_modules/tinyglobby": { + "version": "0.2.15", + "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.15.tgz", + "integrity": "sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==", + "dev": true, + "license": "MIT", + "dependencies": { + 
"fdir": "^6.5.0", + "picomatch": "^4.0.3" + }, + "engines": { + "node": ">=12.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/SuperchupuDev" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "license": "MIT", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/toidentifier": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz", + "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==", + "license": "MIT", + "engines": { + "node": ">=0.6" + } + }, + "node_modules/tr46": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", + "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==", + "license": "MIT" + }, + "node_modules/triple-beam": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/triple-beam/-/triple-beam-1.4.1.tgz", + "integrity": "sha512-aZbgViZrg1QNcG+LULa7nhZpJTZSLm/mXnHXnbAbjmN5aSa0y7V+wvv6+4WaBtpISJzThKy+PIPxc1Nq1EJ9mg==", + "license": "MIT", + "engines": { + "node": ">= 14.0.0" + } + }, + "node_modules/tslib": { + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", + "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", + "license": "0BSD" + }, + "node_modules/type-check": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", + "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", + "dev": true, + "license": "MIT", + "dependencies": { + "prelude-ls": "^1.2.1" + }, + 
"engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/type-fest": { + "version": "2.19.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-2.19.0.tgz", + "integrity": "sha512-RAH822pAdBgcNMAfWnCBU3CFZcfZ/i1eZjwFU/dsLKumyuuP3niueg2UAukXYF0E2AAoc82ZSSf9J0WQBinzHA==", + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=12.20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/type-is": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/type-is/-/type-is-2.0.1.tgz", + "integrity": "sha512-OZs6gsjF4vMp32qrCbiVSkrFmXtG/AZhY3t0iAMrMBiAZyV9oALtXO8hsrHbMXF9x6L3grlFuwW2oAz7cav+Gw==", + "license": "MIT", + "dependencies": { + "content-type": "^1.0.5", + "media-typer": "^1.1.0", + "mime-types": "^3.0.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/type-is/node_modules/mime-db": { + "version": "1.54.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.54.0.tgz", + "integrity": "sha512-aU5EJuIN2WDemCcAp2vFBfp/m4EAhWJnUNSSw0ixs7/kXbd6Pg64EmwJkNdFhB8aWt1sH2CTXrLxo/iAGV3oPQ==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/type-is/node_modules/mime-types": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-3.0.2.tgz", + "integrity": "sha512-Lbgzdk0h4juoQ9fCKXW4by0UJqj+nOOrI9MJ1sSj4nI8aI2eo1qmvQEie4VD1glsS250n15LsWsYtCugiStS5A==", + "license": "MIT", + "dependencies": { + "mime-db": "^1.54.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/typescript": { + "version": "5.9.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz", + "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + 
"node": ">=14.17" + } + }, + "node_modules/typical": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/typical/-/typical-4.0.0.tgz", + "integrity": "sha512-VAH4IvQ7BDFYglMd7BPRDfLgxZZX4O4TFcRDA6EN5X7erNJJq+McIEp8np9aVtxrCJ6qx4GTYVfOWNjcqwZgRw==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/undici-types": { + "version": "7.18.2", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-7.18.2.tgz", + "integrity": "sha512-AsuCzffGHJybSaRrmr5eHr81mwJU3kjw6M+uprWvCXiNeN9SOGwQ3Jn8jb8m3Z6izVgknn1R0FTCEAP2QrLY/w==", + "license": "MIT" + }, + "node_modules/unpipe": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", + "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/update-browserslist-db": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.2.3.tgz", + "integrity": "sha512-Js0m9cx+qOgDxo0eMiFGEueWztz+d4+M3rGlmKPT+T4IS/jP4ylw3Nwpu6cpTTP8R1MAC1kF4VbdLt3ARf209w==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "escalade": "^3.2.0", + "picocolors": "^1.1.1" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, + "node_modules/update-check": { + "version": "1.5.4", + "resolved": "https://registry.npmjs.org/update-check/-/update-check-1.5.4.tgz", + "integrity": "sha512-5YHsflzHP4t1G+8WGPlvKbJEbAJGCgw+Em+dGR1KmBUbr1J36SJBqlHLjR7oob7sco5hWHGQVcr9B2poIVDDTQ==", + "license": "MIT", + "dependencies": { + "registry-auth-token": "3.3.2", 
+ "registry-url": "3.1.0" + } + }, + "node_modules/uri-js": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "license": "BSD-2-Clause", + "dependencies": { + "punycode": "^2.1.0" + } + }, + "node_modules/use-sync-external-store": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/use-sync-external-store/-/use-sync-external-store-1.6.0.tgz", + "integrity": "sha512-Pp6GSwGP/NrPIrxVFAIkOQeyw8lFenOHijQWkUTrDvrF4ALqylP2C/KCkeS9dpUM3KvYRQhna5vt7IL95+ZQ9w==", + "license": "MIT", + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", + "license": "MIT" + }, + "node_modules/uuid": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-9.0.1.tgz", + "integrity": "sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA==", + "funding": [ + "https://github.com/sponsors/broofa", + "https://github.com/sponsors/ctavan" + ], + "license": "MIT", + "bin": { + "uuid": "dist/bin/uuid" + } + }, + "node_modules/vary": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", + "integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/vite": { + "version": "7.3.1", + "resolved": "https://registry.npmjs.org/vite/-/vite-7.3.1.tgz", + "integrity": "sha512-w+N7Hifpc3gRjZ63vYBXA56dvvRlNWRczTdmCBBa+CotUzAPf5b7YMdMR/8CQoeYE5LX3W4wj6RYTgonm1b9DA==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": 
{ + "esbuild": "^0.27.0", + "fdir": "^6.5.0", + "picomatch": "^4.0.3", + "postcss": "^8.5.6", + "rollup": "^4.43.0", + "tinyglobby": "^0.2.15" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^20.19.0 || >=22.12.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^20.19.0 || >=22.12.0", + "jiti": ">=1.21.0", + "less": "^4.0.0", + "lightningcss": "^1.21.0", + "sass": "^1.70.0", + "sass-embedded": "^1.70.0", + "stylus": ">=0.54.8", + "sugarss": "^5.0.0", + "terser": "^5.16.0", + "tsx": "^4.8.1", + "yaml": "^2.4.2" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "jiti": { + "optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + }, + "sass": { + "optional": true + }, + "sass-embedded": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + }, + "tsx": { + "optional": true + }, + "yaml": { + "optional": true + } + } + }, + "node_modules/webidl-conversions": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", + "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==", + "license": "BSD-2-Clause" + }, + "node_modules/whatwg-url": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", + "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", + "license": "MIT", + "dependencies": { + "tr46": "~0.0.3", + "webidl-conversions": "^3.0.0" + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": 
"sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "license": "ISC", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/widest-line": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/widest-line/-/widest-line-4.0.1.tgz", + "integrity": "sha512-o0cyEG0e8GPzT4iGHphIOh0cJOV8fivsXxddQasHPHfoZf1ZexrfeA21w2NaEN1RHE+fXlfISmOE8R9N3u3Qig==", + "license": "MIT", + "dependencies": { + "string-width": "^5.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/winston": { + "version": "3.19.0", + "resolved": "https://registry.npmjs.org/winston/-/winston-3.19.0.tgz", + "integrity": "sha512-LZNJgPzfKR+/J3cHkxcpHKpKKvGfDZVPS4hfJCc4cCG0CgYzvlD6yE/S3CIL/Yt91ak327YCpiF/0MyeZHEHKA==", + "license": "MIT", + "dependencies": { + "@colors/colors": "^1.6.0", + "@dabh/diagnostics": "^2.0.8", + "async": "^3.2.3", + "is-stream": "^2.0.0", + "logform": "^2.7.0", + "one-time": "^1.0.0", + "readable-stream": "^3.4.0", + "safe-stable-stringify": "^2.3.1", + "stack-trace": "0.0.x", + "triple-beam": "^1.3.0", + "winston-transport": "^4.9.0" + }, + "engines": { + "node": ">= 12.0.0" + } + }, + "node_modules/winston-transport": { + "version": "4.9.0", + "resolved": "https://registry.npmjs.org/winston-transport/-/winston-transport-4.9.0.tgz", + "integrity": "sha512-8drMJ4rkgaPo1Me4zD/3WLfI/zPdA9o2IipKODunnGDcuqbHwjsbB79ylv04LCGGzU0xQ6vTznOMpQGaLhhm6A==", + "license": "MIT", + "dependencies": { + "logform": "^2.7.0", + "readable-stream": "^3.6.2", + "triple-beam": "^1.3.0" + }, + "engines": { + "node": ">= 12.0.0" + } + }, + "node_modules/word-wrap": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.5.tgz", + "integrity": 
"sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/wordwrapjs": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/wordwrapjs/-/wordwrapjs-5.1.1.tgz", + "integrity": "sha512-0yweIbkINJodk27gX9LBGMzyQdBDan3s/dEAiwBOj+Mf0PPyWL6/rikalkv8EeD0E8jm4o5RXEOrFTP3NXbhJg==", + "license": "MIT", + "engines": { + "node": ">=12.17" + } + }, + "node_modules/wrap-ansi": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", + "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", + "license": "MIT", + "dependencies": { + "ansi-styles": "^6.1.0", + "string-width": "^5.0.1", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/ansi-styles": { + "version": "6.2.3", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz", + "integrity": "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", + "license": "ISC" + }, + "node_modules/ws": { + "version": "5.2.4", + "resolved": "https://registry.npmjs.org/ws/-/ws-5.2.4.tgz", + "integrity": "sha512-fFCejsuC8f9kOSu9FYaOw8CdO68O3h5v0lg4p74o8JqWpwTf9tniOD+nOB78aWoVSS6WptVUmDrp/KPsMVBWFQ==", + "license": "MIT", + "peer": true, + "dependencies": { + "async-limiter": "~1.0.0" + } + }, + "node_modules/xxhashjs": { + "version": 
"0.2.2", + "resolved": "https://registry.npmjs.org/xxhashjs/-/xxhashjs-0.2.2.tgz", + "integrity": "sha512-AkTuIuVTET12tpsVIQo+ZU6f/qDmKuRUcjaqR+OIvm+aCBsZ95i7UVY5WJ9TMsSaZ0DA2WxoZ4acu0sPH+OKAw==", + "license": "MIT", + "optional": true, + "dependencies": { + "cuint": "^0.2.2" + } + }, + "node_modules/yallist": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", + "dev": true, + "license": "ISC" + }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/zod": { + "version": "4.3.6", + "resolved": "https://registry.npmjs.org/zod/-/zod-4.3.6.tgz", + "integrity": "sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg==", + "dev": true, + "license": "MIT", + "peer": true, + "funding": { + "url": "https://github.com/sponsors/colinhacks" + } + }, + "node_modules/zod-validation-error": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/zod-validation-error/-/zod-validation-error-4.0.2.tgz", + "integrity": "sha512-Q6/nZLe6jxuU80qb/4uJ4t5v2VEZ44lzQjPDhYJNztRQ4wyWc6VF3D3Kb/fAuPetZQnhS3hnajCf9CsWesghLQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18.0.0" + }, + "peerDependencies": { + "zod": "^3.25.0 || ^4.0.0" + } + }, + "node_modules/zustand": { + "version": "4.5.7", + "resolved": "https://registry.npmjs.org/zustand/-/zustand-4.5.7.tgz", + "integrity": "sha512-CHOUy7mu3lbD6o6LJLfllpjkzhHXSBlX8B9+qPddUsIfeF5S/UZ5q0kmCsnRqT1UHFQZchNFDDzMbQsuesHWlw==", + "license": "MIT", + "dependencies": { + 
"use-sync-external-store": "^1.2.2" + }, + "engines": { + "node": ">=12.7.0" + }, + "peerDependencies": { + "@types/react": ">=16.8", + "immer": ">=9.0.6", + "react": ">=16.8" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "immer": { + "optional": true + }, + "react": { + "optional": true + } + } + } + } +} diff --git a/dbx-agent-app/app/webapp/package.json b/dbx-agent-app/app/webapp/package.json new file mode 100644 index 00000000..ec5605b5 --- /dev/null +++ b/dbx-agent-app/app/webapp/package.json @@ -0,0 +1,32 @@ +{ + "name": "multi-agent-registry-webapp", + "private": true, + "version": "0.1.0", + "type": "module", + "scripts": { + "dev": "vite", + "build": "tsc && vite build", + "preview": "vite preview", + "lint": "eslint . --ext ts,tsx --report-unused-disable-directives --max-warnings 0" + }, + "dependencies": { + "@databricks/sql": "^1.12.0", + "@xyflow/react": "^12.10.1", + "axios": "^1.6.0", + "express": "^5.2.1", + "http-proxy-middleware": "^3.0.5", + "react": "^19.2.0", + "react-dom": "^19.2.0", + "react-router-dom": "^6.22.0", + "serve": "^14.2.1" + }, + "devDependencies": { + "@types/react": "^19.2.5", + "@types/react-dom": "^19.2.3", + "@vitejs/plugin-react": "^5.1.1", + "eslint": "^9.39.1", + "eslint-plugin-react-hooks": "^7.0.1", + "typescript": "~5.9.3", + "vite": "^7.2.4" + } +} diff --git a/dbx-agent-app/app/webapp/server.js b/dbx-agent-app/app/webapp/server.js new file mode 100644 index 00000000..91499cf3 --- /dev/null +++ b/dbx-agent-app/app/webapp/server.js @@ -0,0 +1,163 @@ +/** + * Express proxy server for Multi-Agent Registry Webapp + * + * This server: + * 1. Serves the built React app from /dist + * 2. Proxies /api/* requests to the registry-api with authentication + * 3. 
Handles /supervisor/* requests if configured + */ + +import express from 'express'; +import { createProxyMiddleware } from 'http-proxy-middleware'; +import { fileURLToPath } from 'url'; +import { dirname, join } from 'path'; + +const __filename = fileURLToPath(import.meta.url); +const __dirname = dirname(__filename); + +const app = express(); +const PORT = process.env.PORT || 8000; + +// Registry API URL (from environment or default) +const REGISTRY_API_URL = process.env.REGISTRY_API_URL || 'https://registry-api-7474660127789418.aws.databricksapps.com'; + +console.log(`[CONFIG] Registry API URL: ${REGISTRY_API_URL}`); +console.log(`[CONFIG] Port: ${PORT}`); + +// OAuth token cache +let cachedToken = null; +let tokenExpiry = 0; + +// Get OAuth access token using service principal credentials +async function getAccessToken() { + // Return cached token if still valid (with 5-minute buffer) + if (cachedToken && Date.now() < tokenExpiry - 300000) { + return cachedToken; + } + + const clientId = process.env.DATABRICKS_CLIENT_ID; + const clientSecret = process.env.DATABRICKS_CLIENT_SECRET; + const host = process.env.DATABRICKS_HOST; + + if (!clientId || !clientSecret || !host) { + console.error('[AUTH] Missing OAuth credentials'); + return null; + } + + try { + const tokenUrl = `https://${host}/oidc/v1/token`; + const response = await fetch(tokenUrl, { + method: 'POST', + headers: { + 'Content-Type': 'application/x-www-form-urlencoded' + }, + body: new URLSearchParams({ + grant_type: 'client_credentials', + client_id: clientId, + client_secret: clientSecret, + scope: 'all-apis' + }) + }); + + if (!response.ok) { + console.error(`[AUTH] Token request failed: ${response.status}`); + return null; + } + + const data = await response.json(); + cachedToken = data.access_token; + tokenExpiry = Date.now() + (data.expires_in * 1000); + console.log(`[AUTH] Got access token, expires in ${data.expires_in} seconds`); + return cachedToken; + } catch (error) { + console.error(`[AUTH] 
Error getting token: ${error.message}`); + return null; + } +} + +// Health check endpoint +app.get('/health', (req, res) => { + res.json({ + status: 'healthy', + service: 'multi-agent-registry-webapp', + timestamp: new Date().toISOString() + }); +}); + +// Debug endpoint to check available env vars +app.get('/debug/env', (req, res) => { + const envVars = { + DATABRICKS_HOST: process.env.DATABRICKS_HOST || 'not set', + DATABRICKS_CLIENT_ID: process.env.DATABRICKS_CLIENT_ID ? 'present' : 'not set', + DATABRICKS_CLIENT_SECRET: process.env.DATABRICKS_CLIENT_SECRET ? 'present' : 'not set', + DATABRICKS_TOKEN: process.env.DATABRICKS_TOKEN ? 'present' : 'not set', + PORT: process.env.PORT, + REGISTRY_API_URL: process.env.REGISTRY_API_URL + }; + res.json(envVars); +}); + +// Middleware to add OAuth token to requests before proxying +app.use('/api', async (req, res, next) => { + try { + const token = await getAccessToken(); + if (token) { + req.headers['x-databricks-token'] = token; + console.log('[AUTH] Added OAuth token to request'); + } else { + console.error('[AUTH] Failed to get OAuth token'); + } + } catch (error) { + console.error(`[AUTH] Error getting token: ${error.message}`); + } + next(); +}); + +// Proxy /api requests to registry-api +// This forwards all /api/* requests to the registry-api with proper headers +app.use('/api', createProxyMiddleware({ + target: REGISTRY_API_URL, + changeOrigin: true, + secure: true, + logLevel: 'info', + onProxyReq: (proxyReq, req, res) => { + console.log(`[PROXY] ${req.method} ${req.url} -> ${REGISTRY_API_URL}${req.url}`); + + // Use the token added by the middleware + const token = req.headers['x-databricks-token']; + if (token) { + proxyReq.setHeader('Authorization', `Bearer ${token}`); + console.log('[PROXY] Added OAuth token to proxy request'); + } else { + console.error('[PROXY] No auth token available'); + } + }, + onProxyRes: (proxyRes, req, res) => { + console.log(`[PROXY] Response: ${proxyRes.statusCode} for 
${req.url}`); + }, + onError: (err, req, res) => { + console.error(`[PROXY ERROR] ${err.message}`); + res.status(500).json({ + error: 'Proxy error', + message: 'Failed to connect to registry API', + details: err.message + }); + } +})); + +// Serve static files from dist directory +const distPath = join(__dirname, 'dist'); +app.use(express.static(distPath)); + +// SPA fallback - serve index.html for all other routes not handled above +// Using a catch-all pattern that works with newer Express/path-to-regexp +app.use((req, res) => { + res.sendFile(join(distPath, 'index.html')); +}); + +// Start server +app.listen(PORT, '0.0.0.0', () => { + console.log(`[SERVER] Multi-Agent Registry Webapp running on http://0.0.0.0:${PORT}`); + console.log(`[SERVER] Proxying /api/* to ${REGISTRY_API_URL}`); + console.log(`[SERVER] Serving static files from ${distPath}`); +}); diff --git a/dbx-agent-app/app/webapp/src/App.css b/dbx-agent-app/app/webapp/src/App.css new file mode 100644 index 00000000..42bae88a --- /dev/null +++ b/dbx-agent-app/app/webapp/src/App.css @@ -0,0 +1,83 @@ +:root { + --primary-color: #1a73e8; + --secondary-color: #5f6368; + --bg-primary: #ffffff; + --bg-secondary: #f8f9fa; + --border-color: #dadce0; + --text-primary: #202124; + --text-secondary: #5f6368; + --error-color: #d93025; + --success-color: #1e8e3e; +} + +* { + box-sizing: border-box; + margin: 0; + padding: 0; +} + +body { + font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen', + 'Ubuntu', 'Cantarell', 'Fira Sans', 'Droid Sans', 'Helvetica Neue', + sans-serif; + -webkit-font-smoothing: antialiased; + -moz-osx-font-smoothing: grayscale; + color: var(--text-primary); + background-color: var(--bg-secondary); +} + +#root { + min-height: 100vh; +} + +button { + font-family: inherit; + font-size: 14px; + padding: 8px 16px; + border: none; + border-radius: 4px; + background-color: var(--primary-color); + color: white; + cursor: pointer; + transition: background-color 0.2s; +} + 
+button:hover:not(:disabled) { + background-color: #1557b0; +} + +button:disabled { + opacity: 0.6; + cursor: not-allowed; +} + +a { + color: var(--primary-color); + text-decoration: none; +} + +a:hover { + text-decoration: underline; +} + +.loading { + display: flex; + justify-content: center; + align-items: center; + padding: 40px; + color: var(--text-secondary); +} + +.error { + padding: 16px; + background-color: #fce8e6; + color: var(--error-color); + border-radius: 4px; + margin-bottom: 16px; +} + +.empty-message { + color: var(--text-secondary); + text-align: center; + padding: 20px; +} diff --git a/dbx-agent-app/app/webapp/src/App.tsx b/dbx-agent-app/app/webapp/src/App.tsx new file mode 100644 index 00000000..96d56e8f --- /dev/null +++ b/dbx-agent-app/app/webapp/src/App.tsx @@ -0,0 +1,51 @@ +import { BrowserRouter, Routes, Route, Navigate } from 'react-router-dom' +import ErrorBoundary from './components/common/ErrorBoundary' +import Layout from './components/layout/Layout' +import DiscoverPage from './pages/DiscoverPage' +import CollectionsPage from './pages/CollectionsPage' +import AgentsPage from './pages/AgentsPage' +import ChatPage from './pages/ChatPage' +import AgentChatPage from './pages/AgentChatPage' +import LineagePage from './pages/LineagePage' +import AuditLogPage from './pages/AuditLogPage' +import SystemBuilderPage from './pages/SystemBuilderPage' +import './App.css' + +export default function App() { + return ( + + + + }> + } /> + + } /> + + } /> + + } /> + + } /> + + } /> + + } /> + + } /> + + } /> + } /> + + + + + ) +} diff --git a/dbx-agent-app/app/webapp/src/api/agentChat.ts b/dbx-agent-app/app/webapp/src/api/agentChat.ts new file mode 100644 index 00000000..b5271d7b --- /dev/null +++ b/dbx-agent-app/app/webapp/src/api/agentChat.ts @@ -0,0 +1,17 @@ +import { registryClient } from './client' +import { AgentChatResponse, AgentChatEndpointsResponse } from '../types' + +export const agentChatApi = { + async queryEndpoint(endpointName: string, 
message: string): Promise { + const response = await registryClient.post('/agent-chat/query', { + endpoint_name: endpointName, + message, + }) + return response.data + }, + + async getEndpoints(): Promise { + const response = await registryClient.get('/agent-chat/endpoints') + return response.data + }, +} diff --git a/dbx-agent-app/app/webapp/src/api/client.ts b/dbx-agent-app/app/webapp/src/api/client.ts new file mode 100644 index 00000000..7d2614f1 --- /dev/null +++ b/dbx-agent-app/app/webapp/src/api/client.ts @@ -0,0 +1,65 @@ +import axios, { AxiosInstance, AxiosError } from 'axios' + +const registryClient: AxiosInstance = axios.create({ + baseURL: import.meta.env.VITE_REGISTRY_API_URL || '/api', + timeout: 30000, + withCredentials: true, // Enable cookie-based auth for production API + headers: { + 'Content-Type': 'application/json' + } +}) + +const supervisorClient: AxiosInstance = axios.create({ + baseURL: import.meta.env.VITE_SUPERVISOR_URL || '/api', + timeout: 60000, + withCredentials: true, + headers: { + 'Content-Type': 'application/json' + } +}) + +registryClient.interceptors.request.use( + (config) => { + const token = localStorage.getItem('databricks_token') + if (token) { + config.headers.Authorization = `Bearer ${token}` + } + return config + }, + (error) => Promise.reject(error) +) + +registryClient.interceptors.response.use( + (response) => response, + (error: AxiosError) => { + if (error.response?.status === 401) { + // Clear invalid token and let the caller handle auth failure + localStorage.removeItem('databricks_token') + } + return Promise.reject(error) + } +) + +supervisorClient.interceptors.request.use( + (config) => { + const token = localStorage.getItem('databricks_token') + if (token) { + config.headers.Authorization = `Bearer ${token}` + } + return config + }, + (error) => Promise.reject(error) +) + +supervisorClient.interceptors.response.use( + (response) => response, + (error: AxiosError) => { + if (error.response?.status === 401) { + 
// Clear invalid token and let the caller handle auth failure + localStorage.removeItem('databricks_token') + } + return Promise.reject(error) + } +) + +export { registryClient, supervisorClient } diff --git a/dbx-agent-app/app/webapp/src/api/registry.ts b/dbx-agent-app/app/webapp/src/api/registry.ts new file mode 100644 index 00000000..586a7d58 --- /dev/null +++ b/dbx-agent-app/app/webapp/src/api/registry.ts @@ -0,0 +1,367 @@ +import { registryClient } from './client' +import { + App, + MCPServer, + Tool, + Collection, + CollectionCreate, + CollectionItem, + Agent, + AgentCreate, + A2AAgentCard, + A2ATask, + SupervisorGenerateRequest, + SupervisorGenerateResponse, + WorkspaceProfilesResponse, + CatalogAsset, + WorkspaceAsset, + CatalogCrawlResponse, + WorkspaceCrawlResponse, + SearchResponse, + EmbedStatusResponse, + LineageResponse, + ImpactAnalysisResponse, + AssetRelationship, + LineageCrawlResponse, + AuditLogEntry, +} from '../types' + +export const registryApi = { + async refreshDiscovery(): Promise { + await registryClient.post('/discovery/refresh') + }, + + async getApps(): Promise { + try { + const response = await registryClient.get('/apps') + // Handle paginated response or direct array + if (response.data.items && Array.isArray(response.data.items)) { + return response.data.items + } + if (Array.isArray(response.data)) { + return response.data + } + return [] + } catch { + return [] + } + }, + + async getServers(): Promise { + try { + const response = await registryClient.get('/mcp_servers') + // Handle paginated response or direct array + if (response.data.items && Array.isArray(response.data.items)) { + return response.data.items + } + if (Array.isArray(response.data)) { + return response.data + } + return [] + } catch { + return [] + } + }, + + async getTools(serverId?: number): Promise { + try { + const params = serverId ? 
{ mcp_server_id: serverId } : {} + const response = await registryClient.get('/tools', { params }) + // Handle paginated response or direct array + if (response.data.items && Array.isArray(response.data.items)) { + return response.data.items + } + if (Array.isArray(response.data)) { + return response.data + } + return [] + } catch { + return [] + } + }, + + async getCollections(): Promise { + try { + const response = await registryClient.get('/collections') + // Handle paginated response or direct array + if (response.data.items && Array.isArray(response.data.items)) { + return response.data.items + } + if (Array.isArray(response.data)) { + return response.data + } + return [] + } catch { + return [] + } + }, + + async getCollection(id: number): Promise { + const response = await registryClient.get(`/collections/${id}`) + return response.data + }, + + async createCollection(data: CollectionCreate): Promise { + const response = await registryClient.post('/collections', data) + return response.data + }, + + async updateCollection(id: number, data: Partial): Promise { + const response = await registryClient.put(`/collections/${id}`, data) + return response.data + }, + + async deleteCollection(id: number): Promise { + await registryClient.delete(`/collections/${id}`) + }, + + async getCollectionItems(collectionId: number): Promise { + try { + const response = await registryClient.get(`/collections/${collectionId}/items`) + // Handle paginated response or direct array + if (response.data.items && Array.isArray(response.data.items)) { + return response.data.items + } + if (Array.isArray(response.data)) { + return response.data + } + return [] + } catch { + return [] + } + }, + + async addCollectionItem(collectionId: number, item: { + app_id?: number + mcp_server_id?: number + tool_id?: number + }): Promise { + const response = await registryClient.post(`/collections/${collectionId}/items`, item) + return response.data + }, + + async removeCollectionItem(collectionId: 
number, itemId: number): Promise { + await registryClient.delete(`/collections/${collectionId}/items/${itemId}`) + }, + + async generateSupervisor(data: SupervisorGenerateRequest): Promise { + const response = await registryClient.post('/supervisors/generate', data) + return response.data + }, + + async getWorkspaceProfiles(): Promise { + const response = await registryClient.get('/discovery/workspaces') + return response.data + }, + + async getAgents(): Promise { + try { + const response = await registryClient.get('/agents') + if (response.data.items && Array.isArray(response.data.items)) { + return response.data.items + } + if (Array.isArray(response.data)) { + return response.data + } + return [] + } catch { + return [] + } + }, + + async getAgent(id: number): Promise { + const response = await registryClient.get(`/agents/${id}`) + return response.data + }, + + async createAgent(data: AgentCreate): Promise { + const response = await registryClient.post('/agents', data) + return response.data + }, + + async updateAgent(id: number, data: Partial): Promise { + const response = await registryClient.put(`/agents/${id}`, data) + return response.data + }, + + async deleteAgent(id: number): Promise { + await registryClient.delete(`/agents/${id}`) + }, + + async getAgentCard(id: number): Promise { + const response = await registryClient.get(`/agents/${id}/card`) + return response.data + }, + + async getA2ATasks(agentId: number): Promise { + try { + const response = await registryClient.post(`/a2a/${agentId}`, { + jsonrpc: '2.0', + id: 1, + method: 'tasks/list', + params: {} + }) + return response.data?.result?.tasks || [] + } catch { + return [] + } + }, + + async refreshDiscoveryWithProfile(profile: string): Promise { + await registryClient.post('/discovery/refresh', { + discover_workspace: true, + databricks_profile: profile + }) + }, + + // --- Catalog Assets --- + + async getCatalogAssets(params?: { + asset_type?: string + catalog?: string + schema_name?: string + 
search?: string + owner?: string + page?: number + page_size?: number + }): Promise { + try { + const response = await registryClient.get('/catalog-assets', { params }) + if (response.data.items && Array.isArray(response.data.items)) { + return response.data.items + } + if (Array.isArray(response.data)) { + return response.data + } + return [] + } catch { + return [] + } + }, + + async getCatalogAsset(id: number): Promise { + const response = await registryClient.get(`/catalog-assets/${id}`) + return response.data + }, + + async crawlCatalog(params?: { + catalogs?: string[] + include_columns?: boolean + databricks_profile?: string + }): Promise { + const response = await registryClient.post('/catalog-assets/crawl', params || {}) + return response.data + }, + + // --- Workspace Assets --- + + async getWorkspaceAssets(params?: { + asset_type?: string + search?: string + owner?: string + workspace_host?: string + page?: number + page_size?: number + }): Promise { + try { + const response = await registryClient.get('/workspace-assets', { params }) + if (response.data.items && Array.isArray(response.data.items)) { + return response.data.items + } + if (Array.isArray(response.data)) { + return response.data + } + return [] + } catch { + return [] + } + }, + + async getWorkspaceAsset(id: number): Promise { + const response = await registryClient.get(`/workspace-assets/${id}`) + return response.data + }, + + async crawlWorkspace(params?: { + asset_types?: string[] + root_path?: string + databricks_profile?: string + }): Promise { + const response = await registryClient.post('/workspace-assets/crawl', params || {}) + return response.data + }, + + // --- Semantic Search --- + + async search(params: { + query: string + types?: string[] + catalogs?: string[] + owner?: string + limit?: number + }): Promise { + const response = await registryClient.post('/search', params) + return response.data + }, + + async embedAllAssets(): Promise { + const response = await 
registryClient.post('/search/embed-all') + return response.data + }, + + async getEmbedStatus(): Promise { + const response = await registryClient.get('/search/embed-status') + return response.data + }, + + // --- Lineage --- + + async getLineage(assetType: string, assetId: number, params?: { + direction?: 'upstream' | 'downstream' | 'both' + max_depth?: number + }): Promise { + const response = await registryClient.get(`/lineage/${assetType}/${assetId}`, { params }) + return response.data + }, + + async getImpactAnalysis(assetType: string, assetId: number, params?: { + max_depth?: number + }): Promise { + const response = await registryClient.get(`/lineage/${assetType}/${assetId}/impact`, { params }) + return response.data + }, + + async crawlLineage(params?: { + databricks_profile?: string + include_column_lineage?: boolean + }): Promise { + const response = await registryClient.post('/lineage/crawl', params || {}) + return response.data + }, + + async getRelationships(params?: { + source_type?: string + target_type?: string + relationship_type?: string + page?: number + page_size?: number + }): Promise { + const response = await registryClient.get('/lineage/relationships', { params }) + return response.data + }, + + // --- Audit Log --- + + async getAuditLog(params?: { + user_email?: string + action?: string + resource_type?: string + date_from?: string + date_to?: string + page?: number + page_size?: number + }): Promise<{ items: AuditLogEntry[]; total: number; page: number; page_size: number; total_pages: number }> { + const response = await registryClient.get('/audit-log', { params }) + return response.data + }, +} diff --git a/dbx-agent-app/app/webapp/src/api/supervisor.ts b/dbx-agent-app/app/webapp/src/api/supervisor.ts new file mode 100644 index 00000000..12c2b1c7 --- /dev/null +++ b/dbx-agent-app/app/webapp/src/api/supervisor.ts @@ -0,0 +1,35 @@ +import { supervisorClient } from './client' +import { ChatRequest, ChatResponse, TraceResponse, Conversation, 
ConversationDetail } from '../types' + +export const supervisorApi = { + async chat(request: ChatRequest): Promise { + const response = await supervisorClient.post('/chat', request) + return response.data + }, + + async getTrace(traceId: string): Promise { + const response = await supervisorClient.get(`/traces/${traceId}`) + return response.data + }, + + async getConversations(page = 1, pageSize = 50): Promise<{ conversations: Conversation[]; total: number }> { + const response = await supervisorClient.get('/conversations', { + params: { page, page_size: pageSize }, + }) + return response.data + }, + + async getConversation(id: string): Promise { + const response = await supervisorClient.get(`/conversations/${id}`) + return response.data + }, + + async deleteConversation(id: string): Promise { + await supervisorClient.delete(`/conversations/${id}`) + }, + + async renameConversation(id: string, title: string): Promise { + const response = await supervisorClient.patch(`/conversations/${id}`, { title }) + return response.data + }, +} diff --git a/dbx-agent-app/app/webapp/src/api/systems.ts b/dbx-agent-app/app/webapp/src/api/systems.ts new file mode 100644 index 00000000..69f3acf1 --- /dev/null +++ b/dbx-agent-app/app/webapp/src/api/systems.ts @@ -0,0 +1,47 @@ +import { registryClient } from './client' +import { SystemDefinition, SystemCreate, WiringEdge, DeployResult } from '../types' + +export const systemsApi = { + async listSystems(): Promise { + try { + const response = await registryClient.get('/systems') + if (Array.isArray(response.data)) { + return response.data + } + return [] + } catch { + return [] + } + }, + + async getSystem(id: string): Promise { + const response = await registryClient.get(`/systems/${id}`) + return response.data + }, + + async createSystem(data: SystemCreate): Promise { + const response = await registryClient.post('/systems', data) + return response.data + }, + + async updateSystem(id: string, data: { + name?: string + description?: 
string + agents?: string[] + edges?: WiringEdge[] + uc_catalog?: string + uc_schema?: string + }): Promise { + const response = await registryClient.put(`/systems/${id}`, data) + return response.data + }, + + async deleteSystem(id: string): Promise { + await registryClient.delete(`/systems/${id}`) + }, + + async deploySystem(id: string): Promise { + const response = await registryClient.post(`/systems/${id}/deploy`) + return response.data + }, +} diff --git a/dbx-agent-app/app/webapp/src/components/agent-chat/ProcessingPipelinePanel.css b/dbx-agent-app/app/webapp/src/components/agent-chat/ProcessingPipelinePanel.css new file mode 100644 index 00000000..9145038c --- /dev/null +++ b/dbx-agent-app/app/webapp/src/components/agent-chat/ProcessingPipelinePanel.css @@ -0,0 +1,272 @@ +.pipeline-panel { + padding: 16px; + background: #fff; + border: 1px solid #e0e0e0; + border-radius: 8px; + overflow: auto; +} + +.pipeline-panel-title { + margin: 0 0 4px 0; + font-size: 14px; + font-weight: 700; +} + +.pipeline-panel-subtitle { + margin: 0 0 16px 0; + font-size: 12px; + color: #999; +} + +.pipeline-panel-empty { + text-align: center; + color: #999; + padding: 32px 0; + font-size: 14px; +} + +/* Summary Stats */ +.pipeline-stats { + display: grid; + grid-template-columns: repeat(2, 1fr); + gap: 8px; + margin-bottom: 16px; +} + +.stat-card { + border: 1px solid #e0e0e0; + border-radius: 6px; + padding: 8px; + text-align: center; +} + +.stat-value { + display: block; + font-size: 16px; + font-weight: 700; +} + +.stat-label { + display: block; + font-size: 11px; + color: #999; +} + +/* Cost Table */ +.pipeline-details { + margin-bottom: 12px; +} + +.pipeline-details-summary { + font-size: 12px; + font-weight: 600; + cursor: pointer; + padding: 4px 0; +} + +.cost-table { + width: 100%; + border-collapse: collapse; + font-size: 12px; + margin-top: 4px; +} + +.cost-table td { + padding: 2px 4px; +} + +.cost-value { + text-align: right; + font-weight: 600; +} + +.cost-tokens { + 
text-align: right; + color: #999; +} + +.cost-total-row td { + border-top: 1px solid #e0e0e0; + padding-top: 4px; +} + +/* Latency Bars */ +.latency-bars { + margin-top: 4px; +} + +.latency-item { + margin-bottom: 8px; +} + +.latency-header { + display: flex; + justify-content: space-between; + margin-bottom: 2px; +} + +.latency-phase { + font-size: 12px; + text-transform: capitalize; +} + +.latency-value { + font-size: 12px; + color: #999; +} + +.latency-bar { + height: 6px; + background: #e0e0e0; + border-radius: 3px; + overflow: hidden; +} + +.latency-fill { + height: 100%; + background: #1976d2; + border-radius: 3px; + transition: width 0.3s; +} + +.latency-total { + display: flex; + justify-content: space-between; + margin-top: 8px; + font-size: 12px; +} + +/* Pipeline Steps */ +.pipeline-steps-section { + margin-top: 16px; +} + +.pipeline-steps-label { + display: block; + font-size: 12px; + font-weight: 600; + margin-bottom: 8px; +} + +.pipeline-steps { + display: flex; + flex-direction: column; +} + +.pipeline-step { + display: flex; + gap: 8px; + margin-bottom: 12px; +} + +.step-indicator { + display: flex; + flex-direction: column; + align-items: center; + min-width: 28px; +} + +.step-number { + display: inline-block; + font-size: 10px; + background: #f0f0f0; + border-radius: 10px; + padding: 2px 4px; + text-align: center; + min-width: 28px; +} + +.step-connector { + width: 2px; + flex: 1; + background: #e0e0e0; + margin-top: 4px; +} + +.step-content { + flex: 1; + min-width: 0; +} + +.step-header { + display: flex; + justify-content: space-between; + align-items: center; +} + +.step-name-row { + display: flex; + align-items: center; + gap: 4px; +} + +.step-name { + font-size: 12px; + font-weight: 600; +} + +.step-check { + color: #4caf50; + font-size: 12px; +} + +.step-duration { + font-size: 11px; + color: #999; +} + +.step-tools { + display: flex; + gap: 4px; + flex-wrap: wrap; + margin-top: 4px; +} + +.step-tool-chip { + display: inline-block; + 
padding: 1px 6px; + border: 1px solid #e0e0e0; + border-radius: 10px; + font-size: 10px; +} + +.step-metrics { + display: flex; + gap: 4px; + flex-wrap: wrap; + margin-top: 4px; +} + +.metric-chip { + display: inline-block; + padding: 1px 6px; + background: #f5f5f5; + border-radius: 10px; + font-size: 10px; +} + +.metric-chip-cost { + border: 1px solid #ff9800; + color: #e65100; + background: transparent; +} + +.step-details { + margin-top: 4px; +} + +.step-details-summary { + font-size: 10px; + color: #999; + cursor: pointer; +} + +.step-details-json { + font-family: monospace; + font-size: 10px; + background: rgba(0, 0, 0, 0.03); + padding: 4px; + border-radius: 4px; + white-space: pre-wrap; + margin: 4px 0 0 0; +} diff --git a/dbx-agent-app/app/webapp/src/components/agent-chat/ProcessingPipelinePanel.tsx b/dbx-agent-app/app/webapp/src/components/agent-chat/ProcessingPipelinePanel.tsx new file mode 100644 index 00000000..55d9c0c9 --- /dev/null +++ b/dbx-agent-app/app/webapp/src/components/agent-chat/ProcessingPipelinePanel.tsx @@ -0,0 +1,179 @@ +import { PipelineInfo } from '../../types' +import './ProcessingPipelinePanel.css' + +interface ProcessingPipelinePanelProps { + pipeline: PipelineInfo | null +} + +export default function ProcessingPipelinePanel({ pipeline }: ProcessingPipelinePanelProps) { + if (!pipeline) { + return ( +
+

Processing Pipeline

+

Send a message to see processing pipeline...

+
+ ) + } + + const { metrics } = pipeline + + return ( +
+

Processing Pipeline

+

All Steps & Tools

+ + {/* Summary Stats */} +
+
+ {pipeline.totalSteps} + Steps +
+
+ {pipeline.totalDuration}ms + Total Time +
+ {metrics && ( + <> +
+ {metrics.totalTokens.toLocaleString()} + Tokens +
+
+ {metrics.costBreakdown.total} + Est. Cost +
+ + )} +
+ + {/* Cost Breakdown */} + {metrics && ( +
+ Cost Breakdown + + + + + + + + + + + + + + + + + + +
Input{metrics.costBreakdown.input}{metrics.inputTokens.toLocaleString()} tok
Output{metrics.costBreakdown.output}{metrics.outputTokens.toLocaleString()} tok
Total{metrics.costBreakdown.total}{metrics.tokensPerSecond.toLocaleString()} tok/s
+
+ )} + + {/* Latency Breakdown */} + {metrics?.latencyBreakdown && ( +
+ Latency Breakdown +
+ {(['preprocessing', 'search', 'llm', 'postprocessing'] as const).map((phase) => { + const value = metrics.latencyBreakdown[phase] + const pct = metrics.latencyBreakdown.total > 0 + ? (value / metrics.latencyBreakdown.total) * 100 + : 0 + return ( +
+
+ {phase} + {value}ms ({pct.toFixed(1)}%) +
+
+
+
+
+ ) + })} +
+ Total + + {metrics.latencyBreakdown.total}ms{' '} + ({(metrics.latencyBreakdown.total / 1000).toFixed(2)}s) + +
+
+
+ )} + + {/* Pipeline Steps */} +
+ Steps +
+ {pipeline.steps.map((step, idx) => ( +
+ {/* Step number with connector */} +
+ #{step.id} + {idx < pipeline.steps.length - 1 &&
} +
+ + {/* Step content */} +
+
+
+ {step.name} + +
+ {step.duration}ms +
+ + {/* Tools */} + {step.tools.length > 0 && ( +
+ {step.tools.map((tool, toolIdx) => ( + {tool} + ))} +
+ )} + + {/* Step Metrics */} + {step.metrics && ( +
+ {step.metrics.latency != null && ( + {step.metrics.latency}ms + )} + {step.metrics.estimatedCost != null && ( + + ${step.metrics.estimatedCost.toFixed(6)} + + )} + {step.metrics.inputTokens != null && ( + {step.metrics.inputTokens.toLocaleString()} in + )} + {step.metrics.outputTokens != null && ( + {step.metrics.outputTokens.toLocaleString()} out + )} + {step.metrics.tokensPerSecond != null && ( + {step.metrics.tokensPerSecond.toLocaleString()} tok/s + )} + {step.metrics.entitiesFound != null && ( + {step.metrics.entitiesFound} entities + )} +
+ )} + + {/* Step Details */} +
+ Details +
+                    {JSON.stringify(step.details, null, 2)}
+                  
+
+
+
+ ))} +
+
+
+ ) +} diff --git a/dbx-agent-app/app/webapp/src/components/agent-chat/QueryConstructionPanel.css b/dbx-agent-app/app/webapp/src/components/agent-chat/QueryConstructionPanel.css new file mode 100644 index 00000000..19bfd609 --- /dev/null +++ b/dbx-agent-app/app/webapp/src/components/agent-chat/QueryConstructionPanel.css @@ -0,0 +1,153 @@ +.query-panel { + padding: 16px; + background: #fff; + border: 1px solid #e0e0e0; + border-radius: 8px; + overflow: auto; +} + +.query-panel-title { + margin: 0 0 4px 0; + font-size: 14px; + font-weight: 700; +} + +.query-panel-subtitle { + margin: 0 0 16px 0; + font-size: 12px; + color: #999; +} + +.query-panel-empty { + text-align: center; + color: #999; + padding: 32px 0; + font-size: 14px; +} + +.query-section { + margin-bottom: 16px; +} + +.query-section-label { + display: block; + font-size: 12px; + font-weight: 600; + margin-bottom: 6px; +} + +.query-chips { + display: flex; + gap: 4px; + flex-wrap: wrap; +} + +.query-chip { + display: inline-block; + padding: 2px 8px; + border: 1px solid #ccc; + border-radius: 12px; + font-size: 11px; + white-space: nowrap; +} + +.query-chip-primary { + border-color: #1565c0; + color: #1565c0; +} + +.nl-sql-mapping { + margin-bottom: 12px; + padding: 8px; + border-radius: 6px; + background: #fafafa; +} + +.mapping-header { + display: flex; + align-items: center; + gap: 6px; + margin-bottom: 4px; +} + +.mapping-type-badge { + display: inline-block; + padding: 1px 6px; + border-radius: 10px; + background: #f3e5f5; + color: #7b1fa2; + font-size: 10px; + font-weight: 600; +} + +.mapping-nl { + font-size: 12px; +} + +.mapping-arrow { + text-align: center; + color: #999; + font-size: 12px; +} + +.mapping-sql { + display: block; + font-family: monospace; + font-size: 11px; + background: rgba(0, 0, 0, 0.04); + padding: 4px 6px; + border-radius: 4px; + word-break: break-all; +} + +.mapping-confidence { + display: flex; + align-items: center; + gap: 8px; + margin-top: 6px; +} + +.confidence-bar { + 
flex: 1; + height: 4px; + background: #e0e0e0; + border-radius: 2px; + overflow: hidden; +} + +.confidence-fill { + height: 100%; + background: #1976d2; + border-radius: 2px; + transition: width 0.3s; +} + +.confidence-label { + font-size: 11px; + color: #999; + white-space: nowrap; +} + +.query-details { + margin-top: 8px; +} + +.query-details-summary { + font-size: 12px; + font-weight: 600; + cursor: pointer; + padding: 4px 0; +} + +.query-json { + font-family: monospace; + font-size: 10px; + background: rgba(0, 0, 0, 0.03); + border: 1px solid #e0e0e0; + padding: 8px; + border-radius: 4px; + max-height: 200px; + overflow: auto; + white-space: pre-wrap; + margin: 8px 0 0 0; +} diff --git a/dbx-agent-app/app/webapp/src/components/agent-chat/QueryConstructionPanel.tsx b/dbx-agent-app/app/webapp/src/components/agent-chat/QueryConstructionPanel.tsx new file mode 100644 index 00000000..1bee714d --- /dev/null +++ b/dbx-agent-app/app/webapp/src/components/agent-chat/QueryConstructionPanel.tsx @@ -0,0 +1,81 @@ +import { SlotFillingInfo } from '../../types' +import './QueryConstructionPanel.css' + +interface QueryConstructionPanelProps { + slotFilling: SlotFillingInfo | null +} + +export default function QueryConstructionPanel({ slotFilling }: QueryConstructionPanelProps) { + return ( +
+

Query Construction

+

NL → SQL Slot Filling

+ + {slotFilling ? ( +
+ {/* Entities Detected */} + {slotFilling.slots.entities.length > 0 && ( +
+ Entities Detected +
+ {slotFilling.slots.entities.map((entity, idx) => ( + {entity} + ))} +
+
+ )} + + {/* Search Terms */} + {slotFilling.slots.searchTerms.length > 0 && ( +
+ Search Terms +
+ {slotFilling.slots.searchTerms.map((term, idx) => ( + {term} + ))} +
+
+ )} + + {/* NL to SQL Mappings */} + {slotFilling.nlToSql.length > 0 && ( +
+ NL → SQL Clauses + {slotFilling.nlToSql.map((mapping, idx) => ( +
+
+ {mapping.type} + {mapping.naturalLanguage} +
+
+ {mapping.sqlClause} +
+
+
+
+ + {(mapping.confidence * 100).toFixed(0)}% + +
+
+ ))} +
+ )} + + {/* Elasticsearch Query */} +
+ Elasticsearch Query +
+              {JSON.stringify(slotFilling.elasticQuery, null, 2)}
+            
+
+
+ ) : ( +

Send a message to see query construction...

+ )} +
+ ) +} diff --git a/dbx-agent-app/app/webapp/src/components/agent-chat/RoutingBadges.css b/dbx-agent-app/app/webapp/src/components/agent-chat/RoutingBadges.css new file mode 100644 index 00000000..4e7c9691 --- /dev/null +++ b/dbx-agent-app/app/webapp/src/components/agent-chat/RoutingBadges.css @@ -0,0 +1,51 @@ +.routing-badges { + margin-bottom: 8px; +} + +.routing-flow { + display: flex; + align-items: center; + gap: 8px; + margin-bottom: 4px; + flex-wrap: wrap; +} + +.routing-arrow { + color: #999; + font-size: 14px; +} + +.routing-tools { + display: flex; + gap: 4px; + flex-wrap: wrap; + margin-top: 4px; +} + +.routing-badge { + display: inline-block; + padding: 2px 8px; + border-radius: 12px; + font-size: 12px; + font-weight: 600; + white-space: nowrap; +} + +.routing-badge-primary { + background: #e3f2fd; + color: #1565c0; +} + +.routing-badge-secondary { + background: #f3e5f5; + color: #7b1fa2; +} + +.routing-badge-outlined { + background: transparent; + border: 1px solid #ccc; + color: #666; + font-weight: 400; + font-size: 11px; + padding: 1px 6px; +} diff --git a/dbx-agent-app/app/webapp/src/components/agent-chat/RoutingBadges.tsx b/dbx-agent-app/app/webapp/src/components/agent-chat/RoutingBadges.tsx new file mode 100644 index 00000000..69278ec8 --- /dev/null +++ b/dbx-agent-app/app/webapp/src/components/agent-chat/RoutingBadges.tsx @@ -0,0 +1,38 @@ +import { RoutingInfo } from '../../types' +import './RoutingBadges.css' + +interface RoutingBadgesProps { + routing: RoutingInfo +} + +export default function RoutingBadges({ routing }: RoutingBadgesProps) { + if (!routing.usedSupervisor && (!routing.toolCalls || routing.toolCalls.length === 0)) { + return null + } + + return ( +
+ {routing.usedSupervisor && ( +
+ Supervisor Agent + {routing.subAgent && ( + <> + + {routing.subAgent} + + )} +
+ )} + + {routing.toolCalls.length > 0 && ( +
+ {routing.toolCalls.map((tool, idx) => ( + + {tool.tool}: {tool.description} + + ))} +
+ )} +
+ ) +} diff --git a/dbx-agent-app/app/webapp/src/components/agents/AgentCard.css b/dbx-agent-app/app/webapp/src/components/agents/AgentCard.css new file mode 100644 index 00000000..b48ffb93 --- /dev/null +++ b/dbx-agent-app/app/webapp/src/components/agents/AgentCard.css @@ -0,0 +1,49 @@ +.agent-card { + cursor: pointer; + transition: all 0.2s; +} + +.agent-card-header { + display: flex; + justify-content: space-between; + align-items: flex-start; + gap: 8px; +} + +.agent-card-header h4 { + margin: 0 0 8px 0; + font-size: 16px; + font-weight: 600; +} + +.agent-card-description { + margin: 0 0 8px 0; + font-size: 14px; + color: #666; + overflow: hidden; + text-overflow: ellipsis; + display: -webkit-box; + -webkit-line-clamp: 2; + -webkit-box-orient: vertical; +} + +.agent-card-capabilities { + display: flex; + flex-wrap: wrap; + gap: 4px; +} + +.capability-tag { + display: inline-block; + padding: 2px 8px; + font-size: 11px; + font-weight: 500; + background: #f0f0f0; + color: #555; + border-radius: 12px; +} + +.agent-card.active { + border-color: #0066cc; + background: #f0f7ff; +} diff --git a/dbx-agent-app/app/webapp/src/components/agents/AgentCard.tsx b/dbx-agent-app/app/webapp/src/components/agents/AgentCard.tsx new file mode 100644 index 00000000..903f2d88 --- /dev/null +++ b/dbx-agent-app/app/webapp/src/components/agents/AgentCard.tsx @@ -0,0 +1,38 @@ +import { Agent } from '../../types' +import Card from '../common/Card' +import Badge from '../common/Badge' +import './AgentCard.css' + +interface AgentCardProps { + agent: Agent + isActive: boolean + onClick: () => void +} + +const statusVariant: Record = { + active: 'success', + draft: 'warning', + error: 'danger', + inactive: 'default', +} + +export default function AgentCard({ agent, isActive, onClick }: AgentCardProps) { + return ( + +
+

{agent.name}

+ + {agent.status} + +
+

{agent.description || 'No description'}

+ {agent.capabilities && ( +
+ {agent.capabilities.split(',').map((cap) => ( + {cap.trim()} + ))} +
+ )} +
+ ) +} diff --git a/dbx-agent-app/app/webapp/src/components/agents/CreateAgentModal.css b/dbx-agent-app/app/webapp/src/components/agents/CreateAgentModal.css new file mode 100644 index 00000000..408c7d45 --- /dev/null +++ b/dbx-agent-app/app/webapp/src/components/agents/CreateAgentModal.css @@ -0,0 +1,107 @@ +.create-agent-form { + display: flex; + flex-direction: column; + gap: 20px; +} + +.create-agent-form .form-group { + display: flex; + flex-direction: column; + gap: 8px; +} + +.create-agent-form .form-group label { + font-size: 14px; + font-weight: 500; + color: #333; +} + +.create-agent-form .form-group input, +.create-agent-form .form-group textarea, +.create-agent-form .form-group select { + padding: 10px 12px; + border: 1px solid #ccc; + border-radius: 4px; + font-size: 14px; + font-family: inherit; +} + +.create-agent-form .form-group input:focus, +.create-agent-form .form-group textarea:focus, +.create-agent-form .form-group select:focus { + outline: none; + border-color: #0066cc; +} + +.create-agent-form .form-error { + background: #ffebee; + color: #c62828; + padding: 10px 12px; + border-radius: 4px; + font-size: 14px; + border-left: 4px solid #c62828; +} + +/* Advanced Settings Toggle */ +.advanced-toggle { + display: flex; + align-items: center; + gap: 6px; + background: none; + border: 1px solid #ddd; + border-radius: 6px; + padding: 10px 14px; + font-size: 13px; + font-weight: 500; + color: #555; + cursor: pointer; + transition: background 0.15s, color 0.15s; +} + +.advanced-toggle:hover { + background: #f5f5f5; + color: #333; +} + +.advanced-toggle-icon { + font-size: 10px; + transition: transform 0.2s; +} + +.advanced-toggle-icon.open { + transform: rotate(180deg); +} + +/* Advanced Section */ +.advanced-section { + display: flex; + flex-direction: column; + gap: 20px; + padding: 16px; + background: #fafbfc; + border: 1px solid #e8e8e8; + border-radius: 6px; +} + +/* Checkbox Group */ +.checkbox-group { + display: flex; + flex-direction: column; 
+ gap: 10px; +} + +.checkbox-label { + display: flex; + align-items: center; + gap: 8px; + font-size: 14px; + font-weight: 400; + color: #444; + cursor: pointer; +} + +.checkbox-label input[type="checkbox"] { + width: 16px; + height: 16px; + accent-color: #0066cc; +} diff --git a/dbx-agent-app/app/webapp/src/components/agents/CreateAgentModal.tsx b/dbx-agent-app/app/webapp/src/components/agents/CreateAgentModal.tsx new file mode 100644 index 00000000..2e8e5a98 --- /dev/null +++ b/dbx-agent-app/app/webapp/src/components/agents/CreateAgentModal.tsx @@ -0,0 +1,279 @@ +import { useState, useEffect } from 'react' +import { registryApi } from '../../api/registry' +import { Collection, AgentCreate } from '../../types' +import Modal from '../common/Modal' +import Button from '../common/Button' +import './CreateAgentModal.css' + +interface CreateAgentModalProps { + isOpen: boolean + onClose: () => void + onCreate: (data: AgentCreate) => Promise +} + +export default function CreateAgentModal({ isOpen, onClose, onCreate }: CreateAgentModalProps) { + const [name, setName] = useState('') + const [description, setDescription] = useState('') + const [capabilities, setCapabilities] = useState('') + const [endpointUrl, setEndpointUrl] = useState('') + const [collectionId, setCollectionId] = useState(undefined) + const [collections, setCollections] = useState([]) + const [loading, setLoading] = useState(false) + const [error, setError] = useState(null) + + // Advanced / A2A fields + const [showAdvanced, setShowAdvanced] = useState(false) + const [systemPrompt, setSystemPrompt] = useState('') + const [authToken, setAuthToken] = useState('') + const [protocolVersion, setProtocolVersion] = useState('0.3.0') + const [streaming, setStreaming] = useState(false) + const [pushNotifications, setPushNotifications] = useState(false) + const [skillsJson, setSkillsJson] = useState('') + const [status, setStatus] = useState('draft') + + useEffect(() => { + if (isOpen) { + 
registryApi.getCollections().then(setCollections) + } + }, [isOpen]) + + const handleSubmit = async () => { + if (!name.trim()) { + setError('Agent name is required') + return + } + + // Validate skills JSON if provided + if (skillsJson.trim()) { + try { + JSON.parse(skillsJson.trim()) + } catch { + setError('Skills must be valid JSON') + return + } + } + + const a2aCaps = (streaming || pushNotifications) + ? JSON.stringify({ streaming, pushNotifications }) + : undefined + + try { + setLoading(true) + setError(null) + await onCreate({ + name: name.trim(), + description: description.trim() || undefined, + capabilities: capabilities.trim() || undefined, + endpoint_url: endpointUrl.trim() || undefined, + collection_id: collectionId, + status: status || undefined, + system_prompt: systemPrompt.trim() || undefined, + auth_token: authToken.trim() || undefined, + protocol_version: protocolVersion.trim() || undefined, + a2a_capabilities: a2aCaps, + skills: skillsJson.trim() || undefined, + }) + resetForm() + onClose() + } catch (err) { + setError(err instanceof Error ? err.message : 'Failed to create agent') + } finally { + setLoading(false) + } + } + + const resetForm = () => { + setName('') + setDescription('') + setCapabilities('') + setEndpointUrl('') + setCollectionId(undefined) + setError(null) + setShowAdvanced(false) + setSystemPrompt('') + setAuthToken('') + setProtocolVersion('0.3.0') + setStreaming(false) + setPushNotifications(false) + setSkillsJson('') + setStatus('draft') + } + + const handleClose = () => { + resetForm() + onClose() + } + + return ( + + + + + } + > +
+
+ + setName(e.target.value)} + placeholder="Enter agent name" + autoFocus + /> +
+ +
+ + + +
+
+ +""", + ) diff --git a/dbx-agent-app/examples/data-tools/dbx_agent_app/discovery/__init__.py b/dbx-agent-app/examples/data-tools/dbx_agent_app/discovery/__init__.py new file mode 100644 index 00000000..d6d04008 --- /dev/null +++ b/dbx-agent-app/examples/data-tools/dbx_agent_app/discovery/__init__.py @@ -0,0 +1,24 @@ +""" +Agent discovery for Databricks Apps. + +This module provides clients and utilities for discovering agent-enabled +Databricks Apps that expose A2A protocol agent cards. +""" + +from .agent_discovery import ( + AgentDiscovery, + DiscoveredAgent, + AgentDiscoveryResult, +) +from .a2a_client import ( + A2AClient, + A2AClientError, +) + +__all__ = [ + "AgentDiscovery", + "DiscoveredAgent", + "AgentDiscoveryResult", + "A2AClient", + "A2AClientError", +] diff --git a/dbx-agent-app/examples/data-tools/dbx_agent_app/discovery/a2a_client.py b/dbx-agent-app/examples/data-tools/dbx_agent_app/discovery/a2a_client.py new file mode 100644 index 00000000..1243d1a3 --- /dev/null +++ b/dbx-agent-app/examples/data-tools/dbx_agent_app/discovery/a2a_client.py @@ -0,0 +1,268 @@ +""" +A2A Client for agent-to-agent communication. + +Implements the A2A protocol for discovering and communicating with peer agents. +""" + +import json +import uuid +import logging +from typing import Dict, Any, Optional, AsyncIterator + +import httpx + +logger = logging.getLogger(__name__) + + +class A2AClientError(Exception): + """Raised when an A2A operation fails.""" + pass + + +class A2AClient: + """ + Async client for A2A protocol communication with peer agents. + + Usage: + async with A2AClient() as client: + card = await client.fetch_agent_card("https://app.databricksapps.com") + result = await client.send_message("https://app.databricksapps.com/api/a2a", "Hello") + """ + + def __init__(self, timeout: float = 60.0): + """ + Initialize A2A client. 
+ + Args: + timeout: Request timeout in seconds + """ + self.timeout = timeout + self._client: Optional[httpx.AsyncClient] = None + + async def __aenter__(self): + self._client = httpx.AsyncClient( + timeout=self.timeout, + follow_redirects=True, + ) + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + if self._client: + await self._client.aclose() + + def _auth_headers(self, auth_token: Optional[str] = None) -> Dict[str, str]: + """Build authentication headers.""" + headers = {"Content-Type": "application/json"} + if auth_token: + headers["Authorization"] = f"Bearer {auth_token}" + return headers + + async def fetch_agent_card( + self, + base_url: str, + auth_token: Optional[str] = None, + ) -> Dict[str, Any]: + """ + Fetch an agent's A2A protocol agent card. + + Tries /.well-known/agent.json first, then /card as fallback. + Handles OAuth redirects gracefully (returns error instead of following). + + Args: + base_url: Base URL of the agent application + auth_token: Optional OAuth token for authenticated requests + + Returns: + Agent card JSON data + + Raises: + A2AClientError: If agent card cannot be fetched + + Example: + >>> async with A2AClient() as client: + >>> card = await client.fetch_agent_card("https://app.databricksapps.com") + >>> print(card["name"], card["description"]) + """ + if not self._client: + raise A2AClientError("Client not initialized. 
Use async context manager.") + + headers = {} + if auth_token: + headers["Authorization"] = f"Bearer {auth_token}" + + # Use a client that doesn't follow redirects to detect OAuth flows + async with httpx.AsyncClient(timeout=self.timeout, follow_redirects=False) as probe_client: + for path in ["/.well-known/agent.json", "/card"]: + try: + url = base_url.rstrip("/") + path + response = await probe_client.get(url, headers=headers) + + # OAuth redirect detected - app requires interactive auth + if response.status_code in (301, 302, 303, 307, 308): + logger.debug(f"OAuth redirect detected for {url}") + continue + + if response.status_code == 200: + if not response.text or response.text.isspace(): + logger.debug(f"Empty response body for {url}") + continue + return response.json() + + except Exception as e: + logger.debug(f"Agent card fetch failed for {url}: {e}") + continue + + raise A2AClientError(f"Could not fetch agent card from {base_url}") + + async def _jsonrpc_call( + self, + url: str, + method: str, + params: Dict[str, Any], + auth_token: Optional[str] = None, + ) -> Dict[str, Any]: + """ + Send a JSON-RPC 2.0 request to an agent. + + Args: + url: A2A endpoint URL + method: JSON-RPC method name (e.g., "message/send") + params: Method parameters + auth_token: Optional authentication token + + Returns: + JSON-RPC result + + Raises: + A2AClientError: If request fails or returns error + """ + if not self._client: + raise A2AClientError("Client not initialized. 
Use async context manager.") + + payload = { + "jsonrpc": "2.0", + "id": str(uuid.uuid4()), + "method": method, + "params": params, + } + + try: + response = await self._client.post( + url, + json=payload, + headers=self._auth_headers(auth_token), + ) + response.raise_for_status() + result = response.json() + + if "error" in result: + error = result["error"] + raise A2AClientError( + f"A2A error: {error.get('message', 'Unknown')} " + f"(code: {error.get('code')})" + ) + + return result.get("result", {}) + + except httpx.TimeoutException as e: + raise A2AClientError(f"Request to {url} timed out: {e}") + except httpx.HTTPStatusError as e: + raise A2AClientError( + f"HTTP error from {url}: {e.response.status_code}" + ) + except json.JSONDecodeError as e: + raise A2AClientError(f"Invalid JSON from {url}: {e}") + + async def send_message( + self, + agent_url: str, + message: str, + context_id: Optional[str] = None, + auth_token: Optional[str] = None, + ) -> Dict[str, Any]: + """ + Send a message to a peer agent using A2A protocol. + + Args: + agent_url: Agent's A2A endpoint URL + message: Text message to send + context_id: Optional conversation context ID + auth_token: Optional authentication token + + Returns: + Agent's response + + Example: + >>> async with A2AClient() as client: + >>> response = await client.send_message( + >>> "https://app.databricksapps.com/api/a2a", + >>> "What are your capabilities?" + >>> ) + """ + params: Dict[str, Any] = { + "message": { + "messageId": str(uuid.uuid4()), + "role": "user", + "parts": [{"text": message}], + } + } + if context_id: + params["message"]["contextId"] = context_id + + return await self._jsonrpc_call( + agent_url, "message/send", params, auth_token + ) + + async def send_streaming_message( + self, + agent_url: str, + message: str, + auth_token: Optional[str] = None, + ) -> AsyncIterator[Dict[str, Any]]: + """ + Send a streaming message and yield SSE events. 
+ + Args: + agent_url: Agent's A2A endpoint URL + message: Text message to send + auth_token: Optional authentication token + + Yields: + SSE events from the agent's response stream + + Example: + >>> async with A2AClient() as client: + >>> async for event in client.send_streaming_message(url, "Analyze this"): + >>> print(event) + """ + if not self._client: + raise A2AClientError("Client not initialized. Use async context manager.") + + stream_url = agent_url.rstrip("/") + "/stream" + + payload = { + "jsonrpc": "2.0", + "id": str(uuid.uuid4()), + "method": "message/stream", + "params": { + "message": { + "messageId": str(uuid.uuid4()), + "role": "user", + "parts": [{"text": message}], + } + }, + } + + async with self._client.stream( + "POST", + stream_url, + json=payload, + headers=self._auth_headers(auth_token), + ) as response: + response.raise_for_status() + async for line in response.aiter_lines(): + if line.startswith("data: "): + try: + yield json.loads(line[6:]) + except json.JSONDecodeError: + continue diff --git a/dbx-agent-app/examples/data-tools/dbx_agent_app/discovery/agent_discovery.py b/dbx-agent-app/examples/data-tools/dbx_agent_app/discovery/agent_discovery.py new file mode 100644 index 00000000..1563b304 --- /dev/null +++ b/dbx-agent-app/examples/data-tools/dbx_agent_app/discovery/agent_discovery.py @@ -0,0 +1,253 @@ +""" +Agent discovery for Databricks Apps. + +Discovers agent-enabled Databricks Apps by scanning workspace apps +and probing for A2A protocol agent cards. +""" + +import asyncio +import logging +from typing import List, Dict, Any, Optional +from dataclasses import dataclass + +from databricks.sdk import WorkspaceClient + +from .a2a_client import A2AClient, A2AClientError + +logger = logging.getLogger(__name__) + +# Agent card probe paths and timeout +AGENT_CARD_PATHS = ["/.well-known/agent.json", "/card"] +AGENT_CARD_PROBE_TIMEOUT = 5.0 + + +@dataclass +class DiscoveredAgent: + """ + An agent discovered from a Databricks App. 
Attributes: + name: Agent name (from agent card, falling back to the app name) + endpoint_url: Agent's base URL + app_name: Name of the backing Databricks App + description: Agent description (from agent card) + capabilities: Comma-separated list of capabilities + protocol_version: A2A protocol version
+ + Returns: + AgentDiscoveryResult with discovered agents and any errors + + Example: + >>> discovery = AgentDiscovery(profile="my-profile") + >>> result = await discovery.discover_agents() + >>> print(f"Found {len(result.agents)} agents") + """ + agents: List[DiscoveredAgent] = [] + errors: List[str] = [] + + try: + app_list = await self._list_workspace_apps() + except Exception as e: + logger.error("Workspace app listing failed: %s", e) + return AgentDiscoveryResult( + agents=[], + errors=[f"Failed to list workspace apps: {e}"], + ) + + if not app_list: + return AgentDiscoveryResult(agents=[], errors=[]) + + # Probe each running app for agent card in parallel + probe_tasks = [ + self._probe_app_for_agent(app_info) + for app_info in app_list + if app_info.get("url") + ] + + if probe_tasks: + probe_results = await asyncio.gather( + *probe_tasks, return_exceptions=True + ) + + for result in probe_results: + if isinstance(result, Exception): + errors.append(str(result)) + elif result is not None: + agents.append(result) + + logger.info( + "Agent discovery: %d apps checked, %d agents found", + len(app_list), len(agents) + ) + + return AgentDiscoveryResult(agents=agents, errors=errors) + + async def _list_workspace_apps(self) -> List[Dict[str, Any]]: + """ + Enumerate Databricks Apps in the workspace. 
+ + Returns: + List of running apps with name, url, owner + """ + def _list_sync() -> tuple: + client = ( + WorkspaceClient(profile=self.profile) + if self.profile + else WorkspaceClient() + ) + + # Extract auth token for cross-app requests + auth_headers = client.config.authenticate() + auth_val = auth_headers.get("Authorization", "") + token = auth_val[7:] if auth_val.startswith("Bearer ") else None + + results = [] + for app in client.apps.list(): + # Check if app is running via compute_status or deployment status + compute_state = None + cs = getattr(app, "compute_status", None) + if cs: + compute_state = str(getattr(cs, "state", "")) + + deploy_state = None + dep = getattr(app, "active_deployment", None) + if dep: + dep_status = getattr(dep, "status", None) + if dep_status: + deploy_state = str(getattr(dep_status, "state", "")) + + app_url = getattr(app, "url", None) or "" + app_url = app_url.rstrip("/") if app_url else "" + + results.append({ + "name": app.name, + "url": app_url, + "owner": getattr(app, "creator", None) or getattr(app, "updater", None), + "compute_state": compute_state, + "deploy_state": deploy_state, + }) + + return results, token + + loop = asyncio.get_event_loop() + result_tuple = await loop.run_in_executor(None, _list_sync) + all_apps, workspace_token = result_tuple + + # Store token for probing + self._workspace_token = workspace_token + + # Filter to running apps + running = [ + a for a in all_apps + if a.get("url") and ( + "ACTIVE" in (a.get("compute_state") or "") + or "SUCCEEDED" in (a.get("deploy_state") or "") + ) + ] + + logger.info( + "Workspace apps: %d total, %d running", + len(all_apps), len(running) + ) + + return running + + async def _probe_app_for_agent( + self, + app_info: Dict[str, Any], + ) -> Optional[DiscoveredAgent]: + """ + Probe a Databricks App for an A2A agent card. 
+ + Args: + app_info: App metadata from workspace listing + + Returns: + DiscoveredAgent if agent card found, None otherwise + """ + app_url = app_info["url"] + app_name = app_info["name"] + + token = self._workspace_token + agent_card = None + + try: + logger.debug(f"Probing app '{app_name}' at {app_url}") + async with A2AClient(timeout=AGENT_CARD_PROBE_TIMEOUT) as client: + agent_card = await client.fetch_agent_card(app_url, auth_token=token) + logger.info(f"Found agent card for '{app_name}'") + except A2AClientError as e: + logger.debug(f"No agent card for '{app_name}': {e}") + return None + except Exception as e: + logger.warning(f"Probe failed for '{app_name}': {e}") + return None + + if not agent_card: + return None + + # Extract capabilities + capabilities_list = [] + caps = agent_card.get("capabilities") + if isinstance(caps, dict): + capabilities_list = list(caps.keys()) + elif isinstance(caps, list): + capabilities_list = caps + + return DiscoveredAgent( + name=agent_card.get("name", app_name), + endpoint_url=app_url, + app_name=app_name, + description=agent_card.get("description"), + capabilities=",".join(capabilities_list) if capabilities_list else None, + protocol_version=agent_card.get("protocolVersion"), + ) diff --git a/dbx-agent-app/examples/data-tools/dbx_agent_app/mcp/__init__.py b/dbx-agent-app/examples/data-tools/dbx_agent_app/mcp/__init__.py new file mode 100644 index 00000000..60ee38ad --- /dev/null +++ b/dbx-agent-app/examples/data-tools/dbx_agent_app/mcp/__init__.py @@ -0,0 +1,11 @@ +""" +Model Context Protocol (MCP) support. + +This module provides utilities for integrating agents with MCP servers +and exposing UC Functions as MCP tools. 
+""" + +from .mcp_server import MCPServer, MCPServerConfig, setup_mcp_server +from .uc_functions import UCFunctionAdapter + +__all__ = ["MCPServer", "MCPServerConfig", "setup_mcp_server", "UCFunctionAdapter"] diff --git a/dbx-agent-app/examples/data-tools/dbx_agent_app/mcp/mcp_server.py b/dbx-agent-app/examples/data-tools/dbx_agent_app/mcp/mcp_server.py new file mode 100644 index 00000000..98a456fb --- /dev/null +++ b/dbx-agent-app/examples/data-tools/dbx_agent_app/mcp/mcp_server.py @@ -0,0 +1,196 @@ +""" +MCP server implementation for agents. + +Provides an MCP server that exposes agent tools via the Model Context Protocol. +Works with any FastAPI app. +""" + +import json +import logging +from typing import Dict, Any, List, Optional +from dataclasses import dataclass + +from fastapi import Request + +logger = logging.getLogger(__name__) + + +@dataclass +class MCPServerConfig: + """ + Configuration for MCP server. + + Attributes: + name: Server name + version: Server version + description: Server description + """ + name: str + version: str = "1.0.0" + description: str = "MCP server for agent tools" + + +class MCPServer: + """ + MCP server that exposes tools via JSON-RPC. + + Accepts tools as dicts with name, description, parameters, function keys. + """ + + def __init__(self, tools, config: MCPServerConfig): + """ + Initialize MCP server. + + Args: + tools: List of ToolDefinition objects or dicts with name/description/parameters/function + config: MCP server configuration + """ + self._tools = tools + self.config = config + + def setup_routes(self, app): + """ + Set up MCP protocol routes on the FastAPI app. 
+ + Adds: + - POST /api/mcp - MCP JSON-RPC endpoint + - GET /api/mcp/tools - List available tools + """ + + @app.post("/api/mcp") + async def mcp_jsonrpc(request: Request): + """MCP JSON-RPC endpoint.""" + try: + body = await request.json() + method = body.get("method") + params = body.get("params", {}) + request_id = body.get("id") + + if method == "tools/list": + result = await self._list_tools() + elif method == "tools/call": + result = await self._call_tool(params) + elif method == "server/info": + result = self._server_info() + else: + return { + "jsonrpc": "2.0", + "id": request_id, + "error": { + "code": -32601, + "message": f"Method not found: {method}" + } + } + + return { + "jsonrpc": "2.0", + "id": request_id, + "result": result + } + + except Exception as e: + logger.error("MCP request failed: %s", e) + return { + "jsonrpc": "2.0", + "id": body.get("id") if isinstance(body, dict) else None, + "error": { + "code": -32603, + "message": str(e) + } + } + + @app.get("/api/mcp/tools") + async def list_mcp_tools(): + """List available MCP tools.""" + return await self._list_tools() + + def _get_tool_name(self, tool) -> str: + return tool.name if hasattr(tool, "name") else tool["name"] + + def _get_tool_description(self, tool) -> str: + return tool.description if hasattr(tool, "description") else tool["description"] + + def _get_tool_parameters(self, tool) -> Dict[str, Any]: + return tool.parameters if hasattr(tool, "parameters") else tool.get("parameters", {}) + + def _get_tool_function(self, tool): + return tool.function if hasattr(tool, "function") else tool["function"] + + def _server_info(self) -> Dict[str, Any]: + """Get MCP server information.""" + return { + "name": self.config.name, + "version": self.config.version, + "description": self.config.description, + "protocol_version": "1.0", + } + + async def _list_tools(self) -> Dict[str, Any]: + """List all available tools in MCP format.""" + tools = [] + + for tool in self._tools: + mcp_tool = { + "name": 
self._get_tool_name(tool), + "description": self._get_tool_description(tool), + "inputSchema": { + "type": "object", + "properties": {}, + "required": [] + } + } + + for param_name, param_spec in self._get_tool_parameters(tool).items(): + param_type = param_spec.get("type", "string") + mcp_tool["inputSchema"]["properties"][param_name] = { + "type": param_type, + "description": param_spec.get("description", "") + } + if param_spec.get("required", False): + mcp_tool["inputSchema"]["required"].append(param_name) + + tools.append(mcp_tool) + + return {"tools": tools} + + async def _call_tool(self, params: Dict[str, Any]) -> Dict[str, Any]: + """Call a tool via MCP.""" + tool_name = params.get("name") + arguments = params.get("arguments", {}) + + tool_def = None + for tool in self._tools: + if self._get_tool_name(tool) == tool_name: + tool_def = tool + break + + if not tool_def: + raise ValueError(f"Tool not found: {tool_name}") + + try: + result = await self._get_tool_function(tool_def)(**arguments) + return {"result": result} + except Exception as e: + logger.error("Tool execution failed: %s", e) + raise + + +def setup_mcp_server(tools, config: Optional[MCPServerConfig] = None, fastapi_app=None): + """ + Set up MCP server for an agent. + + Args: + tools: List of tool dicts with name/description/parameters/function keys + config: Optional MCP server configuration + fastapi_app: FastAPI app to add routes to (required) + + Returns: + MCPServer instance + """ + if config is None: + config = MCPServerConfig(name="mcp-server") + + server = MCPServer(tools, config) + server.setup_routes(fastapi_app) + + return server diff --git a/dbx-agent-app/examples/data-tools/dbx_agent_app/mcp/uc_functions.py b/dbx-agent-app/examples/data-tools/dbx_agent_app/mcp/uc_functions.py new file mode 100644 index 00000000..f1659960 --- /dev/null +++ b/dbx-agent-app/examples/data-tools/dbx_agent_app/mcp/uc_functions.py @@ -0,0 +1,240 @@ +""" +Unity Catalog Functions adapter for MCP. 
+ +Automatically discovers UC Functions and exposes them as MCP tools. +""" + +import logging +from typing import List, Dict, Any, Optional + +from databricks.sdk import WorkspaceClient + +logger = logging.getLogger(__name__) + + +class UCFunctionAdapter: + """ + Adapter for Unity Catalog Functions to MCP protocol. + + Discovers UC Functions and converts them to MCP tool format for + use with agents. + + Usage: + adapter = UCFunctionAdapter(profile="my-profile") + tools = adapter.discover_functions(catalog="main", schema="functions") + """ + + def __init__(self, profile: Optional[str] = None): + """ + Initialize UC Functions adapter. + + Args: + profile: Databricks CLI profile name + """ + self.profile = profile + self._client: Optional[WorkspaceClient] = None + + def _get_client(self) -> WorkspaceClient: + """Get or create workspace client.""" + if self._client is None: + self._client = ( + WorkspaceClient(profile=self.profile) + if self.profile + else WorkspaceClient() + ) + return self._client + + def discover_functions( + self, + catalog: str, + schema: str, + name_pattern: Optional[str] = None, + ) -> List[Dict[str, Any]]: + """ + Discover UC Functions and convert to MCP tool format. + + Args: + catalog: UC catalog name + schema: UC schema name + name_pattern: Optional name pattern filter (SQL LIKE pattern) + + Returns: + List of tool definitions in MCP format + + Example: + >>> adapter = UCFunctionAdapter() + >>> tools = adapter.discover_functions("main", "functions") + >>> for tool in tools: + ... 
print(tool["name"], tool["description"]) + """ + client = self._get_client() + tools = [] + + try: + functions = client.functions.list( + catalog_name=catalog, + schema_name=schema, + ) + + for func in functions: + # Skip system functions + if func.name.startswith("system."): + continue + + # Apply name pattern filter + if name_pattern and name_pattern not in func.name: + continue + + # Convert to MCP tool format + tool = self._convert_function_to_tool(func) + if tool: + tools.append(tool) + + logger.info( + f"Discovered {len(tools)} UC Functions from {catalog}.{schema}" + ) + + except Exception as e: + logger.error(f"Failed to discover UC Functions: {e}") + + return tools + + def _convert_function_to_tool(self, func) -> Optional[Dict[str, Any]]: + """ + Convert a UC Function to MCP tool format. + + Args: + func: Function info from Databricks SDK + + Returns: + MCP tool definition or None if conversion fails + """ + try: + # Extract function metadata + name = func.name.split(".")[-1] # Get short name + description = func.comment or f"Unity Catalog function: {name}" + + # Build parameter schema + input_schema = { + "type": "object", + "properties": {}, + "required": [] + } + + # Parse function parameters + if hasattr(func, "input_params") and func.input_params: + for param in func.input_params.parameters: + param_name = param.name + param_type = self._map_uc_type_to_json_type(param.type_name) + + input_schema["properties"][param_name] = { + "type": param_type, + "description": param.comment or "" + } + + # Parameters without defaults are required + if not hasattr(param, "default_value") or param.default_value is None: + input_schema["required"].append(param_name) + + return { + "name": name, + "description": description, + "inputSchema": input_schema, + "full_name": func.full_name, + "source": "unity_catalog" + } + + except Exception as e: + logger.warning(f"Failed to convert function {func.name}: {e}") + return None + + def _map_uc_type_to_json_type(self, uc_type: 
str) -> str: + """ + Map Unity Catalog data type to JSON Schema type. + + Args: + uc_type: UC type name (e.g., "STRING", "BIGINT", "BOOLEAN") + + Returns: + JSON Schema type ("string", "number", "boolean", etc.) + """ + type_mapping = { + "STRING": "string", + "VARCHAR": "string", + "CHAR": "string", + "BIGINT": "integer", + "INT": "integer", + "INTEGER": "integer", + "SMALLINT": "integer", + "TINYINT": "integer", + "DOUBLE": "number", + "FLOAT": "number", + "DECIMAL": "number", + "BOOLEAN": "boolean", + "BINARY": "string", + "DATE": "string", + "TIMESTAMP": "string", + "ARRAY": "array", + "MAP": "object", + "STRUCT": "object", + } + + uc_type_upper = uc_type.upper() + return type_mapping.get(uc_type_upper, "string") + + async def call_function( + self, + full_name: str, + arguments: Dict[str, Any] + ) -> Any: + """ + Call a UC Function with given arguments. + + Args: + full_name: Full function name (catalog.schema.function) + arguments: Function arguments + + Returns: + Function result + + Example: + >>> adapter = UCFunctionAdapter() + >>> result = await adapter.call_function( + ... "main.functions.calculate_tax", + ... {"amount": 100, "rate": 0.08} + ... 
) + """ + client = self._get_client() + + try: + # Build SQL query to call the function + args_list = [f":{key}" for key in arguments.keys()] + query = f"SELECT {full_name}({', '.join(args_list)})" + + # Execute via SQL warehouse + # Note: This requires a warehouse ID to be configured + result = client.statement_execution.execute_statement( + statement=query, + warehouse_id=self._get_default_warehouse(), + parameters=[ + {"name": key, "value": str(value)} + for key, value in arguments.items() + ] + ) + + return result.result.data_array[0][0] if result.result.data_array else None + + except Exception as e: + logger.error(f"Failed to call UC Function {full_name}: {e}") + raise + + def _get_default_warehouse(self) -> str: + """Get default SQL warehouse ID from environment or client.""" + import os + warehouse_id = os.getenv("DATABRICKS_WAREHOUSE_ID") + if not warehouse_id: + raise ValueError( + "DATABRICKS_WAREHOUSE_ID not set. " + "Set this environment variable to use UC Functions." + ) + return warehouse_id diff --git a/dbx-agent-app/examples/data-tools/dbx_agent_app/py.typed b/dbx-agent-app/examples/data-tools/dbx_agent_app/py.typed new file mode 100644 index 00000000..e69de29b diff --git a/dbx-agent-app/examples/data-tools/dbx_agent_app/registry/__init__.py b/dbx-agent-app/examples/data-tools/dbx_agent_app/registry/__init__.py new file mode 100644 index 00000000..892043b7 --- /dev/null +++ b/dbx-agent-app/examples/data-tools/dbx_agent_app/registry/__init__.py @@ -0,0 +1,10 @@ +""" +Unity Catalog integration for agent registration. + +This module provides utilities for registering agents in Unity Catalog +as AGENT objects, enabling catalog-based discovery and permission management. 
+""" + +from .uc_registry import UCAgentRegistry, UCAgentSpec, UCRegistrationError + +__all__ = ["UCAgentRegistry", "UCAgentSpec", "UCRegistrationError"] diff --git a/dbx-agent-app/examples/data-tools/dbx_agent_app/registry/uc_registry.py b/dbx-agent-app/examples/data-tools/dbx_agent_app/registry/uc_registry.py new file mode 100644 index 00000000..f2651aca --- /dev/null +++ b/dbx-agent-app/examples/data-tools/dbx_agent_app/registry/uc_registry.py @@ -0,0 +1,345 @@ +""" +Unity Catalog agent registry. + +Registers and manages agents as Unity Catalog AGENT objects for +catalog-based discovery and permission management. +""" + +import json +import logging +from typing import Dict, Any, Optional, List +from dataclasses import dataclass + +from databricks.sdk import WorkspaceClient + +logger = logging.getLogger(__name__) + + +class UCRegistrationError(Exception): + """Raised when agent registration in Unity Catalog fails.""" + pass + + +@dataclass +class UCAgentSpec: + """ + Specification for registering an agent in Unity Catalog. + + Attributes: + name: Agent name (will be catalog object name) + catalog: UC catalog name + schema: UC schema name + endpoint_url: Agent's base URL + description: Agent description + capabilities: List of agent capabilities + properties: Additional metadata key-value pairs + """ + name: str + catalog: str + schema: str + endpoint_url: str + description: Optional[str] = None + capabilities: Optional[List[str]] = None + properties: Optional[Dict[str, str]] = None + + +class UCAgentRegistry: + """ + Unity Catalog agent registry. + + Registers agents as UC AGENT objects for catalog-based discovery + and permission management. 
+ + Usage: + registry = UCAgentRegistry(profile="my-profile") + + spec = UCAgentSpec( + name="customer_research", + catalog="main", + schema="agents", + endpoint_url="https://app.databricksapps.com", + description="Customer research agent", + capabilities=["search", "analysis"], + ) + + registry.register_agent(spec) + """ + + def __init__(self, profile: Optional[str] = None): + """ + Initialize UC agent registry. + + Args: + profile: Databricks CLI profile name (uses default if not specified) + """ + self.profile = profile + self._client: Optional[WorkspaceClient] = None + + def _get_client(self) -> WorkspaceClient: + """Get or create workspace client.""" + if self._client is None: + self._client = ( + WorkspaceClient(profile=self.profile) + if self.profile + else WorkspaceClient() + ) + return self._client + + def register_agent(self, spec: UCAgentSpec) -> Dict[str, Any]: + """ + Register an agent in Unity Catalog. + + Creates a AGENT object in the specified catalog and schema with + metadata about the agent's endpoint, capabilities, and properties. + + Args: + spec: Agent specification + + Returns: + Dictionary with registration details + + Raises: + UCRegistrationError: If registration fails + + Example: + >>> registry = UCAgentRegistry(profile="my-profile") + >>> spec = UCAgentSpec( + ... name="my_agent", + ... catalog="main", + ... schema="agents", + ... endpoint_url="https://app.databricksapps.com", + ... 
) + >>> result = registry.register_agent(spec) + """ + client = self._get_client() + full_name = f"{spec.catalog}.{spec.schema}.{spec.name}" + + try: + # Build agent properties for UC metadata + properties = spec.properties or {} + properties["endpoint_url"] = spec.endpoint_url + properties["agent_card_url"] = f"{spec.endpoint_url}/.well-known/agent.json" + + if spec.capabilities: + properties["capabilities"] = ",".join(spec.capabilities) + + # Register as a UC registered model with AGENT type + # (UC doesn't have a native AGENT type yet, so we use registered models + # with special tags/properties to mark them as agents) + + logger.info(f"Registering agent '{full_name}' in Unity Catalog") + + # Check if catalog and schema exist + try: + client.catalogs.get(spec.catalog) + except Exception as e: + raise UCRegistrationError( + f"Catalog '{spec.catalog}' does not exist or is not accessible: {e}" + ) + + try: + client.schemas.get(f"{spec.catalog}.{spec.schema}") + except Exception as e: + raise UCRegistrationError( + f"Schema '{spec.catalog}.{spec.schema}' does not exist or is not accessible: {e}" + ) + + # Create or update registered model as agent placeholder + # In a future UC version with native AGENT support, this would use: + # client.agents.create(name=full_name, properties=properties) + + # Encode properties as JSON suffix in comment for discovery + # Format: "description\n---AGENT_META---\n{json}" + meta = {"databricks_agent": True, **properties} + comment = spec.description or "" + comment_with_meta = f"{comment}\n---AGENT_META---\n{json.dumps(meta)}" + + # Try update first (model may already exist from prior deploy), + # fall back to create if it doesn't exist + try: + client.registered_models.update( + full_name, + comment=comment_with_meta, + ) + logger.info(f"Updated existing agent '{full_name}'") + except Exception as update_err: + # Model doesn't exist or SP can't access it — try create + logger.debug(f"Update failed ({update_err}), trying create") + 
try: + client.registered_models.create( + name=spec.name, + catalog_name=spec.catalog, + schema_name=spec.schema, + comment=comment_with_meta, + ) + logger.info(f"Created new agent '{full_name}'") + except Exception as create_err: + # If create fails with "already exists", the SP just + # can't see the model — log warning but don't fail + err_str = str(create_err).lower() + if "already exists" in err_str or "not a valid name" in err_str: + logger.warning( + "Agent '%s' exists but SP cannot update it. " + "Grant the app's SP ownership or MANAGE on the model.", + full_name, + ) + else: + raise + + logger.info(f"Successfully registered agent '{full_name}'") + + return { + "full_name": full_name, + "catalog": spec.catalog, + "schema": spec.schema, + "name": spec.name, + "endpoint_url": spec.endpoint_url, + "properties": properties, + } + + except UCRegistrationError: + raise + except Exception as e: + raise UCRegistrationError( + f"Failed to register agent '{full_name}': {e}" + ) from e + + @staticmethod + def _parse_agent_meta(comment: Optional[str]) -> Optional[Dict[str, Any]]: + """Parse agent metadata from comment field (JSON after ---AGENT_META--- marker).""" + if not comment or "---AGENT_META---" not in comment: + return None + try: + _, meta_json = comment.split("---AGENT_META---", 1) + return json.loads(meta_json.strip()) + except (ValueError, json.JSONDecodeError): + return None + + @staticmethod + def _clean_description(comment: Optional[str]) -> str: + """Extract human-readable description from comment (before the meta marker).""" + if not comment: + return "" + if "---AGENT_META---" in comment: + return comment.split("---AGENT_META---")[0].strip() + return comment + + def get_agent(self, catalog: str, schema: str, name: str) -> Optional[Dict[str, Any]]: + """ + Get agent metadata from Unity Catalog. 
+ + Args: + catalog: UC catalog name + schema: UC schema name + name: Agent name + + Returns: + Agent metadata dictionary or None if not found + """ + client = self._get_client() + full_name = f"{catalog}.{schema}.{name}" + + try: + model = client.registered_models.get(full_name) + meta = self._parse_agent_meta(model.comment) + if not meta or not meta.get("databricks_agent"): + return None + + return { + "full_name": full_name, + "catalog": catalog, + "schema": schema, + "name": name, + "description": self._clean_description(model.comment), + "endpoint_url": meta.get("endpoint_url"), + "agent_card_url": meta.get("agent_card_url"), + "capabilities": meta.get("capabilities", "").split(",") if meta.get("capabilities") else None, + "properties": meta, + } + + except Exception as e: + logger.debug(f"Agent '{full_name}' not found: {e}") + return None + + def list_agents( + self, + catalog: str, + schema: Optional[str] = None, + ) -> List[Dict[str, Any]]: + """ + List all agents in a catalog or schema. 
+ + Args: + catalog: UC catalog name + schema: Optional UC schema name (lists all schemas if not specified) + + Returns: + List of agent metadata dictionaries + """ + client = self._get_client() + agents = [] + + # Determine which schemas to scan + schemas_to_scan = [schema] if schema else [] + if not schema: + try: + for s in client.schemas.list(catalog_name=catalog): + if s.name != "information_schema": + schemas_to_scan.append(s.name) + except Exception as e: + logger.error(f"Failed to list schemas in {catalog}: {e}") + return [] + + for schema_name in schemas_to_scan: + try: + models = client.registered_models.list( + catalog_name=catalog, schema_name=schema_name + ) + for model in models: + meta = self._parse_agent_meta(model.comment) + if not meta or not meta.get("databricks_agent"): + continue + + agents.append({ + "full_name": model.full_name, + "catalog": catalog, + "schema": schema_name, + "name": model.name, + "description": self._clean_description(model.comment), + "endpoint_url": meta.get("endpoint_url"), + "capabilities": meta.get("capabilities", "").split(",") if meta.get("capabilities") else None, + }) + except Exception as e: + logger.debug(f"Failed to list models in {catalog}.{schema_name}: {e}") + continue + + return agents + + def delete_agent(self, catalog: str, schema: str, name: str) -> bool: + """ + Delete an agent from Unity Catalog. 
+ + Args: + catalog: UC catalog name + schema: UC schema name + name: Agent name + + Returns: + True if deleted, False if not found + + Raises: + UCRegistrationError: If deletion fails + """ + client = self._get_client() + full_name = f"{catalog}.{schema}.{name}" + + try: + client.registered_models.delete(full_name) + logger.info(f"Deleted agent '{full_name}'") + return True + except Exception as e: + if "does not exist" in str(e).lower(): + return False + raise UCRegistrationError( + f"Failed to delete agent '{full_name}': {e}" + ) from e diff --git a/dbx-agent-app/examples/data-tools/requirements.txt b/dbx-agent-app/examples/data-tools/requirements.txt new file mode 100644 index 00000000..7b0556d1 --- /dev/null +++ b/dbx-agent-app/examples/data-tools/requirements.txt @@ -0,0 +1,5 @@ +fastapi>=0.115.0 +uvicorn[standard]>=0.30.0 +pydantic>=2.0.0 +httpx>=0.27.0 +databricks-sdk>=0.30.0 diff --git a/dbx-agent-app/examples/discover_agents.py b/dbx-agent-app/examples/discover_agents.py new file mode 100644 index 00000000..1e79b6ee --- /dev/null +++ b/dbx-agent-app/examples/discover_agents.py @@ -0,0 +1,39 @@ +""" +Example: Discover Agents + +Demonstrates how to discover agent-enabled Databricks Apps in a workspace. 
"""

import asyncio
from dbx_agent_app.discovery import AgentDiscovery


async def main():
    # Initialize discovery with your Databricks profile
    discovery = AgentDiscovery(profile="my-profile")

    # Discover all agent-enabled apps
    result = await discovery.discover_agents()

    print(f"Found {len(result.agents)} agents:\n")

    # NOTE(review): indentation inside the f-strings below may have been
    # collapsed during extraction — confirm intended spacing.
    for agent in result.agents:
        print(f"Agent: {agent.name}")
        print(f" URL: {agent.endpoint_url}")
        print(f" App: {agent.app_name}")
        if agent.description:
            print(f" Description: {agent.description}")
        if agent.capabilities:
            print(f" Capabilities: {agent.capabilities}")
        if agent.protocol_version:
            print(f" Protocol: {agent.protocol_version}")
        print()

    # Discovery is best-effort: errors are collected, not raised.
    if result.errors:
        print(f"Encountered {len(result.errors)} errors:")
        for error in result.errors:
            print(f" - {error}")


if __name__ == "__main__":
    asyncio.run(main())
diff --git a/dbx-agent-app/examples/full_featured_agent.py b/dbx-agent-app/examples/full_featured_agent.py
new file mode 100644
index 00000000..4c9cb41d
--- /dev/null
+++ b/dbx-agent-app/examples/full_featured_agent.py
"""
Example: Full-Featured Agent with MCP

Demonstrates the recommended pattern:
- Plain FastAPI app with /invocations
- add_agent_card() for platform discoverability
- add_mcp_endpoints() for MCP-aware clients
- Tools defined as plain async functions
"""

import json
from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse

from dbx_agent_app import add_agent_card, add_mcp_endpoints


app = FastAPI()


# --- Define your tools as plain async functions ---

async def process_csv(file_path: str, calculate_stats: bool = True) -> dict:
    """Process CSV file and optionally calculate statistics."""
    # Stub implementation: returns fixed example data for demonstration.
    return {
        "file_path": file_path,
        "rows_processed": 1000,
        "columns": ["id", "name", "value"],
        "statistics": {
            "mean": 45.6,
            "median": 42.0,
            "std_dev": 12.3,
        } if calculate_stats else None,
    }


async def check_data_quality(table_name: str) -> dict:
    """Run data quality checks on a table."""
    # Stub implementation: reports every check as passing.
    checks = ["nulls", "duplicates", "outliers"]
    return {
        "table": table_name,
        "checks": {c: {"passed": True, "issues_found": 0} for c in checks},
        "overall_status": "passed",
    }


# --- Standard Databricks /invocations endpoint ---

# NOTE(review): defined but not used by the /invocations handler below,
# which always calls process_csv directly.
TOOL_DISPATCH = {
    "process_csv": process_csv,
    "check_data_quality": check_data_quality,
}


@app.post("/invocations")
async def invocations(request: Request):
    body = await request.json()

    # Extract last user message
    query = ""
    for item in reversed(body.get("input", [])):
        if isinstance(item, dict) and item.get("role") == "user":
            query = item.get("content", "")
            break

    if not query:
        return JSONResponse({"error": "No user message found"}, status_code=400)

    # Simple dispatch: call process_csv with the query as file_path
    result = await process_csv(file_path=query)

    return {
        "output": [
            {
                "type": "message",
                "content": [{"type": "output_text", "text": json.dumps(result, indent=2)}],
            }
        ],
    }


# --- Make discoverable by Agent Platform ---

# Metadata doubles as the MCP tool registry; the agent card below strips
# the non-serializable "function" entries.
tools_metadata = [
    {
        "name": "process_csv",
        "description": "Process CSV data and return statistics",
        "function": process_csv,
        "parameters": {
            "file_path": {"type": "string", "required": True},
            "calculate_stats": {"type": "boolean", "required": False},
        },
    },
    {
        "name": "check_data_quality",
        "description": "Run data quality checks on a table",
        "function": check_data_quality,
        "parameters": {
            "table_name": {"type": "string", "required": True},
        },
    },
]

add_agent_card(
    app,
    name="data_processor",
    description="Process and analyze data",
    capabilities=["data_processing", "analysis"],
    tools=[{"name": t["name"], "description": t["description"], "parameters": t["parameters"]} for t in tools_metadata],
)

add_mcp_endpoints(app, tools=tools_metadata)


if __name__ == "__main__":
    import uvicorn

    print("\n" + "=" * 60)
    print("Full-Featured Agent Starting")
    print("=" * 60)
    print("\nEndpoints:")
    print(" /.well-known/agent.json — agent card (platform discovery)")
    print(" /health — health check")
    print(" /invocations — Databricks standard protocol")
    print(" /api/mcp — MCP JSON-RPC server")
    print(" /api/mcp/tools — MCP tool listing")
    print("\n" + "=" * 60 + "\n")

    uvicorn.run(app, host="0.0.0.0", port=8000)
diff --git a/dbx-agent-app/examples/hello-world/app.py b/dbx-agent-app/examples/hello-world/app.py
new file mode 100644
index 00000000..2a397176
--- /dev/null
+++ b/dbx-agent-app/examples/hello-world/app.py
"""Minimal deployable agent -- one tool, MCP enabled, zero external deps."""
# NOTE(review): AgentResponse is imported but unused in this file.
from dbx_agent_app import app_agent, AgentRequest, AgentResponse


@app_agent(
    name="hello",
    description="A minimal greeting agent",
    capabilities=["greetings"],
    auto_register=False,
    enable_mcp=True,
)
async def hello(request: AgentRequest) -> str:
    return f"Hello, {request.last_user_message}!"


@hello.tool(description="Say hello to someone by name")
async def greet(name: str) -> dict:
    return {"message": f"Hello, {name}!"}


# Expose the decorator-built FastAPI app for uvicorn / app.yaml.
app = hello.app


if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)
diff --git a/dbx-agent-app/examples/hello-world/app.yaml b/dbx-agent-app/examples/hello-world/app.yaml
new file mode 100644
index 00000000..a5a2761f
--- /dev/null
+++ b/dbx-agent-app/examples/hello-world/app.yaml
command:
  - "python"
  - "-m"
  - "uvicorn"
  - "app:app"
  - "--host"
  - "0.0.0.0"
  - "--port"
  - "8000"
diff --git a/dbx-agent-app/examples/hello-world/dbx_agent_app/__init__.py b/dbx-agent-app/examples/hello-world/dbx_agent_app/__init__.py
new file mode 100644
index 00000000..95db07d3
--- /dev/null
+++ b/dbx-agent-app/examples/hello-world/dbx_agent_app/__init__.py
"""
dbx-agent-app: Framework for building discoverable AI agents on Databricks Apps.
+ +This package provides: +- @app_agent: Decorator to turn an async function into a discoverable agent +- AgentDiscovery: Discover agents in your Databricks workspace +- A2AClient: Communicate with agents using the A2A protocol +- UCAgentRegistry: Register agents in Unity Catalog +- MCPServerConfig: Configure MCP server for agent tools +""" + +from .discovery import AgentDiscovery, DiscoveredAgent, AgentDiscoveryResult, A2AClient, A2AClientError +from .mcp import MCPServerConfig, setup_mcp_server, UCFunctionAdapter +from .registry import UCAgentRegistry, UCAgentSpec, UCRegistrationError +from .dashboard import create_dashboard_app + +try: + from importlib.metadata import version + __version__ = version("dbx-agent-app") +except Exception: + __version__ = "0.1.0" + +__all__ = [ + # Discovery + "AgentDiscovery", + "DiscoveredAgent", + "AgentDiscoveryResult", + "A2AClient", + "A2AClientError", + # Registry + "UCAgentRegistry", + "UCAgentSpec", + "UCRegistrationError", + # MCP + "MCPServerConfig", + "setup_mcp_server", + "UCFunctionAdapter", + # Dashboard + "create_dashboard_app", +] + diff --git a/dbx-agent-app/examples/hello-world/dbx_agent_app/core/__init__.py b/dbx-agent-app/examples/hello-world/dbx_agent_app/core/__init__.py new file mode 100644 index 00000000..623da88c --- /dev/null +++ b/dbx-agent-app/examples/hello-world/dbx_agent_app/core/__init__.py @@ -0,0 +1 @@ +"""Core agent application components.""" diff --git a/dbx-agent-app/examples/hello-world/dbx_agent_app/dashboard/__init__.py b/dbx-agent-app/examples/hello-world/dbx_agent_app/dashboard/__init__.py new file mode 100644 index 00000000..89929a08 --- /dev/null +++ b/dbx-agent-app/examples/hello-world/dbx_agent_app/dashboard/__init__.py @@ -0,0 +1,14 @@ +""" +Developer dashboard for agent discovery. 
+ +Launch via CLI: + dbx-agent-app dashboard --profile my-profile + +Or programmatically: + from dbx_agent_app.dashboard import create_dashboard_app, run_dashboard +""" + +from .app import create_dashboard_app +from .cli import main as run_dashboard + +__all__ = ["create_dashboard_app", "run_dashboard"] diff --git a/dbx-agent-app/examples/hello-world/dbx_agent_app/dashboard/app.py b/dbx-agent-app/examples/hello-world/dbx_agent_app/dashboard/app.py new file mode 100644 index 00000000..d16c3d54 --- /dev/null +++ b/dbx-agent-app/examples/hello-world/dbx_agent_app/dashboard/app.py @@ -0,0 +1,112 @@ +""" +FastAPI application for the developer dashboard. + +Routes: + HTML: GET / — agent list page + GET /agent/{name} — agent detail page + API: GET /api/agents — JSON list of agents + GET /api/agents/{name}/card — full agent card + POST /api/agents/{name}/mcp — MCP JSON-RPC proxy + POST /api/scan — trigger re-scan + GET /health — health check +""" + +import logging +from typing import Optional + +from fastapi import FastAPI, Request +from fastapi.responses import HTMLResponse, JSONResponse + +from .scanner import DashboardScanner +from .templates import render_agent_list, render_agent_detail + +logger = logging.getLogger(__name__) + + +def create_dashboard_app( + scanner: DashboardScanner, + profile: Optional[str] = None, +) -> FastAPI: + """Build and return the dashboard FastAPI app.""" + app = FastAPI(title="dbx-agent-app dashboard", docs_url=None, redoc_url=None) + + # --- HTML pages ------------------------------------------------------- + + @app.get("/", response_class=HTMLResponse) + async def index(): + agents = scanner.get_agents() + return render_agent_list(agents) + + @app.get("/agent/{name}", response_class=HTMLResponse) + async def agent_detail(name: str): + agent = scanner.get_agent_by_name(name) + if not agent: + return HTMLResponse("

Agent not found

", status_code=404) + + card = None + try: + card = await scanner.get_agent_card(agent.endpoint_url) + except Exception as e: + logger.warning("Could not fetch card for %s: %s", name, e) + + return render_agent_detail(agent, card) + + # --- JSON API --------------------------------------------------------- + + @app.get("/api/agents") + async def api_agents(): + agents = scanner.get_agents() + return [ + { + "name": a.name, + "endpoint_url": a.endpoint_url, + "app_name": a.app_name, + "description": a.description, + "capabilities": a.capabilities, + "protocol_version": a.protocol_version, + } + for a in agents + ] + + @app.get("/api/agents/{name}/card") + async def api_agent_card(name: str): + agent = scanner.get_agent_by_name(name) + if not agent: + return JSONResponse({"error": "Agent not found"}, status_code=404) + + try: + card = await scanner.get_agent_card(agent.endpoint_url) + return card + except Exception as e: + return JSONResponse({"error": str(e)}, status_code=502) + + @app.post("/api/agents/{name}/mcp") + async def api_mcp_proxy(name: str, request: Request): + agent = scanner.get_agent_by_name(name) + if not agent: + return JSONResponse({"error": "Agent not found"}, status_code=404) + + try: + payload = await request.json() + result = await scanner.proxy_mcp(agent.endpoint_url, payload) + return result + except Exception as e: + return JSONResponse( + {"jsonrpc": "2.0", "id": None, "error": {"code": -1, "message": str(e)}}, + status_code=502, + ) + + @app.post("/api/scan") + async def api_scan(): + agents = await scanner.scan() + return {"count": len(agents), "agents": [a.name for a in agents]} + + @app.get("/health") + async def health(): + return { + "status": "ok", + "agents_cached": len(scanner.get_agents()), + "profile": profile, + } + + return app diff --git a/dbx-agent-app/examples/hello-world/dbx_agent_app/dashboard/cli.py b/dbx-agent-app/examples/hello-world/dbx_agent_app/dashboard/cli.py new file mode 100644 index 00000000..3b17aa90 --- 
/dev/null +++ b/dbx-agent-app/examples/hello-world/dbx_agent_app/dashboard/cli.py @@ -0,0 +1,63 @@ +""" +CLI entry point for the developer dashboard. + +Usage: + dbx-agent-app dashboard --profile my-profile --port 8501 +""" + +import argparse +import asyncio +import logging +import sys +import webbrowser + +import uvicorn + +from .scanner import DashboardScanner +from .app import create_dashboard_app + + +def main(): + parser = argparse.ArgumentParser( + prog="dbx-agent-app", + description="Developer dashboard for Databricks agent discovery", + ) + sub = parser.add_subparsers(dest="command") + + dash = sub.add_parser("dashboard", help="Launch the agent discovery dashboard") + dash.add_argument("--profile", type=str, default=None, help="Databricks CLI profile") + dash.add_argument("--port", type=int, default=8501, help="Port to serve on (default: 8501)") + dash.add_argument("--host", type=str, default="127.0.0.1", help="Host to bind (default: 127.0.0.1)") + dash.add_argument("--no-browser", action="store_true", help="Don't auto-open browser") + + args = parser.parse_args() + + if args.command != "dashboard": + parser.print_help() + sys.exit(1) + + logging.basicConfig(level=logging.INFO, format="%(levelname)s %(name)s: %(message)s") + + scanner = DashboardScanner(profile=args.profile) + + # Run initial scan + print(f"Scanning workspace for agents (profile={args.profile or 'default'})...") + try: + agents = asyncio.run(scanner.scan()) + print(f"Found {len(agents)} agent(s)") + except Exception as e: + print(f"Initial scan failed: {e}", file=sys.stderr) + print("Dashboard will start anyway — use the Scan button to retry.") + + app = create_dashboard_app(scanner, profile=args.profile) + + url = f"http://{args.host}:{args.port}" + if not args.no_browser: + webbrowser.open(url) + + print(f"Dashboard running at {url}") + uvicorn.run(app, host=args.host, port=args.port, log_level="warning") + + +if __name__ == "__main__": + main() diff --git 
"""
Dashboard scanner — wraps AgentDiscovery + A2AClient with caching and MCP proxy.
"""

import asyncio
import logging
from typing import Dict, Any, List, Optional

import httpx

from ..discovery import AgentDiscovery, DiscoveredAgent, A2AClient, A2AClientError

logger = logging.getLogger(__name__)


class DashboardScanner:
    """
    Thin wrapper around AgentDiscovery that adds result caching
    and MCP JSON-RPC proxying for the dashboard UI.
    """

    def __init__(self, profile: Optional[str] = None):
        self._discovery = AgentDiscovery(profile=profile)
        self._agents: List[DiscoveredAgent] = []
        self._scan_lock = asyncio.Lock()
        # Tracks whether at least one scan has completed.
        self._scanned = False

    async def scan(self) -> List[DiscoveredAgent]:
        """Run workspace discovery and cache results. Thread-safe via asyncio.Lock."""
        async with self._scan_lock:
            outcome = await self._discovery.discover_agents()
            self._agents = outcome.agents
            self._scanned = True
            # Discovery is best-effort; surface its errors as warnings.
            for err in (outcome.errors or []):
                logger.warning("Discovery error: %s", err)
            return self._agents

    def get_agents(self) -> List[DiscoveredAgent]:
        """Return a snapshot of the agent list from the most recent scan."""
        return list(self._agents)

    def get_agent_by_name(self, name: str) -> Optional[DiscoveredAgent]:
        """Find a cached agent whose agent name or backing app name matches."""
        return next(
            (candidate for candidate in self._agents
             if name in (candidate.name, candidate.app_name)),
            None,
        )

    @property
    def workspace_token(self) -> Optional[str]:
        """Auth token extracted during discovery, used for cross-app requests."""
        return self._discovery._workspace_token

    async def get_agent_card(self, endpoint_url: str) -> Dict[str, Any]:
        """Fetch the full agent card JSON from a remote agent."""
        async with A2AClient(timeout=10.0) as client:
            return await client.fetch_agent_card(
                endpoint_url, auth_token=self.workspace_token
            )

    async def proxy_mcp(self, endpoint_url: str, payload: Dict[str, Any]) -> Dict[str, Any]:
        """
        Forward a JSON-RPC request to an agent's MCP endpoint.

        Args:
            endpoint_url: Agent base URL
            payload: Complete JSON-RPC 2.0 request body

        Returns:
            JSON-RPC response from the agent
        """
        target = endpoint_url.rstrip("/") + "/api/mcp"

        headers = {"Content-Type": "application/json"}
        token = self.workspace_token
        if token:
            headers["Authorization"] = f"Bearer {token}"

        async with httpx.AsyncClient(timeout=30.0, follow_redirects=True) as session:
            reply = await session.post(target, json=payload, headers=headers)
            reply.raise_for_status()
            return reply.json()
+
+

dbx-agent-app dashboard

+ +
+
+
+{content} +
+ +""" + + +# --------------------------------------------------------------------------- +# Agent list page +# --------------------------------------------------------------------------- + +def render_agent_list(agents: List[DiscoveredAgent]) -> str: + """Main page: grid of agent cards + scan button.""" + if not agents: + cards_html = """ +
+

No agents discovered

+

No agent-enabled Databricks Apps found in your workspace. Click Scan to retry.

+
""" + else: + cards = [] + for a in agents: + caps = "" + if a.capabilities: + badges = "".join( + f'{html.escape(c.strip())} ' + for c in a.capabilities.split(",") + ) + caps = f'
{badges}
' + + desc = html.escape(a.description or "No description") + cards.append(f""" + +
+

{html.escape(a.name)}

+

{desc}

+
+ App: {html.escape(a.app_name)} + {f'Protocol: {html.escape(a.protocol_version)}' if a.protocol_version else ''} +
+ {caps} +
+
""") + cards_html = f'
{"".join(cards)}
' + + return render_base( + "Agent Dashboard", + f""" +
+ {len(agents)} agent{"s" if len(agents) != 1 else ""} discovered + +
+{cards_html} +""", + ) + + +# --------------------------------------------------------------------------- +# Agent detail page +# --------------------------------------------------------------------------- + +def render_agent_detail( + agent: DiscoveredAgent, + card: Optional[Dict[str, Any]] = None, +) -> str: + """Detail page: agent card JSON, tools list, MCP test panel.""" + card_json = json.dumps(card, indent=2) if card else "Card not available" + + # Extract tools from card if present + tools_html = "" + if card: + skills = card.get("skills") or card.get("tools") or [] + if skills: + rows = [] + for t in skills: + name = html.escape(t.get("name", t.get("id", "unknown"))) + desc = html.escape(t.get("description", "")) + rows.append( + f'
{name}' + f'
{desc}
' + ) + tools_html = f""" +
+

Tools ({len(skills)})

+ {"".join(rows)} +
""" + + safe_name = html.escape(agent.name) + safe_endpoint = html.escape(agent.endpoint_url) + + return render_base( + f"{safe_name} — Agent Dashboard", + f""" +
+ ← All agents +

{safe_name}

+

{html.escape(agent.description or 'No description')}

+
+ Endpoint: {safe_endpoint} + App: {html.escape(agent.app_name)} + {f'{html.escape(agent.protocol_version)}' if agent.protocol_version else ''} +
+
+ +
+

Agent Card

+
{html.escape(card_json)}
+
+ +{tools_html} + +
+

MCP Test Panel

+

+ Send a JSON-RPC request to this agent's /api/mcp endpoint. +

+
+ + +
+ + +
+
+ +""", + ) diff --git a/dbx-agent-app/examples/hello-world/dbx_agent_app/discovery/__init__.py b/dbx-agent-app/examples/hello-world/dbx_agent_app/discovery/__init__.py new file mode 100644 index 00000000..d6d04008 --- /dev/null +++ b/dbx-agent-app/examples/hello-world/dbx_agent_app/discovery/__init__.py @@ -0,0 +1,24 @@ +""" +Agent discovery for Databricks Apps. + +This module provides clients and utilities for discovering agent-enabled +Databricks Apps that expose A2A protocol agent cards. +""" + +from .agent_discovery import ( + AgentDiscovery, + DiscoveredAgent, + AgentDiscoveryResult, +) +from .a2a_client import ( + A2AClient, + A2AClientError, +) + +__all__ = [ + "AgentDiscovery", + "DiscoveredAgent", + "AgentDiscoveryResult", + "A2AClient", + "A2AClientError", +] diff --git a/dbx-agent-app/examples/hello-world/dbx_agent_app/discovery/a2a_client.py b/dbx-agent-app/examples/hello-world/dbx_agent_app/discovery/a2a_client.py new file mode 100644 index 00000000..1243d1a3 --- /dev/null +++ b/dbx-agent-app/examples/hello-world/dbx_agent_app/discovery/a2a_client.py @@ -0,0 +1,268 @@ +""" +A2A Client for agent-to-agent communication. + +Implements the A2A protocol for discovering and communicating with peer agents. +""" + +import json +import uuid +import logging +from typing import Dict, Any, Optional, AsyncIterator + +import httpx + +logger = logging.getLogger(__name__) + + +class A2AClientError(Exception): + """Raised when an A2A operation fails.""" + pass + + +class A2AClient: + """ + Async client for A2A protocol communication with peer agents. + + Usage: + async with A2AClient() as client: + card = await client.fetch_agent_card("https://app.databricksapps.com") + result = await client.send_message("https://app.databricksapps.com/api/a2a", "Hello") + """ + + def __init__(self, timeout: float = 60.0): + """ + Initialize A2A client. 
class A2AClient:
    """
    Async client for A2A protocol communication with peer agents.

    Usage:
        async with A2AClient() as client:
            card = await client.fetch_agent_card("https://app.databricksapps.com")
            result = await client.send_message("https://app.databricksapps.com/api/a2a", "Hello")
    """

    def __init__(self, timeout: float = 60.0):
        """
        Initialize A2A client.

        Args:
            timeout: Request timeout in seconds
        """
        self.timeout = timeout
        # Underlying HTTP client; created on context entry (__aenter__).
        self._client: Optional[httpx.AsyncClient] = None

    async def __aenter__(self):
        self._client = httpx.AsyncClient(
            timeout=self.timeout,
            follow_redirects=True,
        )
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        if self._client:
            await self._client.aclose()

    def _auth_headers(self, auth_token: Optional[str] = None) -> Dict[str, str]:
        """Build authentication headers."""
        headers = {"Content-Type": "application/json"}
        if auth_token:
            headers["Authorization"] = f"Bearer {auth_token}"
        return headers

    async def fetch_agent_card(
        self,
        base_url: str,
        auth_token: Optional[str] = None,
    ) -> Dict[str, Any]:
        """
        Fetch an agent's A2A protocol agent card.

        Tries /.well-known/agent.json first, then /card as fallback.
        Handles OAuth redirects gracefully (returns error instead of following).

        Args:
            base_url: Base URL of the agent application
            auth_token: Optional OAuth token for authenticated requests

        Returns:
            Agent card JSON data

        Raises:
            A2AClientError: If agent card cannot be fetched

        Example:
            >>> async with A2AClient() as client:
            >>>     card = await client.fetch_agent_card("https://app.databricksapps.com")
            >>>     print(card["name"], card["description"])
        """
        if not self._client:
            raise A2AClientError("Client not initialized. Use async context manager.")

        headers = {}
        if auth_token:
            headers["Authorization"] = f"Bearer {auth_token}"

        # Use a client that doesn't follow redirects to detect OAuth flows.
        async with httpx.AsyncClient(timeout=self.timeout, follow_redirects=False) as probe_client:
            for path in ["/.well-known/agent.json", "/card"]:
                try:
                    url = base_url.rstrip("/") + path
                    response = await probe_client.get(url, headers=headers)

                    # OAuth redirect detected - app requires interactive auth
                    if response.status_code in (301, 302, 303, 307, 308):
                        logger.debug(f"OAuth redirect detected for {url}")
                        continue

                    if response.status_code == 200:
                        if not response.text or response.text.isspace():
                            logger.debug(f"Empty response body for {url}")
                            continue
                        return response.json()

                except Exception as e:
                    # Best-effort probing: any failure moves to the next path.
                    logger.debug(f"Agent card fetch failed for {url}: {e}")
                    continue

        raise A2AClientError(f"Could not fetch agent card from {base_url}")

    async def _jsonrpc_call(
        self,
        url: str,
        method: str,
        params: Dict[str, Any],
        auth_token: Optional[str] = None,
    ) -> Dict[str, Any]:
        """
        Send a JSON-RPC 2.0 request to an agent.

        Args:
            url: A2A endpoint URL
            method: JSON-RPC method name (e.g., "message/send")
            params: Method parameters
            auth_token: Optional authentication token

        Returns:
            JSON-RPC result

        Raises:
            A2AClientError: If request fails or returns error
        """
        if not self._client:
            raise A2AClientError("Client not initialized. Use async context manager.")

        payload = {
            "jsonrpc": "2.0",
            "id": str(uuid.uuid4()),
            "method": method,
            "params": params,
        }

        try:
            response = await self._client.post(
                url,
                json=payload,
                headers=self._auth_headers(auth_token),
            )
            response.raise_for_status()
            result = response.json()

            if "error" in result:
                error = result["error"]
                raise A2AClientError(
                    f"A2A error: {error.get('message', 'Unknown')} "
                    f"(code: {error.get('code')})"
                )

            return result.get("result", {})

        # IMPROVEMENT: chain the original exceptions (`from e`) so the
        # underlying httpx/json failure is preserved in tracebacks.
        except httpx.TimeoutException as e:
            raise A2AClientError(f"Request to {url} timed out: {e}") from e
        except httpx.HTTPStatusError as e:
            raise A2AClientError(
                f"HTTP error from {url}: {e.response.status_code}"
            ) from e
        except json.JSONDecodeError as e:
            raise A2AClientError(f"Invalid JSON from {url}: {e}") from e

    async def send_message(
        self,
        agent_url: str,
        message: str,
        context_id: Optional[str] = None,
        auth_token: Optional[str] = None,
    ) -> Dict[str, Any]:
        """
        Send a message to a peer agent using A2A protocol.

        Args:
            agent_url: Agent's A2A endpoint URL
            message: Text message to send
            context_id: Optional conversation context ID
            auth_token: Optional authentication token

        Returns:
            Agent's response

        Example:
            >>> async with A2AClient() as client:
            >>>     response = await client.send_message(
            >>>         "https://app.databricksapps.com/api/a2a",
            >>>         "What are your capabilities?"
            >>>     )
        """
        params: Dict[str, Any] = {
            "message": {
                "messageId": str(uuid.uuid4()),
                "role": "user",
                "parts": [{"text": message}],
            }
        }
        if context_id:
            params["message"]["contextId"] = context_id

        return await self._jsonrpc_call(
            agent_url, "message/send", params, auth_token
        )

    async def send_streaming_message(
        self,
        agent_url: str,
        message: str,
        auth_token: Optional[str] = None,
    ) -> AsyncIterator[Dict[str, Any]]:
        """
        Send a streaming message and yield SSE events.

        Args:
            agent_url: Agent's A2A endpoint URL
            message: Text message to send
            auth_token: Optional authentication token

        Yields:
            SSE events from the agent's response stream

        Example:
            >>> async with A2AClient() as client:
            >>>     async for event in client.send_streaming_message(url, "Analyze this"):
            >>>         print(event)
        """
        if not self._client:
            raise A2AClientError("Client not initialized. Use async context manager.")

        stream_url = agent_url.rstrip("/") + "/stream"

        payload = {
            "jsonrpc": "2.0",
            "id": str(uuid.uuid4()),
            "method": "message/stream",
            "params": {
                "message": {
                    "messageId": str(uuid.uuid4()),
                    "role": "user",
                    "parts": [{"text": message}],
                }
            },
        }

        async with self._client.stream(
            "POST",
            stream_url,
            json=payload,
            headers=self._auth_headers(auth_token),
        ) as response:
            response.raise_for_status()
            async for line in response.aiter_lines():
                # SSE frames: only "data: ..." lines carry payloads;
                # malformed JSON frames are skipped silently.
                if line.startswith("data: "):
                    try:
                        yield json.loads(line[6:])
                    except json.JSONDecodeError:
                        continue
"""
Agent discovery for Databricks Apps.

Discovers agent-enabled Databricks Apps by scanning workspace apps
and probing for A2A protocol agent cards.
"""

import asyncio
import logging
from typing import List, Dict, Any, Optional
from dataclasses import dataclass

logger = logging.getLogger(__name__)

# Agent card probe paths and timeout
AGENT_CARD_PATHS = ["/.well-known/agent.json", "/card"]
AGENT_CARD_PROBE_TIMEOUT = 5.0


@dataclass
class DiscoveredAgent:
    """
    An agent discovered from a Databricks App.

    Attributes:
        name: Agent name (from agent card or app name)
        endpoint_url: Agent's base URL
        description: Agent description (from agent card)
        capabilities: Comma-separated list of capabilities
        protocol_version: A2A protocol version
        app_name: Name of the backing Databricks App
    """
    name: str
    endpoint_url: str
    app_name: str
    description: Optional[str] = None
    capabilities: Optional[str] = None
    protocol_version: Optional[str] = None


@dataclass
class AgentDiscoveryResult:
    """
    Results from agent discovery operation.

    Attributes:
        agents: List of discovered agents
        errors: List of error messages encountered during discovery
    """
    agents: List[DiscoveredAgent]
    errors: List[str]


class AgentDiscovery:
    """
    Discovers agent-enabled Databricks Apps in a workspace.

    Scans running Databricks Apps and probes for A2A protocol agent cards
    to identify which apps are agents.

    Usage:
        discovery = AgentDiscovery(profile="my-profile")
        result = await discovery.discover_agents()
        for agent in result.agents:
            print(f"Found agent: {agent.name} at {agent.endpoint_url}")
    """

    def __init__(self, profile: Optional[str] = None):
        """
        Initialize agent discovery.

        Args:
            profile: Databricks CLI profile name (uses default if not specified)
        """
        self.profile = profile
        # Workspace auth token captured during listing; reused for probes.
        self._workspace_token: Optional[str] = None

    async def discover_agents(self) -> AgentDiscoveryResult:
        """
        Discover all agent-enabled Databricks Apps in the workspace.

        Returns:
            AgentDiscoveryResult with discovered agents and any errors

        Example:
            >>> discovery = AgentDiscovery(profile="my-profile")
            >>> result = await discovery.discover_agents()
            >>> print(f"Found {len(result.agents)} agents")
        """
        agents: List[DiscoveredAgent] = []
        errors: List[str] = []

        try:
            app_list = await self._list_workspace_apps()
        except Exception as e:
            logger.error("Workspace app listing failed: %s", e)
            return AgentDiscoveryResult(
                agents=[],
                errors=[f"Failed to list workspace apps: {e}"],
            )

        if not app_list:
            return AgentDiscoveryResult(agents=[], errors=[])

        # Probe each running app for agent card in parallel
        probe_tasks = [
            self._probe_app_for_agent(app_info)
            for app_info in app_list
            if app_info.get("url")
        ]

        if probe_tasks:
            probe_results = await asyncio.gather(
                *probe_tasks, return_exceptions=True
            )

            for result in probe_results:
                if isinstance(result, Exception):
                    errors.append(str(result))
                elif result is not None:
                    agents.append(result)

        logger.info(
            "Agent discovery: %d apps checked, %d agents found",
            len(app_list), len(agents)
        )

        return AgentDiscoveryResult(agents=agents, errors=errors)

    async def _list_workspace_apps(self) -> List[Dict[str, Any]]:
        """
        Enumerate Databricks Apps in the workspace.

        Returns:
            List of running apps with name, url, owner
        """
        def _list_sync() -> tuple:
            # Imported lazily: only needed when discovery actually runs,
            # and keeps this module importable without the Databricks SDK.
            from databricks.sdk import WorkspaceClient

            client = (
                WorkspaceClient(profile=self.profile)
                if self.profile
                else WorkspaceClient()
            )

            # Extract auth token for cross-app requests
            auth_headers = client.config.authenticate()
            auth_val = auth_headers.get("Authorization", "")
            token = auth_val[7:] if auth_val.startswith("Bearer ") else None

            results = []
            for app in client.apps.list():
                # Check if app is running via compute_status or deployment status
                compute_state = None
                cs = getattr(app, "compute_status", None)
                if cs:
                    compute_state = str(getattr(cs, "state", ""))

                deploy_state = None
                dep = getattr(app, "active_deployment", None)
                if dep:
                    dep_status = getattr(dep, "status", None)
                    if dep_status:
                        deploy_state = str(getattr(dep_status, "state", ""))

                app_url = getattr(app, "url", None) or ""
                app_url = app_url.rstrip("/") if app_url else ""

                results.append({
                    "name": app.name,
                    "url": app_url,
                    "owner": getattr(app, "creator", None) or getattr(app, "updater", None),
                    "compute_state": compute_state,
                    "deploy_state": deploy_state,
                })

            return results, token

        # get_running_loop() is the correct call from inside a coroutine;
        # asyncio.get_event_loop() here has been deprecated since 3.10.
        loop = asyncio.get_running_loop()
        all_apps, workspace_token = await loop.run_in_executor(None, _list_sync)

        # Store token for probing
        self._workspace_token = workspace_token

        # Filter to running apps
        running = [
            a for a in all_apps
            if a.get("url") and (
                "ACTIVE" in (a.get("compute_state") or "")
                or "SUCCEEDED" in (a.get("deploy_state") or "")
            )
        ]

        logger.info(
            "Workspace apps: %d total, %d running",
            len(all_apps), len(running)
        )

        return running

    async def _probe_app_for_agent(
        self,
        app_info: Dict[str, Any],
    ) -> Optional[DiscoveredAgent]:
        """
        Probe a Databricks App for an A2A agent card.

        Args:
            app_info: App metadata from workspace listing

        Returns:
            DiscoveredAgent if agent card found, None otherwise
        """
        # Lazy import mirrors _list_sync: avoids a hard module-level
        # dependency on the A2A client for consumers that never probe.
        from .a2a_client import A2AClient, A2AClientError

        app_url = app_info["url"]
        app_name = app_info["name"]

        token = self._workspace_token
        agent_card = None

        try:
            logger.debug(f"Probing app '{app_name}' at {app_url}")
            async with A2AClient(timeout=AGENT_CARD_PROBE_TIMEOUT) as client:
                agent_card = await client.fetch_agent_card(app_url, auth_token=token)
            logger.info(f"Found agent card for '{app_name}'")
        except A2AClientError as e:
            logger.debug(f"No agent card for '{app_name}': {e}")
            return None
        except Exception as e:
            logger.warning(f"Probe failed for '{app_name}': {e}")
            return None

        if not agent_card:
            return None

        # Extract capabilities: cards may use either a dict or a list form.
        capabilities_list = []
        caps = agent_card.get("capabilities")
        if isinstance(caps, dict):
            capabilities_list = list(caps.keys())
        elif isinstance(caps, list):
            capabilities_list = caps

        return DiscoveredAgent(
            name=agent_card.get("name", app_name),
            endpoint_url=app_url,
            app_name=app_name,
            description=agent_card.get("description"),
            capabilities=",".join(capabilities_list) if capabilities_list else None,
            protocol_version=agent_card.get("protocolVersion"),
        )
"""
MCP server implementation for agents.

Provides an MCP server that exposes agent tools via the Model Context Protocol.
Works with any FastAPI app.
"""

import json
import logging
from typing import Dict, Any, List, Optional
from dataclasses import dataclass

logger = logging.getLogger(__name__)


@dataclass
class MCPServerConfig:
    """
    Configuration for MCP server.

    Attributes:
        name: Server name
        version: Server version
        description: Server description
    """
    name: str
    version: str = "1.0.0"
    description: str = "MCP server for agent tools"


class MCPServer:
    """
    MCP server that exposes tools via JSON-RPC.

    Accepts tools as dicts with name, description, parameters, function keys
    (or objects exposing the same names as attributes).
    """

    def __init__(self, tools, config: MCPServerConfig):
        """
        Initialize MCP server.

        Args:
            tools: List of ToolDefinition objects or dicts with name/description/parameters/function
            config: MCP server configuration
        """
        self._tools = tools
        self.config = config

    def setup_routes(self, app):
        """
        Set up MCP protocol routes on the FastAPI app.

        Adds:
            - POST /api/mcp - MCP JSON-RPC endpoint
            - GET /api/mcp/tools - List available tools
        """
        # Imported here so FastAPI is only required once routes are wired
        # onto an actual app, not merely to import this module.
        from fastapi import Request

        @app.post("/api/mcp")
        async def mcp_jsonrpc(request: Request):
            """MCP JSON-RPC endpoint."""
            # Pre-bind `body` so the except-branch can reference it safely
            # even when request.json() itself raises. Previously `body` was
            # unbound in that path, turning a parse error into an
            # UnboundLocalError and masking the real failure.
            body: Any = None
            try:
                body = await request.json()
                method = body.get("method")
                params = body.get("params", {})
                request_id = body.get("id")

                if method == "tools/list":
                    result = await self._list_tools()
                elif method == "tools/call":
                    result = await self._call_tool(params)
                elif method == "server/info":
                    result = self._server_info()
                else:
                    return {
                        "jsonrpc": "2.0",
                        "id": request_id,
                        "error": {
                            "code": -32601,
                            "message": f"Method not found: {method}"
                        }
                    }

                return {
                    "jsonrpc": "2.0",
                    "id": request_id,
                    "result": result
                }

            except Exception as e:
                logger.error("MCP request failed: %s", e)
                return {
                    "jsonrpc": "2.0",
                    "id": body.get("id") if isinstance(body, dict) else None,
                    "error": {
                        "code": -32603,
                        "message": str(e)
                    }
                }

        @app.get("/api/mcp/tools")
        async def list_mcp_tools():
            """List available MCP tools."""
            return await self._list_tools()

    # Accessors that accept attribute-style objects or plain dict tools.

    def _get_tool_name(self, tool) -> str:
        return tool.name if hasattr(tool, "name") else tool["name"]

    def _get_tool_description(self, tool) -> str:
        return tool.description if hasattr(tool, "description") else tool["description"]

    def _get_tool_parameters(self, tool) -> Dict[str, Any]:
        return tool.parameters if hasattr(tool, "parameters") else tool.get("parameters", {})

    def _get_tool_function(self, tool):
        return tool.function if hasattr(tool, "function") else tool["function"]

    def _server_info(self) -> Dict[str, Any]:
        """Get MCP server information."""
        return {
            "name": self.config.name,
            "version": self.config.version,
            "description": self.config.description,
            "protocol_version": "1.0",
        }

    async def _list_tools(self) -> Dict[str, Any]:
        """List all available tools in MCP format."""
        tools = []

        for tool in self._tools:
            mcp_tool = {
                "name": self._get_tool_name(tool),
                "description": self._get_tool_description(tool),
                "inputSchema": {
                    "type": "object",
                    "properties": {},
                    "required": []
                }
            }

            # Translate each declared parameter into JSON-Schema form.
            for param_name, param_spec in self._get_tool_parameters(tool).items():
                param_type = param_spec.get("type", "string")
                mcp_tool["inputSchema"]["properties"][param_name] = {
                    "type": param_type,
                    "description": param_spec.get("description", "")
                }
                if param_spec.get("required", False):
                    mcp_tool["inputSchema"]["required"].append(param_name)

            tools.append(mcp_tool)

        return {"tools": tools}

    async def _call_tool(self, params: Dict[str, Any]) -> Dict[str, Any]:
        """
        Call a tool via MCP.

        Raises:
            ValueError: If no registered tool matches the requested name.
        """
        tool_name = params.get("name")
        arguments = params.get("arguments", {})

        tool_def = None
        for tool in self._tools:
            if self._get_tool_name(tool) == tool_name:
                tool_def = tool
                break

        if not tool_def:
            raise ValueError(f"Tool not found: {tool_name}")

        try:
            result = await self._get_tool_function(tool_def)(**arguments)
            return {"result": result}
        except Exception as e:
            logger.error("Tool execution failed: %s", e)
            raise


def setup_mcp_server(tools, config: Optional[MCPServerConfig] = None, fastapi_app=None):
    """
    Set up MCP server for an agent.

    Args:
        tools: List of tool dicts with name/description/parameters/function keys
        config: Optional MCP server configuration
        fastapi_app: FastAPI app to add routes to (required)

    Returns:
        MCPServer instance
    """
    if config is None:
        config = MCPServerConfig(name="mcp-server")

    server = MCPServer(tools, config)
    server.setup_routes(fastapi_app)

    return server
"""
Unity Catalog Functions adapter for MCP.

Automatically discovers UC Functions and exposes them as MCP tools.
"""

import logging
from typing import List, Dict, Any, Optional

logger = logging.getLogger(__name__)


class UCFunctionAdapter:
    """
    Adapter for Unity Catalog Functions to MCP protocol.

    Discovers UC Functions and converts them to MCP tool format for
    use with agents.

    Usage:
        adapter = UCFunctionAdapter(profile="my-profile")
        tools = adapter.discover_functions(catalog="main", schema="functions")
    """

    # UC type name -> JSON Schema type. Class-level so it is built once,
    # not on every _map_uc_type_to_json_type call.
    _TYPE_MAPPING: Dict[str, str] = {
        "STRING": "string",
        "VARCHAR": "string",
        "CHAR": "string",
        "BIGINT": "integer",
        "INT": "integer",
        "INTEGER": "integer",
        "SMALLINT": "integer",
        "TINYINT": "integer",
        "DOUBLE": "number",
        "FLOAT": "number",
        "DECIMAL": "number",
        "BOOLEAN": "boolean",
        "BINARY": "string",
        "DATE": "string",
        "TIMESTAMP": "string",
        "ARRAY": "array",
        "MAP": "object",
        "STRUCT": "object",
    }

    def __init__(self, profile: Optional[str] = None):
        """
        Initialize UC Functions adapter.

        Args:
            profile: Databricks CLI profile name
        """
        self.profile = profile
        # Lazily created WorkspaceClient; see _get_client().
        self._client = None

    def _get_client(self):
        """Get or create workspace client.

        The Databricks SDK is imported lazily so this module can be loaded
        (e.g. for tool-schema work) without the SDK installed.
        """
        if self._client is None:
            from databricks.sdk import WorkspaceClient
            self._client = (
                WorkspaceClient(profile=self.profile)
                if self.profile
                else WorkspaceClient()
            )
        return self._client

    def discover_functions(
        self,
        catalog: str,
        schema: str,
        name_pattern: Optional[str] = None,
    ) -> List[Dict[str, Any]]:
        """
        Discover UC Functions and convert to MCP tool format.

        Args:
            catalog: UC catalog name
            schema: UC schema name
            name_pattern: Optional substring filter on the function name
                (a plain substring match, not a SQL LIKE pattern)

        Returns:
            List of tool definitions in MCP format

        Example:
            >>> adapter = UCFunctionAdapter()
            >>> tools = adapter.discover_functions("main", "functions")
            >>> for tool in tools:
            ...     print(tool["name"], tool["description"])
        """
        client = self._get_client()
        tools = []

        try:
            functions = client.functions.list(
                catalog_name=catalog,
                schema_name=schema,
            )

            for func in functions:
                # Skip system functions. func.name is the short name and
                # never contains a dot, so the check must use the fully
                # qualified name; the old `func.name.startswith("system.")`
                # test could never match.
                qualified = getattr(func, "full_name", None) or ""
                if qualified.startswith("system."):
                    continue

                # Apply name pattern filter (substring match)
                if name_pattern and name_pattern not in func.name:
                    continue

                # Convert to MCP tool format
                tool = self._convert_function_to_tool(func)
                if tool:
                    tools.append(tool)

            logger.info(
                f"Discovered {len(tools)} UC Functions from {catalog}.{schema}"
            )

        except Exception as e:
            logger.error(f"Failed to discover UC Functions: {e}")

        return tools

    def _convert_function_to_tool(self, func) -> Optional[Dict[str, Any]]:
        """
        Convert a UC Function to MCP tool format.

        Args:
            func: Function info from Databricks SDK

        Returns:
            MCP tool definition or None if conversion fails
        """
        try:
            # Extract function metadata
            name = func.name.split(".")[-1]  # Get short name
            description = func.comment or f"Unity Catalog function: {name}"

            # Build parameter schema
            input_schema = {
                "type": "object",
                "properties": {},
                "required": []
            }

            # Parse function parameters
            if hasattr(func, "input_params") and func.input_params:
                for param in func.input_params.parameters:
                    param_name = param.name
                    param_type = self._map_uc_type_to_json_type(param.type_name)

                    input_schema["properties"][param_name] = {
                        "type": param_type,
                        "description": param.comment or ""
                    }

                    # Parameters without defaults are required
                    if not hasattr(param, "default_value") or param.default_value is None:
                        input_schema["required"].append(param_name)

            return {
                "name": name,
                "description": description,
                "inputSchema": input_schema,
                "full_name": func.full_name,
                "source": "unity_catalog"
            }

        except Exception as e:
            logger.warning(f"Failed to convert function {func.name}: {e}")
            return None

    def _map_uc_type_to_json_type(self, uc_type: str) -> str:
        """
        Map Unity Catalog data type to JSON Schema type.

        Args:
            uc_type: UC type name (e.g., "STRING", "BIGINT", "BOOLEAN")

        Returns:
            JSON Schema type ("string", "number", "boolean", etc.);
            unknown types fall back to "string"
        """
        return self._TYPE_MAPPING.get(uc_type.upper(), "string")

    async def call_function(
        self,
        full_name: str,
        arguments: Dict[str, Any]
    ) -> Any:
        """
        Call a UC Function with given arguments.

        Args:
            full_name: Full function name (catalog.schema.function)
            arguments: Function arguments (values are bound as named SQL
                parameters; note they are coerced with str())

        Returns:
            Function result

        Example:
            >>> adapter = UCFunctionAdapter()
            >>> result = await adapter.call_function(
            ...     "main.functions.calculate_tax",
            ...     {"amount": 100, "rate": 0.08}
            ... )
        """
        client = self._get_client()

        try:
            # Build SQL query to call the function. Argument VALUES are
            # passed as bound parameters; `full_name` is interpolated, so
            # only pass names obtained from UC discovery, never raw user
            # input.
            args_list = [f":{key}" for key in arguments.keys()]
            query = f"SELECT {full_name}({', '.join(args_list)})"

            # Execute via SQL warehouse
            # Note: This requires a warehouse ID to be configured
            result = client.statement_execution.execute_statement(
                statement=query,
                warehouse_id=self._get_default_warehouse(),
                parameters=[
                    {"name": key, "value": str(value)}
                    for key, value in arguments.items()
                ]
            )

            return result.result.data_array[0][0] if result.result.data_array else None

        except Exception as e:
            logger.error(f"Failed to call UC Function {full_name}: {e}")
            raise

    def _get_default_warehouse(self) -> str:
        """Get default SQL warehouse ID from environment or client.

        Raises:
            ValueError: If DATABRICKS_WAREHOUSE_ID is not set.
        """
        import os
        warehouse_id = os.getenv("DATABRICKS_WAREHOUSE_ID")
        if not warehouse_id:
            raise ValueError(
                "DATABRICKS_WAREHOUSE_ID not set. "
                "Set this environment variable to use UC Functions."
            )
        return warehouse_id
"""
Unity Catalog agent registry.

Registers and manages agents as Unity Catalog AGENT objects for
catalog-based discovery and permission management.
"""

import json
import logging
from typing import Dict, Any, Optional, List
from dataclasses import dataclass

logger = logging.getLogger(__name__)


class UCRegistrationError(Exception):
    """Raised when agent registration in Unity Catalog fails."""
    pass


@dataclass
class UCAgentSpec:
    """
    Specification for registering an agent in Unity Catalog.

    Attributes:
        name: Agent name (will be catalog object name)
        catalog: UC catalog name
        schema: UC schema name
        endpoint_url: Agent's base URL
        description: Agent description
        capabilities: List of agent capabilities
        properties: Additional metadata key-value pairs
    """
    name: str
    catalog: str
    schema: str
    endpoint_url: str
    description: Optional[str] = None
    capabilities: Optional[List[str]] = None
    properties: Optional[Dict[str, str]] = None


class UCAgentRegistry:
    """
    Unity Catalog agent registry.

    Registers agents as UC AGENT objects for catalog-based discovery
    and permission management.

    Usage:
        registry = UCAgentRegistry(profile="my-profile")

        spec = UCAgentSpec(
            name="customer_research",
            catalog="main",
            schema="agents",
            endpoint_url="https://app.databricksapps.com",
            description="Customer research agent",
            capabilities=["search", "analysis"],
        )

        registry.register_agent(spec)
    """

    def __init__(self, profile: Optional[str] = None):
        """
        Initialize UC agent registry.

        Args:
            profile: Databricks CLI profile name (uses default if not specified)
        """
        self.profile = profile
        # Lazily created WorkspaceClient; see _get_client().
        self._client = None

    def _get_client(self):
        """Get or create workspace client.

        The Databricks SDK is imported lazily so this module can be loaded
        (e.g. for metadata parsing) without the SDK installed.
        """
        if self._client is None:
            from databricks.sdk import WorkspaceClient
            self._client = (
                WorkspaceClient(profile=self.profile)
                if self.profile
                else WorkspaceClient()
            )
        return self._client

    def register_agent(self, spec: UCAgentSpec) -> Dict[str, Any]:
        """
        Register an agent in Unity Catalog.

        Creates a AGENT object in the specified catalog and schema with
        metadata about the agent's endpoint, capabilities, and properties.

        Args:
            spec: Agent specification

        Returns:
            Dictionary with registration details

        Raises:
            UCRegistrationError: If registration fails

        Example:
            >>> registry = UCAgentRegistry(profile="my-profile")
            >>> spec = UCAgentSpec(
            ...     name="my_agent",
            ...     catalog="main",
            ...     schema="agents",
            ...     endpoint_url="https://app.databricksapps.com",
            ... )
            >>> result = registry.register_agent(spec)
        """
        client = self._get_client()
        full_name = f"{spec.catalog}.{spec.schema}.{spec.name}"

        try:
            # Build agent properties for UC metadata. Copy the caller's dict
            # so the endpoint/capability keys added below do not leak back
            # into spec.properties (the old `spec.properties or {}` aliased
            # and mutated it).
            properties = dict(spec.properties) if spec.properties else {}
            properties["endpoint_url"] = spec.endpoint_url
            properties["agent_card_url"] = f"{spec.endpoint_url}/.well-known/agent.json"

            if spec.capabilities:
                properties["capabilities"] = ",".join(spec.capabilities)

            # Register as a UC registered model with AGENT type
            # (UC doesn't have a native AGENT type yet, so we use registered models
            # with special tags/properties to mark them as agents)

            logger.info(f"Registering agent '{full_name}' in Unity Catalog")

            # Check if catalog and schema exist
            try:
                client.catalogs.get(spec.catalog)
            except Exception as e:
                raise UCRegistrationError(
                    f"Catalog '{spec.catalog}' does not exist or is not accessible: {e}"
                )

            try:
                client.schemas.get(f"{spec.catalog}.{spec.schema}")
            except Exception as e:
                raise UCRegistrationError(
                    f"Schema '{spec.catalog}.{spec.schema}' does not exist or is not accessible: {e}"
                )

            # Create or update registered model as agent placeholder
            # In a future UC version with native AGENT support, this would use:
            #   client.agents.create(name=full_name, properties=properties)

            # Encode properties as JSON suffix in comment for discovery
            # Format: "description\n---AGENT_META---\n{json}"
            meta = {"databricks_agent": True, **properties}
            comment = spec.description or ""
            comment_with_meta = f"{comment}\n---AGENT_META---\n{json.dumps(meta)}"

            # Try update first (model may already exist from prior deploy),
            # fall back to create if it doesn't exist
            try:
                client.registered_models.update(
                    full_name,
                    comment=comment_with_meta,
                )
                logger.info(f"Updated existing agent '{full_name}'")
            except Exception as update_err:
                # Model doesn't exist or SP can't access it — try create
                logger.debug(f"Update failed ({update_err}), trying create")
                try:
                    client.registered_models.create(
                        name=spec.name,
                        catalog_name=spec.catalog,
                        schema_name=spec.schema,
                        comment=comment_with_meta,
                    )
                    logger.info(f"Created new agent '{full_name}'")
                except Exception as create_err:
                    # If create fails with "already exists", the SP just
                    # can't see the model — log warning but don't fail
                    err_str = str(create_err).lower()
                    if "already exists" in err_str or "not a valid name" in err_str:
                        logger.warning(
                            "Agent '%s' exists but SP cannot update it. "
                            "Grant the app's SP ownership or MANAGE on the model.",
                            full_name,
                        )
                    else:
                        raise

            logger.info(f"Successfully registered agent '{full_name}'")

            return {
                "full_name": full_name,
                "catalog": spec.catalog,
                "schema": spec.schema,
                "name": spec.name,
                "endpoint_url": spec.endpoint_url,
                "properties": properties,
            }

        except UCRegistrationError:
            raise
        except Exception as e:
            raise UCRegistrationError(
                f"Failed to register agent '{full_name}': {e}"
            ) from e

    @staticmethod
    def _parse_agent_meta(comment: Optional[str]) -> Optional[Dict[str, Any]]:
        """Parse agent metadata from comment field (JSON after ---AGENT_META--- marker)."""
        if not comment or "---AGENT_META---" not in comment:
            return None
        try:
            _, meta_json = comment.split("---AGENT_META---", 1)
            return json.loads(meta_json.strip())
        except (ValueError, json.JSONDecodeError):
            return None

    @staticmethod
    def _clean_description(comment: Optional[str]) -> str:
        """Extract human-readable description from comment (before the meta marker)."""
        if not comment:
            return ""
        if "---AGENT_META---" in comment:
            return comment.split("---AGENT_META---")[0].strip()
        return comment

    def get_agent(self, catalog: str, schema: str, name: str) -> Optional[Dict[str, Any]]:
        """
        Get agent metadata from Unity Catalog.

        Args:
            catalog: UC catalog name
            schema: UC schema name
            name: Agent name

        Returns:
            Agent metadata dictionary or None if not found
        """
        client = self._get_client()
        full_name = f"{catalog}.{schema}.{name}"

        try:
            model = client.registered_models.get(full_name)
            meta = self._parse_agent_meta(model.comment)
            if not meta or not meta.get("databricks_agent"):
                return None

            return {
                "full_name": full_name,
                "catalog": catalog,
                "schema": schema,
                "name": name,
                "description": self._clean_description(model.comment),
                "endpoint_url": meta.get("endpoint_url"),
                "agent_card_url": meta.get("agent_card_url"),
                "capabilities": meta.get("capabilities", "").split(",") if meta.get("capabilities") else None,
                "properties": meta,
            }

        except Exception as e:
            logger.debug(f"Agent '{full_name}' not found: {e}")
            return None

    def list_agents(
        self,
        catalog: str,
        schema: Optional[str] = None,
    ) -> List[Dict[str, Any]]:
        """
        List all agents in a catalog or schema.

        Args:
            catalog: UC catalog name
            schema: Optional UC schema name (lists all schemas if not specified)

        Returns:
            List of agent metadata dictionaries
        """
        client = self._get_client()
        agents = []

        # Determine which schemas to scan
        schemas_to_scan = [schema] if schema else []
        if not schema:
            try:
                for s in client.schemas.list(catalog_name=catalog):
                    if s.name != "information_schema":
                        schemas_to_scan.append(s.name)
            except Exception as e:
                logger.error(f"Failed to list schemas in {catalog}: {e}")
                return []

        for schema_name in schemas_to_scan:
            try:
                models = client.registered_models.list(
                    catalog_name=catalog, schema_name=schema_name
                )
                for model in models:
                    meta = self._parse_agent_meta(model.comment)
                    if not meta or not meta.get("databricks_agent"):
                        continue

                    agents.append({
                        "full_name": model.full_name,
                        "catalog": catalog,
                        "schema": schema_name,
                        "name": model.name,
                        "description": self._clean_description(model.comment),
                        "endpoint_url": meta.get("endpoint_url"),
                        "capabilities": meta.get("capabilities", "").split(",") if meta.get("capabilities") else None,
                    })
            except Exception as e:
                logger.debug(f"Failed to list models in {catalog}.{schema_name}: {e}")
                continue

        return agents
+ + Args: + catalog: UC catalog name + schema: UC schema name + name: Agent name + + Returns: + True if deleted, False if not found + + Raises: + UCRegistrationError: If deletion fails + """ + client = self._get_client() + full_name = f"{catalog}.{schema}.{name}" + + try: + client.registered_models.delete(full_name) + logger.info(f"Deleted agent '{full_name}'") + return True + except Exception as e: + if "does not exist" in str(e).lower(): + return False + raise UCRegistrationError( + f"Failed to delete agent '{full_name}': {e}" + ) from e diff --git a/dbx-agent-app/examples/hello-world/requirements.txt b/dbx-agent-app/examples/hello-world/requirements.txt new file mode 100644 index 00000000..7b0556d1 --- /dev/null +++ b/dbx-agent-app/examples/hello-world/requirements.txt @@ -0,0 +1,5 @@ +fastapi>=0.115.0 +uvicorn[standard]>=0.30.0 +pydantic>=2.0.0 +httpx>=0.27.0 +databricks-sdk>=0.30.0 diff --git a/dbx-agent-app/examples/hello_agent.py b/dbx-agent-app/examples/hello_agent.py new file mode 100644 index 00000000..f8a7cc16 --- /dev/null +++ b/dbx-agent-app/examples/hello_agent.py @@ -0,0 +1,43 @@ +""" +Minimal agent example — plain FastAPI + discoverability helper. + +Build your agent however you want. Call add_agent_card() to make it +discoverable by the Agent Platform. 
+""" + +from fastapi import FastAPI, Request +from fastapi.responses import JSONResponse + +from dbx_agent_app import add_agent_card + +app = FastAPI() + + +@app.post("/invocations") +async def invocations(request: Request): + """Standard Databricks /invocations endpoint.""" + body = await request.json() + # Extract last user message + query = "" + for item in reversed(body.get("input", [])): + if isinstance(item, dict) and item.get("role") == "user": + query = item.get("content", "") + break + + return { + "output": [ + { + "type": "message", + "content": [{"type": "output_text", "text": f"Hello, {query}!"}], + } + ] + } + + +# Make this app discoverable by the Agent Platform +add_agent_card(app, name="hello", description="A minimal greeting agent", capabilities=["greetings"]) + + +if __name__ == "__main__": + import uvicorn + uvicorn.run(app, host="0.0.0.0", port=8000) diff --git a/dbx-agent-app/examples/research-agent/agent.py b/dbx-agent-app/examples/research-agent/agent.py new file mode 100644 index 00000000..af7b558e --- /dev/null +++ b/dbx-agent-app/examples/research-agent/agent.py @@ -0,0 +1,547 @@ +""" +Research Assistant - Unity Catalog Native with MLflow Tracking + +Uses SDK types (AgentRequest/AgentResponse) instead of mlflow.pyfunc.ResponsesAgent. +MLflow is still used for observability (metrics, traces, artifacts) — just not for types. + +Key Value: Most organizations have ZERO visibility into agent performance. +This shows how Databricks makes agents observable out of the box. 
+""" + +# IMPORTANT: Clean up auth environment BEFORE any Databricks SDK imports +# In Databricks Apps, both OAuth and PAT token are present in environment +# We must use OAuth-only to avoid "multiple auth methods" error +import os +if os.environ.get("DATABRICKS_CLIENT_ID"): # Running in Databricks Apps + os.environ.pop("DATABRICKS_TOKEN", None) + +from uuid import uuid4 +from typing import Generator, Dict, Any, Optional +import time +from contextlib import contextmanager +import contextlib + +from dbx_agent_app import AgentRequest, AgentResponse, StreamEvent, UserContext +from dbx_agent_app.core.compat import to_langchain_messages +from databricks_langchain import ChatDatabricks +from databricks.sdk import WorkspaceClient +from databricks.sdk.config import Config +from langgraph.graph import StateGraph, MessagesState +from langgraph.prebuilt import ToolNode, tools_condition +from langchain_core.messages import SystemMessage +from langchain_core.tools import tool +import mlflow + + +@contextlib.contextmanager +def _clean_environment(): + """ + Context manager to temporarily clean Databricks environment variables. + This prevents SDK conflicts when creating clients with explicit credentials. + + Based on Kasal's authentication pattern for Databricks Apps. + """ + old_env = {} + env_vars_to_clean = [ + "DATABRICKS_TOKEN", + "DATABRICKS_API_KEY", + "DATABRICKS_CLIENT_ID", + "DATABRICKS_CLIENT_SECRET", + "DATABRICKS_CONFIG_FILE", + "DATABRICKS_CONFIG_PROFILE" + ] + + for var in env_vars_to_clean: + if var in os.environ: + old_env[var] = os.environ.pop(var) + + try: + yield + finally: + # Restore environment variables + os.environ.update(old_env) + + +class PerformanceMetrics: + """ + Track performance metrics throughout agent execution. + + This provides the observability that most organizations lack. 
+ """ + + def __init__(self): + self.tool_calls: list[Dict[str, Any]] = [] + self.uc_function_latencies: list[float] = [] + self.total_tokens: int = 0 + self.prompt_tokens: int = 0 + self.completion_tokens: int = 0 + self.errors: list[Dict[str, str]] = [] + self.start_time: Optional[float] = None + self.end_time: Optional[float] = None + + def add_tool_call(self, tool_name: str, latency_ms: float, success: bool, result_size: int = 0): + """Record a tool call with performance data.""" + self.tool_calls.append({ + "tool_name": tool_name, + "latency_ms": latency_ms, + "success": success, + "result_size_bytes": result_size, + "timestamp": time.time() + }) + + def add_uc_function_latency(self, latency_ms: float): + """Track UC Function execution time separately.""" + self.uc_function_latencies.append(latency_ms) + + def add_error(self, error_type: str, error_message: str): + """Track errors for reliability metrics.""" + self.errors.append({ + "type": error_type, + "message": error_message, + "timestamp": time.time() + }) + + def update_token_usage(self, prompt_tokens: int, completion_tokens: int): + """Track token usage for cost estimation.""" + self.prompt_tokens += prompt_tokens + self.completion_tokens += completion_tokens + self.total_tokens = self.prompt_tokens + self.completion_tokens + + def get_summary(self) -> Dict[str, Any]: + """Get summary metrics for logging.""" + total_latency = (self.end_time - self.start_time) * 1000 if self.end_time and self.start_time else 0 + + return { + # Overall performance + "total_latency_ms": total_latency, + "total_tool_calls": len(self.tool_calls), + "successful_tool_calls": sum(1 for t in self.tool_calls if t["success"]), + "failed_tool_calls": sum(1 for t in self.tool_calls if not t["success"]), + + # UC Function performance + "uc_function_calls": len(self.uc_function_latencies), + "avg_uc_function_latency_ms": sum(self.uc_function_latencies) / len(self.uc_function_latencies) if self.uc_function_latencies else 0, + 
"max_uc_function_latency_ms": max(self.uc_function_latencies) if self.uc_function_latencies else 0, + "min_uc_function_latency_ms": min(self.uc_function_latencies) if self.uc_function_latencies else 0, + + # Token usage and cost estimation + "total_tokens": self.total_tokens, + "prompt_tokens": self.prompt_tokens, + "completion_tokens": self.completion_tokens, + "estimated_cost_usd": self._estimate_cost(), + + # Reliability + "error_count": len(self.errors), + "error_rate": len(self.errors) / max(len(self.tool_calls), 1), + + # Tool-specific metrics + "tool_breakdown": self._get_tool_breakdown() + } + + def _estimate_cost(self) -> float: + """Estimate cost based on token usage.""" + input_cost = (self.prompt_tokens / 1_000_000) * 3.0 + output_cost = (self.completion_tokens / 1_000_000) * 15.0 + return round(input_cost + output_cost, 6) + + def _get_tool_breakdown(self) -> Dict[str, Dict[str, Any]]: + """Get per-tool metrics.""" + breakdown = {} + for tool_call in self.tool_calls: + tool_name = tool_call["tool_name"] + if tool_name not in breakdown: + breakdown[tool_name] = { + "call_count": 0, + "total_latency_ms": 0, + "success_count": 0, + "fail_count": 0 + } + + breakdown[tool_name]["call_count"] += 1 + breakdown[tool_name]["total_latency_ms"] += tool_call["latency_ms"] + if tool_call["success"]: + breakdown[tool_name]["success_count"] += 1 + else: + breakdown[tool_name]["fail_count"] += 1 + + for tool_name in breakdown: + tool_data = breakdown[tool_name] + tool_data["avg_latency_ms"] = tool_data["total_latency_ms"] / tool_data["call_count"] + tool_data["success_rate"] = tool_data["success_count"] / tool_data["call_count"] + + return breakdown + + +class SGPResearchAgent: + """ + Research assistant with comprehensive MLflow performance tracking. + + Uses SDK types (AgentRequest/AgentResponse) — no mlflow.pyfunc inheritance. + MLflow is used purely for observability (metrics, traces, artifacts). 
+ """ + + def __init__(self, config=None): + """Initialize agent with UC Function tools and MLflow tracking.""" + self.config = config or {} + + # UC configuration + self.catalog = self.config.get("catalog", "main") + self.schema = self.config.get("schema", "agents") + + # Workspace client for UC Function execution + import os + + workspace_url = os.environ.get("DATABRICKS_HOST", "https://fevm-serverless-dxukih.cloud.databricks.com") + is_databricks_app = os.environ.get("DATABRICKS_CLIENT_ID") is not None + client_id = os.environ.get("DATABRICKS_CLIENT_ID") + client_secret = os.environ.get("DATABRICKS_CLIENT_SECRET") + token = os.environ.get("DATABRICKS_TOKEN") + + with _clean_environment(): + if is_databricks_app: + self.workspace = WorkspaceClient( + host=workspace_url, + client_id=client_id, + client_secret=client_secret + ) + else: + if token: + self.workspace = WorkspaceClient( + host=workspace_url, + token=token + ) + else: + self.workspace = WorkspaceClient() + + self.llm = ChatDatabricks( + endpoint=self.config.get("endpoint", "databricks-claude-sonnet-4-5"), + temperature=self.config.get("temperature", 0.7), + max_tokens=self.config.get("max_tokens", 4096), + ) + + # Performance tracking + self.metrics = PerformanceMetrics() + + # Per-request user context for user-scoped UC execution + self._current_user_context: UserContext | None = None + + # Cache warehouse ID to avoid repeated lookups + self._warehouse_id_cache = None + + # Create tools that call UC Functions + self.tools = self._create_uc_tools() + + # Bind tools to LLM + self.llm_with_tools = self.llm.bind_tools(self.tools) + + # Build LangGraph workflow + self.graph = self._create_graph() + + @contextmanager + def _track_tool_execution(self, tool_name: str): + """Context manager to track tool execution time and status.""" + start_time = time.time() + success = False + result_size = 0 + + try: + yield + success = True + except Exception as e: + self.metrics.add_error(type(e).__name__, str(e)) + raise 
+ finally: + latency_ms = (time.time() - start_time) * 1000 + self.metrics.add_tool_call(tool_name, latency_ms, success, result_size) + + if mlflow.active_run(): + mlflow.log_metric(f"tool_{tool_name}_latency_ms", latency_ms) + mlflow.log_metric(f"tool_{tool_name}_success", 1 if success else 0) + + def _execute_uc_function(self, statement: str, parameters: list = None) -> Any: + """Execute UC Function with performance tracking. + + When user context is available, executes as the calling user so that + Unity Catalog row-level security and column masks apply per-user. + Falls back to service principal when no user auth is configured. + """ + start_time = time.time() + + # Use user-scoped client when available for UC governance enforcement + client = self.workspace + if self._current_user_context and self._current_user_context.is_authenticated: + client = self._current_user_context.get_workspace_client() + + try: + result = client.statement_execution.execute_statement( + warehouse_id=self._get_warehouse_id(), + statement=statement, + parameters=parameters or [], + wait_timeout="30s" + ) + + latency_ms = (time.time() - start_time) * 1000 + self.metrics.add_uc_function_latency(latency_ms) + + if mlflow.active_run(): + mlflow.log_metric("uc_function_latency_ms", latency_ms) + + return result + + except Exception as e: + latency_ms = (time.time() - start_time) * 1000 + self.metrics.add_uc_function_latency(latency_ms) + self.metrics.add_error("UCFunctionError", str(e)) + + if mlflow.active_run(): + mlflow.log_metric("uc_function_error", 1) + + raise + + def _create_uc_tools(self): + """Create LangChain tools that wrap Unity Catalog Functions with tracking.""" + + @tool + def search_transcripts(query: str, top_k: int = 10) -> str: + """Search expert interview transcripts for insights on a topic.""" + with self._track_tool_execution("search_transcripts"): + try: + statement = f""" + SELECT * FROM TABLE({self.catalog}.{self.schema}.search_transcripts( + query => :query, + 
top_k => :top_k + )) + """ + parameters = [ + {"name": "query", "value": query}, + {"name": "top_k", "value": str(top_k)} + ] + + result = self._execute_uc_function(statement, parameters) + formatted = self._format_search_results(result) + + if mlflow.active_run(): + mlflow.log_param("search_query", query[:100]) + mlflow.log_metric("search_results_count", len(result.result.data_array) if result.result and result.result.data_array else 0) + + return formatted + + except Exception as e: + return f"Error searching transcripts: {str(e)}\n\nNote: Ensure UC Function '{self.catalog}.{self.schema}.search_transcripts' is registered and you have EXECUTE permissions." + + @tool + def get_expert_profile(expert_id: str) -> str: + """Get detailed profile information for a specific expert.""" + with self._track_tool_execution("get_expert_profile"): + try: + statement = f""" + SELECT * FROM TABLE({self.catalog}.{self.schema}.get_expert_profile( + expert_id => :expert_id + )) + """ + parameters = [{"name": "expert_id", "value": expert_id}] + + result = self._execute_uc_function(statement, parameters) + formatted = self._format_expert_profile(result) + + if mlflow.active_run(): + mlflow.log_param("expert_id", expert_id) + + return formatted + + except Exception as e: + return f"Error getting expert profile: {str(e)}" + + return [search_transcripts, get_expert_profile] + + def _get_warehouse_id(self) -> str: + """Get SQL warehouse ID with caching.""" + if self._warehouse_id_cache: + return self._warehouse_id_cache + + if "warehouse_id" in self.config: + self._warehouse_id_cache = self.config["warehouse_id"] + return self._warehouse_id_cache + + warehouses = self.workspace.warehouses.list() + for warehouse in warehouses: + if warehouse.enable_serverless_compute: + self._warehouse_id_cache = warehouse.id + return self._warehouse_id_cache + + first_warehouse = next(iter(warehouses), None) + if first_warehouse: + self._warehouse_id_cache = first_warehouse.id + return 
self._warehouse_id_cache + + raise ValueError("No SQL warehouse available. Please configure warehouse_id.") + + def _format_search_results(self, result) -> str: + """Format UC Function search results for agent consumption.""" + if not result.result or not result.result.data_array: + return "No transcripts found matching your query." + + rows = result.result.data_array + if len(rows) == 0: + return "No transcripts found matching your query." + + formatted = f"Found {len(rows)} relevant transcripts:\n\n" + + for i, row in enumerate(rows, 1): + transcript_id = row[0] + text = row[1] + expert_id = row[2] + expert_name = row[3] + score = row[4] + + formatted += f"**{i}. {expert_name}** (Expert ID: {expert_id})\n" + formatted += f"Relevance Score: {score:.2f}\n\n" + + if len(text) > 300: + text = text[:300] + "..." + formatted += f"{text}\n\n" + formatted += f"_Transcript ID: {transcript_id}_\n\n" + + formatted += "\n---\n" + formatted += f"Data source: Unity Catalog Function ({self.catalog}.{self.schema}.search_transcripts)\n" + formatted += f"Powered by: Databricks Vector Search\n" + + return formatted + + def _format_expert_profile(self, result) -> str: + """Format expert profile results.""" + if not result.result or not result.result.data_array: + return "Expert profile not found." + + row = result.result.data_array[0] + + formatted = f"**Expert Profile**\n\n" + formatted += f"Name: {row[1]}\n" + formatted += f"ID: {row[0]}\n" + formatted += f"Credentials: {row[2]}\n\n" + formatted += f"**Bio:**\n{row[3]}\n\n" + formatted += f"**Specialties:** {row[4]}\n" + + return formatted + + def _create_graph(self): + """Build LangGraph state machine with tracking.""" + + def agent_node(state: MessagesState): + """Agent reasoning node with token usage tracking.""" + messages = state["messages"] + + system_msg = SystemMessage(content=f"""You are an expert research assistant with access to expert interview transcripts via Unity Catalog. 
+ +Your tools are Unity Catalog Functions registered in the {self.catalog}.{self.schema} schema: +- search_transcripts: Semantic search over transcripts using Vector Search +- get_expert_profile: Get detailed expert information + +Your role: +- Search transcripts to find relevant expert opinions and insights +- Synthesize information across multiple interviews +- Cite specific experts with their credentials and IDs +- Provide balanced perspectives when experts disagree + +When answering: +1. Use search_transcripts to find relevant information +2. Quote specific experts with their names and credentials +3. Reference expert IDs for traceability +4. Use get_expert_profile for detailed expert background +5. Summarize themes across multiple interviews +6. Be clear about confidence level in findings""") + + start_time = time.time() + response = self.llm_with_tools.invoke([system_msg] + messages) + llm_latency_ms = (time.time() - start_time) * 1000 + + if hasattr(response, "response_metadata"): + usage = response.response_metadata.get("usage", {}) + if usage: + self.metrics.update_token_usage( + usage.get("prompt_tokens", 0), + usage.get("completion_tokens", 0) + ) + + if mlflow.active_run(): + mlflow.log_metric("llm_latency_ms", llm_latency_ms) + + return {"messages": [response]} + + workflow = StateGraph(MessagesState) + workflow.add_node("agent", agent_node) + workflow.add_node("tools", ToolNode(self.tools)) + workflow.set_entry_point("agent") + workflow.add_conditional_edges("agent", tools_condition) + workflow.add_edge("tools", "agent") + + return workflow.compile() + + def predict(self, request: AgentRequest) -> AgentResponse: + """Non-streaming prediction with comprehensive tracking.""" + self._current_user_context = request.user_context + self.metrics = PerformanceMetrics() + self.metrics.start_time = time.time() + + with mlflow.start_run(nested=True): + mlflow.log_param("catalog", self.catalog) + mlflow.log_param("schema", self.schema) + 
mlflow.log_param("model_endpoint", self.config.get("endpoint")) + + # Convert SDK request to LangChain messages + messages = to_langchain_messages(request) + + result = self.graph.invoke({"messages": messages}) + final_message = result["messages"][-1] + + self.metrics.end_time = time.time() + + # Log all metrics to MLflow + summary = self.metrics.get_summary() + for metric_name, metric_value in summary.items(): + if isinstance(metric_value, (int, float)): + mlflow.log_metric(metric_name, metric_value) + elif isinstance(metric_value, dict): + for sub_key, sub_value in metric_value.items(): + if isinstance(sub_value, dict): + for subsub_key, subsub_value in sub_value.items(): + if isinstance(subsub_value, (int, float)): + mlflow.log_metric(f"{metric_name}_{sub_key}_{subsub_key}", subsub_value) + + import json + with open("/tmp/agent_metrics.json", "w") as f: + json.dump(summary, f, indent=2) + mlflow.log_artifact("/tmp/agent_metrics.json") + + return AgentResponse.text(final_message.content) + + def predict_stream(self, request: AgentRequest) -> Generator[StreamEvent, None, None]: + """Streaming prediction with tracking.""" + self._current_user_context = request.user_context + self.metrics = PerformanceMetrics() + self.metrics.start_time = time.time() + + with mlflow.start_run(nested=True): + mlflow.log_param("catalog", self.catalog) + mlflow.log_param("schema", self.schema) + mlflow.log_param("streaming", True) + + messages = to_langchain_messages(request) + + item_id = str(uuid4()) + aggregated_content = "" + + for chunk in self.graph.stream({"messages": messages}, stream_mode="messages"): + if hasattr(chunk[0], "content") and chunk[0].content: + delta = chunk[0].content + aggregated_content += delta + yield StreamEvent.text_delta(delta, item_id=item_id) + + self.metrics.end_time = time.time() + + summary = self.metrics.get_summary() + for metric_name, metric_value in summary.items(): + if isinstance(metric_value, (int, float)): + mlflow.log_metric(metric_name, 
metric_value) + + yield StreamEvent.done(aggregated_content, item_id=item_id) diff --git a/dbx-agent-app/examples/research-agent/app.py b/dbx-agent-app/examples/research-agent/app.py new file mode 100644 index 00000000..e964b2f1 --- /dev/null +++ b/dbx-agent-app/examples/research-agent/app.py @@ -0,0 +1,108 @@ +""" +Research Agent — @app_agent pattern + +The decorated function IS the /invocations handler. + +Endpoints (auto-generated): +- POST /invocations — Databricks Responses Agent protocol +- GET /.well-known/agent.json — A2A agent card +- GET /health — Health check +- POST /api/mcp — MCP JSON-RPC server +- POST /api/tools/ — Individual tool endpoints +""" + +import os +import mlflow + +from dbx_agent_app import app_agent, AgentRequest, AgentResponse +from agent import SGPResearchAgent + + +# Initialize agent (singleton) +_agent = None + + +def get_agent() -> SGPResearchAgent: + """Get or create agent instance.""" + global _agent + if _agent is None: + config = { + "catalog": os.environ.get("UC_CATALOG", "main"), + "schema": os.environ.get("UC_SCHEMA", "agents"), + "endpoint": os.environ.get("MODEL_ENDPOINT", "databricks-claude-sonnet-4-5"), + "temperature": float(os.environ.get("TEMPERATURE", "0.7")), + "max_tokens": int(os.environ.get("MAX_TOKENS", "4096")), + } + + if "WAREHOUSE_ID" in os.environ: + config["warehouse_id"] = os.environ["WAREHOUSE_ID"] + + experiment_name = os.environ.get( + "MLFLOW_EXPERIMENT_NAME", + "/Users/databricks/agents-agent-tracking" + ) + try: + mlflow.set_experiment(experiment_name) + except Exception: + pass + + _agent = SGPResearchAgent(config) + + return _agent + + +@app_agent( + name="research", + description="Research Agent with MLflow performance tracking", + capabilities=["research", "search", "expert_analysis", "tracking"], + uc_catalog=os.environ.get("UC_CATALOG", "main"), + uc_schema=os.environ.get("UC_SCHEMA", "agents"), +) +async def research(request: AgentRequest) -> AgentResponse: + """Route to the research agent's 
predict method.""" + agent = get_agent() + return agent.predict(request) + + +@research.tool(description="Get agent performance metrics") +async def get_metrics() -> dict: + """Get agent performance metrics.""" + agent = get_agent() + if agent.metrics: + return agent.metrics.get_summary() + return {"message": "No metrics available yet"} + + +@research.tool(description="Get agent configuration details") +async def get_config() -> dict: + """Get agent configuration.""" + agent = get_agent() + return { + "catalog": agent.catalog, + "schema": agent.schema, + "model_endpoint": agent.config.get("endpoint"), + "temperature": agent.config.get("temperature"), + "max_tokens": agent.config.get("max_tokens"), + } + + +# The FastAPI app — uvicorn app:research.app +app = research.app + + +if __name__ == "__main__": + import uvicorn + + os.environ.setdefault("UC_CATALOG", "main") + os.environ.setdefault("UC_SCHEMA", "agents") + os.environ.setdefault("MODEL_ENDPOINT", "databricks-claude-sonnet-4-5") + + print("Starting Research Agent (@app_agent)") + print("\nEndpoints:") + print(" http://localhost:8000/invocations - Responses Agent protocol") + print(" http://localhost:8000/.well-known/agent.json - Agent card (A2A)") + print(" http://localhost:8000/health - Health check") + print(" http://localhost:8000/api/mcp - MCP server") + print() + + uvicorn.run(app, host="0.0.0.0", port=8000) diff --git a/dbx-agent-app/examples/research-agent/app.yaml b/dbx-agent-app/examples/research-agent/app.yaml new file mode 100644 index 00000000..02eec781 --- /dev/null +++ b/dbx-agent-app/examples/research-agent/app.yaml @@ -0,0 +1,60 @@ +command: + - "python" + - "-m" + - "uvicorn" + - "app:app" + - "--host" + - "0.0.0.0" + - "--port" + - "8000" + +env: + # Unity Catalog configuration + - name: UC_CATALOG + value: main + + - name: UC_SCHEMA + value: agents + + # Model endpoint + - name: MODEL_ENDPOINT + value: databricks-claude-sonnet-4-5 + + # Model parameters + - name: TEMPERATURE + value: "0.7" + 
+ - name: MAX_TOKENS + value: "4096" + + # SQL Warehouse ID (optional - will auto-detect serverless if not provided) + # - name: WAREHOUSE_ID + # value: your-warehouse-id + + # MLflow tracking + - name: MLFLOW_EXPERIMENT_NAME + value: /Users/databricks/agent-tracking + + # Python path + - name: PYTHONPATH + value: /Workspace/Applications/${app_name} + +# ── User Authorization (on-behalf-of) ────────────────────────────── +# To enable per-user UC governance (row-level security, column masks), +# configure user authorization scopes on this app via the Databricks CLI: +# +# databricks apps update --json '{ +# "resources": [{ +# "name": "user-auth", +# "description": "On-behalf-of user authorization", +# "sql": { "permission": "CAN_USE" } +# }] +# }' +# +# Or use the Custom App Integration API to add OAuth scopes: +# POST /api/2.0/accounts/{account_id}/oauth2/custom-app-integrations +# scopes: ["sql", "iam.current-user:read"] +# +# When configured, the app receives X-Forwarded-Access-Token headers +# and the @app_agent decorator populates request.user_context automatically. +# See: https://docs.databricks.com/en/dev-tools/databricks-apps/app-development.html diff --git a/dbx-agent-app/examples/research-agent/dbx_agent_app/__init__.py b/dbx-agent-app/examples/research-agent/dbx_agent_app/__init__.py new file mode 100644 index 00000000..95db07d3 --- /dev/null +++ b/dbx-agent-app/examples/research-agent/dbx_agent_app/__init__.py @@ -0,0 +1,41 @@ +""" +dbx-agent-app: Framework for building discoverable AI agents on Databricks Apps. 
+ +This package provides: +- @app_agent: Decorator to turn an async function into a discoverable agent +- AgentDiscovery: Discover agents in your Databricks workspace +- A2AClient: Communicate with agents using the A2A protocol +- UCAgentRegistry: Register agents in Unity Catalog +- MCPServerConfig: Configure MCP server for agent tools +""" + +from .discovery import AgentDiscovery, DiscoveredAgent, AgentDiscoveryResult, A2AClient, A2AClientError +from .mcp import MCPServerConfig, setup_mcp_server, UCFunctionAdapter +from .registry import UCAgentRegistry, UCAgentSpec, UCRegistrationError +from .dashboard import create_dashboard_app + +try: + from importlib.metadata import version + __version__ = version("dbx-agent-app") +except Exception: + __version__ = "0.1.0" + +__all__ = [ + # Discovery + "AgentDiscovery", + "DiscoveredAgent", + "AgentDiscoveryResult", + "A2AClient", + "A2AClientError", + # Registry + "UCAgentRegistry", + "UCAgentSpec", + "UCRegistrationError", + # MCP + "MCPServerConfig", + "setup_mcp_server", + "UCFunctionAdapter", + # Dashboard + "create_dashboard_app", +] + diff --git a/dbx-agent-app/examples/research-agent/dbx_agent_app/core/__init__.py b/dbx-agent-app/examples/research-agent/dbx_agent_app/core/__init__.py new file mode 100644 index 00000000..623da88c --- /dev/null +++ b/dbx-agent-app/examples/research-agent/dbx_agent_app/core/__init__.py @@ -0,0 +1 @@ +"""Core agent application components.""" diff --git a/dbx-agent-app/examples/research-agent/dbx_agent_app/dashboard/__init__.py b/dbx-agent-app/examples/research-agent/dbx_agent_app/dashboard/__init__.py new file mode 100644 index 00000000..89929a08 --- /dev/null +++ b/dbx-agent-app/examples/research-agent/dbx_agent_app/dashboard/__init__.py @@ -0,0 +1,14 @@ +""" +Developer dashboard for agent discovery. 
+ +Launch via CLI: + dbx-agent-app dashboard --profile my-profile + +Or programmatically: + from dbx_agent_app.dashboard import create_dashboard_app, run_dashboard +""" + +from .app import create_dashboard_app +from .cli import main as run_dashboard + +__all__ = ["create_dashboard_app", "run_dashboard"] diff --git a/dbx-agent-app/examples/research-agent/dbx_agent_app/dashboard/app.py b/dbx-agent-app/examples/research-agent/dbx_agent_app/dashboard/app.py new file mode 100644 index 00000000..d16c3d54 --- /dev/null +++ b/dbx-agent-app/examples/research-agent/dbx_agent_app/dashboard/app.py @@ -0,0 +1,112 @@ +""" +FastAPI application for the developer dashboard. + +Routes: + HTML: GET / — agent list page + GET /agent/{name} — agent detail page + API: GET /api/agents — JSON list of agents + GET /api/agents/{name}/card — full agent card + POST /api/agents/{name}/mcp — MCP JSON-RPC proxy + POST /api/scan — trigger re-scan + GET /health — health check +""" + +import logging +from typing import Optional + +from fastapi import FastAPI, Request +from fastapi.responses import HTMLResponse, JSONResponse + +from .scanner import DashboardScanner +from .templates import render_agent_list, render_agent_detail + +logger = logging.getLogger(__name__) + + +def create_dashboard_app( + scanner: DashboardScanner, + profile: Optional[str] = None, +) -> FastAPI: + """Build and return the dashboard FastAPI app.""" + app = FastAPI(title="dbx-agent-app dashboard", docs_url=None, redoc_url=None) + + # --- HTML pages ------------------------------------------------------- + + @app.get("/", response_class=HTMLResponse) + async def index(): + agents = scanner.get_agents() + return render_agent_list(agents) + + @app.get("/agent/{name}", response_class=HTMLResponse) + async def agent_detail(name: str): + agent = scanner.get_agent_by_name(name) + if not agent: + return HTMLResponse("

Agent not found

", status_code=404) + + card = None + try: + card = await scanner.get_agent_card(agent.endpoint_url) + except Exception as e: + logger.warning("Could not fetch card for %s: %s", name, e) + + return render_agent_detail(agent, card) + + # --- JSON API --------------------------------------------------------- + + @app.get("/api/agents") + async def api_agents(): + agents = scanner.get_agents() + return [ + { + "name": a.name, + "endpoint_url": a.endpoint_url, + "app_name": a.app_name, + "description": a.description, + "capabilities": a.capabilities, + "protocol_version": a.protocol_version, + } + for a in agents + ] + + @app.get("/api/agents/{name}/card") + async def api_agent_card(name: str): + agent = scanner.get_agent_by_name(name) + if not agent: + return JSONResponse({"error": "Agent not found"}, status_code=404) + + try: + card = await scanner.get_agent_card(agent.endpoint_url) + return card + except Exception as e: + return JSONResponse({"error": str(e)}, status_code=502) + + @app.post("/api/agents/{name}/mcp") + async def api_mcp_proxy(name: str, request: Request): + agent = scanner.get_agent_by_name(name) + if not agent: + return JSONResponse({"error": "Agent not found"}, status_code=404) + + try: + payload = await request.json() + result = await scanner.proxy_mcp(agent.endpoint_url, payload) + return result + except Exception as e: + return JSONResponse( + {"jsonrpc": "2.0", "id": None, "error": {"code": -1, "message": str(e)}}, + status_code=502, + ) + + @app.post("/api/scan") + async def api_scan(): + agents = await scanner.scan() + return {"count": len(agents), "agents": [a.name for a in agents]} + + @app.get("/health") + async def health(): + return { + "status": "ok", + "agents_cached": len(scanner.get_agents()), + "profile": profile, + } + + return app diff --git a/dbx-agent-app/examples/research-agent/dbx_agent_app/dashboard/cli.py b/dbx-agent-app/examples/research-agent/dbx_agent_app/dashboard/cli.py new file mode 100644 index 00000000..3b17aa90 --- 
"""
CLI entry point for the developer dashboard.

Usage:
    dbx-agent-app dashboard --profile my-profile --port 8501
"""

import argparse
import asyncio
import logging
import sys
import webbrowser

import uvicorn

from .scanner import DashboardScanner
from .app import create_dashboard_app


def _build_parser() -> argparse.ArgumentParser:
    """Construct the top-level parser with the `dashboard` subcommand."""
    parser = argparse.ArgumentParser(
        prog="dbx-agent-app",
        description="Developer dashboard for Databricks agent discovery",
    )
    subcommands = parser.add_subparsers(dest="command")

    dashboard = subcommands.add_parser("dashboard", help="Launch the agent discovery dashboard")
    dashboard.add_argument("--profile", type=str, default=None, help="Databricks CLI profile")
    dashboard.add_argument("--port", type=int, default=8501, help="Port to serve on (default: 8501)")
    dashboard.add_argument("--host", type=str, default="127.0.0.1", help="Host to bind (default: 127.0.0.1)")
    dashboard.add_argument("--no-browser", action="store_true", help="Don't auto-open browser")

    return parser


def main():
    """Parse arguments, run an initial workspace scan, then serve the dashboard."""
    parser = _build_parser()
    args = parser.parse_args()

    # Only the `dashboard` subcommand exists; anything else shows help.
    if args.command != "dashboard":
        parser.print_help()
        sys.exit(1)

    logging.basicConfig(level=logging.INFO, format="%(levelname)s %(name)s: %(message)s")

    scanner = DashboardScanner(profile=args.profile)

    # Best-effort initial scan: a failure is reported but does not block startup.
    print(f"Scanning workspace for agents (profile={args.profile or 'default'})...")
    try:
        agents = asyncio.run(scanner.scan())
        print(f"Found {len(agents)} agent(s)")
    except Exception as e:
        print(f"Initial scan failed: {e}", file=sys.stderr)
        print("Dashboard will start anyway — use the Scan button to retry.")

    app = create_dashboard_app(scanner, profile=args.profile)

    url = f"http://{args.host}:{args.port}"
    if not args.no_browser:
        webbrowser.open(url)

    print(f"Dashboard running at {url}")
    uvicorn.run(app, host=args.host, port=args.port, log_level="warning")


if __name__ == "__main__":
    main()
# diff --git a/dbx-agent-app/examples/research-agent/dbx_agent_app/dashboard/scanner.py (new file)
"""
Dashboard scanner — wraps AgentDiscovery + A2AClient with caching and MCP proxy.
"""

import asyncio
import logging
from typing import Dict, Any, List, Optional

import httpx

from ..discovery import AgentDiscovery, DiscoveredAgent, A2AClient, A2AClientError

logger = logging.getLogger(__name__)


class DashboardScanner:
    """
    Thin wrapper around AgentDiscovery that adds result caching
    and MCP JSON-RPC proxying for the dashboard UI.
    """

    def __init__(self, profile: Optional[str] = None):
        self._discovery = AgentDiscovery(profile=profile)
        self._agents: List[DiscoveredAgent] = []   # cache of the last scan
        self._scan_lock = asyncio.Lock()           # serializes concurrent scans
        self._scanned = False

    async def scan(self) -> List[DiscoveredAgent]:
        """Run workspace discovery and cache results.

        Safe under concurrency: an asyncio.Lock ensures only one discovery
        pass runs at a time; later callers see the refreshed cache.
        """
        async with self._scan_lock:
            result = await self._discovery.discover_agents()
            self._agents = result.agents
            self._scanned = True
            if result.errors:
                # Discovery errors are non-fatal: log them, still return agents.
                for err in result.errors:
                    logger.warning("Discovery error: %s", err)
            return self._agents

    def get_agents(self) -> List[DiscoveredAgent]:
        """Return a copy of the cached agent list from the last scan."""
        return list(self._agents)

    def get_agent_by_name(self, name: str) -> Optional[DiscoveredAgent]:
        """Look up a cached agent by its agent name or backing app name."""
        for agent in self._agents:
            if agent.name == name or agent.app_name == name:
                return agent
        return None

    @property
    def workspace_token(self) -> Optional[str]:
        """Auth token extracted during discovery, used for cross-app requests."""
        return self._discovery._workspace_token

    async def get_agent_card(self, endpoint_url: str) -> Dict[str, Any]:
        """Fetch the full agent card JSON from a remote agent."""
        async with A2AClient(timeout=10.0) as client:
            return await client.fetch_agent_card(
                endpoint_url, auth_token=self.workspace_token
            )

    async def proxy_mcp(self, endpoint_url: str, payload: Dict[str, Any]) -> Dict[str, Any]:
        """
        Forward a JSON-RPC request to an agent's MCP endpoint.

        Args:
            endpoint_url: Agent base URL
            payload: Complete JSON-RPC 2.0 request body

        Returns:
            JSON-RPC response from the agent

        Raises:
            httpx.HTTPStatusError: If the agent responds with a 4xx/5xx status
        """
        mcp_url = endpoint_url.rstrip("/") + "/api/mcp"
        headers = {"Content-Type": "application/json"}
        if self.workspace_token:
            headers["Authorization"] = f"Bearer {self.workspace_token}"

        async with httpx.AsyncClient(timeout=30.0, follow_redirects=True) as http:
            response = await http.post(mcp_url, json=payload, headers=headers)
            response.raise_for_status()
            return response.json()


# diff --git a/dbx-agent-app/examples/research-agent/dbx_agent_app/dashboard/templates.py (new file)
"""
Server-rendered HTML templates for the dashboard.

Pure Python functions returning HTML strings — no Jinja2, no React, no build step.
"""

import html
import json
from typing import List, Dict, Any, Optional

from ..discovery import DiscoveredAgent


# ---------------------------------------------------------------------------
# Base layout
# ---------------------------------------------------------------------------

def render_base(title: str, content: str) -> str:
    """HTML shell with inline CSS (dark theme).

    NOTE(review): the original markup is garbled in this copy of the patch;
    the shell below is a minimal well-formed reconstruction — confirm class
    names and CSS against the deployed dashboard.
    """
    return f"""<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>{html.escape(title)}</title>
</head>
<body>
<header class="topbar">
  <div class="brand">dbx-agent-app dashboard</div>
</header>
<main class="container">
{content}
</main>
</body>
</html>"""
+

No agents discovered

+

No agent-enabled Databricks Apps found in your workspace. Click Scan to retry.

+
""" + else: + cards = [] + for a in agents: + caps = "" + if a.capabilities: + badges = "".join( + f'{html.escape(c.strip())} ' + for c in a.capabilities.split(",") + ) + caps = f'
{badges}
' + + desc = html.escape(a.description or "No description") + cards.append(f""" + +
+

{html.escape(a.name)}

+

{desc}

+
+ App: {html.escape(a.app_name)} + {f'Protocol: {html.escape(a.protocol_version)}' if a.protocol_version else ''} +
+ {caps} +
+
""") + cards_html = f'
{"".join(cards)}
' + + return render_base( + "Agent Dashboard", + f""" +
+ {len(agents)} agent{"s" if len(agents) != 1 else ""} discovered + +
+{cards_html} +""", + ) + + +# --------------------------------------------------------------------------- +# Agent detail page +# --------------------------------------------------------------------------- + +def render_agent_detail( + agent: DiscoveredAgent, + card: Optional[Dict[str, Any]] = None, +) -> str: + """Detail page: agent card JSON, tools list, MCP test panel.""" + card_json = json.dumps(card, indent=2) if card else "Card not available" + + # Extract tools from card if present + tools_html = "" + if card: + skills = card.get("skills") or card.get("tools") or [] + if skills: + rows = [] + for t in skills: + name = html.escape(t.get("name", t.get("id", "unknown"))) + desc = html.escape(t.get("description", "")) + rows.append( + f'
{name}' + f'
{desc}
' + ) + tools_html = f""" +
+

Tools ({len(skills)})

+ {"".join(rows)} +
""" + + safe_name = html.escape(agent.name) + safe_endpoint = html.escape(agent.endpoint_url) + + return render_base( + f"{safe_name} — Agent Dashboard", + f""" +
+ ← All agents +

{safe_name}

+

{html.escape(agent.description or 'No description')}

+
+ Endpoint: {safe_endpoint} + App: {html.escape(agent.app_name)} + {f'{html.escape(agent.protocol_version)}' if agent.protocol_version else ''} +
+
+ +
+

Agent Card

+
{html.escape(card_json)}
+
+ +{tools_html} + +
+

MCP Test Panel

+

+ Send a JSON-RPC request to this agent's /api/mcp endpoint. +

+
+ + +
+ + +
+
+ +""", + ) diff --git a/dbx-agent-app/examples/research-agent/dbx_agent_app/discovery/__init__.py b/dbx-agent-app/examples/research-agent/dbx_agent_app/discovery/__init__.py new file mode 100644 index 00000000..d6d04008 --- /dev/null +++ b/dbx-agent-app/examples/research-agent/dbx_agent_app/discovery/__init__.py @@ -0,0 +1,24 @@ +""" +Agent discovery for Databricks Apps. + +This module provides clients and utilities for discovering agent-enabled +Databricks Apps that expose A2A protocol agent cards. +""" + +from .agent_discovery import ( + AgentDiscovery, + DiscoveredAgent, + AgentDiscoveryResult, +) +from .a2a_client import ( + A2AClient, + A2AClientError, +) + +__all__ = [ + "AgentDiscovery", + "DiscoveredAgent", + "AgentDiscoveryResult", + "A2AClient", + "A2AClientError", +] diff --git a/dbx-agent-app/examples/research-agent/dbx_agent_app/discovery/a2a_client.py b/dbx-agent-app/examples/research-agent/dbx_agent_app/discovery/a2a_client.py new file mode 100644 index 00000000..1243d1a3 --- /dev/null +++ b/dbx-agent-app/examples/research-agent/dbx_agent_app/discovery/a2a_client.py @@ -0,0 +1,268 @@ +""" +A2A Client for agent-to-agent communication. + +Implements the A2A protocol for discovering and communicating with peer agents. +""" + +import json +import uuid +import logging +from typing import Dict, Any, Optional, AsyncIterator + +import httpx + +logger = logging.getLogger(__name__) + + +class A2AClientError(Exception): + """Raised when an A2A operation fails.""" + pass + + +class A2AClient: + """ + Async client for A2A protocol communication with peer agents. + + Usage: + async with A2AClient() as client: + card = await client.fetch_agent_card("https://app.databricksapps.com") + result = await client.send_message("https://app.databricksapps.com/api/a2a", "Hello") + """ + + def __init__(self, timeout: float = 60.0): + """ + Initialize A2A client. 
+ + Args: + timeout: Request timeout in seconds + """ + self.timeout = timeout + self._client: Optional[httpx.AsyncClient] = None + + async def __aenter__(self): + self._client = httpx.AsyncClient( + timeout=self.timeout, + follow_redirects=True, + ) + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + if self._client: + await self._client.aclose() + + def _auth_headers(self, auth_token: Optional[str] = None) -> Dict[str, str]: + """Build authentication headers.""" + headers = {"Content-Type": "application/json"} + if auth_token: + headers["Authorization"] = f"Bearer {auth_token}" + return headers + + async def fetch_agent_card( + self, + base_url: str, + auth_token: Optional[str] = None, + ) -> Dict[str, Any]: + """ + Fetch an agent's A2A protocol agent card. + + Tries /.well-known/agent.json first, then /card as fallback. + Handles OAuth redirects gracefully (returns error instead of following). + + Args: + base_url: Base URL of the agent application + auth_token: Optional OAuth token for authenticated requests + + Returns: + Agent card JSON data + + Raises: + A2AClientError: If agent card cannot be fetched + + Example: + >>> async with A2AClient() as client: + >>> card = await client.fetch_agent_card("https://app.databricksapps.com") + >>> print(card["name"], card["description"]) + """ + if not self._client: + raise A2AClientError("Client not initialized. 
Use async context manager.") + + headers = {} + if auth_token: + headers["Authorization"] = f"Bearer {auth_token}" + + # Use a client that doesn't follow redirects to detect OAuth flows + async with httpx.AsyncClient(timeout=self.timeout, follow_redirects=False) as probe_client: + for path in ["/.well-known/agent.json", "/card"]: + try: + url = base_url.rstrip("/") + path + response = await probe_client.get(url, headers=headers) + + # OAuth redirect detected - app requires interactive auth + if response.status_code in (301, 302, 303, 307, 308): + logger.debug(f"OAuth redirect detected for {url}") + continue + + if response.status_code == 200: + if not response.text or response.text.isspace(): + logger.debug(f"Empty response body for {url}") + continue + return response.json() + + except Exception as e: + logger.debug(f"Agent card fetch failed for {url}: {e}") + continue + + raise A2AClientError(f"Could not fetch agent card from {base_url}") + + async def _jsonrpc_call( + self, + url: str, + method: str, + params: Dict[str, Any], + auth_token: Optional[str] = None, + ) -> Dict[str, Any]: + """ + Send a JSON-RPC 2.0 request to an agent. + + Args: + url: A2A endpoint URL + method: JSON-RPC method name (e.g., "message/send") + params: Method parameters + auth_token: Optional authentication token + + Returns: + JSON-RPC result + + Raises: + A2AClientError: If request fails or returns error + """ + if not self._client: + raise A2AClientError("Client not initialized. 
Use async context manager.") + + payload = { + "jsonrpc": "2.0", + "id": str(uuid.uuid4()), + "method": method, + "params": params, + } + + try: + response = await self._client.post( + url, + json=payload, + headers=self._auth_headers(auth_token), + ) + response.raise_for_status() + result = response.json() + + if "error" in result: + error = result["error"] + raise A2AClientError( + f"A2A error: {error.get('message', 'Unknown')} " + f"(code: {error.get('code')})" + ) + + return result.get("result", {}) + + except httpx.TimeoutException as e: + raise A2AClientError(f"Request to {url} timed out: {e}") + except httpx.HTTPStatusError as e: + raise A2AClientError( + f"HTTP error from {url}: {e.response.status_code}" + ) + except json.JSONDecodeError as e: + raise A2AClientError(f"Invalid JSON from {url}: {e}") + + async def send_message( + self, + agent_url: str, + message: str, + context_id: Optional[str] = None, + auth_token: Optional[str] = None, + ) -> Dict[str, Any]: + """ + Send a message to a peer agent using A2A protocol. + + Args: + agent_url: Agent's A2A endpoint URL + message: Text message to send + context_id: Optional conversation context ID + auth_token: Optional authentication token + + Returns: + Agent's response + + Example: + >>> async with A2AClient() as client: + >>> response = await client.send_message( + >>> "https://app.databricksapps.com/api/a2a", + >>> "What are your capabilities?" + >>> ) + """ + params: Dict[str, Any] = { + "message": { + "messageId": str(uuid.uuid4()), + "role": "user", + "parts": [{"text": message}], + } + } + if context_id: + params["message"]["contextId"] = context_id + + return await self._jsonrpc_call( + agent_url, "message/send", params, auth_token + ) + + async def send_streaming_message( + self, + agent_url: str, + message: str, + auth_token: Optional[str] = None, + ) -> AsyncIterator[Dict[str, Any]]: + """ + Send a streaming message and yield SSE events. 
+ + Args: + agent_url: Agent's A2A endpoint URL + message: Text message to send + auth_token: Optional authentication token + + Yields: + SSE events from the agent's response stream + + Example: + >>> async with A2AClient() as client: + >>> async for event in client.send_streaming_message(url, "Analyze this"): + >>> print(event) + """ + if not self._client: + raise A2AClientError("Client not initialized. Use async context manager.") + + stream_url = agent_url.rstrip("/") + "/stream" + + payload = { + "jsonrpc": "2.0", + "id": str(uuid.uuid4()), + "method": "message/stream", + "params": { + "message": { + "messageId": str(uuid.uuid4()), + "role": "user", + "parts": [{"text": message}], + } + }, + } + + async with self._client.stream( + "POST", + stream_url, + json=payload, + headers=self._auth_headers(auth_token), + ) as response: + response.raise_for_status() + async for line in response.aiter_lines(): + if line.startswith("data: "): + try: + yield json.loads(line[6:]) + except json.JSONDecodeError: + continue diff --git a/dbx-agent-app/examples/research-agent/dbx_agent_app/discovery/agent_discovery.py b/dbx-agent-app/examples/research-agent/dbx_agent_app/discovery/agent_discovery.py new file mode 100644 index 00000000..1563b304 --- /dev/null +++ b/dbx-agent-app/examples/research-agent/dbx_agent_app/discovery/agent_discovery.py @@ -0,0 +1,253 @@ +""" +Agent discovery for Databricks Apps. + +Discovers agent-enabled Databricks Apps by scanning workspace apps +and probing for A2A protocol agent cards. 
+""" + +import asyncio +import logging +from typing import List, Dict, Any, Optional +from dataclasses import dataclass + +from databricks.sdk import WorkspaceClient + +from .a2a_client import A2AClient, A2AClientError + +logger = logging.getLogger(__name__) + +# Agent card probe paths and timeout +AGENT_CARD_PATHS = ["/.well-known/agent.json", "/card"] +AGENT_CARD_PROBE_TIMEOUT = 5.0 + + +@dataclass +class DiscoveredAgent: + """ + An agent discovered from a Databricks App. + + Attributes: + name: Agent name (from agent card or app name) + endpoint_url: Agent's base URL + description: Agent description (from agent card) + capabilities: Comma-separated list of capabilities + protocol_version: A2A protocol version + app_name: Name of the backing Databricks App + """ + name: str + endpoint_url: str + app_name: str + description: Optional[str] = None + capabilities: Optional[str] = None + protocol_version: Optional[str] = None + + +@dataclass +class AgentDiscoveryResult: + """ + Results from agent discovery operation. + + Attributes: + agents: List of discovered agents + errors: List of error messages encountered during discovery + """ + agents: List[DiscoveredAgent] + errors: List[str] + + +class AgentDiscovery: + """ + Discovers agent-enabled Databricks Apps in a workspace. + + Scans running Databricks Apps and probes for A2A protocol agent cards + to identify which apps are agents. + + Usage: + discovery = AgentDiscovery(profile="my-profile") + result = await discovery.discover_agents() + for agent in result.agents: + print(f"Found agent: {agent.name} at {agent.endpoint_url}") + """ + + def __init__(self, profile: Optional[str] = None): + """ + Initialize agent discovery. + + Args: + profile: Databricks CLI profile name (uses default if not specified) + """ + self.profile = profile + self._workspace_token: Optional[str] = None + + async def discover_agents(self) -> AgentDiscoveryResult: + """ + Discover all agent-enabled Databricks Apps in the workspace. 
+ + Returns: + AgentDiscoveryResult with discovered agents and any errors + + Example: + >>> discovery = AgentDiscovery(profile="my-profile") + >>> result = await discovery.discover_agents() + >>> print(f"Found {len(result.agents)} agents") + """ + agents: List[DiscoveredAgent] = [] + errors: List[str] = [] + + try: + app_list = await self._list_workspace_apps() + except Exception as e: + logger.error("Workspace app listing failed: %s", e) + return AgentDiscoveryResult( + agents=[], + errors=[f"Failed to list workspace apps: {e}"], + ) + + if not app_list: + return AgentDiscoveryResult(agents=[], errors=[]) + + # Probe each running app for agent card in parallel + probe_tasks = [ + self._probe_app_for_agent(app_info) + for app_info in app_list + if app_info.get("url") + ] + + if probe_tasks: + probe_results = await asyncio.gather( + *probe_tasks, return_exceptions=True + ) + + for result in probe_results: + if isinstance(result, Exception): + errors.append(str(result)) + elif result is not None: + agents.append(result) + + logger.info( + "Agent discovery: %d apps checked, %d agents found", + len(app_list), len(agents) + ) + + return AgentDiscoveryResult(agents=agents, errors=errors) + + async def _list_workspace_apps(self) -> List[Dict[str, Any]]: + """ + Enumerate Databricks Apps in the workspace. 
+ + Returns: + List of running apps with name, url, owner + """ + def _list_sync() -> tuple: + client = ( + WorkspaceClient(profile=self.profile) + if self.profile + else WorkspaceClient() + ) + + # Extract auth token for cross-app requests + auth_headers = client.config.authenticate() + auth_val = auth_headers.get("Authorization", "") + token = auth_val[7:] if auth_val.startswith("Bearer ") else None + + results = [] + for app in client.apps.list(): + # Check if app is running via compute_status or deployment status + compute_state = None + cs = getattr(app, "compute_status", None) + if cs: + compute_state = str(getattr(cs, "state", "")) + + deploy_state = None + dep = getattr(app, "active_deployment", None) + if dep: + dep_status = getattr(dep, "status", None) + if dep_status: + deploy_state = str(getattr(dep_status, "state", "")) + + app_url = getattr(app, "url", None) or "" + app_url = app_url.rstrip("/") if app_url else "" + + results.append({ + "name": app.name, + "url": app_url, + "owner": getattr(app, "creator", None) or getattr(app, "updater", None), + "compute_state": compute_state, + "deploy_state": deploy_state, + }) + + return results, token + + loop = asyncio.get_event_loop() + result_tuple = await loop.run_in_executor(None, _list_sync) + all_apps, workspace_token = result_tuple + + # Store token for probing + self._workspace_token = workspace_token + + # Filter to running apps + running = [ + a for a in all_apps + if a.get("url") and ( + "ACTIVE" in (a.get("compute_state") or "") + or "SUCCEEDED" in (a.get("deploy_state") or "") + ) + ] + + logger.info( + "Workspace apps: %d total, %d running", + len(all_apps), len(running) + ) + + return running + + async def _probe_app_for_agent( + self, + app_info: Dict[str, Any], + ) -> Optional[DiscoveredAgent]: + """ + Probe a Databricks App for an A2A agent card. 
+ + Args: + app_info: App metadata from workspace listing + + Returns: + DiscoveredAgent if agent card found, None otherwise + """ + app_url = app_info["url"] + app_name = app_info["name"] + + token = self._workspace_token + agent_card = None + + try: + logger.debug(f"Probing app '{app_name}' at {app_url}") + async with A2AClient(timeout=AGENT_CARD_PROBE_TIMEOUT) as client: + agent_card = await client.fetch_agent_card(app_url, auth_token=token) + logger.info(f"Found agent card for '{app_name}'") + except A2AClientError as e: + logger.debug(f"No agent card for '{app_name}': {e}") + return None + except Exception as e: + logger.warning(f"Probe failed for '{app_name}': {e}") + return None + + if not agent_card: + return None + + # Extract capabilities + capabilities_list = [] + caps = agent_card.get("capabilities") + if isinstance(caps, dict): + capabilities_list = list(caps.keys()) + elif isinstance(caps, list): + capabilities_list = caps + + return DiscoveredAgent( + name=agent_card.get("name", app_name), + endpoint_url=app_url, + app_name=app_name, + description=agent_card.get("description"), + capabilities=",".join(capabilities_list) if capabilities_list else None, + protocol_version=agent_card.get("protocolVersion"), + ) diff --git a/dbx-agent-app/examples/research-agent/dbx_agent_app/mcp/__init__.py b/dbx-agent-app/examples/research-agent/dbx_agent_app/mcp/__init__.py new file mode 100644 index 00000000..60ee38ad --- /dev/null +++ b/dbx-agent-app/examples/research-agent/dbx_agent_app/mcp/__init__.py @@ -0,0 +1,11 @@ +""" +Model Context Protocol (MCP) support. + +This module provides utilities for integrating agents with MCP servers +and exposing UC Functions as MCP tools. 
+""" + +from .mcp_server import MCPServer, MCPServerConfig, setup_mcp_server +from .uc_functions import UCFunctionAdapter + +__all__ = ["MCPServer", "MCPServerConfig", "setup_mcp_server", "UCFunctionAdapter"] diff --git a/dbx-agent-app/examples/research-agent/dbx_agent_app/mcp/mcp_server.py b/dbx-agent-app/examples/research-agent/dbx_agent_app/mcp/mcp_server.py new file mode 100644 index 00000000..98a456fb --- /dev/null +++ b/dbx-agent-app/examples/research-agent/dbx_agent_app/mcp/mcp_server.py @@ -0,0 +1,196 @@ +""" +MCP server implementation for agents. + +Provides an MCP server that exposes agent tools via the Model Context Protocol. +Works with any FastAPI app. +""" + +import json +import logging +from typing import Dict, Any, List, Optional +from dataclasses import dataclass + +from fastapi import Request + +logger = logging.getLogger(__name__) + + +@dataclass +class MCPServerConfig: + """ + Configuration for MCP server. + + Attributes: + name: Server name + version: Server version + description: Server description + """ + name: str + version: str = "1.0.0" + description: str = "MCP server for agent tools" + + +class MCPServer: + """ + MCP server that exposes tools via JSON-RPC. + + Accepts tools as dicts with name, description, parameters, function keys. + """ + + def __init__(self, tools, config: MCPServerConfig): + """ + Initialize MCP server. + + Args: + tools: List of ToolDefinition objects or dicts with name/description/parameters/function + config: MCP server configuration + """ + self._tools = tools + self.config = config + + def setup_routes(self, app): + """ + Set up MCP protocol routes on the FastAPI app. 
+ + Adds: + - POST /api/mcp - MCP JSON-RPC endpoint + - GET /api/mcp/tools - List available tools + """ + + @app.post("/api/mcp") + async def mcp_jsonrpc(request: Request): + """MCP JSON-RPC endpoint.""" + try: + body = await request.json() + method = body.get("method") + params = body.get("params", {}) + request_id = body.get("id") + + if method == "tools/list": + result = await self._list_tools() + elif method == "tools/call": + result = await self._call_tool(params) + elif method == "server/info": + result = self._server_info() + else: + return { + "jsonrpc": "2.0", + "id": request_id, + "error": { + "code": -32601, + "message": f"Method not found: {method}" + } + } + + return { + "jsonrpc": "2.0", + "id": request_id, + "result": result + } + + except Exception as e: + logger.error("MCP request failed: %s", e) + return { + "jsonrpc": "2.0", + "id": body.get("id") if isinstance(body, dict) else None, + "error": { + "code": -32603, + "message": str(e) + } + } + + @app.get("/api/mcp/tools") + async def list_mcp_tools(): + """List available MCP tools.""" + return await self._list_tools() + + def _get_tool_name(self, tool) -> str: + return tool.name if hasattr(tool, "name") else tool["name"] + + def _get_tool_description(self, tool) -> str: + return tool.description if hasattr(tool, "description") else tool["description"] + + def _get_tool_parameters(self, tool) -> Dict[str, Any]: + return tool.parameters if hasattr(tool, "parameters") else tool.get("parameters", {}) + + def _get_tool_function(self, tool): + return tool.function if hasattr(tool, "function") else tool["function"] + + def _server_info(self) -> Dict[str, Any]: + """Get MCP server information.""" + return { + "name": self.config.name, + "version": self.config.version, + "description": self.config.description, + "protocol_version": "1.0", + } + + async def _list_tools(self) -> Dict[str, Any]: + """List all available tools in MCP format.""" + tools = [] + + for tool in self._tools: + mcp_tool = { + "name": 
self._get_tool_name(tool), + "description": self._get_tool_description(tool), + "inputSchema": { + "type": "object", + "properties": {}, + "required": [] + } + } + + for param_name, param_spec in self._get_tool_parameters(tool).items(): + param_type = param_spec.get("type", "string") + mcp_tool["inputSchema"]["properties"][param_name] = { + "type": param_type, + "description": param_spec.get("description", "") + } + if param_spec.get("required", False): + mcp_tool["inputSchema"]["required"].append(param_name) + + tools.append(mcp_tool) + + return {"tools": tools} + + async def _call_tool(self, params: Dict[str, Any]) -> Dict[str, Any]: + """Call a tool via MCP.""" + tool_name = params.get("name") + arguments = params.get("arguments", {}) + + tool_def = None + for tool in self._tools: + if self._get_tool_name(tool) == tool_name: + tool_def = tool + break + + if not tool_def: + raise ValueError(f"Tool not found: {tool_name}") + + try: + result = await self._get_tool_function(tool_def)(**arguments) + return {"result": result} + except Exception as e: + logger.error("Tool execution failed: %s", e) + raise + + +def setup_mcp_server(tools, config: Optional[MCPServerConfig] = None, fastapi_app=None): + """ + Set up MCP server for an agent. + + Args: + tools: List of tool dicts with name/description/parameters/function keys + config: Optional MCP server configuration + fastapi_app: FastAPI app to add routes to (required) + + Returns: + MCPServer instance + """ + if config is None: + config = MCPServerConfig(name="mcp-server") + + server = MCPServer(tools, config) + server.setup_routes(fastapi_app) + + return server diff --git a/dbx-agent-app/examples/research-agent/dbx_agent_app/mcp/uc_functions.py b/dbx-agent-app/examples/research-agent/dbx_agent_app/mcp/uc_functions.py new file mode 100644 index 00000000..f1659960 --- /dev/null +++ b/dbx-agent-app/examples/research-agent/dbx_agent_app/mcp/uc_functions.py @@ -0,0 +1,240 @@ +""" +Unity Catalog Functions adapter for MCP. 
# diff --git a/dbx-agent-app/examples/research-agent/dbx_agent_app/mcp/uc_functions.py (new file)
"""
Unity Catalog Functions adapter for MCP.

Automatically discovers UC Functions and exposes them as MCP tools.
"""

import logging
from typing import List, Dict, Any, Optional, TYPE_CHECKING

if TYPE_CHECKING:
    from databricks.sdk import WorkspaceClient

logger = logging.getLogger(__name__)


class UCFunctionAdapter:
    """
    Adapter for Unity Catalog Functions to MCP protocol.

    Discovers UC Functions and converts them to MCP tool format for
    use with agents.

    Usage:
        adapter = UCFunctionAdapter(profile="my-profile")
        tools = adapter.discover_functions(catalog="main", schema="functions")
    """

    def __init__(self, profile: Optional[str] = None):
        """
        Initialize UC Functions adapter.

        Args:
            profile: Databricks CLI profile name
        """
        self.profile = profile
        self._client: Optional["WorkspaceClient"] = None

    def _get_client(self) -> "WorkspaceClient":
        """Get or create workspace client.

        The SDK is imported lazily so the pure conversion helpers
        (_convert_function_to_tool, _map_uc_type_to_json_type) can be used
        without databricks-sdk installed.
        """
        if self._client is None:
            from databricks.sdk import WorkspaceClient
            self._client = (
                WorkspaceClient(profile=self.profile)
                if self.profile
                else WorkspaceClient()
            )
        return self._client

    def discover_functions(
        self,
        catalog: str,
        schema: str,
        name_pattern: Optional[str] = None,
    ) -> List[Dict[str, Any]]:
        """
        Discover UC Functions and convert to MCP tool format.

        Args:
            catalog: UC catalog name
            schema: UC schema name
            name_pattern: Optional substring filter on the function name.
                (Doc fix: this is a plain substring match, NOT a SQL LIKE
                pattern as previously documented.)

        Returns:
            List of tool definitions in MCP format

        Example:
            >>> adapter = UCFunctionAdapter()
            >>> tools = adapter.discover_functions("main", "functions")
            >>> for tool in tools:
            ...     print(tool["name"], tool["description"])
        """
        client = self._get_client()
        tools = []

        try:
            functions = client.functions.list(
                catalog_name=catalog,
                schema_name=schema,
            )

            for func in functions:
                # Skip system functions
                if func.name.startswith("system."):
                    continue

                # Apply substring name filter, if provided.
                if name_pattern and name_pattern not in func.name:
                    continue

                # Convert to MCP tool format
                tool = self._convert_function_to_tool(func)
                if tool:
                    tools.append(tool)

            logger.info(
                "Discovered %d UC Functions from %s.%s", len(tools), catalog, schema
            )

        except Exception as e:
            # Best-effort discovery: log and return whatever converted cleanly.
            logger.error("Failed to discover UC Functions: %s", e)

        return tools

    def _convert_function_to_tool(self, func) -> Optional[Dict[str, Any]]:
        """
        Convert a UC Function to MCP tool format.

        Args:
            func: Function info from the Databricks SDK

        Returns:
            MCP tool definition, or None if conversion fails
        """
        try:
            # Extract function metadata
            name = func.name.split(".")[-1]  # Get short name
            description = func.comment or f"Unity Catalog function: {name}"

            # Build parameter schema
            input_schema = {
                "type": "object",
                "properties": {},
                "required": []
            }

            # Parse function parameters
            if hasattr(func, "input_params") and func.input_params:
                for param in func.input_params.parameters:
                    param_name = param.name
                    param_type = self._map_uc_type_to_json_type(param.type_name)

                    input_schema["properties"][param_name] = {
                        "type": param_type,
                        "description": param.comment or ""
                    }

                    # Parameters without defaults are required
                    if not hasattr(param, "default_value") or param.default_value is None:
                        input_schema["required"].append(param_name)

            return {
                "name": name,
                "description": description,
                "inputSchema": input_schema,
                "full_name": func.full_name,
                "source": "unity_catalog"
            }

        except Exception as e:
            logger.warning("Failed to convert function %s: %s", func.name, e)
            return None

    def _map_uc_type_to_json_type(self, uc_type: str) -> str:
        """
        Map Unity Catalog data type to JSON Schema type.

        Args:
            uc_type: UC type name (e.g., "STRING", "BIGINT", "BOOLEAN")

        Returns:
            JSON Schema type ("string", "number", "boolean", etc.);
            unknown types fall back to "string".
        """
        type_mapping = {
            "STRING": "string",
            "VARCHAR": "string",
            "CHAR": "string",
            "BIGINT": "integer",
            "INT": "integer",
            "INTEGER": "integer",
            "SMALLINT": "integer",
            "TINYINT": "integer",
            "DOUBLE": "number",
            "FLOAT": "number",
            "DECIMAL": "number",
            "BOOLEAN": "boolean",
            "BINARY": "string",
            "DATE": "string",
            "TIMESTAMP": "string",
            "ARRAY": "array",
            "MAP": "object",
            "STRUCT": "object",
        }

        uc_type_upper = uc_type.upper()
        return type_mapping.get(uc_type_upper, "string")

    async def call_function(
        self,
        full_name: str,
        arguments: Dict[str, Any]
    ) -> Any:
        """
        Call a UC Function with given arguments.

        Args:
            full_name: Full function name (catalog.schema.function)
            arguments: Function arguments

        Returns:
            Function result (first cell of the first row), or None if the
            statement produced no rows

        Example:
            >>> adapter = UCFunctionAdapter()
            >>> result = await adapter.call_function(
            ...     "main.functions.calculate_tax",
            ...     {"amount": 100, "rate": 0.08}
            ... )
        """
        client = self._get_client()

        try:
            # Build SQL query to call the function with named parameter markers.
            args_list = [f":{key}" for key in arguments.keys()]
            query = f"SELECT {full_name}({', '.join(args_list)})"

            # Execute via SQL warehouse.
            # Note: requires a warehouse ID to be configured.
            # NOTE(review): all argument values are coerced with str() —
            # confirm the warehouse casts them back to the declared types.
            result = client.statement_execution.execute_statement(
                statement=query,
                warehouse_id=self._get_default_warehouse(),
                parameters=[
                    {"name": key, "value": str(value)}
                    for key, value in arguments.items()
                ]
            )

            return result.result.data_array[0][0] if result.result.data_array else None

        except Exception as e:
            logger.error("Failed to call UC Function %s: %s", full_name, e)
            raise

    def _get_default_warehouse(self) -> str:
        """Get the default SQL warehouse ID from the environment.

        Raises:
            ValueError: If DATABRICKS_WAREHOUSE_ID is not set.
        """
        import os
        warehouse_id = os.getenv("DATABRICKS_WAREHOUSE_ID")
        if not warehouse_id:
            raise ValueError(
                "DATABRICKS_WAREHOUSE_ID not set. "
                "Set this environment variable to use UC Functions."
            )
        return warehouse_id


# diff --git a/dbx-agent-app/examples/research-agent/dbx_agent_app/py.typed (new, empty marker file)
# diff --git a/dbx-agent-app/examples/research-agent/dbx_agent_app/registry/__init__.py (new file)
# — its module docstring ("Unity Catalog integration for agent registration...")
#   began here in the original patch and continues on the following lines.
"""
Unity Catalog agent registry.

Registers and manages agents as Unity Catalog AGENT objects for
catalog-based discovery and permission management.
"""

import json
import logging
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, TYPE_CHECKING

if TYPE_CHECKING:
    # Imported only for type checking; the runtime import is deferred to
    # _get_client() so the pure helpers work without the Databricks SDK.
    from databricks.sdk import WorkspaceClient

logger = logging.getLogger(__name__)

# Marker separating the human-readable description from the JSON metadata
# blob stored in a registered model's comment field.
# Comment format: "description\n---AGENT_META---\n{json}"
_META_MARKER = "---AGENT_META---"


class UCRegistrationError(Exception):
    """Raised when agent registration in Unity Catalog fails."""
    pass


@dataclass
class UCAgentSpec:
    """
    Specification for registering an agent in Unity Catalog.

    Attributes:
        name: Agent name (will be catalog object name)
        catalog: UC catalog name
        schema: UC schema name
        endpoint_url: Agent's base URL
        description: Agent description
        capabilities: List of agent capabilities
        properties: Additional metadata key-value pairs
    """
    name: str
    catalog: str
    schema: str
    endpoint_url: str
    description: Optional[str] = None
    capabilities: Optional[List[str]] = None
    properties: Optional[Dict[str, str]] = None


class UCAgentRegistry:
    """
    Unity Catalog agent registry.

    Registers agents as UC AGENT objects for catalog-based discovery
    and permission management.

    Usage:
        registry = UCAgentRegistry(profile="my-profile")

        spec = UCAgentSpec(
            name="customer_research",
            catalog="main",
            schema="agents",
            endpoint_url="https://app.databricksapps.com",
            description="Customer research agent",
            capabilities=["search", "analysis"],
        )

        registry.register_agent(spec)
    """

    def __init__(self, profile: Optional[str] = None):
        """
        Initialize UC agent registry.

        Args:
            profile: Databricks CLI profile name (uses default if not specified)
        """
        self.profile = profile
        # Lazily created workspace client; see _get_client().
        self._client: Optional["WorkspaceClient"] = None

    def _get_client(self) -> "WorkspaceClient":
        """Get or create the workspace client (cached after first use)."""
        if self._client is None:
            # Deferred import: keeps module import (and the static parsing
            # helpers below) usable without the Databricks SDK installed.
            from databricks.sdk import WorkspaceClient

            self._client = (
                WorkspaceClient(profile=self.profile)
                if self.profile
                else WorkspaceClient()
            )
        return self._client

    def register_agent(self, spec: UCAgentSpec) -> Dict[str, Any]:
        """
        Register an agent in Unity Catalog.

        Creates an AGENT object in the specified catalog and schema with
        metadata about the agent's endpoint, capabilities, and properties.

        Args:
            spec: Agent specification

        Returns:
            Dictionary with registration details

        Raises:
            UCRegistrationError: If registration fails

        Example:
            >>> registry = UCAgentRegistry(profile="my-profile")
            >>> spec = UCAgentSpec(
            ...     name="my_agent",
            ...     catalog="main",
            ...     schema="agents",
            ...     endpoint_url="https://app.databricksapps.com",
            ... )
            >>> result = registry.register_agent(spec)
        """
        client = self._get_client()
        full_name = f"{spec.catalog}.{spec.schema}.{spec.name}"

        try:
            # Build agent properties for UC metadata.
            properties = spec.properties or {}
            properties["endpoint_url"] = spec.endpoint_url
            properties["agent_card_url"] = f"{spec.endpoint_url}/.well-known/agent.json"

            if spec.capabilities:
                # Stored as a comma-joined string; decoded by _split_capabilities.
                properties["capabilities"] = ",".join(spec.capabilities)

            # Register as a UC registered model with AGENT type.
            # (UC doesn't have a native AGENT type yet, so we use registered
            # models with special tags/properties to mark them as agents.)
            logger.info(f"Registering agent '{full_name}' in Unity Catalog")

            # Fail fast with actionable errors if the container objects
            # are missing or inaccessible.
            try:
                client.catalogs.get(spec.catalog)
            except Exception as e:
                raise UCRegistrationError(
                    f"Catalog '{spec.catalog}' does not exist or is not accessible: {e}"
                )

            try:
                client.schemas.get(f"{spec.catalog}.{spec.schema}")
            except Exception as e:
                raise UCRegistrationError(
                    f"Schema '{spec.catalog}.{spec.schema}' does not exist or is not accessible: {e}"
                )

            # Create or update registered model as agent placeholder.
            # In a future UC version with native AGENT support, this would use:
            #   client.agents.create(name=full_name, properties=properties)
            meta = {"databricks_agent": True, **properties}
            comment = spec.description or ""
            comment_with_meta = f"{comment}\n{_META_MARKER}\n{json.dumps(meta)}"

            # Try update first (model may already exist from prior deploy),
            # fall back to create if it doesn't exist.
            try:
                client.registered_models.update(
                    full_name,
                    comment=comment_with_meta,
                )
                logger.info(f"Updated existing agent '{full_name}'")
            except Exception as update_err:
                # Model doesn't exist or SP can't access it — try create.
                logger.debug(f"Update failed ({update_err}), trying create")
                try:
                    client.registered_models.create(
                        name=spec.name,
                        catalog_name=spec.catalog,
                        schema_name=spec.schema,
                        comment=comment_with_meta,
                    )
                    logger.info(f"Created new agent '{full_name}'")
                except Exception as create_err:
                    # If create fails with "already exists", the SP just
                    # can't see the model — log warning but don't fail.
                    err_str = str(create_err).lower()
                    if "already exists" in err_str or "not a valid name" in err_str:
                        logger.warning(
                            "Agent '%s' exists but SP cannot update it. "
                            "Grant the app's SP ownership or MANAGE on the model.",
                            full_name,
                        )
                    else:
                        raise

            logger.info(f"Successfully registered agent '{full_name}'")

            return {
                "full_name": full_name,
                "catalog": spec.catalog,
                "schema": spec.schema,
                "name": spec.name,
                "endpoint_url": spec.endpoint_url,
                "properties": properties,
            }

        except UCRegistrationError:
            raise
        except Exception as e:
            raise UCRegistrationError(
                f"Failed to register agent '{full_name}': {e}"
            ) from e

    @staticmethod
    def _parse_agent_meta(comment: Optional[str]) -> Optional[Dict[str, Any]]:
        """Parse agent metadata from comment field (JSON after the meta marker).

        Returns None when the marker is absent or the JSON is malformed.
        """
        if not comment or _META_MARKER not in comment:
            return None
        try:
            _, meta_json = comment.split(_META_MARKER, 1)
            return json.loads(meta_json.strip())
        except (ValueError, json.JSONDecodeError):
            return None

    @staticmethod
    def _clean_description(comment: Optional[str]) -> str:
        """Extract human-readable description from comment (before the meta marker)."""
        if not comment:
            return ""
        if _META_MARKER in comment:
            return comment.split(_META_MARKER)[0].strip()
        return comment

    @staticmethod
    def _split_capabilities(meta: Dict[str, Any]) -> Optional[List[str]]:
        """Decode the comma-joined capabilities string back into a list.

        Returns None when the metadata has no (or an empty) capabilities entry.
        Shared by get_agent() and list_agents() so the encoding stays in
        one place, mirroring register_agent()'s ",".join(...).
        """
        raw = meta.get("capabilities")
        return raw.split(",") if raw else None

    def get_agent(self, catalog: str, schema: str, name: str) -> Optional[Dict[str, Any]]:
        """
        Get agent metadata from Unity Catalog.

        Args:
            catalog: UC catalog name
            schema: UC schema name
            name: Agent name

        Returns:
            Agent metadata dictionary or None if not found (or the model
            exists but was not registered as an agent)
        """
        client = self._get_client()
        full_name = f"{catalog}.{schema}.{name}"

        try:
            model = client.registered_models.get(full_name)
            meta = self._parse_agent_meta(model.comment)
            # Only models carrying our marker + flag count as agents.
            if not meta or not meta.get("databricks_agent"):
                return None

            return {
                "full_name": full_name,
                "catalog": catalog,
                "schema": schema,
                "name": name,
                "description": self._clean_description(model.comment),
                "endpoint_url": meta.get("endpoint_url"),
                "agent_card_url": meta.get("agent_card_url"),
                "capabilities": self._split_capabilities(meta),
                "properties": meta,
            }

        except Exception as e:
            logger.debug(f"Agent '{full_name}' not found: {e}")
            return None

    def list_agents(
        self,
        catalog: str,
        schema: Optional[str] = None,
    ) -> List[Dict[str, Any]]:
        """
        List all agents in a catalog or schema.

        Args:
            catalog: UC catalog name
            schema: Optional UC schema name (lists all schemas if not specified)

        Returns:
            List of agent metadata dictionaries (empty on schema-listing failure)
        """
        client = self._get_client()
        agents: List[Dict[str, Any]] = []

        # Determine which schemas to scan.
        schemas_to_scan = [schema] if schema else []
        if not schema:
            try:
                for s in client.schemas.list(catalog_name=catalog):
                    if s.name != "information_schema":
                        schemas_to_scan.append(s.name)
            except Exception as e:
                logger.error(f"Failed to list schemas in {catalog}: {e}")
                return []

        for schema_name in schemas_to_scan:
            try:
                models = client.registered_models.list(
                    catalog_name=catalog, schema_name=schema_name
                )
                for model in models:
                    meta = self._parse_agent_meta(model.comment)
                    if not meta or not meta.get("databricks_agent"):
                        continue

                    agents.append({
                        "full_name": model.full_name,
                        "catalog": catalog,
                        "schema": schema_name,
                        "name": model.name,
                        "description": self._clean_description(model.comment),
                        "endpoint_url": meta.get("endpoint_url"),
                        "capabilities": self._split_capabilities(meta),
                    })
            except Exception as e:
                # Best-effort scan: a schema the caller can't read should
                # not abort listing the others.
                logger.debug(f"Failed to list models in {catalog}.{schema_name}: {e}")
                continue

        return agents

    def delete_agent(self, catalog: str, schema: str, name: str) -> bool:
        """
        Delete an agent from Unity Catalog.

        Args:
            catalog: UC catalog name
            schema: UC schema name
            name: Agent name

        Returns:
            True if deleted, False if not found

        Raises:
            UCRegistrationError: If deletion fails
        """
        client = self._get_client()
        full_name = f"{catalog}.{schema}.{name}"

        try:
            client.registered_models.delete(full_name)
            logger.info(f"Deleted agent '{full_name}'")
            return True
        except Exception as e:
            if "does not exist" in str(e).lower():
                return False
            raise UCRegistrationError(
                f"Failed to delete agent '{full_name}': {e}"
            ) from e
}, + "analytics": { + "app_name": "sgp-multi-agent-analytics", + "url": "https://sgp-multi-agent-analytics-7474660127789418.aws.databricksapps.com", + "sp_name": "app-1u9g0g sgp-multi-agent-analytics", + "sp_client_id": "b80e3c25-4349-46e2-bf6c-12a3a1e0a240", + "deployed_at": "2026-03-03T21:35:49.013649+00:00" + }, + "compliance": { + "app_name": "sgp-multi-agent-compliance", + "url": "https://sgp-multi-agent-compliance-7474660127789418.aws.databricksapps.com", + "sp_name": "app-1u9g0g sgp-multi-agent-compliance", + "sp_client_id": "c4cbc638-57a8-4d50-ace3-821ea520f171", + "deployed_at": "2026-03-03T21:36:17.396805+00:00" + }, + "supervisor": { + "app_name": "sgp-multi-agent-supervisor", + "url": "https://sgp-multi-agent-supervisor-7474660127789418.aws.databricksapps.com", + "sp_name": "app-1u9g0g sgp-multi-agent-supervisor", + "sp_client_id": "3bb99bd2-fcdb-4c08-9dea-87acdc79d313", + "deployed_at": "2026-03-03T21:37:38.072701+00:00" + } + } +} diff --git a/dbx-agent-app/examples/supervisor/agent.py b/dbx-agent-app/examples/supervisor/agent.py new file mode 100644 index 00000000..8250b764 --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agent.py @@ -0,0 +1,431 @@ +"""Multi-Agent Supervisor — Routes queries to independently deployed sub-agents via /invocations. + +Uses SDK types (AgentRequest/AgentResponse) instead of mlflow.pyfunc.ResponsesAgent. 
+""" + +# IMPORTANT: Clean up auth environment BEFORE any Databricks SDK imports +# In Databricks Apps, both OAuth and PAT token may be present +import os +if os.environ.get("DATABRICKS_CLIENT_ID"): + os.environ.pop("DATABRICKS_TOKEN", None) + +import json +import time +import logging +from datetime import datetime, timezone +from uuid import uuid4 +from typing import Dict, Any + +import httpx + +from dbx_agent_app import AgentRequest, AgentResponse, UserContext +from databricks_langchain import ChatDatabricks +from databricks.sdk import WorkspaceClient +from langchain_core.messages import SystemMessage, HumanMessage, AIMessage +from langchain_core.tools import tool + +logger = logging.getLogger(__name__) + + +class SupervisorAgent: + """ + Multi-agent supervisor that routes queries to independently deployed sub-agents. + + Each sub-agent is a separate Databricks App with an /invocations endpoint + (Databricks Responses Agent protocol). The supervisor uses LLM function + calling to pick the right sub-agent, then calls it over HTTP at /invocations. 
+ """ + + # Map tool names to sub-agent endpoint keys + TOOL_TO_SUBAGENT = { + "call_research": "research", + "call_expert_finder": "expert_finder", + "call_analytics": "analytics", + "call_compliance_check": "compliance", + } + + # Sub-agent configuration: env var for URL + tool name to invoke + SUBAGENT_CONFIG = { + "research": {"url_env": "RESEARCH_URL", "tool": "search"}, + "expert_finder": {"url_env": "EXPERT_FINDER_URL", "tool": "search"}, + "analytics": {"url_env": "ANALYTICS_URL", "tool": "query"}, + "compliance": {"url_env": "COMPLIANCE_URL", "tool": "check"}, + } + + def __init__(self, config=None): + """Initialize supervisor with LLM routing and sub-agent config.""" + self.config = config or {} + + # Workspace client for auth token generation + self.workspace = WorkspaceClient() + + # Per-request user context (set in predict, used in tool calls) + self._current_user_context: UserContext | None = None + + # Observability state — reset per call + self._last_tables_accessed = [] + self._last_sql_queries = [] + self._last_keywords = [] + self._last_data_source = "live" + self._last_routing = None + + # Initialize LLM with function calling + self.llm = ChatDatabricks( + endpoint=self.config.get("endpoint", "databricks-claude-sonnet-4-5"), + temperature=0.1, + max_tokens=4096, + ) + + # Create tools for sub-agents + self.tools = self._create_subagent_tools() + self.llm_with_tools = self.llm.bind_tools(self.tools) + + # ------------------------------------------------------------------ + # Sub-agent client — calls deployed agents via /invocations + # ------------------------------------------------------------------ + + def _call_subagent_invocations( + self, endpoint_name: str, query: str, user_context: UserContext | None = None, + ) -> dict: + """Call a sub-agent via /invocations (Databricks Responses Agent protocol).""" + config = self.SUBAGENT_CONFIG[endpoint_name] + agent_url = os.environ.get(config["url_env"]) + + if not agent_url: + logger.warning("No 
URL configured for sub-agent %s (env: %s)", + endpoint_name, config["url_env"]) + return self._fallback_response(endpoint_name, query) + + invocations_url = f"{agent_url.rstrip('/')}/invocations" + + # Authenticate using workspace OAuth (service principal) + auth_headers = {} + try: + header_factory = self.workspace.config.authenticate() + if callable(header_factory): + auth_headers = header_factory() + elif isinstance(header_factory, dict): + auth_headers = header_factory + except Exception as e: + logger.warning("Auth header generation failed: %s", e) + + # Forward user identity so sub-agents can execute as the calling user + if user_context is not None: + auth_headers.update(user_context.as_forwarded_headers()) + + payload = { + "input": [{"role": "user", "content": query}], + } + + start = time.monotonic() + try: + resp = httpx.post( + invocations_url, + json=payload, + headers={**auth_headers, "Content-Type": "application/json"}, + timeout=50.0, + ) + resp.raise_for_status() + except Exception as e: + logger.error("/invocations call to %s failed: %s", endpoint_name, e, exc_info=True) + return self._fallback_response(endpoint_name, query) + call_duration = round((time.monotonic() - start) * 1000, 1) + + response_data = resp.json() + + # Extract response text from Responses Agent protocol output + response_text = "" + output_items = response_data.get("output", []) + for item in output_items: + if isinstance(item, dict): + content = item.get("content", []) + for part in content: + if isinstance(part, dict) and part.get("type") == "output_text": + response_text = part.get("text", "") + break + if response_text: + break + + # Extract structured metadata if the sub-agent passed it through + metadata = response_data.get("_metadata") or {} + + if isinstance(metadata, dict) and "response" in metadata: + metadata["_network_ms"] = call_duration + metadata["_agent_url"] = agent_url + return metadata + + return { + "response": response_text, + "data_source": 
metadata.get("data_source", "live"), + "tables_accessed": metadata.get("tables_accessed", []), + "keywords_extracted": metadata.get("keywords_extracted", []), + "sql_queries": metadata.get("sql_queries", []), + "timing": metadata.get("timing", {"sql_total_ms": 0, "total_ms": call_duration}), + "_network_ms": call_duration, + "_agent_url": agent_url, + } + + def _fallback_response(self, endpoint_name: str, query: str) -> dict: + """Return a fallback response when MCP call fails or URL is not configured.""" + catalog = self.config.get("catalog", "serverless_dxukih_catalog") + schema = self.config.get("schema", "agents") + fqn = lambda t: f"{catalog}.{schema}.{t}" + + demo_responses = { + "research": { + "response": f'Based on analysis of expert transcripts:\n\n**Key Insights on "{query}":**\n\n' + '1. **Dr. Sarah Chen** (Healthcare Technology, Interview #T-2025-1247):\n' + ' "We\'re seeing 40% year-over-year growth in AI implementation."\n\n' + '2. **Michael Torres** (Supply Chain, Interview #T-2025-1189):\n' + ' "Leaders prioritize real-time visibility and transparency."\n\n' + '*Demo fallback -- sub-agent not reachable*', + "tables_accessed": [fqn("expert_transcripts")], + }, + "expert_finder": { + "response": f'**Found 5 experts for "{query}":**\n\n' + '**1. Dr. Sarah Chen** - Healthcare Technology\n' + ' - 23 interviews | Rating: 4.9\n\n' + '**2. 
Michael Torres** - Supply Chain Analytics\n' + ' - 18 interviews | Rating: 4.8\n\n' + '*Demo fallback -- sub-agent not reachable*', + "tables_accessed": [fqn("experts")], + }, + "analytics": { + "response": f'**Analytics Results:**\n\nQuery: {query}\n\n' + '- Total calls (last 90 days): 2,847\n' + '- Average duration: 52 minutes\n' + '- Month-over-month growth: +18%\n\n' + '*Demo fallback -- sub-agent not reachable*', + "tables_accessed": [fqn("call_metrics"), fqn("engagement_summary")], + }, + "compliance": { + "response": '**Compliance Check Complete**\n\n**Status: CLEARED**\n\n' + 'Checks:\n- Conflict of Interest: Clear\n- Restricted List: Clear\n' + '- NDA Status: Active\n\n' + '*Demo fallback -- sub-agent not reachable*', + "tables_accessed": [fqn("restricted_list"), fqn("nda_registry")], + }, + } + + demo = demo_responses.get(endpoint_name, demo_responses["research"]) + return { + "response": demo["response"], + "data_source": "demo_fallback", + "tables_accessed": demo["tables_accessed"], + "keywords_extracted": [], + "sql_queries": [], + "timing": {"sql_total_ms": 0, "total_ms": 0}, + "_network_ms": 0, + "_agent_url": None, + } + + # ------------------------------------------------------------------ + # Sub-agent dispatch (wraps invocations call with observability) + # ------------------------------------------------------------------ + + def _call_subagent(self, endpoint_name: str, query: str) -> str: + """Call a sub-agent via /invocations and update observability state.""" + self._last_tables_accessed = [] + self._last_sql_queries = [] + self._last_keywords = [] + self._last_data_source = "live" + self._last_subagent_duration_ms = 0 + self._last_network_ms = 0 + self._last_agent_url = None + + result = self._call_subagent_invocations( + endpoint_name, query, self._current_user_context + ) + + self._last_tables_accessed = result.get("tables_accessed", []) + self._last_sql_queries = result.get("sql_queries", []) + self._last_keywords = 
result.get("keywords_extracted", []) + self._last_data_source = result.get("data_source", "live") + self._last_subagent_duration_ms = result.get("timing", {}).get("total_ms", 0) + self._last_network_ms = result.get("_network_ms", 0) + self._last_agent_url = result.get("_agent_url") + + return result.get("response", str(result)) + + # ------------------------------------------------------------------ + # Tool definitions + # ------------------------------------------------------------------ + + def _create_subagent_tools(self): + """Create sync tools that route to sub-agent /invocations endpoints.""" + + @tool + def call_research(query: str) -> str: + """ + Search expert interview transcripts for insights and opinions. + + Use for: + - Questions about what experts have said + - Industry insights, trends, expert opinions + - "What do experts think about..." + - Summarizing expert perspectives + """ + return self._call_subagent("research", query) + + @tool + def call_expert_finder(query: str) -> str: + """ + Find experts who have knowledge on specific topics. + + Use for: + - "Find experts who know about..." + - "Who has discussed..." + - Identifying advisors with specific expertise + """ + return self._call_subagent("expert_finder", query) + + @tool + def call_analytics(query: str) -> str: + """ + Query business metrics, usage data, and operational analytics. + + Use for: + - Questions with numbers, counts, percentages + - "How many...", "What percentage...", "Show me usage..." + - Trends over time, comparisons + """ + return self._call_subagent("analytics", query) + + @tool + def call_compliance_check(query: str) -> str: + """ + Check engagements for compliance and conflicts of interest. + + Use for: + - "Check if this engagement is compliant..." + - "Any conflicts with..." 
+ - Conflict of interest screening + """ + return self._call_subagent("compliance", query) + + return [call_research, call_expert_finder, call_analytics, call_compliance_check] + + # ------------------------------------------------------------------ + # Predict + # ------------------------------------------------------------------ + + def predict(self, request: AgentRequest) -> AgentResponse: + """Route query to appropriate sub-agent via /invocations.""" + # Capture user context for forwarding to sub-agents during tool calls + self._current_user_context = request.user_context + + # Convert SDK request to LangChain messages + messages = [] + for item in request.input: + if item.role == "user": + messages.append(HumanMessage(content=item.content)) + elif item.role == "assistant": + messages.append(AIMessage(content=item.content)) + + # System prompt for routing + system_msg = SystemMessage(content="""You are a multi-agent supervisor for an expert network platform. + +Your role is to route user queries to the appropriate specialized sub-agent: + +**Available Sub-Agents:** + +1. **call_research**: Expert interview transcript research + - Use for: qualitative insights, expert opinions, "what do experts say about..." + +2. **call_expert_finder**: Find experts by topic/domain + - Use for: "find experts who...", "who knows about...", expert recommendations + +3. **call_analytics**: Business metrics and SQL queries + - Use for: numbers, counts, trends, "how many...", quantitative questions + +4. 
**call_compliance_check**: Compliance and conflict checks + - Use for: policy adherence, conflicts of interest, engagement approval + +**Routing Guidelines:** +- Choose ONE sub-agent that best matches the query intent +- Call the tool with the full user query +- Return the sub-agent's response directly +- If unclear, prefer call_research for general questions + +**DO NOT:** +- Try to answer queries yourself +- Call multiple tools (pick the best one) +- Modify or summarize the sub-agent's response""") + + llm_start = time.monotonic() + response = self.llm_with_tools.invoke([system_msg] + messages) + llm_duration_ms = round((time.monotonic() - llm_start) * 1000, 1) + + self._last_routing = None + call_timestamp = datetime.now(timezone.utc).isoformat() + + # Check if tool was called + if hasattr(response, 'tool_calls') and response.tool_calls: + tool_call = response.tool_calls[0] + tool_name = tool_call['name'] + tool_args = tool_call['args'] + + for t in self.tools: + if t.name == tool_name: + result = t.invoke(tool_args) + + sub_agent = self.TOOL_TO_SUBAGENT.get(tool_name, tool_name) + total_sql_ms = sum( + q.get("duration_ms", 0) for q in self._last_sql_queries + if "duration_ms" in q + ) + network_ms = getattr(self, "_last_network_ms", 0) + subagent_ms = getattr(self, "_last_subagent_duration_ms", 0) + agent_url = getattr(self, "_last_agent_url", None) + + self._last_routing = { + "tool": tool_name, + "sub_agent": sub_agent, + "timestamp": call_timestamp, + "data_source": self._last_data_source, + "tables_accessed": self._last_tables_accessed, + "keywords_extracted": self._last_keywords, + "routing_decision": { + "model": self.config.get("endpoint", "databricks-claude-sonnet-4-5"), + "latency_ms": llm_duration_ms, + "tool_selected": tool_name, + "tool_args": tool_args, + }, + "sql_queries": self._last_sql_queries, + "timing": { + "routing_ms": llm_duration_ms, + "network_ms": network_ms, + "sql_total_ms": total_sql_ms, + "subagent_ms": subagent_ms, + "total_ms": 
round(llm_duration_ms + network_ms, 1), + }, + "agent_endpoint": agent_url, + } + + return AgentResponse.text(result) + + # No tool called — return LLM response directly + self._last_routing = { + "tool": None, + "sub_agent": None, + "timestamp": call_timestamp, + "data_source": "llm_direct", + "tables_accessed": [], + "keywords_extracted": [], + "routing_decision": { + "model": self.config.get("endpoint", "databricks-claude-sonnet-4-5"), + "latency_ms": llm_duration_ms, + "tool_selected": None, + "reason": "LLM did not select a tool", + }, + "sql_queries": [], + "timing": { + "routing_ms": llm_duration_ms, + "network_ms": 0, + "sql_total_ms": 0, + "subagent_ms": 0, + "total_ms": llm_duration_ms, + }, + "agent_endpoint": None, + } + return AgentResponse.text(response.content) diff --git a/dbx-agent-app/examples/supervisor/agents.yaml b/dbx-agent-app/examples/supervisor/agents.yaml new file mode 100644 index 00000000..5266e0ba --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents.yaml @@ -0,0 +1,59 @@ +# Multi-agent deployment configuration +# Deploy with: dbx-agent-app deploy --config agents.yaml --profile + +project: + name: sgp-multi-agent + workspace_path: /Workspace/Shared/apps + +uc: + catalog: serverless_dxukih_catalog + schema: agents + +warehouse: + id: 387bcda0f2ece20c + +agents: + - name: research + source: ./agents/research + tables: [expert_transcripts] + env: + UC_CATALOG: "${uc.catalog}" + UC_SCHEMA: "${uc.schema}" + WAREHOUSE_ID: "${warehouse.id}" + + - name: expert-finder + source: ./agents/expert_finder + tables: [experts] + env: + UC_CATALOG: "${uc.catalog}" + UC_SCHEMA: "${uc.schema}" + WAREHOUSE_ID: "${warehouse.id}" + + - name: analytics + source: ./agents/analytics + tables: [call_metrics, engagement_summary] + env: + UC_CATALOG: "${uc.catalog}" + UC_SCHEMA: "${uc.schema}" + WAREHOUSE_ID: "${warehouse.id}" + + - name: compliance + source: ./agents/compliance + tables: [restricted_list, nda_registry] + env: + UC_CATALOG: "${uc.catalog}" 
+ UC_SCHEMA: "${uc.schema}" + WAREHOUSE_ID: "${warehouse.id}" + + - name: supervisor + source: . + depends_on: [research, expert-finder, analytics, compliance] + url_env_map: + research: RESEARCH_URL + expert-finder: EXPERT_FINDER_URL + analytics: ANALYTICS_URL + compliance: COMPLIANCE_URL + env: + MODEL_ENDPOINT: databricks-claude-sonnet-4-5 + UC_CATALOG: "${uc.catalog}" + UC_SCHEMA: "${uc.schema}" diff --git a/dbx-agent-app/examples/supervisor/agents/analytics/app.py b/dbx-agent-app/examples/supervisor/agents/analytics/app.py new file mode 100644 index 00000000..c5a55697 --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/analytics/app.py @@ -0,0 +1,211 @@ +"""Sub-agent: Analytics — queries call_metrics and engagement_summary.""" + +# Auth cleanup: prefer OAuth over PAT in Databricks Apps +import os +if os.environ.get("DATABRICKS_CLIENT_ID"): + os.environ.pop("DATABRICKS_TOKEN", None) + +import time +import logging +from databricks.sdk import WorkspaceClient +from databricks.sdk.service.sql import StatementParameterListItem +from dbx_agent_app import app_agent, AgentRequest, AgentResponse + +logger = logging.getLogger(__name__) + +# --------------------------------------------------------------------------- +# SQL helpers +# --------------------------------------------------------------------------- + +_workspace = WorkspaceClient() +_warehouse_id_cache = os.environ.get("WAREHOUSE_ID") + +CATALOG = os.environ.get("UC_CATALOG", "serverless_dxukih_catalog") +SCHEMA = os.environ.get("UC_SCHEMA", "agents") + + +def _fqn(table: str) -> str: + return f"{CATALOG}.{SCHEMA}.{table}" + + +def _get_warehouse_id() -> str: + global _warehouse_id_cache + if _warehouse_id_cache: + return _warehouse_id_cache + for wh in _workspace.warehouses.list(): + if wh.enable_serverless_compute: + _warehouse_id_cache = wh.id + return wh.id + first = next(iter(_workspace.warehouses.list()), None) + if first: + _warehouse_id_cache = first.id + return first.id + raise ValueError("No 
def _execute_sql(statement, parameters=None):
    """Run *statement* on the SQL warehouse and return (result, trace_entry).

    Parameters may be supplied as ready-made StatementParameterListItem
    objects or as {"name": ..., "value": ...} dicts (converted here).
    The trace entry records the normalized SQL text, echoed parameters,
    row count, column schema, duration, and warehouse id.
    """
    supplied = parameters or []

    # Normalize dict-style parameters into SDK parameter objects.
    converted = []
    for item in supplied:
        if isinstance(item, dict):
            converted.append(
                StatementParameterListItem(name=item["name"], value=item["value"])
            )
        else:
            converted.append(item)

    warehouse = _get_warehouse_id()
    started = time.monotonic()
    result = _workspace.statement_execution.execute_statement(
        warehouse_id=warehouse,
        statement=statement,
        parameters=converted,
        wait_timeout="50s",
    )
    elapsed_ms = round((time.monotonic() - started) * 1000, 1)

    rows = 0
    if result.result and result.result.data_array:
        rows = len(result.result.data_array)

    column_info = []
    manifest = result.manifest
    if manifest and manifest.schema and manifest.schema.columns:
        for col in manifest.schema.columns:
            kind = col.type_name
            column_info.append({
                "name": col.name,
                "type": str(kind.value) if hasattr(kind, "value") else str(kind),
            })

    # Echo parameters back as plain dicts regardless of the input form.
    echoed = []
    for item in supplied:
        if isinstance(item, dict):
            echoed.append({"name": item["name"], "value": item["value"]})
        else:
            echoed.append({"name": item.name, "value": item.value})

    trace = {
        "statement": " ".join(statement.split()),
        "parameters": echoed,
        "row_count": rows,
        "columns": column_info,
        "duration_ms": elapsed_ms,
        "warehouse_id": warehouse,
    }
    return result, trace


# ---------------------------------------------------------------------------
# Demo fallback
# ---------------------------------------------------------------------------

def _demo_response(query: str) -> dict:
    """Canned analytics answer used when the UC tables are unreachable."""
    text = f"""**Analytics Results:**

Query: {query}

- Total calls (last 90 days): 2,847
- Average duration: 52 minutes
- Month-over-month growth: +18%
- Top segment: Healthcare (34%)

*Demo fallback -- UC tables not available*"""
    return {
        "response": text,
        "data_source": "demo_fallback",
        "tables_accessed": [_fqn("call_metrics"), _fqn("engagement_summary")],
        "keywords_extracted": [],
        "sql_queries": [],
        "timing": {"sql_total_ms": 0, "total_ms": 0},
    }
--------------------------------------------------------------------------- +# Agent + Tool +# --------------------------------------------------------------------------- + +@app_agent( + name="sub_analytics", + description="Query business metrics, usage data, and operational analytics", + capabilities=["analytics", "metrics", "reporting"], + uc_catalog=os.environ.get("UC_CATALOG", "main"), + uc_schema=os.environ.get("UC_SCHEMA", "agents"), + enable_mcp=True, +) +async def sub_analytics(request: AgentRequest) -> dict: + """Delegate to query tool.""" + return await query(request.last_user_message) + + +@sub_analytics.tool(description="Query business metrics, usage data, and operational analytics") +async def query(query: str) -> dict: + """ + Query call metrics and engagement summary data. + + Args: + query: Analytics question to answer + + Returns: + Structured result with metrics, tables, and SQL traces + """ + total_start = time.monotonic() + sql_queries = [] + + try: + # Aggregate call metrics for last 90 days + metrics_result, metrics_trace = _execute_sql( + f""" + SELECT region, + SUM(call_count) AS total_calls, + ROUND(AVG(avg_duration_min), 1) AS avg_duration, + segment, + ROUND(SUM(revenue_usd), 2) AS total_revenue + FROM {_fqn('call_metrics')} + WHERE metric_date >= DATE_ADD(CURRENT_DATE(), -90) + GROUP BY region, segment + ORDER BY total_calls DESC + LIMIT 12 + """ + ) + sql_queries.append(metrics_trace) + + # Get engagement summary + summary_result, summary_trace = _execute_sql( + f"SELECT metric_name, metric_value, period FROM {_fqn('engagement_summary')}" + ) + sql_queries.append(summary_trace) + + text = f"**Analytics Results** (query: {query})\n\n" + + # Format engagement summary + if summary_result.result and summary_result.result.data_array: + text += "**Key Metrics (Last 90 Days):**\n" + for row in summary_result.result.data_array: + name, value, period = row + display_name = str(name).replace("_", " ").title() + val = float(value) + if "pct" in 
str(name): + text += f"- {display_name}: {val:.1f}%\n" + elif "usd" in str(name) or "revenue" in str(name): + text += f"- {display_name}: ${val:,.0f}\n" + else: + text += f"- {display_name}: {val:,.1f}\n" + text += "\n" + + # Format call metrics breakdown + if metrics_result.result and metrics_result.result.data_array: + text += "**Breakdown by Region & Segment:**\n\n" + text += "| Region | Segment | Calls | Avg Duration | Revenue |\n" + text += "|--------|---------|-------|--------------|---------|\n" + for row in metrics_result.result.data_array: + region, calls, dur, segment, rev = row + text += f"| {region} | {segment} | {int(float(calls)):,} | {float(dur):.1f} min | ${float(rev):,.0f} |\n" + text += "\n" + + text += f"*Data sources: {_fqn('call_metrics')}, {_fqn('engagement_summary')}*" + + total_ms = round((time.monotonic() - total_start) * 1000, 1) + sql_total_ms = sum(q["duration_ms"] for q in sql_queries) + + return { + "response": text, + "data_source": "live", + "tables_accessed": [_fqn("call_metrics"), _fqn("engagement_summary")], + "keywords_extracted": [], + "sql_queries": sql_queries, + "timing": {"sql_total_ms": sql_total_ms, "total_ms": total_ms}, + } + + except Exception as e: + logger.error("SQL query failed for analytics: %s", e, exc_info=True) + return _demo_response(query) + + +app = sub_analytics.app diff --git a/dbx-agent-app/examples/supervisor/agents/analytics/app.yaml b/dbx-agent-app/examples/supervisor/agents/analytics/app.yaml new file mode 100644 index 00000000..fc60869f --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/analytics/app.yaml @@ -0,0 +1,19 @@ +command: + - "python" + - "-m" + - "uvicorn" + - "app:app" + - "--host" + - "0.0.0.0" + - "--port" + - "8000" + +env: + - name: UC_CATALOG + value: serverless_dxukih_catalog + + - name: UC_SCHEMA + value: agents + + - name: WAREHOUSE_ID + value: 387bcda0f2ece20c diff --git a/dbx-agent-app/examples/supervisor/agents/analytics/dbx_agent_app/__init__.py 
b/dbx-agent-app/examples/supervisor/agents/analytics/dbx_agent_app/__init__.py new file mode 100644 index 00000000..95db07d3 --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/analytics/dbx_agent_app/__init__.py @@ -0,0 +1,41 @@ +""" +dbx-agent-app: Framework for building discoverable AI agents on Databricks Apps. + +This package provides: +- @app_agent: Decorator to turn an async function into a discoverable agent +- AgentDiscovery: Discover agents in your Databricks workspace +- A2AClient: Communicate with agents using the A2A protocol +- UCAgentRegistry: Register agents in Unity Catalog +- MCPServerConfig: Configure MCP server for agent tools +""" + +from .discovery import AgentDiscovery, DiscoveredAgent, AgentDiscoveryResult, A2AClient, A2AClientError +from .mcp import MCPServerConfig, setup_mcp_server, UCFunctionAdapter +from .registry import UCAgentRegistry, UCAgentSpec, UCRegistrationError +from .dashboard import create_dashboard_app + +try: + from importlib.metadata import version + __version__ = version("dbx-agent-app") +except Exception: + __version__ = "0.1.0" + +__all__ = [ + # Discovery + "AgentDiscovery", + "DiscoveredAgent", + "AgentDiscoveryResult", + "A2AClient", + "A2AClientError", + # Registry + "UCAgentRegistry", + "UCAgentSpec", + "UCRegistrationError", + # MCP + "MCPServerConfig", + "setup_mcp_server", + "UCFunctionAdapter", + # Dashboard + "create_dashboard_app", +] + diff --git a/dbx-agent-app/examples/supervisor/agents/analytics/dbx_agent_app/core/__init__.py b/dbx-agent-app/examples/supervisor/agents/analytics/dbx_agent_app/core/__init__.py new file mode 100644 index 00000000..623da88c --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/analytics/dbx_agent_app/core/__init__.py @@ -0,0 +1 @@ +"""Core agent application components.""" diff --git a/dbx-agent-app/examples/supervisor/agents/analytics/dbx_agent_app/dashboard/__init__.py b/dbx-agent-app/examples/supervisor/agents/analytics/dbx_agent_app/dashboard/__init__.py new 
file mode 100644 index 00000000..89929a08 --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/analytics/dbx_agent_app/dashboard/__init__.py @@ -0,0 +1,14 @@ +""" +Developer dashboard for agent discovery. + +Launch via CLI: + dbx-agent-app dashboard --profile my-profile + +Or programmatically: + from dbx_agent_app.dashboard import create_dashboard_app, run_dashboard +""" + +from .app import create_dashboard_app +from .cli import main as run_dashboard + +__all__ = ["create_dashboard_app", "run_dashboard"] diff --git a/dbx-agent-app/examples/supervisor/agents/analytics/dbx_agent_app/dashboard/app.py b/dbx-agent-app/examples/supervisor/agents/analytics/dbx_agent_app/dashboard/app.py new file mode 100644 index 00000000..d16c3d54 --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/analytics/dbx_agent_app/dashboard/app.py @@ -0,0 +1,112 @@ +""" +FastAPI application for the developer dashboard. + +Routes: + HTML: GET / — agent list page + GET /agent/{name} — agent detail page + API: GET /api/agents — JSON list of agents + GET /api/agents/{name}/card — full agent card + POST /api/agents/{name}/mcp — MCP JSON-RPC proxy + POST /api/scan — trigger re-scan + GET /health — health check +""" + +import logging +from typing import Optional + +from fastapi import FastAPI, Request +from fastapi.responses import HTMLResponse, JSONResponse + +from .scanner import DashboardScanner +from .templates import render_agent_list, render_agent_detail + +logger = logging.getLogger(__name__) + + +def create_dashboard_app( + scanner: DashboardScanner, + profile: Optional[str] = None, +) -> FastAPI: + """Build and return the dashboard FastAPI app.""" + app = FastAPI(title="dbx-agent-app dashboard", docs_url=None, redoc_url=None) + + # --- HTML pages ------------------------------------------------------- + + @app.get("/", response_class=HTMLResponse) + async def index(): + agents = scanner.get_agents() + return render_agent_list(agents) + + @app.get("/agent/{name}", 
response_class=HTMLResponse) + async def agent_detail(name: str): + agent = scanner.get_agent_by_name(name) + if not agent: + return HTMLResponse("
+<h1>Agent not found</h1>
", status_code=404) + + card = None + try: + card = await scanner.get_agent_card(agent.endpoint_url) + except Exception as e: + logger.warning("Could not fetch card for %s: %s", name, e) + + return render_agent_detail(agent, card) + + # --- JSON API --------------------------------------------------------- + + @app.get("/api/agents") + async def api_agents(): + agents = scanner.get_agents() + return [ + { + "name": a.name, + "endpoint_url": a.endpoint_url, + "app_name": a.app_name, + "description": a.description, + "capabilities": a.capabilities, + "protocol_version": a.protocol_version, + } + for a in agents + ] + + @app.get("/api/agents/{name}/card") + async def api_agent_card(name: str): + agent = scanner.get_agent_by_name(name) + if not agent: + return JSONResponse({"error": "Agent not found"}, status_code=404) + + try: + card = await scanner.get_agent_card(agent.endpoint_url) + return card + except Exception as e: + return JSONResponse({"error": str(e)}, status_code=502) + + @app.post("/api/agents/{name}/mcp") + async def api_mcp_proxy(name: str, request: Request): + agent = scanner.get_agent_by_name(name) + if not agent: + return JSONResponse({"error": "Agent not found"}, status_code=404) + + try: + payload = await request.json() + result = await scanner.proxy_mcp(agent.endpoint_url, payload) + return result + except Exception as e: + return JSONResponse( + {"jsonrpc": "2.0", "id": None, "error": {"code": -1, "message": str(e)}}, + status_code=502, + ) + + @app.post("/api/scan") + async def api_scan(): + agents = await scanner.scan() + return {"count": len(agents), "agents": [a.name for a in agents]} + + @app.get("/health") + async def health(): + return { + "status": "ok", + "agents_cached": len(scanner.get_agents()), + "profile": profile, + } + + return app diff --git a/dbx-agent-app/examples/supervisor/agents/analytics/dbx_agent_app/dashboard/cli.py b/dbx-agent-app/examples/supervisor/agents/analytics/dbx_agent_app/dashboard/cli.py new file mode 100644 
index 00000000..3b17aa90 --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/analytics/dbx_agent_app/dashboard/cli.py @@ -0,0 +1,63 @@ +""" +CLI entry point for the developer dashboard. + +Usage: + dbx-agent-app dashboard --profile my-profile --port 8501 +""" + +import argparse +import asyncio +import logging +import sys +import webbrowser + +import uvicorn + +from .scanner import DashboardScanner +from .app import create_dashboard_app + + +def main(): + parser = argparse.ArgumentParser( + prog="dbx-agent-app", + description="Developer dashboard for Databricks agent discovery", + ) + sub = parser.add_subparsers(dest="command") + + dash = sub.add_parser("dashboard", help="Launch the agent discovery dashboard") + dash.add_argument("--profile", type=str, default=None, help="Databricks CLI profile") + dash.add_argument("--port", type=int, default=8501, help="Port to serve on (default: 8501)") + dash.add_argument("--host", type=str, default="127.0.0.1", help="Host to bind (default: 127.0.0.1)") + dash.add_argument("--no-browser", action="store_true", help="Don't auto-open browser") + + args = parser.parse_args() + + if args.command != "dashboard": + parser.print_help() + sys.exit(1) + + logging.basicConfig(level=logging.INFO, format="%(levelname)s %(name)s: %(message)s") + + scanner = DashboardScanner(profile=args.profile) + + # Run initial scan + print(f"Scanning workspace for agents (profile={args.profile or 'default'})...") + try: + agents = asyncio.run(scanner.scan()) + print(f"Found {len(agents)} agent(s)") + except Exception as e: + print(f"Initial scan failed: {e}", file=sys.stderr) + print("Dashboard will start anyway — use the Scan button to retry.") + + app = create_dashboard_app(scanner, profile=args.profile) + + url = f"http://{args.host}:{args.port}" + if not args.no_browser: + webbrowser.open(url) + + print(f"Dashboard running at {url}") + uvicorn.run(app, host=args.host, port=args.port, log_level="warning") + + +if __name__ == "__main__": + 
main() diff --git a/dbx-agent-app/examples/supervisor/agents/analytics/dbx_agent_app/dashboard/scanner.py b/dbx-agent-app/examples/supervisor/agents/analytics/dbx_agent_app/dashboard/scanner.py new file mode 100644 index 00000000..475460be --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/analytics/dbx_agent_app/dashboard/scanner.py @@ -0,0 +1,81 @@ +""" +Dashboard scanner — wraps AgentDiscovery + A2AClient with caching and MCP proxy. +""" + +import asyncio +import logging +from typing import Dict, Any, List, Optional + +import httpx + +from ..discovery import AgentDiscovery, DiscoveredAgent, A2AClient, A2AClientError + +logger = logging.getLogger(__name__) + + +class DashboardScanner: + """ + Thin wrapper around AgentDiscovery that adds result caching + and MCP JSON-RPC proxying for the dashboard UI. + """ + + def __init__(self, profile: Optional[str] = None): + self._discovery = AgentDiscovery(profile=profile) + self._agents: List[DiscoveredAgent] = [] + self._scan_lock = asyncio.Lock() + self._scanned = False + + async def scan(self) -> List[DiscoveredAgent]: + """Run workspace discovery and cache results. 
Thread-safe via asyncio.Lock.""" + async with self._scan_lock: + result = await self._discovery.discover_agents() + self._agents = result.agents + self._scanned = True + if result.errors: + for err in result.errors: + logger.warning("Discovery error: %s", err) + return self._agents + + def get_agents(self) -> List[DiscoveredAgent]: + """Return cached agent list from the last scan.""" + return list(self._agents) + + def get_agent_by_name(self, name: str) -> Optional[DiscoveredAgent]: + """Look up a cached agent by name.""" + for agent in self._agents: + if agent.name == name or agent.app_name == name: + return agent + return None + + @property + def workspace_token(self) -> Optional[str]: + """Auth token extracted during discovery, used for cross-app requests.""" + return self._discovery._workspace_token + + async def get_agent_card(self, endpoint_url: str) -> Dict[str, Any]: + """Fetch the full agent card JSON from a remote agent.""" + async with A2AClient(timeout=10.0) as client: + return await client.fetch_agent_card( + endpoint_url, auth_token=self.workspace_token + ) + + async def proxy_mcp(self, endpoint_url: str, payload: Dict[str, Any]) -> Dict[str, Any]: + """ + Forward a JSON-RPC request to an agent's MCP endpoint. 
+ + Args: + endpoint_url: Agent base URL + payload: Complete JSON-RPC 2.0 request body + + Returns: + JSON-RPC response from the agent + """ + mcp_url = endpoint_url.rstrip("/") + "/api/mcp" + headers = {"Content-Type": "application/json"} + if self.workspace_token: + headers["Authorization"] = f"Bearer {self.workspace_token}" + + async with httpx.AsyncClient(timeout=30.0, follow_redirects=True) as http: + response = await http.post(mcp_url, json=payload, headers=headers) + response.raise_for_status() + return response.json() diff --git a/dbx-agent-app/examples/supervisor/agents/analytics/dbx_agent_app/dashboard/templates.py b/dbx-agent-app/examples/supervisor/agents/analytics/dbx_agent_app/dashboard/templates.py new file mode 100644 index 00000000..8543d75d --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/analytics/dbx_agent_app/dashboard/templates.py @@ -0,0 +1,278 @@ +""" +Server-rendered HTML templates for the dashboard. + +Pure Python functions returning HTML strings — no Jinja2, no React, no build step. +""" + +import html +import json +from typing import List, Dict, Any, Optional + +from ..discovery import DiscoveredAgent + + +# --------------------------------------------------------------------------- +# Base layout +# --------------------------------------------------------------------------- + +def render_base(title: str, content: str) -> str: + """HTML shell with inline CSS (dark theme).""" + return f""" + + + + +{html.escape(title)} + + + +
+
+

dbx-agent-app dashboard

+ +
+
+
+{content} +
+ +""" + + +# --------------------------------------------------------------------------- +# Agent list page +# --------------------------------------------------------------------------- + +def render_agent_list(agents: List[DiscoveredAgent]) -> str: + """Main page: grid of agent cards + scan button.""" + if not agents: + cards_html = """ +
+

No agents discovered

+

No agent-enabled Databricks Apps found in your workspace. Click Scan to retry.

+
""" + else: + cards = [] + for a in agents: + caps = "" + if a.capabilities: + badges = "".join( + f'{html.escape(c.strip())} ' + for c in a.capabilities.split(",") + ) + caps = f'
{badges}
' + + desc = html.escape(a.description or "No description") + cards.append(f""" + +
+

{html.escape(a.name)}

+

{desc}

+
+ App: {html.escape(a.app_name)} + {f'Protocol: {html.escape(a.protocol_version)}' if a.protocol_version else ''} +
+ {caps} +
+
""") + cards_html = f'
{"".join(cards)}
' + + return render_base( + "Agent Dashboard", + f""" +
+ {len(agents)} agent{"s" if len(agents) != 1 else ""} discovered + +
+{cards_html} +""", + ) + + +# --------------------------------------------------------------------------- +# Agent detail page +# --------------------------------------------------------------------------- + +def render_agent_detail( + agent: DiscoveredAgent, + card: Optional[Dict[str, Any]] = None, +) -> str: + """Detail page: agent card JSON, tools list, MCP test panel.""" + card_json = json.dumps(card, indent=2) if card else "Card not available" + + # Extract tools from card if present + tools_html = "" + if card: + skills = card.get("skills") or card.get("tools") or [] + if skills: + rows = [] + for t in skills: + name = html.escape(t.get("name", t.get("id", "unknown"))) + desc = html.escape(t.get("description", "")) + rows.append( + f'
{name}' + f'
{desc}
' + ) + tools_html = f""" +
+

Tools ({len(skills)})

+ {"".join(rows)} +
""" + + safe_name = html.escape(agent.name) + safe_endpoint = html.escape(agent.endpoint_url) + + return render_base( + f"{safe_name} — Agent Dashboard", + f""" +
+ ← All agents +

{safe_name}

+

{html.escape(agent.description or 'No description')}

+
+ Endpoint: {safe_endpoint} + App: {html.escape(agent.app_name)} + {f'{html.escape(agent.protocol_version)}' if agent.protocol_version else ''} +
+
+ +
+

Agent Card

+
{html.escape(card_json)}
+
+ +{tools_html} + +
+

MCP Test Panel

+

+ Send a JSON-RPC request to this agent's /api/mcp endpoint. +

+
+ + +
+ + +
+
+ +""", + ) diff --git a/dbx-agent-app/examples/supervisor/agents/analytics/dbx_agent_app/discovery/__init__.py b/dbx-agent-app/examples/supervisor/agents/analytics/dbx_agent_app/discovery/__init__.py new file mode 100644 index 00000000..d6d04008 --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/analytics/dbx_agent_app/discovery/__init__.py @@ -0,0 +1,24 @@ +""" +Agent discovery for Databricks Apps. + +This module provides clients and utilities for discovering agent-enabled +Databricks Apps that expose A2A protocol agent cards. +""" + +from .agent_discovery import ( + AgentDiscovery, + DiscoveredAgent, + AgentDiscoveryResult, +) +from .a2a_client import ( + A2AClient, + A2AClientError, +) + +__all__ = [ + "AgentDiscovery", + "DiscoveredAgent", + "AgentDiscoveryResult", + "A2AClient", + "A2AClientError", +] diff --git a/dbx-agent-app/examples/supervisor/agents/analytics/dbx_agent_app/discovery/a2a_client.py b/dbx-agent-app/examples/supervisor/agents/analytics/dbx_agent_app/discovery/a2a_client.py new file mode 100644 index 00000000..1243d1a3 --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/analytics/dbx_agent_app/discovery/a2a_client.py @@ -0,0 +1,268 @@ +""" +A2A Client for agent-to-agent communication. + +Implements the A2A protocol for discovering and communicating with peer agents. +""" + +import json +import uuid +import logging +from typing import Dict, Any, Optional, AsyncIterator + +import httpx + +logger = logging.getLogger(__name__) + + +class A2AClientError(Exception): + """Raised when an A2A operation fails.""" + pass + + +class A2AClient: + """ + Async client for A2A protocol communication with peer agents. + + Usage: + async with A2AClient() as client: + card = await client.fetch_agent_card("https://app.databricksapps.com") + result = await client.send_message("https://app.databricksapps.com/api/a2a", "Hello") + """ + + def __init__(self, timeout: float = 60.0): + """ + Initialize A2A client. 
+ + Args: + timeout: Request timeout in seconds + """ + self.timeout = timeout + self._client: Optional[httpx.AsyncClient] = None + + async def __aenter__(self): + self._client = httpx.AsyncClient( + timeout=self.timeout, + follow_redirects=True, + ) + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + if self._client: + await self._client.aclose() + + def _auth_headers(self, auth_token: Optional[str] = None) -> Dict[str, str]: + """Build authentication headers.""" + headers = {"Content-Type": "application/json"} + if auth_token: + headers["Authorization"] = f"Bearer {auth_token}" + return headers + + async def fetch_agent_card( + self, + base_url: str, + auth_token: Optional[str] = None, + ) -> Dict[str, Any]: + """ + Fetch an agent's A2A protocol agent card. + + Tries /.well-known/agent.json first, then /card as fallback. + Handles OAuth redirects gracefully (returns error instead of following). + + Args: + base_url: Base URL of the agent application + auth_token: Optional OAuth token for authenticated requests + + Returns: + Agent card JSON data + + Raises: + A2AClientError: If agent card cannot be fetched + + Example: + >>> async with A2AClient() as client: + >>> card = await client.fetch_agent_card("https://app.databricksapps.com") + >>> print(card["name"], card["description"]) + """ + if not self._client: + raise A2AClientError("Client not initialized. 
Use async context manager.") + + headers = {} + if auth_token: + headers["Authorization"] = f"Bearer {auth_token}" + + # Use a client that doesn't follow redirects to detect OAuth flows + async with httpx.AsyncClient(timeout=self.timeout, follow_redirects=False) as probe_client: + for path in ["/.well-known/agent.json", "/card"]: + try: + url = base_url.rstrip("/") + path + response = await probe_client.get(url, headers=headers) + + # OAuth redirect detected - app requires interactive auth + if response.status_code in (301, 302, 303, 307, 308): + logger.debug(f"OAuth redirect detected for {url}") + continue + + if response.status_code == 200: + if not response.text or response.text.isspace(): + logger.debug(f"Empty response body for {url}") + continue + return response.json() + + except Exception as e: + logger.debug(f"Agent card fetch failed for {url}: {e}") + continue + + raise A2AClientError(f"Could not fetch agent card from {base_url}") + + async def _jsonrpc_call( + self, + url: str, + method: str, + params: Dict[str, Any], + auth_token: Optional[str] = None, + ) -> Dict[str, Any]: + """ + Send a JSON-RPC 2.0 request to an agent. + + Args: + url: A2A endpoint URL + method: JSON-RPC method name (e.g., "message/send") + params: Method parameters + auth_token: Optional authentication token + + Returns: + JSON-RPC result + + Raises: + A2AClientError: If request fails or returns error + """ + if not self._client: + raise A2AClientError("Client not initialized. 
Use async context manager.") + + payload = { + "jsonrpc": "2.0", + "id": str(uuid.uuid4()), + "method": method, + "params": params, + } + + try: + response = await self._client.post( + url, + json=payload, + headers=self._auth_headers(auth_token), + ) + response.raise_for_status() + result = response.json() + + if "error" in result: + error = result["error"] + raise A2AClientError( + f"A2A error: {error.get('message', 'Unknown')} " + f"(code: {error.get('code')})" + ) + + return result.get("result", {}) + + except httpx.TimeoutException as e: + raise A2AClientError(f"Request to {url} timed out: {e}") + except httpx.HTTPStatusError as e: + raise A2AClientError( + f"HTTP error from {url}: {e.response.status_code}" + ) + except json.JSONDecodeError as e: + raise A2AClientError(f"Invalid JSON from {url}: {e}") + + async def send_message( + self, + agent_url: str, + message: str, + context_id: Optional[str] = None, + auth_token: Optional[str] = None, + ) -> Dict[str, Any]: + """ + Send a message to a peer agent using A2A protocol. + + Args: + agent_url: Agent's A2A endpoint URL + message: Text message to send + context_id: Optional conversation context ID + auth_token: Optional authentication token + + Returns: + Agent's response + + Example: + >>> async with A2AClient() as client: + >>> response = await client.send_message( + >>> "https://app.databricksapps.com/api/a2a", + >>> "What are your capabilities?" + >>> ) + """ + params: Dict[str, Any] = { + "message": { + "messageId": str(uuid.uuid4()), + "role": "user", + "parts": [{"text": message}], + } + } + if context_id: + params["message"]["contextId"] = context_id + + return await self._jsonrpc_call( + agent_url, "message/send", params, auth_token + ) + + async def send_streaming_message( + self, + agent_url: str, + message: str, + auth_token: Optional[str] = None, + ) -> AsyncIterator[Dict[str, Any]]: + """ + Send a streaming message and yield SSE events. 
+ + Args: + agent_url: Agent's A2A endpoint URL + message: Text message to send + auth_token: Optional authentication token + + Yields: + SSE events from the agent's response stream + + Example: + >>> async with A2AClient() as client: + >>> async for event in client.send_streaming_message(url, "Analyze this"): + >>> print(event) + """ + if not self._client: + raise A2AClientError("Client not initialized. Use async context manager.") + + stream_url = agent_url.rstrip("/") + "/stream" + + payload = { + "jsonrpc": "2.0", + "id": str(uuid.uuid4()), + "method": "message/stream", + "params": { + "message": { + "messageId": str(uuid.uuid4()), + "role": "user", + "parts": [{"text": message}], + } + }, + } + + async with self._client.stream( + "POST", + stream_url, + json=payload, + headers=self._auth_headers(auth_token), + ) as response: + response.raise_for_status() + async for line in response.aiter_lines(): + if line.startswith("data: "): + try: + yield json.loads(line[6:]) + except json.JSONDecodeError: + continue diff --git a/dbx-agent-app/examples/supervisor/agents/analytics/dbx_agent_app/discovery/agent_discovery.py b/dbx-agent-app/examples/supervisor/agents/analytics/dbx_agent_app/discovery/agent_discovery.py new file mode 100644 index 00000000..1563b304 --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/analytics/dbx_agent_app/discovery/agent_discovery.py @@ -0,0 +1,253 @@ +""" +Agent discovery for Databricks Apps. + +Discovers agent-enabled Databricks Apps by scanning workspace apps +and probing for A2A protocol agent cards. 
+""" + +import asyncio +import logging +from typing import List, Dict, Any, Optional +from dataclasses import dataclass + +from databricks.sdk import WorkspaceClient + +from .a2a_client import A2AClient, A2AClientError + +logger = logging.getLogger(__name__) + +# Agent card probe paths and timeout +AGENT_CARD_PATHS = ["/.well-known/agent.json", "/card"] +AGENT_CARD_PROBE_TIMEOUT = 5.0 + + +@dataclass +class DiscoveredAgent: + """ + An agent discovered from a Databricks App. + + Attributes: + name: Agent name (from agent card or app name) + endpoint_url: Agent's base URL + description: Agent description (from agent card) + capabilities: Comma-separated list of capabilities + protocol_version: A2A protocol version + app_name: Name of the backing Databricks App + """ + name: str + endpoint_url: str + app_name: str + description: Optional[str] = None + capabilities: Optional[str] = None + protocol_version: Optional[str] = None + + +@dataclass +class AgentDiscoveryResult: + """ + Results from agent discovery operation. + + Attributes: + agents: List of discovered agents + errors: List of error messages encountered during discovery + """ + agents: List[DiscoveredAgent] + errors: List[str] + + +class AgentDiscovery: + """ + Discovers agent-enabled Databricks Apps in a workspace. + + Scans running Databricks Apps and probes for A2A protocol agent cards + to identify which apps are agents. + + Usage: + discovery = AgentDiscovery(profile="my-profile") + result = await discovery.discover_agents() + for agent in result.agents: + print(f"Found agent: {agent.name} at {agent.endpoint_url}") + """ + + def __init__(self, profile: Optional[str] = None): + """ + Initialize agent discovery. + + Args: + profile: Databricks CLI profile name (uses default if not specified) + """ + self.profile = profile + self._workspace_token: Optional[str] = None + + async def discover_agents(self) -> AgentDiscoveryResult: + """ + Discover all agent-enabled Databricks Apps in the workspace. 
+ + Returns: + AgentDiscoveryResult with discovered agents and any errors + + Example: + >>> discovery = AgentDiscovery(profile="my-profile") + >>> result = await discovery.discover_agents() + >>> print(f"Found {len(result.agents)} agents") + """ + agents: List[DiscoveredAgent] = [] + errors: List[str] = [] + + try: + app_list = await self._list_workspace_apps() + except Exception as e: + logger.error("Workspace app listing failed: %s", e) + return AgentDiscoveryResult( + agents=[], + errors=[f"Failed to list workspace apps: {e}"], + ) + + if not app_list: + return AgentDiscoveryResult(agents=[], errors=[]) + + # Probe each running app for agent card in parallel + probe_tasks = [ + self._probe_app_for_agent(app_info) + for app_info in app_list + if app_info.get("url") + ] + + if probe_tasks: + probe_results = await asyncio.gather( + *probe_tasks, return_exceptions=True + ) + + for result in probe_results: + if isinstance(result, Exception): + errors.append(str(result)) + elif result is not None: + agents.append(result) + + logger.info( + "Agent discovery: %d apps checked, %d agents found", + len(app_list), len(agents) + ) + + return AgentDiscoveryResult(agents=agents, errors=errors) + + async def _list_workspace_apps(self) -> List[Dict[str, Any]]: + """ + Enumerate Databricks Apps in the workspace. 
+ + Returns: + List of running apps with name, url, owner + """ + def _list_sync() -> tuple: + client = ( + WorkspaceClient(profile=self.profile) + if self.profile + else WorkspaceClient() + ) + + # Extract auth token for cross-app requests + auth_headers = client.config.authenticate() + auth_val = auth_headers.get("Authorization", "") + token = auth_val[7:] if auth_val.startswith("Bearer ") else None + + results = [] + for app in client.apps.list(): + # Check if app is running via compute_status or deployment status + compute_state = None + cs = getattr(app, "compute_status", None) + if cs: + compute_state = str(getattr(cs, "state", "")) + + deploy_state = None + dep = getattr(app, "active_deployment", None) + if dep: + dep_status = getattr(dep, "status", None) + if dep_status: + deploy_state = str(getattr(dep_status, "state", "")) + + app_url = getattr(app, "url", None) or "" + app_url = app_url.rstrip("/") if app_url else "" + + results.append({ + "name": app.name, + "url": app_url, + "owner": getattr(app, "creator", None) or getattr(app, "updater", None), + "compute_state": compute_state, + "deploy_state": deploy_state, + }) + + return results, token + + loop = asyncio.get_event_loop() + result_tuple = await loop.run_in_executor(None, _list_sync) + all_apps, workspace_token = result_tuple + + # Store token for probing + self._workspace_token = workspace_token + + # Filter to running apps + running = [ + a for a in all_apps + if a.get("url") and ( + "ACTIVE" in (a.get("compute_state") or "") + or "SUCCEEDED" in (a.get("deploy_state") or "") + ) + ] + + logger.info( + "Workspace apps: %d total, %d running", + len(all_apps), len(running) + ) + + return running + + async def _probe_app_for_agent( + self, + app_info: Dict[str, Any], + ) -> Optional[DiscoveredAgent]: + """ + Probe a Databricks App for an A2A agent card. 
+ + Args: + app_info: App metadata from workspace listing + + Returns: + DiscoveredAgent if agent card found, None otherwise + """ + app_url = app_info["url"] + app_name = app_info["name"] + + token = self._workspace_token + agent_card = None + + try: + logger.debug(f"Probing app '{app_name}' at {app_url}") + async with A2AClient(timeout=AGENT_CARD_PROBE_TIMEOUT) as client: + agent_card = await client.fetch_agent_card(app_url, auth_token=token) + logger.info(f"Found agent card for '{app_name}'") + except A2AClientError as e: + logger.debug(f"No agent card for '{app_name}': {e}") + return None + except Exception as e: + logger.warning(f"Probe failed for '{app_name}': {e}") + return None + + if not agent_card: + return None + + # Extract capabilities + capabilities_list = [] + caps = agent_card.get("capabilities") + if isinstance(caps, dict): + capabilities_list = list(caps.keys()) + elif isinstance(caps, list): + capabilities_list = caps + + return DiscoveredAgent( + name=agent_card.get("name", app_name), + endpoint_url=app_url, + app_name=app_name, + description=agent_card.get("description"), + capabilities=",".join(capabilities_list) if capabilities_list else None, + protocol_version=agent_card.get("protocolVersion"), + ) diff --git a/dbx-agent-app/examples/supervisor/agents/analytics/dbx_agent_app/mcp/__init__.py b/dbx-agent-app/examples/supervisor/agents/analytics/dbx_agent_app/mcp/__init__.py new file mode 100644 index 00000000..60ee38ad --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/analytics/dbx_agent_app/mcp/__init__.py @@ -0,0 +1,11 @@ +""" +Model Context Protocol (MCP) support. + +This module provides utilities for integrating agents with MCP servers +and exposing UC Functions as MCP tools. 
"""
MCP server implementation for agents.

Provides an MCP server that exposes agent tools via the Model Context Protocol.
Works with any FastAPI app.
"""

import json
import logging
from typing import Dict, Any, List, Optional
from dataclasses import dataclass

from fastapi import Request

logger = logging.getLogger(__name__)


@dataclass
class MCPServerConfig:
    """
    Configuration for MCP server.

    Attributes:
        name: Server name
        version: Server version
        description: Server description
    """
    name: str
    version: str = "1.0.0"
    description: str = "MCP server for agent tools"


class MCPServer:
    """
    MCP server that exposes tools via JSON-RPC.

    Accepts tools as dicts with name, description, parameters, function keys,
    or objects exposing the same data as attributes (e.g. ToolDefinition).
    """

    def __init__(self, tools, config: MCPServerConfig):
        """
        Initialize MCP server.

        Args:
            tools: List of ToolDefinition objects or dicts with
                name/description/parameters/function keys
            config: MCP server configuration
        """
        self._tools = tools
        self.config = config

    def setup_routes(self, app):
        """
        Set up MCP protocol routes on the FastAPI app.

        Adds:
        - POST /api/mcp - MCP JSON-RPC endpoint
        - GET /api/mcp/tools - List available tools
        """

        @app.post("/api/mcp")
        async def mcp_jsonrpc(request: Request):
            """MCP JSON-RPC endpoint."""
            # BUG FIX: initialize `body` before the try block. Previously,
            # if request.json() raised (malformed JSON), the except handler
            # referenced `body` while it was still unbound -> NameError.
            body = None
            try:
                body = await request.json()
                method = body.get("method")
                params = body.get("params", {})
                request_id = body.get("id")

                if method == "tools/list":
                    result = await self._list_tools()
                elif method == "tools/call":
                    result = await self._call_tool(params)
                elif method == "server/info":
                    result = self._server_info()
                else:
                    # JSON-RPC 2.0 "method not found"
                    return {
                        "jsonrpc": "2.0",
                        "id": request_id,
                        "error": {
                            "code": -32601,
                            "message": f"Method not found: {method}"
                        }
                    }

                return {
                    "jsonrpc": "2.0",
                    "id": request_id,
                    "result": result
                }

            except Exception as e:
                logger.error("MCP request failed: %s", e)
                # JSON-RPC 2.0 "internal error"; echo the request id when
                # the body parsed far enough to contain one.
                return {
                    "jsonrpc": "2.0",
                    "id": body.get("id") if isinstance(body, dict) else None,
                    "error": {
                        "code": -32603,
                        "message": str(e)
                    }
                }

        @app.get("/api/mcp/tools")
        async def list_mcp_tools():
            """List available MCP tools."""
            return await self._list_tools()

    # -- Accessors that accept both attribute-style and dict-style tools ----

    def _get_tool_name(self, tool) -> str:
        return tool.name if hasattr(tool, "name") else tool["name"]

    def _get_tool_description(self, tool) -> str:
        return tool.description if hasattr(tool, "description") else tool["description"]

    def _get_tool_parameters(self, tool) -> Dict[str, Any]:
        return tool.parameters if hasattr(tool, "parameters") else tool.get("parameters", {})

    def _get_tool_function(self, tool):
        return tool.function if hasattr(tool, "function") else tool["function"]

    def _server_info(self) -> Dict[str, Any]:
        """Get MCP server information."""
        return {
            "name": self.config.name,
            "version": self.config.version,
            "description": self.config.description,
            "protocol_version": "1.0",
        }

    async def _list_tools(self) -> Dict[str, Any]:
        """List all available tools in MCP format."""
        tools = []

        for tool in self._tools:
            mcp_tool = {
                "name": self._get_tool_name(tool),
                "description": self._get_tool_description(tool),
                "inputSchema": {
                    "type": "object",
                    "properties": {},
                    "required": []
                }
            }

            # Translate each declared parameter into JSON-Schema form.
            for param_name, param_spec in self._get_tool_parameters(tool).items():
                param_type = param_spec.get("type", "string")
                mcp_tool["inputSchema"]["properties"][param_name] = {
                    "type": param_type,
                    "description": param_spec.get("description", "")
                }
                if param_spec.get("required", False):
                    mcp_tool["inputSchema"]["required"].append(param_name)

            tools.append(mcp_tool)

        return {"tools": tools}

    async def _call_tool(self, params: Dict[str, Any]) -> Dict[str, Any]:
        """
        Call a tool via MCP.

        Args:
            params: JSON-RPC params with "name" and optional "arguments"

        Raises:
            ValueError: If no registered tool matches the requested name.
        """
        tool_name = params.get("name")
        arguments = params.get("arguments", {})

        tool_def = None
        for tool in self._tools:
            if self._get_tool_name(tool) == tool_name:
                tool_def = tool
                break

        if not tool_def:
            raise ValueError(f"Tool not found: {tool_name}")

        try:
            result = await self._get_tool_function(tool_def)(**arguments)
            return {"result": result}
        except Exception as e:
            logger.error("Tool execution failed: %s", e)
            raise


def setup_mcp_server(tools, config: Optional[MCPServerConfig] = None, fastapi_app=None):
    """
    Set up MCP server for an agent.

    Args:
        tools: List of tool dicts with name/description/parameters/function keys
        config: Optional MCP server configuration
        fastapi_app: FastAPI app to add routes to (required)

    Returns:
        MCPServer instance

    Raises:
        ValueError: If fastapi_app is not provided.
    """
    # fastapi_app is documented as required; fail with a clear message
    # instead of the AttributeError setup_routes(None) would raise.
    if fastapi_app is None:
        raise ValueError("fastapi_app is required to set up MCP routes")

    if config is None:
        config = MCPServerConfig(name="mcp-server")

    server = MCPServer(tools, config)
    server.setup_routes(fastapi_app)

    return server
"""
Unity Catalog Functions adapter for MCP.

Automatically discovers UC Functions and exposes them as MCP tools.
"""

import logging
from typing import List, Dict, Any, Optional, TYPE_CHECKING

if TYPE_CHECKING:
    from databricks.sdk import WorkspaceClient

logger = logging.getLogger(__name__)


class UCFunctionAdapter:
    """
    Adapter for Unity Catalog Functions to MCP protocol.

    Discovers UC Functions and converts them to MCP tool format for
    use with agents.

    Usage:
        adapter = UCFunctionAdapter(profile="my-profile")
        tools = adapter.discover_functions(catalog="main", schema="functions")
    """

    def __init__(self, profile: Optional[str] = None):
        """
        Initialize UC Functions adapter.

        Args:
            profile: Databricks CLI profile name
        """
        self.profile = profile
        self._client: Optional["WorkspaceClient"] = None

    def _get_client(self) -> "WorkspaceClient":
        """Get or create workspace client (created lazily on first use)."""
        # Imported here (not at module top) so the pure conversion helpers
        # below can be used without the Databricks SDK installed.
        from databricks.sdk import WorkspaceClient

        if self._client is None:
            self._client = (
                WorkspaceClient(profile=self.profile)
                if self.profile
                else WorkspaceClient()
            )
        return self._client

    def discover_functions(
        self,
        catalog: str,
        schema: str,
        name_pattern: Optional[str] = None,
    ) -> List[Dict[str, Any]]:
        """
        Discover UC Functions and convert to MCP tool format.

        Args:
            catalog: UC catalog name
            schema: UC schema name
            name_pattern: Optional substring filter — a function is kept
                only if this substring occurs in its name. (Doc fixed:
                this was described as a SQL LIKE pattern, but the
                implementation performs a plain substring match.)

        Returns:
            List of tool definitions in MCP format

        Example:
            >>> adapter = UCFunctionAdapter()
            >>> tools = adapter.discover_functions("main", "functions")
            >>> for tool in tools:
            ...     print(tool["name"], tool["description"])
        """
        client = self._get_client()
        tools = []

        try:
            functions = client.functions.list(
                catalog_name=catalog,
                schema_name=schema,
            )

            for func in functions:
                # Skip system functions.
                # NOTE(review): names from a schema-scoped listing are
                # usually short names, so a "system." prefix may never
                # occur here — confirm against FunctionInfo.name.
                if func.name.startswith("system."):
                    continue

                # Apply the substring name filter, if any.
                if name_pattern and name_pattern not in func.name:
                    continue

                # Convert to MCP tool format; skip on conversion failure.
                tool = self._convert_function_to_tool(func)
                if tool:
                    tools.append(tool)

            logger.info(
                f"Discovered {len(tools)} UC Functions from {catalog}.{schema}"
            )

        except Exception as e:
            # Best-effort discovery: log and return whatever was collected.
            logger.error(f"Failed to discover UC Functions: {e}")

        return tools

    def _convert_function_to_tool(self, func) -> Optional[Dict[str, Any]]:
        """
        Convert a UC Function to MCP tool format.

        Args:
            func: Function info from Databricks SDK

        Returns:
            MCP tool definition or None if conversion fails
        """
        try:
            # Extract function metadata; short name is the last dotted part.
            name = func.name.split(".")[-1]
            description = func.comment or f"Unity Catalog function: {name}"

            # Build parameter schema
            input_schema = {
                "type": "object",
                "properties": {},
                "required": []
            }

            # Parse function parameters
            if hasattr(func, "input_params") and func.input_params:
                for param in func.input_params.parameters:
                    param_name = param.name
                    param_type = self._map_uc_type_to_json_type(param.type_name)

                    input_schema["properties"][param_name] = {
                        "type": param_type,
                        "description": param.comment or ""
                    }

                    # Parameters without defaults are required
                    if not hasattr(param, "default_value") or param.default_value is None:
                        input_schema["required"].append(param_name)

            return {
                "name": name,
                "description": description,
                "inputSchema": input_schema,
                "full_name": func.full_name,
                "source": "unity_catalog"
            }

        except Exception as e:
            logger.warning(f"Failed to convert function {func.name}: {e}")
            return None

    def _map_uc_type_to_json_type(self, uc_type: str) -> str:
        """
        Map Unity Catalog data type to JSON Schema type.

        Args:
            uc_type: UC type name (e.g., "STRING", "BIGINT", "BOOLEAN")

        Returns:
            JSON Schema type ("string", "number", "boolean", etc.);
            unknown types fall back to "string".
        """
        type_mapping = {
            "STRING": "string",
            "VARCHAR": "string",
            "CHAR": "string",
            "BIGINT": "integer",
            "INT": "integer",
            "INTEGER": "integer",
            "SMALLINT": "integer",
            "TINYINT": "integer",
            "DOUBLE": "number",
            "FLOAT": "number",
            "DECIMAL": "number",
            "BOOLEAN": "boolean",
            "BINARY": "string",
            "DATE": "string",
            "TIMESTAMP": "string",
            "ARRAY": "array",
            "MAP": "object",
            "STRUCT": "object",
        }

        uc_type_upper = uc_type.upper()
        return type_mapping.get(uc_type_upper, "string")

    async def call_function(
        self,
        full_name: str,
        arguments: Dict[str, Any]
    ) -> Any:
        """
        Call a UC Function with given arguments.

        Args:
            full_name: Full function name (catalog.schema.function)
            arguments: Function arguments

        Returns:
            Function result (first cell of the first row), or None when
            the statement returned no rows.

        Example:
            >>> adapter = UCFunctionAdapter()
            >>> result = await adapter.call_function(
            ...     "main.functions.calculate_tax",
            ...     {"amount": 100, "rate": 0.08}
            ... )
        """
        client = self._get_client()

        try:
            # Build SQL query to call the function with named placeholders.
            args_list = [f":{key}" for key in arguments.keys()]
            query = f"SELECT {full_name}({', '.join(args_list)})"

            # Execute via SQL warehouse
            # Note: This requires a warehouse ID to be configured
            # NOTE(review): parameters are passed as plain dicts with every
            # value coerced to str; other call sites in this repo wrap them
            # in StatementParameterListItem — confirm the SDK accepts dicts.
            result = client.statement_execution.execute_statement(
                statement=query,
                warehouse_id=self._get_default_warehouse(),
                parameters=[
                    {"name": key, "value": str(value)}
                    for key, value in arguments.items()
                ]
            )

            return result.result.data_array[0][0] if result.result.data_array else None

        except Exception as e:
            logger.error(f"Failed to call UC Function {full_name}: {e}")
            raise

    def _get_default_warehouse(self) -> str:
        """
        Get default SQL warehouse ID from the environment.

        Raises:
            ValueError: If DATABRICKS_WAREHOUSE_ID is not set.
        """
        import os
        warehouse_id = os.getenv("DATABRICKS_WAREHOUSE_ID")
        if not warehouse_id:
            raise ValueError(
                "DATABRICKS_WAREHOUSE_ID not set. "
                "Set this environment variable to use UC Functions."
            )
        return warehouse_id
logger = logging.getLogger(__name__)


class UCRegistrationError(Exception):
    """Raised when agent registration in Unity Catalog fails."""
    pass


@dataclass
class UCAgentSpec:
    """
    Specification for registering an agent in Unity Catalog.

    Attributes:
        name: Agent name (will be catalog object name)
        catalog: UC catalog name
        schema: UC schema name
        endpoint_url: Agent's base URL
        description: Agent description
        capabilities: List of agent capabilities
        properties: Additional metadata key-value pairs
    """
    name: str
    catalog: str
    schema: str
    endpoint_url: str
    description: Optional[str] = None
    capabilities: Optional[List[str]] = None
    properties: Optional[Dict[str, str]] = None


class UCAgentRegistry:
    """
    Unity Catalog agent registry.

    Registers agents as UC AGENT objects for catalog-based discovery
    and permission management.

    Usage:
        registry = UCAgentRegistry(profile="my-profile")

        spec = UCAgentSpec(
            name="customer_research",
            catalog="main",
            schema="agents",
            endpoint_url="https://app.databricksapps.com",
            description="Customer research agent",
            capabilities=["search", "analysis"],
        )

        registry.register_agent(spec)
    """

    def __init__(self, profile: Optional[str] = None):
        """
        Initialize UC agent registry.

        Args:
            profile: Databricks CLI profile name (uses default if not specified)
        """
        self.profile = profile
        self._client: Optional["WorkspaceClient"] = None

    def _get_client(self) -> "WorkspaceClient":
        """Get or create workspace client (cached after the first call)."""
        if self._client is None:
            self._client = (
                WorkspaceClient(profile=self.profile)
                if self.profile
                else WorkspaceClient()
            )
        return self._client

    def register_agent(self, spec: UCAgentSpec) -> Dict[str, Any]:
        """
        Register an agent in Unity Catalog.

        Creates an AGENT object in the specified catalog and schema with
        metadata about the agent's endpoint, capabilities, and properties.

        Args:
            spec: Agent specification

        Returns:
            Dictionary with registration details

        Raises:
            UCRegistrationError: If registration fails

        Example:
            >>> registry = UCAgentRegistry(profile="my-profile")
            >>> spec = UCAgentSpec(
            ...     name="my_agent",
            ...     catalog="main",
            ...     schema="agents",
            ...     endpoint_url="https://app.databricksapps.com",
            ... )
            >>> result = registry.register_agent(spec)
        """
        client = self._get_client()
        full_name = f"{spec.catalog}.{spec.schema}.{spec.name}"

        try:
            # BUG FIX: copy so we never mutate the caller's spec.properties
            # dict in place (the endpoint/capability keys used to be
            # written directly into the caller's dictionary).
            properties = dict(spec.properties or {})
            properties["endpoint_url"] = spec.endpoint_url
            properties["agent_card_url"] = f"{spec.endpoint_url}/.well-known/agent.json"

            if spec.capabilities:
                properties["capabilities"] = ",".join(spec.capabilities)

            # Register as a UC registered model with AGENT type
            # (UC doesn't have a native AGENT type yet, so we use registered models
            # with special tags/properties to mark them as agents)

            logger.info(f"Registering agent '{full_name}' in Unity Catalog")

            # Check if catalog and schema exist
            try:
                client.catalogs.get(spec.catalog)
            except Exception as e:
                raise UCRegistrationError(
                    f"Catalog '{spec.catalog}' does not exist or is not accessible: {e}"
                )

            try:
                client.schemas.get(f"{spec.catalog}.{spec.schema}")
            except Exception as e:
                raise UCRegistrationError(
                    f"Schema '{spec.catalog}.{spec.schema}' does not exist or is not accessible: {e}"
                )

            # Create or update registered model as agent placeholder
            # In a future UC version with native AGENT support, this would use:
            #   client.agents.create(name=full_name, properties=properties)

            # Encode properties as JSON suffix in comment for discovery
            # Format: "description\n---AGENT_META---\n{json}"
            meta = {"databricks_agent": True, **properties}
            comment = spec.description or ""
            comment_with_meta = f"{comment}\n---AGENT_META---\n{json.dumps(meta)}"

            # Try update first (model may already exist from prior deploy),
            # fall back to create if it doesn't exist
            try:
                client.registered_models.update(
                    full_name,
                    comment=comment_with_meta,
                )
                logger.info(f"Updated existing agent '{full_name}'")
            except Exception as update_err:
                # Model doesn't exist or SP can't access it — try create
                logger.debug(f"Update failed ({update_err}), trying create")
                try:
                    client.registered_models.create(
                        name=spec.name,
                        catalog_name=spec.catalog,
                        schema_name=spec.schema,
                        comment=comment_with_meta,
                    )
                    logger.info(f"Created new agent '{full_name}'")
                except Exception as create_err:
                    # If create fails with "already exists", the SP just
                    # can't see the model — log warning but don't fail
                    err_str = str(create_err).lower()
                    if "already exists" in err_str or "not a valid name" in err_str:
                        logger.warning(
                            "Agent '%s' exists but SP cannot update it. "
                            "Grant the app's SP ownership or MANAGE on the model.",
                            full_name,
                        )
                    else:
                        raise

            logger.info(f"Successfully registered agent '{full_name}'")

            return {
                "full_name": full_name,
                "catalog": spec.catalog,
                "schema": spec.schema,
                "name": spec.name,
                "endpoint_url": spec.endpoint_url,
                "properties": properties,
            }

        except UCRegistrationError:
            raise
        except Exception as e:
            raise UCRegistrationError(
                f"Failed to register agent '{full_name}': {e}"
            ) from e

    @staticmethod
    def _parse_agent_meta(comment: Optional[str]) -> Optional[Dict[str, Any]]:
        """Parse agent metadata from comment field (JSON after ---AGENT_META--- marker)."""
        if not comment or "---AGENT_META---" not in comment:
            return None
        try:
            _, meta_json = comment.split("---AGENT_META---", 1)
            return json.loads(meta_json.strip())
        except (ValueError, json.JSONDecodeError):
            return None

    @staticmethod
    def _clean_description(comment: Optional[str]) -> str:
        """Extract human-readable description from comment (before the meta marker)."""
        if not comment:
            return ""
        if "---AGENT_META---" in comment:
            return comment.split("---AGENT_META---")[0].strip()
        return comment

    def get_agent(self, catalog: str, schema: str, name: str) -> Optional[Dict[str, Any]]:
        """
        Get agent metadata from Unity Catalog.

        Args:
            catalog: UC catalog name
            schema: UC schema name
            name: Agent name

        Returns:
            Agent metadata dictionary or None if not found (or the model
            exists but carries no agent metadata marker)
        """
        client = self._get_client()
        full_name = f"{catalog}.{schema}.{name}"

        try:
            model = client.registered_models.get(full_name)
            meta = self._parse_agent_meta(model.comment)
            if not meta or not meta.get("databricks_agent"):
                return None

            return {
                "full_name": full_name,
                "catalog": catalog,
                "schema": schema,
                "name": name,
                "description": self._clean_description(model.comment),
                "endpoint_url": meta.get("endpoint_url"),
                "agent_card_url": meta.get("agent_card_url"),
                "capabilities": meta.get("capabilities", "").split(",") if meta.get("capabilities") else None,
                "properties": meta,
            }

        except Exception as e:
            logger.debug(f"Agent '{full_name}' not found: {e}")
            return None

    def list_agents(
        self,
        catalog: str,
        schema: Optional[str] = None,
    ) -> List[Dict[str, Any]]:
        """
        List all agents in a catalog or schema.

        Args:
            catalog: UC catalog name
            schema: Optional UC schema name (lists all schemas if not specified)

        Returns:
            List of agent metadata dictionaries
        """
        client = self._get_client()
        agents = []

        # Determine which schemas to scan
        schemas_to_scan = [schema] if schema else []
        if not schema:
            try:
                for s in client.schemas.list(catalog_name=catalog):
                    if s.name != "information_schema":
                        schemas_to_scan.append(s.name)
            except Exception as e:
                logger.error(f"Failed to list schemas in {catalog}: {e}")
                return []

        for schema_name in schemas_to_scan:
            try:
                models = client.registered_models.list(
                    catalog_name=catalog, schema_name=schema_name
                )
                for model in models:
                    # Only models carrying the agent metadata marker count.
                    meta = self._parse_agent_meta(model.comment)
                    if not meta or not meta.get("databricks_agent"):
                        continue

                    agents.append({
                        "full_name": model.full_name,
                        "catalog": catalog,
                        "schema": schema_name,
                        "name": model.name,
                        "description": self._clean_description(model.comment),
                        "endpoint_url": meta.get("endpoint_url"),
                        "capabilities": meta.get("capabilities", "").split(",") if meta.get("capabilities") else None,
                    })
            except Exception as e:
                logger.debug(f"Failed to list models in {catalog}.{schema_name}: {e}")
                continue

        return agents

    def delete_agent(self, catalog: str, schema: str, name: str) -> bool:
        """
        Delete an agent from Unity Catalog.

        Args:
            catalog: UC catalog name
            schema: UC schema name
            name: Agent name

        Returns:
            True if deleted, False if not found

        Raises:
            UCRegistrationError: If deletion fails
        """
        client = self._get_client()
        full_name = f"{catalog}.{schema}.{name}"

        try:
            client.registered_models.delete(full_name)
            logger.info(f"Deleted agent '{full_name}'")
            return True
        except Exception as e:
            if "does not exist" in str(e).lower():
                return False
            raise UCRegistrationError(
                f"Failed to delete agent '{full_name}': {e}"
            ) from e
CATALOG = os.environ.get("UC_CATALOG", "serverless_dxukih_catalog")
SCHEMA = os.environ.get("UC_SCHEMA", "agents")


def _fqn(table: str) -> str:
    """Fully-qualified UC table name under the configured catalog/schema."""
    return f"{CATALOG}.{SCHEMA}.{table}"


def _get_warehouse_id() -> str:
    """
    Resolve a SQL warehouse id, preferring serverless; result is cached.

    Raises:
        ValueError: If the workspace has no SQL warehouses at all.
    """
    global _warehouse_id_cache
    if _warehouse_id_cache:
        return _warehouse_id_cache
    # Single pass over the listing: remember the first warehouse as a
    # fallback while scanning for a serverless one. (Previously the
    # fallback path issued a second warehouses.list() API call.)
    first_id = None
    for wh in _workspace.warehouses.list():
        if wh.enable_serverless_compute:
            _warehouse_id_cache = wh.id
            return wh.id
        if first_id is None:
            first_id = wh.id
    if first_id:
        _warehouse_id_cache = first_id
        return first_id
    raise ValueError("No SQL warehouse available")


def _execute_sql(statement, parameters=None):
    """
    Execute SQL and return (result, trace_entry).

    Args:
        statement: SQL text (may contain :named placeholders)
        parameters: Optional list of StatementParameterListItem or
            {"name": ..., "value": ...} dicts (dicts are converted)

    Returns:
        Tuple of (SDK statement result, trace dict with the normalized
        statement, parameters, row count, column schema, duration and
        warehouse id).
    """
    raw_params = parameters or []
    # Accept both dict-shaped and already-wrapped parameters.
    params = [
        StatementParameterListItem(name=p["name"], value=p["value"])
        if isinstance(p, dict) else p
        for p in raw_params
    ]
    wh_id = _get_warehouse_id()
    start = time.monotonic()
    result = _workspace.statement_execution.execute_statement(
        warehouse_id=wh_id, statement=statement,
        parameters=params, wait_timeout="50s",
    )
    duration_ms = round((time.monotonic() - start) * 1000, 1)

    row_count = len(result.result.data_array) if result.result and result.result.data_array else 0
    columns = []
    if result.manifest and result.manifest.schema and result.manifest.schema.columns:
        columns = [
            {"name": c.name,
             "type": str(c.type_name.value) if hasattr(c.type_name, "value") else str(c.type_name)}
            for c in result.manifest.schema.columns
        ]

    trace = {
        # Whitespace-normalize the statement for compact trace output.
        "statement": " ".join(statement.split()),
        "parameters": [
            {"name": p["name"], "value": p["value"]} if isinstance(p, dict)
            else {"name": p.name, "value": p.value}
            for p in raw_params
        ],
        "row_count": row_count,
        "columns": columns,
        "duration_ms": duration_ms,
        "warehouse_id": wh_id,
    }
    return result, trace


def _extract_keywords(query: str) -> list[str]:
    """Lowercase the query and drop stop words and single characters."""
    stop = {"a","an","the","is","are","on","any","do","does","for","in","of",
            "to","what","who","how","can","this","that","find","check","show",
            "me","my","about","with","and","or","all","get","list","tell"}
    return [w for w in query.lower().split() if w not in stop and len(w) > 1]


def _build_like_clauses(column: str, keywords: list[str], prefix: str = ""):
    """
    Build an OR-joined set of LOWER(column) LIKE :param clauses.

    Returns:
        Tuple of (SQL clause string — "FALSE" when no keywords — and the
        list of {"name", "value"} bind parameters).
    """
    col_tag = prefix or column.replace(".", "_")
    clauses, params = [], []
    for i, kw in enumerate(keywords):
        name = f"{col_tag}_{i}"
        clauses.append(f"LOWER({column}) LIKE :{name}")
        params.append({"name": name, "value": f"%{kw}%"})
    return " OR ".join(clauses) if clauses else "FALSE", params


# ---------------------------------------------------------------------------
# Demo fallback
# ---------------------------------------------------------------------------

def _demo_response(query: str) -> dict:
    """Canned CLEARED response used when the UC tables are unreachable."""
    return {
        "response": f"""**Compliance Check Complete**

**Status: CLEARED**

Checks:
- Conflict of Interest: Clear
- Restricted List: Clear
- NDA Status: Active
- Prior Engagements: No issues

*Demo fallback -- UC tables not available*""",
        "data_source": "demo_fallback",
        "tables_accessed": [_fqn("restricted_list"), _fqn("nda_registry")],
        "keywords_extracted": _extract_keywords(query),
        "sql_queries": [],
        "timing": {"sql_total_ms": 0, "total_ms": 0},
    }


# ---------------------------------------------------------------------------
# Agent + Tool
# ---------------------------------------------------------------------------

@app_agent(
    name="sub_compliance",
    description="Check engagements for compliance, conflicts of interest, and NDA status",
    capabilities=["compliance", "conflict_check", "nda_status"],
    # NOTE(review): default here is "main" while CATALOG above defaults to
    # "serverless_dxukih_catalog" — confirm the mismatch is intentional.
    uc_catalog=os.environ.get("UC_CATALOG", "main"),
    uc_schema=os.environ.get("UC_SCHEMA", "agents"),
    enable_mcp=True,
)
async def sub_compliance(request: AgentRequest) -> dict:
    """Delegate to check tool."""
    return await check(request.last_user_message)


@sub_compliance.tool(description="Check engagements for compliance, conflicts of interest, and NDA status")
async def check(query: str) -> dict:
    """
    Check restricted list and NDA registry for compliance issues.

    Args:
        query: Compliance question or engagement to check

    Returns:
        Structured result with compliance status, metadata, and SQL traces;
        falls back to a canned demo response if any SQL step fails.
    """
    total_start = time.monotonic()
    sql_queries = []

    try:
        keywords = _extract_keywords(query)
        if not keywords:
            # All stop words: fall back to matching the whole query text.
            keywords = [query.lower()]

        # Restricted list check
        ent_clause, ent_params = _build_like_clauses("entity_name", keywords)
        rsn_clause, rsn_params = _build_like_clauses("reason", keywords)
        all_params = ent_params + rsn_params

        restricted_result, restricted_trace = _execute_sql(
            f"""
            SELECT entity_name, restriction_type, effective_date, expiry_date, reason
            FROM {_fqn('restricted_list')}
            WHERE ({ent_clause} OR {rsn_clause})
            AND expiry_date >= CURRENT_DATE()
            """,
            all_params,
        )
        sql_queries.append(restricted_trace)

        # NDA registry check
        exp_clause, exp_params = _build_like_clauses("expert_name", keywords)
        cov_clause, cov_params = _build_like_clauses("coverage_scope", keywords)
        nda_params = exp_params + cov_params

        nda_result, nda_trace = _execute_sql(
            f"""
            SELECT expert_name, nda_status, effective_date, expiry_date, coverage_scope
            FROM {_fqn('nda_registry')}
            WHERE {exp_clause} OR {cov_clause}
            """,
            nda_params,
        )
        sql_queries.append(nda_trace)

        text = f'**Compliance Check for "{query}"**\n\n'

        # Restricted list results
        has_restrictions = (
            restricted_result.result
            and restricted_result.result.data_array
            and len(restricted_result.result.data_array) > 0
        )

        if has_restrictions:
            text += "**Restricted List Matches:**\n"
            for row in restricted_result.result.data_array:
                entity, rtype, eff, exp, reason = row
                text += f"- **{entity}** [{rtype.upper()}] ({eff} to {exp})\n"
                text += f"  Reason: {reason}\n"
            text += "\n"
        else:
            text += "**Restricted List:** No active restrictions found.\n\n"

        # NDA results
        has_ndas = (
            nda_result.result
            and nda_result.result.data_array
            and len(nda_result.result.data_array) > 0
        )

        if has_ndas:
            text += "**NDA Status:**\n"
            for row in nda_result.result.data_array:
                expert, status, eff, exp, scope = row
                icon = {"active": "OK", "expired": "EXPIRED", "pending": "PENDING"}.get(
                    str(status).lower(), "?"
                )
                text += f"- **{expert}** -- [{icon}] {status} ({eff} to {exp})\n"
                text += f"  Coverage: {scope}\n"
            text += "\n"
        else:
            text += "**NDA Registry:** No matching NDA records found.\n\n"

        # Overall status
        if has_restrictions:
            text += "**Overall Status: FLAGGED** -- Review restrictions before proceeding.\n"
        else:
            text += "**Overall Status: CLEARED** -- No active restrictions found.\n"

        text += f"\n*Data sources: {_fqn('restricted_list')}, {_fqn('nda_registry')}*"

        total_ms = round((time.monotonic() - total_start) * 1000, 1)
        sql_total_ms = sum(q["duration_ms"] for q in sql_queries)

        return {
            "response": text,
            "data_source": "live",
            "tables_accessed": [_fqn("restricted_list"), _fqn("nda_registry")],
            "keywords_extracted": keywords,
            "sql_queries": sql_queries,
            "timing": {"sql_total_ms": sql_total_ms, "total_ms": total_ms},
        }

    except Exception as e:
        logger.error("SQL query failed for compliance: %s", e, exc_info=True)
        return _demo_response(query)


app = sub_compliance.app
a/dbx-agent-app/examples/supervisor/agents/compliance/dbx_agent_app/__init__.py b/dbx-agent-app/examples/supervisor/agents/compliance/dbx_agent_app/__init__.py new file mode 100644 index 00000000..95db07d3 --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/compliance/dbx_agent_app/__init__.py @@ -0,0 +1,41 @@ +""" +dbx-agent-app: Framework for building discoverable AI agents on Databricks Apps. + +This package provides: +- @app_agent: Decorator to turn an async function into a discoverable agent +- AgentDiscovery: Discover agents in your Databricks workspace +- A2AClient: Communicate with agents using the A2A protocol +- UCAgentRegistry: Register agents in Unity Catalog +- MCPServerConfig: Configure MCP server for agent tools +""" + +from .discovery import AgentDiscovery, DiscoveredAgent, AgentDiscoveryResult, A2AClient, A2AClientError +from .mcp import MCPServerConfig, setup_mcp_server, UCFunctionAdapter +from .registry import UCAgentRegistry, UCAgentSpec, UCRegistrationError +from .dashboard import create_dashboard_app + +try: + from importlib.metadata import version + __version__ = version("dbx-agent-app") +except Exception: + __version__ = "0.1.0" + +__all__ = [ + # Discovery + "AgentDiscovery", + "DiscoveredAgent", + "AgentDiscoveryResult", + "A2AClient", + "A2AClientError", + # Registry + "UCAgentRegistry", + "UCAgentSpec", + "UCRegistrationError", + # MCP + "MCPServerConfig", + "setup_mcp_server", + "UCFunctionAdapter", + # Dashboard + "create_dashboard_app", +] + diff --git a/dbx-agent-app/examples/supervisor/agents/compliance/dbx_agent_app/core/__init__.py b/dbx-agent-app/examples/supervisor/agents/compliance/dbx_agent_app/core/__init__.py new file mode 100644 index 00000000..623da88c --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/compliance/dbx_agent_app/core/__init__.py @@ -0,0 +1 @@ +"""Core agent application components.""" diff --git a/dbx-agent-app/examples/supervisor/agents/compliance/dbx_agent_app/dashboard/__init__.py 
b/dbx-agent-app/examples/supervisor/agents/compliance/dbx_agent_app/dashboard/__init__.py new file mode 100644 index 00000000..89929a08 --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/compliance/dbx_agent_app/dashboard/__init__.py @@ -0,0 +1,14 @@ +""" +Developer dashboard for agent discovery. + +Launch via CLI: + dbx-agent-app dashboard --profile my-profile + +Or programmatically: + from dbx_agent_app.dashboard import create_dashboard_app, run_dashboard +""" + +from .app import create_dashboard_app +from .cli import main as run_dashboard + +__all__ = ["create_dashboard_app", "run_dashboard"] diff --git a/dbx-agent-app/examples/supervisor/agents/compliance/dbx_agent_app/dashboard/app.py b/dbx-agent-app/examples/supervisor/agents/compliance/dbx_agent_app/dashboard/app.py new file mode 100644 index 00000000..d16c3d54 --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/compliance/dbx_agent_app/dashboard/app.py @@ -0,0 +1,112 @@ +""" +FastAPI application for the developer dashboard. 
+ +Routes: + HTML: GET / — agent list page + GET /agent/{name} — agent detail page + API: GET /api/agents — JSON list of agents + GET /api/agents/{name}/card — full agent card + POST /api/agents/{name}/mcp — MCP JSON-RPC proxy + POST /api/scan — trigger re-scan + GET /health — health check +""" + +import logging +from typing import Optional + +from fastapi import FastAPI, Request +from fastapi.responses import HTMLResponse, JSONResponse + +from .scanner import DashboardScanner +from .templates import render_agent_list, render_agent_detail + +logger = logging.getLogger(__name__) + + +def create_dashboard_app( + scanner: DashboardScanner, + profile: Optional[str] = None, +) -> FastAPI: + """Build and return the dashboard FastAPI app.""" + app = FastAPI(title="dbx-agent-app dashboard", docs_url=None, redoc_url=None) + + # --- HTML pages ------------------------------------------------------- + + @app.get("/", response_class=HTMLResponse) + async def index(): + agents = scanner.get_agents() + return render_agent_list(agents) + + @app.get("/agent/{name}", response_class=HTMLResponse) + async def agent_detail(name: str): + agent = scanner.get_agent_by_name(name) + if not agent: + return HTMLResponse("

Agent not found

", status_code=404) + + card = None + try: + card = await scanner.get_agent_card(agent.endpoint_url) + except Exception as e: + logger.warning("Could not fetch card for %s: %s", name, e) + + return render_agent_detail(agent, card) + + # --- JSON API --------------------------------------------------------- + + @app.get("/api/agents") + async def api_agents(): + agents = scanner.get_agents() + return [ + { + "name": a.name, + "endpoint_url": a.endpoint_url, + "app_name": a.app_name, + "description": a.description, + "capabilities": a.capabilities, + "protocol_version": a.protocol_version, + } + for a in agents + ] + + @app.get("/api/agents/{name}/card") + async def api_agent_card(name: str): + agent = scanner.get_agent_by_name(name) + if not agent: + return JSONResponse({"error": "Agent not found"}, status_code=404) + + try: + card = await scanner.get_agent_card(agent.endpoint_url) + return card + except Exception as e: + return JSONResponse({"error": str(e)}, status_code=502) + + @app.post("/api/agents/{name}/mcp") + async def api_mcp_proxy(name: str, request: Request): + agent = scanner.get_agent_by_name(name) + if not agent: + return JSONResponse({"error": "Agent not found"}, status_code=404) + + try: + payload = await request.json() + result = await scanner.proxy_mcp(agent.endpoint_url, payload) + return result + except Exception as e: + return JSONResponse( + {"jsonrpc": "2.0", "id": None, "error": {"code": -1, "message": str(e)}}, + status_code=502, + ) + + @app.post("/api/scan") + async def api_scan(): + agents = await scanner.scan() + return {"count": len(agents), "agents": [a.name for a in agents]} + + @app.get("/health") + async def health(): + return { + "status": "ok", + "agents_cached": len(scanner.get_agents()), + "profile": profile, + } + + return app diff --git a/dbx-agent-app/examples/supervisor/agents/compliance/dbx_agent_app/dashboard/cli.py b/dbx-agent-app/examples/supervisor/agents/compliance/dbx_agent_app/dashboard/cli.py new file mode 100644 
index 00000000..3b17aa90 --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/compliance/dbx_agent_app/dashboard/cli.py @@ -0,0 +1,63 @@ +""" +CLI entry point for the developer dashboard. + +Usage: + dbx-agent-app dashboard --profile my-profile --port 8501 +""" + +import argparse +import asyncio +import logging +import sys +import webbrowser + +import uvicorn + +from .scanner import DashboardScanner +from .app import create_dashboard_app + + +def main(): + parser = argparse.ArgumentParser( + prog="dbx-agent-app", + description="Developer dashboard for Databricks agent discovery", + ) + sub = parser.add_subparsers(dest="command") + + dash = sub.add_parser("dashboard", help="Launch the agent discovery dashboard") + dash.add_argument("--profile", type=str, default=None, help="Databricks CLI profile") + dash.add_argument("--port", type=int, default=8501, help="Port to serve on (default: 8501)") + dash.add_argument("--host", type=str, default="127.0.0.1", help="Host to bind (default: 127.0.0.1)") + dash.add_argument("--no-browser", action="store_true", help="Don't auto-open browser") + + args = parser.parse_args() + + if args.command != "dashboard": + parser.print_help() + sys.exit(1) + + logging.basicConfig(level=logging.INFO, format="%(levelname)s %(name)s: %(message)s") + + scanner = DashboardScanner(profile=args.profile) + + # Run initial scan + print(f"Scanning workspace for agents (profile={args.profile or 'default'})...") + try: + agents = asyncio.run(scanner.scan()) + print(f"Found {len(agents)} agent(s)") + except Exception as e: + print(f"Initial scan failed: {e}", file=sys.stderr) + print("Dashboard will start anyway — use the Scan button to retry.") + + app = create_dashboard_app(scanner, profile=args.profile) + + url = f"http://{args.host}:{args.port}" + if not args.no_browser: + webbrowser.open(url) + + print(f"Dashboard running at {url}") + uvicorn.run(app, host=args.host, port=args.port, log_level="warning") + + +if __name__ == "__main__": + 
main() diff --git a/dbx-agent-app/examples/supervisor/agents/compliance/dbx_agent_app/dashboard/scanner.py b/dbx-agent-app/examples/supervisor/agents/compliance/dbx_agent_app/dashboard/scanner.py new file mode 100644 index 00000000..475460be --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/compliance/dbx_agent_app/dashboard/scanner.py @@ -0,0 +1,81 @@ +""" +Dashboard scanner — wraps AgentDiscovery + A2AClient with caching and MCP proxy. +""" + +import asyncio +import logging +from typing import Dict, Any, List, Optional + +import httpx + +from ..discovery import AgentDiscovery, DiscoveredAgent, A2AClient, A2AClientError + +logger = logging.getLogger(__name__) + + +class DashboardScanner: + """ + Thin wrapper around AgentDiscovery that adds result caching + and MCP JSON-RPC proxying for the dashboard UI. + """ + + def __init__(self, profile: Optional[str] = None): + self._discovery = AgentDiscovery(profile=profile) + self._agents: List[DiscoveredAgent] = [] + self._scan_lock = asyncio.Lock() + self._scanned = False + + async def scan(self) -> List[DiscoveredAgent]: + """Run workspace discovery and cache results. 
Thread-safe via asyncio.Lock.""" + async with self._scan_lock: + result = await self._discovery.discover_agents() + self._agents = result.agents + self._scanned = True + if result.errors: + for err in result.errors: + logger.warning("Discovery error: %s", err) + return self._agents + + def get_agents(self) -> List[DiscoveredAgent]: + """Return cached agent list from the last scan.""" + return list(self._agents) + + def get_agent_by_name(self, name: str) -> Optional[DiscoveredAgent]: + """Look up a cached agent by name.""" + for agent in self._agents: + if agent.name == name or agent.app_name == name: + return agent + return None + + @property + def workspace_token(self) -> Optional[str]: + """Auth token extracted during discovery, used for cross-app requests.""" + return self._discovery._workspace_token + + async def get_agent_card(self, endpoint_url: str) -> Dict[str, Any]: + """Fetch the full agent card JSON from a remote agent.""" + async with A2AClient(timeout=10.0) as client: + return await client.fetch_agent_card( + endpoint_url, auth_token=self.workspace_token + ) + + async def proxy_mcp(self, endpoint_url: str, payload: Dict[str, Any]) -> Dict[str, Any]: + """ + Forward a JSON-RPC request to an agent's MCP endpoint. 
+ + Args: + endpoint_url: Agent base URL + payload: Complete JSON-RPC 2.0 request body + + Returns: + JSON-RPC response from the agent + """ + mcp_url = endpoint_url.rstrip("/") + "/api/mcp" + headers = {"Content-Type": "application/json"} + if self.workspace_token: + headers["Authorization"] = f"Bearer {self.workspace_token}" + + async with httpx.AsyncClient(timeout=30.0, follow_redirects=True) as http: + response = await http.post(mcp_url, json=payload, headers=headers) + response.raise_for_status() + return response.json() diff --git a/dbx-agent-app/examples/supervisor/agents/compliance/dbx_agent_app/dashboard/templates.py b/dbx-agent-app/examples/supervisor/agents/compliance/dbx_agent_app/dashboard/templates.py new file mode 100644 index 00000000..8543d75d --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/compliance/dbx_agent_app/dashboard/templates.py @@ -0,0 +1,278 @@ +""" +Server-rendered HTML templates for the dashboard. + +Pure Python functions returning HTML strings — no Jinja2, no React, no build step. +""" + +import html +import json +from typing import List, Dict, Any, Optional + +from ..discovery import DiscoveredAgent + + +# --------------------------------------------------------------------------- +# Base layout +# --------------------------------------------------------------------------- + +def render_base(title: str, content: str) -> str: + """HTML shell with inline CSS (dark theme).""" + return f""" + + + + +{html.escape(title)} + + + +
+
+

dbx-agent-app dashboard

+ +
+
+
+{content} +
+ +""" + + +# --------------------------------------------------------------------------- +# Agent list page +# --------------------------------------------------------------------------- + +def render_agent_list(agents: List[DiscoveredAgent]) -> str: + """Main page: grid of agent cards + scan button.""" + if not agents: + cards_html = """ +
+

No agents discovered

+

No agent-enabled Databricks Apps found in your workspace. Click Scan to retry.

+
""" + else: + cards = [] + for a in agents: + caps = "" + if a.capabilities: + badges = "".join( + f'{html.escape(c.strip())} ' + for c in a.capabilities.split(",") + ) + caps = f'
{badges}
' + + desc = html.escape(a.description or "No description") + cards.append(f""" + +
+

{html.escape(a.name)}

+

{desc}

+
+ App: {html.escape(a.app_name)} + {f'Protocol: {html.escape(a.protocol_version)}' if a.protocol_version else ''} +
+ {caps} +
+
""") + cards_html = f'
{"".join(cards)}
' + + return render_base( + "Agent Dashboard", + f""" +
+ {len(agents)} agent{"s" if len(agents) != 1 else ""} discovered + +
+{cards_html} +""", + ) + + +# --------------------------------------------------------------------------- +# Agent detail page +# --------------------------------------------------------------------------- + +def render_agent_detail( + agent: DiscoveredAgent, + card: Optional[Dict[str, Any]] = None, +) -> str: + """Detail page: agent card JSON, tools list, MCP test panel.""" + card_json = json.dumps(card, indent=2) if card else "Card not available" + + # Extract tools from card if present + tools_html = "" + if card: + skills = card.get("skills") or card.get("tools") or [] + if skills: + rows = [] + for t in skills: + name = html.escape(t.get("name", t.get("id", "unknown"))) + desc = html.escape(t.get("description", "")) + rows.append( + f'
{name}' + f'
{desc}
' + ) + tools_html = f""" +
+

Tools ({len(skills)})

+ {"".join(rows)} +
""" + + safe_name = html.escape(agent.name) + safe_endpoint = html.escape(agent.endpoint_url) + + return render_base( + f"{safe_name} — Agent Dashboard", + f""" +
+ ← All agents +

{safe_name}

+

{html.escape(agent.description or 'No description')}

+
+ Endpoint: {safe_endpoint} + App: {html.escape(agent.app_name)} + {f'{html.escape(agent.protocol_version)}' if agent.protocol_version else ''} +
+
+ +
+

Agent Card

+
{html.escape(card_json)}
+
+ +{tools_html} + +
+

MCP Test Panel

+

+ Send a JSON-RPC request to this agent's /api/mcp endpoint. +

+
+ + +
+ + +
+
+ +""", + ) diff --git a/dbx-agent-app/examples/supervisor/agents/compliance/dbx_agent_app/discovery/__init__.py b/dbx-agent-app/examples/supervisor/agents/compliance/dbx_agent_app/discovery/__init__.py new file mode 100644 index 00000000..d6d04008 --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/compliance/dbx_agent_app/discovery/__init__.py @@ -0,0 +1,24 @@ +""" +Agent discovery for Databricks Apps. + +This module provides clients and utilities for discovering agent-enabled +Databricks Apps that expose A2A protocol agent cards. +""" + +from .agent_discovery import ( + AgentDiscovery, + DiscoveredAgent, + AgentDiscoveryResult, +) +from .a2a_client import ( + A2AClient, + A2AClientError, +) + +__all__ = [ + "AgentDiscovery", + "DiscoveredAgent", + "AgentDiscoveryResult", + "A2AClient", + "A2AClientError", +] diff --git a/dbx-agent-app/examples/supervisor/agents/compliance/dbx_agent_app/discovery/a2a_client.py b/dbx-agent-app/examples/supervisor/agents/compliance/dbx_agent_app/discovery/a2a_client.py new file mode 100644 index 00000000..1243d1a3 --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/compliance/dbx_agent_app/discovery/a2a_client.py @@ -0,0 +1,268 @@ +""" +A2A Client for agent-to-agent communication. + +Implements the A2A protocol for discovering and communicating with peer agents. +""" + +import json +import uuid +import logging +from typing import Dict, Any, Optional, AsyncIterator + +import httpx + +logger = logging.getLogger(__name__) + + +class A2AClientError(Exception): + """Raised when an A2A operation fails.""" + pass + + +class A2AClient: + """ + Async client for A2A protocol communication with peer agents. + + Usage: + async with A2AClient() as client: + card = await client.fetch_agent_card("https://app.databricksapps.com") + result = await client.send_message("https://app.databricksapps.com/api/a2a", "Hello") + """ + + def __init__(self, timeout: float = 60.0): + """ + Initialize A2A client. 
+ + Args: + timeout: Request timeout in seconds + """ + self.timeout = timeout + self._client: Optional[httpx.AsyncClient] = None + + async def __aenter__(self): + self._client = httpx.AsyncClient( + timeout=self.timeout, + follow_redirects=True, + ) + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + if self._client: + await self._client.aclose() + + def _auth_headers(self, auth_token: Optional[str] = None) -> Dict[str, str]: + """Build authentication headers.""" + headers = {"Content-Type": "application/json"} + if auth_token: + headers["Authorization"] = f"Bearer {auth_token}" + return headers + + async def fetch_agent_card( + self, + base_url: str, + auth_token: Optional[str] = None, + ) -> Dict[str, Any]: + """ + Fetch an agent's A2A protocol agent card. + + Tries /.well-known/agent.json first, then /card as fallback. + Handles OAuth redirects gracefully (returns error instead of following). + + Args: + base_url: Base URL of the agent application + auth_token: Optional OAuth token for authenticated requests + + Returns: + Agent card JSON data + + Raises: + A2AClientError: If agent card cannot be fetched + + Example: + >>> async with A2AClient() as client: + >>> card = await client.fetch_agent_card("https://app.databricksapps.com") + >>> print(card["name"], card["description"]) + """ + if not self._client: + raise A2AClientError("Client not initialized. 
Use async context manager.") + + headers = {} + if auth_token: + headers["Authorization"] = f"Bearer {auth_token}" + + # Use a client that doesn't follow redirects to detect OAuth flows + async with httpx.AsyncClient(timeout=self.timeout, follow_redirects=False) as probe_client: + for path in ["/.well-known/agent.json", "/card"]: + try: + url = base_url.rstrip("/") + path + response = await probe_client.get(url, headers=headers) + + # OAuth redirect detected - app requires interactive auth + if response.status_code in (301, 302, 303, 307, 308): + logger.debug(f"OAuth redirect detected for {url}") + continue + + if response.status_code == 200: + if not response.text or response.text.isspace(): + logger.debug(f"Empty response body for {url}") + continue + return response.json() + + except Exception as e: + logger.debug(f"Agent card fetch failed for {url}: {e}") + continue + + raise A2AClientError(f"Could not fetch agent card from {base_url}") + + async def _jsonrpc_call( + self, + url: str, + method: str, + params: Dict[str, Any], + auth_token: Optional[str] = None, + ) -> Dict[str, Any]: + """ + Send a JSON-RPC 2.0 request to an agent. + + Args: + url: A2A endpoint URL + method: JSON-RPC method name (e.g., "message/send") + params: Method parameters + auth_token: Optional authentication token + + Returns: + JSON-RPC result + + Raises: + A2AClientError: If request fails or returns error + """ + if not self._client: + raise A2AClientError("Client not initialized. 
Use async context manager.") + + payload = { + "jsonrpc": "2.0", + "id": str(uuid.uuid4()), + "method": method, + "params": params, + } + + try: + response = await self._client.post( + url, + json=payload, + headers=self._auth_headers(auth_token), + ) + response.raise_for_status() + result = response.json() + + if "error" in result: + error = result["error"] + raise A2AClientError( + f"A2A error: {error.get('message', 'Unknown')} " + f"(code: {error.get('code')})" + ) + + return result.get("result", {}) + + except httpx.TimeoutException as e: + raise A2AClientError(f"Request to {url} timed out: {e}") + except httpx.HTTPStatusError as e: + raise A2AClientError( + f"HTTP error from {url}: {e.response.status_code}" + ) + except json.JSONDecodeError as e: + raise A2AClientError(f"Invalid JSON from {url}: {e}") + + async def send_message( + self, + agent_url: str, + message: str, + context_id: Optional[str] = None, + auth_token: Optional[str] = None, + ) -> Dict[str, Any]: + """ + Send a message to a peer agent using A2A protocol. + + Args: + agent_url: Agent's A2A endpoint URL + message: Text message to send + context_id: Optional conversation context ID + auth_token: Optional authentication token + + Returns: + Agent's response + + Example: + >>> async with A2AClient() as client: + >>> response = await client.send_message( + >>> "https://app.databricksapps.com/api/a2a", + >>> "What are your capabilities?" + >>> ) + """ + params: Dict[str, Any] = { + "message": { + "messageId": str(uuid.uuid4()), + "role": "user", + "parts": [{"text": message}], + } + } + if context_id: + params["message"]["contextId"] = context_id + + return await self._jsonrpc_call( + agent_url, "message/send", params, auth_token + ) + + async def send_streaming_message( + self, + agent_url: str, + message: str, + auth_token: Optional[str] = None, + ) -> AsyncIterator[Dict[str, Any]]: + """ + Send a streaming message and yield SSE events. 
+ + Args: + agent_url: Agent's A2A endpoint URL + message: Text message to send + auth_token: Optional authentication token + + Yields: + SSE events from the agent's response stream + + Example: + >>> async with A2AClient() as client: + >>> async for event in client.send_streaming_message(url, "Analyze this"): + >>> print(event) + """ + if not self._client: + raise A2AClientError("Client not initialized. Use async context manager.") + + stream_url = agent_url.rstrip("/") + "/stream" + + payload = { + "jsonrpc": "2.0", + "id": str(uuid.uuid4()), + "method": "message/stream", + "params": { + "message": { + "messageId": str(uuid.uuid4()), + "role": "user", + "parts": [{"text": message}], + } + }, + } + + async with self._client.stream( + "POST", + stream_url, + json=payload, + headers=self._auth_headers(auth_token), + ) as response: + response.raise_for_status() + async for line in response.aiter_lines(): + if line.startswith("data: "): + try: + yield json.loads(line[6:]) + except json.JSONDecodeError: + continue diff --git a/dbx-agent-app/examples/supervisor/agents/compliance/dbx_agent_app/discovery/agent_discovery.py b/dbx-agent-app/examples/supervisor/agents/compliance/dbx_agent_app/discovery/agent_discovery.py new file mode 100644 index 00000000..1563b304 --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/compliance/dbx_agent_app/discovery/agent_discovery.py @@ -0,0 +1,253 @@ +""" +Agent discovery for Databricks Apps. + +Discovers agent-enabled Databricks Apps by scanning workspace apps +and probing for A2A protocol agent cards. 
+""" + +import asyncio +import logging +from typing import List, Dict, Any, Optional +from dataclasses import dataclass + +from databricks.sdk import WorkspaceClient + +from .a2a_client import A2AClient, A2AClientError + +logger = logging.getLogger(__name__) + +# Agent card probe paths and timeout +AGENT_CARD_PATHS = ["/.well-known/agent.json", "/card"] +AGENT_CARD_PROBE_TIMEOUT = 5.0 + + +@dataclass +class DiscoveredAgent: + """ + An agent discovered from a Databricks App. + + Attributes: + name: Agent name (from agent card or app name) + endpoint_url: Agent's base URL + description: Agent description (from agent card) + capabilities: Comma-separated list of capabilities + protocol_version: A2A protocol version + app_name: Name of the backing Databricks App + """ + name: str + endpoint_url: str + app_name: str + description: Optional[str] = None + capabilities: Optional[str] = None + protocol_version: Optional[str] = None + + +@dataclass +class AgentDiscoveryResult: + """ + Results from agent discovery operation. + + Attributes: + agents: List of discovered agents + errors: List of error messages encountered during discovery + """ + agents: List[DiscoveredAgent] + errors: List[str] + + +class AgentDiscovery: + """ + Discovers agent-enabled Databricks Apps in a workspace. + + Scans running Databricks Apps and probes for A2A protocol agent cards + to identify which apps are agents. + + Usage: + discovery = AgentDiscovery(profile="my-profile") + result = await discovery.discover_agents() + for agent in result.agents: + print(f"Found agent: {agent.name} at {agent.endpoint_url}") + """ + + def __init__(self, profile: Optional[str] = None): + """ + Initialize agent discovery. + + Args: + profile: Databricks CLI profile name (uses default if not specified) + """ + self.profile = profile + self._workspace_token: Optional[str] = None + + async def discover_agents(self) -> AgentDiscoveryResult: + """ + Discover all agent-enabled Databricks Apps in the workspace. 
+ + Returns: + AgentDiscoveryResult with discovered agents and any errors + + Example: + >>> discovery = AgentDiscovery(profile="my-profile") + >>> result = await discovery.discover_agents() + >>> print(f"Found {len(result.agents)} agents") + """ + agents: List[DiscoveredAgent] = [] + errors: List[str] = [] + + try: + app_list = await self._list_workspace_apps() + except Exception as e: + logger.error("Workspace app listing failed: %s", e) + return AgentDiscoveryResult( + agents=[], + errors=[f"Failed to list workspace apps: {e}"], + ) + + if not app_list: + return AgentDiscoveryResult(agents=[], errors=[]) + + # Probe each running app for agent card in parallel + probe_tasks = [ + self._probe_app_for_agent(app_info) + for app_info in app_list + if app_info.get("url") + ] + + if probe_tasks: + probe_results = await asyncio.gather( + *probe_tasks, return_exceptions=True + ) + + for result in probe_results: + if isinstance(result, Exception): + errors.append(str(result)) + elif result is not None: + agents.append(result) + + logger.info( + "Agent discovery: %d apps checked, %d agents found", + len(app_list), len(agents) + ) + + return AgentDiscoveryResult(agents=agents, errors=errors) + + async def _list_workspace_apps(self) -> List[Dict[str, Any]]: + """ + Enumerate Databricks Apps in the workspace. 
+ + Returns: + List of running apps with name, url, owner + """ + def _list_sync() -> tuple: + client = ( + WorkspaceClient(profile=self.profile) + if self.profile + else WorkspaceClient() + ) + + # Extract auth token for cross-app requests + auth_headers = client.config.authenticate() + auth_val = auth_headers.get("Authorization", "") + token = auth_val[7:] if auth_val.startswith("Bearer ") else None + + results = [] + for app in client.apps.list(): + # Check if app is running via compute_status or deployment status + compute_state = None + cs = getattr(app, "compute_status", None) + if cs: + compute_state = str(getattr(cs, "state", "")) + + deploy_state = None + dep = getattr(app, "active_deployment", None) + if dep: + dep_status = getattr(dep, "status", None) + if dep_status: + deploy_state = str(getattr(dep_status, "state", "")) + + app_url = getattr(app, "url", None) or "" + app_url = app_url.rstrip("/") if app_url else "" + + results.append({ + "name": app.name, + "url": app_url, + "owner": getattr(app, "creator", None) or getattr(app, "updater", None), + "compute_state": compute_state, + "deploy_state": deploy_state, + }) + + return results, token + + loop = asyncio.get_event_loop() + result_tuple = await loop.run_in_executor(None, _list_sync) + all_apps, workspace_token = result_tuple + + # Store token for probing + self._workspace_token = workspace_token + + # Filter to running apps + running = [ + a for a in all_apps + if a.get("url") and ( + "ACTIVE" in (a.get("compute_state") or "") + or "SUCCEEDED" in (a.get("deploy_state") or "") + ) + ] + + logger.info( + "Workspace apps: %d total, %d running", + len(all_apps), len(running) + ) + + return running + + async def _probe_app_for_agent( + self, + app_info: Dict[str, Any], + ) -> Optional[DiscoveredAgent]: + """ + Probe a Databricks App for an A2A agent card. 
+ + Args: + app_info: App metadata from workspace listing + + Returns: + DiscoveredAgent if agent card found, None otherwise + """ + app_url = app_info["url"] + app_name = app_info["name"] + + token = self._workspace_token + agent_card = None + + try: + logger.debug(f"Probing app '{app_name}' at {app_url}") + async with A2AClient(timeout=AGENT_CARD_PROBE_TIMEOUT) as client: + agent_card = await client.fetch_agent_card(app_url, auth_token=token) + logger.info(f"Found agent card for '{app_name}'") + except A2AClientError as e: + logger.debug(f"No agent card for '{app_name}': {e}") + return None + except Exception as e: + logger.warning(f"Probe failed for '{app_name}': {e}") + return None + + if not agent_card: + return None + + # Extract capabilities + capabilities_list = [] + caps = agent_card.get("capabilities") + if isinstance(caps, dict): + capabilities_list = list(caps.keys()) + elif isinstance(caps, list): + capabilities_list = caps + + return DiscoveredAgent( + name=agent_card.get("name", app_name), + endpoint_url=app_url, + app_name=app_name, + description=agent_card.get("description"), + capabilities=",".join(capabilities_list) if capabilities_list else None, + protocol_version=agent_card.get("protocolVersion"), + ) diff --git a/dbx-agent-app/examples/supervisor/agents/compliance/dbx_agent_app/mcp/__init__.py b/dbx-agent-app/examples/supervisor/agents/compliance/dbx_agent_app/mcp/__init__.py new file mode 100644 index 00000000..60ee38ad --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/compliance/dbx_agent_app/mcp/__init__.py @@ -0,0 +1,11 @@ +""" +Model Context Protocol (MCP) support. + +This module provides utilities for integrating agents with MCP servers +and exposing UC Functions as MCP tools. 
+""" + +from .mcp_server import MCPServer, MCPServerConfig, setup_mcp_server +from .uc_functions import UCFunctionAdapter + +__all__ = ["MCPServer", "MCPServerConfig", "setup_mcp_server", "UCFunctionAdapter"] diff --git a/dbx-agent-app/examples/supervisor/agents/compliance/dbx_agent_app/mcp/mcp_server.py b/dbx-agent-app/examples/supervisor/agents/compliance/dbx_agent_app/mcp/mcp_server.py new file mode 100644 index 00000000..98a456fb --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/compliance/dbx_agent_app/mcp/mcp_server.py @@ -0,0 +1,196 @@ +""" +MCP server implementation for agents. + +Provides an MCP server that exposes agent tools via the Model Context Protocol. +Works with any FastAPI app. +""" + +import json +import logging +from typing import Dict, Any, List, Optional +from dataclasses import dataclass + +from fastapi import Request + +logger = logging.getLogger(__name__) + + +@dataclass +class MCPServerConfig: + """ + Configuration for MCP server. + + Attributes: + name: Server name + version: Server version + description: Server description + """ + name: str + version: str = "1.0.0" + description: str = "MCP server for agent tools" + + +class MCPServer: + """ + MCP server that exposes tools via JSON-RPC. + + Accepts tools as dicts with name, description, parameters, function keys. + """ + + def __init__(self, tools, config: MCPServerConfig): + """ + Initialize MCP server. + + Args: + tools: List of ToolDefinition objects or dicts with name/description/parameters/function + config: MCP server configuration + """ + self._tools = tools + self.config = config + + def setup_routes(self, app): + """ + Set up MCP protocol routes on the FastAPI app. 
+ + Adds: + - POST /api/mcp - MCP JSON-RPC endpoint + - GET /api/mcp/tools - List available tools + """ + + @app.post("/api/mcp") + async def mcp_jsonrpc(request: Request): + """MCP JSON-RPC endpoint.""" + try: + body = await request.json() + method = body.get("method") + params = body.get("params", {}) + request_id = body.get("id") + + if method == "tools/list": + result = await self._list_tools() + elif method == "tools/call": + result = await self._call_tool(params) + elif method == "server/info": + result = self._server_info() + else: + return { + "jsonrpc": "2.0", + "id": request_id, + "error": { + "code": -32601, + "message": f"Method not found: {method}" + } + } + + return { + "jsonrpc": "2.0", + "id": request_id, + "result": result + } + + except Exception as e: + logger.error("MCP request failed: %s", e) + return { + "jsonrpc": "2.0", + "id": body.get("id") if isinstance(body, dict) else None, + "error": { + "code": -32603, + "message": str(e) + } + } + + @app.get("/api/mcp/tools") + async def list_mcp_tools(): + """List available MCP tools.""" + return await self._list_tools() + + def _get_tool_name(self, tool) -> str: + return tool.name if hasattr(tool, "name") else tool["name"] + + def _get_tool_description(self, tool) -> str: + return tool.description if hasattr(tool, "description") else tool["description"] + + def _get_tool_parameters(self, tool) -> Dict[str, Any]: + return tool.parameters if hasattr(tool, "parameters") else tool.get("parameters", {}) + + def _get_tool_function(self, tool): + return tool.function if hasattr(tool, "function") else tool["function"] + + def _server_info(self) -> Dict[str, Any]: + """Get MCP server information.""" + return { + "name": self.config.name, + "version": self.config.version, + "description": self.config.description, + "protocol_version": "1.0", + } + + async def _list_tools(self) -> Dict[str, Any]: + """List all available tools in MCP format.""" + tools = [] + + for tool in self._tools: + mcp_tool = { + "name": 
self._get_tool_name(tool), + "description": self._get_tool_description(tool), + "inputSchema": { + "type": "object", + "properties": {}, + "required": [] + } + } + + for param_name, param_spec in self._get_tool_parameters(tool).items(): + param_type = param_spec.get("type", "string") + mcp_tool["inputSchema"]["properties"][param_name] = { + "type": param_type, + "description": param_spec.get("description", "") + } + if param_spec.get("required", False): + mcp_tool["inputSchema"]["required"].append(param_name) + + tools.append(mcp_tool) + + return {"tools": tools} + + async def _call_tool(self, params: Dict[str, Any]) -> Dict[str, Any]: + """Call a tool via MCP.""" + tool_name = params.get("name") + arguments = params.get("arguments", {}) + + tool_def = None + for tool in self._tools: + if self._get_tool_name(tool) == tool_name: + tool_def = tool + break + + if not tool_def: + raise ValueError(f"Tool not found: {tool_name}") + + try: + result = await self._get_tool_function(tool_def)(**arguments) + return {"result": result} + except Exception as e: + logger.error("Tool execution failed: %s", e) + raise + + +def setup_mcp_server(tools, config: Optional[MCPServerConfig] = None, fastapi_app=None): + """ + Set up MCP server for an agent. 
class UCFunctionAdapter:
    """
    Adapter for Unity Catalog Functions to MCP protocol.

    Discovers UC Functions and converts them to MCP tool format for
    use with agents.

    Usage:
        adapter = UCFunctionAdapter(profile="my-profile")
        tools = adapter.discover_functions(catalog="main", schema="functions")
    """

    def __init__(self, profile: Optional[str] = None):
        """
        Initialize UC Functions adapter.

        Args:
            profile: Databricks CLI profile name (default profile when None)
        """
        self.profile = profile
        # Created lazily so constructing the adapter never touches the network.
        self._client: Optional[WorkspaceClient] = None

    def _get_client(self) -> WorkspaceClient:
        """Get or create the cached workspace client."""
        if self._client is None:
            self._client = (
                WorkspaceClient(profile=self.profile)
                if self.profile
                else WorkspaceClient()
            )
        return self._client

    def discover_functions(
        self,
        catalog: str,
        schema: str,
        name_pattern: Optional[str] = None,
    ) -> List[Dict[str, Any]]:
        """
        Discover UC Functions and convert to MCP tool format.

        Args:
            catalog: UC catalog name
            schema: UC schema name
            name_pattern: Optional name filter. Note: this is a plain
                substring match, not a SQL LIKE pattern.

        Returns:
            List of tool definitions in MCP format

        Example:
            >>> adapter = UCFunctionAdapter()
            >>> tools = adapter.discover_functions("main", "functions")
            >>> for tool in tools:
            ...     print(tool["name"], tool["description"])
        """
        client = self._get_client()
        tools = []

        try:
            functions = client.functions.list(
                catalog_name=catalog,
                schema_name=schema,
            )

            for func in functions:
                # Skip system functions.
                # NOTE(review): func.name is usually the short name, so this
                # "system." prefix check may never match — confirm whether
                # func.full_name was intended.
                if func.name.startswith("system."):
                    continue

                # Apply the (substring) name filter.
                if name_pattern and name_pattern not in func.name:
                    continue

                # Convert to MCP tool format; skip entries that fail to convert.
                tool = self._convert_function_to_tool(func)
                if tool:
                    tools.append(tool)

            logger.info(
                f"Discovered {len(tools)} UC Functions from {catalog}.{schema}"
            )

        except Exception as e:
            # Best-effort discovery: log and return whatever was collected.
            logger.error(f"Failed to discover UC Functions: {e}")

        return tools

    def _convert_function_to_tool(self, func) -> Optional[Dict[str, Any]]:
        """
        Convert a UC Function to MCP tool format.

        Args:
            func: Function info object from the Databricks SDK

        Returns:
            MCP tool definition, or None if conversion fails
        """
        try:
            # Short name (last dotted segment) plus a fallback description.
            name = func.name.split(".")[-1]
            description = func.comment or f"Unity Catalog function: {name}"

            # JSON-Schema skeleton for the tool's input.
            input_schema = {
                "type": "object",
                "properties": {},
                "required": []
            }

            # Map each declared parameter into the schema.
            if hasattr(func, "input_params") and func.input_params:
                for param in func.input_params.parameters:
                    param_name = param.name
                    param_type = self._map_uc_type_to_json_type(param.type_name)

                    input_schema["properties"][param_name] = {
                        "type": param_type,
                        "description": param.comment or ""
                    }

                    # Parameters without a default value are required.
                    if not hasattr(param, "default_value") or param.default_value is None:
                        input_schema["required"].append(param_name)

            return {
                "name": name,
                "description": description,
                "inputSchema": input_schema,
                "full_name": func.full_name,
                "source": "unity_catalog"
            }

        except Exception as e:
            logger.warning(f"Failed to convert function {func.name}: {e}")
            return None

    def _map_uc_type_to_json_type(self, uc_type: str) -> str:
        """
        Map a Unity Catalog data type to a JSON Schema type.

        Args:
            uc_type: UC type name (e.g. "STRING", "BIGINT", "BOOLEAN")

        Returns:
            JSON Schema type ("string", "integer", "number", "boolean",
            "array", or "object"); unknown types fall back to "string".
        """
        type_mapping = {
            "STRING": "string",
            "VARCHAR": "string",
            "CHAR": "string",
            "BIGINT": "integer",
            "INT": "integer",
            "INTEGER": "integer",
            "SMALLINT": "integer",
            "TINYINT": "integer",
            "DOUBLE": "number",
            "FLOAT": "number",
            "DECIMAL": "number",
            "BOOLEAN": "boolean",
            "BINARY": "string",
            "DATE": "string",
            "TIMESTAMP": "string",
            "ARRAY": "array",
            "MAP": "object",
            "STRUCT": "object",
        }

        uc_type_upper = uc_type.upper()
        return type_mapping.get(uc_type_upper, "string")

    async def call_function(
        self,
        full_name: str,
        arguments: Dict[str, Any]
    ) -> Any:
        """
        Call a UC Function with given arguments.

        Args:
            full_name: Full function name (catalog.schema.function)
            arguments: Function arguments

        Returns:
            Scalar result of the function, or None when no rows came back

        Example:
            >>> adapter = UCFunctionAdapter()
            >>> result = await adapter.call_function(
            ...     "main.functions.calculate_tax",
            ...     {"amount": 100, "rate": 0.08}
            ... )
        """
        # Local import keeps module import light; the file already depends
        # on databricks-sdk.
        from databricks.sdk.service.sql import StatementParameterListItem

        client = self._get_client()

        try:
            # Build a SELECT that invokes the function with named markers.
            args_list = [f":{key}" for key in arguments.keys()]
            query = f"SELECT {full_name}({', '.join(args_list)})"

            # Execute via SQL warehouse.
            # Note: this requires a warehouse ID to be configured.
            # Fix: the SDK expects StatementParameterListItem objects for
            # `parameters` — plain dicts fail during request serialization
            # (they lack .as_dict()); this also matches how the sibling
            # expert_finder app builds its parameters.
            result = client.statement_execution.execute_statement(
                statement=query,
                warehouse_id=self._get_default_warehouse(),
                parameters=[
                    StatementParameterListItem(name=key, value=str(value))
                    for key, value in arguments.items()
                ]
            )

            return result.result.data_array[0][0] if result.result.data_array else None

        except Exception as e:
            logger.error(f"Failed to call UC Function {full_name}: {e}")
            raise

    def _get_default_warehouse(self) -> str:
        """Get default SQL warehouse ID from the environment.

        Raises:
            ValueError: When DATABRICKS_WAREHOUSE_ID is not set.
        """
        import os
        warehouse_id = os.getenv("DATABRICKS_WAREHOUSE_ID")
        if not warehouse_id:
            raise ValueError(
                "DATABRICKS_WAREHOUSE_ID not set. "
                "Set this environment variable to use UC Functions."
            )
        return warehouse_id
class UCRegistrationError(Exception):
    """Raised when agent registration in Unity Catalog fails."""


@dataclass
class UCAgentSpec:
    """
    Specification for registering an agent in Unity Catalog.

    Attributes:
        name: Agent name (becomes the catalog object name)
        catalog: UC catalog name
        schema: UC schema name
        endpoint_url: Agent's base URL
        description: Human-readable agent description
        capabilities: List of agent capabilities
        properties: Extra metadata key-value pairs
    """
    name: str
    catalog: str
    schema: str
    endpoint_url: str
    description: Optional[str] = None
    capabilities: Optional[List[str]] = None
    properties: Optional[Dict[str, str]] = None


class UCAgentRegistry:
    """
    Unity Catalog agent registry.

    Registers agents as UC AGENT objects (currently modelled as registered
    models carrying an agent-metadata marker) for catalog-based discovery
    and permission management.

    Usage:
        registry = UCAgentRegistry(profile="my-profile")

        spec = UCAgentSpec(
            name="customer_research",
            catalog="main",
            schema="agents",
            endpoint_url="https://app.databricksapps.com",
            description="Customer research agent",
            capabilities=["search", "analysis"],
        )

        registry.register_agent(spec)
    """

    def __init__(self, profile: Optional[str] = None):
        """
        Initialize UC agent registry.

        Args:
            profile: Databricks CLI profile name (uses default if not specified)
        """
        self.profile = profile
        # Workspace client is created lazily on first use.
        self._client: Optional[WorkspaceClient] = None

    def _get_client(self) -> WorkspaceClient:
        """Create the workspace client on first use and cache it."""
        if self._client is None:
            self._client = (
                WorkspaceClient(profile=self.profile)
                if self.profile
                else WorkspaceClient()
            )
        return self._client

    def register_agent(self, spec: UCAgentSpec) -> Dict[str, Any]:
        """
        Register an agent in Unity Catalog.

        Creates an AGENT object (as a registered model whose comment embeds
        agent metadata) in the specified catalog and schema.

        Args:
            spec: Agent specification

        Returns:
            Dictionary with registration details

        Raises:
            UCRegistrationError: If registration fails

        Example:
            >>> registry = UCAgentRegistry(profile="my-profile")
            >>> spec = UCAgentSpec(
            ...     name="my_agent",
            ...     catalog="main",
            ...     schema="agents",
            ...     endpoint_url="https://app.databricksapps.com",
            ... )
            >>> result = registry.register_agent(spec)
        """
        ws = self._get_client()
        full_name = f"{spec.catalog}.{spec.schema}.{spec.name}"

        try:
            # Assemble the metadata carried on the UC object.
            props = spec.properties or {}
            props["endpoint_url"] = spec.endpoint_url
            props["agent_card_url"] = f"{spec.endpoint_url}/.well-known/agent.json"
            if spec.capabilities:
                props["capabilities"] = ",".join(spec.capabilities)

            logger.info(f"Registering agent '{full_name}' in Unity Catalog")

            # Fail fast when the target catalog or schema is missing/hidden.
            try:
                ws.catalogs.get(spec.catalog)
            except Exception as err:
                raise UCRegistrationError(
                    f"Catalog '{spec.catalog}' does not exist or is not accessible: {err}"
                )

            try:
                ws.schemas.get(f"{spec.catalog}.{spec.schema}")
            except Exception as err:
                raise UCRegistrationError(
                    f"Schema '{spec.catalog}.{spec.schema}' does not exist or is not accessible: {err}"
                )

            # UC has no native AGENT type yet: store the agent as a registered
            # model whose comment embeds a JSON metadata payload after the
            # ---AGENT_META--- marker, used later for discovery.
            meta = {"databricks_agent": True, **props}
            base_comment = spec.description or ""
            comment_with_meta = f"{base_comment}\n---AGENT_META---\n{json.dumps(meta)}"

            # Update-then-create: the model may already exist from a prior
            # deploy, so try update first and fall back to create.
            try:
                ws.registered_models.update(
                    full_name,
                    comment=comment_with_meta,
                )
                logger.info(f"Updated existing agent '{full_name}'")
            except Exception as update_err:
                logger.debug(f"Update failed ({update_err}), trying create")
                try:
                    ws.registered_models.create(
                        name=spec.name,
                        catalog_name=spec.catalog,
                        schema_name=spec.schema,
                        comment=comment_with_meta,
                    )
                    logger.info(f"Created new agent '{full_name}'")
                except Exception as create_err:
                    # The model may exist but be invisible to this service
                    # principal; warn instead of failing in that case.
                    lowered = str(create_err).lower()
                    if "already exists" in lowered or "not a valid name" in lowered:
                        logger.warning(
                            "Agent '%s' exists but SP cannot update it. "
                            "Grant the app's SP ownership or MANAGE on the model.",
                            full_name,
                        )
                    else:
                        raise

            logger.info(f"Successfully registered agent '{full_name}'")

            return {
                "full_name": full_name,
                "catalog": spec.catalog,
                "schema": spec.schema,
                "name": spec.name,
                "endpoint_url": spec.endpoint_url,
                "properties": props,
            }

        except UCRegistrationError:
            raise
        except Exception as err:
            raise UCRegistrationError(
                f"Failed to register agent '{full_name}': {err}"
            ) from err

    @staticmethod
    def _parse_agent_meta(comment: Optional[str]) -> Optional[Dict[str, Any]]:
        """Parse agent metadata from a comment (JSON after ---AGENT_META---)."""
        if not comment or "---AGENT_META---" not in comment:
            return None
        try:
            payload = comment.split("---AGENT_META---", 1)[1]
            return json.loads(payload.strip())
        except (ValueError, json.JSONDecodeError):
            return None

    @staticmethod
    def _clean_description(comment: Optional[str]) -> str:
        """Return the human-readable part of *comment* (before the marker)."""
        if not comment:
            return ""
        head, marker, _ = comment.partition("---AGENT_META---")
        return head.strip() if marker else comment

    def get_agent(self, catalog: str, schema: str, name: str) -> Optional[Dict[str, Any]]:
        """
        Get agent metadata from Unity Catalog.

        Args:
            catalog: UC catalog name
            schema: UC schema name
            name: Agent name

        Returns:
            Agent metadata dictionary, or None when the object is missing
            or is not marked as an agent.
        """
        ws = self._get_client()
        full_name = f"{catalog}.{schema}.{name}"

        try:
            model = ws.registered_models.get(full_name)
            meta = self._parse_agent_meta(model.comment)
            if not meta or not meta.get("databricks_agent"):
                return None

            caps = meta.get("capabilities")
            return {
                "full_name": full_name,
                "catalog": catalog,
                "schema": schema,
                "name": name,
                "description": self._clean_description(model.comment),
                "endpoint_url": meta.get("endpoint_url"),
                "agent_card_url": meta.get("agent_card_url"),
                "capabilities": caps.split(",") if caps else None,
                "properties": meta,
            }

        except Exception as err:
            logger.debug(f"Agent '{full_name}' not found: {err}")
            return None

    def list_agents(
        self,
        catalog: str,
        schema: Optional[str] = None,
    ) -> List[Dict[str, Any]]:
        """
        List all agents in a catalog or schema.

        Args:
            catalog: UC catalog name
            schema: Optional UC schema name (scans all schemas when omitted)

        Returns:
            List of agent metadata dictionaries
        """
        ws = self._get_client()

        # Determine which schemas to scan.
        if schema:
            schemas_to_scan = [schema]
        else:
            try:
                schemas_to_scan = [
                    s.name
                    for s in ws.schemas.list(catalog_name=catalog)
                    if s.name != "information_schema"
                ]
            except Exception as err:
                logger.error(f"Failed to list schemas in {catalog}: {err}")
                return []

        agents: List[Dict[str, Any]] = []
        for schema_name in schemas_to_scan:
            try:
                for model in ws.registered_models.list(
                    catalog_name=catalog, schema_name=schema_name
                ):
                    meta = self._parse_agent_meta(model.comment)
                    if not meta or not meta.get("databricks_agent"):
                        continue

                    caps = meta.get("capabilities")
                    agents.append({
                        "full_name": model.full_name,
                        "catalog": catalog,
                        "schema": schema_name,
                        "name": model.name,
                        "description": self._clean_description(model.comment),
                        "endpoint_url": meta.get("endpoint_url"),
                        "capabilities": caps.split(",") if caps else None,
                    })
            except Exception as err:
                # A single unreadable schema should not abort the whole scan.
                logger.debug(f"Failed to list models in {catalog}.{schema_name}: {err}")
                continue

        return agents

    def delete_agent(self, catalog: str, schema: str, name: str) -> bool:
        """
        Delete an agent from Unity Catalog.

        Args:
            catalog: UC catalog name
            schema: UC schema name
            name: Agent name

        Returns:
            True if deleted, False if not found

        Raises:
            UCRegistrationError: If deletion fails for any other reason
        """
        ws = self._get_client()
        full_name = f"{catalog}.{schema}.{name}"

        try:
            ws.registered_models.delete(full_name)
        except Exception as err:
            if "does not exist" in str(err).lower():
                return False
            raise UCRegistrationError(
                f"Failed to delete agent '{full_name}': {err}"
            ) from err

        logger.info(f"Deleted agent '{full_name}'")
        return True
CATALOG = os.environ.get("UC_CATALOG", "serverless_dxukih_catalog")
SCHEMA = os.environ.get("UC_SCHEMA", "agents")


def _fqn(table: str) -> str:
    """Fully-qualified UC name for *table* in the configured catalog.schema."""
    return f"{CATALOG}.{SCHEMA}.{table}"


def _get_warehouse_id() -> str:
    """Resolve a SQL warehouse id, preferring serverless; caches the result.

    Raises:
        ValueError: When the workspace exposes no SQL warehouse at all.
    """
    global _warehouse_id_cache
    if _warehouse_id_cache:
        return _warehouse_id_cache

    # First choice: any warehouse with serverless compute enabled.
    serverless = next(
        (wh for wh in _workspace.warehouses.list() if wh.enable_serverless_compute),
        None,
    )
    if serverless:
        _warehouse_id_cache = serverless.id
        return serverless.id

    # Fallback: the first warehouse of any kind.
    fallback = next(iter(_workspace.warehouses.list()), None)
    if fallback:
        _warehouse_id_cache = fallback.id
        return fallback.id

    raise ValueError("No SQL warehouse available")


def _execute_sql(statement, parameters=None):
    """Execute SQL and return (result, trace_entry).

    Accepts parameters either as plain dicts ({"name", "value"}) or as
    already-built StatementParameterListItem objects.
    """
    raw_params = parameters or []
    params = [
        StatementParameterListItem(name=p["name"], value=p["value"])
        if isinstance(p, dict) else p
        for p in raw_params
    ]

    wh_id = _get_warehouse_id()
    started = time.monotonic()
    result = _workspace.statement_execution.execute_statement(
        warehouse_id=wh_id, statement=statement,
        parameters=params, wait_timeout="50s",
    )
    duration_ms = round((time.monotonic() - started) * 1000, 1)

    # Summarize the result shape for the trace entry.
    has_rows = bool(result.result and result.result.data_array)
    row_count = len(result.result.data_array) if has_rows else 0

    columns = []
    manifest = result.manifest
    if manifest and manifest.schema and manifest.schema.columns:
        for col in manifest.schema.columns:
            tname = col.type_name
            columns.append({
                "name": col.name,
                "type": str(tname.value) if hasattr(tname, "value") else str(tname),
            })

    trace = {
        # Whitespace-normalized so multi-line SQL reads as one line.
        "statement": " ".join(statement.split()),
        "parameters": [
            {"name": p["name"], "value": p["value"]} if isinstance(p, dict)
            else {"name": p.name, "value": p.value}
            for p in raw_params
        ],
        "row_count": row_count,
        "columns": columns,
        "duration_ms": duration_ms,
        "warehouse_id": wh_id,
    }
    return result, trace
"to","what","who","how","can","this","that","find","check","show", + "me","my","about","with","and","or","all","get","list","tell"} + return [w for w in query.lower().split() if w not in stop and len(w) > 1] + + +def _build_like_clauses(column: str, keywords: list[str], prefix: str = ""): + col_tag = prefix or column.replace(".", "_") + clauses, params = [], [] + for i, kw in enumerate(keywords): + name = f"{col_tag}_{i}" + clauses.append(f"LOWER({column}) LIKE :{name}") + params.append({"name": name, "value": f"%{kw}%"}) + return " OR ".join(clauses) if clauses else "FALSE", params + + +# --------------------------------------------------------------------------- +# Demo fallback +# --------------------------------------------------------------------------- + +def _demo_response(query: str) -> dict: + return { + "response": f"""**Found 5 experts for "{query}":** + +**1. Dr. Sarah Chen** - Healthcare Technology + - Relevance: 94% + - 23 interviews | Rating: 4.9 + +**2. Michael Torres** - Supply Chain Analytics + - Relevance: 89% + - 18 interviews | Rating: 4.8 + +*Demo fallback -- UC tables not available*""", + "data_source": "demo_fallback", + "tables_accessed": [_fqn("experts")], + "keywords_extracted": _extract_keywords(query), + "sql_queries": [], + "timing": {"sql_total_ms": 0, "total_ms": 0}, + } + + +# --------------------------------------------------------------------------- +# Agent + Tool +# --------------------------------------------------------------------------- + +@app_agent( + name="sub_expert_finder", + description="Find experts who have knowledge on specific topics", + capabilities=["expert_search", "expert_matching"], + uc_catalog=os.environ.get("UC_CATALOG", "main"), + uc_schema=os.environ.get("UC_SCHEMA", "agents"), + enable_mcp=True, +) +async def sub_expert_finder(request: AgentRequest) -> dict: + """Delegate to search tool.""" + return await search(request.last_user_message) + + +@sub_expert_finder.tool(description="Find experts who have 
@app_agent(
    name="sub_expert_finder",
    description="Find experts who have knowledge on specific topics",
    capabilities=["expert_search", "expert_matching"],
    uc_catalog=os.environ.get("UC_CATALOG", "main"),
    uc_schema=os.environ.get("UC_SCHEMA", "agents"),
    enable_mcp=True,
)
async def sub_expert_finder(request: AgentRequest) -> dict:
    """Delegate the incoming user message to the search tool."""
    return await search(request.last_user_message)


@sub_expert_finder.tool(description="Find experts who have knowledge on specific topics")
async def search(query: str) -> dict:
    """
    Find experts by topic, specialty, or name.

    Args:
        query: Topic or expertise to search for

    Returns:
        Structured result with ranked experts, metadata, and SQL traces;
        falls back to a canned demo payload if the SQL query fails.
    """
    started = time.monotonic()
    sql_traces = []

    try:
        # Keyword extraction; fall back to the whole query if everything
        # was filtered out as stop words.
        keywords = _extract_keywords(query) or [query.lower()]

        # Match keywords against topics, specialty, and name columns.
        topics_clause, topics_params = _build_like_clauses("topics", keywords)
        spec_clause, spec_params = _build_like_clauses("specialty", keywords)
        name_clause, name_params = _build_like_clauses("name", keywords)

        result, trace = _execute_sql(
            f"""
            SELECT expert_id, name, specialty, interview_count, rating, topics, bio, region
            FROM {_fqn('experts')}
            WHERE {topics_clause} OR {spec_clause} OR {name_clause}
            ORDER BY rating DESC
            LIMIT 5
            """,
            topics_params + spec_params + name_params,
        )
        sql_traces.append(trace)

        def _format_row(idx, row):
            # Render a single expert row as a markdown bullet block.
            _eid, name, spec, count, rating, topics, bio, region = row
            lines = f"**{idx}. {name}** -- {spec}\n"
            lines += f" - Relevance topics: {topics}\n"
            lines += f" - {count} interviews | Rating: {float(rating):.1f} | Region: {region}\n"
            lines += f" - {bio[:150]}{'...' if len(str(bio)) > 150 else ''}\n\n"
            return lines

        if not result.result or not result.result.data_array:
            text = f'No experts found matching "{query}".'
        else:
            rows = result.result.data_array
            text = f'**Found {len(rows)} experts for "{query}":**\n\n'
            for idx, row in enumerate(rows, 1):
                text += _format_row(idx, row)
            text += f"\n*Data source: {_fqn('experts')}*"

        total_ms = round((time.monotonic() - started) * 1000, 1)
        sql_total_ms = sum(t["duration_ms"] for t in sql_traces)

        return {
            "response": text,
            "data_source": "live",
            "tables_accessed": [_fqn("experts")],
            "keywords_extracted": keywords,
            "sql_queries": sql_traces,
            "timing": {"sql_total_ms": sql_total_ms, "total_ms": total_ms},
        }

    except Exception as e:
        # Degrade gracefully to the demo payload rather than erroring out.
        logger.error("SQL query failed for expert_finder: %s", e, exc_info=True)
        return _demo_response(query)


app = sub_expert_finder.app
+ +This package provides: +- @app_agent: Decorator to turn an async function into a discoverable agent +- AgentDiscovery: Discover agents in your Databricks workspace +- A2AClient: Communicate with agents using the A2A protocol +- UCAgentRegistry: Register agents in Unity Catalog +- MCPServerConfig: Configure MCP server for agent tools +""" + +from .discovery import AgentDiscovery, DiscoveredAgent, AgentDiscoveryResult, A2AClient, A2AClientError +from .mcp import MCPServerConfig, setup_mcp_server, UCFunctionAdapter +from .registry import UCAgentRegistry, UCAgentSpec, UCRegistrationError +from .dashboard import create_dashboard_app + +try: + from importlib.metadata import version + __version__ = version("dbx-agent-app") +except Exception: + __version__ = "0.1.0" + +__all__ = [ + # Discovery + "AgentDiscovery", + "DiscoveredAgent", + "AgentDiscoveryResult", + "A2AClient", + "A2AClientError", + # Registry + "UCAgentRegistry", + "UCAgentSpec", + "UCRegistrationError", + # MCP + "MCPServerConfig", + "setup_mcp_server", + "UCFunctionAdapter", + # Dashboard + "create_dashboard_app", +] + diff --git a/dbx-agent-app/examples/supervisor/agents/expert_finder/dbx_agent_app/core/__init__.py b/dbx-agent-app/examples/supervisor/agents/expert_finder/dbx_agent_app/core/__init__.py new file mode 100644 index 00000000..623da88c --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/expert_finder/dbx_agent_app/core/__init__.py @@ -0,0 +1 @@ +"""Core agent application components.""" diff --git a/dbx-agent-app/examples/supervisor/agents/expert_finder/dbx_agent_app/dashboard/__init__.py b/dbx-agent-app/examples/supervisor/agents/expert_finder/dbx_agent_app/dashboard/__init__.py new file mode 100644 index 00000000..89929a08 --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/expert_finder/dbx_agent_app/dashboard/__init__.py @@ -0,0 +1,14 @@ +""" +Developer dashboard for agent discovery. 
def create_dashboard_app(
    scanner: DashboardScanner,
    profile: Optional[str] = None,
) -> FastAPI:
    """Build and return the dashboard FastAPI app.

    Args:
        scanner: Shared DashboardScanner holding the cached agent list.
        profile: Databricks CLI profile name, echoed by /health.

    Returns:
        Configured FastAPI application (interactive docs disabled).
    """
    app = FastAPI(title="dbx-agent-app dashboard", docs_url=None, redoc_url=None)

    # --- HTML pages -------------------------------------------------------

    @app.get("/", response_class=HTMLResponse)
    async def index():
        return render_agent_list(scanner.get_agents())

    @app.get("/agent/{name}", response_class=HTMLResponse)
    async def agent_detail(name: str):
        found = scanner.get_agent_by_name(name)
        if not found:
            # NOTE(review): the original 404 markup was garbled in extraction;
            # a plain heading is reconstructed here — confirm against source.
            return HTMLResponse("<h1>Agent not found</h1>", status_code=404)

        # Card fetch is best-effort: the detail page renders without it.
        card = None
        try:
            card = await scanner.get_agent_card(found.endpoint_url)
        except Exception as e:
            logger.warning("Could not fetch card for %s: %s", name, e)

        return render_agent_detail(found, card)

    # --- JSON API ---------------------------------------------------------

    @app.get("/api/agents")
    async def api_agents():
        return [
            {
                "name": a.name,
                "endpoint_url": a.endpoint_url,
                "app_name": a.app_name,
                "description": a.description,
                "capabilities": a.capabilities,
                "protocol_version": a.protocol_version,
            }
            for a in scanner.get_agents()
        ]

    @app.get("/api/agents/{name}/card")
    async def api_agent_card(name: str):
        found = scanner.get_agent_by_name(name)
        if not found:
            return JSONResponse({"error": "Agent not found"}, status_code=404)
        try:
            return await scanner.get_agent_card(found.endpoint_url)
        except Exception as e:
            # Upstream agent unreachable or returned garbage.
            return JSONResponse({"error": str(e)}, status_code=502)

    @app.post("/api/agents/{name}/mcp")
    async def api_mcp_proxy(name: str, request: Request):
        found = scanner.get_agent_by_name(name)
        if not found:
            return JSONResponse({"error": "Agent not found"}, status_code=404)
        try:
            payload = await request.json()
            return await scanner.proxy_mcp(found.endpoint_url, payload)
        except Exception as e:
            # Wrap transport failures in a JSON-RPC error envelope.
            return JSONResponse(
                {"jsonrpc": "2.0", "id": None, "error": {"code": -1, "message": str(e)}},
                status_code=502,
            )

    @app.post("/api/scan")
    async def api_scan():
        refreshed = await scanner.scan()
        return {"count": len(refreshed), "agents": [a.name for a in refreshed]}

    @app.get("/health")
    async def health():
        return {
            "status": "ok",
            "agents_cached": len(scanner.get_agents()),
            "profile": profile,
        }

    return app
def main():
    """CLI entry point: parse arguments and launch the dashboard server."""
    parser = argparse.ArgumentParser(
        prog="dbx-agent-app",
        description="Developer dashboard for Databricks agent discovery",
    )
    sub = parser.add_subparsers(dest="command")

    dash = sub.add_parser("dashboard", help="Launch the agent discovery dashboard")
    dash.add_argument("--profile", type=str, default=None, help="Databricks CLI profile")
    dash.add_argument("--port", type=int, default=8501, help="Port to serve on (default: 8501)")
    dash.add_argument("--host", type=str, default="127.0.0.1", help="Host to bind (default: 127.0.0.1)")
    dash.add_argument("--no-browser", action="store_true", help="Don't auto-open browser")

    args = parser.parse_args()
    if args.command != "dashboard":
        parser.print_help()
        sys.exit(1)

    logging.basicConfig(level=logging.INFO, format="%(levelname)s %(name)s: %(message)s")

    scanner = DashboardScanner(profile=args.profile)

    # Initial synchronous scan; failure is non-fatal because the UI has a
    # Scan button to retry later.
    print(f"Scanning workspace for agents (profile={args.profile or 'default'})...")
    try:
        found = asyncio.run(scanner.scan())
        print(f"Found {len(found)} agent(s)")
    except Exception as e:
        print(f"Initial scan failed: {e}", file=sys.stderr)
        print("Dashboard will start anyway — use the Scan button to retry.")

    dashboard = create_dashboard_app(scanner, profile=args.profile)

    url = f"http://{args.host}:{args.port}"
    if not args.no_browser:
        webbrowser.open(url)

    print(f"Dashboard running at {url}")
    uvicorn.run(dashboard, host=args.host, port=args.port, log_level="warning")


if __name__ == "__main__":
    main()
"__main__": + main() diff --git a/dbx-agent-app/examples/supervisor/agents/expert_finder/dbx_agent_app/dashboard/scanner.py b/dbx-agent-app/examples/supervisor/agents/expert_finder/dbx_agent_app/dashboard/scanner.py new file mode 100644 index 00000000..475460be --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/expert_finder/dbx_agent_app/dashboard/scanner.py @@ -0,0 +1,81 @@ +""" +Dashboard scanner — wraps AgentDiscovery + A2AClient with caching and MCP proxy. +""" + +import asyncio +import logging +from typing import Dict, Any, List, Optional + +import httpx + +from ..discovery import AgentDiscovery, DiscoveredAgent, A2AClient, A2AClientError + +logger = logging.getLogger(__name__) + + +class DashboardScanner: + """ + Thin wrapper around AgentDiscovery that adds result caching + and MCP JSON-RPC proxying for the dashboard UI. + """ + + def __init__(self, profile: Optional[str] = None): + self._discovery = AgentDiscovery(profile=profile) + self._agents: List[DiscoveredAgent] = [] + self._scan_lock = asyncio.Lock() + self._scanned = False + + async def scan(self) -> List[DiscoveredAgent]: + """Run workspace discovery and cache results. 
Thread-safe via asyncio.Lock.""" + async with self._scan_lock: + result = await self._discovery.discover_agents() + self._agents = result.agents + self._scanned = True + if result.errors: + for err in result.errors: + logger.warning("Discovery error: %s", err) + return self._agents + + def get_agents(self) -> List[DiscoveredAgent]: + """Return cached agent list from the last scan.""" + return list(self._agents) + + def get_agent_by_name(self, name: str) -> Optional[DiscoveredAgent]: + """Look up a cached agent by name.""" + for agent in self._agents: + if agent.name == name or agent.app_name == name: + return agent + return None + + @property + def workspace_token(self) -> Optional[str]: + """Auth token extracted during discovery, used for cross-app requests.""" + return self._discovery._workspace_token + + async def get_agent_card(self, endpoint_url: str) -> Dict[str, Any]: + """Fetch the full agent card JSON from a remote agent.""" + async with A2AClient(timeout=10.0) as client: + return await client.fetch_agent_card( + endpoint_url, auth_token=self.workspace_token + ) + + async def proxy_mcp(self, endpoint_url: str, payload: Dict[str, Any]) -> Dict[str, Any]: + """ + Forward a JSON-RPC request to an agent's MCP endpoint. 
+ + Args: + endpoint_url: Agent base URL + payload: Complete JSON-RPC 2.0 request body + + Returns: + JSON-RPC response from the agent + """ + mcp_url = endpoint_url.rstrip("/") + "/api/mcp" + headers = {"Content-Type": "application/json"} + if self.workspace_token: + headers["Authorization"] = f"Bearer {self.workspace_token}" + + async with httpx.AsyncClient(timeout=30.0, follow_redirects=True) as http: + response = await http.post(mcp_url, json=payload, headers=headers) + response.raise_for_status() + return response.json() diff --git a/dbx-agent-app/examples/supervisor/agents/expert_finder/dbx_agent_app/dashboard/templates.py b/dbx-agent-app/examples/supervisor/agents/expert_finder/dbx_agent_app/dashboard/templates.py new file mode 100644 index 00000000..8543d75d --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/expert_finder/dbx_agent_app/dashboard/templates.py @@ -0,0 +1,278 @@ +""" +Server-rendered HTML templates for the dashboard. + +Pure Python functions returning HTML strings — no Jinja2, no React, no build step. +""" + +import html +import json +from typing import List, Dict, Any, Optional + +from ..discovery import DiscoveredAgent + + +# --------------------------------------------------------------------------- +# Base layout +# --------------------------------------------------------------------------- + +def render_base(title: str, content: str) -> str: + """HTML shell with inline CSS (dark theme).""" + return f""" + + + + +{html.escape(title)} + + + +
+
+

dbx-agent-app dashboard

+ +
+
+
+{content} +
+ +""" + + +# --------------------------------------------------------------------------- +# Agent list page +# --------------------------------------------------------------------------- + +def render_agent_list(agents: List[DiscoveredAgent]) -> str: + """Main page: grid of agent cards + scan button.""" + if not agents: + cards_html = """ +
+

No agents discovered

+

No agent-enabled Databricks Apps found in your workspace. Click Scan to retry.

+
""" + else: + cards = [] + for a in agents: + caps = "" + if a.capabilities: + badges = "".join( + f'{html.escape(c.strip())} ' + for c in a.capabilities.split(",") + ) + caps = f'
{badges}
' + + desc = html.escape(a.description or "No description") + cards.append(f""" + +
+

{html.escape(a.name)}

+

{desc}

+
+ App: {html.escape(a.app_name)} + {f'Protocol: {html.escape(a.protocol_version)}' if a.protocol_version else ''} +
+ {caps} +
+
""") + cards_html = f'
{"".join(cards)}
' + + return render_base( + "Agent Dashboard", + f""" +
+ {len(agents)} agent{"s" if len(agents) != 1 else ""} discovered + +
+{cards_html} +""", + ) + + +# --------------------------------------------------------------------------- +# Agent detail page +# --------------------------------------------------------------------------- + +def render_agent_detail( + agent: DiscoveredAgent, + card: Optional[Dict[str, Any]] = None, +) -> str: + """Detail page: agent card JSON, tools list, MCP test panel.""" + card_json = json.dumps(card, indent=2) if card else "Card not available" + + # Extract tools from card if present + tools_html = "" + if card: + skills = card.get("skills") or card.get("tools") or [] + if skills: + rows = [] + for t in skills: + name = html.escape(t.get("name", t.get("id", "unknown"))) + desc = html.escape(t.get("description", "")) + rows.append( + f'
{name}' + f'
{desc}
' + ) + tools_html = f""" +
+

Tools ({len(skills)})

+ {"".join(rows)} +
""" + + safe_name = html.escape(agent.name) + safe_endpoint = html.escape(agent.endpoint_url) + + return render_base( + f"{safe_name} — Agent Dashboard", + f""" +
+ ← All agents +

{safe_name}

+

{html.escape(agent.description or 'No description')}

+
+ Endpoint: {safe_endpoint} + App: {html.escape(agent.app_name)} + {f'{html.escape(agent.protocol_version)}' if agent.protocol_version else ''} +
+
+ +
+

Agent Card

+
{html.escape(card_json)}
+
+ +{tools_html} + +
+

MCP Test Panel

+

+ Send a JSON-RPC request to this agent's /api/mcp endpoint. +

+
+ + +
+ + +
+
+ +""", + ) diff --git a/dbx-agent-app/examples/supervisor/agents/expert_finder/dbx_agent_app/discovery/__init__.py b/dbx-agent-app/examples/supervisor/agents/expert_finder/dbx_agent_app/discovery/__init__.py new file mode 100644 index 00000000..d6d04008 --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/expert_finder/dbx_agent_app/discovery/__init__.py @@ -0,0 +1,24 @@ +""" +Agent discovery for Databricks Apps. + +This module provides clients and utilities for discovering agent-enabled +Databricks Apps that expose A2A protocol agent cards. +""" + +from .agent_discovery import ( + AgentDiscovery, + DiscoveredAgent, + AgentDiscoveryResult, +) +from .a2a_client import ( + A2AClient, + A2AClientError, +) + +__all__ = [ + "AgentDiscovery", + "DiscoveredAgent", + "AgentDiscoveryResult", + "A2AClient", + "A2AClientError", +] diff --git a/dbx-agent-app/examples/supervisor/agents/expert_finder/dbx_agent_app/discovery/a2a_client.py b/dbx-agent-app/examples/supervisor/agents/expert_finder/dbx_agent_app/discovery/a2a_client.py new file mode 100644 index 00000000..1243d1a3 --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/expert_finder/dbx_agent_app/discovery/a2a_client.py @@ -0,0 +1,268 @@ +""" +A2A Client for agent-to-agent communication. + +Implements the A2A protocol for discovering and communicating with peer agents. +""" + +import json +import uuid +import logging +from typing import Dict, Any, Optional, AsyncIterator + +import httpx + +logger = logging.getLogger(__name__) + + +class A2AClientError(Exception): + """Raised when an A2A operation fails.""" + pass + + +class A2AClient: + """ + Async client for A2A protocol communication with peer agents. + + Usage: + async with A2AClient() as client: + card = await client.fetch_agent_card("https://app.databricksapps.com") + result = await client.send_message("https://app.databricksapps.com/api/a2a", "Hello") + """ + + def __init__(self, timeout: float = 60.0): + """ + Initialize A2A client. 
class A2AClient:
    """
    Async client for A2A protocol communication with peer agents.

    Usage:
        async with A2AClient() as client:
            card = await client.fetch_agent_card("https://app.databricksapps.com")
            result = await client.send_message("https://app.databricksapps.com/api/a2a", "Hello")
    """

    def __init__(self, timeout: float = 60.0):
        """
        Initialize A2A client.

        Args:
            timeout: Request timeout in seconds
        """
        self.timeout = timeout
        # Created lazily by the async context manager.
        self._client: Optional[httpx.AsyncClient] = None

    async def __aenter__(self):
        self._client = httpx.AsyncClient(
            timeout=self.timeout,
            follow_redirects=True,
        )
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        if self._client:
            await self._client.aclose()

    def _auth_headers(self, auth_token: Optional[str] = None) -> Dict[str, str]:
        """Build JSON + optional bearer-auth request headers."""
        headers = {"Content-Type": "application/json"}
        if auth_token:
            headers["Authorization"] = f"Bearer {auth_token}"
        return headers

    async def fetch_agent_card(
        self,
        base_url: str,
        auth_token: Optional[str] = None,
    ) -> Dict[str, Any]:
        """
        Fetch an agent's A2A protocol agent card.

        Probes /.well-known/agent.json first, then /card. Redirect responses
        are treated as OAuth login flows and skipped rather than followed.

        Args:
            base_url: Base URL of the agent application
            auth_token: Optional OAuth token for authenticated requests

        Returns:
            Agent card JSON data

        Raises:
            A2AClientError: If no path yields a usable agent card
        """
        if not self._client:
            raise A2AClientError("Client not initialized. Use async context manager.")

        headers = {}
        if auth_token:
            headers["Authorization"] = f"Bearer {auth_token}"

        # Probe with redirects disabled so an OAuth bounce is detectable
        # instead of being silently followed to a login page.
        async with httpx.AsyncClient(timeout=self.timeout, follow_redirects=False) as probe_client:
            for path in ("/.well-known/agent.json", "/card"):
                url = base_url.rstrip("/") + path
                try:
                    response = await probe_client.get(url, headers=headers)

                    if response.status_code in (301, 302, 303, 307, 308):
                        logger.debug(f"OAuth redirect detected for {url}")
                        continue
                    if response.status_code != 200:
                        continue
                    if not response.text or response.text.isspace():
                        logger.debug(f"Empty response body for {url}")
                        continue
                    return response.json()
                except Exception as e:
                    logger.debug(f"Agent card fetch failed for {url}: {e}")

        raise A2AClientError(f"Could not fetch agent card from {base_url}")

    async def _jsonrpc_call(
        self,
        url: str,
        method: str,
        params: Dict[str, Any],
        auth_token: Optional[str] = None,
    ) -> Dict[str, Any]:
        """
        Send a JSON-RPC 2.0 request to an agent.

        Args:
            url: A2A endpoint URL
            method: JSON-RPC method name (e.g., "message/send")
            params: Method parameters
            auth_token: Optional authentication token

        Returns:
            JSON-RPC result

        Raises:
            A2AClientError: On transport failure, HTTP error, bad JSON,
                or a JSON-RPC error response
        """
        if not self._client:
            raise A2AClientError("Client not initialized. Use async context manager.")

        request_body = {
            "jsonrpc": "2.0",
            "id": str(uuid.uuid4()),
            "method": method,
            "params": params,
        }

        try:
            response = await self._client.post(
                url,
                json=request_body,
                headers=self._auth_headers(auth_token),
            )
            response.raise_for_status()
            parsed = response.json()
        except httpx.TimeoutException as e:
            raise A2AClientError(f"Request to {url} timed out: {e}")
        except httpx.HTTPStatusError as e:
            raise A2AClientError(
                f"HTTP error from {url}: {e.response.status_code}"
            )
        except json.JSONDecodeError as e:
            raise A2AClientError(f"Invalid JSON from {url}: {e}")

        if "error" in parsed:
            error = parsed["error"]
            raise A2AClientError(
                f"A2A error: {error.get('message', 'Unknown')} "
                f"(code: {error.get('code')})"
            )

        return parsed.get("result", {})

    async def send_message(
        self,
        agent_url: str,
        message: str,
        context_id: Optional[str] = None,
        auth_token: Optional[str] = None,
    ) -> Dict[str, Any]:
        """
        Send a message to a peer agent using A2A protocol.

        Args:
            agent_url: Agent's A2A endpoint URL
            message: Text message to send
            context_id: Optional conversation context ID
            auth_token: Optional authentication token

        Returns:
            Agent's response

        Example:
            >>> async with A2AClient() as client:
            >>>     response = await client.send_message(
            >>>         "https://app.databricksapps.com/api/a2a",
            >>>         "What are your capabilities?"
            >>>     )
        """
        msg: Dict[str, Any] = {
            "messageId": str(uuid.uuid4()),
            "role": "user",
            "parts": [{"text": message}],
        }
        if context_id:
            msg["contextId"] = context_id

        return await self._jsonrpc_call(
            agent_url, "message/send", {"message": msg}, auth_token
        )

    async def send_streaming_message(
        self,
        agent_url: str,
        message: str,
        auth_token: Optional[str] = None,
    ) -> AsyncIterator[Dict[str, Any]]:
        """
        Send a streaming message and yield SSE events.

        Args:
            agent_url: Agent's A2A endpoint URL
            message: Text message to send
            auth_token: Optional authentication token

        Yields:
            SSE events from the agent's response stream

        Example:
            >>> async with A2AClient() as client:
            >>>     async for event in client.send_streaming_message(url, "Analyze this"):
            >>>         print(event)
        """
        if not self._client:
            raise A2AClientError("Client not initialized. Use async context manager.")

        stream_url = agent_url.rstrip("/") + "/stream"

        request_body = {
            "jsonrpc": "2.0",
            "id": str(uuid.uuid4()),
            "method": "message/stream",
            "params": {
                "message": {
                    "messageId": str(uuid.uuid4()),
                    "role": "user",
                    "parts": [{"text": message}],
                }
            },
        }

        async with self._client.stream(
            "POST",
            stream_url,
            json=request_body,
            headers=self._auth_headers(auth_token),
        ) as response:
            response.raise_for_status()
            async for line in response.aiter_lines():
                # Only SSE data lines carry payloads; skip everything else.
                if not line.startswith("data: "):
                    continue
                try:
                    yield json.loads(line[6:])
                except json.JSONDecodeError:
                    continue
+""" + +import asyncio +import logging +from typing import List, Dict, Any, Optional +from dataclasses import dataclass + +from databricks.sdk import WorkspaceClient + +from .a2a_client import A2AClient, A2AClientError + +logger = logging.getLogger(__name__) + +# Agent card probe paths and timeout +AGENT_CARD_PATHS = ["/.well-known/agent.json", "/card"] +AGENT_CARD_PROBE_TIMEOUT = 5.0 + + +@dataclass +class DiscoveredAgent: + """ + An agent discovered from a Databricks App. + + Attributes: + name: Agent name (from agent card or app name) + endpoint_url: Agent's base URL + description: Agent description (from agent card) + capabilities: Comma-separated list of capabilities + protocol_version: A2A protocol version + app_name: Name of the backing Databricks App + """ + name: str + endpoint_url: str + app_name: str + description: Optional[str] = None + capabilities: Optional[str] = None + protocol_version: Optional[str] = None + + +@dataclass +class AgentDiscoveryResult: + """ + Results from agent discovery operation. + + Attributes: + agents: List of discovered agents + errors: List of error messages encountered during discovery + """ + agents: List[DiscoveredAgent] + errors: List[str] + + +class AgentDiscovery: + """ + Discovers agent-enabled Databricks Apps in a workspace. + + Scans running Databricks Apps and probes for A2A protocol agent cards + to identify which apps are agents. + + Usage: + discovery = AgentDiscovery(profile="my-profile") + result = await discovery.discover_agents() + for agent in result.agents: + print(f"Found agent: {agent.name} at {agent.endpoint_url}") + """ + + def __init__(self, profile: Optional[str] = None): + """ + Initialize agent discovery. + + Args: + profile: Databricks CLI profile name (uses default if not specified) + """ + self.profile = profile + self._workspace_token: Optional[str] = None + + async def discover_agents(self) -> AgentDiscoveryResult: + """ + Discover all agent-enabled Databricks Apps in the workspace. 
+ + Returns: + AgentDiscoveryResult with discovered agents and any errors + + Example: + >>> discovery = AgentDiscovery(profile="my-profile") + >>> result = await discovery.discover_agents() + >>> print(f"Found {len(result.agents)} agents") + """ + agents: List[DiscoveredAgent] = [] + errors: List[str] = [] + + try: + app_list = await self._list_workspace_apps() + except Exception as e: + logger.error("Workspace app listing failed: %s", e) + return AgentDiscoveryResult( + agents=[], + errors=[f"Failed to list workspace apps: {e}"], + ) + + if not app_list: + return AgentDiscoveryResult(agents=[], errors=[]) + + # Probe each running app for agent card in parallel + probe_tasks = [ + self._probe_app_for_agent(app_info) + for app_info in app_list + if app_info.get("url") + ] + + if probe_tasks: + probe_results = await asyncio.gather( + *probe_tasks, return_exceptions=True + ) + + for result in probe_results: + if isinstance(result, Exception): + errors.append(str(result)) + elif result is not None: + agents.append(result) + + logger.info( + "Agent discovery: %d apps checked, %d agents found", + len(app_list), len(agents) + ) + + return AgentDiscoveryResult(agents=agents, errors=errors) + + async def _list_workspace_apps(self) -> List[Dict[str, Any]]: + """ + Enumerate Databricks Apps in the workspace. 
+ + Returns: + List of running apps with name, url, owner + """ + def _list_sync() -> tuple: + client = ( + WorkspaceClient(profile=self.profile) + if self.profile + else WorkspaceClient() + ) + + # Extract auth token for cross-app requests + auth_headers = client.config.authenticate() + auth_val = auth_headers.get("Authorization", "") + token = auth_val[7:] if auth_val.startswith("Bearer ") else None + + results = [] + for app in client.apps.list(): + # Check if app is running via compute_status or deployment status + compute_state = None + cs = getattr(app, "compute_status", None) + if cs: + compute_state = str(getattr(cs, "state", "")) + + deploy_state = None + dep = getattr(app, "active_deployment", None) + if dep: + dep_status = getattr(dep, "status", None) + if dep_status: + deploy_state = str(getattr(dep_status, "state", "")) + + app_url = getattr(app, "url", None) or "" + app_url = app_url.rstrip("/") if app_url else "" + + results.append({ + "name": app.name, + "url": app_url, + "owner": getattr(app, "creator", None) or getattr(app, "updater", None), + "compute_state": compute_state, + "deploy_state": deploy_state, + }) + + return results, token + + loop = asyncio.get_event_loop() + result_tuple = await loop.run_in_executor(None, _list_sync) + all_apps, workspace_token = result_tuple + + # Store token for probing + self._workspace_token = workspace_token + + # Filter to running apps + running = [ + a for a in all_apps + if a.get("url") and ( + "ACTIVE" in (a.get("compute_state") or "") + or "SUCCEEDED" in (a.get("deploy_state") or "") + ) + ] + + logger.info( + "Workspace apps: %d total, %d running", + len(all_apps), len(running) + ) + + return running + + async def _probe_app_for_agent( + self, + app_info: Dict[str, Any], + ) -> Optional[DiscoveredAgent]: + """ + Probe a Databricks App for an A2A agent card. 
+ + Args: + app_info: App metadata from workspace listing + + Returns: + DiscoveredAgent if agent card found, None otherwise + """ + app_url = app_info["url"] + app_name = app_info["name"] + + token = self._workspace_token + agent_card = None + + try: + logger.debug(f"Probing app '{app_name}' at {app_url}") + async with A2AClient(timeout=AGENT_CARD_PROBE_TIMEOUT) as client: + agent_card = await client.fetch_agent_card(app_url, auth_token=token) + logger.info(f"Found agent card for '{app_name}'") + except A2AClientError as e: + logger.debug(f"No agent card for '{app_name}': {e}") + return None + except Exception as e: + logger.warning(f"Probe failed for '{app_name}': {e}") + return None + + if not agent_card: + return None + + # Extract capabilities + capabilities_list = [] + caps = agent_card.get("capabilities") + if isinstance(caps, dict): + capabilities_list = list(caps.keys()) + elif isinstance(caps, list): + capabilities_list = caps + + return DiscoveredAgent( + name=agent_card.get("name", app_name), + endpoint_url=app_url, + app_name=app_name, + description=agent_card.get("description"), + capabilities=",".join(capabilities_list) if capabilities_list else None, + protocol_version=agent_card.get("protocolVersion"), + ) diff --git a/dbx-agent-app/examples/supervisor/agents/expert_finder/dbx_agent_app/mcp/__init__.py b/dbx-agent-app/examples/supervisor/agents/expert_finder/dbx_agent_app/mcp/__init__.py new file mode 100644 index 00000000..60ee38ad --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/expert_finder/dbx_agent_app/mcp/__init__.py @@ -0,0 +1,11 @@ +""" +Model Context Protocol (MCP) support. + +This module provides utilities for integrating agents with MCP servers +and exposing UC Functions as MCP tools. 
+""" + +from .mcp_server import MCPServer, MCPServerConfig, setup_mcp_server +from .uc_functions import UCFunctionAdapter + +__all__ = ["MCPServer", "MCPServerConfig", "setup_mcp_server", "UCFunctionAdapter"] diff --git a/dbx-agent-app/examples/supervisor/agents/expert_finder/dbx_agent_app/mcp/mcp_server.py b/dbx-agent-app/examples/supervisor/agents/expert_finder/dbx_agent_app/mcp/mcp_server.py new file mode 100644 index 00000000..98a456fb --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/expert_finder/dbx_agent_app/mcp/mcp_server.py @@ -0,0 +1,196 @@ +""" +MCP server implementation for agents. + +Provides an MCP server that exposes agent tools via the Model Context Protocol. +Works with any FastAPI app. +""" + +import json +import logging +from typing import Dict, Any, List, Optional +from dataclasses import dataclass + +from fastapi import Request + +logger = logging.getLogger(__name__) + + +@dataclass +class MCPServerConfig: + """ + Configuration for MCP server. + + Attributes: + name: Server name + version: Server version + description: Server description + """ + name: str + version: str = "1.0.0" + description: str = "MCP server for agent tools" + + +class MCPServer: + """ + MCP server that exposes tools via JSON-RPC. + + Accepts tools as dicts with name, description, parameters, function keys. + """ + + def __init__(self, tools, config: MCPServerConfig): + """ + Initialize MCP server. + + Args: + tools: List of ToolDefinition objects or dicts with name/description/parameters/function + config: MCP server configuration + """ + self._tools = tools + self.config = config + + def setup_routes(self, app): + """ + Set up MCP protocol routes on the FastAPI app. 
+ + Adds: + - POST /api/mcp - MCP JSON-RPC endpoint + - GET /api/mcp/tools - List available tools + """ + + @app.post("/api/mcp") + async def mcp_jsonrpc(request: Request): + """MCP JSON-RPC endpoint.""" + try: + body = await request.json() + method = body.get("method") + params = body.get("params", {}) + request_id = body.get("id") + + if method == "tools/list": + result = await self._list_tools() + elif method == "tools/call": + result = await self._call_tool(params) + elif method == "server/info": + result = self._server_info() + else: + return { + "jsonrpc": "2.0", + "id": request_id, + "error": { + "code": -32601, + "message": f"Method not found: {method}" + } + } + + return { + "jsonrpc": "2.0", + "id": request_id, + "result": result + } + + except Exception as e: + logger.error("MCP request failed: %s", e) + return { + "jsonrpc": "2.0", + "id": body.get("id") if isinstance(body, dict) else None, + "error": { + "code": -32603, + "message": str(e) + } + } + + @app.get("/api/mcp/tools") + async def list_mcp_tools(): + """List available MCP tools.""" + return await self._list_tools() + + def _get_tool_name(self, tool) -> str: + return tool.name if hasattr(tool, "name") else tool["name"] + + def _get_tool_description(self, tool) -> str: + return tool.description if hasattr(tool, "description") else tool["description"] + + def _get_tool_parameters(self, tool) -> Dict[str, Any]: + return tool.parameters if hasattr(tool, "parameters") else tool.get("parameters", {}) + + def _get_tool_function(self, tool): + return tool.function if hasattr(tool, "function") else tool["function"] + + def _server_info(self) -> Dict[str, Any]: + """Get MCP server information.""" + return { + "name": self.config.name, + "version": self.config.version, + "description": self.config.description, + "protocol_version": "1.0", + } + + async def _list_tools(self) -> Dict[str, Any]: + """List all available tools in MCP format.""" + tools = [] + + for tool in self._tools: + mcp_tool = { + "name": 
self._get_tool_name(tool), + "description": self._get_tool_description(tool), + "inputSchema": { + "type": "object", + "properties": {}, + "required": [] + } + } + + for param_name, param_spec in self._get_tool_parameters(tool).items(): + param_type = param_spec.get("type", "string") + mcp_tool["inputSchema"]["properties"][param_name] = { + "type": param_type, + "description": param_spec.get("description", "") + } + if param_spec.get("required", False): + mcp_tool["inputSchema"]["required"].append(param_name) + + tools.append(mcp_tool) + + return {"tools": tools} + + async def _call_tool(self, params: Dict[str, Any]) -> Dict[str, Any]: + """Call a tool via MCP.""" + tool_name = params.get("name") + arguments = params.get("arguments", {}) + + tool_def = None + for tool in self._tools: + if self._get_tool_name(tool) == tool_name: + tool_def = tool + break + + if not tool_def: + raise ValueError(f"Tool not found: {tool_name}") + + try: + result = await self._get_tool_function(tool_def)(**arguments) + return {"result": result} + except Exception as e: + logger.error("Tool execution failed: %s", e) + raise + + +def setup_mcp_server(tools, config: Optional[MCPServerConfig] = None, fastapi_app=None): + """ + Set up MCP server for an agent. 
+ + Args: + tools: List of tool dicts with name/description/parameters/function keys + config: Optional MCP server configuration + fastapi_app: FastAPI app to add routes to (required) + + Returns: + MCPServer instance + """ + if config is None: + config = MCPServerConfig(name="mcp-server") + + server = MCPServer(tools, config) + server.setup_routes(fastapi_app) + + return server diff --git a/dbx-agent-app/examples/supervisor/agents/expert_finder/dbx_agent_app/mcp/uc_functions.py b/dbx-agent-app/examples/supervisor/agents/expert_finder/dbx_agent_app/mcp/uc_functions.py new file mode 100644 index 00000000..f1659960 --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/expert_finder/dbx_agent_app/mcp/uc_functions.py @@ -0,0 +1,240 @@ +""" +Unity Catalog Functions adapter for MCP. + +Automatically discovers UC Functions and exposes them as MCP tools. +""" + +import logging +from typing import List, Dict, Any, Optional + +from databricks.sdk import WorkspaceClient + +logger = logging.getLogger(__name__) + + +class UCFunctionAdapter: + """ + Adapter for Unity Catalog Functions to MCP protocol. + + Discovers UC Functions and converts them to MCP tool format for + use with agents. + + Usage: + adapter = UCFunctionAdapter(profile="my-profile") + tools = adapter.discover_functions(catalog="main", schema="functions") + """ + + def __init__(self, profile: Optional[str] = None): + """ + Initialize UC Functions adapter. 
class UCFunctionAdapter:
    """
    Adapter for Unity Catalog Functions to MCP protocol.

    Discovers UC Functions and converts them to MCP tool format for
    use with agents.

    Usage:
        adapter = UCFunctionAdapter(profile="my-profile")
        tools = adapter.discover_functions(catalog="main", schema="functions")
    """

    def __init__(self, profile: Optional[str] = None):
        """
        Initialize UC Functions adapter.

        Args:
            profile: Databricks CLI profile name
        """
        self.profile = profile
        # Workspace client is created lazily on first use.
        self._client: Optional[WorkspaceClient] = None

    def _get_client(self) -> WorkspaceClient:
        """Get or create the cached workspace client."""
        if self._client is None:
            self._client = (
                WorkspaceClient(profile=self.profile)
                if self.profile
                else WorkspaceClient()
            )
        return self._client

    def discover_functions(
        self,
        catalog: str,
        schema: str,
        name_pattern: Optional[str] = None,
    ) -> List[Dict[str, Any]]:
        """
        Discover UC Functions and convert to MCP tool format.

        Args:
            catalog: UC catalog name
            schema: UC schema name
            name_pattern: Optional substring filter on function names
                (plain containment check, not a SQL LIKE pattern)

        Returns:
            List of tool definitions in MCP format; empty on discovery failure

        Example:
            >>> adapter = UCFunctionAdapter()
            >>> tools = adapter.discover_functions("main", "functions")
            >>> for tool in tools:
            ...     print(tool["name"], tool["description"])
        """
        client = self._get_client()
        tools = []

        try:
            functions = client.functions.list(
                catalog_name=catalog,
                schema_name=schema,
            )

            for func in functions:
                # Skip system functions.
                # NOTE(review): assumes func.name carries a "system." prefix
                # for system functions — confirm against the SDK's naming.
                if func.name.startswith("system."):
                    continue

                # Apply the (substring) name filter.
                if name_pattern and name_pattern not in func.name:
                    continue

                # Convert to MCP tool format; None means conversion failed.
                tool = self._convert_function_to_tool(func)
                if tool:
                    tools.append(tool)

            logger.info(
                f"Discovered {len(tools)} UC Functions from {catalog}.{schema}"
            )

        except Exception as e:
            # Best-effort discovery: log and return whatever was collected.
            logger.error(f"Failed to discover UC Functions: {e}")

        return tools

    def _convert_function_to_tool(self, func) -> Optional[Dict[str, Any]]:
        """
        Convert a UC Function to MCP tool format.

        Args:
            func: Function info from Databricks SDK

        Returns:
            MCP tool definition or None if conversion fails
        """
        try:
            # Extract function metadata; short name drops the catalog/schema.
            name = func.name.split(".")[-1]
            description = func.comment or f"Unity Catalog function: {name}"

            # Build JSON Schema for the function's parameters.
            input_schema = {
                "type": "object",
                "properties": {},
                "required": []
            }

            if hasattr(func, "input_params") and func.input_params:
                for param in func.input_params.parameters:
                    param_name = param.name
                    param_type = self._map_uc_type_to_json_type(param.type_name)

                    input_schema["properties"][param_name] = {
                        "type": param_type,
                        "description": param.comment or ""
                    }

                    # Parameters without defaults are required.
                    if not hasattr(param, "default_value") or param.default_value is None:
                        input_schema["required"].append(param_name)

            return {
                "name": name,
                "description": description,
                "inputSchema": input_schema,
                "full_name": func.full_name,
                "source": "unity_catalog"
            }

        except Exception as e:
            logger.warning(f"Failed to convert function {func.name}: {e}")
            return None

    def _map_uc_type_to_json_type(self, uc_type: str) -> str:
        """
        Map Unity Catalog data type to JSON Schema type.

        Args:
            uc_type: UC type name (e.g., "STRING", "BIGINT", "BOOLEAN");
                case-insensitive

        Returns:
            JSON Schema type ("string", "number", "boolean", etc.);
            unknown types default to "string"
        """
        type_mapping = {
            "STRING": "string",
            "VARCHAR": "string",
            "CHAR": "string",
            "BIGINT": "integer",
            "INT": "integer",
            "INTEGER": "integer",
            "SMALLINT": "integer",
            "TINYINT": "integer",
            "DOUBLE": "number",
            "FLOAT": "number",
            "DECIMAL": "number",
            "BOOLEAN": "boolean",
            "BINARY": "string",
            "DATE": "string",
            "TIMESTAMP": "string",
            "ARRAY": "array",
            "MAP": "object",
            "STRUCT": "object",
        }

        return type_mapping.get(uc_type.upper(), "string")

    async def call_function(
        self,
        full_name: str,
        arguments: Dict[str, Any]
    ) -> Any:
        """
        Call a UC Function with given arguments.

        Args:
            full_name: Full function name (catalog.schema.function)
            arguments: Function arguments

        Returns:
            First column of the first result row, or None if no rows

        Example:
            >>> adapter = UCFunctionAdapter()
            >>> result = await adapter.call_function(
            ...     "main.functions.calculate_tax",
            ...     {"amount": 100, "rate": 0.08}
            ... )
        """
        client = self._get_client()

        try:
            # Build SQL with named parameter markers for the argument values.
            # NOTE(review): full_name is interpolated into the SQL text; it
            # comes from UC metadata today, but validate it if it can ever be
            # user-supplied.
            args_list = [f":{key}" for key in arguments.keys()]
            query = f"SELECT {full_name}({', '.join(args_list)})"

            # Execute via SQL warehouse (requires DATABRICKS_WAREHOUSE_ID).
            # NOTE(review): all argument values are coerced to str here —
            # confirm the warehouse casts them back to the declared types.
            result = client.statement_execution.execute_statement(
                statement=query,
                warehouse_id=self._get_default_warehouse(),
                parameters=[
                    {"name": key, "value": str(value)}
                    for key, value in arguments.items()
                ]
            )

            return result.result.data_array[0][0] if result.result.data_array else None

        except Exception as e:
            logger.error(f"Failed to call UC Function {full_name}: {e}")
            raise

    def _get_default_warehouse(self) -> str:
        """Get default SQL warehouse ID from the environment.

        Raises:
            ValueError: If DATABRICKS_WAREHOUSE_ID is not set.
        """
        import os
        warehouse_id = os.getenv("DATABRICKS_WAREHOUSE_ID")
        if not warehouse_id:
            raise ValueError(
                "DATABRICKS_WAREHOUSE_ID not set. "
                "Set this environment variable to use UC Functions."
            )
        return warehouse_id
+ + Attributes: + name: Agent name (will be catalog object name) + catalog: UC catalog name + schema: UC schema name + endpoint_url: Agent's base URL + description: Agent description + capabilities: List of agent capabilities + properties: Additional metadata key-value pairs + """ + name: str + catalog: str + schema: str + endpoint_url: str + description: Optional[str] = None + capabilities: Optional[List[str]] = None + properties: Optional[Dict[str, str]] = None + + +class UCAgentRegistry: + """ + Unity Catalog agent registry. + + Registers agents as UC AGENT objects for catalog-based discovery + and permission management. + + Usage: + registry = UCAgentRegistry(profile="my-profile") + + spec = UCAgentSpec( + name="customer_research", + catalog="main", + schema="agents", + endpoint_url="https://app.databricksapps.com", + description="Customer research agent", + capabilities=["search", "analysis"], + ) + + registry.register_agent(spec) + """ + + def __init__(self, profile: Optional[str] = None): + """ + Initialize UC agent registry. + + Args: + profile: Databricks CLI profile name (uses default if not specified) + """ + self.profile = profile + self._client: Optional[WorkspaceClient] = None + + def _get_client(self) -> WorkspaceClient: + """Get or create workspace client.""" + if self._client is None: + self._client = ( + WorkspaceClient(profile=self.profile) + if self.profile + else WorkspaceClient() + ) + return self._client + + def register_agent(self, spec: UCAgentSpec) -> Dict[str, Any]: + """ + Register an agent in Unity Catalog. + + Creates a AGENT object in the specified catalog and schema with + metadata about the agent's endpoint, capabilities, and properties. + + Args: + spec: Agent specification + + Returns: + Dictionary with registration details + + Raises: + UCRegistrationError: If registration fails + + Example: + >>> registry = UCAgentRegistry(profile="my-profile") + >>> spec = UCAgentSpec( + ... name="my_agent", + ... catalog="main", + ... 
schema="agents", + ... endpoint_url="https://app.databricksapps.com", + ... ) + >>> result = registry.register_agent(spec) + """ + client = self._get_client() + full_name = f"{spec.catalog}.{spec.schema}.{spec.name}" + + try: + # Build agent properties for UC metadata + properties = spec.properties or {} + properties["endpoint_url"] = spec.endpoint_url + properties["agent_card_url"] = f"{spec.endpoint_url}/.well-known/agent.json" + + if spec.capabilities: + properties["capabilities"] = ",".join(spec.capabilities) + + # Register as a UC registered model with AGENT type + # (UC doesn't have a native AGENT type yet, so we use registered models + # with special tags/properties to mark them as agents) + + logger.info(f"Registering agent '{full_name}' in Unity Catalog") + + # Check if catalog and schema exist + try: + client.catalogs.get(spec.catalog) + except Exception as e: + raise UCRegistrationError( + f"Catalog '{spec.catalog}' does not exist or is not accessible: {e}" + ) + + try: + client.schemas.get(f"{spec.catalog}.{spec.schema}") + except Exception as e: + raise UCRegistrationError( + f"Schema '{spec.catalog}.{spec.schema}' does not exist or is not accessible: {e}" + ) + + # Create or update registered model as agent placeholder + # In a future UC version with native AGENT support, this would use: + # client.agents.create(name=full_name, properties=properties) + + # Encode properties as JSON suffix in comment for discovery + # Format: "description\n---AGENT_META---\n{json}" + meta = {"databricks_agent": True, **properties} + comment = spec.description or "" + comment_with_meta = f"{comment}\n---AGENT_META---\n{json.dumps(meta)}" + + # Try update first (model may already exist from prior deploy), + # fall back to create if it doesn't exist + try: + client.registered_models.update( + full_name, + comment=comment_with_meta, + ) + logger.info(f"Updated existing agent '{full_name}'") + except Exception as update_err: + # Model doesn't exist or SP can't access it — 
try create + logger.debug(f"Update failed ({update_err}), trying create") + try: + client.registered_models.create( + name=spec.name, + catalog_name=spec.catalog, + schema_name=spec.schema, + comment=comment_with_meta, + ) + logger.info(f"Created new agent '{full_name}'") + except Exception as create_err: + # If create fails with "already exists", the SP just + # can't see the model — log warning but don't fail + err_str = str(create_err).lower() + if "already exists" in err_str or "not a valid name" in err_str: + logger.warning( + "Agent '%s' exists but SP cannot update it. " + "Grant the app's SP ownership or MANAGE on the model.", + full_name, + ) + else: + raise + + logger.info(f"Successfully registered agent '{full_name}'") + + return { + "full_name": full_name, + "catalog": spec.catalog, + "schema": spec.schema, + "name": spec.name, + "endpoint_url": spec.endpoint_url, + "properties": properties, + } + + except UCRegistrationError: + raise + except Exception as e: + raise UCRegistrationError( + f"Failed to register agent '{full_name}': {e}" + ) from e + + @staticmethod + def _parse_agent_meta(comment: Optional[str]) -> Optional[Dict[str, Any]]: + """Parse agent metadata from comment field (JSON after ---AGENT_META--- marker).""" + if not comment or "---AGENT_META---" not in comment: + return None + try: + _, meta_json = comment.split("---AGENT_META---", 1) + return json.loads(meta_json.strip()) + except (ValueError, json.JSONDecodeError): + return None + + @staticmethod + def _clean_description(comment: Optional[str]) -> str: + """Extract human-readable description from comment (before the meta marker).""" + if not comment: + return "" + if "---AGENT_META---" in comment: + return comment.split("---AGENT_META---")[0].strip() + return comment + + def get_agent(self, catalog: str, schema: str, name: str) -> Optional[Dict[str, Any]]: + """ + Get agent metadata from Unity Catalog. 
+ + Args: + catalog: UC catalog name + schema: UC schema name + name: Agent name + + Returns: + Agent metadata dictionary or None if not found + """ + client = self._get_client() + full_name = f"{catalog}.{schema}.{name}" + + try: + model = client.registered_models.get(full_name) + meta = self._parse_agent_meta(model.comment) + if not meta or not meta.get("databricks_agent"): + return None + + return { + "full_name": full_name, + "catalog": catalog, + "schema": schema, + "name": name, + "description": self._clean_description(model.comment), + "endpoint_url": meta.get("endpoint_url"), + "agent_card_url": meta.get("agent_card_url"), + "capabilities": meta.get("capabilities", "").split(",") if meta.get("capabilities") else None, + "properties": meta, + } + + except Exception as e: + logger.debug(f"Agent '{full_name}' not found: {e}") + return None + + def list_agents( + self, + catalog: str, + schema: Optional[str] = None, + ) -> List[Dict[str, Any]]: + """ + List all agents in a catalog or schema. 
+ + Args: + catalog: UC catalog name + schema: Optional UC schema name (lists all schemas if not specified) + + Returns: + List of agent metadata dictionaries + """ + client = self._get_client() + agents = [] + + # Determine which schemas to scan + schemas_to_scan = [schema] if schema else [] + if not schema: + try: + for s in client.schemas.list(catalog_name=catalog): + if s.name != "information_schema": + schemas_to_scan.append(s.name) + except Exception as e: + logger.error(f"Failed to list schemas in {catalog}: {e}") + return [] + + for schema_name in schemas_to_scan: + try: + models = client.registered_models.list( + catalog_name=catalog, schema_name=schema_name + ) + for model in models: + meta = self._parse_agent_meta(model.comment) + if not meta or not meta.get("databricks_agent"): + continue + + agents.append({ + "full_name": model.full_name, + "catalog": catalog, + "schema": schema_name, + "name": model.name, + "description": self._clean_description(model.comment), + "endpoint_url": meta.get("endpoint_url"), + "capabilities": meta.get("capabilities", "").split(",") if meta.get("capabilities") else None, + }) + except Exception as e: + logger.debug(f"Failed to list models in {catalog}.{schema_name}: {e}") + continue + + return agents + + def delete_agent(self, catalog: str, schema: str, name: str) -> bool: + """ + Delete an agent from Unity Catalog. 
+ + Args: + catalog: UC catalog name + schema: UC schema name + name: Agent name + + Returns: + True if deleted, False if not found + + Raises: + UCRegistrationError: If deletion fails + """ + client = self._get_client() + full_name = f"{catalog}.{schema}.{name}" + + try: + client.registered_models.delete(full_name) + logger.info(f"Deleted agent '{full_name}'") + return True + except Exception as e: + if "does not exist" in str(e).lower(): + return False + raise UCRegistrationError( + f"Failed to delete agent '{full_name}': {e}" + ) from e diff --git a/dbx-agent-app/examples/supervisor/agents/expert_finder/requirements.txt b/dbx-agent-app/examples/supervisor/agents/expert_finder/requirements.txt new file mode 100644 index 00000000..7b0556d1 --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/expert_finder/requirements.txt @@ -0,0 +1,5 @@ +fastapi>=0.115.0 +uvicorn[standard]>=0.30.0 +pydantic>=2.0.0 +httpx>=0.27.0 +databricks-sdk>=0.30.0 diff --git a/dbx-agent-app/examples/supervisor/agents/research/app.py b/dbx-agent-app/examples/supervisor/agents/research/app.py new file mode 100644 index 00000000..26f9ba1c --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/research/app.py @@ -0,0 +1,215 @@ +"""Sub-agent: Research — searches expert_transcripts for insights.""" + +# Auth cleanup: prefer OAuth over PAT in Databricks Apps +import os +if os.environ.get("DATABRICKS_CLIENT_ID"): + os.environ.pop("DATABRICKS_TOKEN", None) + +import time +import logging +from databricks.sdk import WorkspaceClient +from databricks.sdk.service.sql import StatementParameterListItem +from dbx_agent_app import app_agent, AgentRequest, AgentResponse + +logger = logging.getLogger(__name__) + +# --------------------------------------------------------------------------- +# SQL helpers +# --------------------------------------------------------------------------- + +_workspace = WorkspaceClient() +_warehouse_id_cache = os.environ.get("WAREHOUSE_ID") + +CATALOG = 
os.environ.get("UC_CATALOG", "serverless_dxukih_catalog") +SCHEMA = os.environ.get("UC_SCHEMA", "agents") + + +def _fqn(table: str) -> str: + return f"{CATALOG}.{SCHEMA}.{table}" + + +def _get_warehouse_id() -> str: + global _warehouse_id_cache + if _warehouse_id_cache: + return _warehouse_id_cache + for wh in _workspace.warehouses.list(): + if wh.enable_serverless_compute: + _warehouse_id_cache = wh.id + return wh.id + first = next(iter(_workspace.warehouses.list()), None) + if first: + _warehouse_id_cache = first.id + return first.id + raise ValueError("No SQL warehouse available") + + +def _execute_sql(statement, parameters=None): + """Execute SQL and return (result, trace_entry).""" + raw_params = parameters or [] + params = [ + StatementParameterListItem(name=p["name"], value=p["value"]) + if isinstance(p, dict) else p + for p in raw_params + ] + wh_id = _get_warehouse_id() + start = time.monotonic() + result = _workspace.statement_execution.execute_statement( + warehouse_id=wh_id, statement=statement, + parameters=params, wait_timeout="50s", + ) + duration_ms = round((time.monotonic() - start) * 1000, 1) + + row_count = len(result.result.data_array) if result.result and result.result.data_array else 0 + columns = [] + if result.manifest and result.manifest.schema and result.manifest.schema.columns: + columns = [ + {"name": c.name, + "type": str(c.type_name.value) if hasattr(c.type_name, "value") else str(c.type_name)} + for c in result.manifest.schema.columns + ] + + trace = { + "statement": " ".join(statement.split()), + "parameters": [ + {"name": p["name"], "value": p["value"]} if isinstance(p, dict) + else {"name": p.name, "value": p.value} + for p in raw_params + ], + "row_count": row_count, + "columns": columns, + "duration_ms": duration_ms, + "warehouse_id": wh_id, + } + return result, trace + + +def _extract_keywords(query: str) -> list[str]: + stop = {"a","an","the","is","are","on","any","do","does","for","in","of", + 
"to","what","who","how","can","this","that","find","check","show", + "me","my","about","with","and","or","all","get","list","tell"} + return [w for w in query.lower().split() if w not in stop and len(w) > 1] + + +def _build_like_clauses(column: str, keywords: list[str], prefix: str = ""): + col_tag = prefix or column.replace(".", "_") + clauses, params = [], [] + for i, kw in enumerate(keywords): + name = f"{col_tag}_{i}" + clauses.append(f"LOWER({column}) LIKE :{name}") + params.append({"name": name, "value": f"%{kw}%"}) + return " OR ".join(clauses) if clauses else "FALSE", params + + +# --------------------------------------------------------------------------- +# Demo fallback +# --------------------------------------------------------------------------- + +def _demo_response(query: str) -> dict: + return { + "response": f"""Based on analysis of expert transcripts: + +**Key Insights on "{query}":** + +1. **Dr. Sarah Chen** (Healthcare Technology, Interview #T-2025-1247): + "We're seeing 40% year-over-year growth in AI implementation." + +2. **Michael Torres** (Supply Chain, Interview #T-2025-1189): + "Leaders prioritize real-time visibility and transparency." 
+ +**Themes:** +- Accelerating digital transformation (8/12 interviews) +- Talent shortage challenges (7/12 interviews) + +*Demo fallback -- UC tables not available*""", + "data_source": "demo_fallback", + "tables_accessed": [_fqn("expert_transcripts")], + "keywords_extracted": _extract_keywords(query), + "sql_queries": [], + "timing": {"sql_total_ms": 0, "total_ms": 0}, + } + + +# --------------------------------------------------------------------------- +# Agent + Tool +# --------------------------------------------------------------------------- + +@app_agent( + name="sub_research", + description="Search expert interview transcripts for insights and opinions", + capabilities=["research", "transcript_search"], + uc_catalog=os.environ.get("UC_CATALOG", "main"), + uc_schema=os.environ.get("UC_SCHEMA", "agents"), + enable_mcp=True, +) +async def sub_research(request: AgentRequest) -> dict: + """Delegate to search tool.""" + return await search(request.last_user_message) + + +@sub_research.tool(description="Search expert interview transcripts for insights and opinions") +async def search(query: str) -> dict: + """ + Search expert transcripts by topic, sector, or keyword. 
+ + Args: + query: Natural-language research question + + Returns: + Structured result with response text, metadata, and SQL traces + """ + total_start = time.monotonic() + sql_queries = [] + + try: + keywords = _extract_keywords(query) + if not keywords: + keywords = [query.lower()] + + topic_clause, topic_params = _build_like_clauses("topic", keywords) + excerpt_clause, excerpt_params = _build_like_clauses("transcript_excerpt", keywords) + sector_clause, sector_params = _build_like_clauses("sector", keywords) + all_params = topic_params + excerpt_params + sector_params + + result, trace = _execute_sql( + f""" + SELECT transcript_id, expert_name, topic, transcript_excerpt, + interview_date, relevance_score, sector + FROM {_fqn('expert_transcripts')} + WHERE {topic_clause} OR {excerpt_clause} OR {sector_clause} + ORDER BY relevance_score DESC + LIMIT 5 + """, + all_params, + ) + sql_queries.append(trace) + + if not result.result or not result.result.data_array: + text = f'No transcripts found matching "{query}".' + else: + rows = result.result.data_array + text = f'**Research Results for "{query}"** ({len(rows)} transcripts found)\n\n' + for i, row in enumerate(rows, 1): + tid, expert, topic, excerpt, date, score, sector = row + text += f"**{i}. {expert}** ({sector}, Interview #{tid})\n" + text += f" Topic: {topic} | Relevance: {float(score):.0%} | Date: {date}\n" + text += f' > "{excerpt[:250]}{"..." 
if len(str(excerpt)) > 250 else ""}"\n\n' + text += f"\n*Data source: {_fqn('expert_transcripts')}*" + + total_ms = round((time.monotonic() - total_start) * 1000, 1) + sql_total_ms = sum(q["duration_ms"] for q in sql_queries) + + return { + "response": text, + "data_source": "live", + "tables_accessed": [_fqn("expert_transcripts")], + "keywords_extracted": keywords, + "sql_queries": sql_queries, + "timing": {"sql_total_ms": sql_total_ms, "total_ms": total_ms}, + } + + except Exception as e: + logger.error("SQL query failed for research: %s", e, exc_info=True) + return _demo_response(query) + + +app = sub_research.app diff --git a/dbx-agent-app/examples/supervisor/agents/research/app.yaml b/dbx-agent-app/examples/supervisor/agents/research/app.yaml new file mode 100644 index 00000000..fc60869f --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/research/app.yaml @@ -0,0 +1,19 @@ +command: + - "python" + - "-m" + - "uvicorn" + - "app:app" + - "--host" + - "0.0.0.0" + - "--port" + - "8000" + +env: + - name: UC_CATALOG + value: serverless_dxukih_catalog + + - name: UC_SCHEMA + value: agents + + - name: WAREHOUSE_ID + value: 387bcda0f2ece20c diff --git a/dbx-agent-app/examples/supervisor/agents/research/dbx_agent_app/__init__.py b/dbx-agent-app/examples/supervisor/agents/research/dbx_agent_app/__init__.py new file mode 100644 index 00000000..95db07d3 --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/research/dbx_agent_app/__init__.py @@ -0,0 +1,41 @@ +""" +dbx-agent-app: Framework for building discoverable AI agents on Databricks Apps. 
+ +This package provides: +- @app_agent: Decorator to turn an async function into a discoverable agent +- AgentDiscovery: Discover agents in your Databricks workspace +- A2AClient: Communicate with agents using the A2A protocol +- UCAgentRegistry: Register agents in Unity Catalog +- MCPServerConfig: Configure MCP server for agent tools +""" + +from .discovery import AgentDiscovery, DiscoveredAgent, AgentDiscoveryResult, A2AClient, A2AClientError +from .mcp import MCPServerConfig, setup_mcp_server, UCFunctionAdapter +from .registry import UCAgentRegistry, UCAgentSpec, UCRegistrationError +from .dashboard import create_dashboard_app + +try: + from importlib.metadata import version + __version__ = version("dbx-agent-app") +except Exception: + __version__ = "0.1.0" + +__all__ = [ + # Discovery + "AgentDiscovery", + "DiscoveredAgent", + "AgentDiscoveryResult", + "A2AClient", + "A2AClientError", + # Registry + "UCAgentRegistry", + "UCAgentSpec", + "UCRegistrationError", + # MCP + "MCPServerConfig", + "setup_mcp_server", + "UCFunctionAdapter", + # Dashboard + "create_dashboard_app", +] + diff --git a/dbx-agent-app/examples/supervisor/agents/research/dbx_agent_app/core/__init__.py b/dbx-agent-app/examples/supervisor/agents/research/dbx_agent_app/core/__init__.py new file mode 100644 index 00000000..623da88c --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/research/dbx_agent_app/core/__init__.py @@ -0,0 +1 @@ +"""Core agent application components.""" diff --git a/dbx-agent-app/examples/supervisor/agents/research/dbx_agent_app/dashboard/__init__.py b/dbx-agent-app/examples/supervisor/agents/research/dbx_agent_app/dashboard/__init__.py new file mode 100644 index 00000000..89929a08 --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/research/dbx_agent_app/dashboard/__init__.py @@ -0,0 +1,14 @@ +""" +Developer dashboard for agent discovery. 
+ +Launch via CLI: + dbx-agent-app dashboard --profile my-profile + +Or programmatically: + from dbx_agent_app.dashboard import create_dashboard_app, run_dashboard +""" + +from .app import create_dashboard_app +from .cli import main as run_dashboard + +__all__ = ["create_dashboard_app", "run_dashboard"] diff --git a/dbx-agent-app/examples/supervisor/agents/research/dbx_agent_app/dashboard/app.py b/dbx-agent-app/examples/supervisor/agents/research/dbx_agent_app/dashboard/app.py new file mode 100644 index 00000000..d16c3d54 --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/research/dbx_agent_app/dashboard/app.py @@ -0,0 +1,112 @@ +""" +FastAPI application for the developer dashboard. + +Routes: + HTML: GET / — agent list page + GET /agent/{name} — agent detail page + API: GET /api/agents — JSON list of agents + GET /api/agents/{name}/card — full agent card + POST /api/agents/{name}/mcp — MCP JSON-RPC proxy + POST /api/scan — trigger re-scan + GET /health — health check +""" + +import logging +from typing import Optional + +from fastapi import FastAPI, Request +from fastapi.responses import HTMLResponse, JSONResponse + +from .scanner import DashboardScanner +from .templates import render_agent_list, render_agent_detail + +logger = logging.getLogger(__name__) + + +def create_dashboard_app( + scanner: DashboardScanner, + profile: Optional[str] = None, +) -> FastAPI: + """Build and return the dashboard FastAPI app.""" + app = FastAPI(title="dbx-agent-app dashboard", docs_url=None, redoc_url=None) + + # --- HTML pages ------------------------------------------------------- + + @app.get("/", response_class=HTMLResponse) + async def index(): + agents = scanner.get_agents() + return render_agent_list(agents) + + @app.get("/agent/{name}", response_class=HTMLResponse) + async def agent_detail(name: str): + agent = scanner.get_agent_by_name(name) + if not agent: + return HTMLResponse("

Agent not found

", status_code=404) + + card = None + try: + card = await scanner.get_agent_card(agent.endpoint_url) + except Exception as e: + logger.warning("Could not fetch card for %s: %s", name, e) + + return render_agent_detail(agent, card) + + # --- JSON API --------------------------------------------------------- + + @app.get("/api/agents") + async def api_agents(): + agents = scanner.get_agents() + return [ + { + "name": a.name, + "endpoint_url": a.endpoint_url, + "app_name": a.app_name, + "description": a.description, + "capabilities": a.capabilities, + "protocol_version": a.protocol_version, + } + for a in agents + ] + + @app.get("/api/agents/{name}/card") + async def api_agent_card(name: str): + agent = scanner.get_agent_by_name(name) + if not agent: + return JSONResponse({"error": "Agent not found"}, status_code=404) + + try: + card = await scanner.get_agent_card(agent.endpoint_url) + return card + except Exception as e: + return JSONResponse({"error": str(e)}, status_code=502) + + @app.post("/api/agents/{name}/mcp") + async def api_mcp_proxy(name: str, request: Request): + agent = scanner.get_agent_by_name(name) + if not agent: + return JSONResponse({"error": "Agent not found"}, status_code=404) + + try: + payload = await request.json() + result = await scanner.proxy_mcp(agent.endpoint_url, payload) + return result + except Exception as e: + return JSONResponse( + {"jsonrpc": "2.0", "id": None, "error": {"code": -1, "message": str(e)}}, + status_code=502, + ) + + @app.post("/api/scan") + async def api_scan(): + agents = await scanner.scan() + return {"count": len(agents), "agents": [a.name for a in agents]} + + @app.get("/health") + async def health(): + return { + "status": "ok", + "agents_cached": len(scanner.get_agents()), + "profile": profile, + } + + return app diff --git a/dbx-agent-app/examples/supervisor/agents/research/dbx_agent_app/dashboard/cli.py b/dbx-agent-app/examples/supervisor/agents/research/dbx_agent_app/dashboard/cli.py new file mode 100644 index 
00000000..3b17aa90 --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/research/dbx_agent_app/dashboard/cli.py @@ -0,0 +1,63 @@ +""" +CLI entry point for the developer dashboard. + +Usage: + dbx-agent-app dashboard --profile my-profile --port 8501 +""" + +import argparse +import asyncio +import logging +import sys +import webbrowser + +import uvicorn + +from .scanner import DashboardScanner +from .app import create_dashboard_app + + +def main(): + parser = argparse.ArgumentParser( + prog="dbx-agent-app", + description="Developer dashboard for Databricks agent discovery", + ) + sub = parser.add_subparsers(dest="command") + + dash = sub.add_parser("dashboard", help="Launch the agent discovery dashboard") + dash.add_argument("--profile", type=str, default=None, help="Databricks CLI profile") + dash.add_argument("--port", type=int, default=8501, help="Port to serve on (default: 8501)") + dash.add_argument("--host", type=str, default="127.0.0.1", help="Host to bind (default: 127.0.0.1)") + dash.add_argument("--no-browser", action="store_true", help="Don't auto-open browser") + + args = parser.parse_args() + + if args.command != "dashboard": + parser.print_help() + sys.exit(1) + + logging.basicConfig(level=logging.INFO, format="%(levelname)s %(name)s: %(message)s") + + scanner = DashboardScanner(profile=args.profile) + + # Run initial scan + print(f"Scanning workspace for agents (profile={args.profile or 'default'})...") + try: + agents = asyncio.run(scanner.scan()) + print(f"Found {len(agents)} agent(s)") + except Exception as e: + print(f"Initial scan failed: {e}", file=sys.stderr) + print("Dashboard will start anyway — use the Scan button to retry.") + + app = create_dashboard_app(scanner, profile=args.profile) + + url = f"http://{args.host}:{args.port}" + if not args.no_browser: + webbrowser.open(url) + + print(f"Dashboard running at {url}") + uvicorn.run(app, host=args.host, port=args.port, log_level="warning") + + +if __name__ == "__main__": + main() diff 
--git a/dbx-agent-app/examples/supervisor/agents/research/dbx_agent_app/dashboard/scanner.py b/dbx-agent-app/examples/supervisor/agents/research/dbx_agent_app/dashboard/scanner.py new file mode 100644 index 00000000..475460be --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/research/dbx_agent_app/dashboard/scanner.py @@ -0,0 +1,81 @@ +""" +Dashboard scanner — wraps AgentDiscovery + A2AClient with caching and MCP proxy. +""" + +import asyncio +import logging +from typing import Dict, Any, List, Optional + +import httpx + +from ..discovery import AgentDiscovery, DiscoveredAgent, A2AClient, A2AClientError + +logger = logging.getLogger(__name__) + + +class DashboardScanner: + """ + Thin wrapper around AgentDiscovery that adds result caching + and MCP JSON-RPC proxying for the dashboard UI. + """ + + def __init__(self, profile: Optional[str] = None): + self._discovery = AgentDiscovery(profile=profile) + self._agents: List[DiscoveredAgent] = [] + self._scan_lock = asyncio.Lock() + self._scanned = False + + async def scan(self) -> List[DiscoveredAgent]: + """Run workspace discovery and cache results. 
Thread-safe via asyncio.Lock.""" + async with self._scan_lock: + result = await self._discovery.discover_agents() + self._agents = result.agents + self._scanned = True + if result.errors: + for err in result.errors: + logger.warning("Discovery error: %s", err) + return self._agents + + def get_agents(self) -> List[DiscoveredAgent]: + """Return cached agent list from the last scan.""" + return list(self._agents) + + def get_agent_by_name(self, name: str) -> Optional[DiscoveredAgent]: + """Look up a cached agent by name.""" + for agent in self._agents: + if agent.name == name or agent.app_name == name: + return agent + return None + + @property + def workspace_token(self) -> Optional[str]: + """Auth token extracted during discovery, used for cross-app requests.""" + return self._discovery._workspace_token + + async def get_agent_card(self, endpoint_url: str) -> Dict[str, Any]: + """Fetch the full agent card JSON from a remote agent.""" + async with A2AClient(timeout=10.0) as client: + return await client.fetch_agent_card( + endpoint_url, auth_token=self.workspace_token + ) + + async def proxy_mcp(self, endpoint_url: str, payload: Dict[str, Any]) -> Dict[str, Any]: + """ + Forward a JSON-RPC request to an agent's MCP endpoint. 
+ + Args: + endpoint_url: Agent base URL + payload: Complete JSON-RPC 2.0 request body + + Returns: + JSON-RPC response from the agent + """ + mcp_url = endpoint_url.rstrip("/") + "/api/mcp" + headers = {"Content-Type": "application/json"} + if self.workspace_token: + headers["Authorization"] = f"Bearer {self.workspace_token}" + + async with httpx.AsyncClient(timeout=30.0, follow_redirects=True) as http: + response = await http.post(mcp_url, json=payload, headers=headers) + response.raise_for_status() + return response.json() diff --git a/dbx-agent-app/examples/supervisor/agents/research/dbx_agent_app/dashboard/templates.py b/dbx-agent-app/examples/supervisor/agents/research/dbx_agent_app/dashboard/templates.py new file mode 100644 index 00000000..8543d75d --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/research/dbx_agent_app/dashboard/templates.py @@ -0,0 +1,278 @@ +""" +Server-rendered HTML templates for the dashboard. + +Pure Python functions returning HTML strings — no Jinja2, no React, no build step. +""" + +import html +import json +from typing import List, Dict, Any, Optional + +from ..discovery import DiscoveredAgent + + +# --------------------------------------------------------------------------- +# Base layout +# --------------------------------------------------------------------------- + +def render_base(title: str, content: str) -> str: + """HTML shell with inline CSS (dark theme).""" + return f""" + + + + +{html.escape(title)} + + + +
+
+

dbx-agent-app dashboard

+ +
+
+
+{content} +
+ +""" + + +# --------------------------------------------------------------------------- +# Agent list page +# --------------------------------------------------------------------------- + +def render_agent_list(agents: List[DiscoveredAgent]) -> str: + """Main page: grid of agent cards + scan button.""" + if not agents: + cards_html = """ +
+

No agents discovered

+

No agent-enabled Databricks Apps found in your workspace. Click Scan to retry.

+
""" + else: + cards = [] + for a in agents: + caps = "" + if a.capabilities: + badges = "".join( + f'{html.escape(c.strip())} ' + for c in a.capabilities.split(",") + ) + caps = f'
{badges}
' + + desc = html.escape(a.description or "No description") + cards.append(f""" + +
+

{html.escape(a.name)}

+

{desc}

+
+ App: {html.escape(a.app_name)} + {f'Protocol: {html.escape(a.protocol_version)}' if a.protocol_version else ''} +
+ {caps} +
+
""") + cards_html = f'
{"".join(cards)}
' + + return render_base( + "Agent Dashboard", + f""" +
+ {len(agents)} agent{"s" if len(agents) != 1 else ""} discovered + +
+{cards_html} +""", + ) + + +# --------------------------------------------------------------------------- +# Agent detail page +# --------------------------------------------------------------------------- + +def render_agent_detail( + agent: DiscoveredAgent, + card: Optional[Dict[str, Any]] = None, +) -> str: + """Detail page: agent card JSON, tools list, MCP test panel.""" + card_json = json.dumps(card, indent=2) if card else "Card not available" + + # Extract tools from card if present + tools_html = "" + if card: + skills = card.get("skills") or card.get("tools") or [] + if skills: + rows = [] + for t in skills: + name = html.escape(t.get("name", t.get("id", "unknown"))) + desc = html.escape(t.get("description", "")) + rows.append( + f'
{name}' + f'
{desc}
' + ) + tools_html = f""" +
+

Tools ({len(skills)})

+ {"".join(rows)} +
""" + + safe_name = html.escape(agent.name) + safe_endpoint = html.escape(agent.endpoint_url) + + return render_base( + f"{safe_name} — Agent Dashboard", + f""" +
+ ← All agents +

{safe_name}

+

{html.escape(agent.description or 'No description')}

+
+ Endpoint: {safe_endpoint} + App: {html.escape(agent.app_name)} + {f'{html.escape(agent.protocol_version)}' if agent.protocol_version else ''} +
+
+ +
+

Agent Card

+
{html.escape(card_json)}
+
+ +{tools_html} + +
+

MCP Test Panel

+

+ Send a JSON-RPC request to this agent's /api/mcp endpoint. +

+
+ + +
+ + +
+
+ +""", + ) diff --git a/dbx-agent-app/examples/supervisor/agents/research/dbx_agent_app/discovery/__init__.py b/dbx-agent-app/examples/supervisor/agents/research/dbx_agent_app/discovery/__init__.py new file mode 100644 index 00000000..d6d04008 --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/research/dbx_agent_app/discovery/__init__.py @@ -0,0 +1,24 @@ +""" +Agent discovery for Databricks Apps. + +This module provides clients and utilities for discovering agent-enabled +Databricks Apps that expose A2A protocol agent cards. +""" + +from .agent_discovery import ( + AgentDiscovery, + DiscoveredAgent, + AgentDiscoveryResult, +) +from .a2a_client import ( + A2AClient, + A2AClientError, +) + +__all__ = [ + "AgentDiscovery", + "DiscoveredAgent", + "AgentDiscoveryResult", + "A2AClient", + "A2AClientError", +] diff --git a/dbx-agent-app/examples/supervisor/agents/research/dbx_agent_app/discovery/a2a_client.py b/dbx-agent-app/examples/supervisor/agents/research/dbx_agent_app/discovery/a2a_client.py new file mode 100644 index 00000000..1243d1a3 --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/research/dbx_agent_app/discovery/a2a_client.py @@ -0,0 +1,268 @@ +""" +A2A Client for agent-to-agent communication. + +Implements the A2A protocol for discovering and communicating with peer agents. +""" + +import json +import uuid +import logging +from typing import Dict, Any, Optional, AsyncIterator + +import httpx + +logger = logging.getLogger(__name__) + + +class A2AClientError(Exception): + """Raised when an A2A operation fails.""" + pass + + +class A2AClient: + """ + Async client for A2A protocol communication with peer agents. + + Usage: + async with A2AClient() as client: + card = await client.fetch_agent_card("https://app.databricksapps.com") + result = await client.send_message("https://app.databricksapps.com/api/a2a", "Hello") + """ + + def __init__(self, timeout: float = 60.0): + """ + Initialize A2A client. 
+ + Args: + timeout: Request timeout in seconds + """ + self.timeout = timeout + self._client: Optional[httpx.AsyncClient] = None + + async def __aenter__(self): + self._client = httpx.AsyncClient( + timeout=self.timeout, + follow_redirects=True, + ) + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + if self._client: + await self._client.aclose() + + def _auth_headers(self, auth_token: Optional[str] = None) -> Dict[str, str]: + """Build authentication headers.""" + headers = {"Content-Type": "application/json"} + if auth_token: + headers["Authorization"] = f"Bearer {auth_token}" + return headers + + async def fetch_agent_card( + self, + base_url: str, + auth_token: Optional[str] = None, + ) -> Dict[str, Any]: + """ + Fetch an agent's A2A protocol agent card. + + Tries /.well-known/agent.json first, then /card as fallback. + Handles OAuth redirects gracefully (returns error instead of following). + + Args: + base_url: Base URL of the agent application + auth_token: Optional OAuth token for authenticated requests + + Returns: + Agent card JSON data + + Raises: + A2AClientError: If agent card cannot be fetched + + Example: + >>> async with A2AClient() as client: + >>> card = await client.fetch_agent_card("https://app.databricksapps.com") + >>> print(card["name"], card["description"]) + """ + if not self._client: + raise A2AClientError("Client not initialized. 
Use async context manager.") + + headers = {} + if auth_token: + headers["Authorization"] = f"Bearer {auth_token}" + + # Use a client that doesn't follow redirects to detect OAuth flows + async with httpx.AsyncClient(timeout=self.timeout, follow_redirects=False) as probe_client: + for path in ["/.well-known/agent.json", "/card"]: + try: + url = base_url.rstrip("/") + path + response = await probe_client.get(url, headers=headers) + + # OAuth redirect detected - app requires interactive auth + if response.status_code in (301, 302, 303, 307, 308): + logger.debug(f"OAuth redirect detected for {url}") + continue + + if response.status_code == 200: + if not response.text or response.text.isspace(): + logger.debug(f"Empty response body for {url}") + continue + return response.json() + + except Exception as e: + logger.debug(f"Agent card fetch failed for {url}: {e}") + continue + + raise A2AClientError(f"Could not fetch agent card from {base_url}") + + async def _jsonrpc_call( + self, + url: str, + method: str, + params: Dict[str, Any], + auth_token: Optional[str] = None, + ) -> Dict[str, Any]: + """ + Send a JSON-RPC 2.0 request to an agent. + + Args: + url: A2A endpoint URL + method: JSON-RPC method name (e.g., "message/send") + params: Method parameters + auth_token: Optional authentication token + + Returns: + JSON-RPC result + + Raises: + A2AClientError: If request fails or returns error + """ + if not self._client: + raise A2AClientError("Client not initialized. 
Use async context manager.") + + payload = { + "jsonrpc": "2.0", + "id": str(uuid.uuid4()), + "method": method, + "params": params, + } + + try: + response = await self._client.post( + url, + json=payload, + headers=self._auth_headers(auth_token), + ) + response.raise_for_status() + result = response.json() + + if "error" in result: + error = result["error"] + raise A2AClientError( + f"A2A error: {error.get('message', 'Unknown')} " + f"(code: {error.get('code')})" + ) + + return result.get("result", {}) + + except httpx.TimeoutException as e: + raise A2AClientError(f"Request to {url} timed out: {e}") + except httpx.HTTPStatusError as e: + raise A2AClientError( + f"HTTP error from {url}: {e.response.status_code}" + ) + except json.JSONDecodeError as e: + raise A2AClientError(f"Invalid JSON from {url}: {e}") + + async def send_message( + self, + agent_url: str, + message: str, + context_id: Optional[str] = None, + auth_token: Optional[str] = None, + ) -> Dict[str, Any]: + """ + Send a message to a peer agent using A2A protocol. + + Args: + agent_url: Agent's A2A endpoint URL + message: Text message to send + context_id: Optional conversation context ID + auth_token: Optional authentication token + + Returns: + Agent's response + + Example: + >>> async with A2AClient() as client: + >>> response = await client.send_message( + >>> "https://app.databricksapps.com/api/a2a", + >>> "What are your capabilities?" + >>> ) + """ + params: Dict[str, Any] = { + "message": { + "messageId": str(uuid.uuid4()), + "role": "user", + "parts": [{"text": message}], + } + } + if context_id: + params["message"]["contextId"] = context_id + + return await self._jsonrpc_call( + agent_url, "message/send", params, auth_token + ) + + async def send_streaming_message( + self, + agent_url: str, + message: str, + auth_token: Optional[str] = None, + ) -> AsyncIterator[Dict[str, Any]]: + """ + Send a streaming message and yield SSE events. 
+ + Args: + agent_url: Agent's A2A endpoint URL + message: Text message to send + auth_token: Optional authentication token + + Yields: + SSE events from the agent's response stream + + Example: + >>> async with A2AClient() as client: + >>> async for event in client.send_streaming_message(url, "Analyze this"): + >>> print(event) + """ + if not self._client: + raise A2AClientError("Client not initialized. Use async context manager.") + + stream_url = agent_url.rstrip("/") + "/stream" + + payload = { + "jsonrpc": "2.0", + "id": str(uuid.uuid4()), + "method": "message/stream", + "params": { + "message": { + "messageId": str(uuid.uuid4()), + "role": "user", + "parts": [{"text": message}], + } + }, + } + + async with self._client.stream( + "POST", + stream_url, + json=payload, + headers=self._auth_headers(auth_token), + ) as response: + response.raise_for_status() + async for line in response.aiter_lines(): + if line.startswith("data: "): + try: + yield json.loads(line[6:]) + except json.JSONDecodeError: + continue diff --git a/dbx-agent-app/examples/supervisor/agents/research/dbx_agent_app/discovery/agent_discovery.py b/dbx-agent-app/examples/supervisor/agents/research/dbx_agent_app/discovery/agent_discovery.py new file mode 100644 index 00000000..1563b304 --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/research/dbx_agent_app/discovery/agent_discovery.py @@ -0,0 +1,253 @@ +""" +Agent discovery for Databricks Apps. + +Discovers agent-enabled Databricks Apps by scanning workspace apps +and probing for A2A protocol agent cards. 
+""" + +import asyncio +import logging +from typing import List, Dict, Any, Optional +from dataclasses import dataclass + +from databricks.sdk import WorkspaceClient + +from .a2a_client import A2AClient, A2AClientError + +logger = logging.getLogger(__name__) + +# Agent card probe paths and timeout +AGENT_CARD_PATHS = ["/.well-known/agent.json", "/card"] +AGENT_CARD_PROBE_TIMEOUT = 5.0 + + +@dataclass +class DiscoveredAgent: + """ + An agent discovered from a Databricks App. + + Attributes: + name: Agent name (from agent card or app name) + endpoint_url: Agent's base URL + description: Agent description (from agent card) + capabilities: Comma-separated list of capabilities + protocol_version: A2A protocol version + app_name: Name of the backing Databricks App + """ + name: str + endpoint_url: str + app_name: str + description: Optional[str] = None + capabilities: Optional[str] = None + protocol_version: Optional[str] = None + + +@dataclass +class AgentDiscoveryResult: + """ + Results from agent discovery operation. + + Attributes: + agents: List of discovered agents + errors: List of error messages encountered during discovery + """ + agents: List[DiscoveredAgent] + errors: List[str] + + +class AgentDiscovery: + """ + Discovers agent-enabled Databricks Apps in a workspace. + + Scans running Databricks Apps and probes for A2A protocol agent cards + to identify which apps are agents. + + Usage: + discovery = AgentDiscovery(profile="my-profile") + result = await discovery.discover_agents() + for agent in result.agents: + print(f"Found agent: {agent.name} at {agent.endpoint_url}") + """ + + def __init__(self, profile: Optional[str] = None): + """ + Initialize agent discovery. + + Args: + profile: Databricks CLI profile name (uses default if not specified) + """ + self.profile = profile + self._workspace_token: Optional[str] = None + + async def discover_agents(self) -> AgentDiscoveryResult: + """ + Discover all agent-enabled Databricks Apps in the workspace. 
+ + Returns: + AgentDiscoveryResult with discovered agents and any errors + + Example: + >>> discovery = AgentDiscovery(profile="my-profile") + >>> result = await discovery.discover_agents() + >>> print(f"Found {len(result.agents)} agents") + """ + agents: List[DiscoveredAgent] = [] + errors: List[str] = [] + + try: + app_list = await self._list_workspace_apps() + except Exception as e: + logger.error("Workspace app listing failed: %s", e) + return AgentDiscoveryResult( + agents=[], + errors=[f"Failed to list workspace apps: {e}"], + ) + + if not app_list: + return AgentDiscoveryResult(agents=[], errors=[]) + + # Probe each running app for agent card in parallel + probe_tasks = [ + self._probe_app_for_agent(app_info) + for app_info in app_list + if app_info.get("url") + ] + + if probe_tasks: + probe_results = await asyncio.gather( + *probe_tasks, return_exceptions=True + ) + + for result in probe_results: + if isinstance(result, Exception): + errors.append(str(result)) + elif result is not None: + agents.append(result) + + logger.info( + "Agent discovery: %d apps checked, %d agents found", + len(app_list), len(agents) + ) + + return AgentDiscoveryResult(agents=agents, errors=errors) + + async def _list_workspace_apps(self) -> List[Dict[str, Any]]: + """ + Enumerate Databricks Apps in the workspace. 
+ + Returns: + List of running apps with name, url, owner + """ + def _list_sync() -> tuple: + client = ( + WorkspaceClient(profile=self.profile) + if self.profile + else WorkspaceClient() + ) + + # Extract auth token for cross-app requests + auth_headers = client.config.authenticate() + auth_val = auth_headers.get("Authorization", "") + token = auth_val[7:] if auth_val.startswith("Bearer ") else None + + results = [] + for app in client.apps.list(): + # Check if app is running via compute_status or deployment status + compute_state = None + cs = getattr(app, "compute_status", None) + if cs: + compute_state = str(getattr(cs, "state", "")) + + deploy_state = None + dep = getattr(app, "active_deployment", None) + if dep: + dep_status = getattr(dep, "status", None) + if dep_status: + deploy_state = str(getattr(dep_status, "state", "")) + + app_url = getattr(app, "url", None) or "" + app_url = app_url.rstrip("/") if app_url else "" + + results.append({ + "name": app.name, + "url": app_url, + "owner": getattr(app, "creator", None) or getattr(app, "updater", None), + "compute_state": compute_state, + "deploy_state": deploy_state, + }) + + return results, token + + loop = asyncio.get_event_loop() + result_tuple = await loop.run_in_executor(None, _list_sync) + all_apps, workspace_token = result_tuple + + # Store token for probing + self._workspace_token = workspace_token + + # Filter to running apps + running = [ + a for a in all_apps + if a.get("url") and ( + "ACTIVE" in (a.get("compute_state") or "") + or "SUCCEEDED" in (a.get("deploy_state") or "") + ) + ] + + logger.info( + "Workspace apps: %d total, %d running", + len(all_apps), len(running) + ) + + return running + + async def _probe_app_for_agent( + self, + app_info: Dict[str, Any], + ) -> Optional[DiscoveredAgent]: + """ + Probe a Databricks App for an A2A agent card. 
+ + Args: + app_info: App metadata from workspace listing + + Returns: + DiscoveredAgent if agent card found, None otherwise + """ + app_url = app_info["url"] + app_name = app_info["name"] + + token = self._workspace_token + agent_card = None + + try: + logger.debug(f"Probing app '{app_name}' at {app_url}") + async with A2AClient(timeout=AGENT_CARD_PROBE_TIMEOUT) as client: + agent_card = await client.fetch_agent_card(app_url, auth_token=token) + logger.info(f"Found agent card for '{app_name}'") + except A2AClientError as e: + logger.debug(f"No agent card for '{app_name}': {e}") + return None + except Exception as e: + logger.warning(f"Probe failed for '{app_name}': {e}") + return None + + if not agent_card: + return None + + # Extract capabilities + capabilities_list = [] + caps = agent_card.get("capabilities") + if isinstance(caps, dict): + capabilities_list = list(caps.keys()) + elif isinstance(caps, list): + capabilities_list = caps + + return DiscoveredAgent( + name=agent_card.get("name", app_name), + endpoint_url=app_url, + app_name=app_name, + description=agent_card.get("description"), + capabilities=",".join(capabilities_list) if capabilities_list else None, + protocol_version=agent_card.get("protocolVersion"), + ) diff --git a/dbx-agent-app/examples/supervisor/agents/research/dbx_agent_app/mcp/__init__.py b/dbx-agent-app/examples/supervisor/agents/research/dbx_agent_app/mcp/__init__.py new file mode 100644 index 00000000..60ee38ad --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/research/dbx_agent_app/mcp/__init__.py @@ -0,0 +1,11 @@ +""" +Model Context Protocol (MCP) support. + +This module provides utilities for integrating agents with MCP servers +and exposing UC Functions as MCP tools. 
+""" + +from .mcp_server import MCPServer, MCPServerConfig, setup_mcp_server +from .uc_functions import UCFunctionAdapter + +__all__ = ["MCPServer", "MCPServerConfig", "setup_mcp_server", "UCFunctionAdapter"] diff --git a/dbx-agent-app/examples/supervisor/agents/research/dbx_agent_app/mcp/mcp_server.py b/dbx-agent-app/examples/supervisor/agents/research/dbx_agent_app/mcp/mcp_server.py new file mode 100644 index 00000000..98a456fb --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/research/dbx_agent_app/mcp/mcp_server.py @@ -0,0 +1,196 @@ +""" +MCP server implementation for agents. + +Provides an MCP server that exposes agent tools via the Model Context Protocol. +Works with any FastAPI app. +""" + +import json +import logging +from typing import Dict, Any, List, Optional +from dataclasses import dataclass + +from fastapi import Request + +logger = logging.getLogger(__name__) + + +@dataclass +class MCPServerConfig: + """ + Configuration for MCP server. + + Attributes: + name: Server name + version: Server version + description: Server description + """ + name: str + version: str = "1.0.0" + description: str = "MCP server for agent tools" + + +class MCPServer: + """ + MCP server that exposes tools via JSON-RPC. + + Accepts tools as dicts with name, description, parameters, function keys. + """ + + def __init__(self, tools, config: MCPServerConfig): + """ + Initialize MCP server. + + Args: + tools: List of ToolDefinition objects or dicts with name/description/parameters/function + config: MCP server configuration + """ + self._tools = tools + self.config = config + + def setup_routes(self, app): + """ + Set up MCP protocol routes on the FastAPI app. 
+ + Adds: + - POST /api/mcp - MCP JSON-RPC endpoint + - GET /api/mcp/tools - List available tools + """ + + @app.post("/api/mcp") + async def mcp_jsonrpc(request: Request): + """MCP JSON-RPC endpoint.""" + try: + body = await request.json() + method = body.get("method") + params = body.get("params", {}) + request_id = body.get("id") + + if method == "tools/list": + result = await self._list_tools() + elif method == "tools/call": + result = await self._call_tool(params) + elif method == "server/info": + result = self._server_info() + else: + return { + "jsonrpc": "2.0", + "id": request_id, + "error": { + "code": -32601, + "message": f"Method not found: {method}" + } + } + + return { + "jsonrpc": "2.0", + "id": request_id, + "result": result + } + + except Exception as e: + logger.error("MCP request failed: %s", e) + return { + "jsonrpc": "2.0", + "id": body.get("id") if isinstance(body, dict) else None, + "error": { + "code": -32603, + "message": str(e) + } + } + + @app.get("/api/mcp/tools") + async def list_mcp_tools(): + """List available MCP tools.""" + return await self._list_tools() + + def _get_tool_name(self, tool) -> str: + return tool.name if hasattr(tool, "name") else tool["name"] + + def _get_tool_description(self, tool) -> str: + return tool.description if hasattr(tool, "description") else tool["description"] + + def _get_tool_parameters(self, tool) -> Dict[str, Any]: + return tool.parameters if hasattr(tool, "parameters") else tool.get("parameters", {}) + + def _get_tool_function(self, tool): + return tool.function if hasattr(tool, "function") else tool["function"] + + def _server_info(self) -> Dict[str, Any]: + """Get MCP server information.""" + return { + "name": self.config.name, + "version": self.config.version, + "description": self.config.description, + "protocol_version": "1.0", + } + + async def _list_tools(self) -> Dict[str, Any]: + """List all available tools in MCP format.""" + tools = [] + + for tool in self._tools: + mcp_tool = { + "name": 
self._get_tool_name(tool), + "description": self._get_tool_description(tool), + "inputSchema": { + "type": "object", + "properties": {}, + "required": [] + } + } + + for param_name, param_spec in self._get_tool_parameters(tool).items(): + param_type = param_spec.get("type", "string") + mcp_tool["inputSchema"]["properties"][param_name] = { + "type": param_type, + "description": param_spec.get("description", "") + } + if param_spec.get("required", False): + mcp_tool["inputSchema"]["required"].append(param_name) + + tools.append(mcp_tool) + + return {"tools": tools} + + async def _call_tool(self, params: Dict[str, Any]) -> Dict[str, Any]: + """Call a tool via MCP.""" + tool_name = params.get("name") + arguments = params.get("arguments", {}) + + tool_def = None + for tool in self._tools: + if self._get_tool_name(tool) == tool_name: + tool_def = tool + break + + if not tool_def: + raise ValueError(f"Tool not found: {tool_name}") + + try: + result = await self._get_tool_function(tool_def)(**arguments) + return {"result": result} + except Exception as e: + logger.error("Tool execution failed: %s", e) + raise + + +def setup_mcp_server(tools, config: Optional[MCPServerConfig] = None, fastapi_app=None): + """ + Set up MCP server for an agent. 
+ + Args: + tools: List of tool dicts with name/description/parameters/function keys + config: Optional MCP server configuration + fastapi_app: FastAPI app to add routes to (required) + + Returns: + MCPServer instance + """ + if config is None: + config = MCPServerConfig(name="mcp-server") + + server = MCPServer(tools, config) + server.setup_routes(fastapi_app) + + return server diff --git a/dbx-agent-app/examples/supervisor/agents/research/dbx_agent_app/mcp/uc_functions.py b/dbx-agent-app/examples/supervisor/agents/research/dbx_agent_app/mcp/uc_functions.py new file mode 100644 index 00000000..f1659960 --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/research/dbx_agent_app/mcp/uc_functions.py @@ -0,0 +1,240 @@ +""" +Unity Catalog Functions adapter for MCP. + +Automatically discovers UC Functions and exposes them as MCP tools. +""" + +import logging +from typing import List, Dict, Any, Optional + +from databricks.sdk import WorkspaceClient + +logger = logging.getLogger(__name__) + + +class UCFunctionAdapter: + """ + Adapter for Unity Catalog Functions to MCP protocol. + + Discovers UC Functions and converts them to MCP tool format for + use with agents. + + Usage: + adapter = UCFunctionAdapter(profile="my-profile") + tools = adapter.discover_functions(catalog="main", schema="functions") + """ + + def __init__(self, profile: Optional[str] = None): + """ + Initialize UC Functions adapter. + + Args: + profile: Databricks CLI profile name + """ + self.profile = profile + self._client: Optional[WorkspaceClient] = None + + def _get_client(self) -> WorkspaceClient: + """Get or create workspace client.""" + if self._client is None: + self._client = ( + WorkspaceClient(profile=self.profile) + if self.profile + else WorkspaceClient() + ) + return self._client + + def discover_functions( + self, + catalog: str, + schema: str, + name_pattern: Optional[str] = None, + ) -> List[Dict[str, Any]]: + """ + Discover UC Functions and convert to MCP tool format. 
+ + Args: + catalog: UC catalog name + schema: UC schema name + name_pattern: Optional name pattern filter (SQL LIKE pattern) + + Returns: + List of tool definitions in MCP format + + Example: + >>> adapter = UCFunctionAdapter() + >>> tools = adapter.discover_functions("main", "functions") + >>> for tool in tools: + ... print(tool["name"], tool["description"]) + """ + client = self._get_client() + tools = [] + + try: + functions = client.functions.list( + catalog_name=catalog, + schema_name=schema, + ) + + for func in functions: + # Skip system functions + if func.name.startswith("system."): + continue + + # Apply name pattern filter + if name_pattern and name_pattern not in func.name: + continue + + # Convert to MCP tool format + tool = self._convert_function_to_tool(func) + if tool: + tools.append(tool) + + logger.info( + f"Discovered {len(tools)} UC Functions from {catalog}.{schema}" + ) + + except Exception as e: + logger.error(f"Failed to discover UC Functions: {e}") + + return tools + + def _convert_function_to_tool(self, func) -> Optional[Dict[str, Any]]: + """ + Convert a UC Function to MCP tool format. 
+ + Args: + func: Function info from Databricks SDK + + Returns: + MCP tool definition or None if conversion fails + """ + try: + # Extract function metadata + name = func.name.split(".")[-1] # Get short name + description = func.comment or f"Unity Catalog function: {name}" + + # Build parameter schema + input_schema = { + "type": "object", + "properties": {}, + "required": [] + } + + # Parse function parameters + if hasattr(func, "input_params") and func.input_params: + for param in func.input_params.parameters: + param_name = param.name + param_type = self._map_uc_type_to_json_type(param.type_name) + + input_schema["properties"][param_name] = { + "type": param_type, + "description": param.comment or "" + } + + # Parameters without defaults are required + if not hasattr(param, "default_value") or param.default_value is None: + input_schema["required"].append(param_name) + + return { + "name": name, + "description": description, + "inputSchema": input_schema, + "full_name": func.full_name, + "source": "unity_catalog" + } + + except Exception as e: + logger.warning(f"Failed to convert function {func.name}: {e}") + return None + + def _map_uc_type_to_json_type(self, uc_type: str) -> str: + """ + Map Unity Catalog data type to JSON Schema type. + + Args: + uc_type: UC type name (e.g., "STRING", "BIGINT", "BOOLEAN") + + Returns: + JSON Schema type ("string", "number", "boolean", etc.) 
+ """ + type_mapping = { + "STRING": "string", + "VARCHAR": "string", + "CHAR": "string", + "BIGINT": "integer", + "INT": "integer", + "INTEGER": "integer", + "SMALLINT": "integer", + "TINYINT": "integer", + "DOUBLE": "number", + "FLOAT": "number", + "DECIMAL": "number", + "BOOLEAN": "boolean", + "BINARY": "string", + "DATE": "string", + "TIMESTAMP": "string", + "ARRAY": "array", + "MAP": "object", + "STRUCT": "object", + } + + uc_type_upper = uc_type.upper() + return type_mapping.get(uc_type_upper, "string") + + async def call_function( + self, + full_name: str, + arguments: Dict[str, Any] + ) -> Any: + """ + Call a UC Function with given arguments. + + Args: + full_name: Full function name (catalog.schema.function) + arguments: Function arguments + + Returns: + Function result + + Example: + >>> adapter = UCFunctionAdapter() + >>> result = await adapter.call_function( + ... "main.functions.calculate_tax", + ... {"amount": 100, "rate": 0.08} + ... ) + """ + client = self._get_client() + + try: + # Build SQL query to call the function + args_list = [f":{key}" for key in arguments.keys()] + query = f"SELECT {full_name}({', '.join(args_list)})" + + # Execute via SQL warehouse + # Note: This requires a warehouse ID to be configured + result = client.statement_execution.execute_statement( + statement=query, + warehouse_id=self._get_default_warehouse(), + parameters=[ + {"name": key, "value": str(value)} + for key, value in arguments.items() + ] + ) + + return result.result.data_array[0][0] if result.result.data_array else None + + except Exception as e: + logger.error(f"Failed to call UC Function {full_name}: {e}") + raise + + def _get_default_warehouse(self) -> str: + """Get default SQL warehouse ID from environment or client.""" + import os + warehouse_id = os.getenv("DATABRICKS_WAREHOUSE_ID") + if not warehouse_id: + raise ValueError( + "DATABRICKS_WAREHOUSE_ID not set. " + "Set this environment variable to use UC Functions." 
+ ) + return warehouse_id diff --git a/dbx-agent-app/examples/supervisor/agents/research/dbx_agent_app/py.typed b/dbx-agent-app/examples/supervisor/agents/research/dbx_agent_app/py.typed new file mode 100644 index 00000000..e69de29b diff --git a/dbx-agent-app/examples/supervisor/agents/research/dbx_agent_app/registry/__init__.py b/dbx-agent-app/examples/supervisor/agents/research/dbx_agent_app/registry/__init__.py new file mode 100644 index 00000000..892043b7 --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/research/dbx_agent_app/registry/__init__.py @@ -0,0 +1,10 @@ +""" +Unity Catalog integration for agent registration. + +This module provides utilities for registering agents in Unity Catalog +as AGENT objects, enabling catalog-based discovery and permission management. +""" + +from .uc_registry import UCAgentRegistry, UCAgentSpec, UCRegistrationError + +__all__ = ["UCAgentRegistry", "UCAgentSpec", "UCRegistrationError"] diff --git a/dbx-agent-app/examples/supervisor/agents/research/dbx_agent_app/registry/uc_registry.py b/dbx-agent-app/examples/supervisor/agents/research/dbx_agent_app/registry/uc_registry.py new file mode 100644 index 00000000..f2651aca --- /dev/null +++ b/dbx-agent-app/examples/supervisor/agents/research/dbx_agent_app/registry/uc_registry.py @@ -0,0 +1,345 @@ +""" +Unity Catalog agent registry. + +Registers and manages agents as Unity Catalog AGENT objects for +catalog-based discovery and permission management. +""" + +import json +import logging +from typing import Dict, Any, Optional, List +from dataclasses import dataclass + +from databricks.sdk import WorkspaceClient + +logger = logging.getLogger(__name__) + + +class UCRegistrationError(Exception): + """Raised when agent registration in Unity Catalog fails.""" + pass + + +@dataclass +class UCAgentSpec: + """ + Specification for registering an agent in Unity Catalog. 
class UCAgentRegistry:
    """
    Unity Catalog agent registry.

    Registers agents as UC AGENT objects for catalog-based discovery
    and permission management.

    Usage:
        registry = UCAgentRegistry(profile="my-profile")

        spec = UCAgentSpec(
            name="customer_research",
            catalog="main",
            schema="agents",
            endpoint_url="https://app.databricksapps.com",
            description="Customer research agent",
            capabilities=["search", "analysis"],
        )

        registry.register_agent(spec)
    """

    def __init__(self, profile: Optional[str] = None):
        """
        Initialize UC agent registry.

        Args:
            profile: Databricks CLI profile name (uses default if not specified)
        """
        self.profile = profile
        # Workspace client is created lazily on first use.
        self._client: Optional["WorkspaceClient"] = None

    def _get_client(self) -> "WorkspaceClient":
        """Get or create the workspace client (lazy singleton)."""
        if self._client is None:
            self._client = (
                WorkspaceClient(profile=self.profile)
                if self.profile
                else WorkspaceClient()
            )
        return self._client

    def register_agent(self, spec: "UCAgentSpec") -> Dict[str, Any]:
        """
        Register an agent in Unity Catalog.

        Creates an AGENT object in the specified catalog and schema with
        metadata about the agent's endpoint, capabilities, and properties.

        Args:
            spec: Agent specification

        Returns:
            Dictionary with registration details

        Raises:
            UCRegistrationError: If registration fails

        Example:
            >>> registry = UCAgentRegistry(profile="my-profile")
            >>> spec = UCAgentSpec(
            ...     name="my_agent",
            ...     catalog="main",
            ...     schema="agents",
            ...     endpoint_url="https://app.databricksapps.com",
            ... )
            >>> result = registry.register_agent(spec)
        """
        client = self._get_client()
        full_name = f"{spec.catalog}.{spec.schema}.{spec.name}"

        try:
            # Copy so we never mutate the caller's spec.properties dict
            # (the original code assigned into spec.properties in place).
            properties = dict(spec.properties or {})
            properties["endpoint_url"] = spec.endpoint_url
            properties["agent_card_url"] = f"{spec.endpoint_url}/.well-known/agent.json"

            if spec.capabilities:
                properties["capabilities"] = ",".join(spec.capabilities)

            # UC doesn't have a native AGENT type yet, so we register a
            # registered model and mark it as an agent via tagged comment
            # metadata (see _parse_agent_meta).
            logger.info(f"Registering agent '{full_name}' in Unity Catalog")

            # Fail fast with a clear message if catalog/schema are missing.
            try:
                client.catalogs.get(spec.catalog)
            except Exception as e:
                raise UCRegistrationError(
                    f"Catalog '{spec.catalog}' does not exist or is not accessible: {e}"
                ) from e

            try:
                client.schemas.get(f"{spec.catalog}.{spec.schema}")
            except Exception as e:
                raise UCRegistrationError(
                    f"Schema '{spec.catalog}.{spec.schema}' does not exist or is not accessible: {e}"
                ) from e

            # Encode properties as a JSON suffix in the comment for discovery.
            # Format: "description\n---AGENT_META---\n{json}"
            meta = {"databricks_agent": True, **properties}
            comment = spec.description or ""
            comment_with_meta = f"{comment}\n---AGENT_META---\n{json.dumps(meta)}"

            # Try update first (model may already exist from a prior deploy),
            # fall back to create if it doesn't exist.
            try:
                client.registered_models.update(
                    full_name,
                    comment=comment_with_meta,
                )
                logger.info(f"Updated existing agent '{full_name}'")
            except Exception as update_err:
                # Model doesn't exist or the SP can't access it — try create.
                logger.debug(f"Update failed ({update_err}), trying create")
                try:
                    client.registered_models.create(
                        name=spec.name,
                        catalog_name=spec.catalog,
                        schema_name=spec.schema,
                        comment=comment_with_meta,
                    )
                    logger.info(f"Created new agent '{full_name}'")
                except Exception as create_err:
                    # If create fails with "already exists", the SP just
                    # can't see the model — log a warning but don't fail.
                    err_str = str(create_err).lower()
                    if "already exists" in err_str or "not a valid name" in err_str:
                        logger.warning(
                            "Agent '%s' exists but SP cannot update it. "
                            "Grant the app's SP ownership or MANAGE on the model.",
                            full_name,
                        )
                    else:
                        raise

            logger.info(f"Successfully registered agent '{full_name}'")

            return {
                "full_name": full_name,
                "catalog": spec.catalog,
                "schema": spec.schema,
                "name": spec.name,
                "endpoint_url": spec.endpoint_url,
                "properties": properties,
            }

        except UCRegistrationError:
            raise
        except Exception as e:
            raise UCRegistrationError(
                f"Failed to register agent '{full_name}': {e}"
            ) from e

    @staticmethod
    def _parse_agent_meta(comment: Optional[str]) -> Optional[Dict[str, Any]]:
        """Parse agent metadata from the comment field (JSON after the ---AGENT_META--- marker).

        Returns None if the marker is absent or the JSON is malformed.
        """
        if not comment or "---AGENT_META---" not in comment:
            return None
        try:
            _, meta_json = comment.split("---AGENT_META---", 1)
            return json.loads(meta_json.strip())
        except (ValueError, json.JSONDecodeError):
            return None

    @staticmethod
    def _clean_description(comment: Optional[str]) -> str:
        """Extract the human-readable description (text before the meta marker)."""
        if not comment:
            return ""
        if "---AGENT_META---" in comment:
            return comment.split("---AGENT_META---")[0].strip()
        return comment

    def get_agent(self, catalog: str, schema: str, name: str) -> Optional[Dict[str, Any]]:
        """
        Get agent metadata from Unity Catalog.

        Args:
            catalog: UC catalog name
            schema: UC schema name
            name: Agent name

        Returns:
            Agent metadata dictionary, or None if the model is missing or
            is not tagged as a databricks agent.
        """
        client = self._get_client()
        full_name = f"{catalog}.{schema}.{name}"

        try:
            model = client.registered_models.get(full_name)
            meta = self._parse_agent_meta(model.comment)
            if not meta or not meta.get("databricks_agent"):
                return None

            return {
                "full_name": full_name,
                "catalog": catalog,
                "schema": schema,
                "name": name,
                "description": self._clean_description(model.comment),
                "endpoint_url": meta.get("endpoint_url"),
                "agent_card_url": meta.get("agent_card_url"),
                "capabilities": meta.get("capabilities", "").split(",") if meta.get("capabilities") else None,
                "properties": meta,
            }

        except Exception as e:
            # Treat lookup failures as "not found" — callers get None.
            logger.debug(f"Agent '{full_name}' not found: {e}")
            return None

    def list_agents(
        self,
        catalog: str,
        schema: Optional[str] = None,
    ) -> List[Dict[str, Any]]:
        """
        List all agents in a catalog or schema.

        Args:
            catalog: UC catalog name
            schema: Optional UC schema name (lists all schemas if not specified)

        Returns:
            List of agent metadata dictionaries (empty on schema-list failure)
        """
        client = self._get_client()
        agents = []

        # Determine which schemas to scan.
        schemas_to_scan = [schema] if schema else []
        if not schema:
            try:
                for s in client.schemas.list(catalog_name=catalog):
                    # information_schema never holds agents — skip it.
                    if s.name != "information_schema":
                        schemas_to_scan.append(s.name)
            except Exception as e:
                logger.error(f"Failed to list schemas in {catalog}: {e}")
                return []

        for schema_name in schemas_to_scan:
            try:
                models = client.registered_models.list(
                    catalog_name=catalog, schema_name=schema_name
                )
                for model in models:
                    meta = self._parse_agent_meta(model.comment)
                    if not meta or not meta.get("databricks_agent"):
                        continue

                    agents.append({
                        "full_name": model.full_name,
                        "catalog": catalog,
                        "schema": schema_name,
                        "name": model.name,
                        "description": self._clean_description(model.comment),
                        "endpoint_url": meta.get("endpoint_url"),
                        "capabilities": meta.get("capabilities", "").split(",") if meta.get("capabilities") else None,
                    })
            except Exception as e:
                # Best-effort: a failing schema should not abort the scan.
                logger.debug(f"Failed to list models in {catalog}.{schema_name}: {e}")
                continue

        return agents

    def delete_agent(self, catalog: str, schema: str, name: str) -> bool:
        """
        Delete an agent from Unity Catalog.

        Args:
            catalog: UC catalog name
            schema: UC schema name
            name: Agent name

        Returns:
            True if deleted, False if not found

        Raises:
            UCRegistrationError: If deletion fails for any other reason
        """
        client = self._get_client()
        full_name = f"{catalog}.{schema}.{name}"

        try:
            client.registered_models.delete(full_name)
            logger.info(f"Deleted agent '{full_name}'")
            return True
        except Exception as e:
            if "does not exist" in str(e).lower():
                return False
            raise UCRegistrationError(
                f"Failed to delete agent '{full_name}': {e}"
            ) from e
import os

from dbx_agent_app import app_agent, AgentRequest, AgentResponse
from agent import SupervisorAgent


# Lazily-created singleton supervisor instance.
_agent = None


def get_agent() -> SupervisorAgent:
    """Get or create the SupervisorAgent singleton, configured from env vars."""
    global _agent
    if _agent is None:
        # NOTE(review): default catalog here ("serverless_dxukih_catalog")
        # differs from the @app_agent default ("main") — confirm which is intended.
        config = {
            "endpoint": os.environ.get("MODEL_ENDPOINT", "databricks-claude-sonnet-4-5"),
            "catalog": os.environ.get("UC_CATALOG", "serverless_dxukih_catalog"),
            "schema": os.environ.get("UC_SCHEMA", "agents"),
        }
        _agent = SupervisorAgent(config)
    return _agent


@app_agent(
    name="supervisor",
    description="Multi-agent supervisor that routes queries to specialized sub-agents",
    capabilities=["orchestration", "routing", "research", "expert_finder", "analytics", "compliance"],
    uc_catalog=os.environ.get("UC_CATALOG", "main"),
    uc_schema=os.environ.get("UC_SCHEMA", "agents"),
)
async def supervisor(request: AgentRequest) -> AgentResponse:
    """Route a query to the appropriate sub-agent and wrap the reply."""
    agent = get_agent()
    response = agent.predict(request)

    # Guard both levels: the original indexed content[0] whenever output was
    # non-empty, which raised IndexError on an output item with empty content.
    text = ""
    if response.output and response.output[0].content:
        text = response.output[0].content[0].text
    result = {"response": text}

    # Attach routing metadata for observability (reads the agent's private
    # _last_routing attribute, set during predict()).
    if agent._last_routing:
        result["_routing"] = agent._last_routing

    return AgentResponse.from_dict(result)


@supervisor.tool(description="Get supervisor configuration and sub-agent status")
async def get_config() -> dict:
    """Get supervisor configuration and available sub-agents."""
    agent = get_agent()
    sub_agents = []
    for name, cfg in agent.SUBAGENT_CONFIG.items():
        url = os.environ.get(cfg["url_env"], "not configured")
        sub_agents.append({
            "name": name,
            "url": url,
            "description": {
                "research": "Expert transcript research",
                "expert_finder": "Find experts by topic",
                "analytics": "Business metrics and SQL queries",
                "compliance": "Conflict of interest checks",
            }.get(name, name),
        })
    return {
        "model_endpoint": agent.config.get("endpoint"),
        "sub_agents": sub_agents,
        "tools_count": len(agent.tools),
        "architecture": "invocations_routing",
    }


# The FastAPI app — uvicorn app:supervisor.app
app = supervisor.app


if __name__ == "__main__":
    import uvicorn

    os.environ.setdefault("UC_CATALOG", "main")
    os.environ.setdefault("UC_SCHEMA", "agents")
    os.environ.setdefault("MODEL_ENDPOINT", "databricks-claude-sonnet-4-5")

    print("Starting Supervisor Agent (@app_agent)")
    print("\nEndpoints:")
    print("  http://localhost:8000/invocations              - Responses Agent protocol")
    print("  http://localhost:8000/.well-known/agent.json   - Agent card (A2A)")
    print("  http://localhost:8000/health                   - Health check")
    print("  http://localhost:8000/api/mcp                  - MCP server")
    print()

    uvicorn.run(app, host="0.0.0.0", port=8000)
X-Forwarded-* headers to sub-agents so UC governance applies per-user. +# +# Configure via Databricks CLI or Custom App Integration API: +# databricks apps update --json '{ +# "resources": [{ +# "name": "user-auth", +# "description": "On-behalf-of user authorization", +# "sql": { "permission": "CAN_USE" } +# }] +# }' +# +# See: https://docs.databricks.com/en/dev-tools/databricks-apps/app-development.html diff --git a/dbx-agent-app/examples/supervisor/dbx_agent_app/__init__.py b/dbx-agent-app/examples/supervisor/dbx_agent_app/__init__.py new file mode 100644 index 00000000..95db07d3 --- /dev/null +++ b/dbx-agent-app/examples/supervisor/dbx_agent_app/__init__.py @@ -0,0 +1,41 @@ +""" +dbx-agent-app: Framework for building discoverable AI agents on Databricks Apps. + +This package provides: +- @app_agent: Decorator to turn an async function into a discoverable agent +- AgentDiscovery: Discover agents in your Databricks workspace +- A2AClient: Communicate with agents using the A2A protocol +- UCAgentRegistry: Register agents in Unity Catalog +- MCPServerConfig: Configure MCP server for agent tools +""" + +from .discovery import AgentDiscovery, DiscoveredAgent, AgentDiscoveryResult, A2AClient, A2AClientError +from .mcp import MCPServerConfig, setup_mcp_server, UCFunctionAdapter +from .registry import UCAgentRegistry, UCAgentSpec, UCRegistrationError +from .dashboard import create_dashboard_app + +try: + from importlib.metadata import version + __version__ = version("dbx-agent-app") +except Exception: + __version__ = "0.1.0" + +__all__ = [ + # Discovery + "AgentDiscovery", + "DiscoveredAgent", + "AgentDiscoveryResult", + "A2AClient", + "A2AClientError", + # Registry + "UCAgentRegistry", + "UCAgentSpec", + "UCRegistrationError", + # MCP + "MCPServerConfig", + "setup_mcp_server", + "UCFunctionAdapter", + # Dashboard + "create_dashboard_app", +] + diff --git a/dbx-agent-app/examples/supervisor/dbx_agent_app/core/__init__.py 
b/dbx-agent-app/examples/supervisor/dbx_agent_app/core/__init__.py new file mode 100644 index 00000000..623da88c --- /dev/null +++ b/dbx-agent-app/examples/supervisor/dbx_agent_app/core/__init__.py @@ -0,0 +1 @@ +"""Core agent application components.""" diff --git a/dbx-agent-app/examples/supervisor/dbx_agent_app/dashboard/__init__.py b/dbx-agent-app/examples/supervisor/dbx_agent_app/dashboard/__init__.py new file mode 100644 index 00000000..89929a08 --- /dev/null +++ b/dbx-agent-app/examples/supervisor/dbx_agent_app/dashboard/__init__.py @@ -0,0 +1,14 @@ +""" +Developer dashboard for agent discovery. + +Launch via CLI: + dbx-agent-app dashboard --profile my-profile + +Or programmatically: + from dbx_agent_app.dashboard import create_dashboard_app, run_dashboard +""" + +from .app import create_dashboard_app +from .cli import main as run_dashboard + +__all__ = ["create_dashboard_app", "run_dashboard"] diff --git a/dbx-agent-app/examples/supervisor/dbx_agent_app/dashboard/app.py b/dbx-agent-app/examples/supervisor/dbx_agent_app/dashboard/app.py new file mode 100644 index 00000000..d16c3d54 --- /dev/null +++ b/dbx-agent-app/examples/supervisor/dbx_agent_app/dashboard/app.py @@ -0,0 +1,112 @@ +""" +FastAPI application for the developer dashboard. 
def create_dashboard_app(
    scanner: "DashboardScanner",
    profile: Optional[str] = None,
) -> "FastAPI":
    """Assemble the dashboard FastAPI application.

    Args:
        scanner: Scanner holding cached discovery results.
        profile: Databricks CLI profile name, reported by /health.

    Returns:
        Configured FastAPI app with HTML pages and a JSON API.
    """
    app = FastAPI(title="dbx-agent-app dashboard", docs_url=None, redoc_url=None)

    # ----- HTML pages -----------------------------------------------------

    @app.get("/", response_class=HTMLResponse)
    async def index():
        # Render the grid of agents from the cached scan results.
        return render_agent_list(scanner.get_agents())

    @app.get("/agent/{name}", response_class=HTMLResponse)
    async def agent_detail(name: str):
        agent = scanner.get_agent_by_name(name)
        if not agent:
            # NOTE(review): original 404 markup garbled in extraction —
            # confirm the exact HTML body.
            return HTMLResponse("<h1>Agent not found</h1>", status_code=404)

        # The card fetch is best-effort; the page renders without it.
        card = None
        try:
            card = await scanner.get_agent_card(agent.endpoint_url)
        except Exception as e:
            logger.warning("Could not fetch card for %s: %s", name, e)

        return render_agent_detail(agent, card)

    # ----- JSON API -------------------------------------------------------

    @app.get("/api/agents")
    async def api_agents():
        # Flatten each DiscoveredAgent into a plain JSON-serializable dict.
        return [
            {
                "name": a.name,
                "endpoint_url": a.endpoint_url,
                "app_name": a.app_name,
                "description": a.description,
                "capabilities": a.capabilities,
                "protocol_version": a.protocol_version,
            }
            for a in scanner.get_agents()
        ]

    @app.get("/api/agents/{name}/card")
    async def api_agent_card(name: str):
        agent = scanner.get_agent_by_name(name)
        if not agent:
            return JSONResponse({"error": "Agent not found"}, status_code=404)

        try:
            return await scanner.get_agent_card(agent.endpoint_url)
        except Exception as e:
            # 502: we are acting as a gateway to the remote agent.
            return JSONResponse({"error": str(e)}, status_code=502)

    @app.post("/api/agents/{name}/mcp")
    async def api_mcp_proxy(name: str, request: Request):
        agent = scanner.get_agent_by_name(name)
        if not agent:
            return JSONResponse({"error": "Agent not found"}, status_code=404)

        try:
            body = await request.json()
            return await scanner.proxy_mcp(agent.endpoint_url, body)
        except Exception as e:
            # Surface proxy failures as a JSON-RPC error envelope.
            return JSONResponse(
                {"jsonrpc": "2.0", "id": None, "error": {"code": -1, "message": str(e)}},
                status_code=502,
            )

    @app.post("/api/scan")
    async def api_scan():
        # Trigger a fresh workspace scan and report what was found.
        found = await scanner.scan()
        return {"count": len(found), "agents": [a.name for a in found]}

    @app.get("/health")
    async def health():
        return {
            "status": "ok",
            "agents_cached": len(scanner.get_agents()),
            "profile": profile,
        }

    return app
/dev/null +++ b/dbx-agent-app/examples/supervisor/dbx_agent_app/dashboard/cli.py @@ -0,0 +1,63 @@ +""" +CLI entry point for the developer dashboard. + +Usage: + dbx-agent-app dashboard --profile my-profile --port 8501 +""" + +import argparse +import asyncio +import logging +import sys +import webbrowser + +import uvicorn + +from .scanner import DashboardScanner +from .app import create_dashboard_app + + +def main(): + parser = argparse.ArgumentParser( + prog="dbx-agent-app", + description="Developer dashboard for Databricks agent discovery", + ) + sub = parser.add_subparsers(dest="command") + + dash = sub.add_parser("dashboard", help="Launch the agent discovery dashboard") + dash.add_argument("--profile", type=str, default=None, help="Databricks CLI profile") + dash.add_argument("--port", type=int, default=8501, help="Port to serve on (default: 8501)") + dash.add_argument("--host", type=str, default="127.0.0.1", help="Host to bind (default: 127.0.0.1)") + dash.add_argument("--no-browser", action="store_true", help="Don't auto-open browser") + + args = parser.parse_args() + + if args.command != "dashboard": + parser.print_help() + sys.exit(1) + + logging.basicConfig(level=logging.INFO, format="%(levelname)s %(name)s: %(message)s") + + scanner = DashboardScanner(profile=args.profile) + + # Run initial scan + print(f"Scanning workspace for agents (profile={args.profile or 'default'})...") + try: + agents = asyncio.run(scanner.scan()) + print(f"Found {len(agents)} agent(s)") + except Exception as e: + print(f"Initial scan failed: {e}", file=sys.stderr) + print("Dashboard will start anyway — use the Scan button to retry.") + + app = create_dashboard_app(scanner, profile=args.profile) + + url = f"http://{args.host}:{args.port}" + if not args.no_browser: + webbrowser.open(url) + + print(f"Dashboard running at {url}") + uvicorn.run(app, host=args.host, port=args.port, log_level="warning") + + +if __name__ == "__main__": + main() diff --git 
class DashboardScanner:
    """
    Thin wrapper around AgentDiscovery that adds result caching
    and MCP JSON-RPC proxying for the dashboard UI.
    """

    def __init__(self, profile: Optional[str] = None):
        self._discovery = AgentDiscovery(profile=profile)
        self._agents: "List[DiscoveredAgent]" = []
        self._scan_lock = asyncio.Lock()
        self._scanned = False

    async def scan(self) -> "List[DiscoveredAgent]":
        """Run workspace discovery and cache results. Thread-safe via asyncio.Lock."""
        async with self._scan_lock:
            outcome = await self._discovery.discover_agents()
            self._agents = outcome.agents
            self._scanned = True
            # Discovery is best-effort: log each error but keep the results.
            for err in outcome.errors:
                logger.warning("Discovery error: %s", err)
            return self._agents

    def get_agents(self) -> "List[DiscoveredAgent]":
        """Return a copy of the agent list cached by the last scan."""
        return list(self._agents)

    def get_agent_by_name(self, name: str) -> "Optional[DiscoveredAgent]":
        """Look up a cached agent by its agent name or backing app name."""
        matches = (a for a in self._agents if name in (a.name, a.app_name))
        return next(matches, None)

    @property
    def workspace_token(self) -> Optional[str]:
        """Auth token extracted during discovery, used for cross-app requests."""
        # NOTE: reaches into AgentDiscovery's private attribute.
        return self._discovery._workspace_token

    async def get_agent_card(self, endpoint_url: str) -> Dict[str, Any]:
        """Fetch the full agent card JSON from a remote agent."""
        token = self.workspace_token
        async with A2AClient(timeout=10.0) as client:
            return await client.fetch_agent_card(endpoint_url, auth_token=token)

    async def proxy_mcp(self, endpoint_url: str, payload: Dict[str, Any]) -> Dict[str, Any]:
        """
        Forward a JSON-RPC request to an agent's MCP endpoint.

        Args:
            endpoint_url: Agent base URL
            payload: Complete JSON-RPC 2.0 request body

        Returns:
            JSON-RPC response from the agent
        """
        mcp_url = f"{endpoint_url.rstrip('/')}/api/mcp"

        headers = {"Content-Type": "application/json"}
        token = self.workspace_token
        if token:
            headers["Authorization"] = f"Bearer {token}"

        async with httpx.AsyncClient(timeout=30.0, follow_redirects=True) as session:
            reply = await session.post(mcp_url, json=payload, headers=headers)
            reply.raise_for_status()
            return reply.json()
+
+

dbx-agent-app dashboard

+ +
+
+
+{content} +
+ +""" + + +# --------------------------------------------------------------------------- +# Agent list page +# --------------------------------------------------------------------------- + +def render_agent_list(agents: List[DiscoveredAgent]) -> str: + """Main page: grid of agent cards + scan button.""" + if not agents: + cards_html = """ +
+

No agents discovered

+

No agent-enabled Databricks Apps found in your workspace. Click Scan to retry.

+
""" + else: + cards = [] + for a in agents: + caps = "" + if a.capabilities: + badges = "".join( + f'{html.escape(c.strip())} ' + for c in a.capabilities.split(",") + ) + caps = f'
{badges}
' + + desc = html.escape(a.description or "No description") + cards.append(f""" + +
+

{html.escape(a.name)}

+

{desc}

+
+ App: {html.escape(a.app_name)} + {f'Protocol: {html.escape(a.protocol_version)}' if a.protocol_version else ''} +
+ {caps} +
+
""") + cards_html = f'
{"".join(cards)}
' + + return render_base( + "Agent Dashboard", + f""" +
+ {len(agents)} agent{"s" if len(agents) != 1 else ""} discovered + +
+{cards_html} +""", + ) + + +# --------------------------------------------------------------------------- +# Agent detail page +# --------------------------------------------------------------------------- + +def render_agent_detail( + agent: DiscoveredAgent, + card: Optional[Dict[str, Any]] = None, +) -> str: + """Detail page: agent card JSON, tools list, MCP test panel.""" + card_json = json.dumps(card, indent=2) if card else "Card not available" + + # Extract tools from card if present + tools_html = "" + if card: + skills = card.get("skills") or card.get("tools") or [] + if skills: + rows = [] + for t in skills: + name = html.escape(t.get("name", t.get("id", "unknown"))) + desc = html.escape(t.get("description", "")) + rows.append( + f'
{name}' + f'
{desc}
' + ) + tools_html = f""" +
+

Tools ({len(skills)})

+ {"".join(rows)} +
""" + + safe_name = html.escape(agent.name) + safe_endpoint = html.escape(agent.endpoint_url) + + return render_base( + f"{safe_name} — Agent Dashboard", + f""" +
+ ← All agents +

{safe_name}

+

{html.escape(agent.description or 'No description')}

+
+ Endpoint: {safe_endpoint} + App: {html.escape(agent.app_name)} + {f'{html.escape(agent.protocol_version)}' if agent.protocol_version else ''} +
+
+ +
+

Agent Card

+
{html.escape(card_json)}
+
+ +{tools_html} + +
+

MCP Test Panel

+

+ Send a JSON-RPC request to this agent's /api/mcp endpoint. +

+
+ + +
+ + +
+
+ +""", + ) diff --git a/dbx-agent-app/examples/supervisor/dbx_agent_app/discovery/__init__.py b/dbx-agent-app/examples/supervisor/dbx_agent_app/discovery/__init__.py new file mode 100644 index 00000000..d6d04008 --- /dev/null +++ b/dbx-agent-app/examples/supervisor/dbx_agent_app/discovery/__init__.py @@ -0,0 +1,24 @@ +""" +Agent discovery for Databricks Apps. + +This module provides clients and utilities for discovering agent-enabled +Databricks Apps that expose A2A protocol agent cards. +""" + +from .agent_discovery import ( + AgentDiscovery, + DiscoveredAgent, + AgentDiscoveryResult, +) +from .a2a_client import ( + A2AClient, + A2AClientError, +) + +__all__ = [ + "AgentDiscovery", + "DiscoveredAgent", + "AgentDiscoveryResult", + "A2AClient", + "A2AClientError", +] diff --git a/dbx-agent-app/examples/supervisor/dbx_agent_app/discovery/a2a_client.py b/dbx-agent-app/examples/supervisor/dbx_agent_app/discovery/a2a_client.py new file mode 100644 index 00000000..1243d1a3 --- /dev/null +++ b/dbx-agent-app/examples/supervisor/dbx_agent_app/discovery/a2a_client.py @@ -0,0 +1,268 @@ +""" +A2A Client for agent-to-agent communication. + +Implements the A2A protocol for discovering and communicating with peer agents. +""" + +import json +import uuid +import logging +from typing import Dict, Any, Optional, AsyncIterator + +import httpx + +logger = logging.getLogger(__name__) + + +class A2AClientError(Exception): + """Raised when an A2A operation fails.""" + pass + + +class A2AClient: + """ + Async client for A2A protocol communication with peer agents. + + Usage: + async with A2AClient() as client: + card = await client.fetch_agent_card("https://app.databricksapps.com") + result = await client.send_message("https://app.databricksapps.com/api/a2a", "Hello") + """ + + def __init__(self, timeout: float = 60.0): + """ + Initialize A2A client. 
+ + Args: + timeout: Request timeout in seconds + """ + self.timeout = timeout + self._client: Optional[httpx.AsyncClient] = None + + async def __aenter__(self): + self._client = httpx.AsyncClient( + timeout=self.timeout, + follow_redirects=True, + ) + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + if self._client: + await self._client.aclose() + + def _auth_headers(self, auth_token: Optional[str] = None) -> Dict[str, str]: + """Build authentication headers.""" + headers = {"Content-Type": "application/json"} + if auth_token: + headers["Authorization"] = f"Bearer {auth_token}" + return headers + + async def fetch_agent_card( + self, + base_url: str, + auth_token: Optional[str] = None, + ) -> Dict[str, Any]: + """ + Fetch an agent's A2A protocol agent card. + + Tries /.well-known/agent.json first, then /card as fallback. + Handles OAuth redirects gracefully (returns error instead of following). + + Args: + base_url: Base URL of the agent application + auth_token: Optional OAuth token for authenticated requests + + Returns: + Agent card JSON data + + Raises: + A2AClientError: If agent card cannot be fetched + + Example: + >>> async with A2AClient() as client: + >>> card = await client.fetch_agent_card("https://app.databricksapps.com") + >>> print(card["name"], card["description"]) + """ + if not self._client: + raise A2AClientError("Client not initialized. 
Use async context manager.") + + headers = {} + if auth_token: + headers["Authorization"] = f"Bearer {auth_token}" + + # Use a client that doesn't follow redirects to detect OAuth flows + async with httpx.AsyncClient(timeout=self.timeout, follow_redirects=False) as probe_client: + for path in ["/.well-known/agent.json", "/card"]: + try: + url = base_url.rstrip("/") + path + response = await probe_client.get(url, headers=headers) + + # OAuth redirect detected - app requires interactive auth + if response.status_code in (301, 302, 303, 307, 308): + logger.debug(f"OAuth redirect detected for {url}") + continue + + if response.status_code == 200: + if not response.text or response.text.isspace(): + logger.debug(f"Empty response body for {url}") + continue + return response.json() + + except Exception as e: + logger.debug(f"Agent card fetch failed for {url}: {e}") + continue + + raise A2AClientError(f"Could not fetch agent card from {base_url}") + + async def _jsonrpc_call( + self, + url: str, + method: str, + params: Dict[str, Any], + auth_token: Optional[str] = None, + ) -> Dict[str, Any]: + """ + Send a JSON-RPC 2.0 request to an agent. + + Args: + url: A2A endpoint URL + method: JSON-RPC method name (e.g., "message/send") + params: Method parameters + auth_token: Optional authentication token + + Returns: + JSON-RPC result + + Raises: + A2AClientError: If request fails or returns error + """ + if not self._client: + raise A2AClientError("Client not initialized. 
Use async context manager.") + + payload = { + "jsonrpc": "2.0", + "id": str(uuid.uuid4()), + "method": method, + "params": params, + } + + try: + response = await self._client.post( + url, + json=payload, + headers=self._auth_headers(auth_token), + ) + response.raise_for_status() + result = response.json() + + if "error" in result: + error = result["error"] + raise A2AClientError( + f"A2A error: {error.get('message', 'Unknown')} " + f"(code: {error.get('code')})" + ) + + return result.get("result", {}) + + except httpx.TimeoutException as e: + raise A2AClientError(f"Request to {url} timed out: {e}") + except httpx.HTTPStatusError as e: + raise A2AClientError( + f"HTTP error from {url}: {e.response.status_code}" + ) + except json.JSONDecodeError as e: + raise A2AClientError(f"Invalid JSON from {url}: {e}") + + async def send_message( + self, + agent_url: str, + message: str, + context_id: Optional[str] = None, + auth_token: Optional[str] = None, + ) -> Dict[str, Any]: + """ + Send a message to a peer agent using A2A protocol. + + Args: + agent_url: Agent's A2A endpoint URL + message: Text message to send + context_id: Optional conversation context ID + auth_token: Optional authentication token + + Returns: + Agent's response + + Example: + >>> async with A2AClient() as client: + >>> response = await client.send_message( + >>> "https://app.databricksapps.com/api/a2a", + >>> "What are your capabilities?" + >>> ) + """ + params: Dict[str, Any] = { + "message": { + "messageId": str(uuid.uuid4()), + "role": "user", + "parts": [{"text": message}], + } + } + if context_id: + params["message"]["contextId"] = context_id + + return await self._jsonrpc_call( + agent_url, "message/send", params, auth_token + ) + + async def send_streaming_message( + self, + agent_url: str, + message: str, + auth_token: Optional[str] = None, + ) -> AsyncIterator[Dict[str, Any]]: + """ + Send a streaming message and yield SSE events. 
+ + Args: + agent_url: Agent's A2A endpoint URL + message: Text message to send + auth_token: Optional authentication token + + Yields: + SSE events from the agent's response stream + + Example: + >>> async with A2AClient() as client: + >>> async for event in client.send_streaming_message(url, "Analyze this"): + >>> print(event) + """ + if not self._client: + raise A2AClientError("Client not initialized. Use async context manager.") + + stream_url = agent_url.rstrip("/") + "/stream" + + payload = { + "jsonrpc": "2.0", + "id": str(uuid.uuid4()), + "method": "message/stream", + "params": { + "message": { + "messageId": str(uuid.uuid4()), + "role": "user", + "parts": [{"text": message}], + } + }, + } + + async with self._client.stream( + "POST", + stream_url, + json=payload, + headers=self._auth_headers(auth_token), + ) as response: + response.raise_for_status() + async for line in response.aiter_lines(): + if line.startswith("data: "): + try: + yield json.loads(line[6:]) + except json.JSONDecodeError: + continue diff --git a/dbx-agent-app/examples/supervisor/dbx_agent_app/discovery/agent_discovery.py b/dbx-agent-app/examples/supervisor/dbx_agent_app/discovery/agent_discovery.py new file mode 100644 index 00000000..1563b304 --- /dev/null +++ b/dbx-agent-app/examples/supervisor/dbx_agent_app/discovery/agent_discovery.py @@ -0,0 +1,253 @@ +""" +Agent discovery for Databricks Apps. + +Discovers agent-enabled Databricks Apps by scanning workspace apps +and probing for A2A protocol agent cards. +""" + +import asyncio +import logging +from typing import List, Dict, Any, Optional +from dataclasses import dataclass + +from databricks.sdk import WorkspaceClient + +from .a2a_client import A2AClient, A2AClientError + +logger = logging.getLogger(__name__) + +# Agent card probe paths and timeout +AGENT_CARD_PATHS = ["/.well-known/agent.json", "/card"] +AGENT_CARD_PROBE_TIMEOUT = 5.0 + + +@dataclass +class DiscoveredAgent: + """ + An agent discovered from a Databricks App. 
+ + Attributes: + name: Agent name (from agent card or app name) + endpoint_url: Agent's base URL + description: Agent description (from agent card) + capabilities: Comma-separated list of capabilities + protocol_version: A2A protocol version + app_name: Name of the backing Databricks App + """ + name: str + endpoint_url: str + app_name: str + description: Optional[str] = None + capabilities: Optional[str] = None + protocol_version: Optional[str] = None + + +@dataclass +class AgentDiscoveryResult: + """ + Results from agent discovery operation. + + Attributes: + agents: List of discovered agents + errors: List of error messages encountered during discovery + """ + agents: List[DiscoveredAgent] + errors: List[str] + + +class AgentDiscovery: + """ + Discovers agent-enabled Databricks Apps in a workspace. + + Scans running Databricks Apps and probes for A2A protocol agent cards + to identify which apps are agents. + + Usage: + discovery = AgentDiscovery(profile="my-profile") + result = await discovery.discover_agents() + for agent in result.agents: + print(f"Found agent: {agent.name} at {agent.endpoint_url}") + """ + + def __init__(self, profile: Optional[str] = None): + """ + Initialize agent discovery. + + Args: + profile: Databricks CLI profile name (uses default if not specified) + """ + self.profile = profile + self._workspace_token: Optional[str] = None + + async def discover_agents(self) -> AgentDiscoveryResult: + """ + Discover all agent-enabled Databricks Apps in the workspace. 
+ + Returns: + AgentDiscoveryResult with discovered agents and any errors + + Example: + >>> discovery = AgentDiscovery(profile="my-profile") + >>> result = await discovery.discover_agents() + >>> print(f"Found {len(result.agents)} agents") + """ + agents: List[DiscoveredAgent] = [] + errors: List[str] = [] + + try: + app_list = await self._list_workspace_apps() + except Exception as e: + logger.error("Workspace app listing failed: %s", e) + return AgentDiscoveryResult( + agents=[], + errors=[f"Failed to list workspace apps: {e}"], + ) + + if not app_list: + return AgentDiscoveryResult(agents=[], errors=[]) + + # Probe each running app for agent card in parallel + probe_tasks = [ + self._probe_app_for_agent(app_info) + for app_info in app_list + if app_info.get("url") + ] + + if probe_tasks: + probe_results = await asyncio.gather( + *probe_tasks, return_exceptions=True + ) + + for result in probe_results: + if isinstance(result, Exception): + errors.append(str(result)) + elif result is not None: + agents.append(result) + + logger.info( + "Agent discovery: %d apps checked, %d agents found", + len(app_list), len(agents) + ) + + return AgentDiscoveryResult(agents=agents, errors=errors) + + async def _list_workspace_apps(self) -> List[Dict[str, Any]]: + """ + Enumerate Databricks Apps in the workspace. 
+ + Returns: + List of running apps with name, url, owner + """ + def _list_sync() -> tuple: + client = ( + WorkspaceClient(profile=self.profile) + if self.profile + else WorkspaceClient() + ) + + # Extract auth token for cross-app requests + auth_headers = client.config.authenticate() + auth_val = auth_headers.get("Authorization", "") + token = auth_val[7:] if auth_val.startswith("Bearer ") else None + + results = [] + for app in client.apps.list(): + # Check if app is running via compute_status or deployment status + compute_state = None + cs = getattr(app, "compute_status", None) + if cs: + compute_state = str(getattr(cs, "state", "")) + + deploy_state = None + dep = getattr(app, "active_deployment", None) + if dep: + dep_status = getattr(dep, "status", None) + if dep_status: + deploy_state = str(getattr(dep_status, "state", "")) + + app_url = getattr(app, "url", None) or "" + app_url = app_url.rstrip("/") if app_url else "" + + results.append({ + "name": app.name, + "url": app_url, + "owner": getattr(app, "creator", None) or getattr(app, "updater", None), + "compute_state": compute_state, + "deploy_state": deploy_state, + }) + + return results, token + + loop = asyncio.get_event_loop() + result_tuple = await loop.run_in_executor(None, _list_sync) + all_apps, workspace_token = result_tuple + + # Store token for probing + self._workspace_token = workspace_token + + # Filter to running apps + running = [ + a for a in all_apps + if a.get("url") and ( + "ACTIVE" in (a.get("compute_state") or "") + or "SUCCEEDED" in (a.get("deploy_state") or "") + ) + ] + + logger.info( + "Workspace apps: %d total, %d running", + len(all_apps), len(running) + ) + + return running + + async def _probe_app_for_agent( + self, + app_info: Dict[str, Any], + ) -> Optional[DiscoveredAgent]: + """ + Probe a Databricks App for an A2A agent card. 
+ + Args: + app_info: App metadata from workspace listing + + Returns: + DiscoveredAgent if agent card found, None otherwise + """ + app_url = app_info["url"] + app_name = app_info["name"] + + token = self._workspace_token + agent_card = None + + try: + logger.debug(f"Probing app '{app_name}' at {app_url}") + async with A2AClient(timeout=AGENT_CARD_PROBE_TIMEOUT) as client: + agent_card = await client.fetch_agent_card(app_url, auth_token=token) + logger.info(f"Found agent card for '{app_name}'") + except A2AClientError as e: + logger.debug(f"No agent card for '{app_name}': {e}") + return None + except Exception as e: + logger.warning(f"Probe failed for '{app_name}': {e}") + return None + + if not agent_card: + return None + + # Extract capabilities + capabilities_list = [] + caps = agent_card.get("capabilities") + if isinstance(caps, dict): + capabilities_list = list(caps.keys()) + elif isinstance(caps, list): + capabilities_list = caps + + return DiscoveredAgent( + name=agent_card.get("name", app_name), + endpoint_url=app_url, + app_name=app_name, + description=agent_card.get("description"), + capabilities=",".join(capabilities_list) if capabilities_list else None, + protocol_version=agent_card.get("protocolVersion"), + ) diff --git a/dbx-agent-app/examples/supervisor/dbx_agent_app/mcp/__init__.py b/dbx-agent-app/examples/supervisor/dbx_agent_app/mcp/__init__.py new file mode 100644 index 00000000..60ee38ad --- /dev/null +++ b/dbx-agent-app/examples/supervisor/dbx_agent_app/mcp/__init__.py @@ -0,0 +1,11 @@ +""" +Model Context Protocol (MCP) support. + +This module provides utilities for integrating agents with MCP servers +and exposing UC Functions as MCP tools. 
+""" + +from .mcp_server import MCPServer, MCPServerConfig, setup_mcp_server +from .uc_functions import UCFunctionAdapter + +__all__ = ["MCPServer", "MCPServerConfig", "setup_mcp_server", "UCFunctionAdapter"] diff --git a/dbx-agent-app/examples/supervisor/dbx_agent_app/mcp/mcp_server.py b/dbx-agent-app/examples/supervisor/dbx_agent_app/mcp/mcp_server.py new file mode 100644 index 00000000..98a456fb --- /dev/null +++ b/dbx-agent-app/examples/supervisor/dbx_agent_app/mcp/mcp_server.py @@ -0,0 +1,196 @@ +""" +MCP server implementation for agents. + +Provides an MCP server that exposes agent tools via the Model Context Protocol. +Works with any FastAPI app. +""" + +import json +import logging +from typing import Dict, Any, List, Optional +from dataclasses import dataclass + +from fastapi import Request + +logger = logging.getLogger(__name__) + + +@dataclass +class MCPServerConfig: + """ + Configuration for MCP server. + + Attributes: + name: Server name + version: Server version + description: Server description + """ + name: str + version: str = "1.0.0" + description: str = "MCP server for agent tools" + + +class MCPServer: + """ + MCP server that exposes tools via JSON-RPC. + + Accepts tools as dicts with name, description, parameters, function keys. + """ + + def __init__(self, tools, config: MCPServerConfig): + """ + Initialize MCP server. + + Args: + tools: List of ToolDefinition objects or dicts with name/description/parameters/function + config: MCP server configuration + """ + self._tools = tools + self.config = config + + def setup_routes(self, app): + """ + Set up MCP protocol routes on the FastAPI app. 
+ + Adds: + - POST /api/mcp - MCP JSON-RPC endpoint + - GET /api/mcp/tools - List available tools + """ + + @app.post("/api/mcp") + async def mcp_jsonrpc(request: Request): + """MCP JSON-RPC endpoint.""" + try: + body = await request.json() + method = body.get("method") + params = body.get("params", {}) + request_id = body.get("id") + + if method == "tools/list": + result = await self._list_tools() + elif method == "tools/call": + result = await self._call_tool(params) + elif method == "server/info": + result = self._server_info() + else: + return { + "jsonrpc": "2.0", + "id": request_id, + "error": { + "code": -32601, + "message": f"Method not found: {method}" + } + } + + return { + "jsonrpc": "2.0", + "id": request_id, + "result": result + } + + except Exception as e: + logger.error("MCP request failed: %s", e) + return { + "jsonrpc": "2.0", + "id": body.get("id") if isinstance(body, dict) else None, + "error": { + "code": -32603, + "message": str(e) + } + } + + @app.get("/api/mcp/tools") + async def list_mcp_tools(): + """List available MCP tools.""" + return await self._list_tools() + + def _get_tool_name(self, tool) -> str: + return tool.name if hasattr(tool, "name") else tool["name"] + + def _get_tool_description(self, tool) -> str: + return tool.description if hasattr(tool, "description") else tool["description"] + + def _get_tool_parameters(self, tool) -> Dict[str, Any]: + return tool.parameters if hasattr(tool, "parameters") else tool.get("parameters", {}) + + def _get_tool_function(self, tool): + return tool.function if hasattr(tool, "function") else tool["function"] + + def _server_info(self) -> Dict[str, Any]: + """Get MCP server information.""" + return { + "name": self.config.name, + "version": self.config.version, + "description": self.config.description, + "protocol_version": "1.0", + } + + async def _list_tools(self) -> Dict[str, Any]: + """List all available tools in MCP format.""" + tools = [] + + for tool in self._tools: + mcp_tool = { + "name": 
self._get_tool_name(tool), + "description": self._get_tool_description(tool), + "inputSchema": { + "type": "object", + "properties": {}, + "required": [] + } + } + + for param_name, param_spec in self._get_tool_parameters(tool).items(): + param_type = param_spec.get("type", "string") + mcp_tool["inputSchema"]["properties"][param_name] = { + "type": param_type, + "description": param_spec.get("description", "") + } + if param_spec.get("required", False): + mcp_tool["inputSchema"]["required"].append(param_name) + + tools.append(mcp_tool) + + return {"tools": tools} + + async def _call_tool(self, params: Dict[str, Any]) -> Dict[str, Any]: + """Call a tool via MCP.""" + tool_name = params.get("name") + arguments = params.get("arguments", {}) + + tool_def = None + for tool in self._tools: + if self._get_tool_name(tool) == tool_name: + tool_def = tool + break + + if not tool_def: + raise ValueError(f"Tool not found: {tool_name}") + + try: + result = await self._get_tool_function(tool_def)(**arguments) + return {"result": result} + except Exception as e: + logger.error("Tool execution failed: %s", e) + raise + + +def setup_mcp_server(tools, config: Optional[MCPServerConfig] = None, fastapi_app=None): + """ + Set up MCP server for an agent. + + Args: + tools: List of tool dicts with name/description/parameters/function keys + config: Optional MCP server configuration + fastapi_app: FastAPI app to add routes to (required) + + Returns: + MCPServer instance + """ + if config is None: + config = MCPServerConfig(name="mcp-server") + + server = MCPServer(tools, config) + server.setup_routes(fastapi_app) + + return server diff --git a/dbx-agent-app/examples/supervisor/dbx_agent_app/mcp/uc_functions.py b/dbx-agent-app/examples/supervisor/dbx_agent_app/mcp/uc_functions.py new file mode 100644 index 00000000..f1659960 --- /dev/null +++ b/dbx-agent-app/examples/supervisor/dbx_agent_app/mcp/uc_functions.py @@ -0,0 +1,240 @@ +""" +Unity Catalog Functions adapter for MCP. 
+ +Automatically discovers UC Functions and exposes them as MCP tools. +""" + +import logging +from typing import List, Dict, Any, Optional + +from databricks.sdk import WorkspaceClient + +logger = logging.getLogger(__name__) + + +class UCFunctionAdapter: + """ + Adapter for Unity Catalog Functions to MCP protocol. + + Discovers UC Functions and converts them to MCP tool format for + use with agents. + + Usage: + adapter = UCFunctionAdapter(profile="my-profile") + tools = adapter.discover_functions(catalog="main", schema="functions") + """ + + def __init__(self, profile: Optional[str] = None): + """ + Initialize UC Functions adapter. + + Args: + profile: Databricks CLI profile name + """ + self.profile = profile + self._client: Optional[WorkspaceClient] = None + + def _get_client(self) -> WorkspaceClient: + """Get or create workspace client.""" + if self._client is None: + self._client = ( + WorkspaceClient(profile=self.profile) + if self.profile + else WorkspaceClient() + ) + return self._client + + def discover_functions( + self, + catalog: str, + schema: str, + name_pattern: Optional[str] = None, + ) -> List[Dict[str, Any]]: + """ + Discover UC Functions and convert to MCP tool format. + + Args: + catalog: UC catalog name + schema: UC schema name + name_pattern: Optional name pattern filter (SQL LIKE pattern) + + Returns: + List of tool definitions in MCP format + + Example: + >>> adapter = UCFunctionAdapter() + >>> tools = adapter.discover_functions("main", "functions") + >>> for tool in tools: + ... 
print(tool["name"], tool["description"]) + """ + client = self._get_client() + tools = [] + + try: + functions = client.functions.list( + catalog_name=catalog, + schema_name=schema, + ) + + for func in functions: + # Skip system functions + if func.name.startswith("system."): + continue + + # Apply name pattern filter + if name_pattern and name_pattern not in func.name: + continue + + # Convert to MCP tool format + tool = self._convert_function_to_tool(func) + if tool: + tools.append(tool) + + logger.info( + f"Discovered {len(tools)} UC Functions from {catalog}.{schema}" + ) + + except Exception as e: + logger.error(f"Failed to discover UC Functions: {e}") + + return tools + + def _convert_function_to_tool(self, func) -> Optional[Dict[str, Any]]: + """ + Convert a UC Function to MCP tool format. + + Args: + func: Function info from Databricks SDK + + Returns: + MCP tool definition or None if conversion fails + """ + try: + # Extract function metadata + name = func.name.split(".")[-1] # Get short name + description = func.comment or f"Unity Catalog function: {name}" + + # Build parameter schema + input_schema = { + "type": "object", + "properties": {}, + "required": [] + } + + # Parse function parameters + if hasattr(func, "input_params") and func.input_params: + for param in func.input_params.parameters: + param_name = param.name + param_type = self._map_uc_type_to_json_type(param.type_name) + + input_schema["properties"][param_name] = { + "type": param_type, + "description": param.comment or "" + } + + # Parameters without defaults are required + if not hasattr(param, "default_value") or param.default_value is None: + input_schema["required"].append(param_name) + + return { + "name": name, + "description": description, + "inputSchema": input_schema, + "full_name": func.full_name, + "source": "unity_catalog" + } + + except Exception as e: + logger.warning(f"Failed to convert function {func.name}: {e}") + return None + + def _map_uc_type_to_json_type(self, uc_type: 
str) -> str:
        """
        Map Unity Catalog data type to JSON Schema type.

        Args:
            uc_type: UC type name (e.g., "STRING", "BIGINT", "BOOLEAN")

        Returns:
            JSON Schema type ("string", "number", "boolean", etc.)
        """
        type_mapping = {
            "STRING": "string",
            "VARCHAR": "string",
            "CHAR": "string",
            "BIGINT": "integer",
            "INT": "integer",
            "INTEGER": "integer",
            "SMALLINT": "integer",
            "TINYINT": "integer",
            "DOUBLE": "number",
            "FLOAT": "number",
            "DECIMAL": "number",
            "BOOLEAN": "boolean",
            # JSON Schema has no binary or date/time primitive types, so these
            # UC types are represented as plain strings.
            "BINARY": "string",
            "DATE": "string",
            "TIMESTAMP": "string",
            "ARRAY": "array",
            "MAP": "object",
            "STRUCT": "object",
        }

        # Lookup is case-insensitive; any UC type missing from the table above
        # (NOTE(review): e.g. INTERVAL, or parameterized forms like
        # "DECIMAL(10,2)" if type_name ever arrives parameterized — confirm
        # against the SDK) conservatively falls back to "string".
        uc_type_upper = uc_type.upper()
        return type_mapping.get(uc_type_upper, "string")

    async def call_function(
        self,
        full_name: str,
        arguments: Dict[str, Any]
    ) -> Any:
        """
        Call a UC Function with given arguments.

        Args:
            full_name: Full function name (catalog.schema.function)
            arguments: Function arguments

        Returns:
            Function result

        Example:
            >>> adapter = UCFunctionAdapter()
            >>> result = await adapter.call_function(
            ...     "main.functions.calculate_tax",
            ...     {"amount": 100, "rate": 0.08}
            ...
) + """ + client = self._get_client() + + try: + # Build SQL query to call the function + args_list = [f":{key}" for key in arguments.keys()] + query = f"SELECT {full_name}({', '.join(args_list)})" + + # Execute via SQL warehouse + # Note: This requires a warehouse ID to be configured + result = client.statement_execution.execute_statement( + statement=query, + warehouse_id=self._get_default_warehouse(), + parameters=[ + {"name": key, "value": str(value)} + for key, value in arguments.items() + ] + ) + + return result.result.data_array[0][0] if result.result.data_array else None + + except Exception as e: + logger.error(f"Failed to call UC Function {full_name}: {e}") + raise + + def _get_default_warehouse(self) -> str: + """Get default SQL warehouse ID from environment or client.""" + import os + warehouse_id = os.getenv("DATABRICKS_WAREHOUSE_ID") + if not warehouse_id: + raise ValueError( + "DATABRICKS_WAREHOUSE_ID not set. " + "Set this environment variable to use UC Functions." + ) + return warehouse_id diff --git a/dbx-agent-app/examples/supervisor/dbx_agent_app/py.typed b/dbx-agent-app/examples/supervisor/dbx_agent_app/py.typed new file mode 100644 index 00000000..e69de29b diff --git a/dbx-agent-app/examples/supervisor/dbx_agent_app/registry/__init__.py b/dbx-agent-app/examples/supervisor/dbx_agent_app/registry/__init__.py new file mode 100644 index 00000000..892043b7 --- /dev/null +++ b/dbx-agent-app/examples/supervisor/dbx_agent_app/registry/__init__.py @@ -0,0 +1,10 @@ +""" +Unity Catalog integration for agent registration. + +This module provides utilities for registering agents in Unity Catalog +as AGENT objects, enabling catalog-based discovery and permission management. 
+""" + +from .uc_registry import UCAgentRegistry, UCAgentSpec, UCRegistrationError + +__all__ = ["UCAgentRegistry", "UCAgentSpec", "UCRegistrationError"] diff --git a/dbx-agent-app/examples/supervisor/dbx_agent_app/registry/uc_registry.py b/dbx-agent-app/examples/supervisor/dbx_agent_app/registry/uc_registry.py new file mode 100644 index 00000000..f2651aca --- /dev/null +++ b/dbx-agent-app/examples/supervisor/dbx_agent_app/registry/uc_registry.py @@ -0,0 +1,345 @@ +""" +Unity Catalog agent registry. + +Registers and manages agents as Unity Catalog AGENT objects for +catalog-based discovery and permission management. +""" + +import json +import logging +from typing import Dict, Any, Optional, List +from dataclasses import dataclass + +from databricks.sdk import WorkspaceClient + +logger = logging.getLogger(__name__) + + +class UCRegistrationError(Exception): + """Raised when agent registration in Unity Catalog fails.""" + pass + + +@dataclass +class UCAgentSpec: + """ + Specification for registering an agent in Unity Catalog. + + Attributes: + name: Agent name (will be catalog object name) + catalog: UC catalog name + schema: UC schema name + endpoint_url: Agent's base URL + description: Agent description + capabilities: List of agent capabilities + properties: Additional metadata key-value pairs + """ + name: str + catalog: str + schema: str + endpoint_url: str + description: Optional[str] = None + capabilities: Optional[List[str]] = None + properties: Optional[Dict[str, str]] = None + + +class UCAgentRegistry: + """ + Unity Catalog agent registry. + + Registers agents as UC AGENT objects for catalog-based discovery + and permission management. 
+ + Usage: + registry = UCAgentRegistry(profile="my-profile") + + spec = UCAgentSpec( + name="customer_research", + catalog="main", + schema="agents", + endpoint_url="https://app.databricksapps.com", + description="Customer research agent", + capabilities=["search", "analysis"], + ) + + registry.register_agent(spec) + """ + + def __init__(self, profile: Optional[str] = None): + """ + Initialize UC agent registry. + + Args: + profile: Databricks CLI profile name (uses default if not specified) + """ + self.profile = profile + self._client: Optional[WorkspaceClient] = None + + def _get_client(self) -> WorkspaceClient: + """Get or create workspace client.""" + if self._client is None: + self._client = ( + WorkspaceClient(profile=self.profile) + if self.profile + else WorkspaceClient() + ) + return self._client + + def register_agent(self, spec: UCAgentSpec) -> Dict[str, Any]: + """ + Register an agent in Unity Catalog. + + Creates a AGENT object in the specified catalog and schema with + metadata about the agent's endpoint, capabilities, and properties. + + Args: + spec: Agent specification + + Returns: + Dictionary with registration details + + Raises: + UCRegistrationError: If registration fails + + Example: + >>> registry = UCAgentRegistry(profile="my-profile") + >>> spec = UCAgentSpec( + ... name="my_agent", + ... catalog="main", + ... schema="agents", + ... endpoint_url="https://app.databricksapps.com", + ... 
) + >>> result = registry.register_agent(spec) + """ + client = self._get_client() + full_name = f"{spec.catalog}.{spec.schema}.{spec.name}" + + try: + # Build agent properties for UC metadata + properties = spec.properties or {} + properties["endpoint_url"] = spec.endpoint_url + properties["agent_card_url"] = f"{spec.endpoint_url}/.well-known/agent.json" + + if spec.capabilities: + properties["capabilities"] = ",".join(spec.capabilities) + + # Register as a UC registered model with AGENT type + # (UC doesn't have a native AGENT type yet, so we use registered models + # with special tags/properties to mark them as agents) + + logger.info(f"Registering agent '{full_name}' in Unity Catalog") + + # Check if catalog and schema exist + try: + client.catalogs.get(spec.catalog) + except Exception as e: + raise UCRegistrationError( + f"Catalog '{spec.catalog}' does not exist or is not accessible: {e}" + ) + + try: + client.schemas.get(f"{spec.catalog}.{spec.schema}") + except Exception as e: + raise UCRegistrationError( + f"Schema '{spec.catalog}.{spec.schema}' does not exist or is not accessible: {e}" + ) + + # Create or update registered model as agent placeholder + # In a future UC version with native AGENT support, this would use: + # client.agents.create(name=full_name, properties=properties) + + # Encode properties as JSON suffix in comment for discovery + # Format: "description\n---AGENT_META---\n{json}" + meta = {"databricks_agent": True, **properties} + comment = spec.description or "" + comment_with_meta = f"{comment}\n---AGENT_META---\n{json.dumps(meta)}" + + # Try update first (model may already exist from prior deploy), + # fall back to create if it doesn't exist + try: + client.registered_models.update( + full_name, + comment=comment_with_meta, + ) + logger.info(f"Updated existing agent '{full_name}'") + except Exception as update_err: + # Model doesn't exist or SP can't access it — try create + logger.debug(f"Update failed ({update_err}), trying create") + 
try: + client.registered_models.create( + name=spec.name, + catalog_name=spec.catalog, + schema_name=spec.schema, + comment=comment_with_meta, + ) + logger.info(f"Created new agent '{full_name}'") + except Exception as create_err: + # If create fails with "already exists", the SP just + # can't see the model — log warning but don't fail + err_str = str(create_err).lower() + if "already exists" in err_str or "not a valid name" in err_str: + logger.warning( + "Agent '%s' exists but SP cannot update it. " + "Grant the app's SP ownership or MANAGE on the model.", + full_name, + ) + else: + raise + + logger.info(f"Successfully registered agent '{full_name}'") + + return { + "full_name": full_name, + "catalog": spec.catalog, + "schema": spec.schema, + "name": spec.name, + "endpoint_url": spec.endpoint_url, + "properties": properties, + } + + except UCRegistrationError: + raise + except Exception as e: + raise UCRegistrationError( + f"Failed to register agent '{full_name}': {e}" + ) from e + + @staticmethod + def _parse_agent_meta(comment: Optional[str]) -> Optional[Dict[str, Any]]: + """Parse agent metadata from comment field (JSON after ---AGENT_META--- marker).""" + if not comment or "---AGENT_META---" not in comment: + return None + try: + _, meta_json = comment.split("---AGENT_META---", 1) + return json.loads(meta_json.strip()) + except (ValueError, json.JSONDecodeError): + return None + + @staticmethod + def _clean_description(comment: Optional[str]) -> str: + """Extract human-readable description from comment (before the meta marker).""" + if not comment: + return "" + if "---AGENT_META---" in comment: + return comment.split("---AGENT_META---")[0].strip() + return comment + + def get_agent(self, catalog: str, schema: str, name: str) -> Optional[Dict[str, Any]]: + """ + Get agent metadata from Unity Catalog. 
+ + Args: + catalog: UC catalog name + schema: UC schema name + name: Agent name + + Returns: + Agent metadata dictionary or None if not found + """ + client = self._get_client() + full_name = f"{catalog}.{schema}.{name}" + + try: + model = client.registered_models.get(full_name) + meta = self._parse_agent_meta(model.comment) + if not meta or not meta.get("databricks_agent"): + return None + + return { + "full_name": full_name, + "catalog": catalog, + "schema": schema, + "name": name, + "description": self._clean_description(model.comment), + "endpoint_url": meta.get("endpoint_url"), + "agent_card_url": meta.get("agent_card_url"), + "capabilities": meta.get("capabilities", "").split(",") if meta.get("capabilities") else None, + "properties": meta, + } + + except Exception as e: + logger.debug(f"Agent '{full_name}' not found: {e}") + return None + + def list_agents( + self, + catalog: str, + schema: Optional[str] = None, + ) -> List[Dict[str, Any]]: + """ + List all agents in a catalog or schema. 

        Args:
            catalog: UC catalog name
            schema: Optional UC schema name (lists all schemas if not specified)

        Returns:
            List of agent metadata dictionaries
        """
        client = self._get_client()
        agents = []

        # Determine which schemas to scan
        # (the built-in information_schema is skipped — it never holds agents)
        schemas_to_scan = [schema] if schema else []
        if not schema:
            try:
                for s in client.schemas.list(catalog_name=catalog):
                    if s.name != "information_schema":
                        schemas_to_scan.append(s.name)
            except Exception as e:
                logger.error(f"Failed to list schemas in {catalog}: {e}")
                return []

        for schema_name in schemas_to_scan:
            try:
                models = client.registered_models.list(
                    catalog_name=catalog, schema_name=schema_name
                )
                for model in models:
                    # Only registered models carrying the ---AGENT_META--- JSON
                    # blob with databricks_agent=True count as agents.
                    meta = self._parse_agent_meta(model.comment)
                    if not meta or not meta.get("databricks_agent"):
                        continue

                    agents.append({
                        "full_name": model.full_name,
                        "catalog": catalog,
                        "schema": schema_name,
                        "name": model.name,
                        "description": self._clean_description(model.comment),
                        "endpoint_url": meta.get("endpoint_url"),
                        "capabilities": meta.get("capabilities", "").split(",") if meta.get("capabilities") else None,
                    })
            except Exception as e:
                # Best-effort scan: a schema we cannot list (permissions, etc.)
                # is skipped rather than failing the whole listing.
                logger.debug(f"Failed to list models in {catalog}.{schema_name}: {e}")
                continue

        return agents

    def delete_agent(self, catalog: str, schema: str, name: str) -> bool:
        """
        Delete an agent from Unity Catalog.
+ + Args: + catalog: UC catalog name + schema: UC schema name + name: Agent name + + Returns: + True if deleted, False if not found + + Raises: + UCRegistrationError: If deletion fails + """ + client = self._get_client() + full_name = f"{catalog}.{schema}.{name}" + + try: + client.registered_models.delete(full_name) + logger.info(f"Deleted agent '{full_name}'") + return True + except Exception as e: + if "does not exist" in str(e).lower(): + return False + raise UCRegistrationError( + f"Failed to delete agent '{full_name}': {e}" + ) from e diff --git a/dbx-agent-app/examples/supervisor/requirements.txt b/dbx-agent-app/examples/supervisor/requirements.txt new file mode 100644 index 00000000..23d9ca4e --- /dev/null +++ b/dbx-agent-app/examples/supervisor/requirements.txt @@ -0,0 +1,8 @@ +fastapi>=0.115.0 +uvicorn[standard]>=0.30.0 +pydantic>=2.0.0 +httpx>=0.27.0 +mlflow>=2.16.0 +databricks-langchain>=0.1.0 +langchain-core>=0.3.0 +databricks-sdk>=0.30.0 diff --git a/dbx-agent-app/examples/supervisor/setup_tables.py b/dbx-agent-app/examples/supervisor/setup_tables.py new file mode 100644 index 00000000..82f637cd --- /dev/null +++ b/dbx-agent-app/examples/supervisor/setup_tables.py @@ -0,0 +1,349 @@ +""" +Setup script to create and seed UC tables for the Supervisor agent's sub-agents. 
+ +Creates 6 tables in serverless_dxukih_catalog.agents: + - expert_transcripts (research sub-agent) + - experts (expert_finder sub-agent) + - call_metrics (analytics sub-agent) + - engagement_summary (analytics sub-agent) + - restricted_list (compliance sub-agent) + - nda_registry (compliance sub-agent) + +Usage: + python setup_tables.py --profile fe-vm-serverless-dxukih + python setup_tables.py --profile fe-vm-serverless-dxukih --drop + python setup_tables.py --profile fe-vm-serverless-dxukih --verify +""" + +import argparse +import sys +import time + +from databricks.sdk import WorkspaceClient + + +CATALOG = "serverless_dxukih_catalog" +SCHEMA = "agents" + + +# --------------------------------------------------------------------------- +# Table DDL +# --------------------------------------------------------------------------- + +TABLES = { + "expert_transcripts": """ +CREATE TABLE IF NOT EXISTS {catalog}.{schema}.expert_transcripts ( + transcript_id STRING, + expert_name STRING, + topic STRING, + transcript_excerpt STRING, + interview_date DATE, + relevance_score DOUBLE, + sector STRING +) +""", + "experts": """ +CREATE TABLE IF NOT EXISTS {catalog}.{schema}.experts ( + expert_id STRING, + name STRING, + specialty STRING, + interview_count INT, + rating DOUBLE, + topics STRING, + bio STRING, + region STRING +) +""", + "call_metrics": """ +CREATE TABLE IF NOT EXISTS {catalog}.{schema}.call_metrics ( + metric_date DATE, + region STRING, + call_count INT, + avg_duration_min DOUBLE, + segment STRING, + revenue_usd DOUBLE +) +""", + "engagement_summary": """ +CREATE TABLE IF NOT EXISTS {catalog}.{schema}.engagement_summary ( + metric_name STRING, + metric_value DOUBLE, + period STRING, + updated_at TIMESTAMP +) +""", + "restricted_list": """ +CREATE TABLE IF NOT EXISTS {catalog}.{schema}.restricted_list ( + entity_name STRING, + restriction_type STRING, + effective_date DATE, + expiry_date DATE, + reason STRING +) +""", + "nda_registry": """ +CREATE TABLE IF NOT 
EXISTS {catalog}.{schema}.nda_registry ( + expert_name STRING, + nda_status STRING, + effective_date DATE, + expiry_date DATE, + coverage_scope STRING +) +""", +} + + +# --------------------------------------------------------------------------- +# Seed data +# --------------------------------------------------------------------------- + +SEED_DATA = { + "expert_transcripts": """ +INSERT INTO {catalog}.{schema}.expert_transcripts VALUES +('T-2025-1247', 'Dr. Sarah Chen', 'AI in Healthcare', 'We are seeing 40% year-over-year growth in AI-driven diagnostics adoption across tier-1 hospital systems. The key driver is FDA clearance of new radiology AI tools, which has accelerated trust among clinicians.', DATE '2025-01-15', 0.95, 'Healthcare'), +('T-2025-1248', 'Dr. Sarah Chen', 'Digital Health Regulation', 'The regulatory landscape is evolving faster than most health systems can adapt. We expect the EU AI Act to create a two-speed market: compliant vendors will gain share while others face exclusion.', DATE '2025-02-03', 0.88, 'Healthcare'), +('T-2025-1189', 'Michael Torres', 'Supply Chain Resilience', 'Leaders are prioritizing real-time visibility and transparency. The shift from just-in-time to just-in-case inventory models is costing 15-20% more but reducing disruption risk by 60%.', DATE '2025-01-22', 0.92, 'Supply Chain'), +('T-2025-1190', 'Michael Torres', 'Logistics Technology', 'Autonomous warehouse robotics reached an inflection point in Q4 2024. Companies deploying full-stack automation report 35% labor cost reduction with 2x throughput.', DATE '2025-02-10', 0.87, 'Supply Chain'), +('T-2025-1301', 'Dr. James Liu', 'Embedded Finance', 'Every SaaS platform is becoming a fintech company. We estimate embedded lending will grow from $33B to $68B by 2027, with vertical SaaS capturing the majority of new originations.', DATE '2025-01-28', 0.91, 'Fintech'), +('T-2025-1302', 'Dr. 
James Liu', 'Crypto Regulation', 'Institutional crypto adoption depends entirely on regulatory clarity. The MiCA framework in Europe has already driven 3x growth in compliant exchange volume.', DATE '2025-02-15', 0.84, 'Fintech'), +('T-2025-1303', 'Dr. James Liu', 'Digital Payments', 'Real-time payment rails are making batch processing obsolete. Countries with instant payment infrastructure see 22% higher digital commerce growth rates.', DATE '2025-03-01', 0.89, 'Fintech'), +('T-2025-1410', 'Rachel Martinez', 'Renewable Energy Transition', 'Grid-scale battery storage costs dropped 40% since 2022. We are now at the tipping point where solar-plus-storage is cheaper than natural gas peakers in 80% of US markets.', DATE '2025-01-10', 0.93, 'Energy'), +('T-2025-1411', 'Rachel Martinez', 'Carbon Markets', 'Voluntary carbon markets are consolidating rapidly. Only credits with rigorous MRV (measurement, reporting, verification) will retain value. We expect 50% of current credits to be worthless by 2027.', DATE '2025-02-20', 0.86, 'Energy'), +('T-2025-1520', 'David Kim', 'AI Security Threats', 'Adversarial AI attacks increased 300% in 2024. The most concerning vector is prompt injection in enterprise LLM deployments, which can exfiltrate sensitive data through seemingly benign queries.', DATE '2025-01-18', 0.94, 'Cybersecurity'), +('T-2025-1521', 'David Kim', 'Zero Trust Architecture', 'Enterprises adopting zero-trust report 68% fewer breach incidents. But implementation remains slow: only 12% of Fortune 500 companies have fully deployed ZTNA across all workloads.', DATE '2025-02-05', 0.90, 'Cybersecurity'), +('T-2025-1522', 'David Kim', 'Cloud Security Posture', 'Misconfigured cloud resources remain the number one cause of data breaches. Automated CSPM tools now detect 95% of misconfigurations, but remediation still averages 72 hours.', DATE '2025-03-10', 0.88, 'Cybersecurity'), +('T-2025-1630', 'Dr. 
Priya Patel', 'GLP-1 Market Dynamics', 'The GLP-1 market will exceed $100B by 2028. Supply chain constraints are the binding factor: active ingredient manufacturing capacity needs to triple to meet demand.', DATE '2025-01-25', 0.96, 'Healthcare'), +('T-2025-1631', 'Dr. Priya Patel', 'Biosimilar Competition', 'Biosimilar penetration in the US reached 40% for infused biologics but only 15% for self-administered products. Distribution complexity is the key barrier.', DATE '2025-02-12', 0.85, 'Healthcare'), +('T-2025-1740', 'Alex Novak', 'Retail Media Networks', 'Retail media ad spend will surpass $60B in 2025. The ROI advantage over traditional digital ads is 2.3x, driven by closed-loop attribution and first-party data.', DATE '2025-01-30', 0.91, 'Retail'), +('T-2025-1741', 'Alex Novak', 'Unified Commerce', 'Omnichannel retailers with unified inventory systems see 25% higher conversion rates. The gap between leaders and laggards is widening as technology costs decrease.', DATE '2025-02-18', 0.87, 'Retail'), +('T-2025-1850', 'Dr. Emily Watson', 'Climate Risk Modeling', 'Physical climate risk is being priced into commercial real estate for the first time. Properties in high-risk zones are seeing 8-15% valuation discounts, creating arbitrage opportunities for informed investors.', DATE '2025-02-01', 0.92, 'Climate/ESG'), +('T-2025-1851', 'Dr. Emily Watson', 'ESG Data Quality', 'Only 23% of corporate ESG disclosures meet institutional investor standards. The gap between reported and verified emissions data averages 35%, undermining portfolio decarbonization strategies.', DATE '2025-03-05', 0.89, 'Climate/ESG') +""", + "experts": """ +INSERT INTO {catalog}.{schema}.experts VALUES +('EXP-001', 'Dr. Sarah Chen', 'Healthcare AI & Digital Health', 23, 4.9, 'AI in healthcare, digital health regulation, clinical AI adoption, FDA approval processes', 'Former Chief Medical Officer at HealthTech Ventures. 15 years in healthcare technology strategy. 
Published 40+ papers on clinical AI validation.', 'North America'), +('EXP-002', 'Michael Torres', 'Supply Chain Analytics', 18, 4.8, 'supply chain resilience, logistics technology, warehouse automation, inventory optimization', 'VP of Supply Chain Innovation at a Fortune 100 retailer. Led digital transformation reducing fulfillment costs by 30%. MIT Supply Chain Management graduate.', 'North America'), +('EXP-003', 'Dr. James Liu', 'Fintech & Digital Payments', 21, 4.7, 'embedded finance, crypto regulation, digital payments, real-time payment rails, DeFi', 'Partner at Fintech Capital Partners. Previously Head of Strategy at a top-3 payment processor. PhD in Financial Engineering from Stanford.', 'Asia Pacific'), +('EXP-004', 'Rachel Martinez', 'Energy Transition & Sustainability', 15, 4.8, 'renewable energy, carbon markets, grid-scale storage, energy policy, clean tech investment', 'Managing Director at GreenShift Advisory. 20 years in energy markets including roles at two major energy companies. Board member of the Clean Energy Council.', 'Europe'), +('EXP-005', 'David Kim', 'Cybersecurity & AI Security', 19, 4.9, 'AI security threats, zero trust architecture, cloud security, adversarial AI, threat intelligence', 'Former CISO at a global bank. Founded a cybersecurity AI startup (acquired). CISSP, CISM certified. Regular speaker at Black Hat and RSA Conference.', 'North America'), +('EXP-006', 'Dr. Priya Patel', 'Pharma & Biotech', 14, 4.6, 'GLP-1 therapeutics, biosimilar markets, drug pricing, pharmaceutical supply chain, clinical trials', 'Chief Strategy Officer at a mid-cap biotech firm. Former McKinsey healthcare practice leader. MD/MBA from Johns Hopkins.', 'North America'), +('EXP-007', 'Alex Novak', 'Retail & Consumer Tech', 16, 4.5, 'retail media networks, unified commerce, consumer behavior, e-commerce, personalization', 'Head of Digital Commerce at a major department store chain. Previously at Amazon Retail division. 
Known for pioneering retail media strategies.', 'Europe'), +('EXP-008', 'Dr. Emily Watson', 'Climate Risk & ESG', 12, 4.7, 'climate risk modeling, ESG data quality, sustainable finance, carbon accounting, green bonds', 'Director of Climate Analytics at a global asset manager. PhD in Environmental Economics from LSE. Advisor to UN PRI on climate disclosure standards.', 'Europe') +""", + "call_metrics": None, # generated programmatically + "engagement_summary": """ +INSERT INTO {catalog}.{schema}.engagement_summary VALUES +('total_calls_90d', 2847, 'last_90_days', CURRENT_TIMESTAMP()), +('avg_duration_min', 52.3, 'last_90_days', CURRENT_TIMESTAMP()), +('month_over_month_growth_pct', 18.2, 'last_30_days', CURRENT_TIMESTAMP()), +('unique_experts_engaged', 87, 'last_90_days', CURRENT_TIMESTAMP()), +('unique_clients', 142, 'last_90_days', CURRENT_TIMESTAMP()), +('repeat_engagement_rate_pct', 64.5, 'last_90_days', CURRENT_TIMESTAMP()), +('avg_client_satisfaction', 4.6, 'last_90_days', CURRENT_TIMESTAMP()), +('revenue_per_call_usd', 1250.0, 'last_90_days', CURRENT_TIMESTAMP()), +('total_revenue_usd', 3558750.0, 'last_90_days', CURRENT_TIMESTAMP()), +('healthcare_segment_pct', 34.2, 'last_90_days', CURRENT_TIMESTAMP()), +('technology_segment_pct', 28.7, 'last_90_days', CURRENT_TIMESTAMP()), +('finance_segment_pct', 22.1, 'last_90_days', CURRENT_TIMESTAMP()) +""", + "restricted_list": """ +INSERT INTO {catalog}.{schema}.restricted_list VALUES +('Acme Corp', 'trading', DATE '2025-06-01', DATE '2026-12-31', 'Active M&A proceedings - material non-public information'), +('GlobalPharma Inc', 'regulatory', DATE '2025-09-15', DATE '2026-08-15', 'FDA advisory committee review pending'), +('TechVentures LLC', 'nda', DATE '2025-01-01', DATE '2027-01-01', 'Exclusive consulting agreement with competitor'), +('EnergyFirst Holdings', 'trading', DATE '2025-10-01', DATE '2026-09-01', 'Pending regulatory approval for acquisition'), +('CryptoExchange Global', 'regulatory', DATE '2025-08-15', 
DATE '2026-07-15', 'SEC enforcement investigation ongoing'), +('MedDevice Partners', 'nda', DATE '2025-03-01', DATE '2026-11-01', 'Confidential product development partnership') +""", + "nda_registry": """ +INSERT INTO {catalog}.{schema}.nda_registry VALUES +('Dr. Sarah Chen', 'active', DATE '2024-01-01', DATE '2026-01-01', 'Healthcare technology and AI diagnostics'), +('Michael Torres', 'active', DATE '2024-03-15', DATE '2026-03-15', 'Supply chain and logistics operations'), +('Dr. James Liu', 'active', DATE '2024-06-01', DATE '2026-06-01', 'Financial technology and payments'), +('Rachel Martinez', 'active', DATE '2024-02-01', DATE '2026-02-01', 'Energy markets and sustainability'), +('David Kim', 'active', DATE '2024-04-01', DATE '2026-04-01', 'Cybersecurity and threat intelligence'), +('Dr. Priya Patel', 'active', DATE '2024-07-01', DATE '2026-07-01', 'Pharmaceutical and biotech markets'), +('Alex Novak', 'expired', DATE '2023-01-01', DATE '2025-01-01', 'Retail technology and e-commerce'), +('Dr. 
Emily Watson', 'pending', DATE '2025-04-01', DATE '2027-04-01', 'Climate risk and ESG analytics') +""", +} + + +def _call_metrics_seed_sql() -> str: + """Generate ~90 rows of call_metrics covering 90 days across regions/segments.""" + regions = ["North America", "Europe", "Asia Pacific", "Latin America"] + segments = ["Healthcare", "Technology", "Finance"] + + rows = [] + import random + random.seed(42) + + for day_offset in range(0, 90, 3): # every 3 days = ~30 rows per region*segment combo + for region in regions: + for segment in segments: + base_calls = {"North America": 12, "Europe": 9, "Asia Pacific": 7, "Latin America": 4} + base_rev = {"Healthcare": 1400, "Technology": 1200, "Finance": 1500} + + call_count = base_calls[region] + random.randint(-3, 5) + avg_dur = round(45 + random.uniform(-10, 15), 1) + rev = round(base_rev[segment] * call_count * (0.85 + random.uniform(0, 0.3)), 2) + + rows.append( + f"(DATE_ADD(CURRENT_DATE(), -{90 - day_offset}), " + f"'{region}', {call_count}, {avg_dur}, '{segment}', {rev})" + ) + + # batch into chunks of 50 to avoid overly long statements + chunks = [rows[i:i + 50] for i in range(0, len(rows), 50)] + statements = [] + for chunk in chunks: + statements.append( + f"INSERT INTO {{catalog}}.{{schema}}.call_metrics VALUES\n" + + ",\n".join(chunk) + ) + return statements + + +# --------------------------------------------------------------------------- +# Expected row counts for verification +# --------------------------------------------------------------------------- + +EXPECTED_COUNTS = { + "expert_transcripts": 18, + "experts": 8, + "call_metrics": 360, # approximate: 30 date points * 4 regions * 3 segments + "engagement_summary": 12, + "restricted_list": 6, + "nda_registry": 8, +} + + +# --------------------------------------------------------------------------- +# Execution helpers +# --------------------------------------------------------------------------- + +def get_warehouse_id(ws: WorkspaceClient) -> str: + """Get 
def execute_sql(ws: WorkspaceClient, warehouse_id: str, statement: str, label: str = ""):
    """Execute a SQL statement and wait for completion.

    ``execute_statement`` only waits up to ``wait_timeout`` (50s, the API
    maximum); a long-running statement can come back still PENDING/RUNNING.
    Previously that was treated as a failure — instead we poll
    ``get_statement`` until the statement reaches a terminal state.

    Args:
        ws: Authenticated workspace client.
        warehouse_id: SQL warehouse to execute on.
        statement: SQL text to run.
        label: Human-readable label used in error messages.

    Returns:
        The final statement-execution response.

    Raises:
        RuntimeError: If the statement ends in any state other than
            SUCCEEDED or CLOSED.
    """
    result = ws.statement_execution.execute_statement(
        warehouse_id=warehouse_id,
        statement=statement,
        wait_timeout="50s",
    )

    def _state_of(res) -> str:
        # SDK versions differ: state may be an enum (with .value) or a string.
        status = res.status
        if status and status.state:
            return status.state.value if hasattr(status.state, "value") else str(status.state)
        return "UNKNOWN"

    state = _state_of(result)
    # Keep polling while the statement is still queued or executing.
    while state in ("PENDING", "RUNNING"):
        time.sleep(2)
        result = ws.statement_execution.get_statement(result.statement_id)
        state = _state_of(result)

    if state not in ("SUCCEEDED", "CLOSED"):
        error_msg = ""
        if result.status and result.status.error:
            error_msg = f" — {result.status.error.message}"
        raise RuntimeError(f"{label}: statement failed with state {state}{error_msg}")
    return result
def verify_tables(ws: WorkspaceClient, warehouse_id: str) -> bool:
    """Verify row counts for all tables.

    Runs ``SELECT COUNT(*)`` against each table listed in EXPECTED_COUNTS
    and prints one status line per table.

    Args:
        ws: Authenticated workspace client.
        warehouse_id: SQL warehouse to run the COUNT queries on.

    Returns:
        True if every table holds at least 80% of its expected row count,
        False otherwise.
    """
    all_ok = True
    for table_name, expected in EXPECTED_COUNTS.items():
        fqn = f"{CATALOG}.{SCHEMA}.{table_name}"
        result = execute_sql(ws, warehouse_id, f"SELECT COUNT(*) FROM {fqn}", label=f"count {table_name}")
        # Defensive: treat a missing or empty result set as zero rows.
        count = int(result.result.data_array[0][0]) if result.result and result.result.data_array else 0
        # 80% tolerance gives headroom for approximate/partial seeding.
        status = "OK" if count >= expected * 0.8 else "LOW"
        if status == "LOW":
            all_ok = False
        print(f" {fqn}: {count} rows (expected ~{expected}) [{status}]")

    return all_ok
main() diff --git a/dbx-agent-app/manifest.yaml b/dbx-agent-app/manifest.yaml new file mode 100644 index 00000000..96e48def --- /dev/null +++ b/dbx-agent-app/manifest.yaml @@ -0,0 +1,51 @@ +name: dbx-agent-app +version: 0.1.0 +description: Framework for building discoverable AI agents on Databricks Apps +author: Databricks Labs +license: Apache-2.0 +homepage: https://github.com/databricks-labs/dbx-agent-app +documentation: https://databricks-labs.github.io/dbx-agent-app/ + +tags: + - agents + - a2a + - mcp + - unity-catalog + - databricks-apps + +python: + min_version: "3.10" + package: dbx-agent-app + entry_point: dbx_agent_app + +dependencies: + - fastapi>=0.115.0 + - uvicorn[standard]>=0.30.0 + - pydantic>=2.0.0 + - httpx>=0.27.0 + - databricks-sdk>=0.30.0 + +dev_dependencies: + - pytest>=8.0.0 + - pytest-asyncio>=0.23.0 + - pytest-cov>=4.1.0 + - black>=24.0.0 + - ruff>=0.5.0 + - mypy>=1.8.0 + +status: experimental +maturity: sandbox + +features: + - Auto-generates A2A protocol endpoints + - Agent discovery across workspace + - Unity Catalog integration + - MCP server support + - UC Functions adapter + - Agent-to-agent communication + +use_cases: + - Building discoverable agents on Databricks Apps + - Multi-agent systems + - Tool-based agents with UC Functions + - Workspace-wide agent orchestration diff --git a/dbx-agent-app/mkdocs.yml b/dbx-agent-app/mkdocs.yml new file mode 100644 index 00000000..7e9012d1 --- /dev/null +++ b/dbx-agent-app/mkdocs.yml @@ -0,0 +1,60 @@ +site_name: dbx-agent-app +site_description: Framework for building discoverable AI agents on Databricks Apps +site_url: https://databricks-labs.github.io/dbx-agent-app +repo_url: https://github.com/databricks-labs/dbx-agent-app +repo_name: databricks-labs/dbx-agent-app + +theme: + name: material + palette: + primary: red + accent: orange + features: + - navigation.tabs + - navigation.sections + - navigation.top + - search.suggest + - search.highlight + - content.code.copy + +nav: + - Home: 
index.md + - Getting Started: + - Installation: getting-started/installation.md + - Quick Start: getting-started/quickstart.md + - Your First Agent: getting-started/first-agent.md + - User Guide: + - Tool Registration: guide/tools.md + - Agent Discovery: guide/discovery.md + - A2A Communication: guide/a2a-protocol.md + - Unity Catalog: guide/unity-catalog.md + - Examples: + - Customer Research Agent: examples/customer-research.md + - Multi-Agent System: examples/multi-agent.md + - UC Integration: examples/uc-integration.md + - API Reference: + - AgentDiscovery: api/discovery.md + - A2AClient: api/a2a-client.md + - UCAgentRegistry: api/uc-registry.md + - Contributing: contributing.md + +plugins: + - search + - mkdocstrings: + handlers: + python: + options: + docstring_style: google + +markdown_extensions: + - admonition + - pymdownx.details + - pymdownx.superfences + - pymdownx.highlight + - pymdownx.inlinehilite + - pymdownx.snippets + - pymdownx.tabbed: + alternate_style: true + - tables + - toc: + permalink: true diff --git a/dbx-agent-app/pyproject.toml b/dbx-agent-app/pyproject.toml new file mode 100644 index 00000000..55b28c0b --- /dev/null +++ b/dbx-agent-app/pyproject.toml @@ -0,0 +1,61 @@ +[build-system] +requires = ["setuptools>=68.0.0", "wheel"] +build-backend = "setuptools.build_meta" + +[project] +name = "dbx-agent-app" +version = "0.3.0" +description = "Agent platform for Databricks: discover, test, trace, and govern agents in your workspace" +readme = "README.md" +requires-python = ">=3.10" +license = {text = "Apache-2.0"} +authors = [ + {name = "Databricks Labs", email = "labs@databricks.com"} +] +keywords = ["databricks", "agents", "ai", "mcp", "a2a"] + +dependencies = [ + "fastapi>=0.115.0", + "uvicorn[standard]>=0.30.0", + "pydantic>=2.0.0", + "httpx>=0.27.0", + "databricks-sdk>=0.30.0", + "pyyaml>=6.0", +] + +[project.optional-dependencies] +mlflow = [ + "mlflow>=2.15.0", +] +dev = [ + "pytest>=8.0.0", + "pytest-asyncio>=0.23.0", + 
"pytest-cov>=4.1.0", + "respx>=0.21.0", + "black>=24.0.0", + "ruff>=0.5.0", + "mypy>=1.8.0", +] + +[project.scripts] +dbx-agent-app = "dbx_agent_app.cli:main" + +[project.urls] +Homepage = "https://github.com/databricks-labs/dbx-agent-app" +Documentation = "https://databricks-labs.github.io/dbx-agent-app" + +[tool.setuptools.packages.find] +where = ["src"] + +[tool.setuptools.package-data] +"dbx_agent_app.dashboard" = ["static/**/*"] + +[tool.pytest.ini_options] +asyncio_mode = "auto" +testpaths = ["tests"] + +[tool.black] +line-length = 100 + +[tool.ruff] +line-length = 100 diff --git a/dbx-agent-app/src/dbx_agent_app/__init__.py b/dbx-agent-app/src/dbx_agent_app/__init__.py new file mode 100644 index 00000000..18227528 --- /dev/null +++ b/dbx-agent-app/src/dbx_agent_app/__init__.py @@ -0,0 +1,74 @@ +""" +dbx-agent-app: Agent platform for Databricks Apps. + +Build agents with any framework. Deploy the platform to discover, test, trace, and govern them. + +Primary API: +- @app_agent: Decorator to turn an async function into a discoverable agent +- add_agent_card: Make any FastAPI app discoverable via /.well-known/agent.json +- add_mcp_endpoints: Add MCP JSON-RPC endpoints to any FastAPI app + +Platform components: +- AgentDiscovery: Discover agents in your Databricks workspace +- DeployEngine: Multi-agent deploy orchestration (agents.yaml -> deploy -> wire -> uc_securable) +- create_dashboard_app: Agent platform dashboard (discovery, testing, lineage, governance) +""" + +from .core import ( + add_agent_card, + add_mcp_endpoints, + app_agent, + AppAgent, + AgentRequest, + AgentResponse, + InputItem, + OutputItem, + OutputTextContent, + StreamEvent, + UserContext, +) +from .discovery import AgentDiscovery, DiscoveredAgent, AgentDiscoveryResult, A2AClient, A2AClientError +from .mcp import MCPServerConfig, setup_mcp_server, UCFunctionAdapter +from .dashboard import create_dashboard_app +from .deploy import DeployConfig, DeployEngine +from .bridge import app_predict_fn 
+ +try: + from importlib.metadata import version as _get_version + __version__ = _get_version("dbx-agent-app") +except Exception: + __version__ = "0.3.0" + +__all__ = [ + # Primary API: @app_agent decorator + "app_agent", + "AppAgent", + # Wire protocol types + "AgentRequest", + "AgentResponse", + "InputItem", + "OutputItem", + "OutputTextContent", + "StreamEvent", + "UserContext", + # Helpers + "add_agent_card", + "add_mcp_endpoints", + # Discovery + "AgentDiscovery", + "DiscoveredAgent", + "AgentDiscoveryResult", + "A2AClient", + "A2AClientError", + # MCP + "MCPServerConfig", + "setup_mcp_server", + "UCFunctionAdapter", + # Dashboard + "create_dashboard_app", + # Deploy + "DeployConfig", + "DeployEngine", + # Bridge (official dbx-agent-app interop) + "app_predict_fn", +] diff --git a/dbx-agent-app/src/dbx_agent_app/bridge/__init__.py b/dbx-agent-app/src/dbx_agent_app/bridge/__init__.py new file mode 100644 index 00000000..715aa40a --- /dev/null +++ b/dbx-agent-app/src/dbx_agent_app/bridge/__init__.py @@ -0,0 +1,10 @@ +""" +Bridge module for interoperability with the official ``databricks-agents`` package. + +Provides ``app_predict_fn()`` which wraps a Databricks App's /invocations +endpoint as a ``predict_fn`` compatible with ``mlflow.genai.evaluate()``. +""" + +from dbx_agent_app.bridge.eval import app_predict_fn + +__all__ = ["app_predict_fn"] diff --git a/dbx-agent-app/src/dbx_agent_app/bridge/eval.py b/dbx-agent-app/src/dbx_agent_app/bridge/eval.py new file mode 100644 index 00000000..9e667651 --- /dev/null +++ b/dbx-agent-app/src/dbx_agent_app/bridge/eval.py @@ -0,0 +1,118 @@ +""" +Eval bridge: wrap a Databricks App as a predict_fn for mlflow.genai.evaluate(). + +The official ``databricks-agents`` package provides ``mlflow.genai.to_predict_fn()`` +for Model Serving endpoints, but Databricks Apps aren't model serving endpoints. +This module fills that gap. 
def app_predict_fn(
    app_url: str,
    *,
    token: Optional[str] = None,
    timeout: float = 120.0,
) -> Callable[..., Dict[str, Any]]:
    """
    Create a predict_fn that calls a Databricks App's /invocations endpoint.

    Args:
        app_url: Base URL of the deployed Databricks App (e.g. "https://my-app.cloud.databricks.com").
            Trailing slashes are stripped.
        token: Databricks PAT for authentication. If None, reads from DATABRICKS_TOKEN env var.
        timeout: HTTP timeout in seconds (default: 120).

    Returns:
        A callable ``predict_fn(messages=...) -> dict`` compatible with
        ``mlflow.genai.evaluate()``.

    Raises:
        httpx.HTTPStatusError: If the app returns a non-2xx response.
        ValueError: If no authentication token is available.
    """
    # Imports are local so the bridge module stays importable without httpx.
    import os

    import httpx

    resolved_token = token or os.environ.get("DATABRICKS_TOKEN")
    if not resolved_token:
        raise ValueError(
            "No auth token provided. Pass token= or set DATABRICKS_TOKEN env var."
        )

    base = app_url.rstrip("/")
    url = f"{base}/invocations"

    headers = {
        "Authorization": f"Bearer {resolved_token}",
        "Content-Type": "application/json",
    }

    # NOTE(review): this Client (and its connection pool) lives as long as the
    # returned predict_fn and is never explicitly closed — presumably
    # intentional so eval loops reuse connections; confirm callers are
    # short-lived processes.
    client = httpx.Client(timeout=timeout, headers=headers)

    def predict(
        messages: Optional[List[Dict[str, str]]] = None,
        **kwargs: Any,
    ) -> Dict[str, Any]:
        """
        Call the app's /invocations endpoint.

        Accepts either:
            predict(messages=[{"role": "user", "content": "..."}])
            predict(question="...") — auto-wraps as single user message
        """
        if messages is None:
            # Support simple string input (e.g. predict(question="Hello")).
            # NOTE(review): falsy values (e.g. question="") fall through to the
            # error below — acceptable, since an empty prompt is not useful.
            text = kwargs.get("question") or kwargs.get("input") or kwargs.get("query")
            if text:
                messages = [{"role": "user", "content": str(text)}]
            else:
                raise ValueError(
                    "predict_fn requires 'messages' list or a 'question'/'input'/'query' kwarg"
                )

        # Translate evaluate()'s messages list into the app wire format:
        # POST {"input": [...]} (see module docstring for the full mapping).
        payload = {"input": messages}

        response = client.post(url, json=payload)
        response.raise_for_status()
        data = response.json()

        # Flatten all text blocks from the app's output items into one string.
        text_parts = []
        for item in data.get("output", []):
            for content_block in item.get("content", []):
                if "text" in content_block:
                    text_parts.append(content_block["text"])

        return {
            "response": "\n".join(text_parts),
            "output": data.get("output", []),
        }

    return predict
"""Command-line interface for dbx-agent-app.

Commands:
    dbx-agent-app deploy     Deploy agents from agents.yaml
    dbx-agent-app validate   Validate agents.yaml without deploying
    dbx-agent-app status     Show deployment status
    dbx-agent-app destroy    Tear down deployed agents
    dbx-agent-app dashboard  Launch the agent discovery dashboard locally
    dbx-agent-app platform   Deploy the Agent Platform as a Databricks App
"""

import argparse
import logging
import sys


def main():
    """Parse CLI arguments and dispatch to the selected sub-command.

    Prints help and exits with status 1 when no sub-command is given.
    """
    parser = argparse.ArgumentParser(
        prog="dbx-agent-app",
        description="CLI for Databricks agent deployment, discovery, and platform management",
    )
    sub = parser.add_subparsers(dest="command")

    # ---- deploy ----
    deploy_cmd = sub.add_parser("deploy", help="Deploy agents from agents.yaml")
    deploy_cmd.add_argument(
        "--config", type=str, default="agents.yaml", help="Path to agents.yaml (default: agents.yaml)"
    )
    deploy_cmd.add_argument("--profile", type=str, default=None, help="Databricks CLI profile")
    deploy_cmd.add_argument("--agent", type=str, default=None, help="Deploy a single agent by name")
    deploy_cmd.add_argument("--dry-run", action="store_true", help="Show plan without deploying")
    deploy_cmd.add_argument(
        "--mode", type=str, default="snapshot", choices=["snapshot", "auto_sync"],
        help="Deployment mode: snapshot (default) or auto_sync (dev workflow)",
    )

    # ---- validate ----
    validate_cmd = sub.add_parser("validate", help="Validate agents.yaml without deploying")
    validate_cmd.add_argument(
        "--config", type=str, default="agents.yaml", help="Path to agents.yaml (default: agents.yaml)"
    )

    # ---- status ----
    status_cmd = sub.add_parser("status", help="Show deployment status")
    status_cmd.add_argument(
        "--config", type=str, default="agents.yaml", help="Path to agents.yaml (default: agents.yaml)"
    )
    status_cmd.add_argument("--profile", type=str, default=None, help="Databricks CLI profile")
    status_cmd.add_argument("--json", action="store_true", dest="as_json", help="Output as JSON")

    # ---- destroy ----
    destroy_cmd = sub.add_parser("destroy", help="Tear down all deployed agents")
    destroy_cmd.add_argument(
        "--config", type=str, default="agents.yaml", help="Path to agents.yaml (default: agents.yaml)"
    )
    destroy_cmd.add_argument("--profile", type=str, default=None, help="Databricks CLI profile")
    destroy_cmd.add_argument("--yes", action="store_true", help="Skip confirmation prompt")

    # ---- dashboard (local dev) ----
    dash_cmd = sub.add_parser("dashboard", help="Launch the agent discovery dashboard locally")
    dash_cmd.add_argument("--profile", type=str, default=None, help="Databricks CLI profile")
    dash_cmd.add_argument("--port", type=int, default=8501, help="Port (default: 8501)")
    dash_cmd.add_argument("--host", type=str, default="127.0.0.1", help="Host (default: 127.0.0.1)")
    dash_cmd.add_argument("--catalog", type=str, default=None, help="UC catalog for lineage")
    dash_cmd.add_argument("--no-browser", action="store_true", help="Don't auto-open browser")

    # ---- platform (deploy as Databricks App) ----
    platform_cmd = sub.add_parser("platform", help="Deploy the Agent Platform as a Databricks App")
    platform_cmd.add_argument("--profile", type=str, default=None, help="Databricks CLI profile")
    platform_cmd.add_argument(
        "--app-name", type=str, default="agent-platform",
        help="Databricks App name (default: agent-platform)",
    )
    platform_cmd.add_argument("--catalog", type=str, default="main", help="UC catalog (default: main)")

    args = parser.parse_args()

    if not args.command:
        parser.print_help()
        sys.exit(1)

    logging.basicConfig(level=logging.INFO, format="%(levelname)s %(name)s: %(message)s")

    # Table dispatch keeps the sub-command wiring in one place; argparse
    # guarantees args.command is one of the registered sub-parser names.
    handlers = {
        "deploy": _run_deploy,
        "validate": _run_validate,
        "status": _run_status,
        "destroy": _run_destroy,
        "dashboard": _run_dashboard,
        "platform": _run_platform,
    }
    handlers[args.command](args)


def _run_deploy(args):
    """Deploy agents declared in agents.yaml (optionally a single agent)."""
    # Deferred imports keep CLI startup fast and avoid requiring deploy deps
    # for unrelated sub-commands.
    from .deploy.config import DeployConfig
    from .deploy.engine import DeployEngine

    config = DeployConfig.from_yaml(args.config)
    engine = DeployEngine(config, profile=args.profile, dry_run=args.dry_run, mode=args.mode)
    engine.deploy(agent_filter=args.agent)


def _run_validate(args):
    """Validate agents.yaml and print a summary; exit 1 on any problem."""
    from .deploy.config import DeployConfig

    try:
        config = DeployConfig.from_yaml(args.config)
    except Exception as e:
        print(f"INVALID: {e}", file=sys.stderr)
        sys.exit(1)

    print(f"Config: {args.config}")
    print(f"Project: {config.project.name}")
    print(f"UC: {config.uc.catalog}.{config.uc.schema_}")
    print(f"Warehouse: {config.warehouse.id}")
    print(f"Agents: {len(config.agents)}")
    for agent in config.agents:
        deps = f" (depends: {', '.join(agent.depends_on)})" if agent.depends_on else ""
        print(f"  {agent.name}: {len(agent.resources)} resource(s){deps}")
        for r in agent.resources:
            # A resource is exactly one of these kinds; report the first
            # non-None field, or "unknown" if none is set.
            rtype = next(
                (f for f in ("uc_securable", "sql_warehouse", "job", "secret", "serving_endpoint", "database")
                 if getattr(r, f) is not None),
                "unknown",
            )
            print(f"    - {r.name} ({rtype})")

    # Validate topological ordering (raises ValueError on dependency cycles)
    try:
        config.ordered_agents
    except ValueError as e:
        print(f"\nERROR: {e}", file=sys.stderr)
        sys.exit(1)

    print(f"\nVALID — {len(config.agents)} agent(s), deploy order: {', '.join(a.name for a in config.ordered_agents)}")


def _run_status(args):
    """Show deployment status; optionally dump it as JSON."""
    import json as json_mod

    from .deploy.config import DeployConfig
    from .deploy.engine import DeployEngine

    config = DeployConfig.from_yaml(args.config)
    engine = DeployEngine(config, profile=args.profile)
    result = engine.status(as_json=args.as_json)
    if args.as_json and result:
        print(json_mod.dumps(result, indent=2))


def _run_destroy(args):
    """Tear down all deployed agents, prompting unless --yes was given."""
    from .deploy.config import DeployConfig
    from .deploy.engine import DeployEngine

    config = DeployConfig.from_yaml(args.config)
    engine = DeployEngine(config, profile=args.profile)

    if not args.yes:
        answer = input(f"Destroy all agents in '{config.project.name}'? [y/N] ")
        if answer.lower() != "y":
            print("Aborted.")
            sys.exit(0)

    engine.destroy()


def _run_dashboard(args):
    """Launch the local agent discovery dashboard."""
    from .dashboard.cli import run_dashboard

    run_dashboard(args)


def _run_platform(args):
    """Deploy the Agent Platform as a Databricks App."""
    import shutil
    import tempfile
    from pathlib import Path

    print(f"Deploying Agent Platform as '{args.app_name}' (profile={args.profile or 'default'})...")

    # Create a temporary deployment directory with app.yaml
    platform_src = Path(__file__).parent / "dashboard"
    app_yaml_src = platform_src / "app.yaml"

    if not app_yaml_src.exists():
        print("Error: app.yaml not found in dashboard package", file=sys.stderr)
        sys.exit(1)

    # Build the deployment bundle in a temp dir
    with tempfile.TemporaryDirectory() as tmpdir:
        deploy_dir = Path(tmpdir)

        # Copy app.yaml
        shutil.copy2(app_yaml_src, deploy_dir / "app.yaml")

        # Write requirements.txt for the deployed app, pinning at least the
        # locally-installed version; fall back to a known floor if the
        # package metadata is unavailable (e.g. editable/dev installs).
        try:
            from importlib.metadata import version as _get_version
            pkg_version = _get_version("dbx-agent-app")
        except Exception:
            pkg_version = "0.3.0"

        (deploy_dir / "requirements.txt").write_text(
            f"dbx-agent-app>={pkg_version}\n"
        )

        # Copy static frontend assets if they exist
        static_dir = platform_src / "static"
        if static_dir.is_dir():
            shutil.copytree(static_dir, deploy_dir / "static")

        # Deploy via databricks CLI
        import subprocess

        cmd = [
            "databricks", "apps", "deploy", args.app_name,
            "--source-code-path", str(deploy_dir),
        ]
        if args.profile:
            cmd.extend(["--profile", args.profile])

        print(f"Running: {' '.join(cmd)}")
        try:
            result = subprocess.run(cmd, capture_output=True, text=True)
        except FileNotFoundError:
            # subprocess raises FileNotFoundError when the executable is
            # missing — report it cleanly instead of dumping a traceback.
            print(
                "Error: 'databricks' CLI not found on PATH — install the Databricks CLI and retry.",
                file=sys.stderr,
            )
            sys.exit(1)

        if result.returncode != 0:
            print(f"Deploy failed:\n{result.stderr}", file=sys.stderr)
            sys.exit(1)

        print(result.stdout)
        print(f"Agent Platform deployed as '{args.app_name}'")


if __name__ == "__main__":
    main()
main() diff --git a/dbx-agent-app/src/dbx_agent_app/core/__init__.py b/dbx-agent-app/src/dbx_agent_app/core/__init__.py new file mode 100644 index 00000000..489da877 --- /dev/null +++ b/dbx-agent-app/src/dbx_agent_app/core/__init__.py @@ -0,0 +1,22 @@ +"""Core agent components: @app_agent decorator, helpers, and types.""" + +from .helpers import add_agent_card, add_mcp_endpoints +from .app_agent import app_agent, AppAgent +from .types import AgentRequest, AgentResponse, InputItem, OutputItem, OutputTextContent, StreamEvent, UserContext + +__all__ = [ + # Primary API: @app_agent decorator + "app_agent", + "AppAgent", + # Wire protocol types + "AgentRequest", + "AgentResponse", + "InputItem", + "OutputItem", + "OutputTextContent", + "StreamEvent", + "UserContext", + # Helpers + "add_agent_card", + "add_mcp_endpoints", +] diff --git a/dbx-agent-app/src/dbx_agent_app/core/app_agent.py b/dbx-agent-app/src/dbx_agent_app/core/app_agent.py new file mode 100644 index 00000000..2181397b --- /dev/null +++ b/dbx-agent-app/src/dbx_agent_app/core/app_agent.py @@ -0,0 +1,328 @@ +""" +@app_agent decorator — the API for building discoverable agents on Databricks Apps. + +One decorator wires a plain async function into a fully-featured FastAPI app. 
+ +Usage: + from dbx_agent_app import app_agent, AgentRequest, AgentResponse + + @app_agent( + name="research", + description="Search expert transcripts", + capabilities=["research", "search"], + ) + async def research(request: AgentRequest) -> AgentResponse: + result = my_graph.invoke(request.messages) + return AgentResponse.text(result) + + # research.app -> FastAPI with /invocations, /.well-known/agent.json, /health, /api/mcp + # uvicorn app:research.app +""" + +from __future__ import annotations + +import inspect +import json +import logging +from typing import Any, AsyncGenerator, Callable, Dict, List, Optional, Union + +from fastapi import FastAPI, Request +from fastapi.responses import JSONResponse, StreamingResponse + +from .types import AgentRequest, AgentResponse, InputItem, StreamEvent, UserContext + +logger = logging.getLogger(__name__) + + +def _python_type_to_json_schema(annotation) -> str: + """Convert a Python type annotation to a JSON Schema type string.""" + if annotation is inspect.Parameter.empty: + return "string" + type_map = { + str: "string", + int: "integer", + float: "number", + bool: "boolean", + list: "array", + dict: "object", + } + return type_map.get(annotation, "string") + + +class AppAgent: + """ + Wraps a handler function with a FastAPI app and agent platform endpoints. + + Created by the ``@app_agent`` decorator. 
    Provides:

    - ``.app`` — lazily-built FastAPI with /invocations, agent card, health, MCP
    - ``.tool(description=...)`` — decorator to register tools
    - ``__call__()`` — calls handler directly (for testing)
    """

    def __init__(
        self,
        handler: Callable,
        *,
        name: str,
        description: str,
        capabilities: List[str],
        version: str = "1.0.0",
        enable_mcp: bool = True,
    ):
        # Wrapped user handler; call style is inspected once up front so the
        # request path never re-inspects the function per invocation.
        self._handler = handler
        self._is_async = inspect.iscoroutinefunction(handler)
        self._is_generator = inspect.isasyncgenfunction(handler)

        # Agent-card metadata surfaced at /.well-known/agent.json.
        self.name = name
        self.description = description
        self.capabilities = capabilities
        self.version = version
        self.enable_mcp = enable_mcp

        # Tools registered via .tool(); the FastAPI app is built lazily on
        # first access of .app (see the `app` property below).
        self._tools: List[Dict[str, Any]] = []
        self._app: Optional[FastAPI] = None

    # ------------------------------------------------------------------
    # Tool registration
    # ------------------------------------------------------------------

    def tool(self, description: str, parameters: Optional[Dict[str, Any]] = None):
        """
        Decorator to register a function as an agent tool.

        Registered tools appear in the agent card, MCP, and /api/tools/.

        Usage:
            @my_agent.tool(description="Search the database")
            async def search(query: str) -> dict:
                return {...}
        """

        def decorator(func: Callable):
            sig = inspect.signature(func)
            if parameters is None:
                # No explicit schema: infer one from the signature —
                # annotation -> JSON Schema type, no default -> required.
                param_schema = {}
                for pname, param in sig.parameters.items():
                    param_schema[pname] = {
                        "type": _python_type_to_json_schema(param.annotation),
                        "required": param.default is inspect.Parameter.empty,
                    }
            else:
                param_schema = parameters

            self._tools.append({
                "name": func.__name__,
                "description": description,
                "parameters": param_schema,
                "function": func,
            })
            return func

        return decorator

    # ------------------------------------------------------------------
    # Direct call (for testing)
    # ------------------------------------------------------------------

    async def __call__(self, request: AgentRequest) -> AgentResponse:
        """Call the handler directly. Useful for testing without HTTP."""
        # NOTE(review): async-generator (streaming) handlers are not
        # special-cased here — calling one this way would stringify the
        # generator object via _coerce_response. Use .app for streaming.
        result = self._handler(request)
        if inspect.isawaitable(result):
            result = await result
        return self._coerce_response(result)

    # ------------------------------------------------------------------
    # FastAPI app (lazy)
    # ------------------------------------------------------------------

    @property
    def app(self) -> FastAPI:
        """Lazily build and return the FastAPI application."""
        if self._app is None:
            self._app = self._build_app()
        return self._app

    def _build_app(self) -> FastAPI:
        # Assemble all platform endpoints on a fresh FastAPI instance.
        fastapi_app = FastAPI()

        self._setup_invocations(fastapi_app)
        self._setup_agent_card(fastapi_app)
        self._setup_health(fastapi_app)
        self._setup_tool_endpoints(fastapi_app)

        # MCP is mounted only when enabled AND at least one tool exists.
        if self.enable_mcp and self._tools:
            self._setup_mcp(fastapi_app)

        return fastapi_app

    # ------------------------------------------------------------------
    # Endpoint setup
    # ------------------------------------------------------------------

    def _setup_invocations(self, fastapi_app: FastAPI):
        agent = self  # bound for the route closure below

        @fastapi_app.post("/invocations")
        async def invocations(request: Request):
            body = await request.json()
            input_items = body.get("input", [])

            agent_request = AgentRequest(
                input=[InputItem(**item) for item in input_items]
            )

            # Extract on-behalf-of user identity from Databricks Apps headers
            fwd_token = request.headers.get("x-forwarded-access-token")
            fwd_email = request.headers.get("x-forwarded-email")
            fwd_user = request.headers.get("x-forwarded-user")
            if fwd_token or fwd_email or fwd_user:
                # Stored on the request's private attr; exposed read-only
                # via AgentRequest.user_context.
                agent_request._user_context = UserContext(
                    access_token=fwd_token,
                    email=fwd_email,
                    user=fwd_user,
                )

            if not agent_request.last_user_message:
                return JSONResponse(
                    status_code=400,
                    content={"error": "No user message found in input"},
                )

            # Handle async generator (streaming)
            if agent._is_generator:
                return StreamingResponse(
                    agent._stream_handler(agent_request),
                    media_type="text/event-stream",
                )

            # Non-streaming: call handler and coerce response
            result = agent._handler(agent_request)
            if inspect.isawaitable(result):
                result = await result

            response = agent._coerce_response(result)
            return response.to_wire()

    def _setup_agent_card(self, fastapi_app: FastAPI):
        agent = self  # bound for the route closure below

        @fastapi_app.get("/.well-known/agent.json")
        async def agent_card():
            # Card is rebuilt per request, so tools registered after the app
            # was built still show up.
            card = {
                "schema_version": "a2a/1.0",
                "name": agent.name,
                "description": agent.description,
                "capabilities": agent.capabilities,
                "version": agent.version,
                "endpoints": {
                    "invocations": "/invocations",
                },
                "tools": [
                    {
                        "name": t["name"],
                        "description": t["description"],
                        "parameters": t["parameters"],
                    }
                    for t in agent._tools
                ],
            }
            if agent.enable_mcp and agent._tools:
                card["endpoints"]["mcp"] = "/api/mcp"
            return card

    def _setup_health(self, fastapi_app: FastAPI):
        agent = self  # bound for the route closure below

        @fastapi_app.get("/health")
        async def health():
            return {
                "status": "healthy",
                "agent": agent.name,
                "version": agent.version,
            }

    def _setup_tool_endpoints(self, fastapi_app: FastAPI):
        # Each registered tool function is mounted verbatim as
        # POST /api/tools/<name>; FastAPI derives the request contract from
        # the tool's own signature.
        for tool_def in self._tools:
            fastapi_app.post(f"/api/tools/{tool_def['name']}")(tool_def["function"])

    def _setup_mcp(self, fastapi_app: FastAPI):
        try:
            from .helpers import add_mcp_endpoints

            add_mcp_endpoints(fastapi_app, tools=self._tools)
        except ImportError:
            # Best-effort: the agent still serves without MCP endpoints.
            logger.warning("Could not import add_mcp_endpoints — MCP disabled")

    # ------------------------------------------------------------------
    # Response coercion
    # ------------------------------------------------------------------

    @staticmethod
    def _coerce_response(result: Any) -> AgentResponse:
        """Coerce handler return value to AgentResponse."""
        # Accepts AgentResponse, dict, or str; anything else is stringified.
        if isinstance(result, AgentResponse):
            return result
        if isinstance(result, dict):
            return AgentResponse.from_dict(result)
        if isinstance(result, str):
            return AgentResponse.text(result)
        return AgentResponse.text(str(result))

    # ------------------------------------------------------------------
    # Streaming
    # ------------------------------------------------------------------

    async def _stream_handler(self, request: AgentRequest) -> AsyncGenerator[str, None]:
        """Consume an async generator handler and yield SSE events."""
        # StreamEvent passes through; bare strings (or anything else) are
        # wrapped as text-delta events.
        async for event in self._handler(request):
            if isinstance(event, StreamEvent):
                yield event.to_sse()
            elif isinstance(event, str):
                yield StreamEvent.text_delta(event).to_sse()
            else:
                yield StreamEvent.text_delta(str(event)).to_sse()



def app_agent(
    name: str,
    description: str,
    capabilities: List[str],
    *,
    version: str = "1.0.0",
    enable_mcp: bool = True,
) -> Callable[[Callable], AppAgent]:
    """
    Decorator to turn an async function into a discoverable Databricks Apps agent.
+ + The decorated function receives an ``AgentRequest`` and returns one of: + - ``AgentResponse`` (direct) + - ``str`` (auto-wrapped via ``AgentResponse.text()``) + - ``dict`` (auto-wrapped via ``AgentResponse.from_dict()``) + - ``AsyncGenerator[StreamEvent | str, None]`` (streaming SSE) + + Returns an ``AppAgent`` instance. Access the FastAPI app via ``.app``. + + Agent governance is handled via uc_securable resources declared in app.yaml + or agents.yaml — no runtime UC registration needed. + + Example: + @app_agent(name="hello", description="Greeter", capabilities=["chat"]) + async def hello(request: AgentRequest) -> AgentResponse: + return AgentResponse.text(f"Hello! You said: {request.last_user_message}") + + # uvicorn app:hello.app + """ + + def decorator(func: Callable) -> AppAgent: + return AppAgent( + func, + name=name, + description=description, + capabilities=capabilities, + version=version, + enable_mcp=enable_mcp, + ) + + return decorator diff --git a/dbx-agent-app/src/dbx_agent_app/core/compat.py b/dbx-agent-app/src/dbx_agent_app/core/compat.py new file mode 100644 index 00000000..4156fde8 --- /dev/null +++ b/dbx-agent-app/src/dbx_agent_app/core/compat.py @@ -0,0 +1,47 @@ +""" +Optional LangChain compatibility helpers. + +Converts SDK types to LangChain message objects. This replaces the +``ResponsesAgent.prep_msgs_for_llm()`` method for agents that use LangChain. + +Requires ``langchain-core`` — only import this module from agents that +already depend on LangChain. 
+ +Usage: + from dbx_agent_app.core.compat import to_langchain_messages + + messages = to_langchain_messages(request) + result = llm.invoke(messages) +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING, List + +if TYPE_CHECKING: + from langchain_core.messages import BaseMessage + +from .types import AgentRequest, InputItem + + +def to_langchain_messages(request: AgentRequest) -> List[BaseMessage]: + """ + Convert an AgentRequest into a list of LangChain BaseMessage objects. + + Maps roles: "user" -> HumanMessage, "assistant" -> AIMessage, + "system" -> SystemMessage. Unknown roles default to HumanMessage. + """ + from langchain_core.messages import AIMessage, HumanMessage, SystemMessage + + _role_map = { + "user": HumanMessage, + "assistant": AIMessage, + "system": SystemMessage, + } + + messages: List[BaseMessage] = [] + for item in request.input: + cls = _role_map.get(item.role, HumanMessage) + messages.append(cls(content=item.content)) + + return messages diff --git a/dbx-agent-app/src/dbx_agent_app/core/helpers.py b/dbx-agent-app/src/dbx_agent_app/core/helpers.py new file mode 100644 index 00000000..c689628c --- /dev/null +++ b/dbx-agent-app/src/dbx_agent_app/core/helpers.py @@ -0,0 +1,189 @@ +""" +Lightweight helpers to make any FastAPI app discoverable in the agent platform. + +These are opt-in functions that agent developers call on their own FastAPI app. +They do NOT impose a framework — they just add discoverability endpoints. + +Usage: + from fastapi import FastAPI + from dbx_agent_app import add_agent_card, add_mcp_endpoints + + app = FastAPI() + + @app.post("/invocations") + async def invocations(request): ... 
    add_agent_card(app, name="my_agent", description="Does stuff", capabilities=["search"])
    add_mcp_endpoints(app, tools=[...])  # optional
"""

import logging
from typing import Any, Dict, List, Optional

from fastapi import FastAPI, Request

logger = logging.getLogger(__name__)


def add_agent_card(
    app: FastAPI,
    *,
    name: str,
    description: str,
    capabilities: List[str],
    version: str = "1.0.0",
    tools: Optional[List[Dict[str, Any]]] = None,
) -> None:
    """
    Add a GET /.well-known/agent.json endpoint to a FastAPI app.

    This makes the app discoverable by the agent platform scanner.
    Any Databricks App serving this endpoint will be auto-discovered.

    Also adds a GET /health endpoint reporting the agent name/version.

    Args:
        app: FastAPI application to add the endpoint to
        name: Agent name (used for discovery and UC registration)
        description: Human-readable description of what the agent does
        capabilities: List of capability tags (e.g. ["search", "analysis"])
        version: Agent version string
        tools: Optional list of tool metadata dicts with name, description, parameters
    """
    # The card payload is built once at registration time; both routes below
    # close over it and return the same dict on every request.
    card = {
        "schema_version": "a2a/1.0",
        "name": name,
        "description": description,
        "capabilities": capabilities,
        "version": version,
        "endpoints": {
            "invocations": "/invocations",
        },
        "tools": tools or [],
    }

    @app.get("/.well-known/agent.json")
    async def agent_card():
        return card

    @app.get("/health")
    async def health():
        return {"status": "healthy", "agent": name, "version": version}

    logger.info("Agent card registered: %s (capabilities=%s)", name, capabilities)


def add_mcp_endpoints(
    app: FastAPI,
    *,
    tools: List[Dict[str, Any]],
    server_name: Optional[str] = None,
    server_version: str = "1.0.0",
) -> None:
    """
    Add MCP JSON-RPC endpoints to a FastAPI app.

    Adds:
        POST /api/mcp — MCP JSON-RPC 2.0 (tools/list, tools/call, server/info)
        GET /api/mcp/tools — convenience tool listing

    Each tool dict must have:
    - name: str
    - description: str
    - function: async callable
    - parameters: dict of param_name -> {"type": str, "required": bool}

    Args:
        app: FastAPI application
        tools: List of tool definition dicts
        server_name: MCP server name (defaults to app title)
        server_version: MCP server version
    """
    _server_name = server_name or getattr(app, "title", "mcp-server")

    def _to_mcp_format(tool_list):
        """Convert tool dicts to MCP tool format."""
        mcp_tools = []
        for t in tool_list:
            props = {}
            required = []
            for pname, pspec in t.get("parameters", {}).items():
                props[pname] = {
                    "type": pspec.get("type", "string"),
                    "description": pspec.get("description", ""),
                }
                if pspec.get("required", False):
                    required.append(pname)

            mcp_tools.append({
                "name": t["name"],
                "description": t["description"],
                "inputSchema": {
                    "type": "object",
                    "properties": props,
                    "required": required,
                },
            })
        return mcp_tools

    # Lookup table and MCP-formatted tool list are computed once at
    # registration time; the route closures below reuse them.
    tool_lookup = {t["name"]: t for t in tools}
    mcp_formatted = _to_mcp_format(tools)

    @app.post("/api/mcp")
    async def mcp_jsonrpc(request: Request):
        body = await request.json()
        method = body.get("method")
        params = body.get("params", {})
        request_id = body.get("id")

        try:
            if method == "tools/list":
                return {
                    "jsonrpc": "2.0",
                    "id": request_id,
                    "result": {"tools": mcp_formatted},
                }

            elif method == "tools/call":
                tool_name = params.get("name")
                arguments = params.get("arguments", {})
                tool = tool_lookup.get(tool_name)
                if not tool:
                    # Caught below and reported as a JSON-RPC error.
                    raise ValueError(f"Tool not found: {tool_name}")

                # Tool functions are expected to be async callables.
                result = await tool["function"](**arguments)
                return {
                    "jsonrpc": "2.0",
                    "id": request_id,
                    "result": {"result": result},
                }

            elif method == "server/info":
                return {
                    "jsonrpc": "2.0",
                    "id": request_id,
                    "result": {
                        "name": _server_name,
                        "version": server_version,
                        "protocol_version": "1.0",
                    },
                }

            else:
                # JSON-RPC 2.0 "method not found" error code.
                return {
                    "jsonrpc": "2.0",
                    "id": request_id,
                    "error": {"code": -32601, "message": f"Method not found: {method}"},
                }

        except Exception as e:
            # JSON-RPC 2.0 "internal error": log and surface the message.
            logger.error("MCP request failed: %s", e)
            return {
                "jsonrpc": "2.0",
                "id": request_id,
                "error": {"code": -32603, "message": str(e)},
            }

    @app.get("/api/mcp/tools")
    async def list_mcp_tools():
        return {"tools": mcp_formatted}

    logger.info("MCP endpoints registered: %d tools", len(tools))
diff --git a/dbx-agent-app/src/dbx_agent_app/core/types.py b/dbx-agent-app/src/dbx_agent_app/core/types.py
new file mode 100644
index 00000000..1f5ef752
--- /dev/null
+++ b/dbx-agent-app/src/dbx_agent_app/core/types.py
@@ -0,0 +1,261 @@
"""
Wire protocol types for the Databricks Responses Agent protocol.

These Pydantic models replace the mlflow.types.responses types for agents
deployed as Databricks Apps. The wire format is identical:

    Request: {"input": [{"role": "user", "content": "..."}]}
    Response: {"output": [{"type": "message", "id": "...", "content": [...]}]}
    Stream: {"type": "response.output_text.delta", "item_id": "...", "delta": "..."}

Usage:
    from dbx_agent_app import AgentRequest, AgentResponse, StreamEvent

    def predict(request: AgentRequest) -> AgentResponse:
        return AgentResponse.text("Hello!")
"""

from __future__ import annotations

import os
from typing import Any, Dict, List, Optional
from uuid import uuid4

from pydantic import BaseModel, Field, PrivateAttr


# ---------------------------------------------------------------------------
# User context (on-behalf-of user authorization)
# ---------------------------------------------------------------------------


class UserContext(BaseModel):
    """
    User identity from Databricks Apps on-behalf-of-user authorization.
    When user authorization scopes are configured on a Databricks App,
    the platform injects X-Forwarded-* headers with the calling user's
    identity. The @app_agent decorator extracts these into a UserContext.

    Usage:
        @app_agent(name="research", ...)
        async def research(request: AgentRequest) -> AgentResponse:
            if request.user_context and request.user_context.is_authenticated:
                ws = request.user_context.get_workspace_client()
                # UC queries execute as the user — row-level security applies
    """

    # repr=False keeps the token out of logs/repr output.
    access_token: Optional[str] = Field(default=None, repr=False)
    email: Optional[str] = None
    user: Optional[str] = None

    @property
    def is_authenticated(self) -> bool:
        """True when a user access token is available."""
        return self.access_token is not None

    def get_workspace_client(self, host: Optional[str] = None):
        """
        Create a WorkspaceClient that executes as this user.

        Args:
            host: Databricks workspace URL. Defaults to DATABRICKS_HOST env var.

        Returns:
            WorkspaceClient authenticated with the user's token.

        Raises:
            ValueError: If no access token is available.
        """
        if not self.access_token:
            raise ValueError("No user access token available. Configure user authorization scopes on the Databricks App.")

        from databricks.sdk import WorkspaceClient
        from databricks.sdk.config import Config

        # Force PAT auth and explicitly null out client_id/client_secret
        # to prevent the SDK from picking up service principal env vars
        # (DATABRICKS_CLIENT_ID/SECRET) which would cause a "multiple auth
        # methods" error. Pattern from databricks-genie-workbench.
        cfg = Config(
            host=host or os.environ.get("DATABRICKS_HOST"),
            token=self.access_token,
            auth_type="pat",
            client_id=None,
            client_secret=None,
        )
        return WorkspaceClient(config=cfg)

    def as_forwarded_headers(self) -> Dict[str, str]:
        """
        Build X-Forwarded-* headers for forwarding user identity to downstream agents.

        Only the fields that are set are included.

        Returns:
            Dict of headers to include in agent-to-agent HTTP calls.
        """
        headers = {}
        if self.access_token:
            headers["X-Forwarded-Access-Token"] = self.access_token
        if self.email:
            headers["X-Forwarded-Email"] = self.email
        if self.user:
            headers["X-Forwarded-User"] = self.user
        return headers


# ---------------------------------------------------------------------------
# Request types
# ---------------------------------------------------------------------------


class InputItem(BaseModel):
    """A single input message in the Responses Agent protocol."""

    role: str
    content: str


class AgentRequest(BaseModel):
    """
    Incoming request to an agent via /invocations.

    Wire format: {"input": [{"role": "user", "content": "..."}]}
    """

    input: List[InputItem]
    # Populated by the @app_agent /invocations endpoint from the
    # X-Forwarded-* headers; private so it never appears in the wire schema.
    _user_context: Optional[UserContext] = PrivateAttr(default=None)

    @property
    def user_context(self) -> Optional[UserContext]:
        """User identity from on-behalf-of authorization, or None."""
        return self._user_context

    @property
    def messages(self) -> List[InputItem]:
        """All input messages (alias for input)."""
        return self.input

    @property
    def last_user_message(self) -> str:
        """Extract the last user message content, or empty string."""
        for item in reversed(self.input):
            if item.role == "user":
                return item.content
        return ""


# ---------------------------------------------------------------------------
# Response types
# ---------------------------------------------------------------------------


class OutputTextContent(BaseModel):
    """A text content block inside an output item."""

    type: str = "output_text"
    text: str = ""


class OutputItem(BaseModel):
    """A single output item in the Responses Agent protocol."""

    type: str = "message"
    # Each item gets a fresh UUID unless the caller supplies one.
    id: str = Field(default_factory=lambda: str(uuid4()))
    content: List[OutputTextContent] = Field(default_factory=list)


class AgentResponse(BaseModel):
    """
    Outgoing response from an agent via /invocations.

    Wire format: {"output": [{"type": "message", "id": "...", "content": [...]}]}

    Use class methods for convenient construction:
        AgentResponse.text("Hello!")
        AgentResponse.from_dict({"response": "...", "metrics": {...}})
    """

    output: List[OutputItem] = Field(default_factory=list)

    @classmethod
    def text(cls, text: str, item_id: Optional[str] = None) -> AgentResponse:
        """Create a response with a single text output item."""
        item = OutputItem(
            id=item_id or str(uuid4()),
            content=[OutputTextContent(text=text)],
        )
        return cls(output=[item])

    @classmethod
    def from_dict(cls, data: Dict[str, Any], item_id: Optional[str] = None) -> AgentResponse:
        """
        Create a response from a dict.

        If the dict has a "response" key, uses that as text.
        Otherwise serializes the whole dict as the text content.
        """
        import json

        if "response" in data:
            text = data["response"]
        else:
            text = json.dumps(data)

        return cls.text(text, item_id=item_id)

    def to_wire(self) -> Dict[str, Any]:
        """Serialize to the wire protocol dict."""
        return self.model_dump()


# ---------------------------------------------------------------------------
# Streaming types
# ---------------------------------------------------------------------------


class StreamEvent(BaseModel):
    """
    A single SSE event in the streaming Responses Agent protocol.

    Event types:
        response.output_text.delta — a text chunk
        response.output_item.done — final item with full text

    Use class methods for convenient construction:
        StreamEvent.text_delta("chunk", item_id="...")
        StreamEvent.done("full text", item_id="...")
    """

    type: str
    item_id: Optional[str] = None
    delta: Optional[str] = None
    item: Optional[OutputItem] = None

    @classmethod
    def text_delta(cls, delta: str, item_id: Optional[str] = None) -> StreamEvent:
        """Create a text delta event."""
        return cls(
            type="response.output_text.delta",
            item_id=item_id or str(uuid4()),
            delta=delta,
        )

    @classmethod
    def done(cls, text: str, item_id: Optional[str] = None) -> StreamEvent:
        """Create a done event with the final output item."""
        # The event and its embedded item share the same id.
        _id = item_id or str(uuid4())
        item = OutputItem(
            id=_id,
            content=[OutputTextContent(text=text)],
        )
        return cls(
            type="response.output_item.done",
            item_id=_id,
            item=item,
        )

    def to_sse(self) -> str:
        """Serialize to SSE data line."""
        import json

        # exclude_none keeps delta-only and item-only events compact.
        return f"data: {json.dumps(self.model_dump(exclude_none=True))}\n\n"
diff --git a/dbx-agent-app/src/dbx_agent_app/dashboard/__init__.py b/dbx-agent-app/src/dbx_agent_app/dashboard/__init__.py
new file mode 100644
index 00000000..89929a08
--- /dev/null
+++ b/dbx-agent-app/src/dbx_agent_app/dashboard/__init__.py
@@ -0,0 +1,14 @@
"""
Developer dashboard for agent discovery.
Launch via CLI:
    dbx-agent-app dashboard --profile my-profile

Or programmatically:
    from dbx_agent_app.dashboard import create_dashboard_app, run_dashboard
"""

from .app import create_dashboard_app
from .cli import main as run_dashboard

__all__ = ["create_dashboard_app", "run_dashboard"]
diff --git a/dbx-agent-app/src/dbx_agent_app/dashboard/__main__.py b/dbx-agent-app/src/dbx_agent_app/dashboard/__main__.py
new file mode 100644
index 00000000..88f86b8a
--- /dev/null
+++ b/dbx-agent-app/src/dbx_agent_app/dashboard/__main__.py
@@ -0,0 +1,40 @@
"""
Entry point for the Agent Platform when deployed as a Databricks App.

Deployed via: dbx-agent-app platform --profile

Runtime: uvicorn dbx_agent_app.dashboard.__main__:app --host 0.0.0.0 --port 8000

Environment variables (set automatically by Databricks Apps):
    DATABRICKS_HOST — workspace URL
    DATABRICKS_TOKEN — auth token (via service principal)
    DATABRICKS_APP_URL — this app's public URL
"""

import logging
import os

from .app import create_dashboard_app
from .governance import GovernanceService
from .scanner import DashboardScanner
from .system_builder import SystemBuilderService

logging.basicConfig(level=logging.INFO, format="%(levelname)s %(name)s: %(message)s")
logger = logging.getLogger(__name__)

# Use the Databricks CLI profile if set, otherwise rely on env vars
profile = os.getenv("DATABRICKS_PROFILE")
catalog = os.getenv("UC_CATALOG", "main")

# Services are wired at import time so uvicorn can import `app` directly.
scanner = DashboardScanner(profile=profile)
governance = GovernanceService(scanner, profile=profile, catalog=catalog)
system_builder = SystemBuilderService(scanner=scanner, profile=profile)

app = create_dashboard_app(
    scanner,
    profile=profile,
    governance=governance,
    system_builder=system_builder,
    auto_scan_interval=60,
)

logger.info("Agent Platform app initialized (catalog=%s)", catalog)
diff --git a/dbx-agent-app/src/dbx_agent_app/dashboard/analytics.py b/dbx-agent-app/src/dbx_agent_app/dashboard/analytics.py
new file mode 100644
index 00000000..c4ce25a7
--- /dev/null
+++ b/dbx-agent-app/src/dbx_agent_app/dashboard/analytics.py
@@ -0,0 +1,70 @@
"""In-memory analytics tracker for agent invocations."""

from __future__ import annotations

import time
from collections import deque
from dataclasses import dataclass, asdict
from typing import Any


@dataclass
class InvocationRecord:
    # Epoch seconds (time.time()) at which the invocation was recorded.
    timestamp: float
    success: bool
    latency_ms: int
    source: str  # "test" | "chat" | "evaluate"
    error: str | None = None


class AnalyticsTracker:
    """In-memory ring buffer of recent agent invocations."""

    def __init__(self, max_per_agent: int = 100):
        # Per-agent cap, enforced via deque(maxlen=...) in record().
        self._max = max_per_agent
self._records: dict[str, deque[InvocationRecord]] = {} + + def record( + self, + agent_name: str, + *, + success: bool, + latency_ms: int, + source: str, + error: str | None = None, + ) -> None: + buf = self._records.setdefault(agent_name, deque(maxlen=self._max)) + buf.append(InvocationRecord( + timestamp=time.time(), + success=success, + latency_ms=latency_ms, + source=source, + error=error, + )) + + def get_summary(self, agent_name: str) -> dict[str, Any]: + buf = self._records.get(agent_name) + if not buf: + return { + "total": 0, + "success_count": 0, + "failure_count": 0, + "success_rate": 0.0, + "avg_latency_ms": 0, + "recent": [], + } + + records = list(buf) + total = len(records) + success_count = sum(1 for r in records if r.success) + failure_count = total - success_count + avg_latency = sum(r.latency_ms for r in records) / total if total else 0 + + return { + "total": total, + "success_count": success_count, + "failure_count": failure_count, + "success_rate": round(success_count / total, 3) if total else 0.0, + "avg_latency_ms": round(avg_latency), + "recent": [asdict(r) for r in reversed(records[-20:])], + } diff --git a/dbx-agent-app/src/dbx_agent_app/dashboard/app.py b/dbx-agent-app/src/dbx_agent_app/dashboard/app.py new file mode 100644 index 00000000..1aa8fb77 --- /dev/null +++ b/dbx-agent-app/src/dbx_agent_app/dashboard/app.py @@ -0,0 +1,468 @@ +""" +FastAPI application for the developer dashboard. 
+ +Routes: + SPA: GET / — React SPA (if built) or server-rendered HTML fallback + API: GET /api/agents — JSON list of agents + GET /api/agents/{name}/card — full agent card + POST /api/agents/{name}/test — call agent via /invocations + POST /api/agents/{name}/evaluate — run eval bridge against agent + GET /api/agents/{name}/analytics — invocation analytics summary + GET /api/agents/{name}/lineage — agent-centric lineage graph + GET /api/agents/{name}/governance — app governance status + declared resources + POST /api/agents/{name}/mcp — MCP JSON-RPC proxy + POST /api/agents/{name}/chat — A2A message/send proxy (A2A -> /invocations -> MCP) + POST /api/agents/{name}/chat/stream — SSE streaming A2A proxy + GET /api/lineage — workspace-wide lineage graph + POST /api/scan — trigger re-scan + GET /health — health check +""" + +import asyncio +import logging +import time +from contextlib import asynccontextmanager +from pathlib import Path +from typing import Optional + +from fastapi import FastAPI, Request +from fastapi.responses import FileResponse, HTMLResponse, JSONResponse +from fastapi.staticfiles import StaticFiles +from pydantic import BaseModel + +from .analytics import AnalyticsTracker +from .governance import GovernanceService +from .scanner import DashboardScanner +from .system_builder import SystemBuilderService, SystemCreate, SystemUpdate, DeployProgress +from .templates import render_agent_list, render_agent_detail + +logger = logging.getLogger(__name__) + +# Path to built React SPA assets +STATIC_DIR = Path(__file__).parent / "static" + + +class ChatRequest(BaseModel): + message: str + context_id: Optional[str] = None + + +def create_dashboard_app( + scanner: DashboardScanner, + profile: Optional[str] = None, + governance: Optional[GovernanceService] = None, + system_builder: Optional[SystemBuilderService] = None, + auto_scan_interval: int = 60, +) -> FastAPI: + """Build and return the dashboard FastAPI app. 
+ + Args: + scanner: DashboardScanner for workspace discovery + profile: Databricks CLI profile name + governance: Optional GovernanceService for lineage/UC + auto_scan_interval: Seconds between background scans (0 to disable) + """ + async def _background_scan(): + """Periodically re-scan workspace for agents.""" + while True: + await asyncio.sleep(auto_scan_interval) + try: + agents = await scanner.scan() + logger.info("Background scan found %d agent(s)", len(agents)) + except Exception as e: + logger.warning("Background scan failed: %s", e) + + @asynccontextmanager + async def lifespan(app): + # Initial scan on startup + try: + agents = await scanner.scan() + logger.info("Startup scan found %d agent(s)", len(agents)) + except Exception as e: + logger.warning("Startup scan failed: %s", e) + + # Start background scan task if interval > 0 + bg_task = None + if auto_scan_interval > 0: + bg_task = asyncio.create_task(_background_scan()) + logger.info("Background scan started (every %ds)", auto_scan_interval) + + yield + + if bg_task: + bg_task.cancel() + try: + await bg_task + except asyncio.CancelledError: + pass + + app = FastAPI( + title="Agent Platform", + docs_url=None, + redoc_url=None, + lifespan=lifespan, + ) + + analytics = AnalyticsTracker() + + has_spa = (STATIC_DIR / "index.html").is_file() + + # --- JSON API --------------------------------------------------------- + + @app.get("/api/agents") + async def api_agents(): + agents = scanner.get_agents() + return [ + { + "name": a.name, + "endpoint_url": a.endpoint_url, + "app_name": a.app_name, + "description": a.description, + "capabilities": a.capabilities, + "protocol_version": a.protocol_version, + } + for a in agents + ] + + @app.get("/api/agents/{name}/card") + async def api_agent_card(name: str): + agent = scanner.get_agent_by_name(name) + if not agent: + return JSONResponse({"error": "Agent not found"}, status_code=404) + + try: + card = await scanner.get_agent_card(agent.endpoint_url) + return card 
+ except Exception as e: + return JSONResponse({"error": str(e)}, status_code=502) + + @app.post("/api/agents/{name}/mcp") + async def api_mcp_proxy(name: str, request: Request): + agent = scanner.get_agent_by_name(name) + if not agent: + return JSONResponse({"error": "Agent not found"}, status_code=404) + + try: + payload = await request.json() + result = await scanner.proxy_mcp(agent.endpoint_url, payload) + return result + except Exception as e: + return JSONResponse( + {"jsonrpc": "2.0", "id": None, "error": {"code": -1, "message": str(e)}}, + status_code=502, + ) + + @app.post("/api/agents/{name}/test") + async def api_test_agent(name: str, body: ChatRequest): + """Test an agent via the /invocations protocol (Databricks standard).""" + agent = scanner.get_agent_by_name(name) + if not agent: + return JSONResponse({"error": "Agent not found"}, status_code=404) + + t0 = time.monotonic() + try: + result = await scanner.call_invocations(agent.endpoint_url, body.message) + latency = int((time.monotonic() - t0) * 1000) + analytics.record(name, success=True, latency_ms=latency, source="test") + if governance and isinstance(result, dict): + trace = result.get("_trace", {}) + if trace: + try: + governance.ingest_trace(name, trace) + except Exception: + pass + return {"result": result} + except Exception as e: + latency = int((time.monotonic() - t0) * 1000) + analytics.record(name, success=False, latency_ms=latency, source="test", error=str(e)) + return JSONResponse({"error": str(e)}, status_code=502) + + @app.post("/api/agents/{name}/chat") + async def api_chat(name: str, body: ChatRequest): + """Send an A2A message to an agent and return the response.""" + agent = scanner.get_agent_by_name(name) + if not agent: + return JSONResponse({"error": "Agent not found"}, status_code=404) + + t0 = time.monotonic() + try: + result = await scanner.send_a2a_message( + agent.endpoint_url, body.message, body.context_id + ) + latency = int((time.monotonic() - t0) * 1000) + 
analytics.record(name, success=True, latency_ms=latency, source="chat") + # Auto-ingest trace for runtime lineage + if governance and isinstance(result, dict): + trace = result.get("_trace", {}) + if trace: + try: + governance.ingest_trace(name, trace) + except Exception: + pass # best-effort + return {"result": result} + except Exception as e: + latency = int((time.monotonic() - t0) * 1000) + analytics.record(name, success=False, latency_ms=latency, source="chat", error=str(e)) + return JSONResponse({"error": str(e)}, status_code=502) + + @app.post("/api/agents/{name}/chat/stream") + async def api_chat_stream(name: str, body: ChatRequest): + """Stream an A2A message response as SSE events.""" + from starlette.responses import StreamingResponse + + agent = scanner.get_agent_by_name(name) + if not agent: + return JSONResponse({"error": "Agent not found"}, status_code=404) + + import json + + async def event_generator(): + try: + async for event in scanner.stream_a2a_message( + agent.endpoint_url, body.message + ): + yield f"data: {json.dumps(event)}\n\n" + except Exception as e: + yield f"data: {json.dumps({'error': str(e)})}\n\n" + + return StreamingResponse( + event_generator(), + media_type="text/event-stream", + headers={"Cache-Control": "no-cache", "X-Accel-Buffering": "no"}, + ) + + # --- Analytics & Evaluation API ---------------------------------------- + + @app.get("/api/agents/{name}/analytics") + async def api_agent_analytics(name: str): + """Return invocation analytics summary for an agent.""" + return analytics.get_summary(name) + + @app.post("/api/agents/{name}/evaluate") + async def api_evaluate_agent(name: str, request: Request): + """Run eval bridge — send messages to agent and return structured result.""" + agent = scanner.get_agent_by_name(name) + if not agent: + return JSONResponse({"error": "Agent not found"}, status_code=404) + if not agent.endpoint_url: + return JSONResponse({"error": "Agent has no endpoint URL"}, status_code=400) + + body = 
await request.json() + messages = body.get("messages", []) + if not messages: + return JSONResponse({"error": "messages required"}, status_code=400) + + from dbx_agent_app.bridge.eval import app_predict_fn + + # Get auth token from scanner's workspace client if available + token = None + try: + ws = scanner._discovery._w if hasattr(scanner, "_discovery") else None + if ws: + auth = ws.config.authenticate() + if callable(auth): + headers = auth() + if headers: + token = dict(headers).get("Authorization", "").replace("Bearer ", "") + except Exception: + pass + + t0 = time.monotonic() + try: + predict = app_predict_fn(agent.endpoint_url, token=token) + result = predict(messages=messages) + latency = int((time.monotonic() - t0) * 1000) + analytics.record(name, success=True, latency_ms=latency, source="evaluate") + return result + except Exception as e: + latency = int((time.monotonic() - t0) * 1000) + analytics.record(name, success=False, latency_ms=latency, source="evaluate", error=str(e)) + return JSONResponse({"error": str(e)}, status_code=502) + + # --- Lineage & Governance API ------------------------------------------ + + @app.get("/api/agents/{name}/lineage") + async def api_agent_lineage(name: str): + if not governance: + return JSONResponse({"error": "Governance service not available"}, status_code=503) + agent = scanner.get_agent_by_name(name) + if not agent: + return JSONResponse({"error": "Agent not found"}, status_code=404) + try: + graph = await governance.get_agent_lineage(agent.name) + return graph.to_dict() + except Exception as e: + return JSONResponse({"error": str(e)}, status_code=500) + + @app.get("/api/agents/{name}/governance") + async def api_agent_governance(name: str): + if not governance: + return JSONResponse({"error": "Governance service not available"}, status_code=503) + agent = scanner.get_agent_by_name(name) + if not agent: + return JSONResponse({"error": "Agent not found"}, status_code=404) + try: + status = await 
governance.get_governance_status(agent.name) + return status + except Exception as e: + return JSONResponse({"error": str(e)}, status_code=500) + + @app.get("/api/lineage") + async def api_workspace_lineage(warehouse_id: Optional[str] = None): + if not governance: + return JSONResponse({"error": "Governance service not available"}, status_code=503) + try: + graph = await governance.get_workspace_lineage(warehouse_id=warehouse_id) + return graph.to_dict() + except Exception as e: + return JSONResponse({"error": str(e)}, status_code=500) + + @app.post("/api/lineage/observe") + async def api_observe_trace(request: Request): + if not governance: + return JSONResponse({"error": "Governance not available"}, status_code=503) + body = await request.json() + agent_name = body.get("agent_name") + trace = body.get("trace", {}) + if not agent_name: + return JSONResponse({"error": "agent_name required"}, status_code=400) + governance.ingest_trace(agent_name, trace) + return {"ok": True} + + @app.post("/api/scan") + async def api_scan(): + agents = await scanner.scan() + if governance: + governance.invalidate_cache() + return { + "count": len(agents), + "agents": [a.name for a in agents], + "lineage_refreshed": governance is not None, + } + + # --- System Builder API ----------------------------------------------- + + @app.get("/api/systems") + async def api_list_systems(): + if not system_builder: + return JSONResponse({"error": "System builder not available"}, status_code=503) + return [s.model_dump() for s in system_builder.list_systems()] + + @app.post("/api/systems") + async def api_create_system(request: Request): + if not system_builder: + return JSONResponse({"error": "System builder not available"}, status_code=503) + body = await request.json() + try: + data = SystemCreate(**body) + defn = system_builder.create_system(data) + return defn.model_dump() + except ValueError as e: + return JSONResponse({"error": str(e)}, status_code=400) + + 
@app.get("/api/systems/{system_id}") + async def api_get_system(system_id: str): + if not system_builder: + return JSONResponse({"error": "System builder not available"}, status_code=503) + defn = system_builder.get_system(system_id) + if not defn: + return JSONResponse({"error": "System not found"}, status_code=404) + return defn.model_dump() + + @app.put("/api/systems/{system_id}") + async def api_update_system(system_id: str, request: Request): + if not system_builder: + return JSONResponse({"error": "System builder not available"}, status_code=503) + body = await request.json() + try: + data = SystemUpdate(**body) + defn = system_builder.update_system(system_id, data) + if not defn: + return JSONResponse({"error": "System not found"}, status_code=404) + return defn.model_dump() + except ValueError as e: + return JSONResponse({"error": str(e)}, status_code=400) + + @app.delete("/api/systems/{system_id}") + async def api_delete_system(system_id: str): + if not system_builder: + return JSONResponse({"error": "System builder not available"}, status_code=503) + if system_builder.delete_system(system_id): + return {"ok": True} + return JSONResponse({"error": "System not found"}, status_code=404) + + @app.post("/api/systems/{system_id}/deploy") + async def api_deploy_system(system_id: str, request: Request): + if not system_builder: + return JSONResponse({"error": "System builder not available"}, status_code=503) + body = {} + try: + body = await request.json() + except Exception: + pass + # Async mode: start background deploy and return immediately + if isinstance(body, dict) and body.get("async"): + progress = system_builder.start_deploy(system_id) + return progress.model_dump() + # Sync mode (legacy): wait for full result + result = await system_builder.deploy_system(system_id) + return result.model_dump() + + @app.get("/api/systems/{system_id}/deploy/status") + async def api_deploy_status(system_id: str): + if not system_builder: + return JSONResponse({"error": 
"System builder not available"}, status_code=503) + progress = system_builder.get_deploy_status(system_id) + if not progress: + return JSONResponse({"error": "No active deploy"}, status_code=404) + return progress.model_dump() + + @app.get("/health") + async def health(): + return { + "status": "ok", + "agents_cached": len(scanner.get_agents()), + "profile": profile, + } + + # --- SPA / HTML fallback ----------------------------------------------- + + if has_spa: + # Mount static assets (JS, CSS bundles) + assets_dir = STATIC_DIR / "assets" + if assets_dir.is_dir(): + app.mount("/assets", StaticFiles(directory=str(assets_dir)), name="assets") + + @app.get("/{full_path:path}") + async def spa_catch_all(full_path: str): + """Serve React SPA index.html for all non-API routes.""" + # Check if it's a direct static file request + static_file = STATIC_DIR / full_path + if static_file.is_file() and not full_path.startswith("api/"): + return FileResponse(str(static_file)) + # Otherwise serve the SPA + return FileResponse(str(STATIC_DIR / "index.html")) + else: + # Fallback: server-rendered HTML pages + logger.info("No React build found at %s — using server-rendered HTML", STATIC_DIR) + + @app.get("/", response_class=HTMLResponse) + async def index(): + agents = scanner.get_agents() + return render_agent_list(agents) + + @app.get("/agent/{name}", response_class=HTMLResponse) + async def agent_detail(name: str): + agent = scanner.get_agent_by_name(name) + if not agent: + return HTMLResponse("

<h1>Agent not found</h1>

", status_code=404) + + card = None + try: + card = await scanner.get_agent_card(agent.endpoint_url) + except Exception as e: + logger.warning("Could not fetch card for %s: %s", name, e) + + return render_agent_detail(agent, card) + + return app diff --git a/dbx-agent-app/src/dbx_agent_app/dashboard/app.yaml b/dbx-agent-app/src/dbx_agent_app/dashboard/app.yaml new file mode 100644 index 00000000..7ae6f833 --- /dev/null +++ b/dbx-agent-app/src/dbx_agent_app/dashboard/app.yaml @@ -0,0 +1,11 @@ +command: + - uvicorn + - dbx_agent_app.dashboard.__main__:app + - --host + - 0.0.0.0 + - --port + - "8000" + +env: + - name: UC_CATALOG + value: main diff --git a/dbx-agent-app/src/dbx_agent_app/dashboard/cli.py b/dbx-agent-app/src/dbx_agent_app/dashboard/cli.py new file mode 100644 index 00000000..c6b63b4f --- /dev/null +++ b/dbx-agent-app/src/dbx_agent_app/dashboard/cli.py @@ -0,0 +1,74 @@ +""" +CLI entry point for the developer dashboard. + +Usage: + dbx-agent-app dashboard --profile my-profile --port 8501 + +Can be invoked directly or via the top-level CLI dispatcher (cli.py). +""" + +import argparse +import asyncio +import logging +import sys +import webbrowser + +import uvicorn + +from .app import create_dashboard_app +from .governance import GovernanceService +from .scanner import DashboardScanner +from .system_builder import SystemBuilderService + + +def run_dashboard(args): + """Launch the dashboard. 
Called from the top-level CLI dispatcher.""" + scanner = DashboardScanner(profile=args.profile) + + print(f"Scanning workspace for agents (profile={args.profile or 'default'})...") + try: + agents = asyncio.run(scanner.scan()) + print(f"Found {len(agents)} agent(s)") + except Exception as e: + print(f"Initial scan failed: {e}", file=sys.stderr) + print("Dashboard will start anyway — use the Scan button to retry.") + + governance = GovernanceService(scanner, profile=args.profile, catalog=args.catalog) + system_builder = SystemBuilderService(scanner=scanner, profile=args.profile) + app = create_dashboard_app(scanner, profile=args.profile, governance=governance, system_builder=system_builder) + + url = f"http://{args.host}:{args.port}" + if not args.no_browser: + webbrowser.open(url) + + print(f"Dashboard running at {url}") + uvicorn.run(app, host=args.host, port=args.port, log_level="warning") + + +def main(): + """Standalone entry point (for backwards compatibility).""" + parser = argparse.ArgumentParser( + prog="dbx-agent-app", + description="Developer dashboard for Databricks agent discovery", + ) + sub = parser.add_subparsers(dest="command") + + dash = sub.add_parser("dashboard", help="Launch the agent discovery dashboard") + dash.add_argument("--profile", type=str, default=None, help="Databricks CLI profile") + dash.add_argument("--port", type=int, default=8501, help="Port to serve on (default: 8501)") + dash.add_argument("--host", type=str, default="127.0.0.1", help="Host to bind (default: 127.0.0.1)") + dash.add_argument("--catalog", type=str, default=None, help="UC catalog to scan for tables/functions (enables lineage)") + dash.add_argument("--no-browser", action="store_true", help="Don't auto-open browser") + + args = parser.parse_args() + + if args.command != "dashboard": + parser.print_help() + sys.exit(1) + + logging.basicConfig(level=logging.INFO, format="%(levelname)s %(name)s: %(message)s") + run_dashboard(args) + + +if __name__ == "__main__": + main() 
diff --git a/dbx-agent-app/src/dbx_agent_app/dashboard/data/systems.json b/dbx-agent-app/src/dbx_agent_app/dashboard/data/systems.json new file mode 100644 index 00000000..763ca980 --- /dev/null +++ b/dbx-agent-app/src/dbx_agent_app/dashboard/data/systems.json @@ -0,0 +1,29 @@ +[ + { + "id": "sgp-multi-agent-001", + "name": "SGP Multi-Agent System", + "description": "Guidepoint expert network platform \u2014 supervisor orchestrates research, expert-finder, analytics, and compliance agents.", + "agents": [ + "research", + "expert-finder", + "analytics", + "compliance" + ], + "edges": [ + { + "source_agent": "research", + "target_agent": "expert-finder", + "env_var": "RESEARCH_URL" + }, + { + "source_agent": "research", + "target_agent": "analytics", + "env_var": "RESEARCH_URL" + } + ], + "uc_catalog": "serverless_dxukih_catalog", + "uc_schema": "agents", + "created_at": "2026-03-03T21:35:00+00:00", + "updated_at": "2026-03-05T15:45:17.413143+00:00" + } +] \ No newline at end of file diff --git a/dbx-agent-app/src/dbx_agent_app/dashboard/frontend/.gitignore b/dbx-agent-app/src/dbx_agent_app/dashboard/frontend/.gitignore new file mode 100644 index 00000000..c2658d7d --- /dev/null +++ b/dbx-agent-app/src/dbx_agent_app/dashboard/frontend/.gitignore @@ -0,0 +1 @@ +node_modules/ diff --git a/dbx-agent-app/src/dbx_agent_app/dashboard/frontend/index.html b/dbx-agent-app/src/dbx_agent_app/dashboard/frontend/index.html new file mode 100644 index 00000000..ef41ec2a --- /dev/null +++ b/dbx-agent-app/src/dbx_agent_app/dashboard/frontend/index.html @@ -0,0 +1,12 @@ + + + + + + databricks-agents dashboard + + +
+ + + diff --git a/dbx-agent-app/src/dbx_agent_app/dashboard/frontend/package-lock.json b/dbx-agent-app/src/dbx_agent_app/dashboard/frontend/package-lock.json new file mode 100644 index 00000000..68c759f5 --- /dev/null +++ b/dbx-agent-app/src/dbx_agent_app/dashboard/frontend/package-lock.json @@ -0,0 +1,2132 @@ +{ + "name": "dbx-agent-app-dashboard", + "version": "0.1.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "dbx-agent-app-dashboard", + "version": "0.1.0", + "dependencies": { + "@dagrejs/dagre": "^2.0.4", + "@xyflow/react": "^12.10.1", + "react": "^19.0.0", + "react-dom": "^19.0.0", + "react-router-dom": "^7.1.0" + }, + "devDependencies": { + "@types/react": "^19.0.0", + "@types/react-dom": "^19.0.0", + "@vitejs/plugin-react": "^4.3.0", + "typescript": "^5.7.0", + "vite": "^6.0.0" + } + }, + "node_modules/@babel/code-frame": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.29.0.tgz", + "integrity": "sha512-9NhCeYjq9+3uxgdtp20LSiJXJvN0FeCtNGpJxuMFZ1Kv3cWUNb6DOhJwUvcVCzKGR66cw4njwM6hrJLqgOwbcw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-validator-identifier": "^7.28.5", + "js-tokens": "^4.0.0", + "picocolors": "^1.1.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/compat-data": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.29.0.tgz", + "integrity": "sha512-T1NCJqT/j9+cn8fvkt7jtwbLBfLC/1y1c7NtCeXFRgzGTsafi68MRv8yzkYSapBnFA6L3U2VSc02ciDzoAJhJg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.29.0.tgz", + "integrity": "sha512-CGOfOJqWjg2qW/Mb6zNsDm+u5vFQ8DxXfbM09z69p5Z6+mE1ikP2jUXw+j42Pf1XTYED2Rni5f95npYeuwMDQA==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "@babel/code-frame": "^7.29.0", 
+ "@babel/generator": "^7.29.0", + "@babel/helper-compilation-targets": "^7.28.6", + "@babel/helper-module-transforms": "^7.28.6", + "@babel/helpers": "^7.28.6", + "@babel/parser": "^7.29.0", + "@babel/template": "^7.28.6", + "@babel/traverse": "^7.29.0", + "@babel/types": "^7.29.0", + "@jridgewell/remapping": "^2.3.5", + "convert-source-map": "^2.0.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.3", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/babel" + } + }, + "node_modules/@babel/generator": { + "version": "7.29.1", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.29.1.tgz", + "integrity": "sha512-qsaF+9Qcm2Qv8SRIMMscAvG4O3lJ0F1GuMo5HR/Bp02LopNgnZBC/EkbevHFeGs4ls/oPz9v+Bsmzbkbe+0dUw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.29.0", + "@babel/types": "^7.29.0", + "@jridgewell/gen-mapping": "^0.3.12", + "@jridgewell/trace-mapping": "^0.3.28", + "jsesc": "^3.0.2" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.28.6.tgz", + "integrity": "sha512-JYtls3hqi15fcx5GaSNL7SCTJ2MNmjrkHXg4FSpOA/grxK8KwyZ5bubHsCq8FXCkua6xhuaaBit+3b7+VZRfcA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/compat-data": "^7.28.6", + "@babel/helper-validator-option": "^7.27.1", + "browserslist": "^4.24.0", + "lru-cache": "^5.1.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-globals": { + "version": "7.28.0", + "resolved": "https://registry.npmjs.org/@babel/helper-globals/-/helper-globals-7.28.0.tgz", + "integrity": "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==", + "dev": true, + "license": "MIT", + 
"engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-imports": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.28.6.tgz", + "integrity": "sha512-l5XkZK7r7wa9LucGw9LwZyyCUscb4x37JWTPz7swwFE/0FMQAGpiWUZn8u9DzkSBWEcK25jmvubfpw2dnAMdbw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/traverse": "^7.28.6", + "@babel/types": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-transforms": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.28.6.tgz", + "integrity": "sha512-67oXFAYr2cDLDVGLXTEABjdBJZ6drElUSI7WKp70NrpyISso3plG9SAGEF6y7zbha/wOzUByWWTJvEDVNIUGcA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-module-imports": "^7.28.6", + "@babel/helper-validator-identifier": "^7.28.5", + "@babel/traverse": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-plugin-utils": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.28.6.tgz", + "integrity": "sha512-S9gzZ/bz83GRysI7gAD4wPT/AI3uCnY+9xn+Mx/KPs2JwHJIz1W8PZkg2cqyt3RNOBM8ejcXhV6y8Og7ly/Dug==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-string-parser": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz", + "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.28.5", + "resolved": 
"https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz", + "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-option": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.27.1.tgz", + "integrity": "sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helpers": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.28.6.tgz", + "integrity": "sha512-xOBvwq86HHdB7WUDTfKfT/Vuxh7gElQ+Sfti2Cy6yIWNW05P8iUslOVcZ4/sKbE+/jQaukQAdz/gf3724kYdqw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/template": "^7.28.6", + "@babel/types": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/parser": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.29.0.tgz", + "integrity": "sha512-IyDgFV5GeDUVX4YdF/3CPULtVGSXXMLh1xVIgdCgxApktqnQV0r7/8Nqthg+8YLGaAtdyIlo2qIdZrbCv4+7ww==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.29.0" + }, + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx-self": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.27.1.tgz", + "integrity": "sha512-6UzkCs+ejGdZ5mFFC/OCUrv028ab2fp1znZmCZjAOBKiBK2jXD1O+BPSfX8X2qjJ75fZBMSnQn3Rq2mrBJK2mw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + 
"peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx-source": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.27.1.tgz", + "integrity": "sha512-zbwoTsBruTeKB9hSq73ha66iFeJHuaFkUbwvqElnygoNbj/jHRsSeokowZFN3CZ64IvEqcmmkVe89OPXc7ldAw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/template": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.28.6.tgz", + "integrity": "sha512-YA6Ma2KsCdGb+WC6UpBVFJGXL58MDA6oyONbjyF/+5sBgxY/dwkhLogbMT2GXXyU84/IhRw/2D1Os1B/giz+BQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.28.6", + "@babel/parser": "^7.28.6", + "@babel/types": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/traverse": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.29.0.tgz", + "integrity": "sha512-4HPiQr0X7+waHfyXPZpWPfWL/J7dcN1mx9gL6WdQVMbPnF3+ZhSMs8tCxN7oHddJE9fhNE7+lxdnlyemKfJRuA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.29.0", + "@babel/generator": "^7.29.0", + "@babel/helper-globals": "^7.28.0", + "@babel/parser": "^7.29.0", + "@babel/template": "^7.28.6", + "@babel/types": "^7.29.0", + "debug": "^4.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/types": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.29.0.tgz", + "integrity": "sha512-LwdZHpScM4Qz8Xw2iKSzS+cfglZzJGvofQICy7W7v4caru4EaAmyUuO6BGrbyQ2mYV11W0U8j5mBhd14dd3B0A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-string-parser": "^7.27.1", + 
"@babel/helper-validator-identifier": "^7.28.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@dagrejs/dagre": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/@dagrejs/dagre/-/dagre-2.0.4.tgz", + "integrity": "sha512-J6vCWTNpicHF4zFlZG1cS5DkGzMr9941gddYkakjrg3ZNev4bbqEgLHFTWiFrcJm7UCRu7olO3K6IRDd9gSGhA==", + "license": "MIT", + "dependencies": { + "@dagrejs/graphlib": "3.0.4" + } + }, + "node_modules/@dagrejs/graphlib": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@dagrejs/graphlib/-/graphlib-3.0.4.tgz", + "integrity": "sha512-HxZ7fCvAwTLCWCO0WjDkzAFQze8LdC6iOpKbetDKHIuDfIgMlIzYzqZ4nxwLlclQX+3ZVeZ1K2OuaOE2WWcyOg==", + "license": "MIT" + }, + "node_modules/@esbuild/aix-ppc64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.25.12.tgz", + "integrity": "sha512-Hhmwd6CInZ3dwpuGTF8fJG6yoWmsToE+vYgD4nytZVxcu1ulHpUQRAB1UJ8+N1Am3Mz4+xOByoQoSZf4D+CpkA==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.25.12.tgz", + "integrity": "sha512-VJ+sKvNA/GE7Ccacc9Cha7bpS8nyzVv0jdVgwNDaR4gDMC/2TTRc33Ip8qrNYUcpkOHUT5OZ0bUcNNVZQ9RLlg==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.25.12.tgz", + "integrity": "sha512-6AAmLG7zwD1Z159jCKPvAxZd4y/VTO0VkprYy+3N2FtJ8+BQWFXU+OxARIwA46c5tdD9SsKGZ/1ocqBS/gAKHg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-x64": { + 
"version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.25.12.tgz", + "integrity": "sha512-5jbb+2hhDHx5phYR2By8GTWEzn6I9UqR11Kwf22iKbNpYrsmRB18aX/9ivc5cabcUiAT/wM+YIZ6SG9QO6a8kg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.25.12.tgz", + "integrity": "sha512-N3zl+lxHCifgIlcMUP5016ESkeQjLj/959RxxNYIthIg+CQHInujFuXeWbWMgnTo4cp5XVHqFPmpyu9J65C1Yg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.25.12.tgz", + "integrity": "sha512-HQ9ka4Kx21qHXwtlTUVbKJOAnmG1ipXhdWTmNXiPzPfWKpXqASVcWdnf2bnL73wgjNrFXAa3yYvBSd9pzfEIpA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.25.12.tgz", + "integrity": "sha512-gA0Bx759+7Jve03K1S0vkOu5Lg/85dou3EseOGUes8flVOGxbhDDh/iZaoek11Y8mtyKPGF3vP8XhnkDEAmzeg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.25.12.tgz", + "integrity": "sha512-TGbO26Yw2xsHzxtbVFGEXBFH0FRAP7gtcPE7P5yP7wGy7cXK2oO7RyOhL5NLiqTlBh47XhmIUXuGciXEqYFfBQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + 
"freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.25.12.tgz", + "integrity": "sha512-lPDGyC1JPDou8kGcywY0YILzWlhhnRjdof3UlcoqYmS9El818LLfJJc3PXXgZHrHCAKs/Z2SeZtDJr5MrkxtOw==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.25.12.tgz", + "integrity": "sha512-8bwX7a8FghIgrupcxb4aUmYDLp8pX06rGh5HqDT7bB+8Rdells6mHvrFHHW2JAOPZUbnjUpKTLg6ECyzvas2AQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.25.12.tgz", + "integrity": "sha512-0y9KrdVnbMM2/vG8KfU0byhUN+EFCny9+8g202gYqSSVMonbsCfLjUO+rCci7pM0WBEtz+oK/PIwHkzxkyharA==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.25.12.tgz", + "integrity": "sha512-h///Lr5a9rib/v1GGqXVGzjL4TMvVTv+s1DPoxQdz7l/AYv6LDSxdIwzxkrPW438oUXiDtwM10o9PmwS/6Z0Ng==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.25.12.tgz", + "integrity": "sha512-iyRrM1Pzy9GFMDLsXn1iHUm18nhKnNMWscjmp4+hpafcZjrr2WbT//d20xaGljXDBYHqRcl8HnxbX6uaA/eGVw==", 
+ "cpu": [ + "mips64el" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.25.12.tgz", + "integrity": "sha512-9meM/lRXxMi5PSUqEXRCtVjEZBGwB7P/D4yT8UG/mwIdze2aV4Vo6U5gD3+RsoHXKkHCfSxZKzmDssVlRj1QQA==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.25.12.tgz", + "integrity": "sha512-Zr7KR4hgKUpWAwb1f3o5ygT04MzqVrGEGXGLnj15YQDJErYu/BGg+wmFlIDOdJp0PmB0lLvxFIOXZgFRrdjR0w==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.25.12.tgz", + "integrity": "sha512-MsKncOcgTNvdtiISc/jZs/Zf8d0cl/t3gYWX8J9ubBnVOwlk65UIEEvgBORTiljloIWnBzLs4qhzPkJcitIzIg==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.25.12.tgz", + "integrity": "sha512-uqZMTLr/zR/ed4jIGnwSLkaHmPjOjJvnm6TVVitAa08SLS9Z0VM8wIRx7gWbJB5/J54YuIMInDquWyYvQLZkgw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.25.12.tgz", + "integrity": 
"sha512-xXwcTq4GhRM7J9A8Gv5boanHhRa/Q9KLVmcyXHCTaM4wKfIpWkdXiMog/KsnxzJ0A1+nD+zoecuzqPmCRyBGjg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.25.12.tgz", + "integrity": "sha512-Ld5pTlzPy3YwGec4OuHh1aCVCRvOXdH8DgRjfDy/oumVovmuSzWfnSJg+VtakB9Cm0gxNO9BzWkj6mtO1FMXkQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.25.12.tgz", + "integrity": "sha512-fF96T6KsBo/pkQI950FARU9apGNTSlZGsv1jZBAlcLL1MLjLNIWPBkj5NlSz8aAzYKg+eNqknrUJ24QBybeR5A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.25.12.tgz", + "integrity": "sha512-MZyXUkZHjQxUvzK7rN8DJ3SRmrVrke8ZyRusHlP+kuwqTcfWLyqMOE3sScPPyeIXN/mDJIfGXvcMqCgYKekoQw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openharmony-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.25.12.tgz", + "integrity": "sha512-rm0YWsqUSRrjncSXGA7Zv78Nbnw4XL6/dzr20cyrQf7ZmRcsovpcRBdhD43Nuk3y7XIoW2OxMVvwuRvk9XdASg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": 
"0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.25.12.tgz", + "integrity": "sha512-3wGSCDyuTHQUzt0nV7bocDy72r2lI33QL3gkDNGkod22EsYl04sMf0qLb8luNKTOmgF/eDEDP5BFNwoBKH441w==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.25.12.tgz", + "integrity": "sha512-rMmLrur64A7+DKlnSuwqUdRKyd3UE7oPJZmnljqEptesKM8wx9J8gx5u0+9Pq0fQQW8vqeKebwNXdfOyP+8Bsg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.25.12.tgz", + "integrity": "sha512-HkqnmmBoCbCwxUKKNPBixiWDGCpQGVsrQfJoVGYLPT41XWF8lHuE5N6WhVia2n4o5QK5M4tYr21827fNhi4byQ==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.25.12.tgz", + "integrity": "sha512-alJC0uCZpTFrSL0CCDjcgleBXPnCrEAhTBILpeAp7M/OFgoqtAetfBzX0xM00MUsVVPpVjlPuMbREqnZCXaTnA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.13", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz", + "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.0", + "@jridgewell/trace-mapping": 
"^0.3.24" + } + }, + "node_modules/@jridgewell/remapping": { + "version": "2.3.5", + "resolved": "https://registry.npmjs.org/@jridgewell/remapping/-/remapping-2.3.5.tgz", + "integrity": "sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", + "dev": true, + "license": "MIT" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.31", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz", + "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@rolldown/pluginutils": { + "version": "1.0.0-beta.27", + "resolved": "https://registry.npmjs.org/@rolldown/pluginutils/-/pluginutils-1.0.0-beta.27.tgz", + "integrity": "sha512-+d0F4MKMCbeVUJwG96uQ4SgAznZNSq93I3V+9NHA4OpvqG8mRCpGdKmK8l/dl02h2CCDHwW2FqilnTyDcAnqjA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@rollup/rollup-android-arm-eabi": { + "version": "4.59.0", + "resolved": 
"https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.59.0.tgz", + "integrity": "sha512-upnNBkA6ZH2VKGcBj9Fyl9IGNPULcjXRlg0LLeaioQWueH30p6IXtJEbKAgvyv+mJaMxSm1l6xwDXYjpEMiLMg==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-android-arm64": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.59.0.tgz", + "integrity": "sha512-hZ+Zxj3SySm4A/DylsDKZAeVg0mvi++0PYVceVyX7hemkw7OreKdCvW2oQ3T1FMZvCaQXqOTHb8qmBShoqk69Q==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-darwin-arm64": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.59.0.tgz", + "integrity": "sha512-W2Psnbh1J8ZJw0xKAd8zdNgF9HRLkdWwwdWqubSVk0pUuQkoHnv7rx4GiF9rT4t5DIZGAsConRE3AxCdJ4m8rg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-darwin-x64": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.59.0.tgz", + "integrity": "sha512-ZW2KkwlS4lwTv7ZVsYDiARfFCnSGhzYPdiOU4IM2fDbL+QGlyAbjgSFuqNRbSthybLbIJ915UtZBtmuLrQAT/w==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-freebsd-arm64": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.59.0.tgz", + "integrity": "sha512-EsKaJ5ytAu9jI3lonzn3BgG8iRBjV4LxZexygcQbpiU0wU0ATxhNVEpXKfUa0pS05gTcSDMKpn3Sx+QB9RlTTA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-freebsd-x64": { + "version": "4.59.0", + 
"resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.59.0.tgz", + "integrity": "sha512-d3DuZi2KzTMjImrxoHIAODUZYoUUMsuUiY4SRRcJy6NJoZ6iIqWnJu9IScV9jXysyGMVuW+KNzZvBLOcpdl3Vg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-linux-arm-gnueabihf": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.59.0.tgz", + "integrity": "sha512-t4ONHboXi/3E0rT6OZl1pKbl2Vgxf9vJfWgmUoCEVQVxhW6Cw/c8I6hbbu7DAvgp82RKiH7TpLwxnJeKv2pbsw==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm-musleabihf": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.59.0.tgz", + "integrity": "sha512-CikFT7aYPA2ufMD086cVORBYGHffBo4K8MQ4uPS/ZnY54GKj36i196u8U+aDVT2LX4eSMbyHtyOh7D7Zvk2VvA==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-gnu": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.59.0.tgz", + "integrity": "sha512-jYgUGk5aLd1nUb1CtQ8E+t5JhLc9x5WdBKew9ZgAXg7DBk0ZHErLHdXM24rfX+bKrFe+Xp5YuJo54I5HFjGDAA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-musl": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.59.0.tgz", + "integrity": "sha512-peZRVEdnFWZ5Bh2KeumKG9ty7aCXzzEsHShOZEFiCQlDEepP1dpUl/SrUNXNg13UmZl+gzVDPsiCwnV1uI0RUA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + 
"node_modules/@rollup/rollup-linux-loong64-gnu": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.59.0.tgz", + "integrity": "sha512-gbUSW/97f7+r4gHy3Jlup8zDG190AuodsWnNiXErp9mT90iCy9NKKU0Xwx5k8VlRAIV2uU9CsMnEFg/xXaOfXg==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loong64-musl": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.59.0.tgz", + "integrity": "sha512-yTRONe79E+o0FWFijasoTjtzG9EBedFXJMl888NBEDCDV9I2wGbFFfJQQe63OijbFCUZqxpHz1GzpbtSFikJ4Q==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-ppc64-gnu": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.59.0.tgz", + "integrity": "sha512-sw1o3tfyk12k3OEpRddF68a1unZ5VCN7zoTNtSn2KndUE+ea3m3ROOKRCZxEpmT9nsGnogpFP9x6mnLTCaoLkA==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-ppc64-musl": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.59.0.tgz", + "integrity": "sha512-+2kLtQ4xT3AiIxkzFVFXfsmlZiG5FXYW7ZyIIvGA7Bdeuh9Z0aN4hVyXS/G1E9bTP/vqszNIN/pUKCk/BTHsKA==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-gnu": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.59.0.tgz", + "integrity": "sha512-NDYMpsXYJJaj+I7UdwIuHHNxXZ/b/N2hR15NyH3m2qAtb/hHPA4g4SuuvrdxetTdndfj9b1WOmy73kcPRoERUg==", + "cpu": [ + "riscv64" + ], + "dev": 
true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-musl": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.59.0.tgz", + "integrity": "sha512-nLckB8WOqHIf1bhymk+oHxvM9D3tyPndZH8i8+35p/1YiVoVswPid2yLzgX7ZJP0KQvnkhM4H6QZ5m0LzbyIAg==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-s390x-gnu": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.59.0.tgz", + "integrity": "sha512-oF87Ie3uAIvORFBpwnCvUzdeYUqi2wY6jRFWJAy1qus/udHFYIkplYRW+wo+GRUP4sKzYdmE1Y3+rY5Gc4ZO+w==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-gnu": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.59.0.tgz", + "integrity": "sha512-3AHmtQq/ppNuUspKAlvA8HtLybkDflkMuLK4DPo77DfthRb71V84/c4MlWJXixZz4uruIH4uaa07IqoAkG64fg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-musl": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.59.0.tgz", + "integrity": "sha512-2UdiwS/9cTAx7qIUZB/fWtToJwvt0Vbo0zmnYt7ED35KPg13Q0ym1g442THLC7VyI6JfYTP4PiSOWyoMdV2/xg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-openbsd-x64": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.59.0.tgz", + "integrity": "sha512-M3bLRAVk6GOwFlPTIxVBSYKUaqfLrn8l0psKinkCFxl4lQvOSz8ZrKDz2gxcBwHFpci0B6rttydI4IpS4IS/jQ==", + 
"cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ] + }, + "node_modules/@rollup/rollup-openharmony-arm64": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.59.0.tgz", + "integrity": "sha512-tt9KBJqaqp5i5HUZzoafHZX8b5Q2Fe7UjYERADll83O4fGqJ49O1FsL6LpdzVFQcpwvnyd0i+K/VSwu/o/nWlA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ] + }, + "node_modules/@rollup/rollup-win32-arm64-msvc": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.59.0.tgz", + "integrity": "sha512-V5B6mG7OrGTwnxaNUzZTDTjDS7F75PO1ae6MJYdiMu60sq0CqN5CVeVsbhPxalupvTX8gXVSU9gq+Rx1/hvu6A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-ia32-msvc": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.59.0.tgz", + "integrity": "sha512-UKFMHPuM9R0iBegwzKF4y0C4J9u8C6MEJgFuXTBerMk7EJ92GFVFYBfOZaSGLu6COf7FxpQNqhNS4c4icUPqxA==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-gnu": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.59.0.tgz", + "integrity": "sha512-laBkYlSS1n2L8fSo1thDNGrCTQMmxjYY5G0WFWjFFYZkKPjsMBsgJfGf4TLxXrF6RyhI60L8TMOjBMvXiTcxeA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-msvc": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.59.0.tgz", + "integrity": 
"sha512-2HRCml6OztYXyJXAvdDXPKcawukWY2GpR5/nxKp4iBgiO3wcoEGkAaqctIbZcNB6KlUQBIqt8VYkNSj2397EfA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@types/babel__core": { + "version": "7.20.5", + "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.5.tgz", + "integrity": "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.20.7", + "@babel/types": "^7.20.7", + "@types/babel__generator": "*", + "@types/babel__template": "*", + "@types/babel__traverse": "*" + } + }, + "node_modules/@types/babel__generator": { + "version": "7.27.0", + "resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.27.0.tgz", + "integrity": "sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__template": { + "version": "7.4.4", + "resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.4.tgz", + "integrity": "sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.1.0", + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__traverse": { + "version": "7.28.0", + "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.28.0.tgz", + "integrity": "sha512-8PvcXf70gTDZBgt9ptxJ8elBeBjcLOAcOtoO/mPJjtji1+CdGbHgm77om1GrsPxsiE+uXIpNSK64UYaIwQXd4Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.28.2" + } + }, + "node_modules/@types/d3-color": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/@types/d3-color/-/d3-color-3.1.3.tgz", + "integrity": 
"sha512-iO90scth9WAbmgv7ogoq57O9YpKmFBbmoEoCHDB2xMBY0+/KVrqAaCDyCE16dUspeOvIxFFRI+0sEtqDqy2b4A==", + "license": "MIT" + }, + "node_modules/@types/d3-drag": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/@types/d3-drag/-/d3-drag-3.0.7.tgz", + "integrity": "sha512-HE3jVKlzU9AaMazNufooRJ5ZpWmLIoc90A37WU2JMmeq28w1FQqCZswHZ3xR+SuxYftzHq6WU6KJHvqxKzTxxQ==", + "license": "MIT", + "dependencies": { + "@types/d3-selection": "*" + } + }, + "node_modules/@types/d3-interpolate": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/d3-interpolate/-/d3-interpolate-3.0.4.tgz", + "integrity": "sha512-mgLPETlrpVV1YRJIglr4Ez47g7Yxjl1lj7YKsiMCb27VJH9W8NVM6Bb9d8kkpG/uAQS5AmbA48q2IAolKKo1MA==", + "license": "MIT", + "dependencies": { + "@types/d3-color": "*" + } + }, + "node_modules/@types/d3-selection": { + "version": "3.0.11", + "resolved": "https://registry.npmjs.org/@types/d3-selection/-/d3-selection-3.0.11.tgz", + "integrity": "sha512-bhAXu23DJWsrI45xafYpkQ4NtcKMwWnAC/vKrd2l+nxMFuvOT3XMYTIj2opv8vq8AO5Yh7Qac/nSeP/3zjTK0w==", + "license": "MIT" + }, + "node_modules/@types/d3-transition": { + "version": "3.0.9", + "resolved": "https://registry.npmjs.org/@types/d3-transition/-/d3-transition-3.0.9.tgz", + "integrity": "sha512-uZS5shfxzO3rGlu0cC3bjmMFKsXv+SmZZcgp0KD22ts4uGXp5EVYGzu/0YdwZeKmddhcAccYtREJKkPfXkZuCg==", + "license": "MIT", + "dependencies": { + "@types/d3-selection": "*" + } + }, + "node_modules/@types/d3-zoom": { + "version": "3.0.8", + "resolved": "https://registry.npmjs.org/@types/d3-zoom/-/d3-zoom-3.0.8.tgz", + "integrity": "sha512-iqMC4/YlFCSlO8+2Ii1GGGliCAY4XdeG748w5vQUbevlbDu0zSjH/+jojorQVBK/se0j6DUFNPBGSqD3YWYnDw==", + "license": "MIT", + "dependencies": { + "@types/d3-interpolate": "*", + "@types/d3-selection": "*" + } + }, + "node_modules/@types/estree": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", + "integrity": 
"sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/react": { + "version": "19.2.14", + "resolved": "https://registry.npmjs.org/@types/react/-/react-19.2.14.tgz", + "integrity": "sha512-ilcTH/UniCkMdtexkoCN0bI7pMcJDvmQFPvuPvmEaYA/NSfFTAgdUSLAoVjaRJm7+6PvcM+q1zYOwS4wTYMF9w==", + "devOptional": true, + "license": "MIT", + "peer": true, + "dependencies": { + "csstype": "^3.2.2" + } + }, + "node_modules/@types/react-dom": { + "version": "19.2.3", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-19.2.3.tgz", + "integrity": "sha512-jp2L/eY6fn+KgVVQAOqYItbF0VY/YApe5Mz2F0aykSO8gx31bYCZyvSeYxCHKvzHG5eZjc+zyaS5BrBWya2+kQ==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "@types/react": "^19.2.0" + } + }, + "node_modules/@vitejs/plugin-react": { + "version": "4.7.0", + "resolved": "https://registry.npmjs.org/@vitejs/plugin-react/-/plugin-react-4.7.0.tgz", + "integrity": "sha512-gUu9hwfWvvEDBBmgtAowQCojwZmJ5mcLn3aufeCsitijs3+f2NsrPtlAWIR6OPiqljl96GVCUbLe0HyqIpVaoA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/core": "^7.28.0", + "@babel/plugin-transform-react-jsx-self": "^7.27.1", + "@babel/plugin-transform-react-jsx-source": "^7.27.1", + "@rolldown/pluginutils": "1.0.0-beta.27", + "@types/babel__core": "^7.20.5", + "react-refresh": "^0.17.0" + }, + "engines": { + "node": "^14.18.0 || >=16.0.0" + }, + "peerDependencies": { + "vite": "^4.2.0 || ^5.0.0 || ^6.0.0 || ^7.0.0" + } + }, + "node_modules/@xyflow/react": { + "version": "12.10.1", + "resolved": "https://registry.npmjs.org/@xyflow/react/-/react-12.10.1.tgz", + "integrity": "sha512-5eSWtIK/+rkldOuFbOOz44CRgQRjtS9v5nufk77DV+XBnfCGL9HAQ8PG00o2ZYKqkEU/Ak6wrKC95Tu+2zuK3Q==", + "license": "MIT", + "dependencies": { + "@xyflow/system": "0.0.75", + "classcat": "^5.0.3", + "zustand": "^4.4.0" + }, + "peerDependencies": { + "react": ">=17", + "react-dom": ">=17" + } 
+ }, + "node_modules/@xyflow/system": { + "version": "0.0.75", + "resolved": "https://registry.npmjs.org/@xyflow/system/-/system-0.0.75.tgz", + "integrity": "sha512-iXs+AGFLi8w/VlAoc/iSxk+CxfT6o64Uw/k0CKASOPqjqz6E0rb5jFZgJtXGZCpfQI6OQpu5EnumP5fGxQheaQ==", + "license": "MIT", + "dependencies": { + "@types/d3-drag": "^3.0.7", + "@types/d3-interpolate": "^3.0.4", + "@types/d3-selection": "^3.0.10", + "@types/d3-transition": "^3.0.8", + "@types/d3-zoom": "^3.0.8", + "d3-drag": "^3.0.0", + "d3-interpolate": "^3.0.1", + "d3-selection": "^3.0.0", + "d3-zoom": "^3.0.0" + } + }, + "node_modules/baseline-browser-mapping": { + "version": "2.10.0", + "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.10.0.tgz", + "integrity": "sha512-lIyg0szRfYbiy67j9KN8IyeD7q7hcmqnJ1ddWmNt19ItGpNN64mnllmxUNFIOdOm6by97jlL6wfpTTJrmnjWAA==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "baseline-browser-mapping": "dist/cli.cjs" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/browserslist": { + "version": "4.28.1", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.28.1.tgz", + "integrity": "sha512-ZC5Bd0LgJXgwGqUknZY/vkUQ04r8NXnJZ3yYi4vDmSiZmC/pdSN0NbNRPxZpbtO4uAfDUAFffO8IZoM3Gj8IkA==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "peer": true, + "dependencies": { + "baseline-browser-mapping": "^2.9.0", + "caniuse-lite": "^1.0.30001759", + "electron-to-chromium": "^1.5.263", + "node-releases": "^2.0.27", + "update-browserslist-db": "^1.2.0" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001774", + 
"resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001774.tgz", + "integrity": "sha512-DDdwPGz99nmIEv216hKSgLD+D4ikHQHjBC/seF98N9CPqRX4M5mSxT9eTV6oyisnJcuzxtZy4n17yKKQYmYQOA==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "CC-BY-4.0" + }, + "node_modules/classcat": { + "version": "5.0.5", + "resolved": "https://registry.npmjs.org/classcat/-/classcat-5.0.5.tgz", + "integrity": "sha512-JhZUT7JFcQy/EzW605k/ktHtncoo9vnyW/2GspNYwFlN1C/WmjuV/xtS04e9SOkL2sTdw0VAZ2UGCcQ9lR6p6w==", + "license": "MIT" + }, + "node_modules/convert-source-map": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", + "dev": true, + "license": "MIT" + }, + "node_modules/cookie": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-1.1.1.tgz", + "integrity": "sha512-ei8Aos7ja0weRpFzJnEA9UHJ/7XQmqglbRwnf2ATjcB9Wq874VKH9kfjjirM6UhU2/E5fFYadylyhFldcqSidQ==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/csstype": { + "version": "3.2.3", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.2.3.tgz", + "integrity": "sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==", + "devOptional": true, + "license": "MIT" + }, + "node_modules/d3-color": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/d3-color/-/d3-color-3.1.0.tgz", + "integrity": "sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA==", 
+ "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-dispatch": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-dispatch/-/d3-dispatch-3.0.1.tgz", + "integrity": "sha512-rzUyPU/S7rwUflMyLc1ETDeBj0NRuHKKAcvukozwhshr6g6c5d8zh4c2gQjY2bZ0dXeGLWc1PF174P2tVvKhfg==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-drag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/d3-drag/-/d3-drag-3.0.0.tgz", + "integrity": "sha512-pWbUJLdETVA8lQNJecMxoXfH6x+mO2UQo8rSmZ+QqxcbyA3hfeprFgIT//HW2nlHChWeIIMwS2Fq+gEARkhTkg==", + "license": "ISC", + "dependencies": { + "d3-dispatch": "1 - 3", + "d3-selection": "3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-ease": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-ease/-/d3-ease-3.0.1.tgz", + "integrity": "sha512-wR/XK3D3XcLIZwpbvQwQ5fK+8Ykds1ip7A2Txe0yxncXSdq1L9skcG7blcedkOX+ZcgxGAmLX1FrRGbADwzi0w==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-interpolate": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-interpolate/-/d3-interpolate-3.0.1.tgz", + "integrity": "sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g==", + "license": "ISC", + "dependencies": { + "d3-color": "1 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-selection": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/d3-selection/-/d3-selection-3.0.0.tgz", + "integrity": "sha512-fmTRWbNMmsmWq6xJV8D19U/gw/bwrHfNXxrIN+HfZgnzqTHp9jOmKMhsTUjXOJnZOdZY9Q28y4yebKzqDKlxlQ==", + "license": "ISC", + "peer": true, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-timer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-timer/-/d3-timer-3.0.1.tgz", + "integrity": "sha512-ndfJ/JxxMd3nw31uyKoY2naivF+r29V+Lc0svZxe1JvvIRmi8hUsrMvdOwgS1o6uBHmiz91geQ0ylPP0aj1VUA==", + "license": "ISC", + 
"engines": { + "node": ">=12" + } + }, + "node_modules/d3-transition": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-transition/-/d3-transition-3.0.1.tgz", + "integrity": "sha512-ApKvfjsSR6tg06xrL434C0WydLr7JewBB3V+/39RMHsaXTOG0zmt/OAXeng5M5LBm0ojmxJrpomQVZ1aPvBL4w==", + "license": "ISC", + "dependencies": { + "d3-color": "1 - 3", + "d3-dispatch": "1 - 3", + "d3-ease": "1 - 3", + "d3-interpolate": "1 - 3", + "d3-timer": "1 - 3" + }, + "engines": { + "node": ">=12" + }, + "peerDependencies": { + "d3-selection": "2 - 3" + } + }, + "node_modules/d3-zoom": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/d3-zoom/-/d3-zoom-3.0.0.tgz", + "integrity": "sha512-b8AmV3kfQaqWAuacbPuNbL6vahnOJflOhexLzMMNLga62+/nh0JzvJ0aO/5a5MVgUFGS7Hu1P9P03o3fJkDCyw==", + "license": "ISC", + "dependencies": { + "d3-dispatch": "1 - 3", + "d3-drag": "2 - 3", + "d3-interpolate": "1 - 3", + "d3-selection": "2 - 3", + "d3-transition": "2 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/electron-to-chromium": { + "version": "1.5.302", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.302.tgz", + "integrity": "sha512-sM6HAN2LyK82IyPBpznDRqlTQAtuSaO+ShzFiWTvoMJLHyZ+Y39r8VMfHzwbU8MVBzQ4Wdn85+wlZl2TLGIlwg==", + "dev": true, + "license": "ISC" + }, + "node_modules/esbuild": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.25.12.tgz", + "integrity": "sha512-bbPBYYrtZbkt6Os6FiTLCTFxvq4tt3JKall1vRwshA3fdVztsLAatFaZobhkBC8/BrPetoa0oksYoKXoG4ryJg==", + 
"dev": true, + "hasInstallScript": true, + "license": "MIT", + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=18" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.25.12", + "@esbuild/android-arm": "0.25.12", + "@esbuild/android-arm64": "0.25.12", + "@esbuild/android-x64": "0.25.12", + "@esbuild/darwin-arm64": "0.25.12", + "@esbuild/darwin-x64": "0.25.12", + "@esbuild/freebsd-arm64": "0.25.12", + "@esbuild/freebsd-x64": "0.25.12", + "@esbuild/linux-arm": "0.25.12", + "@esbuild/linux-arm64": "0.25.12", + "@esbuild/linux-ia32": "0.25.12", + "@esbuild/linux-loong64": "0.25.12", + "@esbuild/linux-mips64el": "0.25.12", + "@esbuild/linux-ppc64": "0.25.12", + "@esbuild/linux-riscv64": "0.25.12", + "@esbuild/linux-s390x": "0.25.12", + "@esbuild/linux-x64": "0.25.12", + "@esbuild/netbsd-arm64": "0.25.12", + "@esbuild/netbsd-x64": "0.25.12", + "@esbuild/openbsd-arm64": "0.25.12", + "@esbuild/openbsd-x64": "0.25.12", + "@esbuild/openharmony-arm64": "0.25.12", + "@esbuild/sunos-x64": "0.25.12", + "@esbuild/win32-arm64": "0.25.12", + "@esbuild/win32-ia32": "0.25.12", + "@esbuild/win32-x64": "0.25.12" + } + }, + "node_modules/escalade": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/fdir": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", + "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "picomatch": "^3 || ^4" + }, + "peerDependenciesMeta": { + "picomatch": { + "optional": true + } + } + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": 
"https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/gensync": { + "version": "1.0.0-beta.2", + "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/jsesc": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz", + "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==", + "dev": true, + "license": "MIT", + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/json5": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "dev": true, + "license": "MIT", + "bin": { + "json5": "lib/cli.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "dev": true, + "license": "ISC", + "dependencies": { + "yallist": "^3.0.2" + } + }, + "node_modules/ms": { 
+ "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/nanoid": { + "version": "3.3.11", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", + "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/node-releases": { + "version": "2.0.27", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.27.tgz", + "integrity": "sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "dev": true, + "license": "ISC" + }, + "node_modules/picomatch": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", + "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", + "dev": true, + "license": "MIT", + "peer": true, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/postcss": { + "version": "8.5.6", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.6.tgz", + "integrity": "sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==", + "dev": true, + "funding": [ + { + "type": 
"opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.11", + "picocolors": "^1.1.1", + "source-map-js": "^1.2.1" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/react": { + "version": "19.2.4", + "resolved": "https://registry.npmjs.org/react/-/react-19.2.4.tgz", + "integrity": "sha512-9nfp2hYpCwOjAN+8TZFGhtWEwgvWHXqESH8qT89AT/lWklpLON22Lc8pEtnpsZz7VmawabSU0gCjnj8aC0euHQ==", + "license": "MIT", + "peer": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-dom": { + "version": "19.2.4", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.2.4.tgz", + "integrity": "sha512-AXJdLo8kgMbimY95O2aKQqsz2iWi9jMgKJhRBAxECE4IFxfcazB2LmzloIoibJI3C12IlY20+KFaLv+71bUJeQ==", + "license": "MIT", + "peer": true, + "dependencies": { + "scheduler": "^0.27.0" + }, + "peerDependencies": { + "react": "^19.2.4" + } + }, + "node_modules/react-refresh": { + "version": "0.17.0", + "resolved": "https://registry.npmjs.org/react-refresh/-/react-refresh-0.17.0.tgz", + "integrity": "sha512-z6F7K9bV85EfseRCp2bzrpyQ0Gkw1uLoCel9XBVWPg/TjRj94SkJzUTGfOa4bs7iJvBWtQG0Wq7wnI0syw3EBQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-router": { + "version": "7.13.1", + "resolved": "https://registry.npmjs.org/react-router/-/react-router-7.13.1.tgz", + "integrity": "sha512-td+xP4X2/6BJvZoX6xw++A2DdEi++YypA69bJUV5oVvqf6/9/9nNlD70YO1e9d3MyamJEBQFEzk6mbfDYbqrSA==", + "license": "MIT", + "dependencies": { + "cookie": "^1.0.1", + "set-cookie-parser": "^2.6.0" + }, + "engines": { + "node": ">=20.0.0" + }, + "peerDependencies": { + "react": ">=18", + "react-dom": ">=18" + }, + "peerDependenciesMeta": { + "react-dom": { + "optional": true + } + } + }, + 
"node_modules/react-router-dom": { + "version": "7.13.1", + "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-7.13.1.tgz", + "integrity": "sha512-UJnV3Rxc5TgUPJt2KJpo1Jpy0OKQr0AjgbZzBFjaPJcFOb2Y8jA5H3LT8HUJAiRLlWrEXWHbF1Z4SCZaQjWDHw==", + "license": "MIT", + "dependencies": { + "react-router": "7.13.1" + }, + "engines": { + "node": ">=20.0.0" + }, + "peerDependencies": { + "react": ">=18", + "react-dom": ">=18" + } + }, + "node_modules/rollup": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.59.0.tgz", + "integrity": "sha512-2oMpl67a3zCH9H79LeMcbDhXW/UmWG/y2zuqnF2jQq5uq9TbM9TVyXvA4+t+ne2IIkBdrLpAaRQAvo7YI/Yyeg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "1.0.8" + }, + "bin": { + "rollup": "dist/bin/rollup" + }, + "engines": { + "node": ">=18.0.0", + "npm": ">=8.0.0" + }, + "optionalDependencies": { + "@rollup/rollup-android-arm-eabi": "4.59.0", + "@rollup/rollup-android-arm64": "4.59.0", + "@rollup/rollup-darwin-arm64": "4.59.0", + "@rollup/rollup-darwin-x64": "4.59.0", + "@rollup/rollup-freebsd-arm64": "4.59.0", + "@rollup/rollup-freebsd-x64": "4.59.0", + "@rollup/rollup-linux-arm-gnueabihf": "4.59.0", + "@rollup/rollup-linux-arm-musleabihf": "4.59.0", + "@rollup/rollup-linux-arm64-gnu": "4.59.0", + "@rollup/rollup-linux-arm64-musl": "4.59.0", + "@rollup/rollup-linux-loong64-gnu": "4.59.0", + "@rollup/rollup-linux-loong64-musl": "4.59.0", + "@rollup/rollup-linux-ppc64-gnu": "4.59.0", + "@rollup/rollup-linux-ppc64-musl": "4.59.0", + "@rollup/rollup-linux-riscv64-gnu": "4.59.0", + "@rollup/rollup-linux-riscv64-musl": "4.59.0", + "@rollup/rollup-linux-s390x-gnu": "4.59.0", + "@rollup/rollup-linux-x64-gnu": "4.59.0", + "@rollup/rollup-linux-x64-musl": "4.59.0", + "@rollup/rollup-openbsd-x64": "4.59.0", + "@rollup/rollup-openharmony-arm64": "4.59.0", + "@rollup/rollup-win32-arm64-msvc": "4.59.0", + "@rollup/rollup-win32-ia32-msvc": "4.59.0", + 
"@rollup/rollup-win32-x64-gnu": "4.59.0", + "@rollup/rollup-win32-x64-msvc": "4.59.0", + "fsevents": "~2.3.2" + } + }, + "node_modules/scheduler": { + "version": "0.27.0", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.27.0.tgz", + "integrity": "sha512-eNv+WrVbKu1f3vbYJT/xtiF5syA5HPIMtf9IgY/nKg0sWqzAUEvqY/xm7OcZc/qafLx/iO9FgOmeSAp4v5ti/Q==", + "license": "MIT" + }, + "node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/set-cookie-parser": { + "version": "2.7.2", + "resolved": "https://registry.npmjs.org/set-cookie-parser/-/set-cookie-parser-2.7.2.tgz", + "integrity": "sha512-oeM1lpU/UvhTxw+g3cIfxXHyJRc/uidd3yK1P242gzHds0udQBYzs3y8j4gCCW+ZJ7ad0yctld8RYO+bdurlvw==", + "license": "MIT" + }, + "node_modules/source-map-js": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/tinyglobby": { + "version": "0.2.15", + "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.15.tgz", + "integrity": "sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "fdir": "^6.5.0", + "picomatch": "^4.0.3" + }, + "engines": { + "node": ">=12.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/SuperchupuDev" + } + }, + "node_modules/typescript": { + "version": "5.9.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz", + "integrity": 
"sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/update-browserslist-db": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.2.3.tgz", + "integrity": "sha512-Js0m9cx+qOgDxo0eMiFGEueWztz+d4+M3rGlmKPT+T4IS/jP4ylw3Nwpu6cpTTP8R1MAC1kF4VbdLt3ARf209w==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "escalade": "^3.2.0", + "picocolors": "^1.1.1" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, + "node_modules/use-sync-external-store": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/use-sync-external-store/-/use-sync-external-store-1.6.0.tgz", + "integrity": "sha512-Pp6GSwGP/NrPIrxVFAIkOQeyw8lFenOHijQWkUTrDvrF4ALqylP2C/KCkeS9dpUM3KvYRQhna5vt7IL95+ZQ9w==", + "license": "MIT", + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/vite": { + "version": "6.4.1", + "resolved": "https://registry.npmjs.org/vite/-/vite-6.4.1.tgz", + "integrity": "sha512-+Oxm7q9hDoLMyJOYfUYBuHQo+dkAloi33apOPP56pzj+vsdJDzr+j1NISE5pyaAuKL4A3UD34qd0lx5+kfKp2g==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "esbuild": "^0.25.0", + "fdir": "^6.4.4", + "picomatch": "^4.0.2", + "postcss": "^8.5.3", + "rollup": "^4.34.9", + "tinyglobby": "^0.2.13" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^18.0.0 || ^20.0.0 || >=22.0.0" + }, + "funding": { + "url": 
"https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^18.0.0 || ^20.0.0 || >=22.0.0", + "jiti": ">=1.21.0", + "less": "*", + "lightningcss": "^1.21.0", + "sass": "*", + "sass-embedded": "*", + "stylus": "*", + "sugarss": "*", + "terser": "^5.16.0", + "tsx": "^4.8.1", + "yaml": "^2.4.2" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "jiti": { + "optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + }, + "sass": { + "optional": true + }, + "sass-embedded": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + }, + "tsx": { + "optional": true + }, + "yaml": { + "optional": true + } + } + }, + "node_modules/yallist": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", + "dev": true, + "license": "ISC" + }, + "node_modules/zustand": { + "version": "4.5.7", + "resolved": "https://registry.npmjs.org/zustand/-/zustand-4.5.7.tgz", + "integrity": "sha512-CHOUy7mu3lbD6o6LJLfllpjkzhHXSBlX8B9+qPddUsIfeF5S/UZ5q0kmCsnRqT1UHFQZchNFDDzMbQsuesHWlw==", + "license": "MIT", + "dependencies": { + "use-sync-external-store": "^1.2.2" + }, + "engines": { + "node": ">=12.7.0" + }, + "peerDependencies": { + "@types/react": ">=16.8", + "immer": ">=9.0.6", + "react": ">=16.8" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "immer": { + "optional": true + }, + "react": { + "optional": true + } + } + } + } +} diff --git a/dbx-agent-app/src/dbx_agent_app/dashboard/frontend/package.json b/dbx-agent-app/src/dbx_agent_app/dashboard/frontend/package.json new file mode 100644 index 00000000..8a2a553c --- /dev/null +++ 
b/dbx-agent-app/src/dbx_agent_app/dashboard/frontend/package.json @@ -0,0 +1,25 @@ +{ + "name": "dbx-agent-app-dashboard", + "private": true, + "version": "0.1.0", + "type": "module", + "scripts": { + "dev": "vite", + "build": "tsc && vite build", + "preview": "vite preview" + }, + "dependencies": { + "@dagrejs/dagre": "^2.0.4", + "@xyflow/react": "^12.10.1", + "react": "^19.0.0", + "react-dom": "^19.0.0", + "react-router-dom": "^7.1.0" + }, + "devDependencies": { + "@types/react": "^19.0.0", + "@types/react-dom": "^19.0.0", + "@vitejs/plugin-react": "^4.3.0", + "typescript": "^5.7.0", + "vite": "^6.0.0" + } +} diff --git a/dbx-agent-app/src/dbx_agent_app/dashboard/frontend/src/App.css b/dbx-agent-app/src/dbx_agent_app/dashboard/frontend/src/App.css new file mode 100644 index 00000000..132e6197 --- /dev/null +++ b/dbx-agent-app/src/dbx_agent_app/dashboard/frontend/src/App.css @@ -0,0 +1,1935 @@ +:root { + --bg: #1b1b2f; + --surface: #1f2937; + --border: #374151; + --text: #f3f4f6; + --muted: #9ca3af; + --accent: #3b82f6; + --accent-hover: #2563eb; + --green: #22c55e; + --red: #ef4444; + --yellow: #eab308; + --font-mono: "SF Mono", "Fira Code", "Cascadia Code", monospace; +} + +* { + margin: 0; + padding: 0; + box-sizing: border-box; +} + +body { + font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, + sans-serif; + background: var(--bg); + color: var(--text); + line-height: 1.6; +} + +a { + color: var(--accent); + text-decoration: none; +} +a:hover { + text-decoration: underline; +} + +/* Layout */ +.shell-header { + background: var(--surface); + border-bottom: 1px solid var(--border); + padding: 1rem 0; +} +.shell-header .container { + display: flex; + align-items: center; + justify-content: space-between; +} +.shell-header h1 { + font-size: 1.25rem; + font-weight: 600; +} +.shell-header h1 span { + color: var(--accent); +} +.nav-links a { + margin-left: 1.5rem; + color: var(--muted); + font-size: 0.9rem; +} +.nav-links a:hover { + color: 
var(--text); + text-decoration: none; +} +.container { + max-width: 1200px; + margin: 0 auto; + padding: 1.5rem; +} + +/* Cards grid */ +.grid { + display: grid; + grid-template-columns: repeat(auto-fill, minmax(320px, 1fr)); + gap: 1rem; + margin-top: 1.5rem; +} +.card { + background: var(--surface); + border: 1px solid var(--border); + border-radius: 8px; + padding: 1.25rem; + transition: border-color 0.15s; + cursor: pointer; +} +.card:hover { + border-color: var(--accent); +} +.card h3 { + font-size: 1rem; + margin-bottom: 0.4rem; +} +.card p { + color: var(--muted); + font-size: 0.875rem; +} +.card .meta { + display: flex; + gap: 0.75rem; + margin-top: 0.75rem; + font-size: 0.8rem; + color: var(--muted); + flex-wrap: wrap; +} + +/* Badges */ +.badge { + display: inline-block; + padding: 2px 8px; + border-radius: 9999px; + font-size: 0.75rem; + font-weight: 500; +} +.badge-green { + background: rgba(34, 197, 94, 0.15); + color: var(--green); +} +.badge-blue { + background: rgba(59, 130, 246, 0.15); + color: var(--accent); +} +.badge-red { + background: rgba(239, 68, 68, 0.15); + color: var(--red); +} + +/* Buttons */ +.btn { + display: inline-flex; + align-items: center; + gap: 0.4rem; + padding: 0.5rem 1rem; + border-radius: 6px; + border: none; + cursor: pointer; + font-size: 0.875rem; + font-weight: 500; + transition: all 0.15s; +} +.btn:disabled { + opacity: 0.5; + cursor: not-allowed; +} +.btn-primary { + background: var(--accent); + color: #fff; +} +.btn-primary:hover:not(:disabled) { + background: var(--accent-hover); +} +.btn-outline { + background: transparent; + border: 1px solid var(--border); + color: var(--text); +} +.btn-outline:hover:not(:disabled) { + border-color: var(--accent); + color: var(--accent); +} +.btn-sm { + padding: 0.3rem 0.6rem; + font-size: 0.8rem; +} +.btn-danger { + background: rgba(239, 68, 68, 0.15); + color: var(--red); + border: 1px solid rgba(239, 68, 68, 0.3); +} + +/* Sections */ +.section { + background: var(--surface); 
+ border: 1px solid var(--border); + border-radius: 8px; + padding: 1.25rem; + margin-bottom: 1rem; +} +.section h3 { + font-size: 1rem; + margin-bottom: 0.75rem; + color: var(--accent); +} + +/* Code */ +pre { + background: #111827; + border-radius: 6px; + padding: 1rem; + overflow-x: auto; + font-size: 0.8rem; + line-height: 1.5; +} +code { + font-family: var(--font-mono); +} + +/* Toolbar */ +.toolbar { + display: flex; + align-items: center; + justify-content: space-between; + margin-top: 1.5rem; +} +.status-text { + font-size: 0.85rem; + color: var(--muted); +} + +/* Empty state */ +.empty-state { + text-align: center; + padding: 4rem 1rem; + color: var(--muted); +} +.empty-state p { + margin-top: 0.5rem; + font-size: 0.9rem; +} + +/* Spinner */ +.spinner { + display: inline-block; + width: 14px; + height: 14px; + border: 2px solid var(--border); + border-top-color: var(--accent); + border-radius: 50%; + animation: spin 0.6s linear infinite; +} +.spinner-lg { + width: 24px; + height: 24px; + border-width: 3px; +} +@keyframes spin { + to { + transform: rotate(360deg); + } +} + +/* Tabs */ +.tab-bar { + display: flex; + gap: 0; + border-bottom: 1px solid var(--border); + margin-bottom: 1.5rem; +} +.tab-btn { + padding: 0.75rem 1.25rem; + background: transparent; + border: none; + border-bottom: 2px solid transparent; + color: var(--muted); + cursor: pointer; + font-size: 0.9rem; + font-weight: 500; + transition: all 0.15s; +} +.tab-btn:hover { + color: var(--text); +} +.tab-btn.active { + color: var(--accent); + border-bottom-color: var(--accent); +} + +/* Detail header */ +.detail-header { + margin-bottom: 1.5rem; +} +.detail-header h2 { + font-size: 1.5rem; + margin-top: 0.5rem; +} + +/* Tool rows */ +.tool-row { + padding: 0.5rem 0; + border-bottom: 1px solid var(--border); +} +.tool-row:last-child { + border-bottom: none; +} +.tool-name { + font-weight: 600; +} +.tool-desc { + color: var(--muted); + font-size: 0.85rem; +} + +/* Textarea */ +textarea { + 
width: 100%; + background: #111827; + color: var(--text); + border: 1px solid var(--border); + border-radius: 6px; + padding: 0.75rem; + font-family: var(--font-mono); + font-size: 0.8rem; + resize: vertical; +} +textarea:focus { + outline: none; + border-color: var(--accent); +} + +/* Chat */ +.chat-layout { + display: grid; + grid-template-columns: 1fr 340px; + gap: 1rem; + height: calc(100vh - 260px); + min-height: 400px; +} +.chat-main { + display: flex; + flex-direction: column; + background: var(--surface); + border: 1px solid var(--border); + border-radius: 8px; + overflow: hidden; +} +.message-list { + flex: 1; + overflow-y: auto; + padding: 1rem; +} +.message-input-area { + border-top: 1px solid var(--border); + padding: 0.75rem; + display: flex; + gap: 0.5rem; +} +.message-input-area textarea { + flex: 1; + min-height: 40px; + max-height: 120px; +} + +/* Message bubbles */ +.message-bubble { + margin-bottom: 1rem; + max-width: 80%; +} +.message-bubble.user { + margin-left: auto; +} +.message-bubble .role-label { + font-size: 0.75rem; + color: var(--muted); + margin-bottom: 0.25rem; +} +.message-bubble .content { + padding: 0.75rem 1rem; + border-radius: 8px; + font-size: 0.9rem; + line-height: 1.5; + white-space: pre-wrap; + word-break: break-word; +} +.message-bubble.user .content { + background: var(--accent); + color: #fff; + border-bottom-right-radius: 2px; +} +.message-bubble.agent .content { + background: #111827; + border-bottom-left-radius: 2px; +} + +/* Inspector panel */ +.inspector-panel { + background: var(--surface); + border: 1px solid var(--border); + border-radius: 8px; + overflow-y: auto; + padding: 1rem; +} +.inspector-panel h3 { + font-size: 0.9rem; + color: var(--accent); + margin-bottom: 0.75rem; +} + +/* Tool timeline */ +.tool-timeline { + position: relative; + padding-left: 1.25rem; +} +.tool-timeline::before { + content: ""; + position: absolute; + left: 4px; + top: 8px; + bottom: 8px; + width: 2px; + background: var(--border); +} 
+.tool-call-card { + position: relative; + margin-bottom: 0.75rem; + padding: 0.75rem; + background: #111827; + border-radius: 6px; + font-size: 0.8rem; +} +.tool-call-card::before { + content: ""; + position: absolute; + left: -1.25rem; + top: 1rem; + width: 10px; + height: 10px; + border-radius: 50%; + background: var(--green); + border: 2px solid var(--surface); +} +.tool-call-card.error::before { + background: var(--red); +} +.tool-call-card.running::before { + background: var(--yellow); +} +.tool-call-card .tc-name { + font-weight: 600; + color: var(--text); + margin-bottom: 0.25rem; +} +.tool-call-card .tc-timing { + font-size: 0.7rem; + color: var(--muted); +} + +/* JSON viewer */ +.json-viewer { + font-family: var(--font-mono); + font-size: 0.8rem; + line-height: 1.5; +} +.json-key { + color: #93c5fd; +} +.json-string { + color: #86efac; +} +.json-number { + color: #fbbf24; +} +.json-bool { + color: #c084fc; +} +.json-null { + color: var(--muted); +} +.json-toggle { + cursor: pointer; + user-select: none; + color: var(--muted); +} +.json-toggle:hover { + color: var(--text); +} +.json-collapsed { + display: none; +} + +/* Error banner */ +.error-banner { + background: rgba(239, 68, 68, 0.1); + border: 1px solid rgba(239, 68, 68, 0.3); + color: var(--red); + padding: 0.75rem 1rem; + border-radius: 6px; + margin-bottom: 1rem; + font-size: 0.875rem; +} + +/* Lineage graph */ +.lineage-legend { + display: flex; + flex-wrap: wrap; + gap: 1rem; + margin-top: 0.75rem; + padding: 0.75rem; + background: var(--surface); + border: 1px solid var(--border); + border-radius: 6px; +} +.legend-section { + display: flex; + flex-wrap: wrap; + gap: 0.75rem; + align-items: center; +} +.legend-item { + display: inline-flex; + align-items: center; + gap: 0.35rem; + font-size: 0.8rem; + color: var(--muted); +} +.legend-dot { + display: inline-block; + width: 10px; + height: 10px; + border-radius: 2px; +} + +/* Lineage page toolbar */ +.lineage-toolbar { + display: flex; + 
align-items: center; + justify-content: space-between; + flex-wrap: wrap; + gap: 0.75rem; + margin-top: 1rem; +} +.lineage-filters { + display: flex; + flex-wrap: wrap; + gap: 0.75rem; +} +.lineage-filter-label { + display: inline-flex; + align-items: center; + gap: 0.35rem; + font-size: 0.8rem; + color: var(--muted); + cursor: pointer; +} +.lineage-filter-label input[type="checkbox"] { + accent-color: var(--accent); +} + +/* Governance tab */ +.governance-path { + padding: 0.2rem 0.6rem; + background: #111827; + border: 1px solid var(--border); + border-radius: 4px; + font-family: var(--font-mono); + font-size: 0.8rem; + color: var(--text); +} +.governance-table { + width: 100%; + border-collapse: collapse; + font-size: 0.85rem; +} +.governance-table th { + text-align: left; + padding: 0.5rem 0.75rem; + border-bottom: 1px solid var(--border); + color: var(--muted); + font-weight: 500; + font-size: 0.8rem; +} +.governance-table td { + padding: 0.5rem 0.75rem; + border-bottom: 1px solid var(--border); + word-break: break-all; +} +.governance-table code { + font-size: 0.8rem; + color: #93c5fd; +} +.governance-details { + display: flex; + flex-direction: column; + gap: 0; +} +.governance-row { + display: flex; + justify-content: space-between; + padding: 0.5rem 0; + border-bottom: 1px solid var(--border); + font-size: 0.85rem; +} +.governance-row:last-child { + border-bottom: none; +} +.governance-label { + color: var(--muted); + font-weight: 500; +} + +/* Register All button */ +.register-all-btn { + white-space: nowrap; + font-size: 0.8rem; +} + +/* Registration result banner */ +.register-result-banner { + padding: 0.6rem 0.75rem; + border-radius: 6px; + font-size: 0.8rem; + margin-bottom: 1rem; + line-height: 1.4; +} +.register-result-success { + background: rgba(34, 197, 94, 0.12); + border: 1px solid rgba(34, 197, 94, 0.3); + color: #86efac; +} +.register-result-partial { + background: rgba(234, 179, 8, 0.12); + border: 1px solid rgba(234, 179, 8, 0.3); + color: 
#fde68a; +} +.register-result-error { + background: rgba(239, 68, 68, 0.12); + border: 1px solid rgba(239, 68, 68, 0.3); + color: #fca5a5; +} +.register-failures { + font-size: 0.75rem; + opacity: 0.8; +} + +/* ========== Session Bar ========== */ +.session-bar { + position: relative; + display: flex; + align-items: center; +} +.session-bar-toggle { + display: flex; + align-items: center; + gap: 0.5rem; + padding: 0.35rem 0.75rem; + background: var(--surface); + border: 1px solid var(--border); + border-radius: 6px; + color: var(--text); + cursor: pointer; + font-size: 0.85rem; + transition: border-color 0.15s; +} +.session-bar-toggle:hover { + border-color: var(--accent); +} +.session-bar-name { + font-weight: 500; + max-width: 200px; + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; +} +.session-bar-count { + color: var(--muted); + font-size: 0.75rem; +} +.session-bar-chevron { + color: var(--muted); + font-size: 0.7rem; +} +.session-dropdown { + position: absolute; + top: 100%; + left: 0; + margin-top: 4px; + min-width: 300px; + max-height: 320px; + overflow-y: auto; + background: var(--surface); + border: 1px solid var(--border); + border-radius: 8px; + box-shadow: 0 8px 24px rgba(0, 0, 0, 0.4); + z-index: 100; +} +.session-dropdown-empty { + padding: 1rem; + color: var(--muted); + font-size: 0.85rem; + text-align: center; +} +.session-dropdown-item { + display: flex; + align-items: center; + justify-content: space-between; + padding: 0.5rem 0.75rem; + border-bottom: 1px solid var(--border); + cursor: pointer; + transition: background 0.1s; +} +.session-dropdown-item:last-child { + border-bottom: none; +} +.session-dropdown-item:hover { + background: rgba(59, 130, 246, 0.05); +} +.session-dropdown-item.active { + border-left: 3px solid var(--accent); +} +.session-dropdown-info { + flex: 1; + min-width: 0; +} +.session-dropdown-name { + display: block; + font-size: 0.85rem; + font-weight: 500; + overflow: hidden; + text-overflow: ellipsis; + 
white-space: nowrap; +} +.session-dropdown-meta { + display: block; + font-size: 0.7rem; + color: var(--muted); +} +.session-dropdown-actions { + display: flex; + gap: 0.25rem; + margin-left: 0.5rem; +} +.session-action-btn { + background: none; + border: none; + color: var(--muted); + cursor: pointer; + font-size: 0.9rem; + padding: 2px 4px; + border-radius: 3px; +} +.session-action-btn:hover { + color: var(--text); + background: rgba(255, 255, 255, 0.05); +} +.session-action-delete:hover { + color: var(--red); +} +.session-rename-input { + flex: 1; + background: #111827; + border: 1px solid var(--accent); + border-radius: 4px; + color: var(--text); + font-size: 0.85rem; + padding: 0.2rem 0.4rem; + outline: none; +} + +/* ========== Inspector Tabs ========== */ +.inspector-tabs { + display: flex; + border-bottom: 1px solid var(--border); + margin-bottom: 0.75rem; +} +.inspector-tab { + display: flex; + align-items: center; + gap: 0.35rem; + padding: 0.5rem 0.75rem; + background: none; + border: none; + border-bottom: 2px solid transparent; + color: var(--muted); + cursor: pointer; + font-size: 0.8rem; + font-weight: 500; + transition: all 0.15s; +} +.inspector-tab:hover { + color: var(--text); +} +.inspector-tab.active { + color: var(--accent); + border-bottom-color: var(--accent); +} +.inspector-badge { + display: inline-flex; + align-items: center; + justify-content: center; + min-width: 18px; + height: 18px; + padding: 0 4px; + border-radius: 9px; + background: rgba(59, 130, 246, 0.15); + color: var(--accent); + font-size: 0.65rem; + font-weight: 600; +} +.inspector-content { + flex: 1; + overflow-y: auto; +} + +/* ========== Trace Panel ========== */ +.trace-panel { + display: flex; + flex-direction: column; + gap: 0.75rem; +} +.trace-empty { + padding: 1.5rem 1rem; + text-align: center; +} +.trace-turn-list { + display: flex; + flex-direction: column; + gap: 0.5rem; +} +.trace-turn-row { + padding: 0.6rem 0.75rem; + background: #111827; + border-radius: 6px; 
+ cursor: pointer; + border: 1px solid transparent; + transition: all 0.15s; +} +.trace-turn-row:hover { + border-color: var(--border); +} +.trace-turn-row.selected { + border-color: var(--accent); + background: rgba(59, 130, 246, 0.05); +} +.trace-turn-row.error { + border-color: rgba(239, 68, 68, 0.3); +} +.trace-turn-header { + display: flex; + align-items: center; + justify-content: space-between; + margin-bottom: 0.2rem; +} +.trace-turn-label { + font-size: 0.8rem; + font-weight: 600; +} +.trace-turn-meta { + font-size: 0.7rem; + color: var(--muted); +} +.trace-latency-bar { + height: 3px; + background: var(--border); + border-radius: 2px; + margin-top: 0.35rem; + overflow: hidden; +} +.trace-latency-fill { + height: 100%; + border-radius: 2px; + transition: width 0.3s ease; +} +.trace-server-timing { + font-size: 0.65rem; + color: var(--muted); + margin-top: 0.2rem; +} + +/* Trace detail */ +.trace-detail { + border-top: 1px solid var(--border); + padding-top: 0.75rem; +} +.trace-detail-tabs { + display: flex; + gap: 0; + margin-bottom: 0.5rem; +} +.trace-detail-tab { + padding: 0.3rem 0.6rem; + background: none; + border: none; + border-bottom: 2px solid transparent; + color: var(--muted); + cursor: pointer; + font-size: 0.75rem; + font-weight: 500; +} +.trace-detail-tab.active { + color: var(--accent); + border-bottom-color: var(--accent); +} +.trace-events { + display: flex; + flex-direction: column; + gap: 0.25rem; +} +.trace-event-row { + border-radius: 4px; +} +.trace-event-header { + display: flex; + align-items: center; + gap: 0.5rem; + padding: 0.35rem 0.5rem; + cursor: pointer; + border-radius: 4px; + transition: background 0.1s; +} +.trace-event-header:hover { + background: rgba(255, 255, 255, 0.03); +} +.trace-event-dot { + width: 8px; + height: 8px; + border-radius: 50%; + flex-shrink: 0; +} +.trace-event-label { + font-size: 0.75rem; + flex: 1; + min-width: 0; + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; +} 
+.trace-event-duration { + font-size: 0.7rem; + color: var(--muted); + font-family: var(--font-mono); +} +.trace-event-expand { + font-size: 0.6rem; + color: var(--muted); +} +.trace-event-payload { + padding: 0.25rem 0.5rem 0.5rem 1.5rem; + font-size: 0.75rem; +} +.trace-payload { + padding: 0.5rem; + background: #111827; + border-radius: 4px; + max-height: 300px; + overflow-y: auto; +} + +/* ========== Routing Panel ========== */ +.routing-panel { + display: flex; + flex-direction: column; + gap: 0.75rem; +} +.routing-card { + background: #111827; + border-radius: 8px; + padding: 0.75rem; + border: 1px solid var(--border); +} +.routing-card-header { + display: flex; + align-items: center; + justify-content: space-between; + margin-bottom: 0.6rem; +} +.routing-card-title { + display: flex; + align-items: center; + gap: 0.4rem; + font-size: 0.85rem; + font-weight: 600; +} +.routing-card-turn { + color: var(--muted); + font-weight: 400; +} +.routing-card-arrow { + color: var(--muted); + font-size: 0.75rem; +} +.routing-card-agent { + color: var(--accent); +} +.routing-badge { + font-size: 0.65rem; + font-weight: 700; + padding: 2px 8px; + border-radius: 4px; + letter-spacing: 0.05em; +} + +/* Routing decision row */ +.routing-decision-row { + display: flex; + gap: 1rem; + margin-bottom: 0.5rem; +} +.routing-decision-model, +.routing-decision-tool { + flex: 1; + display: flex; + flex-direction: column; + gap: 0.15rem; +} +.routing-label { + font-size: 0.65rem; + color: var(--muted); + text-transform: uppercase; + letter-spacing: 0.05em; + font-weight: 600; +} +.routing-value { + font-size: 0.8rem; + color: var(--text); +} +.routing-value-mono { + font-family: var(--font-mono); + color: #93c5fd; +} +.routing-value-url { + font-size: 0.7rem; + word-break: break-all; + opacity: 0.8; +} +.routing-endpoint { + margin-bottom: 0.5rem; +} + +/* Keywords */ +.routing-keywords { + margin-bottom: 0.5rem; +} +.routing-keyword-list { + display: flex; + flex-wrap: wrap; + gap: 
0.3rem; + margin-top: 0.2rem; +} +.routing-keyword { + font-size: 0.7rem; + padding: 2px 8px; + border-radius: 10px; + background: rgba(59, 130, 246, 0.12); + color: #93c5fd; + font-family: var(--font-mono); +} + +/* Tables */ +.routing-tables { + margin-bottom: 0.5rem; +} +.routing-table-list { + display: flex; + flex-direction: column; + gap: 0.2rem; + margin-top: 0.2rem; +} +.routing-table { + font-size: 0.7rem; + padding: 3px 8px; + border-radius: 4px; + background: rgba(34, 197, 94, 0.1); + color: #86efac; + font-family: var(--font-mono); + display: inline-block; + width: fit-content; +} + +/* Timing bar */ +.routing-timing { + margin: 0.6rem 0; + padding: 0.5rem; + background: rgba(0, 0, 0, 0.2); + border-radius: 6px; +} +.routing-timing-header { + display: flex; + align-items: baseline; + gap: 0.3rem; + margin-bottom: 0.3rem; +} +.routing-timing-total { + font-size: 1rem; + font-weight: 700; + color: var(--text); +} +.routing-timing-label { + font-size: 0.7rem; + color: var(--muted); +} +.routing-timing-bar { + height: 6px; + background: var(--border); + border-radius: 3px; + display: flex; + overflow: hidden; + margin-bottom: 0.3rem; +} +.routing-timing-segment { + height: 100%; + transition: width 0.3s ease; +} +.routing-timing-routing { + background: var(--accent); +} +.routing-timing-network { + background: var(--yellow); +} +.routing-timing-sql { + background: var(--green); +} +.routing-timing-legend { + display: flex; + gap: 1rem; + font-size: 0.65rem; + color: var(--muted); +} +.routing-legend-dot { + display: inline-block; + width: 6px; + height: 6px; + border-radius: 50%; + margin-right: 4px; +} + +/* SQL queries section */ +.routing-sql-section { + margin-top: 0.4rem; +} +.routing-sql-toggle { + background: none; + border: none; + color: var(--muted); + font-size: 0.75rem; + cursor: pointer; + padding: 0.25rem 0; + font-family: inherit; +} +.routing-sql-toggle:hover { + color: var(--text); +} +.routing-sql-card { + background: rgba(0, 0, 0, 0.25); 
+ border-radius: 6px; + padding: 0.5rem 0.6rem; + margin-top: 0.4rem; + border: 1px solid transparent; +} +.routing-sql-card:hover { + border-color: var(--border); +} +.routing-sql-error { + border-color: rgba(239, 68, 68, 0.3); +} +.routing-sql-header { + display: flex; + align-items: center; + justify-content: space-between; + cursor: pointer; +} +.routing-sql-meta { + display: flex; + align-items: center; + gap: 0.4rem; + font-size: 0.75rem; +} +.routing-sql-rows { + color: var(--green); + font-weight: 600; +} +.routing-sql-sep { + width: 3px; + height: 3px; + border-radius: 50%; + background: var(--muted); +} +.routing-sql-duration { + color: var(--muted); +} +.routing-sql-wh { + color: var(--muted); + font-family: var(--font-mono); + font-size: 0.65rem; +} +.routing-sql-expand { + color: var(--muted); + font-size: 0.65rem; +} +.routing-sql-label { + font-size: 0.75rem; + font-weight: 600; + color: var(--red); +} +.routing-sql-error-msg { + font-size: 0.7rem; + color: var(--muted); + margin-top: 0.3rem; + font-family: var(--font-mono); +} + +/* SQL detail (expanded) */ +.routing-sql-detail { + margin-top: 0.5rem; +} +.routing-sql-statement { + font-family: var(--font-mono); + font-size: 0.7rem; + color: #93c5fd; + background: rgba(0, 0, 0, 0.3); + padding: 0.5rem; + border-radius: 4px; + white-space: pre-wrap; + word-break: break-all; + line-height: 1.5; + margin-bottom: 0.4rem; +} +.routing-sql-params { + margin-top: 0.3rem; +} +.routing-sql-params-label { + font-size: 0.65rem; + color: var(--muted); + text-transform: uppercase; + letter-spacing: 0.05em; + font-weight: 600; + display: block; + margin-bottom: 0.2rem; +} +.routing-sql-param { + display: inline-flex; + align-items: center; + gap: 0.15rem; + font-size: 0.7rem; + margin-right: 0.5rem; + margin-bottom: 0.15rem; +} +.routing-sql-param-name { + color: #93c5fd; + font-family: var(--font-mono); +} +.routing-sql-param-eq { + color: var(--muted); +} +.routing-sql-param-value { + color: #86efac; + 
font-family: var(--font-mono); +} + +/* SQL schema columns */ +.routing-sql-schema { + margin-top: 0.4rem; +} +.routing-sql-columns { + display: flex; + flex-wrap: wrap; + gap: 0.25rem; + margin-top: 0.15rem; +} +.routing-sql-column { + display: inline-flex; + align-items: center; + gap: 0.3rem; + font-size: 0.65rem; + padding: 1px 6px; + background: rgba(255, 255, 255, 0.04); + border-radius: 3px; +} +.routing-sql-col-name { + color: var(--text); + font-family: var(--font-mono); +} +.routing-sql-col-type { + color: var(--muted); + font-family: var(--font-mono); + font-size: 0.6rem; +} + +/* ========== Artifacts Panel ========== */ +.artifacts-empty { + padding: 1.5rem 1rem; + text-align: center; +} +.artifacts-panel { + display: flex; + flex-direction: column; + gap: 0.5rem; +} +.artifact-card { + background: #111827; + border-radius: 6px; + overflow: hidden; +} +.artifact-header { + display: flex; + align-items: center; + gap: 0.6rem; + padding: 0.6rem 0.75rem; + cursor: pointer; + transition: background 0.1s; +} +.artifact-header:hover { + background: rgba(255, 255, 255, 0.03); +} +.artifact-icon { + display: flex; + align-items: center; + justify-content: center; + width: 28px; + height: 28px; + border-radius: 4px; + font-size: 0.6rem; + font-weight: 700; + font-family: var(--font-mono); + flex-shrink: 0; +} +.artifact-info { + flex: 1; + min-width: 0; +} +.artifact-label { + display: block; + font-size: 0.8rem; + font-weight: 500; + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; +} +.artifact-meta { + display: block; + font-size: 0.7rem; + color: var(--muted); +} +.artifact-body { + padding: 0.75rem; + border-top: 1px solid var(--border); +} + +/* ========== Message Highlight Animation ========== */ +.message-bubble.highlighted { + animation: highlightGlow 2s ease-out; +} +@keyframes highlightGlow { + 0% { + box-shadow: 0 0 0 2px var(--accent), 0 0 12px rgba(59, 130, 246, 0.4); + } + 70% { + box-shadow: 0 0 0 2px var(--accent), 0 0 12px 
rgba(59, 130, 246, 0.4); + } + 100% { + box-shadow: none; + } +} + +/* ========== Responsive ========== */ +@media (max-width: 768px) { + .chat-layout { + grid-template-columns: 1fr; + } + .inspector-panel { + display: none; + } + .session-dropdown { + min-width: 250px; + } +} + +/* ========== System Builder — Wizard Layout ========== */ + +/* Break wizard out of .container max-width constraint */ +.container:has(> .sb-wizard) { + max-width: none; + padding: 0; +} + +/* Wizard container */ +.sb-wizard { + display: flex; + height: calc(100vh - 60px); +} +.sb-wizard-content { + flex: 1; + overflow: auto; + padding: 1.5rem; + display: flex; + flex-direction: column; +} +.sb-wizard-header { + margin-bottom: 1rem; +} +.sb-wizard-header h2 { + font-size: 1.25rem; + font-weight: 600; +} + +/* --- Wizard Sidebar --- */ +.sb-wizard-sidebar { + width: 240px; + background: var(--surface); + border-right: 1px solid var(--border); + display: flex; + flex-direction: column; + padding: 12px; + gap: 16px; + flex-shrink: 0; + transition: width 0.2s ease; +} +.sb-wizard-sidebar.collapsed { + width: 56px; + padding: 12px 8px; + align-items: center; +} +.sb-sidebar-toggle { + background: none; + border: 1px solid var(--border); + border-radius: 4px; + color: var(--muted); + cursor: pointer; + padding: 4px 8px; + font-size: 0.8rem; + align-self: flex-end; + transition: color 0.15s; +} +.sb-sidebar-toggle:hover { color: var(--text); border-color: var(--accent); } +.sb-sidebar-section { margin-bottom: 4px; } +.sb-sidebar-select { + font-size: 0.8rem; + padding: 5px 8px; +} +.sb-sidebar-nav { + display: flex; + flex-direction: column; + gap: 4px; + flex: 1; +} +.sb-sidebar-actions { + display: flex; + flex-direction: column; + gap: 6px; + padding-top: 12px; + border-top: 1px solid var(--border); +} + +/* Step items */ +.sb-step { + display: flex; + align-items: center; + gap: 10px; + padding: 10px 12px; + border: none; + border-radius: 6px; + background: transparent; + color: var(--text); 
+ cursor: pointer; + font-size: 0.85rem; + text-align: left; + transition: background 0.15s; +} +.sb-step:hover { background: rgba(59, 130, 246, 0.08); } +.sb-step--active { + background: var(--accent); + color: #fff; +} +.sb-step--active:hover { background: var(--accent-hover); } +.sb-step--disabled { + opacity: 0.35; + pointer-events: none; +} +.sb-step--completed .sb-step-num { + background: var(--green); +} +.sb-step-num { + display: inline-flex; + align-items: center; + justify-content: center; + width: 24px; + height: 24px; + border-radius: 50%; + background: var(--border); + font-size: 0.75rem; + font-weight: 700; + flex-shrink: 0; +} +.sb-step--active .sb-step-num { background: rgba(255,255,255,0.25); } +.sb-step-icon { font-size: 1rem; } +.sb-step-label { font-weight: 500; } + +/* --- Shared form elements (preserved) --- */ +.sb-input { + width: 100%; + padding: 6px 10px; + border: 1px solid var(--border); + border-radius: 4px; + background: var(--bg); + color: var(--text); + font-size: 0.85rem; + font-family: inherit; + outline: none; + box-sizing: border-box; +} +.sb-input:focus { + border-color: var(--accent); + box-shadow: 0 0 0 2px rgba(59, 130, 246, 0.2); +} +.sb-input--error { + border-color: var(--red); +} +.sb-label { + font-size: 0.75rem; + font-weight: 500; + color: var(--muted); + margin-top: 4px; +} +.sb-section-title { + font-size: 0.85rem; + font-weight: 600; + margin-bottom: 4px; +} +.sb-muted-center { + color: var(--muted); + font-size: 0.85rem; + text-align: center; + display: flex; + flex-direction: column; + align-items: center; + justify-content: center; + gap: 4px; +} + +/* --- Agent Palette (preserved) --- */ +.sb-palette { + display: flex; + flex-direction: column; + gap: 10px; + height: 100%; +} +.sb-palette-title { + margin: 0; + font-size: 0.9rem; + font-weight: 600; +} +.sb-palette-list { + display: flex; + flex-direction: column; + gap: 6px; + overflow-y: auto; + flex: 1; +} +.sb-palette-item { + padding: 8px 10px; + border: 
1px solid var(--border); + border-radius: 6px; + cursor: pointer; + transition: border-color 0.15s; + position: relative; +} +.sb-palette-item:hover { border-color: var(--accent); } +.sb-palette-item--added { + opacity: 0.4; + cursor: default; +} +.sb-palette-item--added:hover { border-color: var(--border); } +.sb-palette-name { font-size: 0.85rem; font-weight: 600; } +.sb-palette-desc { + font-size: 0.75rem; + color: var(--muted); + margin-top: 2px; + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; +} +.sb-palette-caps { display: flex; flex-wrap: wrap; gap: 3px; margin-top: 4px; } +.sb-cap-badge { + display: inline-block; + padding: 1px 5px; + background: rgba(59, 130, 246, 0.15); + color: var(--accent); + border-radius: 3px; + font-size: 0.65rem; + font-weight: 500; +} +.sb-palette-added { + position: absolute; + top: 6px; + right: 6px; + font-size: 0.65rem; + color: var(--muted); +} + +/* --- Wiring Canvas --- */ +.sb-canvas { + width: 100%; + height: 100%; + min-height: 400px; +} + +/* --- Custom AgentNode --- */ +.sb-agent-node { + background: var(--surface); + border: 1px solid var(--border); + border-radius: 8px; + min-width: 200px; + max-width: 240px; + cursor: grab; + display: flex; + overflow: visible; + transition: opacity 0.2s, box-shadow 0.2s; +} +/* Make handles larger and visible for easier dragging */ +.sb-agent-node .react-flow__handle { + width: 14px; + height: 14px; + background: var(--accent); + border: 2px solid var(--surface); + transition: transform 0.15s, background 0.15s; + z-index: 10; +} +.sb-agent-node .react-flow__handle:hover { + transform: scale(1.4); + background: #60a5fa; +} +.sb-agent-node--dimmed { opacity: 0.2; } +.sb-agent-node--highlighted { + box-shadow: 0 0 0 3px rgba(59, 130, 246, 0.35); + border-color: var(--accent); +} +.sb-agent-node-bar { + width: 4px; + flex-shrink: 0; +} +.sb-agent-node-body { + padding: 8px 10px; + flex: 1; + min-width: 0; +} +.sb-agent-node-header { + display: flex; + align-items: 
center; + justify-content: space-between; + gap: 6px; +} +.sb-agent-node-name { + font-size: 0.8rem; + font-weight: 600; + color: var(--text); + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; +} +.sb-agent-node-status { + width: 8px; + height: 8px; + border-radius: 50%; + flex-shrink: 0; +} +.sb-agent-node-desc { + font-size: 0.7rem; + color: var(--muted); + margin-top: 2px; + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; +} +.sb-agent-node-caps { + display: flex; + flex-wrap: wrap; + gap: 2px; + margin-top: 4px; +} + +/* --- Edge label --- */ +.sb-edge-label { + position: absolute; + padding: 2px 6px; + background: var(--surface); + border: 1px solid var(--border); + border-radius: 4px; + font-size: 0.65rem; + font-weight: 500; + color: var(--muted); + cursor: pointer; + white-space: nowrap; +} +.sb-edge-label--selected { + border-color: var(--accent); + color: var(--text); + font-weight: 600; +} + +/* --- Properties panel (preserved for ConfigureStep) --- */ +.sb-props { + display: flex; + flex-direction: column; + gap: 14px; + height: 100%; +} +.sb-props-section { + display: flex; + flex-direction: column; + gap: 4px; + padding: 10px; + background: var(--bg); + border-radius: 6px; +} +.sb-props-actions { + display: flex; + flex-direction: column; + gap: 6px; + margin-top: auto; + padding-top: 12px; + border-top: 1px solid var(--border); +} + +/* --- Deploy progress --- */ +.sb-deploy { + display: flex; + flex-direction: column; + gap: 8px; + padding: 10px; + background: var(--bg); + border-radius: 6px; +} +.sb-deploy-header { display: flex; align-items: center; gap: 8px; } +.sb-deploy-badge { + display: inline-block; + padding: 2px 8px; + border-radius: 4px; + color: #fff; + font-size: 0.7rem; + font-weight: 700; + letter-spacing: 0.5px; +} +.sb-deploy-steps { display: flex; flex-direction: column; gap: 3px; } +.sb-deploy-step { + padding: 4px 6px; + border-radius: 4px; + cursor: pointer; + font-size: 0.8rem; +} 
+.sb-deploy-step:hover { background: var(--surface); } +.sb-deploy-step-row { display: flex; align-items: center; gap: 6px; } +.sb-deploy-dot { + width: 8px; + height: 8px; + border-radius: 50%; + flex-shrink: 0; +} +.sb-deploy-detail { + font-size: 0.75rem; + color: var(--muted); + padding: 4px 0 2px 14px; + word-break: break-word; +} + +/* Pulsing animation for in-progress deploy */ +.sb-deploy-pulse { + animation: sb-pulse 2s ease-in-out infinite; +} +@keyframes sb-pulse { + 0%, 100% { opacity: 1; } + 50% { opacity: 0.5; } +} + +/* Edge dash animation */ +@keyframes sb-edge-dash { + to { stroke-dashoffset: -20; } +} + +/* --- Step Content Layouts --- */ +.sb-step-content { + flex: 1; + display: flex; + min-height: 0; +} +.sb-step-header { + display: flex; + align-items: baseline; + gap: 12px; + margin-bottom: 8px; +} +.sb-step-header h3 { + font-size: 1rem; + font-weight: 600; + margin: 0; +} +.sb-step-hint { + font-size: 0.8rem; + color: var(--muted); +} + +/* Step 1: Select Agents */ +.sb-step-select { + gap: 1rem; +} +.sb-step-select-palette { + width: 260px; + flex-shrink: 0; + background: var(--surface); + border: 1px solid var(--border); + border-radius: 8px; + padding: 1rem; + overflow-y: auto; +} +.sb-step-select-preview { + flex: 1; + display: flex; + flex-direction: column; + min-width: 0; +} +.sb-step-canvas-mini { + flex: 1; + background: var(--surface); + border: 1px solid var(--border); + border-radius: 8px; + overflow: hidden; +} + +/* Step 2: Wire Connections */ +.sb-step-wire { + position: relative; +} +.sb-step-wire-canvas { + flex: 1; + display: flex; + flex-direction: column; + min-width: 0; +} +.sb-step-wire-panel { + position: absolute; + top: 60px; + right: 16px; + width: 260px; + background: var(--surface); + border: 1px solid var(--border); + border-radius: 8px; + padding: 12px; + z-index: 10; + box-shadow: 0 4px 12px rgba(0, 0, 0, 0.3); +} + +/* Step 3: Configure */ +.sb-step-configure { + gap: 1rem; +} +.sb-step-configure-canvas { + 
flex: 6; + min-width: 0; + background: var(--surface); + border: 1px solid var(--border); + border-radius: 8px; + overflow: hidden; +} +.sb-step-configure-panel { + flex: 4; + background: var(--surface); + border: 1px solid var(--border); + border-radius: 8px; + padding: 1rem; + overflow-y: auto; +} +.sb-env-table { + display: flex; + flex-direction: column; + gap: 6px; + margin-top: 4px; +} +.sb-env-row { + display: flex; + flex-direction: column; + gap: 2px; +} +.sb-env-agents { + font-size: 0.75rem; + color: var(--muted); +} +.sb-env-input { + font-family: var(--font-mono); + font-size: 0.8rem; +} +.sb-step-validation { + font-size: 0.8rem; + color: var(--red); + padding: 6px 0; +} + +/* Step 4: Deploy */ +.sb-step-deploy { + gap: 1rem; +} +.sb-step-deploy-canvas { + flex: 6; + min-width: 0; + display: flex; + flex-direction: column; +} +.sb-step-deploy-panel { + flex: 4; + background: var(--surface); + border: 1px solid var(--border); + border-radius: 8px; + padding: 1rem; + overflow-y: auto; + display: flex; + flex-direction: column; + gap: 10px; +} + +/* Deploy terminal (build logs) */ +.sb-deploy-terminal { + background: #0a0a0a; + font-family: var(--font-mono); + font-size: 0.75rem; + color: var(--muted); + padding: 10px; + border-radius: 6px; + max-height: 200px; + overflow-y: auto; + margin-top: 6px; + line-height: 1.5; +} + +/* --- Dual coloring mode toggle --- */ +.sb-color-toggle { + position: absolute; + top: 10px; + right: 10px; + z-index: 10; + display: flex; + gap: 2px; + background: var(--surface); + border: 1px solid var(--border); + border-radius: 6px; + overflow: hidden; +} +.sb-color-btn { + padding: 4px 10px; + border: none; + background: transparent; + color: var(--muted); + font-size: 0.7rem; + font-weight: 500; + cursor: pointer; + transition: background 0.15s, color 0.15s; +} +.sb-color-btn:hover { color: var(--text); } +.sb-color-btn--active { + background: var(--accent); + color: #fff; +} diff --git 
a/dbx-agent-app/src/dbx_agent_app/dashboard/frontend/src/App.tsx b/dbx-agent-app/src/dbx_agent_app/dashboard/frontend/src/App.tsx new file mode 100644 index 00000000..df39b442 --- /dev/null +++ b/dbx-agent-app/src/dbx_agent_app/dashboard/frontend/src/App.tsx @@ -0,0 +1,24 @@ +import { HashRouter, Routes, Route } from "react-router-dom"; +import { AgentProvider } from "./hooks/useAgents"; +import { Shell } from "./components/layout/Shell"; +import { AgentGrid } from "./components/agents/AgentGrid"; +import { AgentDetail } from "./components/detail/AgentDetail"; +import { LineagePage } from "./pages/LineagePage"; +import { SystemBuilderPage } from "./pages/SystemBuilderPage"; + +export function App() { + return ( + + + + + } /> + } /> + } /> + } /> + + + + + ); +} diff --git a/dbx-agent-app/src/dbx_agent_app/dashboard/frontend/src/api/agents.ts b/dbx-agent-app/src/dbx_agent_app/dashboard/frontend/src/api/agents.ts new file mode 100644 index 00000000..6d0f7598 --- /dev/null +++ b/dbx-agent-app/src/dbx_agent_app/dashboard/frontend/src/api/agents.ts @@ -0,0 +1,14 @@ +import type { Agent, AgentCard, ScanResult } from "../types"; +import { apiFetch } from "./client"; + +export function fetchAgents(): Promise { + return apiFetch("/api/agents"); +} + +export function fetchAgentCard(name: string): Promise { + return apiFetch(`/api/agents/${encodeURIComponent(name)}/card`); +} + +export function triggerScan(): Promise { + return apiFetch("/api/scan", { method: "POST" }); +} diff --git a/dbx-agent-app/src/dbx_agent_app/dashboard/frontend/src/api/chat.ts b/dbx-agent-app/src/dbx_agent_app/dashboard/frontend/src/api/chat.ts new file mode 100644 index 00000000..8de15be9 --- /dev/null +++ b/dbx-agent-app/src/dbx_agent_app/dashboard/frontend/src/api/chat.ts @@ -0,0 +1,81 @@ +import { apiFetch } from "./client"; + +export interface ChatResponse { + result: { + messageId?: string; + contextId?: string; + role?: string; + parts?: Array<{ text?: string; [key: string]: unknown }>; + 
_trace?: { + request_sent_at: string; + response_received_at: string; + latency_ms: number; + protocol: "a2a" | "mcp_fallback"; + request_payload?: unknown; + response_payload?: unknown; + sub_events?: Array<{ + type: string; + label: string; + duration_ms: number; + request?: unknown; + response?: unknown; + }>; + }; + [key: string]: unknown; + }; +} + +export function sendMessage( + agentName: string, + message: string, + contextId?: string, +): Promise { + return apiFetch( + `/api/agents/${encodeURIComponent(agentName)}/chat`, + { + method: "POST", + body: JSON.stringify({ message, context_id: contextId }), + }, + ); +} + +/** + * Send a streaming chat message — yields SSE lines. + * Falls back to non-streaming if /chat/stream returns an error. + */ +export async function* sendStreamingMessage( + agentName: string, + message: string, +): AsyncGenerator { + const response = await fetch( + `/api/agents/${encodeURIComponent(agentName)}/chat/stream`, + { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ message }), + }, + ); + + if (!response.ok || !response.body) { + throw new Error(`Stream failed: ${response.status}`); + } + + const reader = response.body.getReader(); + const decoder = new TextDecoder(); + let buffer = ""; + + while (true) { + const { done, value } = await reader.read(); + if (done) break; + + buffer += decoder.decode(value, { stream: true }); + const lines = buffer.split("\n"); + buffer = lines.pop() ?? ""; + + for (const line of lines) { + if (line.startsWith("data: ")) { + yield line.slice(6); + } + } + } +} diff --git a/dbx-agent-app/src/dbx_agent_app/dashboard/frontend/src/api/client.ts b/dbx-agent-app/src/dbx_agent_app/dashboard/frontend/src/api/client.ts new file mode 100644 index 00000000..a4fa9736 --- /dev/null +++ b/dbx-agent-app/src/dbx_agent_app/dashboard/frontend/src/api/client.ts @@ -0,0 +1,34 @@ +/** + * Minimal fetch wrapper with error handling. 
+ * All API calls go through here for consistent behavior. + */ + +export class ApiError extends Error { + constructor( + public status: number, + message: string, + ) { + super(message); + this.name = "ApiError"; + } +} + +export async function apiFetch( + path: string, + options: RequestInit = {}, +): Promise { + const response = await fetch(path, { + ...options, + headers: { + "Content-Type": "application/json", + ...options.headers, + }, + }); + + if (!response.ok) { + const text = await response.text().catch(() => "Unknown error"); + throw new ApiError(response.status, text); + } + + return response.json() as Promise; +} diff --git a/dbx-agent-app/src/dbx_agent_app/dashboard/frontend/src/api/governance.ts b/dbx-agent-app/src/dbx_agent_app/dashboard/frontend/src/api/governance.ts new file mode 100644 index 00000000..1700f1ba --- /dev/null +++ b/dbx-agent-app/src/dbx_agent_app/dashboard/frontend/src/api/governance.ts @@ -0,0 +1,49 @@ +import type { LineageGraph, GovernanceStatus, AgentAnalytics, EvalResult } from "../types/lineage"; +import { apiFetch } from "./client"; + +export function fetchAgentLineage(name: string): Promise { + return apiFetch( + `/api/agents/${encodeURIComponent(name)}/lineage`, + ); +} + +export function fetchAgentGovernance(name: string): Promise { + return apiFetch( + `/api/agents/${encodeURIComponent(name)}/governance`, + ); +} + +export function fetchWorkspaceLineage( + warehouseId?: string, +): Promise { + const params = warehouseId ? 
`?warehouse_id=${encodeURIComponent(warehouseId)}` : ""; + return apiFetch(`/api/lineage${params}`); +} + +export function fetchAgentAnalytics(name: string): Promise { + return apiFetch( + `/api/agents/${encodeURIComponent(name)}/analytics`, + ); +} + +export function evaluateAgent( + name: string, + messages: Array<{ role: string; content: string }>, +): Promise { + return apiFetch( + `/api/agents/${encodeURIComponent(name)}/evaluate`, + { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ messages }), + }, + ); +} + +export function observeTrace(agentName: string, trace: Record): void { + fetch("/api/lineage/observe", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ agent_name: agentName, trace }), + }).catch(() => {}); +} diff --git a/dbx-agent-app/src/dbx_agent_app/dashboard/frontend/src/api/mcp.ts b/dbx-agent-app/src/dbx_agent_app/dashboard/frontend/src/api/mcp.ts new file mode 100644 index 00000000..53ab4fa0 --- /dev/null +++ b/dbx-agent-app/src/dbx_agent_app/dashboard/frontend/src/api/mcp.ts @@ -0,0 +1,12 @@ +import type { JsonRpcRequest, JsonRpcResponse } from "../types"; +import { apiFetch } from "./client"; + +export function mcpRequest( + agentName: string, + payload: JsonRpcRequest, +): Promise { + return apiFetch( + `/api/agents/${encodeURIComponent(agentName)}/mcp`, + { method: "POST", body: JSON.stringify(payload) }, + ); +} diff --git a/dbx-agent-app/src/dbx_agent_app/dashboard/frontend/src/api/systems.ts b/dbx-agent-app/src/dbx_agent_app/dashboard/frontend/src/api/systems.ts new file mode 100644 index 00000000..4485876d --- /dev/null +++ b/dbx-agent-app/src/dbx_agent_app/dashboard/frontend/src/api/systems.ts @@ -0,0 +1,50 @@ +/* System Builder API client — uses apiFetch from client.ts */ + +import { apiFetch } from "./client"; +import type { SystemDefinition, SystemCreate, DeployResult, DeployProgress } from "../types/systems"; + +export async function 
fetchSystems(): Promise { + return apiFetch("/api/systems"); +} + +export async function fetchSystem(id: string): Promise { + return apiFetch(`/api/systems/${id}`); +} + +export async function createSystem(data: SystemCreate): Promise { + return apiFetch("/api/systems", { + method: "POST", + body: JSON.stringify(data), + }); +} + +export async function updateSystem( + id: string, + data: Partial, +): Promise { + return apiFetch(`/api/systems/${id}`, { + method: "PUT", + body: JSON.stringify(data), + }); +} + +export async function deleteSystem(id: string): Promise { + await apiFetch<{ ok: boolean }>(`/api/systems/${id}`, { method: "DELETE" }); +} + +export async function deploySystem(id: string): Promise { + return apiFetch(`/api/systems/${id}/deploy`, { method: "POST" }); +} + +/** Start async deploy — returns deploy_id for polling */ +export async function startDeploy(id: string): Promise<{ deploy_id: string; status: string }> { + return apiFetch<{ deploy_id: string; status: string }>(`/api/systems/${id}/deploy`, { + method: "POST", + body: JSON.stringify({ async: true }), + }); +} + +/** Poll deploy progress */ +export async function getDeployStatus(id: string): Promise { + return apiFetch(`/api/systems/${id}/deploy/status`); +} diff --git a/dbx-agent-app/src/dbx_agent_app/dashboard/frontend/src/components/agents/AgentCard.tsx b/dbx-agent-app/src/dbx_agent_app/dashboard/frontend/src/components/agents/AgentCard.tsx new file mode 100644 index 00000000..5046d3b3 --- /dev/null +++ b/dbx-agent-app/src/dbx_agent_app/dashboard/frontend/src/components/agents/AgentCard.tsx @@ -0,0 +1,36 @@ +import { useNavigate } from "react-router-dom"; +import type { Agent } from "../../types"; + +interface Props { + agent: Agent; +} + +export function AgentCard({ agent }: Props) { + const navigate = useNavigate(); + + const capabilities = agent.capabilities + ? agent.capabilities.split(",").map((c) => c.trim()) + : []; + + return ( +
navigate(`/agent/${agent.name}`)}> +

{agent.name}

+

{agent.description ?? "No description"}

+
+ App: {agent.app_name} + {agent.protocol_version && ( + {agent.protocol_version} + )} +
+ {capabilities.length > 0 && ( +
+ {capabilities.map((cap) => ( + + {cap} + + ))} +
+ )} +
+ ); +} diff --git a/dbx-agent-app/src/dbx_agent_app/dashboard/frontend/src/components/agents/AgentGrid.tsx b/dbx-agent-app/src/dbx_agent_app/dashboard/frontend/src/components/agents/AgentGrid.tsx new file mode 100644 index 00000000..bd0105f1 --- /dev/null +++ b/dbx-agent-app/src/dbx_agent_app/dashboard/frontend/src/components/agents/AgentGrid.tsx @@ -0,0 +1,47 @@ +import { useAgents } from "../../hooks/useAgents"; +import { AgentCard } from "./AgentCard"; +import { Spinner } from "../common/Spinner"; +import { ErrorBanner } from "../common/ErrorBanner"; +import { EmptyState } from "../common/EmptyState"; + +export function AgentGrid() { + const { agents, loading, error, scan } = useAgents(); + + return ( + <> +
+ + {loading + ? "Loading..." + : `${agents.length} agent${agents.length !== 1 ? "s" : ""} discovered`} + + +
+ + {error && } + + {!loading && agents.length === 0 && !error && ( + + )} + + {agents.length > 0 && ( +
+ {agents.map((agent) => ( + + ))} +
+ )} + + ); +} diff --git a/dbx-agent-app/src/dbx_agent_app/dashboard/frontend/src/components/chat/MessageBubble.tsx b/dbx-agent-app/src/dbx_agent_app/dashboard/frontend/src/components/chat/MessageBubble.tsx new file mode 100644 index 00000000..3ec2334e --- /dev/null +++ b/dbx-agent-app/src/dbx_agent_app/dashboard/frontend/src/components/chat/MessageBubble.tsx @@ -0,0 +1,49 @@ +import type { ChatMessage } from "../../types"; + +interface Props { + message: ChatMessage; + isHighlighted?: boolean; +} + +export function MessageBubble({ message, isHighlighted }: Props) { + const isUser = message.role === "user"; + + const textContent = message.parts + .filter((p) => p.type === "text") + .map((p) => (p as { type: "text"; text: string }).text) + .join("\n"); + + const toolCallParts = message.parts.filter((p) => p.type === "tool-call"); + + return ( +
+
{isUser ? "You" : "Agent"}
+
+ {textContent &&
{textContent}
} + {toolCallParts.length > 0 && ( +
+ {toolCallParts.map((p) => { + const tc = p as { + type: "tool-call"; + toolCallId: string; + toolName: string; + }; + return ( + + {tc.toolName} + + ); + })} +
+ )} +
+
+ ); +} diff --git a/dbx-agent-app/src/dbx_agent_app/dashboard/frontend/src/components/chat/MessageInput.tsx b/dbx-agent-app/src/dbx_agent_app/dashboard/frontend/src/components/chat/MessageInput.tsx new file mode 100644 index 00000000..04248e90 --- /dev/null +++ b/dbx-agent-app/src/dbx_agent_app/dashboard/frontend/src/components/chat/MessageInput.tsx @@ -0,0 +1,50 @@ +import { useState, useRef, type KeyboardEvent } from "react"; + +interface Props { + onSend: (text: string) => void; + disabled: boolean; +} + +export function MessageInput({ onSend, disabled }: Props) { + const [text, setText] = useState(""); + const textareaRef = useRef(null); + + function handleSend() { + const trimmed = text.trim(); + if (!trimmed || disabled) return; + onSend(trimmed); + setText(""); + // Reset textarea height + if (textareaRef.current) { + textareaRef.current.style.height = "auto"; + } + } + + function handleKeyDown(e: KeyboardEvent) { + if (e.key === "Enter" && !e.shiftKey) { + e.preventDefault(); + handleSend(); + } + } + + return ( +
+ + +
+
+ +""", + ) diff --git a/dbx-agent-app/src/dbx_agent_app/deploy/__init__.py b/dbx-agent-app/src/dbx_agent_app/deploy/__init__.py new file mode 100644 index 00000000..4b8e0b3d --- /dev/null +++ b/dbx-agent-app/src/dbx_agent_app/deploy/__init__.py @@ -0,0 +1,7 @@ +"""Deploy tooling for multi-agent Databricks Apps deployments.""" + +from .config import DeployConfig, AgentSpec +from .engine import DeployEngine +from .state import DeployState + +__all__ = ["DeployConfig", "AgentSpec", "DeployEngine", "DeployState"] diff --git a/dbx-agent-app/src/dbx_agent_app/deploy/config.py b/dbx-agent-app/src/dbx_agent_app/deploy/config.py new file mode 100644 index 00000000..b3e12cc0 --- /dev/null +++ b/dbx-agent-app/src/dbx_agent_app/deploy/config.py @@ -0,0 +1,438 @@ +""" +YAML config parsing, validation, and ${...} interpolation for agents.yaml. + +The config schema: + project: + name: sgp-multi-agent + workspace_path: /Workspace/Shared/apps + uc: + catalog: serverless_dxukih_catalog + schema: agents + warehouse: + id: 387bcda0f2ece20c + agents: + - name: research + source: ./agents/research + resources: + - name: transcripts-table + uc_securable: + securable_type: TABLE + securable_full_name: "${uc.catalog}.${uc.schema}.expert_transcripts" + permission: SELECT + - name: warehouse + sql_warehouse: + id: "${warehouse.id}" + permission: CAN_USE + - name: api-key + secret: + scope: agent-secrets + key: openai-key + permission: READ + - name: llm + serving_endpoint: + name: gpt-4-endpoint + permission: CAN_QUERY + - name: etl-job + job: + id: "12345" + permission: CAN_MANAGE_RUN + env: + UC_CATALOG: "${uc.catalog}" + + Legacy form (auto-converted to resources): + - name: research + source: ./agents/research + tables: [expert_transcripts] +""" + +from __future__ import annotations + +import re +from collections import deque +from pathlib import Path +from typing import Any + +import yaml +from pydantic import BaseModel, field_validator, model_validator + +from typing import Optional + 
+ +# --------------------------------------------------------------------------- +# App resource declarations (maps to Databricks Apps API resource types) +# --------------------------------------------------------------------------- + +# Valid enum values for UC securable resources (matches Apps API) +UC_SECURABLE_TYPES = {"TABLE", "VOLUME", "FUNCTION", "CONNECTION"} +UC_SECURABLE_PERMISSIONS = { + "SELECT", "EXECUTE", "READ_VOLUME", "WRITE_VOLUME", "USE_CONNECTION", "MANAGE", +} + +# Resource type field names (for validation) +_RESOURCE_TYPE_FIELDS = ( + "uc_securable", "sql_warehouse", "job", "secret", "serving_endpoint", "database", + "genie_space", +) + + +class UCSecurableResource(BaseModel): + """A Unity Catalog securable resource the agent needs access to.""" + + securable_type: str # TABLE | VOLUME | FUNCTION | CONNECTION + securable_full_name: str # catalog.schema.name (supports ${...} interpolation) + permission: str # SELECT | EXECUTE | READ_VOLUME | WRITE_VOLUME | USE_CONNECTION | MANAGE + + @field_validator("securable_type") + @classmethod + def validate_securable_type(cls, v: str) -> str: + if v not in UC_SECURABLE_TYPES: + raise ValueError(f"securable_type must be one of {UC_SECURABLE_TYPES}, got '{v}'") + return v + + @field_validator("permission") + @classmethod + def validate_permission(cls, v: str) -> str: + if v not in UC_SECURABLE_PERMISSIONS: + raise ValueError(f"permission must be one of {UC_SECURABLE_PERMISSIONS}, got '{v}'") + return v + + +class SqlWarehouseResource(BaseModel): + """A SQL warehouse resource the agent needs access to.""" + + id: str # warehouse ID (supports ${...} interpolation) + permission: str = "CAN_USE" + + +class JobResource(BaseModel): + """A job resource the agent needs access to.""" + + id: str # job ID (supports ${...} interpolation) + permission: str = "CAN_MANAGE_RUN" + + +class SecretResource(BaseModel): + """A secret resource the agent needs access to.""" + + scope: str # secret scope name + key: str # secret 
key name + permission: str = "READ" + + +class ServingEndpointResource(BaseModel): + """A model serving endpoint resource the agent needs access to.""" + + name: str # endpoint name + permission: str = "CAN_QUERY" + + +class DatabaseResource(BaseModel): + """A database resource the agent needs access to.""" + + instance_name: str + database_name: str + permission: str = "CAN_CONNECT_AND_CREATE" + + +class GenieSpaceResource(BaseModel): + """A Genie Space resource — grants access to a Genie space for AI/BI queries.""" + + id: str + permission: str = "CAN_VIEW" + + +class AppResourceSpec(BaseModel): + """A resource declaration for an agent's Databricks App. + + Exactly one resource type field must be set. + """ + + name: str + description: str = "" + uc_securable: Optional[UCSecurableResource] = None + sql_warehouse: Optional[SqlWarehouseResource] = None + job: Optional[JobResource] = None + secret: Optional[SecretResource] = None + serving_endpoint: Optional[ServingEndpointResource] = None + database: Optional[DatabaseResource] = None + genie_space: Optional[GenieSpaceResource] = None + + @model_validator(mode="after") + def exactly_one_resource_type(self) -> AppResourceSpec: + set_types = [f for f in _RESOURCE_TYPE_FIELDS if getattr(self, f) is not None] + if len(set_types) == 0: + raise ValueError(f"Resource '{self.name}': exactly one resource type must be set, got none") + if len(set_types) > 1: + raise ValueError( + f"Resource '{self.name}': exactly one resource type must be set, got {set_types}" + ) + return self + + +class ProjectConfig(BaseModel): + name: str + workspace_path: str = "/Workspace/Shared/apps" + + +class UCConfig(BaseModel): + catalog: str + schema_: str # 'schema' is a Pydantic reserved name + + class Config: + populate_by_name = True + + @classmethod + def from_dict(cls, data: dict) -> UCConfig: + return cls(catalog=data["catalog"], schema_=data["schema"]) + + +class WarehouseConfig(BaseModel): + id: str + + +class AgentSpec(BaseModel): + 
"""Specification for a single agent in the deployment.""" + + name: str + source: str + tables: list[str] = [] # Legacy — auto-converted to uc_securable resources + resources: list[AppResourceSpec] = [] + user_api_scopes: list[str] = [] # OAuth scopes for on-behalf-of-user auth + depends_on: list[str] = [] + url_env_map: dict[str, str] = {} + env: dict[str, str] = {} + + @field_validator("name") + @classmethod + def validate_name(cls, v: str) -> str: + if not re.match(r"^[a-z][a-z0-9-]*$", v): + raise ValueError(f"Agent name must be lowercase alphanumeric with hyphens: {v}") + return v + + +class DeployConfig(BaseModel): + """Parsed and validated deployment configuration.""" + + project: ProjectConfig + uc: UCConfig + warehouse: WarehouseConfig + agents: list[AgentSpec] + + @property + def ordered_agents(self) -> list[AgentSpec]: + """Return agents in topological order (dependencies first).""" + return _topological_sort(self.agents) + + def app_name(self, agent: AgentSpec) -> str: + """Derive the Databricks App name for an agent.""" + return f"{self.project.name}-{agent.name}" + + @classmethod + def from_yaml(cls, path: str | Path) -> DeployConfig: + """Load config from a YAML file, resolving ${...} interpolation.""" + path = Path(path) + if not path.exists(): + raise FileNotFoundError(f"Config file not found: {path}") + + raw = yaml.safe_load(path.read_text()) + return cls.from_dict(raw, base_dir=path.parent) + + @classmethod + def from_dict(cls, raw: dict[str, Any], base_dir: Path | None = None) -> DeployConfig: + """Build config from a raw dict (useful for testing).""" + # Build interpolation context from top-level sections + context = _build_context(raw) + + project = ProjectConfig(**raw["project"]) + uc = UCConfig.from_dict(raw["uc"]) + warehouse = WarehouseConfig(**raw["warehouse"]) + + agents = [] + for agent_data in raw.get("agents", []): + # Interpolate env values + env = {} + for k, v in agent_data.get("env", {}).items(): + env[k] = _interpolate(str(v), 
context) + + # Resolve source path relative to config file + source = agent_data["source"] + if base_dir and not Path(source).is_absolute(): + source = str((base_dir / source).resolve()) + + # Parse explicit resources with interpolation + resources: list[AppResourceSpec] = [] + for r in agent_data.get("resources", []): + resources.append(_parse_resource(r, context)) + + # Legacy backward compat: auto-convert tables → uc_securable resources + tables = agent_data.get("tables", []) + if tables and not resources: + for table in tables: + full_name = _interpolate( + f"${{uc.catalog}}.${{uc.schema}}.{table}", context + ) + resources.append(AppResourceSpec( + name=f"table-{table}", + uc_securable=UCSecurableResource( + securable_type="TABLE", + securable_full_name=full_name, + permission="SELECT", + ), + )) + # Also add warehouse (old PermissionManager always granted it) + wh_id = _interpolate("${warehouse.id}", context) + resources.append(AppResourceSpec( + name="warehouse", + sql_warehouse=SqlWarehouseResource(id=wh_id), + )) + + agents.append( + AgentSpec( + name=agent_data["name"], + source=source, + tables=tables, + resources=resources, + user_api_scopes=agent_data.get("user_api_scopes", []), + depends_on=agent_data.get("depends_on", []), + url_env_map=agent_data.get("url_env_map", {}), + env=env, + ) + ) + + return cls(project=project, uc=uc, warehouse=warehouse, agents=agents) + + +# --------------------------------------------------------------------------- +# Interpolation +# --------------------------------------------------------------------------- + +_INTERP_RE = re.compile(r"\$\{([^}]+)\}") + + +def _build_context(raw: dict[str, Any]) -> dict[str, str]: + """Flatten top-level YAML sections into dotted keys for interpolation. 
+ + Example: {"uc": {"catalog": "foo"}} → {"uc.catalog": "foo"} + """ + ctx: dict[str, str] = {} + for section_key, section_val in raw.items(): + if section_key == "agents": + continue + if isinstance(section_val, dict): + for k, v in section_val.items(): + ctx[f"{section_key}.{k}"] = str(v) + return ctx + + +def _parse_resource(data: dict[str, Any], context: dict[str, str]) -> AppResourceSpec: + """Parse a resource block from YAML, interpolating all string values.""" + name = data["name"] + description = data.get("description", "") + kwargs: dict[str, Any] = {"name": name, "description": description} + + if "uc_securable" in data: + s = data["uc_securable"] + kwargs["uc_securable"] = UCSecurableResource( + securable_type=s["securable_type"], + securable_full_name=_interpolate(str(s["securable_full_name"]), context), + permission=s["permission"], + ) + + if "sql_warehouse" in data: + w = data["sql_warehouse"] + kwargs["sql_warehouse"] = SqlWarehouseResource( + id=_interpolate(str(w["id"]), context), + permission=w.get("permission", "CAN_USE"), + ) + + if "job" in data: + j = data["job"] + kwargs["job"] = JobResource( + id=_interpolate(str(j["id"]), context), + permission=j.get("permission", "CAN_MANAGE_RUN"), + ) + + if "secret" in data: + s = data["secret"] + kwargs["secret"] = SecretResource( + scope=s["scope"], + key=s["key"], + permission=s.get("permission", "READ"), + ) + + if "serving_endpoint" in data: + e = data["serving_endpoint"] + kwargs["serving_endpoint"] = ServingEndpointResource( + name=e["name"], + permission=e.get("permission", "CAN_QUERY"), + ) + + if "database" in data: + d = data["database"] + kwargs["database"] = DatabaseResource( + instance_name=d["instance_name"], + database_name=d["database_name"], + permission=d.get("permission", "CAN_CONNECT_AND_CREATE"), + ) + + if "genie_space" in data: + g = data["genie_space"] + kwargs["genie_space"] = GenieSpaceResource( + id=_interpolate(str(g["id"]), context), + permission=g.get("permission", 
"CAN_VIEW"), + ) + + return AppResourceSpec(**kwargs) + + +def _interpolate(value: str, context: dict[str, str]) -> str: + """Replace ${dotted.key} placeholders with values from context.""" + + def replacer(match: re.Match) -> str: + key = match.group(1) + if key not in context: + raise ValueError(f"Unknown interpolation key: ${{{key}}}") + return context[key] + + return _INTERP_RE.sub(replacer, value) + + +# --------------------------------------------------------------------------- +# Topological sort +# --------------------------------------------------------------------------- + + +def _topological_sort(agents: list[AgentSpec]) -> list[AgentSpec]: + """Kahn's algorithm — returns agents ordered so dependencies come first.""" + by_name: dict[str, AgentSpec] = {a.name: a for a in agents} + + # Validate all depends_on references exist + for a in agents: + for dep in a.depends_on: + if dep not in by_name: + raise ValueError(f"Agent '{a.name}' depends on unknown agent '{dep}'") + + # Build in-degree map + in_degree: dict[str, int] = {a.name: 0 for a in agents} + reverse_deps: dict[str, list[str]] = {a.name: [] for a in agents} + for a in agents: + for dep in a.depends_on: + in_degree[a.name] += 1 + reverse_deps[dep].append(a.name) + + # Start with nodes that have no dependencies + queue: deque[str] = deque(name for name, deg in in_degree.items() if deg == 0) + ordered: list[AgentSpec] = [] + + while queue: + name = queue.popleft() + ordered.append(by_name[name]) + for dependent in reverse_deps[name]: + in_degree[dependent] -= 1 + if in_degree[dependent] == 0: + queue.append(dependent) + + if len(ordered) != len(agents): + raise ValueError("Circular dependency detected among agents") + + return ordered diff --git a/dbx-agent-app/src/dbx_agent_app/deploy/engine.py b/dbx-agent-app/src/dbx_agent_app/deploy/engine.py new file mode 100644 index 00000000..32fdd7b4 --- /dev/null +++ b/dbx-agent-app/src/dbx_agent_app/deploy/engine.py @@ -0,0 +1,523 @@ +""" +DeployEngine — 
orchestrates full multi-agent deployment sequence.

Phases:
  0. Parse + validate config, load existing state
  1. Deploy each agent (create app, upload code, deploy, extract metadata)
  2. Set app resources (uc_securable, warehouse, job, secret, serving_endpoint, database) + app-to-app grants
  3. Verify health (poll /health endpoints)
"""

from __future__ import annotations

import base64
import logging
import os  # NOTE(review): appears unused in this module — confirm before removing
import time
from pathlib import Path
from typing import Any

import yaml
from databricks.sdk import WorkspaceClient
from databricks.sdk.service.apps import (
    App,
    AppAccessControlRequest,
    AppDeployment,
    AppDeploymentMode,
    AppPermissionLevel,
)
from databricks.sdk.service.workspace import ImportFormat

from .config import AgentSpec, DeployConfig
from .state import DeployState

logger = logging.getLogger(__name__)


class DeployEngine:
    """Orchestrates multi-agent deployment to Databricks Apps."""

    def __init__(
        self,
        config: DeployConfig,
        profile: str | None = None,
        state_path: str | Path = ".agents-deploy.json",
        dry_run: bool = False,
        mode: str = "snapshot",
    ):
        self.config = config
        self.profile = profile
        self.dry_run = dry_run
        self.deploy_mode = mode.upper()  # SNAPSHOT or AUTO_SYNC
        self.state_path = Path(state_path)

        # WorkspaceClient is created lazily (see `w`) so a dry run never
        # requires working credentials.
        self._w: WorkspaceClient | None = None
        self.state = DeployState.load(self.state_path)
        self.state.project = config.project.name
        self.state.profile = profile or ""

    @property
    def w(self) -> WorkspaceClient:
        """Lazily-constructed Databricks workspace client."""
        if self._w is None:
            self._w = WorkspaceClient(profile=self.profile) if self.profile else WorkspaceClient()
        return self._w

    # ------------------------------------------------------------------
    # Full deploy sequence
    # ------------------------------------------------------------------

    def deploy(self, agent_filter: str | None = None) -> None:
        """Run the full deploy sequence (or a single agent if filtered)."""
        ordered = self.config.ordered_agents
        if agent_filter:
            ordered = [a for a in ordered if a.name == agent_filter]
            if not ordered:
                raise ValueError(f"Agent '{agent_filter}' not found in config")

        self._print_plan(ordered)
        if self.dry_run:
            return

        # Phase 1: Deploy each agent in dependency order
        for agent in ordered:
            self._deploy_agent(agent)

        # Phase 2: Set app resources (uc_securable, warehouse) + app-to-app grants
        print(f"\n{'='*60}")
        print("App Resources & Permissions")
        print(f"{'='*60}")

        for agent in ordered:
            self._set_app_resources(agent)

        # Grant each agent's service principal CAN_USE on the apps it
        # depends on (skipped when the SP id was never captured).
        for agent in ordered:
            if not agent.depends_on:
                continue
            agent_state = self.state.get_agent(agent.name)
            if not agent_state or not agent_state.sp_client_id:
                continue
            for dep_name in agent.depends_on:
                dep_state = self.state.get_agent(dep_name)
                if dep_state:
                    self._grant_app_to_app(agent_state.sp_client_id, dep_state.app_name)

        # Phase 3: Health checks
        self._verify_health(ordered)

        self.state.save(self.state_path)
        print(f"\nDeploy state saved to {self.state_path}")

    # ------------------------------------------------------------------
    # Status command
    # ------------------------------------------------------------------

    def status(self, as_json: bool = False) -> dict[str, Any] | None:
        """Print a status table for all deployed agents.

        Returns the per-agent status dict when as_json is True, else None.
        """
        if not self.state.agents:
            print("No agents deployed. Run 'dbx-agent-app deploy' first.")
            return None

        results: dict[str, dict[str, str]] = {}
        print(f"\nMulti-Agent Deployment: {self.state.project}")
        print(f"{'Agent':<16} {'Compute':<10} {'Deploy':<12} {'Health':<10} {'Last Deployed'}")
        print("-" * 80)

        for name, agent_state in self.state.agents.items():
            compute = "UNKNOWN"
            deploy = "UNKNOWN"
            health = "UNKNOWN"

            try:
                app = self.w.apps.get(name=agent_state.app_name)
                cs = getattr(app, "compute_status", None)
                compute = str(cs.state.value) if cs and cs.state else "UNKNOWN"
                active = getattr(app, "active_deployment", None)
                if active and active.status:
                    deploy = str(active.status.state.value) if active.status.state else "UNKNOWN"
            except Exception:
                # App lookup failed — most likely deleted out-of-band.
                compute = "NOT_FOUND"
                deploy = "NOT_FOUND"

            # Quick health check (follow redirects for OAuth)
            if agent_state.url:
                try:
                    import httpx  # local import: keep httpx optional for non-status commands

                    resp = httpx.get(
                        f"{agent_state.url}/health",
                        timeout=5.0,
                        follow_redirects=True,
                    )
                    health = "HEALTHY" if resp.status_code == 200 else f"HTTP_{resp.status_code}"
                except Exception:
                    health = "UNREACHABLE"

            deployed_at = agent_state.deployed_at[:16].replace("T", " ") if agent_state.deployed_at else "—"
            print(f"{name:<16} {compute:<10} {deploy:<12} {health:<10} {deployed_at}")
            results[name] = {
                "compute": compute,
                "deploy": deploy,
                "health": health,
                "deployed_at": deployed_at,
            }

        healthy = sum(1 for r in results.values() if r["health"] == "HEALTHY")
        print(f"\n{len(results)} agents deployed. {healthy}/{len(results)} healthy.")

        if as_json:
            return results
        return None

    # ------------------------------------------------------------------
    # Destroy command
    # ------------------------------------------------------------------

    def destroy(self) -> None:
        """Tear down all deployed agents: stop apps, delete apps, clean state."""
        if not self.state.agents:
            print("No agents to destroy.")
            return

        for name, agent_state in list(self.state.agents.items()):
            app_name = agent_state.app_name
            print(f"Destroying {app_name}...")
            # Best-effort: a failed stop/delete is logged but never aborts
            # the teardown of the remaining agents.
            try:
                self.w.apps.stop(name=app_name)
                logger.info("Stopped %s", app_name)
            except Exception as e:
                logger.warning("Failed to stop %s: %s", app_name, e)
            try:
                self.w.apps.delete(name=app_name)
                logger.info("Deleted %s", app_name)
            except Exception as e:
                logger.warning("Failed to delete %s: %s", app_name, e)
            self.state.remove_agent(name)

        self.state.save(self.state_path)
        print("All agents destroyed. State cleaned.")

    # ------------------------------------------------------------------
    # Internal: deploy a single agent
    # ------------------------------------------------------------------

    def _deploy_agent(self, agent: AgentSpec) -> None:
        """Create/upload/deploy one agent app and record its metadata."""
        app_name = self.config.app_name(agent)
        print(f"\n{'='*60}")
        print(f"Deploying: {agent.name} → {app_name}")
        print(f"{'='*60}")

        # 1. Create app if not exists
        self._create_app_if_needed(app_name)

        # 2. Build env vars (static env + url_env_map from already-deployed deps)
        env_vars = dict(agent.env)
        for dep_name, env_key in agent.url_env_map.items():
            dep_url = self.state.get_url(dep_name)
            if dep_url:
                env_vars[env_key] = dep_url
                logger.info("Wired %s=%s", env_key, dep_url)
            else:
                logger.warning("Dependency %s not yet deployed, %s will be empty", dep_name, env_key)

        # 3.
Upload source code to workspace (merges env vars into app.yaml) + workspace_path = f"{self.config.project.workspace_path}/{app_name}" + self._upload_source(agent.source, workspace_path, env_vars=env_vars) + + # 4. Deploy + self._deploy_app(app_name, workspace_path) + + # 5. Extract metadata (URL, SP) + self._extract_metadata(agent.name, app_name) + + def _create_app_if_needed(self, app_name: str) -> None: + """Create the Databricks App if it doesn't exist.""" + try: + self.w.apps.get(name=app_name) + logger.info("App %s already exists", app_name) + except Exception: + print(f" Creating app {app_name}...") + self.w.apps.create_and_wait(App(name=app_name)) + logger.info("Created app %s", app_name) + + def _upload_source( + self, source_dir: str, workspace_path: str, env_vars: dict[str, str] | None = None + ) -> None: + """Upload local source directory to Databricks workspace. + + If env_vars is provided, merges them into app.yaml before uploading. + """ + source = Path(source_dir) + if not source.exists(): + raise FileNotFoundError(f"Source directory not found: {source}") + + print(f" Uploading {source} → {workspace_path}") + + # Collect files to upload, tracking which directories need creation + dirs_needed: set[str] = {workspace_path} + files_to_upload: list[tuple[Path, str, bytes]] = [] + + for local_file in source.rglob("*"): + if local_file.is_dir(): + continue + # Skip common non-deployable files + if any( + part.startswith(".") + for part in local_file.relative_to(source).parts + if part != "." 
+ ): + continue + if local_file.name == "__pycache__" or local_file.suffix == ".pyc": + continue + + rel_path = local_file.relative_to(source) + remote_path = f"{workspace_path}/{rel_path}" + + # Track parent directories + parent = str(Path(remote_path).parent) + while parent != workspace_path and parent.startswith(workspace_path): + dirs_needed.add(parent) + parent = str(Path(parent).parent) + + # Merge env vars into app.yaml before uploading + if rel_path.name == "app.yaml" and env_vars: + content = self._merge_app_yaml(local_file, env_vars) + else: + content = local_file.read_bytes() + + files_to_upload.append((rel_path, remote_path, content)) + + # Create all directories first (sorted so parents come before children) + for dir_path in sorted(dirs_needed): + try: + self.w.workspace.mkdirs(path=dir_path) + except Exception: + pass # Directory may already exist + + # Upload files + file_count = 0 + for rel_path, remote_path, content in files_to_upload: + try: + self.w.workspace.import_( + path=remote_path, + content=base64.b64encode(content).decode(), + format=ImportFormat.AUTO, + overwrite=True, + ) + file_count += 1 + except Exception as e: + logger.warning("Failed to upload %s: %s", rel_path, e) + + print(f" Uploaded {file_count} files") + + def _merge_app_yaml(self, app_yaml_path: Path, env_vars: dict[str, str]) -> bytes: + """Merge env vars from agents.yaml into the agent's app.yaml.""" + app_config = yaml.safe_load(app_yaml_path.read_text()) or {} + # Build env list in app.yaml format: [{name: X, value: Y}, ...] + env_list = [{"name": k, "value": v} for k, v in env_vars.items()] + app_config["env"] = env_list + merged = yaml.dump(app_config, default_flow_style=False, sort_keys=False) + logger.info("Merged %d env vars into app.yaml", len(env_vars)) + return merged.encode() + + def _deploy_app(self, app_name: str, workspace_path: str) -> None: + """Deploy (or redeploy) the app. 
Env vars are already in app.yaml."""
        # Fall back to SNAPSHOT if self.deploy_mode is not a valid enum name.
        mode = getattr(AppDeploymentMode, self.deploy_mode, AppDeploymentMode.SNAPSHOT)
        print(f"  Deploying {app_name} (mode={mode.value})...")

        deployment = AppDeployment(
            source_code_path=workspace_path,
            mode=mode,
        )

        result = self.w.apps.deploy_and_wait(app_name=app_name, app_deployment=deployment)
        status = getattr(result, "status", None)
        state = getattr(status, "state", "UNKNOWN") if status else "UNKNOWN"
        print(f"  Deploy status: {state}")

    def _extract_metadata(self, agent_name: str, app_name: str) -> None:
        """Extract URL and SP info from the deployed app.

        On failure, still records the app name so later phases can proceed.
        """
        try:
            app = self.w.apps.get(name=app_name)
            url = getattr(app, "url", "") or ""
            sp_name = getattr(app, "service_principal_name", "") or ""
            sp_id = str(getattr(app, "service_principal_client_id", "") or "")

            self.state.set_agent(
                agent_name,
                app_name=app_name,
                url=url,
                sp_name=sp_name,
                sp_client_id=sp_id,
            )
            print(f"  URL: {url}")
            print(f"  SP:  {sp_name}")
        except Exception as e:
            logger.warning("Failed to extract metadata for %s: %s", app_name, e)
            self.state.set_agent(agent_name, app_name=app_name)

    # ------------------------------------------------------------------
    # Health check
    # ------------------------------------------------------------------

    def _verify_health(self, agents: list[AgentSpec]) -> None:
        """Poll /health for each agent using workspace auth, retrying up to 10 times."""
        import httpx  # local import: keep httpx optional for other commands

        print(f"\n{'='*60}")
        print("Health Checks")
        print(f"{'='*60}")

        # Get auth token from the workspace client for authenticated health checks
        headers = {}
        try:
            token = self.w.config.authenticate()
            if callable(token):
                auth_headers = token()
                headers = dict(auth_headers) if auth_headers else {}
        except Exception:
            # Unauthenticated checks still work for public /health endpoints.
            pass

        for agent in agents:
            agent_state = self.state.get_agent(agent.name)
            if not agent_state or not agent_state.url:
                print(f"  {agent.name:<20} NO URL — skipping health check")
                continue

            url = f"{agent_state.url}/health"
            healthy = False
            # Up to 10 attempts, 5 s apart (no sleep after the final attempt).
            for attempt in range(1, 11):
                try:
                    resp = httpx.get(url, headers=headers, timeout=10.0, follow_redirects=True)
                    if resp.status_code == 200:
                        healthy = True
                        break
                except Exception:
                    pass
                if attempt < 10:
                    time.sleep(5)

            status = "HEALTHY" if healthy else "UNHEALTHY"
            print(f"  {agent.name:<20} {status:<12} {agent_state.url}")

    # ------------------------------------------------------------------
    # App resources — all types via REST API
    # ------------------------------------------------------------------

    def _set_app_resources(self, agent: AgentSpec) -> None:
        """Declare all resources on the app via REST API.

        Uses the REST API directly rather than SDK enums because the SDK's
        uc_securable type enum only covers VOLUME. TABLE, FUNCTION, and
        CONNECTION require REST (same approach as the Terraform provider).
        """
        app_name = self.config.app_name(agent)
        if not agent.resources and not agent.user_api_scopes:
            return

        # Translate each AppResourceSpec into the Apps API resource payload.
        # AppResourceSpec guarantees exactly one resource type field is set.
        payload_resources = []
        for r in agent.resources:
            res: dict[str, Any] = {"name": r.name}
            if r.description:
                res["description"] = r.description

            if r.uc_securable:
                res["uc_securable"] = {
                    "securable_full_name": r.uc_securable.securable_full_name,
                    "securable_type": r.uc_securable.securable_type,
                    "permission": r.uc_securable.permission,
                }
            elif r.sql_warehouse:
                res["sql_warehouse"] = {
                    "id": r.sql_warehouse.id,
                    "permission": r.sql_warehouse.permission,
                }
            elif r.job:
                res["job"] = {
                    "id": r.job.id,
                    "permission": r.job.permission,
                }
            elif r.secret:
                res["secret"] = {
                    "scope": r.secret.scope,
                    "key": r.secret.key,
                    "permission": r.secret.permission,
                }
            elif r.serving_endpoint:
                res["serving_endpoint"] = {
                    "name": r.serving_endpoint.name,
                    "permission": r.serving_endpoint.permission,
                }
            elif r.database:
                res["database"] = {
                    "instance_name": r.database.instance_name,
                    "database_name": r.database.database_name,
                    "permission": r.database.permission,
                }
            elif r.genie_space:
                res["genie_space"] = {
                    "id": r.genie_space.id,
                    "permission": r.genie_space.permission,
                }
            else:
                continue

            payload_resources.append(res)

        if not payload_resources and not agent.user_api_scopes:
            return

        body: dict[str, Any] = {"name": app_name}
        if payload_resources:
            body["resources"] = payload_resources
        if agent.user_api_scopes:
            body["user_api_scopes"] = agent.user_api_scopes

        try:
            self.w.api_client.do(
                "PATCH",
                f"/api/2.0/apps/{app_name}",
                body=body,
            )
            print(f"  {agent.name:<20} {len(payload_resources)} resource(s) set")
        except Exception as e:
            logger.warning("Failed to set resources on %s: %s", app_name, e)
            print(f"  {agent.name:<20} FAILED setting resources ({e})")

    def _grant_app_to_app(self, supervisor_sp_id: str, target_app_name: str) -> None:
        """Grant CAN_USE on sub-agent app to the supervisor's service principal."""
        try:
            self.w.apps.update_permissions(
                app_name=target_app_name,
                access_control_list=[
                    AppAccessControlRequest(
                        service_principal_name=supervisor_sp_id,
                        permission_level=AppPermissionLevel.CAN_USE,
                    )
                ],
            )
            logger.info("Granted CAN_USE on %s to %s", target_app_name, supervisor_sp_id)
        except Exception as e:
            # Best-effort: a failed grant should not abort the deploy.
            logger.warning("App-to-app grant failed for %s → %s: %s", supervisor_sp_id, target_app_name, e)

    # ------------------------------------------------------------------
    # Dry run
    # ------------------------------------------------------------------

    def _print_plan(self, agents: list[AgentSpec]) -> None:
        """Print what would be deployed without executing."""
        print(f"\nDeployment Plan: {self.config.project.name}")
        print(f"Profile: {self.profile or 'default'}")
        print(f"Mode: {self.deploy_mode}")
        print(f"UC: {self.config.uc.catalog}.{self.config.uc.schema_}")
        print(f"Warehouse: {self.config.warehouse.id}")
        print()
        print(f"{'#':<4} {'Agent':<18} {'App Name':<35}
{'Resources':<16} {'Deps'}") + print("-" * 100) + for i, agent in enumerate(agents, 1): + app_name = self.config.app_name(agent) + res_count = f"{len(agent.resources)} resource(s)" if agent.resources else "—" + deps = ", ".join(agent.depends_on) or "—" + print(f"{i:<4} {agent.name:<18} {app_name:<35} {res_count:<16} {deps}") + + if self.dry_run: + print("\n[DRY RUN] No changes will be made.") diff --git a/dbx-agent-app/src/dbx_agent_app/deploy/state.py b/dbx-agent-app/src/dbx_agent_app/deploy/state.py new file mode 100644 index 00000000..d49896f1 --- /dev/null +++ b/dbx-agent-app/src/dbx_agent_app/deploy/state.py @@ -0,0 +1,78 @@ +""" +Persistent deploy state stored in .agents-deploy.json. + +Tracks deployed app names, URLs, service principal IDs, and timestamps +to enable idempotent re-deploys and clean teardown. +""" + +from __future__ import annotations + +import json +from datetime import datetime, timezone +from pathlib import Path +from typing import Any + +from pydantic import BaseModel + + +class AgentState(BaseModel): + """Tracked state for a single deployed agent.""" + + app_name: str + url: str = "" + sp_name: str = "" + sp_client_id: str = "" + deployed_at: str = "" + + +class DeployState(BaseModel): + """Full deployment state persisted to disk.""" + + project: str = "" + profile: str = "" + agents: dict[str, AgentState] = {} + + @classmethod + def load(cls, path: str | Path) -> DeployState: + """Load state from disk, or return empty state if file doesn't exist.""" + path = Path(path) + if not path.exists(): + return cls() + data = json.loads(path.read_text()) + return cls(**data) + + def save(self, path: str | Path) -> None: + """Write state to disk.""" + path = Path(path) + path.write_text(json.dumps(self.model_dump(), indent=2) + "\n") + + def set_agent( + self, + name: str, + *, + app_name: str, + url: str = "", + sp_name: str = "", + sp_client_id: str = "", + ) -> None: + """Record or update state for a deployed agent.""" + self.agents[name] = 
AgentState( + app_name=app_name, + url=url, + sp_name=sp_name, + sp_client_id=sp_client_id, + deployed_at=datetime.now(timezone.utc).isoformat(), + ) + + def get_agent(self, name: str) -> AgentState | None: + """Get state for a deployed agent, or None if not tracked.""" + return self.agents.get(name) + + def get_url(self, name: str) -> str: + """Get the deployed URL for an agent.""" + agent = self.agents.get(name) + return agent.url if agent else "" + + def remove_agent(self, name: str) -> None: + """Remove agent from tracked state.""" + self.agents.pop(name, None) diff --git a/dbx-agent-app/src/dbx_agent_app/discovery/__init__.py b/dbx-agent-app/src/dbx_agent_app/discovery/__init__.py new file mode 100644 index 00000000..d6d04008 --- /dev/null +++ b/dbx-agent-app/src/dbx_agent_app/discovery/__init__.py @@ -0,0 +1,24 @@ +""" +Agent discovery for Databricks Apps. + +This module provides clients and utilities for discovering agent-enabled +Databricks Apps that expose A2A protocol agent cards. +""" + +from .agent_discovery import ( + AgentDiscovery, + DiscoveredAgent, + AgentDiscoveryResult, +) +from .a2a_client import ( + A2AClient, + A2AClientError, +) + +__all__ = [ + "AgentDiscovery", + "DiscoveredAgent", + "AgentDiscoveryResult", + "A2AClient", + "A2AClientError", +] diff --git a/dbx-agent-app/src/dbx_agent_app/discovery/a2a_client.py b/dbx-agent-app/src/dbx_agent_app/discovery/a2a_client.py new file mode 100644 index 00000000..b989cd3f --- /dev/null +++ b/dbx-agent-app/src/dbx_agent_app/discovery/a2a_client.py @@ -0,0 +1,281 @@ +""" +A2A Client for agent-to-agent communication. + +Implements the A2A protocol for discovering and communicating with peer agents. 
+""" + +import json +import uuid +import logging +from typing import Dict, Any, Optional, AsyncIterator + +import httpx + +logger = logging.getLogger(__name__) + + +class A2AClientError(Exception): + """Raised when an A2A operation fails.""" + pass + + +class A2AClient: + """ + Async client for A2A protocol communication with peer agents. + + Usage: + async with A2AClient() as client: + card = await client.fetch_agent_card("https://app.databricksapps.com") + result = await client.send_message("https://app.databricksapps.com/api/a2a", "Hello") + """ + + def __init__(self, timeout: float = 60.0): + """ + Initialize A2A client. + + Args: + timeout: Request timeout in seconds + """ + self.timeout = timeout + self._client: Optional[httpx.AsyncClient] = None + + async def __aenter__(self): + self._client = httpx.AsyncClient( + timeout=self.timeout, + follow_redirects=True, + ) + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + if self._client: + await self._client.aclose() + + def _auth_headers( + self, + auth_token: Optional[str] = None, + user_context: Optional["UserContext"] = None, + ) -> Dict[str, str]: + """Build authentication headers, optionally forwarding user identity.""" + headers = {"Content-Type": "application/json"} + if auth_token: + headers["Authorization"] = f"Bearer {auth_token}" + if user_context is not None: + headers.update(user_context.as_forwarded_headers()) + return headers + + async def fetch_agent_card( + self, + base_url: str, + auth_token: Optional[str] = None, + ) -> Dict[str, Any]: + """ + Fetch an agent's A2A protocol agent card. + + Tries /.well-known/agent.json first, then /card as fallback. + Handles OAuth redirects gracefully (returns error instead of following). 
+ + Args: + base_url: Base URL of the agent application + auth_token: Optional OAuth token for authenticated requests + + Returns: + Agent card JSON data + + Raises: + A2AClientError: If agent card cannot be fetched + + Example: + >>> async with A2AClient() as client: + >>> card = await client.fetch_agent_card("https://app.databricksapps.com") + >>> print(card["name"], card["description"]) + """ + if not self._client: + raise A2AClientError("Client not initialized. Use async context manager.") + + headers = {} + if auth_token: + headers["Authorization"] = f"Bearer {auth_token}" + + # Use a client that doesn't follow redirects to detect OAuth flows + async with httpx.AsyncClient(timeout=self.timeout, follow_redirects=False) as probe_client: + for path in ["/.well-known/agent.json", "/card"]: + try: + url = base_url.rstrip("/") + path + response = await probe_client.get(url, headers=headers) + + # OAuth redirect detected - app requires interactive auth + if response.status_code in (301, 302, 303, 307, 308): + logger.debug(f"OAuth redirect detected for {url}") + continue + + if response.status_code == 200: + if not response.text or response.text.isspace(): + logger.debug(f"Empty response body for {url}") + continue + return response.json() + + except Exception as e: + logger.debug(f"Agent card fetch failed for {url}: {e}") + continue + + raise A2AClientError(f"Could not fetch agent card from {base_url}") + + async def _jsonrpc_call( + self, + url: str, + method: str, + params: Dict[str, Any], + auth_token: Optional[str] = None, + user_context: Optional["UserContext"] = None, + ) -> Dict[str, Any]: + """ + Send a JSON-RPC 2.0 request to an agent. 
+ + Args: + url: A2A endpoint URL + method: JSON-RPC method name (e.g., "message/send") + params: Method parameters + auth_token: Optional authentication token + user_context: Optional user context to forward via X-Forwarded-* headers + + Returns: + JSON-RPC result + + Raises: + A2AClientError: If request fails or returns error + """ + if not self._client: + raise A2AClientError("Client not initialized. Use async context manager.") + + payload = { + "jsonrpc": "2.0", + "id": str(uuid.uuid4()), + "method": method, + "params": params, + } + + try: + response = await self._client.post( + url, + json=payload, + headers=self._auth_headers(auth_token, user_context), + ) + response.raise_for_status() + result = response.json() + + if "error" in result: + error = result["error"] + raise A2AClientError( + f"A2A error: {error.get('message', 'Unknown')} " + f"(code: {error.get('code')})" + ) + + return result.get("result", {}) + + except httpx.TimeoutException as e: + raise A2AClientError(f"Request to {url} timed out: {e}") + except httpx.HTTPStatusError as e: + raise A2AClientError( + f"HTTP error from {url}: {e.response.status_code}" + ) + except json.JSONDecodeError as e: + raise A2AClientError(f"Invalid JSON from {url}: {e}") + + async def send_message( + self, + agent_url: str, + message: str, + context_id: Optional[str] = None, + auth_token: Optional[str] = None, + user_context: Optional["UserContext"] = None, + ) -> Dict[str, Any]: + """ + Send a message to a peer agent using A2A protocol. 
+ + Args: + agent_url: Agent's A2A endpoint URL + message: Text message to send + context_id: Optional conversation context ID + auth_token: Optional authentication token + user_context: Optional user context to forward via X-Forwarded-* headers + + Returns: + Agent's response + + Example: + >>> async with A2AClient() as client: + >>> response = await client.send_message( + >>> "https://app.databricksapps.com/api/a2a", + >>> "What are your capabilities?", + >>> user_context=request.user_context, + >>> ) + """ + params: Dict[str, Any] = { + "message": { + "messageId": str(uuid.uuid4()), + "role": "user", + "parts": [{"text": message}], + } + } + if context_id: + params["message"]["contextId"] = context_id + + return await self._jsonrpc_call( + agent_url, "message/send", params, auth_token, user_context + ) + + async def send_streaming_message( + self, + agent_url: str, + message: str, + auth_token: Optional[str] = None, + user_context: Optional["UserContext"] = None, + ) -> AsyncIterator[Dict[str, Any]]: + """ + Send a streaming message and yield SSE events. + + Args: + agent_url: Agent's A2A endpoint URL + message: Text message to send + auth_token: Optional authentication token + user_context: Optional user context to forward via X-Forwarded-* headers + + Yields: + SSE events from the agent's response stream + + Example: + >>> async with A2AClient() as client: + >>> async for event in client.send_streaming_message(url, "Analyze this"): + >>> print(event) + """ + if not self._client: + raise A2AClientError("Client not initialized. 
Use async context manager.") + + stream_url = agent_url.rstrip("/") + "/stream" + + payload = { + "jsonrpc": "2.0", + "id": str(uuid.uuid4()), + "method": "message/stream", + "params": { + "message": { + "messageId": str(uuid.uuid4()), + "role": "user", + "parts": [{"text": message}], + } + }, + } + + async with self._client.stream( + "POST", + stream_url, + json=payload, + headers=self._auth_headers(auth_token, user_context), + ) as response: + response.raise_for_status() + async for line in response.aiter_lines(): + if line.startswith("data: "): + try: + yield json.loads(line[6:]) + except json.JSONDecodeError: + continue diff --git a/dbx-agent-app/src/dbx_agent_app/discovery/agent_discovery.py b/dbx-agent-app/src/dbx_agent_app/discovery/agent_discovery.py new file mode 100644 index 00000000..1563b304 --- /dev/null +++ b/dbx-agent-app/src/dbx_agent_app/discovery/agent_discovery.py @@ -0,0 +1,253 @@ +""" +Agent discovery for Databricks Apps. + +Discovers agent-enabled Databricks Apps by scanning workspace apps +and probing for A2A protocol agent cards. +""" + +import asyncio +import logging +from typing import List, Dict, Any, Optional +from dataclasses import dataclass + +from databricks.sdk import WorkspaceClient + +from .a2a_client import A2AClient, A2AClientError + +logger = logging.getLogger(__name__) + +# Agent card probe paths and timeout +AGENT_CARD_PATHS = ["/.well-known/agent.json", "/card"] +AGENT_CARD_PROBE_TIMEOUT = 5.0 + + +@dataclass +class DiscoveredAgent: + """ + An agent discovered from a Databricks App. 
@dataclass
class DiscoveredAgent:
    """
    An agent discovered from a Databricks App.

    Attributes:
        name: Agent name (from agent card or app name)
        endpoint_url: Agent's base URL
        description: Agent description (from agent card)
        capabilities: Comma-separated list of capabilities
        protocol_version: A2A protocol version
        app_name: Name of the backing Databricks App
    """
    name: str
    endpoint_url: str
    app_name: str
    description: Optional[str] = None
    capabilities: Optional[str] = None
    protocol_version: Optional[str] = None


@dataclass
class AgentDiscoveryResult:
    """
    Results from agent discovery operation.

    Attributes:
        agents: List of discovered agents
        errors: List of error messages encountered during discovery
    """
    agents: List[DiscoveredAgent]
    errors: List[str]


class AgentDiscovery:
    """
    Discovers agent-enabled Databricks Apps in a workspace.

    Scans running Databricks Apps and probes for A2A protocol agent cards
    to identify which apps are agents.

    Usage:
        discovery = AgentDiscovery(profile="my-profile")
        result = await discovery.discover_agents()
        for agent in result.agents:
            print(f"Found agent: {agent.name} at {agent.endpoint_url}")
    """

    def __init__(self, profile: Optional[str] = None):
        """
        Initialize agent discovery.

        Args:
            profile: Databricks CLI profile name (uses default if not specified)
        """
        self.profile = profile
        # Workspace bearer token captured during app listing, reused for probes.
        self._workspace_token: Optional[str] = None

    async def discover_agents(self) -> AgentDiscoveryResult:
        """
        Discover all agent-enabled Databricks Apps in the workspace.

        Returns:
            AgentDiscoveryResult with discovered agents and any errors

        Example:
            >>> discovery = AgentDiscovery(profile="my-profile")
            >>> result = await discovery.discover_agents()
            >>> print(f"Found {len(result.agents)} agents")
        """
        agents: List[DiscoveredAgent] = []
        errors: List[str] = []

        try:
            app_list = await self._list_workspace_apps()
        except Exception as e:
            logger.error("Workspace app listing failed: %s", e)
            return AgentDiscoveryResult(
                agents=[],
                errors=[f"Failed to list workspace apps: {e}"],
            )

        if not app_list:
            return AgentDiscoveryResult(agents=[], errors=[])

        # Probe each running app for agent card in parallel
        probe_tasks = [
            self._probe_app_for_agent(app_info)
            for app_info in app_list
            if app_info.get("url")
        ]

        if probe_tasks:
            probe_results = await asyncio.gather(
                *probe_tasks, return_exceptions=True
            )

            for result in probe_results:
                if isinstance(result, Exception):
                    errors.append(str(result))
                elif result is not None:
                    agents.append(result)

        logger.info(
            "Agent discovery: %d apps checked, %d agents found",
            len(app_list), len(agents)
        )

        return AgentDiscoveryResult(agents=agents, errors=errors)

    async def _list_workspace_apps(self) -> List[Dict[str, Any]]:
        """
        Enumerate Databricks Apps in the workspace.

        Returns:
            List of running apps with name, url, owner
        """
        def _list_sync() -> tuple:
            # Runs on a worker thread — the Databricks SDK is synchronous.
            client = (
                WorkspaceClient(profile=self.profile)
                if self.profile
                else WorkspaceClient()
            )

            # Extract auth token for cross-app requests
            auth_headers = client.config.authenticate()
            auth_val = auth_headers.get("Authorization", "")
            token = auth_val[7:] if auth_val.startswith("Bearer ") else None

            results = []
            for app in client.apps.list():
                # Check if app is running via compute_status or deployment status
                compute_state = None
                cs = getattr(app, "compute_status", None)
                if cs:
                    compute_state = str(getattr(cs, "state", ""))

                deploy_state = None
                dep = getattr(app, "active_deployment", None)
                if dep:
                    dep_status = getattr(dep, "status", None)
                    if dep_status:
                        deploy_state = str(getattr(dep_status, "state", ""))

                app_url = getattr(app, "url", None) or ""
                app_url = app_url.rstrip("/") if app_url else ""

                results.append({
                    "name": app.name,
                    "url": app_url,
                    "owner": getattr(app, "creator", None) or getattr(app, "updater", None),
                    "compute_state": compute_state,
                    "deploy_state": deploy_state,
                })

            return results, token

        # FIX: get_running_loop() — get_event_loop() inside a coroutine is
        # deprecated since Python 3.10 and we are always called from one.
        loop = asyncio.get_running_loop()
        result_tuple = await loop.run_in_executor(None, _list_sync)
        all_apps, workspace_token = result_tuple

        # Store token for probing
        self._workspace_token = workspace_token

        # Filter to running apps
        running = [
            a for a in all_apps
            if a.get("url") and (
                "ACTIVE" in (a.get("compute_state") or "")
                or "SUCCEEDED" in (a.get("deploy_state") or "")
            )
        ]

        logger.info(
            "Workspace apps: %d total, %d running",
            len(all_apps), len(running)
        )

        return running

    async def _probe_app_for_agent(
        self,
        app_info: Dict[str, Any],
    ) -> Optional[DiscoveredAgent]:
        """
        Probe a Databricks App for an A2A agent card.

        Args:
            app_info: App metadata from workspace listing

        Returns:
            DiscoveredAgent if agent card found, None otherwise
        """
        app_url = app_info["url"]
        app_name = app_info["name"]

        token = self._workspace_token
        agent_card = None

        try:
            logger.debug("Probing app '%s' at %s", app_name, app_url)
            async with A2AClient(timeout=AGENT_CARD_PROBE_TIMEOUT) as client:
                agent_card = await client.fetch_agent_card(app_url, auth_token=token)
            logger.info("Found agent card for '%s'", app_name)
        except A2AClientError as e:
            # Expected for non-agent apps — not an error.
            logger.debug("No agent card for '%s': %s", app_name, e)
            return None
        except Exception as e:
            logger.warning("Probe failed for '%s': %s", app_name, e)
            return None

        if not agent_card:
            return None

        # Extract capabilities — cards may publish a dict of flags or a list.
        capabilities_list = []
        caps = agent_card.get("capabilities")
        if isinstance(caps, dict):
            capabilities_list = list(caps.keys())
        elif isinstance(caps, list):
            capabilities_list = caps

        return DiscoveredAgent(
            name=agent_card.get("name", app_name),
            endpoint_url=app_url,
            app_name=app_name,
            description=agent_card.get("description"),
            capabilities=",".join(capabilities_list) if capabilities_list else None,
            protocol_version=agent_card.get("protocolVersion"),
        )


# --- src/dbx_agent_app/mcp/__init__.py ---
"""
Model Context Protocol (MCP) support.

This module provides utilities for integrating agents with MCP servers
and exposing UC Functions as MCP tools.
"""

from .mcp_server import MCPServer, MCPServerConfig, setup_mcp_server
from .uc_functions import UCFunctionAdapter

__all__ = ["MCPServer", "MCPServerConfig", "setup_mcp_server", "UCFunctionAdapter"]
+""" + +from .mcp_server import MCPServer, MCPServerConfig, setup_mcp_server +from .uc_functions import UCFunctionAdapter + +__all__ = ["MCPServer", "MCPServerConfig", "setup_mcp_server", "UCFunctionAdapter"] diff --git a/dbx-agent-app/src/dbx_agent_app/mcp/mcp_server.py b/dbx-agent-app/src/dbx_agent_app/mcp/mcp_server.py new file mode 100644 index 00000000..98a456fb --- /dev/null +++ b/dbx-agent-app/src/dbx_agent_app/mcp/mcp_server.py @@ -0,0 +1,196 @@ +""" +MCP server implementation for agents. + +Provides an MCP server that exposes agent tools via the Model Context Protocol. +Works with any FastAPI app. +""" + +import json +import logging +from typing import Dict, Any, List, Optional +from dataclasses import dataclass + +from fastapi import Request + +logger = logging.getLogger(__name__) + + +@dataclass +class MCPServerConfig: + """ + Configuration for MCP server. + + Attributes: + name: Server name + version: Server version + description: Server description + """ + name: str + version: str = "1.0.0" + description: str = "MCP server for agent tools" + + +class MCPServer: + """ + MCP server that exposes tools via JSON-RPC. + + Accepts tools as dicts with name, description, parameters, function keys. + """ + + def __init__(self, tools, config: MCPServerConfig): + """ + Initialize MCP server. + + Args: + tools: List of ToolDefinition objects or dicts with name/description/parameters/function + config: MCP server configuration + """ + self._tools = tools + self.config = config + + def setup_routes(self, app): + """ + Set up MCP protocol routes on the FastAPI app. 
+ + Adds: + - POST /api/mcp - MCP JSON-RPC endpoint + - GET /api/mcp/tools - List available tools + """ + + @app.post("/api/mcp") + async def mcp_jsonrpc(request: Request): + """MCP JSON-RPC endpoint.""" + try: + body = await request.json() + method = body.get("method") + params = body.get("params", {}) + request_id = body.get("id") + + if method == "tools/list": + result = await self._list_tools() + elif method == "tools/call": + result = await self._call_tool(params) + elif method == "server/info": + result = self._server_info() + else: + return { + "jsonrpc": "2.0", + "id": request_id, + "error": { + "code": -32601, + "message": f"Method not found: {method}" + } + } + + return { + "jsonrpc": "2.0", + "id": request_id, + "result": result + } + + except Exception as e: + logger.error("MCP request failed: %s", e) + return { + "jsonrpc": "2.0", + "id": body.get("id") if isinstance(body, dict) else None, + "error": { + "code": -32603, + "message": str(e) + } + } + + @app.get("/api/mcp/tools") + async def list_mcp_tools(): + """List available MCP tools.""" + return await self._list_tools() + + def _get_tool_name(self, tool) -> str: + return tool.name if hasattr(tool, "name") else tool["name"] + + def _get_tool_description(self, tool) -> str: + return tool.description if hasattr(tool, "description") else tool["description"] + + def _get_tool_parameters(self, tool) -> Dict[str, Any]: + return tool.parameters if hasattr(tool, "parameters") else tool.get("parameters", {}) + + def _get_tool_function(self, tool): + return tool.function if hasattr(tool, "function") else tool["function"] + + def _server_info(self) -> Dict[str, Any]: + """Get MCP server information.""" + return { + "name": self.config.name, + "version": self.config.version, + "description": self.config.description, + "protocol_version": "1.0", + } + + async def _list_tools(self) -> Dict[str, Any]: + """List all available tools in MCP format.""" + tools = [] + + for tool in self._tools: + mcp_tool = { + "name": 
self._get_tool_name(tool), + "description": self._get_tool_description(tool), + "inputSchema": { + "type": "object", + "properties": {}, + "required": [] + } + } + + for param_name, param_spec in self._get_tool_parameters(tool).items(): + param_type = param_spec.get("type", "string") + mcp_tool["inputSchema"]["properties"][param_name] = { + "type": param_type, + "description": param_spec.get("description", "") + } + if param_spec.get("required", False): + mcp_tool["inputSchema"]["required"].append(param_name) + + tools.append(mcp_tool) + + return {"tools": tools} + + async def _call_tool(self, params: Dict[str, Any]) -> Dict[str, Any]: + """Call a tool via MCP.""" + tool_name = params.get("name") + arguments = params.get("arguments", {}) + + tool_def = None + for tool in self._tools: + if self._get_tool_name(tool) == tool_name: + tool_def = tool + break + + if not tool_def: + raise ValueError(f"Tool not found: {tool_name}") + + try: + result = await self._get_tool_function(tool_def)(**arguments) + return {"result": result} + except Exception as e: + logger.error("Tool execution failed: %s", e) + raise + + +def setup_mcp_server(tools, config: Optional[MCPServerConfig] = None, fastapi_app=None): + """ + Set up MCP server for an agent. + + Args: + tools: List of tool dicts with name/description/parameters/function keys + config: Optional MCP server configuration + fastapi_app: FastAPI app to add routes to (required) + + Returns: + MCPServer instance + """ + if config is None: + config = MCPServerConfig(name="mcp-server") + + server = MCPServer(tools, config) + server.setup_routes(fastapi_app) + + return server diff --git a/dbx-agent-app/src/dbx_agent_app/mcp/uc_functions.py b/dbx-agent-app/src/dbx_agent_app/mcp/uc_functions.py new file mode 100644 index 00000000..f1659960 --- /dev/null +++ b/dbx-agent-app/src/dbx_agent_app/mcp/uc_functions.py @@ -0,0 +1,240 @@ +""" +Unity Catalog Functions adapter for MCP. 
# --- src/dbx_agent_app/mcp/uc_functions.py ---
"""
Unity Catalog Functions adapter for MCP.

Automatically discovers UC Functions and exposes them as MCP tools.
"""

import logging
from typing import List, Dict, Any, Optional, TYPE_CHECKING

if TYPE_CHECKING:
    # Type-checking-only import; the runtime import is deferred to
    # _get_client so pure helpers work without databricks-sdk installed.
    from databricks.sdk import WorkspaceClient

logger = logging.getLogger(__name__)


class UCFunctionAdapter:
    """
    Adapter for Unity Catalog Functions to MCP protocol.

    Discovers UC Functions and converts them to MCP tool format for
    use with agents.

    Usage:
        adapter = UCFunctionAdapter(profile="my-profile")
        tools = adapter.discover_functions(catalog="main", schema="functions")
    """

    def __init__(self, profile: Optional[str] = None):
        """
        Initialize UC Functions adapter.

        Args:
            profile: Databricks CLI profile name
        """
        self.profile = profile
        self._client: Optional["WorkspaceClient"] = None

    def _get_client(self) -> "WorkspaceClient":
        """Get or create workspace client (lazy SDK import)."""
        if self._client is None:
            # FIX: deferred import — previously a module-level import made
            # even the pure type-mapping helpers require databricks-sdk.
            from databricks.sdk import WorkspaceClient
            self._client = (
                WorkspaceClient(profile=self.profile)
                if self.profile
                else WorkspaceClient()
            )
        return self._client

    def discover_functions(
        self,
        catalog: str,
        schema: str,
        name_pattern: Optional[str] = None,
    ) -> List[Dict[str, Any]]:
        """
        Discover UC Functions and convert to MCP tool format.

        Args:
            catalog: UC catalog name
            schema: UC schema name
            name_pattern: Optional substring filter on function names
                (DOC FIX: previously documented as a SQL LIKE pattern, but the
                implementation is a plain `in` substring test)

        Returns:
            List of tool definitions in MCP format

        Example:
            >>> adapter = UCFunctionAdapter()
            >>> tools = adapter.discover_functions("main", "functions")
            >>> for tool in tools:
            ...     print(tool["name"], tool["description"])
        """
        client = self._get_client()
        tools = []

        try:
            functions = client.functions.list(
                catalog_name=catalog,
                schema_name=schema,
            )

            for func in functions:
                # Skip system functions.
                # NOTE(review): func.name is the short name and normally
                # contains no dot, so this guard likely never fires —
                # confirm whether full_name was intended.
                if func.name.startswith("system."):
                    continue

                # Apply name pattern filter (substring match)
                if name_pattern and name_pattern not in func.name:
                    continue

                # Convert to MCP tool format
                tool = self._convert_function_to_tool(func)
                if tool:
                    tools.append(tool)

            logger.info(
                "Discovered %d UC Functions from %s.%s", len(tools), catalog, schema
            )

        except Exception as e:
            logger.error("Failed to discover UC Functions: %s", e)

        return tools

    def _convert_function_to_tool(self, func) -> Optional[Dict[str, Any]]:
        """
        Convert a UC Function to MCP tool format.

        Args:
            func: Function info from Databricks SDK

        Returns:
            MCP tool definition or None if conversion fails
        """
        try:
            # Extract function metadata
            name = func.name.split(".")[-1]  # Get short name
            description = func.comment or f"Unity Catalog function: {name}"

            # Build parameter schema
            input_schema = {
                "type": "object",
                "properties": {},
                "required": []
            }

            # Parse function parameters
            if hasattr(func, "input_params") and func.input_params:
                for param in func.input_params.parameters:
                    param_name = param.name
                    param_type = self._map_uc_type_to_json_type(param.type_name)

                    input_schema["properties"][param_name] = {
                        "type": param_type,
                        "description": param.comment or ""
                    }

                    # Parameters without defaults are required
                    if not hasattr(param, "default_value") or param.default_value is None:
                        input_schema["required"].append(param_name)

            return {
                "name": name,
                "description": description,
                "inputSchema": input_schema,
                "full_name": func.full_name,
                "source": "unity_catalog"
            }

        except Exception as e:
            logger.warning("Failed to convert function %s: %s", func.name, e)
            return None

    def _map_uc_type_to_json_type(self, uc_type: str) -> str:
        """
        Map Unity Catalog data type to JSON Schema type.

        Args:
            uc_type: UC type name (e.g., "STRING", "BIGINT", "BOOLEAN")

        Returns:
            JSON Schema type ("string", "number", "boolean", etc.);
            unknown types fall back to "string".
        """
        type_mapping = {
            "STRING": "string",
            "VARCHAR": "string",
            "CHAR": "string",
            "BIGINT": "integer",
            "INT": "integer",
            "INTEGER": "integer",
            "SMALLINT": "integer",
            "TINYINT": "integer",
            "DOUBLE": "number",
            "FLOAT": "number",
            "DECIMAL": "number",
            "BOOLEAN": "boolean",
            "BINARY": "string",
            "DATE": "string",
            "TIMESTAMP": "string",
            "ARRAY": "array",
            "MAP": "object",
            "STRUCT": "object",
        }

        uc_type_upper = uc_type.upper()
        return type_mapping.get(uc_type_upper, "string")

    async def call_function(
        self,
        full_name: str,
        arguments: Dict[str, Any]
    ) -> Any:
        """
        Call a UC Function with given arguments.

        Args:
            full_name: Full function name (catalog.schema.function)
            arguments: Function arguments

        Returns:
            Function result (first cell of the result set, or None)

        Example:
            >>> adapter = UCFunctionAdapter()
            >>> result = await adapter.call_function(
            ...     "main.functions.calculate_tax",
            ...     {"amount": 100, "rate": 0.08}
            ... )
        """
        client = self._get_client()

        try:
            # Build SQL query calling the function with named parameter markers.
            args_list = [f":{key}" for key in arguments.keys()]
            query = f"SELECT {full_name}({', '.join(args_list)})"

            # Execute via SQL warehouse
            # Note: This requires a warehouse ID to be configured
            result = client.statement_execution.execute_statement(
                statement=query,
                warehouse_id=self._get_default_warehouse(),
                parameters=[
                    {"name": key, "value": str(value)}
                    for key, value in arguments.items()
                ]
            )

            return result.result.data_array[0][0] if result.result.data_array else None

        except Exception as e:
            logger.error("Failed to call UC Function %s: %s", full_name, e)
            raise

    def _get_default_warehouse(self) -> str:
        """Get default SQL warehouse ID from environment or client.

        Raises:
            ValueError: If DATABRICKS_WAREHOUSE_ID is not set.
        """
        import os
        warehouse_id = os.getenv("DATABRICKS_WAREHOUSE_ID")
        if not warehouse_id:
            raise ValueError(
                "DATABRICKS_WAREHOUSE_ID not set. "
                "Set this environment variable to use UC Functions."
            )
        return warehouse_id


# --- src/dbx_agent_app/py.typed --- (empty marker file, PEP 561)

# --- src/dbx_agent_app/registry/__init__.py ---
# Registry module — UC agent registration removed.
# Agents are now UC-governed via uc_securable resources on Databricks Apps.
# --- tests/conftest.py ---
"""Shared test fixtures for dbx-agent-app SDK tests."""

import pytest
from unittest.mock import MagicMock, patch
from fastapi import FastAPI
from fastapi.testclient import TestClient

from dbx_agent_app import add_agent_card, add_mcp_endpoints


# --- Helper-based fixtures ---


@pytest.fixture
def plain_app():
    """A bare FastAPI application carrying only an agent card (no MCP wiring)."""
    application = FastAPI()
    add_agent_card(
        application,
        name="test_agent",
        description="Test agent",
        capabilities=["test"],
    )
    return application


@pytest.fixture
def plain_client(plain_app):
    """HTTP test client bound to the card-only app."""
    return TestClient(plain_app)


@pytest.fixture
def mcp_tool():
    """A minimal async tool definition shared by the MCP fixtures."""
    async def echo(text: str) -> dict:
        return {"echo": text}

    return {
        "name": "echo",
        "description": "Echo back the input",
        "function": echo,
        "parameters": {
            "text": {"type": "string", "required": True},
        },
    }


@pytest.fixture
def mcp_app(mcp_tool):
    """FastAPI app exposing both the agent card and the MCP endpoints."""
    application = FastAPI()
    card_tool = {
        "name": mcp_tool["name"],
        "description": mcp_tool["description"],
        "parameters": mcp_tool["parameters"],
    }
    add_agent_card(
        application,
        name="mcp_agent",
        description="MCP test agent",
        capabilities=["test"],
        tools=[card_tool],
    )
    add_mcp_endpoints(application, tools=[mcp_tool])
    return application


@pytest.fixture
def mcp_client(mcp_app):
    """HTTP test client bound to the MCP-enabled app."""
    return TestClient(mcp_app)


@pytest.fixture
def mock_workspace_client():
    """A mocked databricks.sdk.WorkspaceClient.

    NOTE(review): this patches the name at its source module; code that ran
    `from databricks.sdk import WorkspaceClient` at import time (e.g.
    discovery.agent_discovery) keeps its own reference and will NOT see this
    mock — patch the use-site module instead if that is the intent. Confirm.
    """
    with patch("databricks.sdk.WorkspaceClient") as patched_cls:
        instance = MagicMock()
        patched_cls.return_value = instance
        yield instance
b/dbx-agent-app/tests/test_a2a_client.py new file mode 100644 index 00000000..0fca8a90 --- /dev/null +++ b/dbx-agent-app/tests/test_a2a_client.py @@ -0,0 +1,224 @@ +"""Tests for A2AClient — agent-to-agent communication.""" + +import json +import pytest +import httpx +import respx + +from dbx_agent_app.discovery.a2a_client import A2AClient, A2AClientError + + +AGENT_CARD = { + "name": "test_agent", + "description": "A test agent", + "capabilities": ["search"], + "version": "1.0.0", +} + + +# --- fetch_agent_card --- + + +@pytest.mark.asyncio +async def test_fetch_agent_card_success(): + """Fetches agent card from /.well-known/agent.json.""" + with respx.mock: + respx.get("https://agent.example.com/.well-known/agent.json").mock( + return_value=httpx.Response(200, json=AGENT_CARD) + ) + + async with A2AClient() as client: + card = await client.fetch_agent_card("https://agent.example.com") + + assert card["name"] == "test_agent" + assert card["capabilities"] == ["search"] + + +@pytest.mark.asyncio +async def test_fetch_agent_card_fallback_path(): + """Falls back to /card when /.well-known/agent.json fails.""" + with respx.mock: + respx.get("https://agent.example.com/.well-known/agent.json").mock( + return_value=httpx.Response(404) + ) + respx.get("https://agent.example.com/card").mock( + return_value=httpx.Response(200, json=AGENT_CARD) + ) + + async with A2AClient() as client: + card = await client.fetch_agent_card("https://agent.example.com") + + assert card["name"] == "test_agent" + + +@pytest.mark.asyncio +async def test_fetch_agent_card_oauth_redirect(): + """Detects OAuth redirect and tries fallback path.""" + with respx.mock: + respx.get("https://agent.example.com/.well-known/agent.json").mock( + return_value=httpx.Response(302, headers={"Location": "https://login.example.com"}) + ) + respx.get("https://agent.example.com/card").mock( + return_value=httpx.Response(302, headers={"Location": "https://login.example.com"}) + ) + + async with A2AClient() as client: + 
with pytest.raises(A2AClientError, match="Could not fetch agent card"): + await client.fetch_agent_card("https://agent.example.com") + + +@pytest.mark.asyncio +async def test_fetch_agent_card_all_paths_fail(): + """Raises A2AClientError when all paths fail.""" + with respx.mock: + respx.get("https://agent.example.com/.well-known/agent.json").mock( + return_value=httpx.Response(404) + ) + respx.get("https://agent.example.com/card").mock( + return_value=httpx.Response(404) + ) + + async with A2AClient() as client: + with pytest.raises(A2AClientError, match="Could not fetch agent card"): + await client.fetch_agent_card("https://agent.example.com") + + +@pytest.mark.asyncio +async def test_fetch_agent_card_not_initialized(): + """Raises if client used outside context manager.""" + client = A2AClient() + with pytest.raises(A2AClientError, match="not initialized"): + await client.fetch_agent_card("https://agent.example.com") + + +# --- send_message --- + + +@pytest.mark.asyncio +async def test_send_message_success(): + """Sends a message and returns the result.""" + jsonrpc_response = { + "jsonrpc": "2.0", + "id": "abc", + "result": {"status": "ok", "text": "Hello back"}, + } + + with respx.mock: + route = respx.post("https://agent.example.com/api/a2a").mock( + return_value=httpx.Response(200, json=jsonrpc_response) + ) + + async with A2AClient() as client: + result = await client.send_message( + "https://agent.example.com/api/a2a", + "Hello agent", + ) + + assert result["status"] == "ok" + # Verify the request payload structure + sent = json.loads(route.calls[0].request.content) + assert sent["method"] == "message/send" + assert sent["params"]["message"]["parts"][0]["text"] == "Hello agent" + + +@pytest.mark.asyncio +async def test_send_message_with_context_id(): + """Context ID is included in the message payload.""" + with respx.mock: + route = respx.post("https://agent.example.com/api/a2a").mock( + return_value=httpx.Response(200, json={ + "jsonrpc": "2.0", "id": "x", 
"result": {}, + }) + ) + + async with A2AClient() as client: + await client.send_message( + "https://agent.example.com/api/a2a", + "Follow up", + context_id="ctx-123", + ) + + sent = json.loads(route.calls[0].request.content) + assert sent["params"]["message"]["contextId"] == "ctx-123" + + +@pytest.mark.asyncio +async def test_send_message_jsonrpc_error(): + """Raises A2AClientError when JSON-RPC returns an error.""" + with respx.mock: + respx.post("https://agent.example.com/api/a2a").mock( + return_value=httpx.Response(200, json={ + "jsonrpc": "2.0", + "id": "x", + "error": {"code": -32600, "message": "Invalid request"}, + }) + ) + + async with A2AClient() as client: + with pytest.raises(A2AClientError, match="Invalid request"): + await client.send_message( + "https://agent.example.com/api/a2a", "test" + ) + + +@pytest.mark.asyncio +async def test_send_message_timeout(): + """Raises A2AClientError on timeout.""" + with respx.mock: + respx.post("https://agent.example.com/api/a2a").mock( + side_effect=httpx.ReadTimeout("timed out") + ) + + async with A2AClient(timeout=1.0) as client: + with pytest.raises(A2AClientError, match="timed out"): + await client.send_message( + "https://agent.example.com/api/a2a", "test" + ) + + +@pytest.mark.asyncio +async def test_send_message_http_error(): + """Raises A2AClientError on HTTP error status.""" + with respx.mock: + respx.post("https://agent.example.com/api/a2a").mock( + return_value=httpx.Response(500) + ) + + async with A2AClient() as client: + with pytest.raises(A2AClientError, match="500"): + await client.send_message( + "https://agent.example.com/api/a2a", "test" + ) + + +# --- send_streaming_message --- + + +@pytest.mark.asyncio +async def test_send_streaming_message_parses_sse(): + """Parses SSE data lines into JSON objects.""" + sse_body = ( + 'data: {"type": "chunk", "text": "Hello"}\n\n' + 'data: {"type": "chunk", "text": " world"}\n\n' + 'data: {"type": "done"}\n\n' + ) + + with respx.mock: + 
respx.post("https://agent.example.com/api/a2a/stream").mock( + return_value=httpx.Response( + 200, + content=sse_body.encode(), + headers={"content-type": "text/event-stream"}, + ) + ) + + events = [] + async with A2AClient() as client: + async for event in client.send_streaming_message( + "https://agent.example.com/api/a2a", "Stream test" + ): + events.append(event) + + assert len(events) == 3 + assert events[0]["text"] == "Hello" + assert events[2]["type"] == "done" diff --git a/dbx-agent-app/tests/test_agent_app.py b/dbx-agent-app/tests/test_agent_app.py new file mode 100644 index 00000000..a8078e8d --- /dev/null +++ b/dbx-agent-app/tests/test_agent_app.py @@ -0,0 +1,186 @@ +"""Tests for add_agent_card() and add_mcp_endpoints() helpers.""" + +from fastapi import FastAPI +from fastapi.testclient import TestClient + +from dbx_agent_app import add_agent_card, add_mcp_endpoints + + +# =================================================================== +# Tests for add_agent_card() +# =================================================================== + + +def test_agent_card_endpoint(plain_client): + """add_agent_card() creates a working /.well-known/agent.json.""" + response = plain_client.get("/.well-known/agent.json") + + assert response.status_code == 200 + data = response.json() + + assert data["name"] == "test_agent" + assert data["description"] == "Test agent" + assert data["capabilities"] == ["test"] + assert data["schema_version"] == "a2a/1.0" + assert data["endpoints"]["invocations"] == "/invocations" + + +def test_health_endpoint(plain_client): + """add_agent_card() also adds /health.""" + response = plain_client.get("/health") + + assert response.status_code == 200 + data = response.json() + + assert data["status"] == "healthy" + assert data["agent"] == "test_agent" + + +def test_agent_card_with_tools(): + """Tools metadata appears in the agent card.""" + app = FastAPI() + add_agent_card( + app, + name="tooled", + description="Agent with tools", + 
capabilities=["test"], + tools=[ + {"name": "search", "description": "Search things", "parameters": {"q": {"type": "string"}}}, + ], + ) + + client = TestClient(app) + data = client.get("/.well-known/agent.json").json() + + assert len(data["tools"]) == 1 + assert data["tools"][0]["name"] == "search" + + +def test_agent_card_custom_version(): + """Version parameter is reflected in the card.""" + app = FastAPI() + add_agent_card(app, name="v2", description="V2", capabilities=[], version="2.0.0") + + client = TestClient(app) + data = client.get("/.well-known/agent.json").json() + assert data["version"] == "2.0.0" + + +# =================================================================== +# Tests for add_mcp_endpoints() +# =================================================================== + + +def test_mcp_tools_list(mcp_client): + """MCP tools/list returns registered tools.""" + response = mcp_client.post( + "/api/mcp", + json={"jsonrpc": "2.0", "method": "tools/list", "id": "1"}, + ) + assert response.status_code == 200 + tools = response.json()["result"]["tools"] + assert len(tools) == 1 + assert tools[0]["name"] == "echo" + assert "inputSchema" in tools[0] + + +def test_mcp_tools_call(mcp_client): + """MCP tools/call executes a tool.""" + response = mcp_client.post( + "/api/mcp", + json={ + "jsonrpc": "2.0", + "method": "tools/call", + "params": {"name": "echo", "arguments": {"text": "hello"}}, + "id": "2", + }, + ) + assert response.status_code == 200 + result = response.json()["result"]["result"] + assert result["echo"] == "hello" + + +def test_mcp_server_info(mcp_client): + """MCP server/info returns metadata.""" + response = mcp_client.post( + "/api/mcp", + json={"jsonrpc": "2.0", "method": "server/info", "id": "3"}, + ) + assert response.status_code == 200 + info = response.json()["result"] + assert "name" in info + assert "protocol_version" in info + + +def test_mcp_unknown_method(mcp_client): + """Unknown JSON-RPC method returns -32601.""" + response = 
mcp_client.post( + "/api/mcp", + json={"jsonrpc": "2.0", "method": "unknown/method", "id": "4"}, + ) + assert response.status_code == 200 + assert response.json()["error"]["code"] == -32601 + + +def test_mcp_tool_not_found(mcp_client): + """Calling nonexistent tool returns -32603.""" + response = mcp_client.post( + "/api/mcp", + json={ + "jsonrpc": "2.0", + "method": "tools/call", + "params": {"name": "nonexistent", "arguments": {}}, + "id": "5", + }, + ) + assert response.status_code == 200 + assert response.json()["error"]["code"] == -32603 + + +def test_mcp_get_tools_endpoint(mcp_client): + """GET /api/mcp/tools returns tool list.""" + response = mcp_client.get("/api/mcp/tools") + assert response.status_code == 200 + tools = response.json()["tools"] + assert len(tools) == 1 + assert tools[0]["name"] == "echo" + + +def test_no_mcp_without_add(): + """Without add_mcp_endpoints(), /api/mcp doesn't exist.""" + app = FastAPI() + add_agent_card(app, name="no_mcp", description="No MCP", capabilities=[]) + + client = TestClient(app) + response = client.post( + "/api/mcp", + json={"jsonrpc": "2.0", "method": "tools/list", "id": "1"}, + ) + assert response.status_code in (404, 405) + + +def test_mcp_multiple_tools(): + """Multiple tools all appear in MCP tools/list.""" + app = FastAPI() + + async def tool_a(x: str) -> dict: + return {"a": x} + + async def tool_b(y: int) -> dict: + return {"b": y} + + tools = [ + {"name": "tool_a", "description": "First", "function": tool_a, "parameters": {"x": {"type": "string", "required": True}}}, + {"name": "tool_b", "description": "Second", "function": tool_b, "parameters": {"y": {"type": "integer", "required": True}}}, + ] + add_mcp_endpoints(app, tools=tools) + + client = TestClient(app) + response = client.post( + "/api/mcp", + json={"jsonrpc": "2.0", "method": "tools/list", "id": "1"}, + ) + mcp_tools = response.json()["result"]["tools"] + assert len(mcp_tools) == 2 + names = {t["name"] for t in mcp_tools} + assert names == 
{"tool_a", "tool_b"} diff --git a/dbx-agent-app/tests/test_agent_discovery.py b/dbx-agent-app/tests/test_agent_discovery.py new file mode 100644 index 00000000..0906c44b --- /dev/null +++ b/dbx-agent-app/tests/test_agent_discovery.py @@ -0,0 +1,210 @@ +"""Tests for AgentDiscovery — workspace agent scanning.""" + +import pytest +from unittest.mock import MagicMock, patch, AsyncMock +from dataclasses import dataclass + +from dbx_agent_app.discovery.agent_discovery import ( + AgentDiscovery, + DiscoveredAgent, +) + + +@dataclass +class _FakeApp: + """Minimal stand-in for a Databricks SDK App object.""" + name: str + url: str = "" + creator: str = "user@example.com" + updater: str = "user@example.com" + compute_status: object = None + active_deployment: object = None + + +@dataclass +class _FakeComputeStatus: + state: str = "ACTIVE" + + +@dataclass +class _FakeDeploymentStatus: + state: str = "SUCCEEDED" + + +@dataclass +class _FakeDeployment: + status: object = None + + +def _make_running_app(name, url): + """Create a fake app that looks ACTIVE/SUCCEEDED.""" + return _FakeApp( + name=name, + url=url, + compute_status=_FakeComputeStatus(state="ACTIVE"), + active_deployment=_FakeDeployment( + status=_FakeDeploymentStatus(state="SUCCEEDED"), + ), + ) + + +def _make_stopped_app(name): + """Create a fake app with no URL / stopped compute.""" + return _FakeApp( + name=name, + url="", + compute_status=_FakeComputeStatus(state="STOPPED"), + ) + + +AGENT_CARD = { + "name": "research_agent", + "description": "Does research", + "capabilities": ["search", "analysis"], + "protocolVersion": "a2a/1.0", +} + + +# --- discover_agents --- + + +@pytest.mark.asyncio +async def test_discover_agents_finds_agent(): + """Discovers an agent from a running app with a valid agent card.""" + discovery = AgentDiscovery() + + with patch.object(discovery, "_list_workspace_apps", new_callable=AsyncMock) as mock_list: + mock_list.return_value = [ + {"name": "research-app", "url": 
"https://research.example.com", "compute_state": "ACTIVE", "deploy_state": "SUCCEEDED"}, + ] + + with patch.object(discovery, "_probe_app_for_agent", new_callable=AsyncMock) as mock_probe: + mock_probe.return_value = DiscoveredAgent( + name="research_agent", + endpoint_url="https://research.example.com", + app_name="research-app", + description="Does research", + capabilities="search,analysis", + protocol_version="a2a/1.0", + ) + + result = await discovery.discover_agents() + + assert len(result.agents) == 1 + assert result.agents[0].name == "research_agent" + assert len(result.errors) == 0 + + +@pytest.mark.asyncio +async def test_discover_agents_empty_workspace(): + """Returns empty result for a workspace with no apps.""" + discovery = AgentDiscovery() + + with patch.object(discovery, "_list_workspace_apps", new_callable=AsyncMock) as mock_list: + mock_list.return_value = [] + result = await discovery.discover_agents() + + assert result.agents == [] + assert result.errors == [] + + +@pytest.mark.asyncio +async def test_discover_agents_listing_failure(): + """Returns error when workspace listing fails.""" + discovery = AgentDiscovery() + + with patch.object(discovery, "_list_workspace_apps", new_callable=AsyncMock) as mock_list: + mock_list.side_effect = RuntimeError("Permission denied") + result = await discovery.discover_agents() + + assert result.agents == [] + assert len(result.errors) == 1 + assert "Permission denied" in result.errors[0] + + +@pytest.mark.asyncio +async def test_discover_agents_skip_no_url(): + """Apps without a URL are skipped during probing.""" + discovery = AgentDiscovery() + + with patch.object(discovery, "_list_workspace_apps", new_callable=AsyncMock) as mock_list: + mock_list.return_value = [ + {"name": "no-url-app", "compute_state": "ACTIVE", "deploy_state": "SUCCEEDED"}, + ] + + result = await discovery.discover_agents() + + assert result.agents == [] + + +@pytest.mark.asyncio +async def test_discover_agents_probe_returns_none(): + 
"""Non-agent apps (probe returns None) are filtered out.""" + discovery = AgentDiscovery() + + with patch.object(discovery, "_list_workspace_apps", new_callable=AsyncMock) as mock_list: + mock_list.return_value = [ + {"name": "webapp", "url": "https://webapp.example.com", "compute_state": "ACTIVE", "deploy_state": "SUCCEEDED"}, + ] + + with patch.object(discovery, "_probe_app_for_agent", new_callable=AsyncMock) as mock_probe: + mock_probe.return_value = None + result = await discovery.discover_agents() + + assert result.agents == [] + + +# --- capabilities parsing --- + + +@pytest.mark.asyncio +async def test_probe_parses_dict_capabilities(): + """Capabilities as dict keys are extracted correctly.""" + discovery = AgentDiscovery() + discovery._workspace_token = "fake-token" + + card_with_dict_caps = { + "name": "agent", + "description": "test", + "capabilities": {"streaming": True, "pushNotifications": False}, + } + + with patch("dbx_agent_app.discovery.agent_discovery.A2AClient") as mock_cls: + mock_instance = AsyncMock() + mock_instance.fetch_agent_card = AsyncMock(return_value=card_with_dict_caps) + mock_cls.return_value.__aenter__ = AsyncMock(return_value=mock_instance) + mock_cls.return_value.__aexit__ = AsyncMock(return_value=False) + + result = await discovery._probe_app_for_agent( + {"name": "app", "url": "https://app.example.com"} + ) + + assert result is not None + assert "streaming" in result.capabilities + assert "pushNotifications" in result.capabilities + + +@pytest.mark.asyncio +async def test_probe_parses_list_capabilities(): + """Capabilities as list are joined with commas.""" + discovery = AgentDiscovery() + discovery._workspace_token = "fake-token" + + card_with_list_caps = { + "name": "agent", + "description": "test", + "capabilities": ["search", "analysis"], + } + + with patch("dbx_agent_app.discovery.agent_discovery.A2AClient") as mock_cls: + mock_instance = AsyncMock() + mock_instance.fetch_agent_card = 
AsyncMock(return_value=card_with_list_caps) + mock_cls.return_value.__aenter__ = AsyncMock(return_value=mock_instance) + mock_cls.return_value.__aexit__ = AsyncMock(return_value=False) + + result = await discovery._probe_app_for_agent( + {"name": "app", "url": "https://app.example.com"} + ) + + assert result is not None + assert result.capabilities == "search,analysis" diff --git a/dbx-agent-app/tests/test_analytics.py b/dbx-agent-app/tests/test_analytics.py new file mode 100644 index 00000000..95373037 --- /dev/null +++ b/dbx-agent-app/tests/test_analytics.py @@ -0,0 +1,64 @@ +"""Tests for the in-memory analytics tracker.""" + +from dbx_agent_app.dashboard.analytics import AnalyticsTracker + + +def test_empty_summary(): + tracker = AnalyticsTracker() + summary = tracker.get_summary("unknown") + assert summary["total"] == 0 + assert summary["success_rate"] == 0.0 + assert summary["recent"] == [] + + +def test_record_and_summary(): + tracker = AnalyticsTracker() + tracker.record("agent-a", success=True, latency_ms=100, source="test") + tracker.record("agent-a", success=True, latency_ms=200, source="chat") + tracker.record("agent-a", success=False, latency_ms=50, source="test", error="timeout") + + summary = tracker.get_summary("agent-a") + assert summary["total"] == 3 + assert summary["success_count"] == 2 + assert summary["failure_count"] == 1 + assert summary["success_rate"] == round(2 / 3, 3) + assert summary["avg_latency_ms"] == round((100 + 200 + 50) / 3) + assert len(summary["recent"]) == 3 + + +def test_ring_buffer_eviction(): + tracker = AnalyticsTracker(max_per_agent=5) + for i in range(10): + tracker.record("agent-b", success=True, latency_ms=i * 10, source="test") + + summary = tracker.get_summary("agent-b") + assert summary["total"] == 5 + + +def test_separate_agents(): + tracker = AnalyticsTracker() + tracker.record("a", success=True, latency_ms=100, source="test") + tracker.record("b", success=False, latency_ms=200, source="chat") + + assert 
tracker.get_summary("a")["success_count"] == 1 + assert tracker.get_summary("b")["failure_count"] == 1 + assert tracker.get_summary("a")["total"] == 1 + + +def test_recent_order_is_newest_first(): + tracker = AnalyticsTracker() + tracker.record("x", success=True, latency_ms=10, source="test") + tracker.record("x", success=False, latency_ms=20, source="chat") + + recent = tracker.get_summary("x")["recent"] + assert recent[0]["latency_ms"] == 20 # newest first + assert recent[1]["latency_ms"] == 10 + + +def test_error_field_recorded(): + tracker = AnalyticsTracker() + tracker.record("x", success=False, latency_ms=0, source="test", error="connection refused") + + recent = tracker.get_summary("x")["recent"] + assert recent[0]["error"] == "connection refused" + assert recent[0]["success"] is False diff --git a/dbx-agent-app/tests/test_app_agent.py b/dbx-agent-app/tests/test_app_agent.py new file mode 100644 index 00000000..4528c09f --- /dev/null +++ b/dbx-agent-app/tests/test_app_agent.py @@ -0,0 +1,366 @@ +"""Tests for @app_agent decorator and AppAgent class.""" + +import json + +import pytest +from fastapi.testclient import TestClient + +from dbx_agent_app import app_agent, AgentRequest, AgentResponse, StreamEvent, UserContext +from dbx_agent_app.core.app_agent import AppAgent + + +# =================================================================== +# Helper: create a basic agent +# =================================================================== + + +def _make_echo_agent(**kwargs): + """Create a simple echo agent for testing.""" + + @app_agent( + name=kwargs.get("name", "echo"), + description=kwargs.get("description", "Echo agent"), + capabilities=kwargs.get("capabilities", ["test"]), + enable_mcp=kwargs.get("enable_mcp", False), + ) + async def echo(request: AgentRequest) -> AgentResponse: + return AgentResponse.text(f"Echo: {request.last_user_message}") + + return echo + + +# =================================================================== +# Basic agent 
creation +# =================================================================== + + +def test_decorator_returns_app_agent(): + """@app_agent returns an AppAgent instance.""" + agent = _make_echo_agent() + assert isinstance(agent, AppAgent) + assert agent.name == "echo" + assert agent.description == "Echo agent" + assert agent.capabilities == ["test"] + + +def test_app_property_returns_fastapi(): + """agent.app returns a FastAPI instance.""" + from fastapi import FastAPI + + agent = _make_echo_agent() + assert isinstance(agent.app, FastAPI) + + +def test_app_property_is_cached(): + """agent.app returns the same instance on subsequent calls.""" + agent = _make_echo_agent() + assert agent.app is agent.app + + +# =================================================================== +# /invocations endpoint +# =================================================================== + + +def test_invocations_basic(): + """/invocations returns correct wire format.""" + agent = _make_echo_agent() + client = TestClient(agent.app) + + response = client.post( + "/invocations", + json={"input": [{"role": "user", "content": "hello"}]}, + ) + assert response.status_code == 200 + + data = response.json() + assert "output" in data + assert data["output"][0]["type"] == "message" + assert data["output"][0]["content"][0]["text"] == "Echo: hello" + + +def test_invocations_no_user_message(): + """/invocations returns 400 when no user message in input.""" + agent = _make_echo_agent() + client = TestClient(agent.app) + + response = client.post( + "/invocations", + json={"input": [{"role": "system", "content": "prompt"}]}, + ) + assert response.status_code == 400 + + +def test_invocations_empty_input(): + """/invocations returns 400 for empty input.""" + agent = _make_echo_agent() + client = TestClient(agent.app) + + response = client.post("/invocations", json={"input": []}) + assert response.status_code == 400 + + +# =================================================================== +# Return 
type coercion +# =================================================================== + + +def test_string_return_coercion(): + """Handler returning a string is auto-wrapped.""" + + @app_agent(name="str", description="str agent", capabilities=[]) + async def str_agent(request: AgentRequest) -> str: + return "plain string" + + client = TestClient(str_agent.app) + resp = client.post( + "/invocations", + json={"input": [{"role": "user", "content": "go"}]}, + ) + assert resp.status_code == 200 + assert resp.json()["output"][0]["content"][0]["text"] == "plain string" + + +def test_dict_return_coercion(): + """Handler returning a dict is auto-wrapped via from_dict.""" + + @app_agent(name="dict", description="dict agent", capabilities=[]) + async def dict_agent(request: AgentRequest) -> dict: + return {"response": "from dict"} + + client = TestClient(dict_agent.app) + resp = client.post( + "/invocations", + json={"input": [{"role": "user", "content": "go"}]}, + ) + assert resp.status_code == 200 + assert resp.json()["output"][0]["content"][0]["text"] == "from dict" + + +def test_dict_without_response_key(): + """Dict without 'response' key gets JSON-serialized.""" + + @app_agent(name="raw", description="raw dict", capabilities=[]) + async def raw_agent(request: AgentRequest) -> dict: + return {"foo": "bar"} + + client = TestClient(raw_agent.app) + resp = client.post( + "/invocations", + json={"input": [{"role": "user", "content": "go"}]}, + ) + text = resp.json()["output"][0]["content"][0]["text"] + assert json.loads(text) == {"foo": "bar"} + + +# =================================================================== +# Agent card and health +# =================================================================== + + +def test_agent_card(): + """/.well-known/agent.json returns correct metadata.""" + agent = _make_echo_agent() + client = TestClient(agent.app) + + resp = client.get("/.well-known/agent.json") + assert resp.status_code == 200 + + data = resp.json() + assert 
data["name"] == "echo" + assert data["schema_version"] == "a2a/1.0" + assert data["endpoints"]["invocations"] == "/invocations" + + +def test_health(): + """/health returns healthy status.""" + agent = _make_echo_agent() + client = TestClient(agent.app) + + resp = client.get("/health") + assert resp.status_code == 200 + assert resp.json()["status"] == "healthy" + assert resp.json()["agent"] == "echo" + + +# =================================================================== +# Tool registration +# =================================================================== + + +def test_tool_registration(): + """@agent.tool() registers tools visible in agent card.""" + agent = _make_echo_agent(enable_mcp=False) + + @agent.tool(description="Search things") + async def search(query: str) -> dict: + return {"results": [query]} + + client = TestClient(agent.app) + card = client.get("/.well-known/agent.json").json() + + assert len(card["tools"]) == 1 + assert card["tools"][0]["name"] == "search" + assert card["tools"][0]["description"] == "Search things" + + +def test_tool_endpoint(): + """Registered tools get /api/tools/ endpoints.""" + agent = _make_echo_agent(enable_mcp=False) + + @agent.tool(description="Echo tool") + async def my_tool(text: str) -> dict: + return {"echo": text} + + client = TestClient(agent.app) + # FastAPI treats bare str params as query params + resp = client.post("/api/tools/my_tool?text=hello") + assert resp.status_code == 200 + assert resp.json()["echo"] == "hello" + + +# =================================================================== +# Direct call (testing without HTTP) +# =================================================================== + + +@pytest.mark.asyncio +async def test_direct_call(): + """agent(request) calls handler directly.""" + agent = _make_echo_agent() + req = AgentRequest(input=[{"role": "user", "content": "direct"}]) + + result = await agent(req) + assert isinstance(result, AgentResponse) + assert 
result.output[0].content[0].text == "Echo: direct" + + +# =================================================================== +# MCP integration +# =================================================================== + + +# =================================================================== +# User context header extraction +# =================================================================== + + +def test_invocations_extracts_user_context(): + """/invocations populates user_context from X-Forwarded-* headers.""" + captured = {} + + @app_agent( + name="auth_test", + description="Auth test", + capabilities=[], + ) + async def auth_agent(request: AgentRequest) -> AgentResponse: + captured["user_context"] = request.user_context + return AgentResponse.text("ok") + + client = TestClient(auth_agent.app) + resp = client.post( + "/invocations", + json={"input": [{"role": "user", "content": "test"}]}, + headers={ + "X-Forwarded-Access-Token": "user-token-123", + "X-Forwarded-Email": "alice@example.com", + "X-Forwarded-User": "alice", + }, + ) + assert resp.status_code == 200 + + ctx = captured["user_context"] + assert ctx is not None + assert ctx.access_token == "user-token-123" + assert ctx.email == "alice@example.com" + assert ctx.user == "alice" + assert ctx.is_authenticated is True + + +def test_invocations_no_headers_means_no_user_context(): + """/invocations without X-Forwarded-* headers leaves user_context as None.""" + captured = {} + + @app_agent( + name="no_auth", + description="No auth", + capabilities=[], + ) + async def no_auth_agent(request: AgentRequest) -> AgentResponse: + captured["user_context"] = request.user_context + return AgentResponse.text("ok") + + client = TestClient(no_auth_agent.app) + resp = client.post( + "/invocations", + json={"input": [{"role": "user", "content": "test"}]}, + ) + assert resp.status_code == 200 + assert captured["user_context"] is None + + +def test_invocations_partial_headers(): + """/invocations with only email header still 
creates UserContext.""" + captured = {} + + @app_agent( + name="partial", + description="Partial", + capabilities=[], + ) + async def partial_agent(request: AgentRequest) -> AgentResponse: + captured["user_context"] = request.user_context + return AgentResponse.text("ok") + + client = TestClient(partial_agent.app) + resp = client.post( + "/invocations", + json={"input": [{"role": "user", "content": "test"}]}, + headers={"X-Forwarded-Email": "bob@example.com"}, + ) + assert resp.status_code == 200 + + ctx = captured["user_context"] + assert ctx is not None + assert ctx.email == "bob@example.com" + assert ctx.access_token is None + assert ctx.is_authenticated is False + + +# =================================================================== +# MCP integration +# =================================================================== + + +def test_mcp_with_tools(): + """MCP endpoints are added when enable_mcp=True and tools exist.""" + + @app_agent( + name="mcp_test", + description="MCP test", + capabilities=[], + enable_mcp=True, + ) + async def mcp_agent(request: AgentRequest) -> str: + return "ok" + + @mcp_agent.tool(description="Search") + async def search(query: str) -> dict: + return {"results": [query]} + + client = TestClient(mcp_agent.app) + + # Check agent card includes MCP endpoint + card = client.get("/.well-known/agent.json").json() + assert "mcp" in card["endpoints"] + + # Check MCP tools/list works + resp = client.post( + "/api/mcp", + json={"jsonrpc": "2.0", "method": "tools/list", "id": "1"}, + ) + assert resp.status_code == 200 + tools = resp.json()["result"]["tools"] + assert len(tools) == 1 + assert tools[0]["name"] == "search" diff --git a/dbx-agent-app/tests/test_dashboard.py b/dbx-agent-app/tests/test_dashboard.py new file mode 100644 index 00000000..7067b2fe --- /dev/null +++ b/dbx-agent-app/tests/test_dashboard.py @@ -0,0 +1,269 @@ +"""Tests for the developer dashboard — scanner, routes, and CLI.""" + +import pytest +from unittest.mock import 
AsyncMock, patch, MagicMock +from fastapi.testclient import TestClient + +from dbx_agent_app.discovery import DiscoveredAgent +from dbx_agent_app.dashboard.scanner import DashboardScanner +from dbx_agent_app.dashboard.app import create_dashboard_app +from dbx_agent_app.dashboard.cli import main as cli_main + + +# --- Fixtures ------------------------------------------------------------- + +FAKE_AGENTS = [ + DiscoveredAgent( + name="research_agent", + endpoint_url="https://research.example.com", + app_name="research-app", + description="Does research", + capabilities="search,analysis", + protocol_version="a2a/1.0", + ), + DiscoveredAgent( + name="data_agent", + endpoint_url="https://data.example.com", + app_name="data-app", + description="Queries data", + capabilities="sql", + protocol_version="a2a/1.0", + ), +] + +FAKE_CARD = { + "name": "research_agent", + "description": "Does research", + "capabilities": ["search", "analysis"], + "protocolVersion": "a2a/1.0", + "skills": [ + {"name": "web_search", "description": "Search the web"}, + {"name": "summarize", "description": "Summarize documents"}, + ], +} + + +@pytest.fixture +def scanner(): + """DashboardScanner pre-loaded with fake agents (no workspace call).""" + s = DashboardScanner.__new__(DashboardScanner) + s._discovery = MagicMock() + s._agents = list(FAKE_AGENTS) + s._scan_lock = __import__("asyncio").Lock() + s._scanned = True + + # Mock scan() for lifespan startup handler + async def _mock_scan(): + return list(FAKE_AGENTS) + s.scan = _mock_scan + + return s + + +@pytest.fixture +def dashboard_client(scanner): + """FastAPI TestClient wired to a pre-loaded scanner.""" + app = create_dashboard_app(scanner, profile="test-profile", auto_scan_interval=0) + return TestClient(app) + + +# --- DashboardScanner tests ----------------------------------------------- + + +@pytest.mark.asyncio +async def test_scan_populates_cache(): + """scan() calls discover_agents and caches results.""" + scanner = 
DashboardScanner.__new__(DashboardScanner) + scanner._agents = [] + scanner._scan_lock = __import__("asyncio").Lock() + scanner._scanned = False + + mock_discovery = AsyncMock() + mock_discovery.discover_agents.return_value = MagicMock( + agents=FAKE_AGENTS, errors=[] + ) + scanner._discovery = mock_discovery + + result = await scanner.scan() + + assert len(result) == 2 + assert scanner.get_agents() == list(FAKE_AGENTS) + mock_discovery.discover_agents.assert_awaited_once() + + +@pytest.mark.asyncio +async def test_scan_returns_cached_on_get(): + """get_agents() returns cache without re-scanning.""" + scanner = DashboardScanner.__new__(DashboardScanner) + scanner._agents = list(FAKE_AGENTS) + scanner._scan_lock = __import__("asyncio").Lock() + scanner._scanned = True + scanner._discovery = MagicMock() + + agents = scanner.get_agents() + assert len(agents) == 2 + scanner._discovery.discover_agents.assert_not_called() + + +def test_get_agent_by_name(scanner): + """Look up agents by name or app_name.""" + assert scanner.get_agent_by_name("research_agent") is not None + assert scanner.get_agent_by_name("research-app") is not None + assert scanner.get_agent_by_name("nonexistent") is None + + +@pytest.mark.asyncio +async def test_get_agent_card(scanner): + """get_agent_card fetches via A2AClient.""" + with patch("dbx_agent_app.dashboard.scanner.A2AClient") as mock_cls: + mock_instance = AsyncMock() + mock_instance.fetch_agent_card = AsyncMock(return_value=FAKE_CARD) + mock_cls.return_value.__aenter__ = AsyncMock(return_value=mock_instance) + mock_cls.return_value.__aexit__ = AsyncMock(return_value=False) + + card = await scanner.get_agent_card("https://research.example.com") + + assert card["name"] == "research_agent" + assert len(card["skills"]) == 2 + + +@pytest.mark.asyncio +async def test_proxy_mcp(scanner): + """proxy_mcp forwards JSON-RPC to agent's /api/mcp endpoint.""" + scanner._discovery._workspace_token = "test-token" + + mcp_response = {"jsonrpc": "2.0", 
"id": "1", "result": {"tools": []}} + + with patch("dbx_agent_app.dashboard.scanner.httpx.AsyncClient") as mock_http_cls: + mock_http = AsyncMock() + mock_resp = MagicMock() + mock_resp.json.return_value = mcp_response + mock_resp.raise_for_status = MagicMock() + mock_http.post = AsyncMock(return_value=mock_resp) + mock_http_cls.return_value.__aenter__ = AsyncMock(return_value=mock_http) + mock_http_cls.return_value.__aexit__ = AsyncMock(return_value=False) + + result = await scanner.proxy_mcp( + "https://research.example.com", + {"jsonrpc": "2.0", "id": "1", "method": "tools/list", "params": {}}, + ) + + assert result == mcp_response + mock_http.post.assert_awaited_once() + call_url = mock_http.post.call_args[0][0] + assert call_url == "https://research.example.com/api/mcp" + + +# --- FastAPI route tests -------------------------------------------------- + + +def test_index_returns_spa(dashboard_client): + """GET / returns 200 with React SPA index.html.""" + resp = dashboard_client.get("/") + assert resp.status_code == 200 + assert "" in resp.text + assert '
' in resp.text
+
+
+def test_spa_catch_all_returns_index(dashboard_client):
+    """GET /agent/{name} returns 200 with SPA (client-side routing)."""
+    resp = dashboard_client.get("/agent/research_agent")
+    assert resp.status_code == 200
+    assert '<div id="root">' in resp.text
+
+
+def test_spa_catch_all_nonexistent(dashboard_client):
+    """GET for unknown route returns 200 with SPA (React handles 404)."""
+    resp = dashboard_client.get("/agent/nonexistent")
+    assert resp.status_code == 200
+    assert '
' in resp.text + + +def test_api_agents(dashboard_client): + """GET /api/agents returns JSON agent list.""" + resp = dashboard_client.get("/api/agents") + assert resp.status_code == 200 + data = resp.json() + assert len(data) == 2 + assert data[0]["name"] == "research_agent" + + +def test_api_agent_card(dashboard_client, scanner): + """GET /api/agents/{name}/card returns card JSON.""" + with patch.object(scanner, "get_agent_card", new_callable=AsyncMock) as mock_card: + mock_card.return_value = FAKE_CARD + resp = dashboard_client.get("/api/agents/research_agent/card") + + assert resp.status_code == 200 + assert resp.json()["name"] == "research_agent" + + +def test_api_agent_card_not_found(dashboard_client): + """GET /api/agents/{name}/card returns 404 for unknown agent.""" + resp = dashboard_client.get("/api/agents/nonexistent/card") + assert resp.status_code == 404 + + +def test_api_mcp_proxy(dashboard_client, scanner): + """POST /api/agents/{name}/mcp proxies to agent's MCP endpoint.""" + mcp_response = {"jsonrpc": "2.0", "id": "1", "result": {"tools": []}} + with patch.object(scanner, "proxy_mcp", new_callable=AsyncMock) as mock_mcp: + mock_mcp.return_value = mcp_response + resp = dashboard_client.post( + "/api/agents/research_agent/mcp", + json={"jsonrpc": "2.0", "id": "1", "method": "tools/list", "params": {}}, + ) + + assert resp.status_code == 200 + assert resp.json() == mcp_response + + +def test_api_mcp_proxy_not_found(dashboard_client): + """POST /api/agents/{name}/mcp returns 404 for unknown agent.""" + resp = dashboard_client.post( + "/api/agents/nonexistent/mcp", + json={"jsonrpc": "2.0", "id": "1", "method": "tools/list", "params": {}}, + ) + assert resp.status_code == 404 + + +def test_api_scan(dashboard_client, scanner): + """POST /api/scan triggers rescan and returns count.""" + with patch.object(scanner, "scan", new_callable=AsyncMock) as mock_scan: + mock_scan.return_value = FAKE_AGENTS + resp = dashboard_client.post("/api/scan") + + assert 
resp.status_code == 200 + data = resp.json() + assert data["count"] == 2 + + +def test_health(dashboard_client): + """GET /health returns status and profile.""" + resp = dashboard_client.get("/health") + assert resp.status_code == 200 + data = resp.json() + assert data["status"] == "ok" + assert data["agents_cached"] == 2 + assert data["profile"] == "test-profile" + + +# --- CLI argument parsing ------------------------------------------------- + + +def test_cli_help(capsys): + """CLI --help exits cleanly.""" + with pytest.raises(SystemExit) as exc: + import sys + sys.argv = ["dbx-agent-app", "dashboard", "--help"] + cli_main() + assert exc.value.code == 0 + + +def test_cli_no_command(capsys): + """CLI with no subcommand prints help and exits.""" + import sys + sys.argv = ["dbx-agent-app"] + with pytest.raises(SystemExit) as exc: + cli_main() + assert exc.value.code == 1 diff --git a/dbx-agent-app/tests/test_dashboard_invocations.py b/dbx-agent-app/tests/test_dashboard_invocations.py new file mode 100644 index 00000000..64227aba --- /dev/null +++ b/dbx-agent-app/tests/test_dashboard_invocations.py @@ -0,0 +1,88 @@ +"""Tests for dashboard /api/agents/{name}/test endpoint (invocations proxy).""" + +import json +from unittest.mock import AsyncMock, MagicMock, patch + +import pytest +from fastapi.testclient import TestClient + +from dbx_agent_app.dashboard.app import create_dashboard_app +from dbx_agent_app.dashboard.scanner import DashboardScanner +from dbx_agent_app.discovery import DiscoveredAgent + + +@pytest.fixture +def mock_scanner(): + """DashboardScanner with a fake agent and mocked call_invocations.""" + scanner = MagicMock(spec=DashboardScanner) + + fake_agent = DiscoveredAgent( + name="test_agent", + endpoint_url="https://fake-agent.example.com", + app_name="test-agent-app", + description="Test agent", + capabilities=["test"], + ) + + scanner.get_agents.return_value = [fake_agent] + scanner.get_agent_by_name.side_effect = lambda name: fake_agent if name in 
("test_agent", "test-agent-app") else None + + scanner.call_invocations = AsyncMock(return_value={ + "parts": [{"text": "Hello from /invocations!"}], + "_trace": { + "protocol": "invocations", + "latency_ms": 150.0, + }, + }) + + # Disable auto-scan by not providing a real scan method + scanner.scan = AsyncMock(return_value=[fake_agent]) + + return scanner + + +@pytest.fixture +def dashboard_client(mock_scanner): + """TestClient for the dashboard app with mocked scanner.""" + app = create_dashboard_app(mock_scanner, auto_scan_interval=0) + return TestClient(app) + + +def test_test_endpoint_success(dashboard_client, mock_scanner): + """POST /api/agents/{name}/test calls agent via /invocations.""" + response = dashboard_client.post( + "/api/agents/test_agent/test", + json={"message": "hello"}, + ) + + assert response.status_code == 200 + data = response.json() + assert data["result"]["parts"][0]["text"] == "Hello from /invocations!" + assert data["result"]["_trace"]["protocol"] == "invocations" + + mock_scanner.call_invocations.assert_called_once_with( + "https://fake-agent.example.com", "hello" + ) + + +def test_test_endpoint_agent_not_found(dashboard_client): + """POST /api/agents/{name}/test returns 404 for unknown agent.""" + response = dashboard_client.post( + "/api/agents/nonexistent/test", + json={"message": "hello"}, + ) + + assert response.status_code == 404 + + +def test_test_endpoint_proxy_error(dashboard_client, mock_scanner): + """POST /api/agents/{name}/test returns 502 on proxy failure.""" + mock_scanner.call_invocations = AsyncMock(side_effect=Exception("Connection refused")) + + response = dashboard_client.post( + "/api/agents/test_agent/test", + json={"message": "hello"}, + ) + + assert response.status_code == 502 + assert "Connection refused" in response.json()["error"] diff --git a/dbx-agent-app/tests/test_deploy_config.py b/dbx-agent-app/tests/test_deploy_config.py new file mode 100644 index 00000000..0dc8aa05 --- /dev/null +++ 
b/dbx-agent-app/tests/test_deploy_config.py @@ -0,0 +1,406 @@ +"""Tests for deploy config parsing, resource types, and validation.""" + +import pytest +import yaml +from pathlib import Path + +from dbx_agent_app.deploy.config import ( + AppResourceSpec, + AgentSpec, + DeployConfig, + UCSecurableResource, + SqlWarehouseResource, + JobResource, + SecretResource, + ServingEndpointResource, + DatabaseResource, + GenieSpaceResource, + _parse_resource, + _interpolate, + _build_context, +) + + +# =================================================================== +# Interpolation +# =================================================================== + + +def test_interpolation_basic(): + ctx = {"uc.catalog": "main", "uc.schema": "agents"} + assert _interpolate("${uc.catalog}.${uc.schema}.table", ctx) == "main.agents.table" + + +def test_interpolation_unknown_key(): + with pytest.raises(ValueError, match="Unknown interpolation key"): + _interpolate("${missing.key}", {}) + + +def test_build_context_skips_agents(): + raw = { + "project": {"name": "test"}, + "uc": {"catalog": "cat", "schema": "sch"}, + "agents": [{"name": "skip-me"}], + } + ctx = _build_context(raw) + assert "uc.catalog" in ctx + assert "project.name" in ctx + assert not any(k.startswith("agents") for k in ctx) + + +# =================================================================== +# UC securable validation +# =================================================================== + + +def test_uc_securable_valid_types(): + for stype in ("TABLE", "VOLUME", "FUNCTION", "CONNECTION"): + r = UCSecurableResource(securable_type=stype, securable_full_name="a.b.c", permission="SELECT") + assert r.securable_type == stype + + +def test_uc_securable_invalid_type(): + with pytest.raises(ValueError, match="securable_type must be one of"): + UCSecurableResource(securable_type="INVALID", securable_full_name="a.b.c", permission="SELECT") + + +def test_uc_securable_valid_permissions(): + for perm in ("SELECT", "EXECUTE", 
"READ_VOLUME", "WRITE_VOLUME", "USE_CONNECTION", "MANAGE"): + r = UCSecurableResource(securable_type="TABLE", securable_full_name="a.b.c", permission=perm) + assert r.permission == perm + + +def test_uc_securable_invalid_permission(): + with pytest.raises(ValueError, match="permission must be one of"): + UCSecurableResource(securable_type="TABLE", securable_full_name="a.b.c", permission="DROP") + + +# =================================================================== +# AppResourceSpec — exactly one resource type +# =================================================================== + + +def test_resource_spec_no_type_set(): + with pytest.raises(ValueError, match="exactly one resource type must be set, got none"): + AppResourceSpec(name="empty") + + +def test_resource_spec_multiple_types(): + with pytest.raises(ValueError, match="exactly one resource type must be set"): + AppResourceSpec( + name="multi", + uc_securable=UCSecurableResource( + securable_type="TABLE", securable_full_name="a.b.c", permission="SELECT" + ), + sql_warehouse=SqlWarehouseResource(id="abc"), + ) + + +def test_resource_spec_each_type_valid(): + """Each resource type works when set alone.""" + specs = [ + AppResourceSpec( + name="t1", + uc_securable=UCSecurableResource( + securable_type="TABLE", securable_full_name="a.b.c", permission="SELECT" + ), + ), + AppResourceSpec(name="t2", sql_warehouse=SqlWarehouseResource(id="wh1")), + AppResourceSpec(name="t3", job=JobResource(id="j1")), + AppResourceSpec(name="t4", secret=SecretResource(scope="s", key="k")), + AppResourceSpec(name="t5", serving_endpoint=ServingEndpointResource(name="ep1")), + AppResourceSpec( + name="t6", + database=DatabaseResource(instance_name="db1", database_name="prod"), + ), + AppResourceSpec(name="t7", genie_space=GenieSpaceResource(id="gs1")), + ] + assert len(specs) == 7 + + +# =================================================================== +# _parse_resource — all 6 types +# 
=================================================================== + + +@pytest.fixture +def context(): + return {"uc.catalog": "cat", "uc.schema": "sch", "warehouse.id": "wh123"} + + +def test_parse_uc_securable(context): + data = { + "name": "my-table", + "uc_securable": { + "securable_type": "TABLE", + "securable_full_name": "${uc.catalog}.${uc.schema}.users", + "permission": "SELECT", + }, + } + spec = _parse_resource(data, context) + assert spec.uc_securable is not None + assert spec.uc_securable.securable_full_name == "cat.sch.users" + assert spec.uc_securable.securable_type == "TABLE" + assert spec.uc_securable.permission == "SELECT" + + +def test_parse_sql_warehouse(context): + data = {"name": "wh", "sql_warehouse": {"id": "${warehouse.id}", "permission": "CAN_USE"}} + spec = _parse_resource(data, context) + assert spec.sql_warehouse is not None + assert spec.sql_warehouse.id == "wh123" + assert spec.sql_warehouse.permission == "CAN_USE" + + +def test_parse_job(context): + data = {"name": "etl", "job": {"id": "999", "permission": "CAN_MANAGE_RUN"}} + spec = _parse_resource(data, context) + assert spec.job is not None + assert spec.job.id == "999" + assert spec.job.permission == "CAN_MANAGE_RUN" + + +def test_parse_secret(context): + data = {"name": "api-key", "secret": {"scope": "my-scope", "key": "openai", "permission": "READ"}} + spec = _parse_resource(data, context) + assert spec.secret is not None + assert spec.secret.scope == "my-scope" + assert spec.secret.key == "openai" + + +def test_parse_serving_endpoint(context): + data = {"name": "llm", "serving_endpoint": {"name": "gpt4-ep", "permission": "CAN_QUERY"}} + spec = _parse_resource(data, context) + assert spec.serving_endpoint is not None + assert spec.serving_endpoint.name == "gpt4-ep" + + +def test_parse_database(context): + data = { + "name": "ext-db", + "database": { + "instance_name": "my-rds", + "database_name": "prod", + "permission": "CAN_CONNECT_AND_CREATE", + }, + } + spec = 
_parse_resource(data, context) + assert spec.database is not None + assert spec.database.instance_name == "my-rds" + assert spec.database.database_name == "prod" + + +def test_parse_resource_default_permissions(context): + """Resource types use correct defaults when permission is omitted.""" + wh = _parse_resource({"name": "wh", "sql_warehouse": {"id": "x"}}, context) + assert wh.sql_warehouse.permission == "CAN_USE" + + job = _parse_resource({"name": "j", "job": {"id": "1"}}, context) + assert job.job.permission == "CAN_MANAGE_RUN" + + secret = _parse_resource({"name": "s", "secret": {"scope": "a", "key": "b"}}, context) + assert secret.secret.permission == "READ" + + ep = _parse_resource({"name": "e", "serving_endpoint": {"name": "x"}}, context) + assert ep.serving_endpoint.permission == "CAN_QUERY" + + db = _parse_resource( + {"name": "d", "database": {"instance_name": "x", "database_name": "y"}}, context + ) + assert db.database.permission == "CAN_CONNECT_AND_CREATE" + + +# =================================================================== +# DeployConfig.from_dict — full YAML parsing +# =================================================================== + + +def _base_raw(**overrides): + """Build a minimal valid raw config dict.""" + raw = { + "project": {"name": "test-project", "workspace_path": "/Workspace/Shared/apps"}, + "uc": {"catalog": "cat", "schema": "sch"}, + "warehouse": {"id": "wh1"}, + "agents": [], + } + raw.update(overrides) + return raw + + +def test_from_dict_basic(): + raw = _base_raw(agents=[{"name": "echo", "source": "/tmp/echo"}]) + config = DeployConfig.from_dict(raw) + assert config.project.name == "test-project" + assert len(config.agents) == 1 + assert config.agents[0].name == "echo" + + +def test_from_dict_with_resources(): + raw = _base_raw(agents=[{ + "name": "research", + "source": "/tmp/research", + "resources": [ + { + "name": "data", + "uc_securable": { + "securable_type": "TABLE", + "securable_full_name": 
"${uc.catalog}.${uc.schema}.t", + "permission": "SELECT", + }, + }, + {"name": "wh", "sql_warehouse": {"id": "${warehouse.id}"}}, + {"name": "j", "job": {"id": "42"}}, + {"name": "s", "secret": {"scope": "sc", "key": "k"}}, + {"name": "ep", "serving_endpoint": {"name": "gpt4"}}, + { + "name": "db", + "database": {"instance_name": "rds", "database_name": "prod"}, + }, + ], + }]) + config = DeployConfig.from_dict(raw) + agent = config.agents[0] + assert len(agent.resources) == 6 + assert agent.resources[0].uc_securable.securable_full_name == "cat.sch.t" + assert agent.resources[1].sql_warehouse.id == "wh1" + + +def test_from_dict_legacy_tables(): + """Legacy `tables` field auto-converts to uc_securable + warehouse resources.""" + raw = _base_raw(agents=[{ + "name": "legacy", + "source": "/tmp/legacy", + "tables": ["users", "orders"], + }]) + config = DeployConfig.from_dict(raw) + agent = config.agents[0] + # 2 tables + 1 warehouse = 3 resources + assert len(agent.resources) == 3 + assert agent.resources[0].uc_securable.securable_full_name == "cat.sch.users" + assert agent.resources[1].uc_securable.securable_full_name == "cat.sch.orders" + assert agent.resources[2].sql_warehouse.id == "wh1" + + +def test_from_dict_env_interpolation(): + raw = _base_raw(agents=[{ + "name": "test", + "source": "/tmp/test", + "resources": [{"name": "wh", "sql_warehouse": {"id": "x"}}], + "env": {"UC_CATALOG": "${uc.catalog}", "STATIC": "plain"}, + }]) + config = DeployConfig.from_dict(raw) + assert config.agents[0].env["UC_CATALOG"] == "cat" + assert config.agents[0].env["STATIC"] == "plain" + + +def test_from_dict_topological_order(): + raw = _base_raw(agents=[ + {"name": "supervisor", "source": "/s", "depends_on": ["research"], + "resources": [{"name": "wh", "sql_warehouse": {"id": "x"}}]}, + {"name": "research", "source": "/r", + "resources": [{"name": "wh", "sql_warehouse": {"id": "x"}}]}, + ]) + config = DeployConfig.from_dict(raw) + ordered = config.ordered_agents + names = [a.name 
for a in ordered] + assert names.index("research") < names.index("supervisor") + + +def test_from_dict_circular_dependency(): + raw = _base_raw(agents=[ + {"name": "a", "source": "/a", "depends_on": ["b"], + "resources": [{"name": "wh", "sql_warehouse": {"id": "x"}}]}, + {"name": "b", "source": "/b", "depends_on": ["a"], + "resources": [{"name": "wh", "sql_warehouse": {"id": "x"}}]}, + ]) + config = DeployConfig.from_dict(raw) + with pytest.raises(ValueError, match="Circular dependency"): + config.ordered_agents + + +def test_app_name(): + raw = _base_raw(agents=[{"name": "research", "source": "/tmp/r", + "resources": [{"name": "wh", "sql_warehouse": {"id": "x"}}]}]) + config = DeployConfig.from_dict(raw) + assert config.app_name(config.agents[0]) == "test-project-research" + + +def test_from_yaml(tmp_path): + yaml_content = { + "project": {"name": "yaml-test"}, + "uc": {"catalog": "c", "schema": "s"}, + "warehouse": {"id": "w"}, + "agents": [{ + "name": "agent", + "source": "./agent", + "resources": [{"name": "wh", "sql_warehouse": {"id": "${warehouse.id}"}}], + }], + } + config_file = tmp_path / "agents.yaml" + config_file.write_text(yaml.dump(yaml_content)) + + config = DeployConfig.from_yaml(config_file) + assert config.agents[0].resources[0].sql_warehouse.id == "w" + # Source path should be resolved relative to config file + assert Path(config.agents[0].source).is_absolute() + + +# =================================================================== +# Genie space resource type +# =================================================================== + + +def test_parse_genie_space(context): + data = {"name": "genie", "genie_space": {"id": "01abc", "permission": "CAN_VIEW"}} + spec = _parse_resource(data, context) + assert spec.genie_space is not None + assert spec.genie_space.id == "01abc" + assert spec.genie_space.permission == "CAN_VIEW" + + +def test_parse_genie_space_default_permission(context): + data = {"name": "genie", "genie_space": {"id": "01abc"}} + 
spec = _parse_resource(data, context) + assert spec.genie_space.permission == "CAN_VIEW" + + +def test_parse_genie_space_interpolation(context): + context["genie.id"] = "gs-123" + data = {"name": "genie", "genie_space": {"id": "${genie.id}"}} + spec = _parse_resource(data, context) + assert spec.genie_space.id == "gs-123" + + +def test_from_dict_with_genie_space(): + raw = _base_raw(agents=[{ + "name": "genie-agent", + "source": "/tmp/genie", + "resources": [{"name": "space", "genie_space": {"id": "01efg"}}], + }]) + config = DeployConfig.from_dict(raw) + agent = config.agents[0] + assert len(agent.resources) == 1 + assert agent.resources[0].genie_space.id == "01efg" + + +# =================================================================== +# user_api_scopes +# =================================================================== + + +def test_from_dict_with_user_api_scopes(): + raw = _base_raw(agents=[{ + "name": "scoped", + "source": "/tmp/scoped", + "resources": [{"name": "wh", "sql_warehouse": {"id": "x"}}], + "user_api_scopes": ["sql", "serving.serving-endpoints", "dashboards.genie"], + }]) + config = DeployConfig.from_dict(raw) + agent = config.agents[0] + assert agent.user_api_scopes == ["sql", "serving.serving-endpoints", "dashboards.genie"] + + +def test_from_dict_user_api_scopes_defaults_empty(): + raw = _base_raw(agents=[{"name": "basic", "source": "/tmp/basic"}]) + config = DeployConfig.from_dict(raw) + assert config.agents[0].user_api_scopes == [] diff --git a/dbx-agent-app/tests/test_deploy_engine.py b/dbx-agent-app/tests/test_deploy_engine.py new file mode 100644 index 00000000..54b2f464 --- /dev/null +++ b/dbx-agent-app/tests/test_deploy_engine.py @@ -0,0 +1,323 @@ +"""Tests for DeployEngine resource setting and permission grants.""" + +from unittest.mock import MagicMock, patch, call + +import pytest + +from dbx_agent_app.deploy.config import ( + AgentSpec, + AppResourceSpec, + DatabaseResource, + DeployConfig, + GenieSpaceResource, + JobResource, + 
ProjectConfig, + SecretResource, + ServingEndpointResource, + SqlWarehouseResource, + UCConfig, + UCSecurableResource, + WarehouseConfig, +) +from dbx_agent_app.deploy.engine import DeployEngine + + +# =================================================================== +# Fixtures +# =================================================================== + + +def _make_config(agents=None): + return DeployConfig( + project=ProjectConfig(name="test-project"), + uc=UCConfig(catalog="cat", schema_="sch"), + warehouse=WarehouseConfig(id="wh1"), + agents=agents or [], + ) + + +def _make_engine(config, mock_ws=None): + engine = DeployEngine(config, dry_run=True) + engine._w = mock_ws or MagicMock() + return engine + + +# =================================================================== +# _set_app_resources — builds correct REST payloads +# =================================================================== + + +def test_set_app_resources_uc_securable(): + agent = AgentSpec( + name="research", + source="/tmp", + resources=[ + AppResourceSpec( + name="data", + uc_securable=UCSecurableResource( + securable_type="TABLE", + securable_full_name="cat.sch.users", + permission="SELECT", + ), + ), + ], + ) + config = _make_config([agent]) + mock_ws = MagicMock() + engine = _make_engine(config, mock_ws) + + engine._set_app_resources(agent) + + mock_ws.api_client.do.assert_called_once() + args = mock_ws.api_client.do.call_args + assert args[0] == ("PATCH", "/api/2.0/apps/test-project-research") + body = args[1]["body"] + assert len(body["resources"]) == 1 + res = body["resources"][0] + assert res["name"] == "data" + assert res["uc_securable"]["securable_type"] == "TABLE" + assert res["uc_securable"]["securable_full_name"] == "cat.sch.users" + assert res["uc_securable"]["permission"] == "SELECT" + + +def test_set_app_resources_sql_warehouse(): + agent = AgentSpec( + name="agent", + source="/tmp", + resources=[AppResourceSpec(name="wh", sql_warehouse=SqlWarehouseResource(id="wh1"))], + 
) + config = _make_config([agent]) + engine = _make_engine(config) + + engine._set_app_resources(agent) + + body = engine.w.api_client.do.call_args[1]["body"] + assert body["resources"][0]["sql_warehouse"]["id"] == "wh1" + assert body["resources"][0]["sql_warehouse"]["permission"] == "CAN_USE" + + +def test_set_app_resources_job(): + agent = AgentSpec( + name="agent", + source="/tmp", + resources=[AppResourceSpec(name="etl", job=JobResource(id="42"))], + ) + engine = _make_engine(_make_config([agent])) + engine._set_app_resources(agent) + + body = engine.w.api_client.do.call_args[1]["body"] + assert body["resources"][0]["job"]["id"] == "42" + assert body["resources"][0]["job"]["permission"] == "CAN_MANAGE_RUN" + + +def test_set_app_resources_secret(): + agent = AgentSpec( + name="agent", + source="/tmp", + resources=[AppResourceSpec(name="key", secret=SecretResource(scope="sc", key="k"))], + ) + engine = _make_engine(_make_config([agent])) + engine._set_app_resources(agent) + + body = engine.w.api_client.do.call_args[1]["body"] + assert body["resources"][0]["secret"]["scope"] == "sc" + assert body["resources"][0]["secret"]["key"] == "k" + + +def test_set_app_resources_serving_endpoint(): + agent = AgentSpec( + name="agent", + source="/tmp", + resources=[ + AppResourceSpec(name="llm", serving_endpoint=ServingEndpointResource(name="gpt4")) + ], + ) + engine = _make_engine(_make_config([agent])) + engine._set_app_resources(agent) + + body = engine.w.api_client.do.call_args[1]["body"] + assert body["resources"][0]["serving_endpoint"]["name"] == "gpt4" + assert body["resources"][0]["serving_endpoint"]["permission"] == "CAN_QUERY" + + +def test_set_app_resources_database(): + agent = AgentSpec( + name="agent", + source="/tmp", + resources=[ + AppResourceSpec( + name="ext", + database=DatabaseResource(instance_name="rds", database_name="prod"), + ) + ], + ) + engine = _make_engine(_make_config([agent])) + engine._set_app_resources(agent) + + body = 
engine.w.api_client.do.call_args[1]["body"] + assert body["resources"][0]["database"]["instance_name"] == "rds" + assert body["resources"][0]["database"]["database_name"] == "prod" + + +def test_set_app_resources_multiple(): + """Multiple resources of different types in one call.""" + agent = AgentSpec( + name="multi", + source="/tmp", + resources=[ + AppResourceSpec( + name="table", + uc_securable=UCSecurableResource( + securable_type="TABLE", securable_full_name="a.b.c", permission="SELECT" + ), + ), + AppResourceSpec(name="wh", sql_warehouse=SqlWarehouseResource(id="wh1")), + AppResourceSpec(name="secret", secret=SecretResource(scope="s", key="k")), + ], + ) + engine = _make_engine(_make_config([agent])) + engine._set_app_resources(agent) + + body = engine.w.api_client.do.call_args[1]["body"] + assert len(body["resources"]) == 3 + + +def test_set_app_resources_empty(): + """Agent with no resources — no API call made.""" + agent = AgentSpec(name="bare", source="/tmp", resources=[]) + engine = _make_engine(_make_config([agent])) + engine._set_app_resources(agent) + + engine.w.api_client.do.assert_not_called() + + +def test_set_app_resources_with_description(): + agent = AgentSpec( + name="agent", + source="/tmp", + resources=[ + AppResourceSpec( + name="data", + description="Main data table", + uc_securable=UCSecurableResource( + securable_type="TABLE", securable_full_name="a.b.c", permission="SELECT" + ), + ), + ], + ) + engine = _make_engine(_make_config([agent])) + engine._set_app_resources(agent) + + body = engine.w.api_client.do.call_args[1]["body"] + assert body["resources"][0]["description"] == "Main data table" + + +# =================================================================== +# _grant_app_to_app — permission grants +# =================================================================== + + +def test_grant_app_to_app(): + from databricks.sdk.service.apps import AppPermissionLevel + + engine = _make_engine(_make_config()) + 
engine._grant_app_to_app("sp-uuid-123", "target-app") + + engine.w.apps.update_permissions.assert_called_once() + call_args = engine.w.apps.update_permissions.call_args + assert call_args[1]["app_name"] == "target-app" + acl = call_args[1]["access_control_list"] + assert len(acl) == 1 + assert acl[0].service_principal_name == "sp-uuid-123" + assert acl[0].permission_level == AppPermissionLevel.CAN_USE + + +def test_grant_app_to_app_handles_failure(caplog): + engine = _make_engine(_make_config()) + engine.w.apps.update_permissions.side_effect = Exception("perm denied") + + # Should not raise + engine._grant_app_to_app("sp-1", "app-2") + + +# =================================================================== +# Dry run +# =================================================================== + + +def test_set_app_resources_genie_space(): + agent = AgentSpec( + name="genie-agent", + source="/tmp", + resources=[ + AppResourceSpec(name="space", genie_space=GenieSpaceResource(id="01abc")) + ], + ) + engine = _make_engine(_make_config([agent])) + engine._set_app_resources(agent) + + body = engine.w.api_client.do.call_args[1]["body"] + assert body["resources"][0]["genie_space"]["id"] == "01abc" + assert body["resources"][0]["genie_space"]["permission"] == "CAN_VIEW" + + +def test_set_app_resources_with_user_api_scopes(): + agent = AgentSpec( + name="scoped", + source="/tmp", + resources=[AppResourceSpec(name="wh", sql_warehouse=SqlWarehouseResource(id="wh1"))], + user_api_scopes=["sql", "serving.serving-endpoints"], + ) + engine = _make_engine(_make_config([agent])) + engine._set_app_resources(agent) + + body = engine.w.api_client.do.call_args[1]["body"] + assert body["resources"][0]["sql_warehouse"]["id"] == "wh1" + assert body["user_api_scopes"] == ["sql", "serving.serving-endpoints"] + + +def test_set_app_resources_scopes_only(): + """Agent with scopes but no resources still makes the PATCH call.""" + agent = AgentSpec( + name="scope-only", + source="/tmp", + 
resources=[], + user_api_scopes=["sql"], + ) + engine = _make_engine(_make_config([agent])) + engine._set_app_resources(agent) + + body = engine.w.api_client.do.call_args[1]["body"] + assert "resources" not in body + assert body["user_api_scopes"] == ["sql"] + + +def test_set_app_resources_no_scopes_in_body(): + """When no scopes set, body should not include user_api_scopes key.""" + agent = AgentSpec( + name="no-scopes", + source="/tmp", + resources=[AppResourceSpec(name="wh", sql_warehouse=SqlWarehouseResource(id="x"))], + ) + engine = _make_engine(_make_config([agent])) + engine._set_app_resources(agent) + + body = engine.w.api_client.do.call_args[1]["body"] + assert "user_api_scopes" not in body + + +def test_dry_run_skips_deployment(): + agent = AgentSpec( + name="test", + source="/tmp/test", + resources=[AppResourceSpec(name="wh", sql_warehouse=SqlWarehouseResource(id="x"))], + ) + config = _make_config([agent]) + engine = DeployEngine(config, dry_run=True) + engine._w = MagicMock() + + engine.deploy() + + # No app creation, no resource setting, no health checks + engine.w.apps.create_and_wait.assert_not_called() + engine.w.api_client.do.assert_not_called() diff --git a/dbx-agent-app/tests/test_eval_bridge.py b/dbx-agent-app/tests/test_eval_bridge.py new file mode 100644 index 00000000..2fb43a3d --- /dev/null +++ b/dbx-agent-app/tests/test_eval_bridge.py @@ -0,0 +1,242 @@ +"""Tests for the eval bridge — app_predict_fn().""" + +import json + +import httpx +import pytest +import respx + +from dbx_agent_app.bridge.eval import app_predict_fn + + +APP_URL = "https://my-agent.cloud.databricks.com" +TOKEN = "dapi-test-token-123" + + +# ------------------------------------------------------------------- +# Construction +# ------------------------------------------------------------------- + + +def test_creates_predict_fn(): + fn = app_predict_fn(APP_URL, token=TOKEN) + assert callable(fn) + + +def test_raises_without_token(monkeypatch): + 
monkeypatch.delenv("DATABRICKS_TOKEN", raising=False) + with pytest.raises(ValueError, match="No auth token"): + app_predict_fn(APP_URL) + + +def test_reads_token_from_env(monkeypatch): + monkeypatch.setenv("DATABRICKS_TOKEN", "env-token") + fn = app_predict_fn(APP_URL) + assert callable(fn) + + +# ------------------------------------------------------------------- +# Calling predict_fn +# ------------------------------------------------------------------- + + +AGENT_RESPONSE = { + "output": [ + { + "type": "message", + "id": "abc-123", + "content": [{"type": "output_text", "text": "Hello from agent!"}], + } + ] +} + + +@respx.mock +def test_predict_with_messages(): + route = respx.post(f"{APP_URL}/invocations").mock( + return_value=httpx.Response(200, json=AGENT_RESPONSE) + ) + + fn = app_predict_fn(APP_URL, token=TOKEN) + result = fn(messages=[{"role": "user", "content": "Hi"}]) + + assert route.called + request_body = json.loads(route.calls[0].request.content) + assert request_body == {"input": [{"role": "user", "content": "Hi"}]} + + assert result["response"] == "Hello from agent!" + assert len(result["output"]) == 1 + + +@respx.mock +def test_predict_with_question_kwarg(): + respx.post(f"{APP_URL}/invocations").mock( + return_value=httpx.Response(200, json=AGENT_RESPONSE) + ) + + fn = app_predict_fn(APP_URL, token=TOKEN) + result = fn(question="What is Databricks?") + + assert result["response"] == "Hello from agent!" + + +@respx.mock +def test_predict_with_input_kwarg(): + respx.post(f"{APP_URL}/invocations").mock( + return_value=httpx.Response(200, json=AGENT_RESPONSE) + ) + + fn = app_predict_fn(APP_URL, token=TOKEN) + result = fn(input="Tell me about agents") + + assert result["response"] == "Hello from agent!" 
+ + +@respx.mock +def test_predict_with_query_kwarg(): + respx.post(f"{APP_URL}/invocations").mock( + return_value=httpx.Response(200, json=AGENT_RESPONSE) + ) + + fn = app_predict_fn(APP_URL, token=TOKEN) + result = fn(query="Search for experts") + + assert result["response"] == "Hello from agent!" + + +def test_predict_raises_without_input(): + fn = app_predict_fn(APP_URL, token=TOKEN) + with pytest.raises(ValueError, match="requires 'messages'"): + fn() + + +# ------------------------------------------------------------------- +# Wire format translation +# ------------------------------------------------------------------- + + +@respx.mock +def test_multi_content_blocks(): + """Multiple content blocks are joined with newlines.""" + multi_response = { + "output": [ + { + "type": "message", + "id": "1", + "content": [ + {"type": "output_text", "text": "First part."}, + {"type": "output_text", "text": "Second part."}, + ], + } + ] + } + respx.post(f"{APP_URL}/invocations").mock( + return_value=httpx.Response(200, json=multi_response) + ) + + fn = app_predict_fn(APP_URL, token=TOKEN) + result = fn(messages=[{"role": "user", "content": "Hi"}]) + + assert result["response"] == "First part.\nSecond part." 
+ + +@respx.mock +def test_multi_output_items(): + """Multiple output items have all their text joined.""" + multi_item_response = { + "output": [ + {"type": "message", "id": "1", "content": [{"text": "A"}]}, + {"type": "message", "id": "2", "content": [{"text": "B"}]}, + ] + } + respx.post(f"{APP_URL}/invocations").mock( + return_value=httpx.Response(200, json=multi_item_response) + ) + + fn = app_predict_fn(APP_URL, token=TOKEN) + result = fn(messages=[{"role": "user", "content": "Hi"}]) + + assert result["response"] == "A\nB" + + +@respx.mock +def test_empty_output(): + respx.post(f"{APP_URL}/invocations").mock( + return_value=httpx.Response(200, json={"output": []}) + ) + + fn = app_predict_fn(APP_URL, token=TOKEN) + result = fn(messages=[{"role": "user", "content": "Hi"}]) + + assert result["response"] == "" + assert result["output"] == [] + + +# ------------------------------------------------------------------- +# Auth headers +# ------------------------------------------------------------------- + + +@respx.mock +def test_sends_auth_header(): + route = respx.post(f"{APP_URL}/invocations").mock( + return_value=httpx.Response(200, json=AGENT_RESPONSE) + ) + + fn = app_predict_fn(APP_URL, token=TOKEN) + fn(messages=[{"role": "user", "content": "Hi"}]) + + auth_header = route.calls[0].request.headers["authorization"] + assert auth_header == f"Bearer {TOKEN}" + + +# ------------------------------------------------------------------- +# Error handling +# ------------------------------------------------------------------- + + +@respx.mock +def test_raises_on_http_error(): + respx.post(f"{APP_URL}/invocations").mock( + return_value=httpx.Response(500, text="Internal Server Error") + ) + + fn = app_predict_fn(APP_URL, token=TOKEN) + with pytest.raises(httpx.HTTPStatusError): + fn(messages=[{"role": "user", "content": "Hi"}]) + + +@respx.mock +def test_trailing_slash_stripped(): + route = respx.post(f"{APP_URL}/invocations").mock( + return_value=httpx.Response(200, 
json=AGENT_RESPONSE) + ) + + fn = app_predict_fn(f"{APP_URL}/", token=TOKEN) + fn(messages=[{"role": "user", "content": "Hi"}]) + + assert route.called + + +# ------------------------------------------------------------------- +# Multi-turn conversation +# ------------------------------------------------------------------- + + +@respx.mock +def test_multi_turn_messages(): + """Verify multi-turn conversation messages are forwarded correctly.""" + route = respx.post(f"{APP_URL}/invocations").mock( + return_value=httpx.Response(200, json=AGENT_RESPONSE) + ) + + fn = app_predict_fn(APP_URL, token=TOKEN) + messages = [ + {"role": "user", "content": "What is MLflow?"}, + {"role": "assistant", "content": "MLflow is an open-source platform..."}, + {"role": "user", "content": "How does it handle agents?"}, + ] + fn(messages=messages) + + request_body = json.loads(route.calls[0].request.content) + assert request_body["input"] == messages diff --git a/dbx-agent-app/tests/test_mcp_server.py b/dbx-agent-app/tests/test_mcp_server.py new file mode 100644 index 00000000..d835c175 --- /dev/null +++ b/dbx-agent-app/tests/test_mcp_server.py @@ -0,0 +1,163 @@ +"""Tests for MCP server — works with plain FastAPI.""" + +from fastapi import FastAPI +from fastapi.testclient import TestClient + +from dbx_agent_app.mcp import MCPServerConfig, setup_mcp_server + + +def _make_mcp_app(): + """Create a plain FastAPI app with MCP endpoints via setup_mcp_server.""" + app = FastAPI() + + async def echo(text: str) -> dict: + return {"echo": text} + + tools = [ + { + "name": "echo", + "description": "Echo back the input", + "parameters": {"text": {"type": "string", "required": True}}, + "function": echo, + } + ] + + config = MCPServerConfig(name="test_mcp", description="Test MCP server") + setup_mcp_server(tools, config=config, fastapi_app=app) + + return app + + +# --- tools/list --- + + +def test_mcp_tools_list(): + """tools/list returns all registered tools in MCP format.""" + client = 
TestClient(_make_mcp_app()) + + response = client.post( + "/api/mcp", + json={"jsonrpc": "2.0", "method": "tools/list", "id": "1"}, + ) + + assert response.status_code == 200 + data = response.json() + assert data["jsonrpc"] == "2.0" + assert data["id"] == "1" + + tools = data["result"]["tools"] + assert len(tools) == 1 + assert tools[0]["name"] == "echo" + assert tools[0]["description"] == "Echo back the input" + assert "inputSchema" in tools[0] + + +def test_mcp_tools_list_empty(): + """tools/list returns empty list when no tools registered.""" + app = FastAPI() + config = MCPServerConfig(name="empty") + setup_mcp_server([], config=config, fastapi_app=app) + + client = TestClient(app) + response = client.post( + "/api/mcp", + json={"jsonrpc": "2.0", "method": "tools/list", "id": "1"}, + ) + + assert response.status_code == 200 + assert response.json()["result"]["tools"] == [] + + +# --- tools/call --- + + +def test_mcp_tools_call_success(): + """tools/call executes a tool and returns the result.""" + client = TestClient(_make_mcp_app()) + + response = client.post( + "/api/mcp", + json={ + "jsonrpc": "2.0", + "method": "tools/call", + "params": {"name": "echo", "arguments": {"text": "hello"}}, + "id": "2", + }, + ) + + assert response.status_code == 200 + data = response.json() + assert data["result"]["result"]["echo"] == "hello" + + +def test_mcp_tools_call_not_found(): + """tools/call with unknown tool name returns error.""" + client = TestClient(_make_mcp_app()) + + response = client.post( + "/api/mcp", + json={ + "jsonrpc": "2.0", + "method": "tools/call", + "params": {"name": "nonexistent", "arguments": {}}, + "id": "3", + }, + ) + + assert response.status_code == 200 + data = response.json() + assert "error" in data + assert data["error"]["code"] == -32603 + + +# --- server/info --- + + +def test_mcp_server_info(): + """server/info returns server metadata.""" + client = TestClient(_make_mcp_app()) + + response = client.post( + "/api/mcp", + json={"jsonrpc": 
"2.0", "method": "server/info", "id": "4"}, + ) + + assert response.status_code == 200 + data = response.json() + info = data["result"] + assert info["name"] == "test_mcp" + assert "version" in info + assert "protocol_version" in info + + +# --- unknown method --- + + +def test_mcp_unknown_method(): + """Unknown JSON-RPC method returns -32601 error.""" + client = TestClient(_make_mcp_app()) + + response = client.post( + "/api/mcp", + json={"jsonrpc": "2.0", "method": "unknown/method", "id": "5"}, + ) + + assert response.status_code == 200 + data = response.json() + assert data["error"]["code"] == -32601 + assert "unknown/method" in data["error"]["message"] + + +# --- GET /api/mcp/tools --- + + +def test_mcp_tools_get_endpoint(): + """GET /api/mcp/tools returns tool list.""" + client = TestClient(_make_mcp_app()) + + response = client.get("/api/mcp/tools") + + assert response.status_code == 200 + tools = response.json()["tools"] + assert len(tools) == 1 + assert tools[0]["name"] == "echo" diff --git a/dbx-agent-app/tests/test_system_builder.py b/dbx-agent-app/tests/test_system_builder.py new file mode 100644 index 00000000..6955871c --- /dev/null +++ b/dbx-agent-app/tests/test_system_builder.py @@ -0,0 +1,508 @@ +"""Tests for the System Builder service — CRUD, validation, deploy logic.""" + +import asyncio +import json +import pytest +from pathlib import Path +from unittest.mock import AsyncMock, MagicMock, patch + +from fastapi.testclient import TestClient + +from dbx_agent_app.discovery import DiscoveredAgent +from dbx_agent_app.dashboard.scanner import DashboardScanner +from dbx_agent_app.dashboard.app import create_dashboard_app +from dbx_agent_app.dashboard.system_builder import ( + SystemBuilderService, + SystemCreate, + SystemUpdate, + WiringEdge, + DeployProgress, +) + + +# --- Fixtures ---------------------------------------------------------------- + +FAKE_AGENTS = [ + DiscoveredAgent( + name="research_agent", + endpoint_url="https://research.example.com", 
+ app_name="research-app", + description="Does research", + capabilities="search,analysis", + protocol_version="a2a/1.0", + ), + DiscoveredAgent( + name="data_agent", + endpoint_url="https://data.example.com", + app_name="data-app", + description="Queries data", + capabilities="sql", + protocol_version="a2a/1.0", + ), + DiscoveredAgent( + name="supervisor", + endpoint_url="https://supervisor.example.com", + app_name="supervisor-app", + description="Orchestrates agents", + capabilities="research_agent,data_agent", + protocol_version="a2a/1.0", + ), +] + + +@pytest.fixture +def scanner(): + """DashboardScanner pre-loaded with fake agents.""" + s = DashboardScanner.__new__(DashboardScanner) + s._discovery = MagicMock() + s._agents = list(FAKE_AGENTS) + s._scan_lock = __import__("asyncio").Lock() + s._scanned = True + + async def _mock_scan(): + return list(FAKE_AGENTS) + s.scan = _mock_scan + + return s + + +@pytest.fixture +def service(scanner, tmp_path): + """SystemBuilderService with temporary storage.""" + return SystemBuilderService( + scanner=scanner, + profile=None, + data_dir=tmp_path, + ) + + +@pytest.fixture +def dashboard_client(scanner, tmp_path): + """FastAPI TestClient with system builder enabled.""" + svc = SystemBuilderService( + scanner=scanner, + profile=None, + data_dir=tmp_path, + ) + app = create_dashboard_app( + scanner, profile="test", system_builder=svc, auto_scan_interval=0 + ) + return TestClient(app) + + +# --- CRUD Tests -------------------------------------------------------------- + + +def test_create_system(service): + """Creating a system persists it and assigns an ID.""" + defn = service.create_system(SystemCreate( + name="Test System", + description="A test", + agents=["research_agent", "data_agent"], + edges=[WiringEdge( + source_agent="research_agent", + target_agent="data_agent", + env_var="RESEARCH_URL", + )], + )) + + assert defn.id + assert defn.name == "Test System" + assert len(defn.agents) == 2 + assert len(defn.edges) == 1 + 
assert defn.edges[0].env_var == "RESEARCH_URL" + + +def test_list_systems(service): + """List returns all created systems.""" + service.create_system(SystemCreate(name="System A", agents=[])) + service.create_system(SystemCreate(name="System B", agents=[])) + + systems = service.list_systems() + assert len(systems) == 2 + names = {s.name for s in systems} + assert names == {"System A", "System B"} + + +def test_get_system(service): + """Get by ID returns the correct system.""" + created = service.create_system(SystemCreate(name="Lookup Test", agents=[])) + found = service.get_system(created.id) + + assert found is not None + assert found.name == "Lookup Test" + + +def test_get_system_not_found(service): + """Get with invalid ID returns None.""" + assert service.get_system("nonexistent-id") is None + + +def test_update_system(service): + """Updating a system modifies only specified fields.""" + created = service.create_system(SystemCreate( + name="Original", + description="Before", + agents=["research_agent"], + )) + + updated = service.update_system(created.id, SystemUpdate( + name="Updated", + agents=["research_agent", "data_agent"], + )) + + assert updated is not None + assert updated.name == "Updated" + assert updated.description == "Before" # unchanged + assert len(updated.agents) == 2 + + +def test_update_system_not_found(service): + """Updating a nonexistent system returns None.""" + result = service.update_system("bad-id", SystemUpdate(name="Nope")) + assert result is None + + +def test_delete_system(service): + """Deleting a system removes it from the store.""" + created = service.create_system(SystemCreate(name="To Delete", agents=[])) + assert service.delete_system(created.id) is True + assert service.get_system(created.id) is None + assert service.delete_system(created.id) is False + + +def test_persistence(scanner, tmp_path): + """Systems survive service restart (reload from JSON).""" + svc1 = SystemBuilderService(scanner=scanner, data_dir=tmp_path) + 
svc1.create_system(SystemCreate(name="Persistent", agents=["research_agent"])) + + # Create a new service instance that reads from same path + svc2 = SystemBuilderService(scanner=scanner, data_dir=tmp_path) + systems = svc2.list_systems() + + assert len(systems) == 1 + assert systems[0].name == "Persistent" + + +# --- Validation Tests --------------------------------------------------------- + + +def test_self_loop_rejected(service): + """Edges where source == target are rejected.""" + with pytest.raises(ValueError, match="Self-loop"): + service.create_system(SystemCreate( + name="Self Loop", + agents=["research_agent"], + edges=[WiringEdge( + source_agent="research_agent", + target_agent="research_agent", + env_var="SELF_URL", + )], + )) + + +def test_edge_source_not_in_agents(service): + """Edges referencing agents not in the agents list are rejected.""" + with pytest.raises(ValueError, match="not in agents"): + service.create_system(SystemCreate( + name="Bad Edge", + agents=["data_agent"], + edges=[WiringEdge( + source_agent="nonexistent", + target_agent="data_agent", + env_var="X_URL", + )], + )) + + +def test_edge_target_not_in_agents(service): + """Edges referencing unknown target agents are rejected.""" + with pytest.raises(ValueError, match="not in agents"): + service.create_system(SystemCreate( + name="Bad Target", + agents=["research_agent"], + edges=[WiringEdge( + source_agent="research_agent", + target_agent="nonexistent", + env_var="X_URL", + )], + )) + + +# --- Deploy Tests (with mocked Databricks APIs) ------------------------------ + + +@pytest.mark.asyncio +async def test_deploy_system_not_found(service): + """Deploy for nonexistent system returns failed result.""" + result = await service.deploy_system("nonexistent") + assert result.status == "failed" + assert len(result.steps) == 1 + assert result.steps[0].status == "failed" + + +@pytest.mark.asyncio +async def test_deploy_resolves_agents(service): + """Deploy resolves agent metadata from 
scanner.""" + defn = service.create_system(SystemCreate( + name="Deploy Test", + agents=["research_agent", "data_agent"], + edges=[WiringEdge( + source_agent="research_agent", + target_agent="data_agent", + env_var="RESEARCH_URL", + )], + )) + + # Mock workspace client to avoid real API calls + with patch.object(service, '_get_ws_client', return_value=None): + result = await service.deploy_system(defn.id) + + # Without ws_client, env_update and permissions are skipped + assert result.system_id == defn.id + # Should have skipped steps due to no workspace client + skipped = [s for s in result.steps if s.status == "skipped"] + assert len(skipped) > 0 + + +@pytest.mark.asyncio +async def test_deploy_unresolved_agent(service): + """Deploy reports failure for agents not found in scanner.""" + defn = service.create_system(SystemCreate( + name="Missing Agent", + agents=["unknown_agent"], + edges=[], + )) + + # Scanner won't find "unknown_agent" + with patch.object(service, '_get_ws_client', return_value=None): + result = await service.deploy_system(defn.id) + + failed = [s for s in result.steps if s.status == "failed" and s.action == "resolve"] + assert len(failed) == 1 + assert "not found" in failed[0].detail + + +# --- API Route Tests ---------------------------------------------------------- + + +def test_api_list_systems_empty(dashboard_client): + """GET /api/systems returns empty list initially.""" + resp = dashboard_client.get("/api/systems") + assert resp.status_code == 200 + assert resp.json() == [] + + +def test_api_create_and_get_system(dashboard_client): + """POST /api/systems creates, GET /api/systems/{id} retrieves.""" + resp = dashboard_client.post("/api/systems", json={ + "name": "API Test", + "agents": ["research_agent", "data_agent"], + "edges": [{ + "source_agent": "research_agent", + "target_agent": "data_agent", + "env_var": "RESEARCH_URL", + }], + }) + assert resp.status_code == 200 + data = resp.json() + assert data["name"] == "API Test" + system_id = 
data["id"] + + # GET by ID + resp2 = dashboard_client.get(f"/api/systems/{system_id}") + assert resp2.status_code == 200 + assert resp2.json()["name"] == "API Test" + + +def test_api_update_system(dashboard_client): + """PUT /api/systems/{id} updates the system.""" + resp = dashboard_client.post("/api/systems", json={ + "name": "Before Update", + "agents": [], + }) + system_id = resp.json()["id"] + + resp2 = dashboard_client.put(f"/api/systems/{system_id}", json={ + "name": "After Update", + }) + assert resp2.status_code == 200 + assert resp2.json()["name"] == "After Update" + + +def test_api_delete_system(dashboard_client): + """DELETE /api/systems/{id} removes the system.""" + resp = dashboard_client.post("/api/systems", json={ + "name": "To Delete", + "agents": [], + }) + system_id = resp.json()["id"] + + resp2 = dashboard_client.delete(f"/api/systems/{system_id}") + assert resp2.status_code == 200 + + resp3 = dashboard_client.get(f"/api/systems/{system_id}") + assert resp3.status_code == 404 + + +def test_api_create_system_validation_error(dashboard_client): + """POST /api/systems returns 400 for invalid edges.""" + resp = dashboard_client.post("/api/systems", json={ + "name": "Bad System", + "agents": ["research_agent"], + "edges": [{ + "source_agent": "research_agent", + "target_agent": "research_agent", + "env_var": "SELF_URL", + }], + }) + assert resp.status_code == 400 + assert "Self-loop" in resp.json()["error"] + + +def test_api_get_system_not_found(dashboard_client): + """GET /api/systems/{id} returns 404 for unknown ID.""" + resp = dashboard_client.get("/api/systems/nonexistent") + assert resp.status_code == 404 + + +def test_api_deploy_system(dashboard_client): + """POST /api/systems/{id}/deploy returns deploy result.""" + resp = dashboard_client.post("/api/systems", json={ + "name": "Deploy Test", + "agents": ["research_agent"], + "edges": [], + }) + system_id = resp.json()["id"] + + resp2 = dashboard_client.post(f"/api/systems/{system_id}/deploy") + 
assert resp2.status_code == 200 + data = resp2.json() + assert "system_id" in data + assert "steps" in data + assert "status" in data + + +# --- Async Deploy Tests ------------------------------------------------------- + + +@pytest.mark.asyncio +async def test_start_deploy_returns_progress(service): + """start_deploy returns DeployProgress with deploy_id and status='pending'.""" + defn = service.create_system(SystemCreate( + name="Async Deploy Test", + agents=["research_agent", "data_agent"], + edges=[WiringEdge( + source_agent="research_agent", + target_agent="data_agent", + env_var="RESEARCH_URL", + )], + )) + + with patch.object(service, '_get_ws_client', return_value=None): + progress = service.start_deploy(defn.id) + + # Check immediately — before yielding to event loop + assert isinstance(progress, DeployProgress) + assert progress.deploy_id # non-empty + assert progress.system_id == defn.id + assert progress.status == "pending" + assert progress.total_steps > 0 + + # Let background task finish to avoid warnings + await asyncio.sleep(0.1) + + +def test_start_deploy_nonexistent_system(service): + """start_deploy on a bad system_id returns status='failed'.""" + progress = service.start_deploy("nonexistent-system-id") + + assert isinstance(progress, DeployProgress) + assert progress.status == "failed" + assert len(progress.steps) == 1 + assert progress.steps[0].status == "failed" + assert "not found" in progress.steps[0].detail + + +def test_get_deploy_status_none(service): + """get_deploy_status returns None when no deploy is active.""" + defn = service.create_system(SystemCreate( + name="No Deploy", + agents=["research_agent"], + )) + + result = service.get_deploy_status(defn.id) + assert result is None + + +@pytest.mark.asyncio +async def test_get_deploy_status_after_start(service): + """After start_deploy, get_deploy_status returns the same deploy_id.""" + defn = service.create_system(SystemCreate( + name="Status Check", + agents=["research_agent"], + 
edges=[], + )) + + with patch.object(service, '_get_ws_client', return_value=None): + progress = service.start_deploy(defn.id) + # Allow the background task to run + await asyncio.sleep(0.1) + + status = service.get_deploy_status(defn.id) + assert status is not None + assert status.deploy_id == progress.deploy_id + assert status.system_id == defn.id + + +def test_api_deploy_async(dashboard_client): + """POST /api/systems/{id}/deploy with {"async": true} returns deploy_id.""" + resp = dashboard_client.post("/api/systems", json={ + "name": "Async API Test", + "agents": ["research_agent"], + "edges": [], + }) + system_id = resp.json()["id"] + + resp2 = dashboard_client.post( + f"/api/systems/{system_id}/deploy", + json={"async": True}, + ) + assert resp2.status_code == 200 + data = resp2.json() + assert "deploy_id" in data + assert data["system_id"] == system_id + assert data["status"] in ("pending", "deploying", "success", "partial", "failed") + + +def test_api_deploy_status_not_found(dashboard_client): + """GET /api/systems/{id}/deploy/status returns 404 when no deploy active.""" + resp = dashboard_client.post("/api/systems", json={ + "name": "No Deploy Status", + "agents": [], + }) + system_id = resp.json()["id"] + + resp2 = dashboard_client.get(f"/api/systems/{system_id}/deploy/status") + assert resp2.status_code == 404 + assert "No active deploy" in resp2.json()["error"] + + +def test_api_deploy_sync_still_works(dashboard_client): + """POST /api/systems/{id}/deploy without body still returns full DeployResult.""" + resp = dashboard_client.post("/api/systems", json={ + "name": "Sync Compat Test", + "agents": ["research_agent"], + "edges": [], + }) + system_id = resp.json()["id"] + + # No body — should fall back to sync deploy + resp2 = dashboard_client.post(f"/api/systems/{system_id}/deploy") + assert resp2.status_code == 200 + data = resp2.json() + # Sync deploy returns DeployResult shape (system_id, steps, status) + assert data["system_id"] == system_id + assert 
"steps" in data + assert isinstance(data["steps"], list) + assert data["status"] in ("success", "partial", "failed") diff --git a/dbx-agent-app/tests/test_types.py b/dbx-agent-app/tests/test_types.py new file mode 100644 index 00000000..9df8b5ab --- /dev/null +++ b/dbx-agent-app/tests/test_types.py @@ -0,0 +1,262 @@ +"""Tests for AgentRequest, AgentResponse, and StreamEvent wire protocol types.""" + +import json + +from dbx_agent_app.core.types import ( + AgentRequest, + AgentResponse, + InputItem, + OutputItem, + OutputTextContent, + StreamEvent, + UserContext, +) + + +# =================================================================== +# AgentRequest +# =================================================================== + + +def test_request_from_wire(): + """AgentRequest parses the standard wire format.""" + data = {"input": [{"role": "user", "content": "hello"}]} + req = AgentRequest(**data) + + assert len(req.input) == 1 + assert req.input[0].role == "user" + assert req.input[0].content == "hello" + + +def test_request_messages_alias(): + """.messages is an alias for .input.""" + req = AgentRequest(input=[InputItem(role="user", content="hi")]) + assert req.messages == req.input + + +def test_request_last_user_message(): + """.last_user_message returns the last user message.""" + req = AgentRequest( + input=[ + InputItem(role="user", content="first"), + InputItem(role="assistant", content="reply"), + InputItem(role="user", content="second"), + ] + ) + assert req.last_user_message == "second" + + +def test_request_last_user_message_empty(): + """.last_user_message returns empty string when no user messages.""" + req = AgentRequest(input=[InputItem(role="system", content="prompt")]) + assert req.last_user_message == "" + + +def test_request_empty_input(): + """AgentRequest with empty input is valid.""" + req = AgentRequest(input=[]) + assert req.messages == [] + assert req.last_user_message == "" + + +# 
=================================================================== +# AgentResponse +# =================================================================== + + +def test_response_text(): + """AgentResponse.text() creates a single-item text response.""" + resp = AgentResponse.text("Hello!") + assert len(resp.output) == 1 + assert resp.output[0].type == "message" + assert len(resp.output[0].content) == 1 + assert resp.output[0].content[0].text == "Hello!" + + +def test_response_text_custom_id(): + """AgentResponse.text() accepts a custom item_id.""" + resp = AgentResponse.text("hi", item_id="custom-id") + assert resp.output[0].id == "custom-id" + + +def test_response_from_dict_with_response_key(): + """AgentResponse.from_dict() uses the 'response' key as text.""" + resp = AgentResponse.from_dict({"response": "OK", "extra": 42}) + assert resp.output[0].content[0].text == "OK" + + +def test_response_from_dict_serializes(): + """AgentResponse.from_dict() serializes the whole dict when no 'response' key.""" + data = {"foo": "bar", "count": 3} + resp = AgentResponse.from_dict(data) + text = resp.output[0].content[0].text + parsed = json.loads(text) + assert parsed == data + + +def test_response_to_wire(): + """to_wire() produces the expected wire format.""" + resp = AgentResponse.text("test", item_id="id-1") + wire = resp.to_wire() + + assert "output" in wire + assert wire["output"][0]["type"] == "message" + assert wire["output"][0]["id"] == "id-1" + assert wire["output"][0]["content"][0]["type"] == "output_text" + assert wire["output"][0]["content"][0]["text"] == "test" + + +def test_response_wire_roundtrip(): + """Wire format can be serialized to JSON and back.""" + resp = AgentResponse.text("roundtrip") + wire = resp.to_wire() + json_str = json.dumps(wire) + parsed = json.loads(json_str) + + assert parsed["output"][0]["content"][0]["text"] == "roundtrip" + + +# =================================================================== +# StreamEvent +# 
=================================================================== + + +def test_stream_text_delta(): + """StreamEvent.text_delta() creates a delta event.""" + evt = StreamEvent.text_delta("chunk", item_id="s1") + assert evt.type == "response.output_text.delta" + assert evt.delta == "chunk" + assert evt.item_id == "s1" + assert evt.item is None + + +def test_stream_done(): + """StreamEvent.done() creates a done event with full text.""" + evt = StreamEvent.done("full text", item_id="s1") + assert evt.type == "response.output_item.done" + assert evt.item is not None + assert evt.item.content[0].text == "full text" + assert evt.item.id == "s1" + + +def test_stream_to_sse(): + """to_sse() produces valid SSE format.""" + evt = StreamEvent.text_delta("hi", item_id="s1") + sse = evt.to_sse() + + assert sse.startswith("data: ") + assert sse.endswith("\n\n") + + payload = json.loads(sse[len("data: "):].strip()) + assert payload["type"] == "response.output_text.delta" + assert payload["delta"] == "hi" + + +def test_stream_done_to_sse(): + """Done event serializes correctly to SSE.""" + evt = StreamEvent.done("final", item_id="d1") + sse = evt.to_sse() + payload = json.loads(sse[len("data: "):].strip()) + + assert payload["type"] == "response.output_item.done" + assert payload["item"]["content"][0]["text"] == "final" + + +def test_stream_auto_generates_item_id(): + """StreamEvent.text_delta() auto-generates item_id when not provided.""" + evt = StreamEvent.text_delta("chunk") + assert evt.item_id is not None + assert len(evt.item_id) > 0 + + +# =================================================================== +# UserContext +# =================================================================== + + +def test_user_context_is_authenticated_with_token(): + """is_authenticated returns True when access_token is present.""" + ctx = UserContext(access_token="tok-123", email="user@example.com") + assert ctx.is_authenticated is True + + +def 
test_user_context_is_not_authenticated_without_token(): + """is_authenticated returns False when no access_token.""" + ctx = UserContext(email="user@example.com") + assert ctx.is_authenticated is False + + +def test_user_context_as_forwarded_headers(): + """as_forwarded_headers() builds correct X-Forwarded-* dict.""" + ctx = UserContext( + access_token="tok-abc", + email="user@example.com", + user="user@example.com", + ) + headers = ctx.as_forwarded_headers() + assert headers["X-Forwarded-Access-Token"] == "tok-abc" + assert headers["X-Forwarded-Email"] == "user@example.com" + assert headers["X-Forwarded-User"] == "user@example.com" + + +def test_user_context_as_forwarded_headers_partial(): + """as_forwarded_headers() omits None fields.""" + ctx = UserContext(access_token="tok-abc") + headers = ctx.as_forwarded_headers() + assert "X-Forwarded-Access-Token" in headers + assert "X-Forwarded-Email" not in headers + assert "X-Forwarded-User" not in headers + + +def test_user_context_get_workspace_client_raises_without_token(): + """get_workspace_client() raises ValueError when no token.""" + ctx = UserContext(email="user@example.com") + import pytest + with pytest.raises(ValueError, match="No user access token"): + ctx.get_workspace_client() + + +def test_user_context_repr_hides_token(): + """repr() does not expose the access_token value.""" + ctx = UserContext(access_token="secret-token-123", email="user@example.com") + r = repr(ctx) + assert "secret-token-123" not in r + + +def test_user_context_empty(): + """UserContext with no fields is valid but not authenticated.""" + ctx = UserContext() + assert ctx.is_authenticated is False + assert ctx.as_forwarded_headers() == {} + + +# =================================================================== +# AgentRequest — user_context +# =================================================================== + + +def test_request_user_context_default_none(): + """AgentRequest.user_context is None by default.""" + req = 
AgentRequest(input=[InputItem(role="user", content="hi")]) + assert req.user_context is None + + +def test_request_user_context_set_via_private_attr(): + """user_context can be set via _user_context and read via property.""" + req = AgentRequest(input=[InputItem(role="user", content="hi")]) + ctx = UserContext(access_token="tok", email="user@example.com") + req._user_context = ctx + + assert req.user_context is ctx + assert req.user_context.email == "user@example.com" + + +def test_request_model_dump_excludes_user_context(): + """model_dump() does not include _user_context (wire format unchanged).""" + req = AgentRequest(input=[InputItem(role="user", content="hi")]) + req._user_context = UserContext(access_token="tok") + + dump = req.model_dump() + assert "_user_context" not in dump + assert "user_context" not in dump + assert "input" in dump diff --git a/dbx-agent-app/tests/test_uc_functions.py b/dbx-agent-app/tests/test_uc_functions.py new file mode 100644 index 00000000..6d0e34d1 --- /dev/null +++ b/dbx-agent-app/tests/test_uc_functions.py @@ -0,0 +1,233 @@ +"""Tests for UCFunctionAdapter — UC Functions to MCP tool conversion.""" + +import pytest +from unittest.mock import MagicMock +from dataclasses import dataclass +from typing import List, Optional + +from dbx_agent_app.mcp.uc_functions import UCFunctionAdapter + + +@dataclass +class _FakeParam: + name: str + type_name: str + comment: str = "" + default_value: Optional[str] = None + + +@dataclass +class _FakeInputParams: + parameters: List[_FakeParam] + + +@dataclass +class _FakeFunction: + name: str + full_name: str + comment: str = "" + input_params: Optional[_FakeInputParams] = None + + +# --- UC type → JSON Schema type mapping --- + + +class TestTypeMapping: + """Test _map_uc_type_to_json_type.""" + + def setup_method(self): + self.adapter = UCFunctionAdapter() + + def test_string_types(self): + assert self.adapter._map_uc_type_to_json_type("STRING") == "string" + assert 
self.adapter._map_uc_type_to_json_type("VARCHAR") == "string" + assert self.adapter._map_uc_type_to_json_type("CHAR") == "string" + + def test_integer_types(self): + assert self.adapter._map_uc_type_to_json_type("BIGINT") == "integer" + assert self.adapter._map_uc_type_to_json_type("INT") == "integer" + assert self.adapter._map_uc_type_to_json_type("INTEGER") == "integer" + assert self.adapter._map_uc_type_to_json_type("SMALLINT") == "integer" + assert self.adapter._map_uc_type_to_json_type("TINYINT") == "integer" + + def test_number_types(self): + assert self.adapter._map_uc_type_to_json_type("DOUBLE") == "number" + assert self.adapter._map_uc_type_to_json_type("FLOAT") == "number" + assert self.adapter._map_uc_type_to_json_type("DECIMAL") == "number" + + def test_boolean(self): + assert self.adapter._map_uc_type_to_json_type("BOOLEAN") == "boolean" + + def test_complex_types(self): + assert self.adapter._map_uc_type_to_json_type("ARRAY") == "array" + assert self.adapter._map_uc_type_to_json_type("MAP") == "object" + assert self.adapter._map_uc_type_to_json_type("STRUCT") == "object" + + def test_date_types_map_to_string(self): + assert self.adapter._map_uc_type_to_json_type("DATE") == "string" + assert self.adapter._map_uc_type_to_json_type("TIMESTAMP") == "string" + + def test_unknown_defaults_to_string(self): + assert self.adapter._map_uc_type_to_json_type("CUSTOM_TYPE") == "string" + + def test_case_insensitive(self): + assert self.adapter._map_uc_type_to_json_type("string") == "string" + assert self.adapter._map_uc_type_to_json_type("Int") == "integer" + + +# --- Function to MCP tool conversion --- + + +class TestConvertFunctionToTool: + """Test _convert_function_to_tool.""" + + def setup_method(self): + self.adapter = UCFunctionAdapter() + + def test_basic_conversion(self): + func = _FakeFunction( + name="main.funcs.calculate", + full_name="main.funcs.calculate", + comment="Calculate a value", + input_params=_FakeInputParams(parameters=[ + 
_FakeParam(name="amount", type_name="DOUBLE", comment="The amount"), + _FakeParam(name="rate", type_name="DOUBLE", comment="The rate"), + ]), + ) + + tool = self.adapter._convert_function_to_tool(func) + + assert tool is not None + assert tool["name"] == "calculate" + assert tool["description"] == "Calculate a value" + assert tool["full_name"] == "main.funcs.calculate" + assert tool["source"] == "unity_catalog" + + props = tool["inputSchema"]["properties"] + assert props["amount"]["type"] == "number" + assert props["rate"]["type"] == "number" + + def test_required_params(self): + """Parameters without defaults are marked required.""" + func = _FakeFunction( + name="main.funcs.search", + full_name="main.funcs.search", + comment="Search", + input_params=_FakeInputParams(parameters=[ + _FakeParam(name="query", type_name="STRING"), + _FakeParam(name="limit", type_name="INT", default_value="10"), + ]), + ) + + tool = self.adapter._convert_function_to_tool(func) + + required = tool["inputSchema"]["required"] + assert "query" in required + assert "limit" not in required + + def test_no_params(self): + """Function with no parameters still converts.""" + func = _FakeFunction( + name="main.funcs.get_time", + full_name="main.funcs.get_time", + comment="Get current time", + ) + + tool = self.adapter._convert_function_to_tool(func) + + assert tool is not None + assert tool["inputSchema"]["properties"] == {} + + def test_no_comment_uses_default_description(self): + """Missing comment generates a default description.""" + func = _FakeFunction( + name="main.funcs.mystery", + full_name="main.funcs.mystery", + comment="", + ) + + tool = self.adapter._convert_function_to_tool(func) + + assert "mystery" in tool["description"] + + +# --- discover_functions --- + + +class TestDiscoverFunctions: + """Test discover_functions with mocked WorkspaceClient.""" + + def test_discovers_functions(self): + adapter = UCFunctionAdapter() + mock_client = MagicMock() + adapter._client = mock_client + 
+ mock_client.functions.list.return_value = [ + _FakeFunction( + name="calc", + full_name="main.funcs.calc", + comment="Calculate", + input_params=_FakeInputParams(parameters=[ + _FakeParam(name="x", type_name="INT"), + ]), + ), + _FakeFunction( + name="format", + full_name="main.funcs.format", + comment="Format data", + ), + ] + + tools = adapter.discover_functions("main", "funcs") + + assert len(tools) == 2 + assert tools[0]["name"] == "calc" + assert tools[1]["name"] == "format" + + def test_skips_system_functions(self): + adapter = UCFunctionAdapter() + mock_client = MagicMock() + adapter._client = mock_client + + mock_client.functions.list.return_value = [ + _FakeFunction( + name="system.builtin", + full_name="system.builtin", + comment="System function", + ), + _FakeFunction( + name="user_func", + full_name="main.funcs.user_func", + comment="User function", + ), + ] + + tools = adapter.discover_functions("main", "funcs") + + assert len(tools) == 1 + assert tools[0]["name"] == "user_func" + + def test_name_pattern_filter(self): + adapter = UCFunctionAdapter() + mock_client = MagicMock() + adapter._client = mock_client + + mock_client.functions.list.return_value = [ + _FakeFunction(name="calc_tax", full_name="main.funcs.calc_tax", comment="Tax"), + _FakeFunction(name="format_date", full_name="main.funcs.format_date", comment="Date"), + ] + + tools = adapter.discover_functions("main", "funcs", name_pattern="calc") + + assert len(tools) == 1 + assert tools[0]["name"] == "calc_tax" + + def test_handles_listing_error(self): + adapter = UCFunctionAdapter() + mock_client = MagicMock() + adapter._client = mock_client + + mock_client.functions.list.side_effect = Exception("Access denied") + + tools = adapter.discover_functions("main", "funcs") + assert tools == []