diff --git a/.env.example b/.env.example index 2707eae..f040f26 100644 --- a/.env.example +++ b/.env.example @@ -130,6 +130,12 @@ SMITHERY_API_KEY=your_smithery_api_key_here # Get this from https://console.anthropic.com/ ANTHROPIC_API_KEY=your_anthropic_api_key_here +# MCP Security Scanner LLM API Key (optional - only needed for LLM-based security analysis) +# Default analyzer is YARA (no API key required) +# To use LLM analyzer: ./cli/service_mgmt.sh add config.json yara,llm +# Get OpenAI API key from https://platform.openai.com/api-keys +MCP_SCANNER_LLM_API_KEY=your_openai_api_key_here + # ============================================================================= # CONTAINER REGISTRY CREDENTIALS (for CI/CD and local builds) # ============================================================================= diff --git a/.gitignore b/.gitignore index c7adad5..f694487 100644 --- a/.gitignore +++ b/.gitignore @@ -191,6 +191,9 @@ cookies.txt # Scratchpad for temporary notes and planning .scratchpad/ +# Roo IDE files +.roo/ + # OAuth tokens and credentials - never commit these! .oauth-tokens/ .agentcore-params @@ -281,4 +284,7 @@ coverage/ # Anthropic registry temporary files anthropic_servers_*.json -curated_import_list.txt \ No newline at end of file +curated_import_list.txt + +#Security scans +security_scans/ diff --git a/README.md b/README.md index c2e5dc7..10c32ba 100644 --- a/README.md +++ b/README.md @@ -82,6 +82,7 @@ The **MCP Gateway & Registry** is an enterprise-ready platform that centralizes ## What's New +- **šŸ”’ MCP Server Security Scanning** - Integrated vulnerability scanning with Cisco AI Defence MCP Scanner. Automatic security scans during server registration, periodic registry-wide scans with detailed markdown reports, and automatic disabling of servers with security issues. - **šŸ“„ Import Servers from Anthropic MCP Registry** - Import curated MCP servers from Anthropic's registry with a single command. 
[Import Guide](docs/anthropic-registry-import.md) - **šŸ”Œ Anthropic MCP Registry REST API Compatibility** - Full compatibility with Anthropic's MCP Registry REST API specification. [API Documentation](docs/anthropic_registry_api.md) - **šŸš€ Pre-built Images** - Deploy instantly with pre-built Docker images. [Get Started](#option-a-pre-built-images-instant-setup) | [macOS Guide](docs/macos-setup-guide.md) @@ -373,6 +374,14 @@ Seamlessly integrate with Anthropic's official MCP Registry to import and access [Import Guide](docs/anthropic-registry-import.md) | [Registry API Documentation](docs/anthropic_registry_api.md) +### Security Scanning + +**Integrated Vulnerability Detection:** +- **Automated Security Scanning** - Integrated vulnerability scanning for MCP servers using Cisco AI Defence MCP Scanner, with automatic scans during registration and support for periodic registry-wide scans +- **Detailed Security Reports** - Comprehensive markdown reports with vulnerability details, severity assessments, and remediation recommendations +- **Automatic Protection** - Servers with security issues are automatically disabled with security-pending status to protect your infrastructure +- **Compliance Ready** - Security audit trails and vulnerability tracking for enterprise compliance requirements + ### Authentication & Authorization **Multiple Identity Modes:** diff --git a/auth_server/server.py b/auth_server/server.py index 0565dbf..5599dfa 100644 --- a/auth_server/server.py +++ b/auth_server/server.py @@ -846,7 +846,11 @@ async def validate_request(request: Request): try: # Extract headers + # Check for X-Authorization first (custom header used by this gateway) + # Only if X-Authorization is not present, check standard Authorization header authorization = request.headers.get("X-Authorization") + if not authorization: + authorization = request.headers.get("Authorization") cookie_header = request.headers.get("Cookie", "") user_pool_id = request.headers.get("X-User-Pool-Id") 
client_id = request.headers.get("X-Client-Id") diff --git a/cli/examples/cloudflare-docs-server-config.json b/cli/examples/cloudflare-docs-server-config.json new file mode 100644 index 0000000..2deb390 --- /dev/null +++ b/cli/examples/cloudflare-docs-server-config.json @@ -0,0 +1,7 @@ +{ + "server_name": "Cloudflare Documentation MCP Server", + "description": "Search Cloudflare documentation and get migration guides", + "path": "/cloudflare-docs", + "proxy_pass_url": "https://docs.mcp.cloudflare.com/mcp", + "supported_transports": ["streamable-http"] +} diff --git a/cli/examples/minimal-server-config.json b/cli/examples/minimal-server-config.json index 25aa627..2b619d8 100644 --- a/cli/examples/minimal-server-config.json +++ b/cli/examples/minimal-server-config.json @@ -2,5 +2,6 @@ "server_name": "Minimal MCP Server", "description": "A minimal server configuration with only required fields", "path": "/minimal-server", - "proxy_pass_url": "http://minimal-server:9001/" + "proxy_pass_url": "http://minimal-server:9001/", + "supported_transports": ["streamable-http"] } \ No newline at end of file diff --git a/cli/import_from_anthropic_registry.sh b/cli/import_from_anthropic_registry.sh index 4c16d98..f714d7e 100755 --- a/cli/import_from_anthropic_registry.sh +++ b/cli/import_from_anthropic_registry.sh @@ -6,11 +6,12 @@ # and registers them with the local MCP Gateway Registry. 
# # Usage: -# ./import_from_anthropic_registry.sh [--dry-run] [--import-list ] +# ./import_from_anthropic_registry.sh [--dry-run] [--import-list ] [--analyzers ] # # Environment Variables: # GATEWAY_URL - Gateway URL (default: http://localhost) # Example: export GATEWAY_URL=https://mcpgateway.ddns.net +# MCP_SCANNER_LLM_API_KEY - API key for LLM-based security analysis (required if using llm analyzer) # set -e @@ -87,17 +88,37 @@ validate_package() { # Parse arguments DRY_RUN=false IMPORT_LIST="$SCRIPT_DIR/import_server_list.txt" +ANALYZERS="yara" while [[ $# -gt 0 ]]; do case $1 in --dry-run) DRY_RUN=true; shift ;; --import-list) IMPORT_LIST="$2"; shift 2 ;; + --analyzers) ANALYZERS="$2"; shift 2 ;; --help) - echo "Usage: $0 [--dry-run] [--import-list ]" + echo "Usage: $0 [--dry-run] [--import-list ] [--analyzers ]" + echo "" + echo "Options:" + echo " --dry-run Dry run mode (don't register servers)" + echo " --import-list Server list file (default: import_server_list.txt)" + echo " --analyzers Security analyzers: yara, llm, or yara,llm (default: yara)" echo "" echo "Environment Variables:" echo " GATEWAY_URL - Gateway URL (default: http://localhost)" echo " Example: export GATEWAY_URL=https://mcpgateway.ddns.net" + echo " MCP_SCANNER_LLM_API_KEY - API key for LLM analyzer (required if using llm)" + echo "" + echo "Examples:" + echo " # Import with default YARA analyzer" + echo " $0" + echo "" + echo " # Import with both YARA and LLM analyzers" + echo " export MCP_SCANNER_LLM_API_KEY=sk-..." + echo " $0 --analyzers yara,llm" + echo "" + echo " # Import with only LLM analyzer" + echo " export MCP_SCANNER_LLM_API_KEY=sk-..." 
+ echo " $0 --analyzers llm" exit 0 ;; *) echo "Unknown option: $1"; exit 1 ;; esac @@ -108,6 +129,21 @@ command -v jq >/dev/null || { print_error "jq required"; exit 1; } command -v curl >/dev/null || { print_error "curl required"; exit 1; } [ -f "$IMPORT_LIST" ] || { print_error "Import list not found: $IMPORT_LIST"; exit 1; } +# Check if LLM analyzer is requested and API key is available +if [[ "$ANALYZERS" == *"llm"* ]]; then + if [ -z "$MCP_SCANNER_LLM_API_KEY" ] || [[ "$MCP_SCANNER_LLM_API_KEY" == *"your_"* ]] || [[ "$MCP_SCANNER_LLM_API_KEY" == *"placeholder"* ]]; then + echo "" + print_error "LLM analyzer requested but MCP_SCANNER_LLM_API_KEY is not configured" + print_info "Current value: ${MCP_SCANNER_LLM_API_KEY:-}" + print_info "" + print_info "Options:" + print_info " 1. Add real API key to .env file: MCP_SCANNER_LLM_API_KEY=sk-..." + print_info " 2. Set environment variable: export MCP_SCANNER_LLM_API_KEY=sk-..." + print_info " 3. Use only YARA analyzer: $0 --analyzers yara" + exit 1 + fi +fi + mkdir -p "$TEMP_DIR" # Read server list @@ -119,6 +155,7 @@ while IFS= read -r line; do done < "$IMPORT_LIST" print_info "Found ${#servers[@]} servers to import" +print_info "Security analyzers: $ANALYZERS" # Process each server success_count=0 @@ -192,10 +229,9 @@ result['path'] = '/$safe_path' # Remove unsupported fields for register_service tool # The user-facing register_service tool only supports basic fields -# Note: auth_type, auth_provider, and headers are now kept for proper auth handling +# Note: auth_type, auth_provider, headers, supported_transports, and tool_list are kept unsupported_fields = [ - 'repository_url', 'website_url', 'package_npm', 'remote_url', - 'supported_transports', 'tool_list' + 'repository_url', 'website_url', 'package_npm', 'remote_url' ] for field in unsupported_fields: result.pop(field, None) @@ -209,14 +245,14 @@ with open('$config_file', 'w') as f: # Register with service_mgmt.sh (if not dry run) if [ "$DRY_RUN" = false ]; 
then - if GATEWAY_URL="$GATEWAY_URL" "$SCRIPT_DIR/service_mgmt.sh" add "$config_file"; then + if GATEWAY_URL="$GATEWAY_URL" "$SCRIPT_DIR/service_mgmt.sh" add "$config_file" "$ANALYZERS"; then print_success "Registered $server_name" success_count=$((success_count + 1)) else print_error "Failed to register $server_name" fi else - print_info "[DRY RUN] Would register $server_name" + print_info "[DRY RUN] Would register $server_name with analyzers: $ANALYZERS" success_count=$((success_count + 1)) fi diff --git a/cli/mcp_security_scanner.py b/cli/mcp_security_scanner.py new file mode 100644 index 0000000..31209fb --- /dev/null +++ b/cli/mcp_security_scanner.py @@ -0,0 +1,620 @@ +#!/usr/bin/env python3 +""" +MCP Security Scanner CLI Tool + +Scans MCP servers for security vulnerabilities using cisco-ai-mcp-scanner. +Integrates with service_mgmt.sh to provide security analysis during server registration. +""" + +import argparse +import json +import logging +import os +import re +import subprocess +import sys +from datetime import datetime, timezone +from pathlib import Path +from typing import Optional + +from pydantic import BaseModel, Field + + +# Configure logging +logging.basicConfig( + level=logging.INFO, + format="%(asctime)s,p%(process)s,{%(filename)s:%(lineno)d},%(levelname)s,%(message)s", +) +logger = logging.getLogger(__name__) + + +# Constants +DEFAULT_ANALYZERS = "yara" +LLM_API_KEY_ENV = "MCP_SCANNER_LLM_API_KEY" +# Use absolute path relative to project root +PROJECT_ROOT = Path(__file__).parent.parent +OUTPUT_DIR = PROJECT_ROOT / "security_scans" + + +class SecurityScanResult(BaseModel): + """Security scan result model.""" + + server_url: str = Field(..., description="URL of the scanned MCP server") + scan_timestamp: str = Field(..., description="ISO timestamp of the scan") + is_safe: bool = Field(..., description="Overall safety assessment") + critical_issues: int = Field(default=0, description="Count of critical severity issues") + high_severity: int = 
Field(default=0, description="Count of high severity issues") + medium_severity: int = Field(default=0, description="Count of medium severity issues") + low_severity: int = Field(default=0, description="Count of low severity issues") + raw_output: dict = Field(..., description="Full scanner output") + output_file: str = Field(..., description="Path to detailed JSON output file") + + +def _get_llm_api_key( + cli_value: Optional[str] = None +) -> str: + """Retrieve LLM API key from CLI argument or environment variable. + + Args: + cli_value: API key provided via command line + + Returns: + LLM API key for security scanning + + Raises: + ValueError: If API key is not found + """ + if cli_value: + return cli_value + + env_value = os.getenv(LLM_API_KEY_ENV) + if env_value: + return env_value + + raise ValueError( + f"LLM API key must be provided via --api-key or {LLM_API_KEY_ENV} env var" + ) + + +def _ensure_output_directory() -> Path: + """Ensure output directory exists.""" + OUTPUT_DIR.mkdir(parents=True, exist_ok=True) + return OUTPUT_DIR + + +def _run_mcp_scanner( + server_url: str, + analyzers: str = DEFAULT_ANALYZERS, + api_key: Optional[str] = None, + headers: Optional[str] = None +) -> dict: + """Run mcp-scanner command and return raw output. 
+ + Args: + server_url: URL of the MCP server to scan + analyzers: Comma-separated list of analyzers to use + api_key: OpenAI API key for LLM-based analysis + headers: JSON string of headers to include in requests + + Returns: + Dictionary containing raw scanner output + + Raises: + subprocess.CalledProcessError: If scanner command fails + """ + logger.info(f"Running security scan on: {server_url}") + logger.info(f"Using analyzers: {analyzers}") + + # Build command - global options before subcommand, subcommand options after + cmd = [ + "mcp-scanner", + "--analyzers", analyzers, + "--raw", # Use raw format instead of summary + "remote", # Subcommand to scan remote MCP server + "--server-url", server_url + ] + + # Add headers if provided - parse JSON and extract bearer token + if headers: + logger.info(f"Adding custom headers for scanning") + try: + headers_dict = json.loads(headers) + # Check for X-Authorization header with Bearer token + auth_header = headers_dict.get("X-Authorization", "") + if auth_header.startswith("Bearer "): + bearer_token = auth_header.replace("Bearer ", "") + cmd.extend(["--bearer-token", bearer_token]) + logger.info("Using bearer token authentication") + else: + logger.warning("Headers provided but no Bearer token found in X-Authorization header") + except json.JSONDecodeError as e: + logger.error(f"Failed to parse headers JSON: {e}") + raise ValueError(f"Invalid headers JSON: {headers}") from e + + # Set environment variable for API key if provided + env = os.environ.copy() + if api_key: + env[LLM_API_KEY_ENV] = api_key + + # Run scanner + try: + result = subprocess.run( + cmd, + capture_output=True, + text=True, + check=True, + env=env + ) + + # Log raw output for debugging + logger.debug(f"Raw scanner stdout:\n{result.stdout[:500]}") + + # Parse JSON output - scanner outputs JSON array after log messages + stdout = result.stdout.strip() + + # Remove ANSI color codes that can interfere with JSON parsing + ansi_escape = 
re.compile(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])') + stdout = ansi_escape.sub('', stdout) + + # Find the start of JSON array - look for '[\n {' pattern (array with objects) + # This is more robust than just finding first '[' or '{' + json_start = -1 + + # Try to find JSON array start + for i in range(len(stdout) - 1): + if stdout[i] == '[' and (i == 0 or stdout[i-1] in '\n\r'): + # Found '[' at start of line, likely start of JSON + json_start = i + break + + # Fallback: find any '[' followed by whitespace and '{' + if json_start == -1: + pattern = r'\[\s*\{' + match = re.search(pattern, stdout) + if match: + json_start = match.start() + + if json_start == -1: + raise ValueError("No JSON array found in scanner output") + + # Extract and parse JSON + json_str = stdout[json_start:] + tool_results = json.loads(json_str) + + # Wrap in expected format with analysis_results + # Convert array of tool results to the expected structure + raw_output = { + "analysis_results": {}, + "tool_results": tool_results + } + + # Extract findings from tool results and organize by analyzer + for tool_result in tool_results: + findings_dict = tool_result.get("findings", {}) + for analyzer_name, analyzer_findings in findings_dict.items(): + if analyzer_name not in raw_output["analysis_results"]: + raw_output["analysis_results"][analyzer_name] = {"findings": []} + + # Convert analyzer findings to expected format + if isinstance(analyzer_findings, dict): + finding = { + "tool_name": tool_result.get("tool_name"), + "severity": analyzer_findings.get("severity", "unknown"), + "threat_names": analyzer_findings.get("threat_names", []), + "threat_summary": analyzer_findings.get("threat_summary", ""), + "is_safe": tool_result.get("is_safe", True) + } + raw_output["analysis_results"][analyzer_name]["findings"].append(finding) + + logger.debug(f"Scanner output:\n{json.dumps(raw_output, indent=2, default=str)}") + return raw_output + + except subprocess.CalledProcessError as e: + 
logger.error(f"Scanner command failed with exit code {e.returncode}") + logger.error(f"stderr: {e.stderr}") + raise + except json.JSONDecodeError as e: + logger.error(f"Failed to parse scanner output as JSON: {e}") + logger.error(f"Raw stdout: {result.stdout[:1000]}") + raise + + +def _analyze_scan_results( + raw_output: dict +) -> tuple[bool, int, int, int, int]: + """Analyze scan results and extract severity counts. + + Args: + raw_output: Raw scanner output dictionary + + Returns: + Tuple of (is_safe, critical_count, high_count, medium_count, low_count) + """ + critical_count = 0 + high_count = 0 + medium_count = 0 + low_count = 0 + + # Navigate the raw output structure to find findings + # Structure: raw_output -> analysis_results -> [analyzer_name] -> findings + analysis_results = raw_output.get("analysis_results", {}) + + for _analyzer_name, analyzer_data in analysis_results.items(): + if isinstance(analyzer_data, dict): + findings = analyzer_data.get("findings", []) + for finding in findings: + severity = finding.get("severity", "").lower() + if severity == "critical": + critical_count += 1 + elif severity == "high": + high_count += 1 + elif severity == "medium": + medium_count += 1 + elif severity == "low": + low_count += 1 + + # Determine if safe: no critical or high severity issues + is_safe = (critical_count == 0 and high_count == 0) + + logger.info(f"Security analysis results:") + logger.info(f" Critical Issues: {critical_count}") + logger.info(f" High Severity: {high_count}") + logger.info(f" Medium Severity: {medium_count}") + logger.info(f" Low Severity: {low_count}") + logger.info(f" Overall Assessment: {'SAFE' if is_safe else 'UNSAFE'}") + + return is_safe, critical_count, high_count, medium_count, low_count + + +def _save_scan_output( + server_url: str, + raw_output: dict +) -> str: + """Save detailed scan output to JSON file. + + Saves in two locations: + 1. security_scans/YYYY-MM-DD/scan__.json (archived) + 2. 
security_scans/scan__latest.json (always current) + + Args: + server_url: URL of the scanned server + raw_output: Raw scanner output + + Returns: + Path to saved output file (latest version) + """ + output_dir = _ensure_output_directory() + + # Generate safe filename from server URL + safe_url = server_url.replace("https://", "").replace("http://", "").replace("/", "_") + + # Create date-based subdirectory for archival + timestamp = datetime.now(timezone.utc) + date_folder = timestamp.strftime("%Y-%m-%d") + archive_dir = output_dir / date_folder + archive_dir.mkdir(exist_ok=True) + + # Save timestamped version in date folder (archived) + timestamp_str = timestamp.strftime("%Y%m%d_%H%M%S") + archived_filename = f"scan_{safe_url}_{timestamp_str}.json" + archived_file = archive_dir / archived_filename + + with open(archived_file, 'w') as f: + json.dump(raw_output, f, indent=2, default=str) + + logger.info(f"Archived scan output saved to: {archived_file}") + + # Save latest version in root security_scans folder (always current) + # Extract server name from URL for cleaner filename + # e.g., http://localhost/realserverfaketools/mcp -> realserverfaketools_mcp.json + server_name = safe_url.replace("localhost_", "") + latest_filename = f"{server_name}.json" + latest_file = output_dir / latest_filename + + with open(latest_file, 'w') as f: + json.dump(raw_output, f, indent=2, default=str) + + logger.info(f"Latest scan output saved to: {latest_file}") + + return str(latest_file) + + +def _disable_unsafe_server( + server_path: str +) -> bool: + """Disable a server that failed security scan. 
+ + Args: + server_path: Path of the server to disable (e.g., /mcpgw) + + Returns: + True if server was disabled successfully, False otherwise + """ + logger.info(f"Disabling unsafe server: {server_path}") + + try: + # Call service_mgmt.sh to disable the server + cmd = [ + str(PROJECT_ROOT / "cli" / "service_mgmt.sh"), + "disable", + server_path + ] + + result = subprocess.run( + cmd, + capture_output=True, + text=True, + check=True + ) + + logger.info(f"Server {server_path} disabled successfully") + logger.debug(f"Output: {result.stdout}") + return True + + except subprocess.CalledProcessError as e: + logger.error(f"Failed to disable server {server_path}: {e}") + logger.error(f"stderr: {e.stderr}") + return False + except Exception as e: + logger.error(f"Unexpected error disabling server {server_path}: {e}") + return False + + +def _extract_server_path_from_url( + server_url: str +) -> Optional[str]: + """Extract server path from URL. + + Args: + server_url: Full server URL (e.g., http://localhost/mcpgw/mcp) + + Returns: + Server path (e.g., /mcpgw) or None if cannot be extracted + """ + try: + # Parse URL to extract path component + # Expected format: http://localhost/server-path/mcp + from urllib.parse import urlparse + + parsed = urlparse(server_url) + path_parts = [p for p in parsed.path.split('/') if p and p != 'mcp'] + + if path_parts: + server_path = f"/{path_parts[0]}" + logger.debug(f"Extracted server path '{server_path}' from URL '{server_url}'") + return server_path + else: + logger.warning(f"Could not extract server path from URL: {server_url}") + return None + + except Exception as e: + logger.error(f"Error parsing server URL {server_url}: {e}") + return None + + +def scan_server( + server_url: str, + analyzers: str = DEFAULT_ANALYZERS, + api_key: Optional[str] = None, + output_json: bool = False, + auto_disable: bool = False, + headers: Optional[str] = None +) -> SecurityScanResult: + """Scan an MCP server for security vulnerabilities. 
+ + Args: + server_url: URL of the MCP server to scan + analyzers: Comma-separated list of analyzers to use + api_key: OpenAI API key for LLM-based analysis + output_json: If True, output raw mcp-scanner JSON directly + auto_disable: If True, automatically disable servers that fail security scan + headers: JSON string of headers to include in requests + + Returns: + SecurityScanResult containing scan results + """ + # Run scanner + try: + raw_output = _run_mcp_scanner(server_url, analyzers, api_key, headers) + except subprocess.CalledProcessError as e: + # Scanner failed - create error output and save it + logger.error(f"Scanner failed with exit code {e.returncode}") + raw_output = { + "error": str(e), + "stderr": e.stderr if hasattr(e, 'stderr') else "", + "analysis_results": {}, + "tool_results": [], + "scan_failed": True + } + # Save the error output + output_file = _save_scan_output(server_url, raw_output) + + # Create error result + result = SecurityScanResult( + server_url=server_url, + scan_timestamp=datetime.now(timezone.utc).isoformat().replace("+00:00", "Z"), + is_safe=False, # Treat scanner failures as unsafe + critical_issues=0, + high_severity=0, + medium_severity=0, + low_severity=0, + raw_output=raw_output, + output_file=output_file + ) + + # Output result + if output_json: + print(json.dumps(result.model_dump(), indent=2, default=str)) + else: + print("\n" + "="*60) + print("SECURITY SCAN FAILED") + print("="*60) + print(f"Server URL: {result.server_url}") + print(f"Scan Time: {result.scan_timestamp}") + print(f"\nError: Scanner failed to complete scan") + print(f"Details: {e}") + print(f"\nMarking server as UNSAFE due to scanner failure") + print(f"\nDetailed output saved to: {result.output_file}") + print("="*60 + "\n") + + return result + + # Analyze results + is_safe, critical, high, medium, low = _analyze_scan_results(raw_output) + + # Save detailed output + output_file = _save_scan_output(server_url, raw_output) + + # Auto-disable server if 
unsafe + if auto_disable and not is_safe: + logger.warning(f"Server marked as UNSAFE - attempting to disable") + server_path = _extract_server_path_from_url(server_url) + if server_path: + if _disable_unsafe_server(server_path): + logger.info(f"āœ“ Server {server_path} has been disabled for security reasons") + else: + logger.error(f"āœ— Failed to disable server {server_path}") + else: + logger.error(f"āœ— Could not extract server path from URL - manual intervention required") + + # Create result object + result = SecurityScanResult( + server_url=server_url, + scan_timestamp=datetime.now(timezone.utc).isoformat().replace("+00:00", "Z"), + is_safe=is_safe, + critical_issues=critical, + high_severity=high, + medium_severity=medium, + low_severity=low, + raw_output=raw_output, + output_file=output_file + ) + + # Output result + if output_json: + # Output raw mcp-scanner format directly (same as --raw) + print(json.dumps(raw_output, indent=2, default=str)) + else: + print("\n" + "="*60) + print("SECURITY SCAN SUMMARY") + print("="*60) + print(f"Server URL: {result.server_url}") + print(f"Scan Time: {result.scan_timestamp}") + print(f"\nEXECUTIVE SUMMARY OF ISSUES:") + print(f" Critical Issues: {result.critical_issues}") + print(f" High Severity: {result.high_severity}") + print(f" Medium Severity: {result.medium_severity}") + print(f" Low Severity: {result.low_severity}") + print(f"\nOverall Assessment: {'SAFE āœ“' if result.is_safe else 'UNSAFE āœ—'}") + + # Show auto-disable status if applicable + if auto_disable and not result.is_safe: + server_path = _extract_server_path_from_url(server_url) + if server_path: + print(f"\nāš ļø ACTION TAKEN: Server {server_path} has been DISABLED due to security issues") + else: + print(f"\nāš ļø WARNING: Could not auto-disable server - manual intervention required") + + print(f"\nDetailed output saved to: {result.output_file}") + print("="*60 + "\n") + + return result + + +def main(): + """Main entry point for CLI.""" + parser = 
argparse.ArgumentParser( + description="Scan MCP servers for security vulnerabilities", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Example usage: + # Basic scan with YARA analyzer (default) + uv run cli/mcp_security_scanner.py --server-url https://mcp.deepwki.com/mcp + + # Scan with both YARA and LLM analyzers + export MCP_SCANNER_LLM_API_KEY=sk-... + uv run cli/mcp_security_scanner.py --server-url https://example.com/mcp --analyzers yara,llm + + # Scan with LLM only, passing API key directly + uv run cli/mcp_security_scanner.py --server-url https://example.com/mcp --analyzers llm --api-key sk-... + + # Scan with custom headers (e.g., authentication) + uv run cli/mcp_security_scanner.py --server-url https://example.com/mcp --headers '{"X-Authorization": "Bearer token123"}' + + # Output as JSON + uv run cli/mcp_security_scanner.py --server-url https://example.com/mcp --json +""" + ) + + parser.add_argument( + "--server-url", + required=True, + help="URL of the MCP server to scan" + ) + + parser.add_argument( + "--analyzers", + default=DEFAULT_ANALYZERS, + help=f"Comma-separated list of analyzers to use (default: {DEFAULT_ANALYZERS})" + ) + + parser.add_argument( + "--api-key", + help=f"LLM API key for security scanning (can also use {LLM_API_KEY_ENV} env var)" + ) + + parser.add_argument( + "--json", + action="store_true", + help="Output result as JSON" + ) + + parser.add_argument( + "--debug", + action="store_true", + help="Enable debug logging" + ) + + parser.add_argument( + "--auto-disable", + action="store_true", + help="Automatically disable servers that fail security scan (is_safe: false)" + ) + + parser.add_argument( + "--headers", + help="JSON string of headers to include in requests (e.g., '{\"X-Authorization\": \"token\"}')" + ) + + args = parser.parse_args() + + # Set debug level if requested + if args.debug: + logging.getLogger().setLevel(logging.DEBUG) + + try: + # Get API key if needed for LLM analyzer + api_key = None + if 
"llm" in args.analyzers.lower(): + api_key = _get_llm_api_key(args.api_key) + + # Run scan + result = scan_server( + server_url=args.server_url, + analyzers=args.analyzers, + api_key=api_key, + output_json=args.json, + auto_disable=args.auto_disable, + headers=args.headers + ) + + # Exit with non-zero code if unsafe + sys.exit(0 if result.is_safe else 1) + + except Exception as e: + logger.exception(f"Security scan failed: {e}") + sys.exit(2) + + +if __name__ == "__main__": + main() diff --git a/cli/scan_all_servers.py b/cli/scan_all_servers.py new file mode 100755 index 0000000..468663e --- /dev/null +++ b/cli/scan_all_servers.py @@ -0,0 +1,792 @@ +#!/usr/bin/env python3 +""" +Scan all enabled MCP servers for security vulnerabilities. + +This script: +1. Loads ingress token from .oauth-tokens/ingress.json +2. Calls the registry API to get a list of all servers +3. Filters for enabled servers +4. Runs security scans on each enabled server using service_mgmt.sh + +Usage: + uv run python cli/scan_all_servers.py + uv run python cli/scan_all_servers.py --base-url http://localhost + uv run python cli/scan_all_servers.py --analyzers yara,llm + uv run python cli/scan_all_servers.py --token-file .oauth-tokens/ingress.json +""" + +import argparse +import base64 +import json +import logging +import subprocess +import sys +from datetime import datetime, timezone +from pathlib import Path +from typing import List, Dict, Any, Optional + +import requests + + +# Configure logging +logging.basicConfig( + level=logging.INFO, + format="%(asctime)s,p%(process)s,{%(filename)s:%(lineno)d},%(levelname)s,%(message)s", +) +logger = logging.getLogger(__name__) + + +# Constants +SCRIPT_DIR = Path(__file__).parent +PROJECT_ROOT = SCRIPT_DIR.parent +DEFAULT_TOKEN_FILE = PROJECT_ROOT / ".oauth-tokens" / "ingress.json" +DEFAULT_BASE_URL = "http://localhost" +DEFAULT_ANALYZERS = "yara" +GENERATE_CREDS_SCRIPT = PROJECT_ROOT / "credentials-provider" / "generate_creds.sh" + + +def 
_check_token_expiration( + access_token: str +) -> None: + """Check if JWT token is expired and exit with instructions if so. + + Args: + access_token: JWT access token to check + + Raises: + SystemExit: If token is expired or will expire soon + """ + try: + # Decode JWT payload (without verification, just to check expiry) + parts = access_token.split('.') + if len(parts) != 3: + logger.warning("Invalid JWT format, cannot check expiration") + return + + # Decode payload + payload = parts[1] + # Add padding if needed + padding = len(payload) % 4 + if padding: + payload += '=' * (4 - padding) + + decoded = base64.urlsafe_b64decode(payload) + token_data = json.loads(decoded) + + # Check expiration + exp = token_data.get('exp') + if not exp: + logger.warning("Token does not have expiration field") + return + + exp_dt = datetime.fromtimestamp(exp, tz=timezone.utc) + now = datetime.now(timezone.utc) + time_until_expiry = exp_dt - now + + if time_until_expiry.total_seconds() < 0: + # Token is expired + logger.error("=" * 80) + logger.error("TOKEN EXPIRED") + logger.error("=" * 80) + logger.error(f"Token expired at: {exp_dt.strftime('%Y-%m-%d %H:%M:%S UTC')}") + logger.error(f"Current time is: {now.strftime('%Y-%m-%d %H:%M:%S UTC')}") + logger.error(f"Token expired {abs(time_until_expiry.total_seconds())} seconds ago") + logger.error("") + logger.error("Please regenerate your token:") + logger.error(f" {GENERATE_CREDS_SCRIPT}") + logger.error("=" * 80) + sys.exit(1) + + elif time_until_expiry.total_seconds() < 60: + # Token will expire in less than 60 seconds + logger.warning("=" * 80) + logger.warning("TOKEN EXPIRING SOON") + logger.warning("=" * 80) + logger.warning(f"Token will expire in {time_until_expiry.total_seconds():.0f} seconds") + logger.warning(f"Expiration time: {exp_dt.strftime('%Y-%m-%d %H:%M:%S UTC')}") + logger.warning("") + logger.warning("Consider regenerating your token:") + logger.warning(f" {GENERATE_CREDS_SCRIPT}") + logger.warning("=" * 80) + 
logger.warning("") + + else: + # Token is valid + logger.info(f"Token is valid until {exp_dt.strftime('%Y-%m-%d %H:%M:%S UTC')} ({time_until_expiry.total_seconds():.0f} seconds remaining)") + + except Exception as e: + logger.warning(f"Could not check token expiration: {e}") + return + + +def _load_token_from_file( + token_file: Path +) -> str: + """Load access token from JSON file and check expiration. + + Args: + token_file: Path to token file + + Returns: + Access token string + + Raises: + FileNotFoundError: If token file doesn't exist + ValueError: If token file format is invalid + SystemExit: If token is expired + """ + if not token_file.exists(): + raise FileNotFoundError( + f"Token file not found: {token_file}\n" + "Please generate credentials first:\n" + f" {GENERATE_CREDS_SCRIPT}" + ) + + with open(token_file, 'r') as f: + token_data = json.load(f) + + access_token = token_data.get("access_token") + if not access_token: + raise ValueError(f"No access_token found in {token_file}") + + logger.info(f"Loaded token from: {token_file}") + + # Check token expiration + _check_token_expiration(access_token) + + return access_token + + +def _get_server_list( + base_url: str, + access_token: str, + limit: int = 1000 +) -> List[Dict[str, Any]]: + """Get list of all servers from registry API. 
+ + Args: + base_url: Base URL of the registry + access_token: JWT access token + limit: Maximum number of servers to retrieve + + Returns: + List of server objects + + Raises: + requests.HTTPError: If API request fails + """ + api_url = f"{base_url}/v0.1/servers" + params = {"limit": limit} + headers = { + "X-Authorization": f"Bearer {access_token}", + "Content-Type": "application/json" + } + + logger.info(f"Fetching server list from: {api_url}") + + response = requests.get( + api_url, + params=params, + headers=headers, + timeout=30 + ) + response.raise_for_status() + + data = response.json() + + # Print full API response for debugging + logger.debug("Full API Response:") + logger.debug(json.dumps(data, indent=2)) + + servers = data.get("servers", []) + + logger.info(f"Retrieved {len(servers)} servers from registry") + return servers + + +def _filter_enabled_servers( + servers: List[Dict[str, Any]] +) -> List[Dict[str, Any]]: + """Filter for only enabled servers. + + Args: + servers: List of server objects from API + + Returns: + List of enabled server objects + """ + enabled_servers = [] + + for server_obj in servers: + server = server_obj.get("server", {}) + meta = server.get("_meta", {}).get("io.mcpgateway/internal", {}) + + is_enabled = meta.get("is_enabled", False) + if is_enabled: + enabled_servers.append(server) + + logger.info(f"Found {len(enabled_servers)} enabled servers") + return enabled_servers + + +def _run_security_scan( + server_url: str, + analyzers: str, + api_key: Optional[str] = None, + access_token: Optional[str] = None +) -> Dict[str, Any]: + """Run security scan on a server using service_mgmt.sh. 
+ + Args: + server_url: URL of the MCP server to scan + analyzers: Comma-separated list of analyzers (e.g., 'yara', 'yara,llm') + api_key: Optional API key for LLM analyzer + access_token: Optional access token for authenticated MCP servers + + Returns: + Dictionary with scan results including: + - success: bool + - scan_output_file: Path to scan results JSON file + - critical_issues: int + - high_severity: int + - medium_severity: int + - low_severity: int + - is_safe: bool + """ + service_mgmt_script = SCRIPT_DIR / "service_mgmt.sh" + + if not service_mgmt_script.exists(): + logger.error(f"service_mgmt.sh not found at: {service_mgmt_script}") + return False + + cmd = [ + str(service_mgmt_script), + "scan", + server_url, + analyzers + ] + + if api_key: + cmd.append(api_key) + else: + # Add empty string placeholder if we need to add headers next + if access_token: + cmd.append("") + + # Add headers with authorization token if provided + if access_token: + headers_json = json.dumps({"X-Authorization": f"Bearer {access_token}"}) + cmd.append(headers_json) + + # Log command with masked token for security + cmd_for_log = cmd.copy() + if access_token and len(cmd_for_log) > 0: + # Replace the last element (headers JSON) with masked version + headers_masked = json.dumps({"X-Authorization": f"Bearer {access_token[:20]}...{access_token[-10:]}"}) + cmd_for_log[-1] = headers_masked + logger.info(f"Running: {' '.join(cmd_for_log)}") + + try: + result = subprocess.run( + cmd, + capture_output=True, + text=True, + check=False + ) + + # Log output + if result.stdout: + logger.info(f"Scan output:\n{result.stdout}") + if result.stderr: + logger.warning(f"Scan stderr:\n{result.stderr}") + + # Parse scan results from security_scans directory + scan_result = { + "success": result.returncode == 0, + "scan_output_file": None, + "critical_issues": 0, + "high_severity": 0, + "medium_severity": 0, + "low_severity": 0, + "is_safe": result.returncode == 0, + "error_message": None + } + + # 
Try to find and parse the scan output file + try: + # Extract server name from URL for finding scan file + from urllib.parse import urlparse + parsed = urlparse(server_url) + path_parts = [p for p in parsed.path.split('/') if p and p != 'mcp'] + if path_parts: + server_name = path_parts[0] + scan_file = PROJECT_ROOT / "security_scans" / f"{server_name}_mcp.json" + + if scan_file.exists(): + scan_result["scan_output_file"] = str(scan_file) + with open(scan_file, 'r') as f: + scan_data = json.load(f) + + # Extract severity counts from analysis_results + analysis_results = scan_data.get("analysis_results", {}) + for analyzer_name, analyzer_data in analysis_results.items(): + if isinstance(analyzer_data, dict): + findings = analyzer_data.get("findings", []) + for finding in findings: + severity = finding.get("severity", "").lower() + if severity == "critical": + scan_result["critical_issues"] += 1 + elif severity == "high": + scan_result["high_severity"] += 1 + elif severity == "medium": + scan_result["medium_severity"] += 1 + elif severity == "low": + scan_result["low_severity"] += 1 + + # Determine if safe based on scan data + scan_result["is_safe"] = (scan_result["critical_issues"] == 0 and + scan_result["high_severity"] == 0) + except Exception as e: + logger.warning(f"Could not parse scan results: {e}") + + # Check exit code + if result.returncode == 0: + logger.info("āœ“ Scan completed successfully") + else: + logger.error(f"āœ— Scan failed with exit code: {result.returncode}") + scan_result["error_message"] = f"Scanner exit code: {result.returncode}" + + return scan_result + + except Exception as e: + logger.error(f"Failed to run scan: {e}") + return { + "success": False, + "scan_output_file": None, + "critical_issues": 0, + "high_severity": 0, + "medium_severity": 0, + "low_severity": 0, + "is_safe": False, + "error_message": str(e) + } + + +def _generate_markdown_report( + scan_results: List[Dict[str, Any]], + stats: Dict[str, int], + analyzers: str, + 
scan_timestamp: str +) -> str: + """Generate markdown report from scan results. + + Args: + scan_results: List of scan result dictionaries + stats: Dictionary with summary statistics + analyzers: Analyzers used for scanning + scan_timestamp: ISO timestamp of scan + + Returns: + Markdown formatted report as string + """ + lines = [] + + # Header + lines.append("# MCP Server Security Scan Report") + lines.append("") + lines.append(f"**Scan Date:** {scan_timestamp}") + lines.append(f"**Analyzers Used:** {analyzers}") + lines.append("") + + # Executive Summary + lines.append("## Executive Summary") + lines.append("") + total = stats['total'] + passed = stats['passed'] + failed = stats['failed'] + pass_rate = (passed / total * 100) if total > 0 else 0 + + lines.append(f"- **Total Servers Scanned:** {total}") + lines.append(f"- **Passed:** {passed} ({pass_rate:.1f}%)") + lines.append(f"- **Failed:** {failed} ({100 - pass_rate:.1f}%)") + lines.append("") + + # Aggregate Vulnerability Statistics + total_critical = sum(r.get('critical_issues', 0) for r in scan_results) + total_high = sum(r.get('high_severity', 0) for r in scan_results) + total_medium = sum(r.get('medium_severity', 0) for r in scan_results) + total_low = sum(r.get('low_severity', 0) for r in scan_results) + + lines.append("### Aggregate Vulnerability Statistics") + lines.append("") + lines.append("| Severity | Count |") + lines.append("|----------|-------|") + lines.append(f"| Critical | {total_critical} |") + lines.append(f"| High | {total_high} |") + lines.append(f"| Medium | {total_medium} |") + lines.append(f"| Low | {total_low} |") + lines.append("") + + # Per-Server Results + lines.append("## Per-Server Scan Results") + lines.append("") + + for result in scan_results: + server_name = result.get('server_name', 'Unknown') + server_url = result.get('server_url', 'Unknown') + is_safe = result.get('is_safe', False) + status = "āœ… SAFE" if is_safe else "āŒ UNSAFE" + + lines.append(f"### {server_name}") + 
lines.append("") + lines.append(f"- **URL:** `{server_url}`") + lines.append(f"- **Status:** {status}") + lines.append("") + + # Vulnerability table + lines.append("| Severity | Count |") + lines.append("|----------|-------|") + lines.append(f"| Critical | {result.get('critical_issues', 0)} |") + lines.append(f"| High | {result.get('high_severity', 0)} |") + lines.append(f"| Medium | {result.get('medium_severity', 0)} |") + lines.append(f"| Low | {result.get('low_severity', 0)} |") + lines.append("") + + # Show detailed findings for tools with issues + scan_file = result.get('scan_output_file') + if scan_file and Path(scan_file).exists(): + try: + with open(scan_file, 'r') as f: + scan_data = json.load(f) + + tool_results = scan_data.get('tool_results', []) + tools_with_findings = [ + tool for tool in tool_results + if any( + finding.get('total_findings', 0) > 0 + for finding in tool.get('findings', {}).values() + ) + ] + + if tools_with_findings: + lines.append("#### Detailed Findings") + lines.append("") + + for tool in tools_with_findings: + tool_name = tool.get('tool_name', 'Unknown') + lines.append(f"**Tool: `{tool_name}`**") + lines.append("") + + # Show findings for each analyzer + findings = tool.get('findings', {}) + for analyzer_name, analyzer_findings in findings.items(): + total_findings = analyzer_findings.get('total_findings', 0) + if total_findings > 0: + severity = analyzer_findings.get('severity', 'UNKNOWN') + threat_names = analyzer_findings.get('threat_names', []) + threat_summary = analyzer_findings.get('threat_summary', '') + + lines.append(f"- **Analyzer:** {analyzer_name}") + lines.append(f"- **Severity:** {severity}") + lines.append(f"- **Threats:** {', '.join(threat_names) if threat_names else 'None'}") + lines.append(f"- **Summary:** {threat_summary}") + + # Include taxonomy if available + taxonomy = analyzer_findings.get('mcp_taxonomy', {}) + if taxonomy: + lines.append("") + lines.append("**Taxonomy:**") + lines.append("```json") + 
lines.append(json.dumps(taxonomy, indent=2)) + lines.append("```") + + lines.append("") + + # Show tool description if available + tool_desc = tool.get('tool_description', '') + if tool_desc: + lines.append("
") + lines.append(f"Tool Description") + lines.append("") + lines.append("```") + lines.append(tool_desc) + lines.append("```") + lines.append("
") + lines.append("") + + except Exception as e: + logger.warning(f"Could not parse detailed findings from {scan_file}: {e}") + lines.append(f"**Detailed Report:** [{Path(scan_file).name}]({scan_file})") + lines.append("") + else: + if scan_file: + lines.append(f"**Detailed Report:** [{Path(scan_file).name}]({scan_file})") + lines.append("") + + if result.get('error_message'): + lines.append(f"**Error:** {result['error_message']}") + lines.append("") + + # Footer + lines.append("---") + lines.append("") + lines.append(f"*Report generated on {scan_timestamp}*") + lines.append("") + + return "\n".join(lines) + + +def _scan_all_servers( + base_url: str, + token: Optional[str] = None, + token_file: Optional[Path] = None, + analyzers: str = DEFAULT_ANALYZERS, + api_key: Optional[str] = None +) -> Dict[str, Any]: + """Scan all enabled servers. + + Args: + base_url: Base URL of the registry + token: Access token string (takes precedence over token_file) + token_file: Path to token file (default: .oauth-tokens/ingress.json) + analyzers: Comma-separated list of analyzers + api_key: Optional API key for LLM analyzer + + Returns: + Dictionary with scan statistics + """ + logger.info("=" * 80) + logger.info("Scan All MCP Servers - Security Vulnerability Scanner") + logger.info("=" * 80) + + # Load token - priority: --token > --token-file > default token file + try: + if token: + logger.info("Using token provided via --token argument") + access_token = token + # Still check expiration + _check_token_expiration(access_token) + else: + # Use token file (either provided or default) + if token_file is None: + token_file = DEFAULT_TOKEN_FILE + access_token = _load_token_from_file(token_file) + except Exception as e: + logger.error(f"Failed to load token: {e}") + sys.exit(1) + + # Get server list + try: + servers = _get_server_list(base_url, access_token) + except Exception as e: + logger.error(f"Failed to get server list: {e}") + sys.exit(1) + + # Filter enabled servers + 
enabled_servers = _filter_enabled_servers(servers) + + if not enabled_servers: + logger.warning("No enabled servers found to scan") + return {"total": 0, "passed": 0, "failed": 0} + + # Scan each server + stats = { + "total": len(enabled_servers), + "passed": 0, + "failed": 0 + } + + scan_results = [] + scan_timestamp = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S UTC") + + logger.info("") + logger.info("=" * 80) + logger.info(f"Scanning {stats['total']} enabled servers") + logger.info("=" * 80) + logger.info("") + + for idx, server in enumerate(enabled_servers, 1): + server_name = server.get("name", "unknown") + + # Get the path from metadata - this is what we use with the gateway + meta = server.get("_meta", {}).get("io.mcpgateway/internal", {}) + server_path = meta.get("path") + + if not server_path: + logger.warning(f"[{idx}/{stats['total']}] {server_name}: No path found in metadata, skipping") + stats["failed"] += 1 + scan_results.append({ + "server_name": server_name, + "server_url": "N/A", + "success": False, + "is_safe": False, + "critical_issues": 0, + "high_severity": 0, + "medium_severity": 0, + "low_severity": 0, + "error_message": "No path found in metadata" + }) + continue + + # Construct the gateway proxy URL using the path and base_url + # Ensure path ends with / before adding mcp + if not server_path.endswith('/'): + server_path = server_path + '/' + server_url = f"{base_url}{server_path}mcp" + + logger.info("-" * 80) + logger.info(f"[{idx}/{stats['total']}] Scanning: {server_name}") + logger.info(f"URL: {server_url}") + logger.info(f"Analyzers: {analyzers}") + + # Run scan with access token for authentication + scan_result = _run_security_scan(server_url, analyzers, api_key, access_token) + scan_result["server_name"] = server_name + scan_result["server_url"] = server_url + scan_results.append(scan_result) + + if scan_result["success"] and scan_result["is_safe"]: + stats["passed"] += 1 + else: + stats["failed"] += 1 + + logger.info("") + 
+ return { + "stats": stats, + "scan_results": scan_results, + "scan_timestamp": scan_timestamp, + "analyzers": analyzers + } + + +def main(): + parser = argparse.ArgumentParser( + description="Scan all enabled MCP servers for security vulnerabilities", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + # Scan all servers with default YARA analyzer + uv run python cli/scan_all_servers.py + + # Scan with both YARA and LLM analyzers + export MCP_SCANNER_LLM_API_KEY=sk-your-api-key + uv run python cli/scan_all_servers.py --analyzers yara,llm + + # Use specific base URL + uv run python cli/scan_all_servers.py --base-url http://localhost:7860 + + # Provide token directly via command line + uv run python cli/scan_all_servers.py --token "eyJhbGci..." + + # Use custom token file + uv run python cli/scan_all_servers.py --token-file .oauth-tokens/custom.json +""" + ) + + parser.add_argument( + "--base-url", + default=DEFAULT_BASE_URL, + help=f"Registry base URL (default: {DEFAULT_BASE_URL})" + ) + parser.add_argument( + "--token", + help="Access token string (takes precedence over --token-file)" + ) + parser.add_argument( + "--token-file", + type=Path, + default=DEFAULT_TOKEN_FILE, + help=f"Path to token file (default: {DEFAULT_TOKEN_FILE})" + ) + parser.add_argument( + "--analyzers", + default=DEFAULT_ANALYZERS, + help=f"Comma-separated list of analyzers: yara, llm, or yara,llm (default: {DEFAULT_ANALYZERS})" + ) + parser.add_argument( + "--api-key", + help="LLM API key (optional, can also use MCP_SCANNER_LLM_API_KEY env var)" + ) + parser.add_argument( + "--debug", + action="store_true", + help="Enable debug logging" + ) + + args = parser.parse_args() + + # Set debug level if requested + if args.debug: + logging.getLogger().setLevel(logging.DEBUG) + + # Run scans + results = _scan_all_servers( + base_url=args.base_url, + token=args.token, + token_file=args.token_file, + analyzers=args.analyzers, + api_key=args.api_key + ) + + stats = 
results["stats"] + scan_results = results["scan_results"] + scan_timestamp = results["scan_timestamp"] + analyzers = results["analyzers"] + + # Generate markdown report + logger.info("") + logger.info("=" * 80) + logger.info("Generating markdown report...") + logger.info("=" * 80) + + markdown_report = _generate_markdown_report( + scan_results=scan_results, + stats=stats, + analyzers=analyzers, + scan_timestamp=scan_timestamp + ) + + # Save markdown report + report_base_dir = PROJECT_ROOT / "security_scans" + report_base_dir.mkdir(parents=True, exist_ok=True) + + # Create reports subdirectory for timestamped reports + reports_dir = report_base_dir / "reports" + reports_dir.mkdir(parents=True, exist_ok=True) + + # Save timestamped report in reports/ subdirectory + timestamp_str = datetime.now(timezone.utc).strftime("%Y%m%d_%H%M%S") + timestamped_report = reports_dir / f"scan_report_{timestamp_str}.md" + + with open(timestamped_report, 'w') as f: + f.write(markdown_report) + + # Save latest report directly in security_scans/ + latest_report = report_base_dir / "scan_report.md" + with open(latest_report, 'w') as f: + f.write(markdown_report) + + logger.info(f"Markdown report saved to: {timestamped_report}") + logger.info(f"Latest report: {latest_report}") + + # Print summary + logger.info("") + logger.info("=" * 80) + logger.info("SCAN SUMMARY") + logger.info("=" * 80) + logger.info(f"Total servers scanned: {stats['total']}") + logger.info(f"Passed: {stats['passed']}") + logger.info(f"Failed: {stats['failed']}") + logger.info("") + logger.info("Security scan results saved to: ./security_scans/") + logger.info(f"Markdown report: {latest_report}") + logger.info("=" * 80) + + # Exit with error code if any scans failed + if stats["failed"] > 0: + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/cli/service_mgmt.sh b/cli/service_mgmt.sh index 2957b9c..f78aed7 100755 --- a/cli/service_mgmt.sh +++ b/cli/service_mgmt.sh @@ -19,6 +19,13 @@ CROSS_MARK="āœ—" 
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" PROJECT_ROOT="$(dirname "$SCRIPT_DIR")" +# Load environment variables from .env file if it exists +if [ -f "$PROJECT_ROOT/.env" ]; then + set -a # automatically export all variables + source "$PROJECT_ROOT/.env" + set +a +fi + # Gateway URL (can be overridden with GATEWAY_URL environment variable) GATEWAY_URL="${GATEWAY_URL:-http://localhost}" @@ -426,10 +433,12 @@ except Exception as e: add_service() { local config_file="${1}" + local analyzers="${2:-yara}" if [ -z "$config_file" ]; then - print_error "Usage: $0 add " + print_error "Usage: $0 add [analyzers]" print_error "Example: $0 add cli/examples/example-server-config.json" + print_error "Example: $0 add cli/examples/example-server-config.json yara,llm" exit 1 fi @@ -458,11 +467,130 @@ add_service() { # Use the modified config for registration config_json="$modified_config" + # Extract service_path from config for later use + local service_path + service_path=$(python3 -c " +import json +config = json.loads('''$config_json''') +print(config.get('path', '')) +") + echo "=== Adding Service: $service_name ===" # Check prerequisites check_prerequisites + # Extract proxy_pass_url for security scanning + local proxy_pass_url + proxy_pass_url=$(python3 -c " +import json +config = json.loads('''$config_json''') +print(config.get('proxy_pass_url', '')) +") + + # Extract headers from config if present + local headers_json + headers_json=$(python3 -c " +import json +config = json.loads('''$config_json''') +headers = config.get('headers', {}) +if headers: + print(json.dumps(headers)) +else: + print('') +") + + # Check if LLM analyzer is requested and API key is available + if [[ "$analyzers" == *"llm"* ]]; then + if [ -z "$MCP_SCANNER_LLM_API_KEY" ] || [[ "$MCP_SCANNER_LLM_API_KEY" == *"your_"* ]] || [[ "$MCP_SCANNER_LLM_API_KEY" == *"placeholder"* ]]; then + echo "" + print_error "LLM analyzer requested but MCP_SCANNER_LLM_API_KEY is not configured" + 
print_info "Current value: ${MCP_SCANNER_LLM_API_KEY:-}" + print_info "" + print_info "Options:" + print_info " 1. Add real API key to .env file: MCP_SCANNER_LLM_API_KEY=sk-..." + print_info " 2. Set environment variable: export MCP_SCANNER_LLM_API_KEY=sk-..." + print_info " 3. Use only YARA analyzer: $0 add $config_file yara" + exit 1 + fi + fi + + # Run security scan + echo "" + echo "=== Security Scan ===" + print_info "Scanning server for security vulnerabilities..." + print_info "Using analyzers: $analyzers" + + local is_safe="true" + local scan_output="" + + # Prepare scan URL - append /mcp if not already present + local scan_url="$proxy_pass_url" + if [[ ! "$scan_url" =~ /mcp/?$ ]] && [[ ! "$scan_url" =~ /sse/?$ ]]; then + # Remove trailing slash if present, then add /mcp + scan_url="${scan_url%/}/mcp" + print_info "Appending /mcp to scan URL: $scan_url" + fi + + # Run scan using Python CLI and capture JSON output + # Note: Scanner exits with code 1 when unsafe, so we need to capture both success and "failure" cases + local scan_exit_code=0 + local scan_cmd="cd \"$PROJECT_ROOT\" && uv run cli/mcp_security_scanner.py --server-url \"$scan_url\" --analyzers \"$analyzers\" --json" + + # Add headers if present in config + if [ -n "$headers_json" ]; then + print_info "Using custom headers from config for security scan" + scan_cmd="$scan_cmd --headers '$headers_json'" + fi + + scan_output=$(eval "$scan_cmd" 2>&1) || scan_exit_code=$? 
+ print_info "scan_exit_code - $scan_exit_code" + + # Exit code 0 = safe, exit code 1 = unsafe, exit code 2 = error + if [ $scan_exit_code -eq 0 ]; then + print_success "Security scan passed - Server is SAFE" + elif [ $scan_exit_code -eq 1 ]; then + print_error "Security scan failed - Server has critical or high severity issues" + print_info "Server will be registered but marked as UNHEALTHY with security-pending status" + + # Add security-pending tag to config_json BEFORE registration + echo "" + echo "====Adding security-pending tag to configuration====" + print_info "Adding 'security-pending' tag to server configuration before registration..." + + config_json=$(python3 -c " +import json +import sys + +try: + config = json.loads('''$config_json''') + + # Add security-pending tag if not already present + tags = config.get('tags', []) + if 'security-pending' not in tags: + tags.append('security-pending') + config['tags'] = tags + + print(json.dumps(config)) + sys.exit(0) +except Exception as e: + print(f'Failed to add tag: {e}', file=sys.stderr) + sys.exit(1) +") + + if [ $? -eq 0 ]; then + print_success "Added 'security-pending' tag to configuration" + else + print_error "Failed to add 'security-pending' tag to configuration" + exit 1 + fi + else + print_error "Security scan encountered an error (exit code: $scan_exit_code)" + print_info "Server will be registered but marked as UNHEALTHY with security-pending status" + fi + + echo "" + # Register the service if ! 
run_mcp_command "register_service" "$config_json" "Registering service"; then exit 1 @@ -484,6 +612,44 @@ add_service() { exit 1 fi + if [ $scan_exit_code -eq 1 ]; then + #Disabling the server + echo "" + echo "====Disabling the server====" + + # Get admin credentials from environment + local admin_user="${ADMIN_USER:-admin}" + local admin_password="${ADMIN_PASSWORD}" + + if [ -z "$admin_password" ]; then + print_error "ADMIN_PASSWORD not set in environment - cannot disable server" + else + # Call the internal toggle endpoint to set service to disabled (false) + # Since the server was just auto-enabled during registration, we need to toggle it OFF + print_info "Calling toggle endpoint with: ${GATEWAY_URL}/api/internal/toggle" + print_info "Service path: $service_path" + + output=$(curl -s -w "\nHTTP_STATUS:%{http_code}" -X POST "${GATEWAY_URL}/api/internal/toggle" \ + --user "$admin_user:$admin_password" \ + --data-urlencode "service_path=$service_path" 2>&1) + + # Extract HTTP status code from response + http_status=$(echo "$output" | grep "HTTP_STATUS:" | cut -d':' -f2) + response_body=$(echo "$output" | sed '/HTTP_STATUS:/d') + + print_info "Toggle API HTTP Status: $http_status" + print_info "Toggle API Response: $response_body" + + if [ "$http_status" = "200" ]; then + print_success "Server disabled due to failed security scan" + else + print_error "Failed to disable server - HTTP Status: $http_status" + print_error "Response: $response_body" + fi + print_info "Review the security scan report before enabling this server" + fi + fi + # Run health check echo "" echo "=== Health Check ===" @@ -669,14 +835,87 @@ monitor_services() { print_success "Monitoring completed!" 
} +scan_server_security() { + local server_url="$1" + local analyzers="${2:-yara}" + local api_key="${3:-}" + local headers="${4:-}" + + if [ -z "$server_url" ]; then + print_error "Usage: $0 scan [analyzers] [api-key] [headers]" + print_error "Example: $0 scan https://mcp.deepwki.com/mcp" + print_error "Example: $0 scan https://mcp.deepwki.com/mcp yara,llm" + print_error "Example: $0 scan https://mcp.deepwki.com/mcp yara,llm \$MCP_SCANNER_LLM_API_KEY" + print_error "Example: $0 scan https://mcp.deepwki.com/mcp yara '' '{\"X-Authorization\": \"token123\"}'" + print_error "" + print_error "Note: For LLM analyzer, set MCP_SCANNER_LLM_API_KEY environment variable" + print_error " or pass API key as third argument" + print_error "Note: For custom headers, pass JSON string as fourth argument" + exit 1 + fi + + echo "=== Security Scan: $server_url ===" + + # Check if LLM analyzer is requested and API key is available + if [[ "$analyzers" == *"llm"* ]]; then + # Check both environment variable and CLI argument + local key_to_check="${api_key:-$MCP_SCANNER_LLM_API_KEY}" + if [ -z "$key_to_check" ] || [[ "$key_to_check" == *"your_"* ]] || [[ "$key_to_check" == *"placeholder"* ]]; then + echo "" + print_error "LLM analyzer requested but MCP_SCANNER_LLM_API_KEY is not configured" + print_info "Current value: ${MCP_SCANNER_LLM_API_KEY:-}" + print_info "" + print_info "Options:" + print_info " 1. Add real API key to .env file: MCP_SCANNER_LLM_API_KEY=sk-..." + print_info " 2. Set environment variable: export MCP_SCANNER_LLM_API_KEY=sk-..." + print_info " 3. Pass API key as argument: $0 scan $server_url $analyzers sk-your-key" + print_info " 4. 
Use only YARA analyzer: $0 scan $server_url yara" + return 1 + fi + fi + + # Build command + local cmd="cd \"$PROJECT_ROOT\" && uv run cli/mcp_security_scanner.py --server-url \"$server_url\" --analyzers \"$analyzers\"" + + # Add API key if provided + if [ -n "$api_key" ]; then + cmd="$cmd --api-key \"$api_key\"" + fi + + # Add headers if provided + if [ -n "$headers" ]; then + cmd="$cmd --headers '$headers'" + fi + + print_info "Running security scan..." + print_info "Analyzers: $analyzers" + + # Run scan and capture exit code + if eval "$cmd"; then + print_success "Security scan completed - Server is SAFE" + return 0 + else + local exit_code=$? + if [ $exit_code -eq 1 ]; then + print_error "Security scan completed - Server is UNSAFE (has critical or high severity issues)" + else + print_error "Security scan failed with error code $exit_code" + fi + return $exit_code + fi +} + show_usage() { - echo "Usage: $0 {add|delete|monitor|test|add-to-groups|remove-from-groups|create-group|delete-group|list-groups} [args...]" + echo "Usage: $0 {add|delete|monitor|test|scan|add-to-groups|remove-from-groups|create-group|delete-group|list-groups} [args...]" echo "" echo "Service Commands:" - echo " add - Add a service using JSON config and verify registration" + echo " add [analyzers] - Add a service using JSON config and verify registration" + echo " analyzers: yara (default), llm, or yara,llm" echo " delete - Delete a service by path and name" echo " monitor [config-file] - Run health check (all services or specific service from config)" echo " test - Test service searchability using intelligent_tool_finder" + echo " scan [analyzers] [api-key] - Run security scan on MCP server" + echo " analyzers: yara (default), llm, or yara,llm" echo "" echo "Server-to-Group Commands:" echo " add-to-groups - Add server to specific scopes groups (comma-separated)" @@ -704,12 +943,22 @@ show_usage() { echo "" echo "Examples:" echo " # Service operations" - echo " $0 add 
cli/examples/example-server-config.json" + echo " $0 add cli/examples/example-server-config.json # Add with default YARA analyzer" + echo " export MCP_SCANNER_LLM_API_KEY=sk-..." + echo " $0 add cli/examples/example-server-config.json yara,llm # Add with both analyzers" + echo " $0 add cli/examples/example-server-config.json llm # Add with only LLM analyzer" echo " $0 delete /example-server example-server" echo " $0 monitor # All services" echo " $0 monitor cli/examples/example-server-config.json # Specific service" echo " $0 test cli/examples/example-server-config.json # Test searchability" echo "" + echo " # Security scanning" + echo " $0 scan https://mcp.deepwki.com/mcp # Security scan with default YARA" + echo " export MCP_SCANNER_LLM_API_KEY=sk-..." + echo " $0 scan https://mcp.deepwki.com/mcp yara,llm # Scan with both analyzers (uses env var)" + echo " $0 scan https://mcp.deepwki.com/mcp llm sk-... # Scan with only LLM (pass API key directly)" + echo " $0 scan https://mcp.deepwki.com/mcp yara '' '{\"X-Authorization\": \"token\"}' # Scan with custom headers" + echo "" echo " # Server-to-group operations" echo " $0 add-to-groups example-server 'mcp-servers-restricted/read,mcp-servers-restricted/execute'" echo " $0 remove-from-groups example-server 'mcp-servers-restricted/read,mcp-servers-restricted/execute'" @@ -969,7 +1218,7 @@ list_groups() { # Main script logic case "${1:-}" in add) - add_service "$2" + add_service "$2" "$3" ;; delete) delete_service "$2" "$3" @@ -980,6 +1229,9 @@ case "${1:-}" in test) test_service "$2" ;; + scan) + scan_server_security "$2" "$3" "$4" "$5" + ;; add-to-groups) add_to_groups "$2" "$3" ;; diff --git a/docs/scan_report_example.md b/docs/scan_report_example.md new file mode 100644 index 0000000..e76cd7c --- /dev/null +++ b/docs/scan_report_example.md @@ -0,0 +1,223 @@ +# MCP Server Security Scan Report + +**Scan Date:** 2025-10-21 23:50:03 UTC +**Analyzers Used:** yara + +## Executive Summary + +- **Total Servers Scanned:** 6 +- 
**Passed:** 2 (33.3%) +- **Failed:** 4 (66.7%) + +### Aggregate Vulnerability Statistics + +| Severity | Count | +|----------|-------| +| Critical | 0 | +| High | 3 | +| Medium | 0 | +| Low | 0 | + +## Per-Server Scan Results + +### io.mcpgateway/atlassian + +- **URL:** `https://mcpgateway.ddns.net/atlassian/mcp` +- **Status:** āŒ UNSAFE + +| Severity | Count | +|----------|-------| +| Critical | 0 | +| High | 2 | +| Medium | 0 | +| Low | 0 | + +#### Detailed Findings + +**Tool: `jira_get_user_profile`** + +- **Analyzer:** yara_analyzer +- **Severity:** HIGH +- **Threats:** INJECTION ATTACK +- **Summary:** Detected 1 threat: sql injection + +**Taxonomy:** +```json +{ + "scanner_category": "INJECTION ATTACK", + "aitech": "AITech-9.1", + "aitech_name": "Model or Agentic System Manipulation", + "aisubtech": "AISubtech-9.1.4", + "aisubtech_name": "Injection Attacks (SQL, Command Execution, XSS)", + "description": "Injecting malicious payloads such as SQL queries, command sequences, or scripts into MCP servers or tools that process model or user input, leading to data exposure, remote code execution, or compromise of the underlying system environment." +} +``` + +
+<details>
+<summary>Tool Description</summary>
+
+```
+
+    Retrieve profile information for a specific Jira user.
+
+    Args:
+        ctx: The FastMCP context.
+        user_identifier: User identifier (email, username, key, or account ID).
+
+    Returns:
+        JSON string representing the Jira user profile object, or an error object if not found.
+
+    Raises:
+        ValueError: If the Jira client is not configured or available.
+
+```
+</details>
+
+ +**Tool: `jira_get_agile_boards`** + +- **Analyzer:** yara_analyzer +- **Severity:** HIGH +- **Threats:** INJECTION ATTACK +- **Summary:** Detected 1 threat: sql injection + +**Taxonomy:** +```json +{ + "scanner_category": "INJECTION ATTACK", + "aitech": "AITech-9.1", + "aitech_name": "Model or Agentic System Manipulation", + "aisubtech": "AISubtech-9.1.4", + "aisubtech_name": "Injection Attacks (SQL, Command Execution, XSS)", + "description": "Injecting malicious payloads such as SQL queries, command sequences, or scripts into MCP servers or tools that process model or user input, leading to data exposure, remote code execution, or compromise of the underlying system environment." +} +``` + +
+Tool Description + +``` +Get jira agile boards by name, project key, or type. + + Args: + ctx: The FastMCP context. + board_name: Name of the board (fuzzy search). + project_key: Project key. + board_type: Board type ('scrum' or 'kanban'). + start_at: Starting index. + limit: Maximum results. + + Returns: + JSON string representing a list of board objects. + +``` +
+ +**Error:** Scanner exit code: 1 + +### io.mcpgateway/currenttime + +- **URL:** `https://mcpgateway.ddns.net/currenttime/mcp` +- **Status:** āœ… SAFE + +| Severity | Count | +|----------|-------| +| Critical | 0 | +| High | 0 | +| Medium | 0 | +| Low | 0 | + +### io.mcpgateway/fininfo + +- **URL:** `https://mcpgateway.ddns.net/fininfo/mcp` +- **Status:** āœ… SAFE + +| Severity | Count | +|----------|-------| +| Critical | 0 | +| High | 0 | +| Medium | 0 | +| Low | 0 | + +**Error:** Scanner exit code: 1 + +### io.mcpgateway/mcpgw + +- **URL:** `https://mcpgateway.ddns.net/mcpgw/mcp` +- **Status:** āŒ UNSAFE + +| Severity | Count | +|----------|-------| +| Critical | 0 | +| High | 1 | +| Medium | 0 | +| Low | 0 | + +#### Detailed Findings + +**Tool: `healthcheck`** + +- **Analyzer:** yara_analyzer +- **Severity:** HIGH +- **Threats:** INJECTION ATTACK +- **Summary:** Detected 1 threat: sql injection + +**Taxonomy:** +```json +{ + "scanner_category": "INJECTION ATTACK", + "aitech": "AITech-9.1", + "aitech_name": "Model or Agentic System Manipulation", + "aisubtech": "AISubtech-9.1.4", + "aisubtech_name": "Injection Attacks (SQL, Command Execution, XSS)", + "description": "Injecting malicious payloads such as SQL queries, command sequences, or scripts into MCP servers or tools that process model or user input, leading to data exposure, remote code execution, or compromise of the underlying system environment." +} +``` + +
+Tool Description + +``` +Retrieves health status information from all registered MCP servers via the registry's internal API. + +Returns: + Dict[str, Any]: Health status information for all registered servers, including: + - status: 'healthy' or 'disabled' + - last_checked_iso: ISO timestamp of when the server was last checked + - num_tools: Number of tools provided by the server + +Raises: + Exception: If the API call fails or data cannot be retrieved +``` +
+ +**Error:** Scanner exit code: 1 + +### io.mcpgateway/realserverfaketools + +- **URL:** `https://mcpgateway.ddns.net/realserverfaketools/mcp` +- **Status:** āœ… SAFE + +| Severity | Count | +|----------|-------| +| Critical | 0 | +| High | 0 | +| Medium | 0 | +| Low | 0 | + +### io.mcpgateway/sre-gateway + +- **URL:** `https://mcpgateway.ddns.net/sre-gateway/mcp` +- **Status:** āœ… SAFE + +| Severity | Count | +|----------|-------| +| Critical | 0 | +| High | 0 | +| Medium | 0 | +| Low | 0 | + +**Error:** Scanner exit code: 1 + +--- + +*Report generated on 2025-10-21 23:50:03 UTC* diff --git a/docs/security-scanner.md b/docs/security-scanner.md new file mode 100644 index 0000000..4872e19 --- /dev/null +++ b/docs/security-scanner.md @@ -0,0 +1,368 @@ +# MCP Security Scanner - Supply Chain Security for MCP Servers + +## Introduction + +As organizations integrate Model Context Protocol (MCP) servers into their AI workflows, supply chain security becomes critical. MCP servers are third-party components that provide tools and capabilities to AI agents, making them potential vectors for security vulnerabilities, malicious code injection, and data exfiltration. + +The MCP Gateway Registry addresses this challenge by integrating automated security scanning powered by the **Cisco AI Defence MCP Scanner**. This open-source security tool performs deep analysis of MCP server tools to identify vulnerabilities before they can be exploited in production environments. + +**GitHub Repository:** https://github.com/cisco-ai-defense/mcp-scanner + +### Security Scanning Workflows + +The registry implements two complementary security scanning workflows: + +1. **Automated Scanning During Server Addition** - Every new server is scanned before being made available to AI agents +2. 
**Periodic Registry Scans** - Comprehensive security audits across all enabled servers in the registry + +These workflows ensure continuous security monitoring throughout the MCP server lifecycle, from initial registration through ongoing operations. + +## Security Scanning During Server Addition + +When adding a new MCP server to the registry, a security scan is automatically performed as part of the registration workflow. This pre-deployment scanning prevents vulnerable or malicious servers from being exposed to AI agents. + +### Command Format + +```bash +./cli/service_mgmt.sh add <config-file> [analyzers] +``` + +**Parameters:** +- `<config-file>`: JSON configuration file containing server details +- `[analyzers]`: Optional comma-separated list of analyzers to use (default: `yara`) + - `yara` - Fast pattern-based detection (no API key required) + - `llm` - AI-powered deep analysis (requires API key) + - `yara,llm` - Both analyzers for comprehensive coverage + +### Example: Adding Cloudflare Documentation Server + +**Configuration File** (`cli/examples/cloudflare-docs-server-config.json`): + +```json +{ + "server_name": "Cloudflare Documentation MCP Server", + "description": "Search Cloudflare documentation and get migration guides", + "path": "/cloudflare-docs", + "proxy_pass_url": "https://docs.mcp.cloudflare.com/mcp", + "supported_transports": ["streamable-http"] +} +``` + +**Adding the Server with Security Scan:** + +```bash +# Add with default YARA analyzer (fast, no API key required) +./cli/service_mgmt.sh add cli/examples/cloudflare-docs-server-config.json + +# Add with both YARA and LLM analyzers (comprehensive scan) +./cli/service_mgmt.sh add cli/examples/cloudflare-docs-server-config.json yara,llm +``` + +### Security Scan Results + +The scanner analyzes each tool provided by the MCP server and generates a detailed security report.
Here's an example of scan results for the Cloudflare Documentation server: + +**Scan Output** (`security_scans/docs.mcp.cloudflare.com_mcp.json`): + +```json +{ + "analysis_results": { + "yara_analyzer": { + "findings": [ + { + "tool_name": "search_cloudflare_documentation", + "severity": "SAFE", + "threat_names": [], + "threat_summary": "No threats detected", + "is_safe": true + }, + { + "tool_name": "migrate_pages_to_workers_guide", + "severity": "SAFE", + "threat_names": [], + "threat_summary": "No threats detected", + "is_safe": true + } + ] + } + }, + "tool_results": [ + { + "tool_name": "search_cloudflare_documentation", + "tool_description": "Search the Cloudflare documentation...", + "status": "completed", + "is_safe": true, + "findings": { + "yara_analyzer": { + "severity": "SAFE", + "threat_names": [], + "threat_summary": "No threats detected", + "total_findings": 0 + } + } + }, + { + "tool_name": "migrate_pages_to_workers_guide", + "tool_description": "ALWAYS read this guide before migrating Pages projects to Workers.", + "status": "completed", + "is_safe": true, + "findings": { + "yara_analyzer": { + "severity": "SAFE", + "threat_names": [], + "threat_summary": "No threats detected", + "total_findings": 0 + } + } + } + ] +} +``` + +### What Happens When a Scan Fails + +If the security scan detects critical or high severity vulnerabilities: + +1. **Server is Added but Disabled** - The server is registered in the database but marked as `disabled` +2. **Security-Pending Tag** - The server receives a `security-pending` tag to flag it for review +3. **AI Agents Cannot Access** - Disabled servers are excluded from agent discovery and tool routing +4. **Visible in UI** - The server appears in the registry UI with clear indicators of its security status +5. 
**Detailed Report Generated** - A comprehensive JSON report is saved to `security_scans/` directory + +**Console Output for Failed Scan:** + +``` +=== Security Scan === +Scanning server for security vulnerabilities... +Security scan failed - Server has critical or high severity issues +Server will be registered but marked as UNHEALTHY with security-pending status + +Security Issues Found: + Critical: 2 + High: 3 + Medium: 1 + Low: 0 + +Detailed report: security_scans/scan_example.com_mcp_20251022_103045.json + +=== Security Status Update === +Marking server as UNHEALTHY due to failed security scan... +Server registered but flagged as security-pending +Review the security scan report before enabling this server + +Service example-server successfully added and verified +WARNING: Server failed security scan - Review required before use +``` + +**Screenshot:** + +![Failed Security Scan - Server in Disabled State with Security-Pending Tag](img/failed_scan.png) + +*Servers that fail security scans are automatically added in disabled state with a `security-pending` tag, requiring administrator review before being enabled.* + +This workflow ensures that vulnerable servers never become accessible to AI agents without explicit administrator review and remediation. + +## Periodic Registry Scans + +Beyond initial registration security checks, the registry supports comprehensive periodic scans of all enabled servers. This ongoing monitoring detects newly discovered vulnerabilities and ensures continued security compliance. 
+ +### Command to Run Periodic Scans + +```bash +cd /home/ubuntu/repos/mcp-gateway-registry +uv run cli/scan_all_servers.py --base-url https://mcpgateway.example.com +``` + +**Command Options:** + +```bash +# Scan with default YARA analyzer +uv run cli/scan_all_servers.py --base-url https://mcpgateway.example.com + +# Scan with both YARA and LLM analyzers (requires API key in .env) +uv run cli/scan_all_servers.py --base-url https://mcpgateway.example.com --analyzers yara,llm + +# Specify custom output directory +uv run cli/scan_all_servers.py --base-url https://mcpgateway.example.com --output-dir custom_scans +``` + +### Generated Report + +The periodic scan generates a comprehensive markdown report that provides an executive summary and detailed vulnerability breakdown for each server in the registry. + +**Report Locations:** +- **Latest Report:** `security_scans/scan_report.md` (always current) +- **Archived Reports:** `security_scans/reports/scan_report_YYYYMMDD_HHMMSS.md` (timestamped history) + +For a complete example of the report format and structure, see [scan_report_example.md](scan_report_example.md). + +### Report Contents + +The generated security report includes: + +1. **Executive Summary** + - Total servers scanned + - Pass/fail statistics + - Overall security posture metrics + +2. **Aggregate Vulnerability Statistics** + - Total count by severity level (Critical, High, Medium, Low) + - Trend analysis across multiple scans + +3. **Per-Server Vulnerability Breakdown** + - Individual server security status + - Severity distribution per server + - Scan timestamp and analyzer information + +4. 
**Detailed Findings for Vulnerable Tools** + - Specific tool names and descriptions + - Threat categories and taxonomy + - AI Security Framework (AITech) classification + - Remediation guidance + +**Example Report Summary:** + +--- + +# MCP Server Security Scan Report + +**Scan Date:** 2025-10-21 23:50:03 UTC +**Analyzers Used:** yara + +## Executive Summary + +- **Total Servers Scanned:** 6 +- **Passed:** 2 (33.3%) +- **Failed:** 4 (66.7%) + +### Aggregate Vulnerability Statistics + +| Severity | Count | +|----------|-------| +| Critical | 0 | +| High | 3 | +| Medium | 0 | +| Low | 0 | + +--- + +These reports enable security teams to track vulnerability trends, prioritize remediation efforts, and maintain compliance with organizational security policies. + +## Analyzers + +The MCP Scanner supports two analyzer types, each with distinct capabilities and use cases: + +### YARA Analyzer + +**Type:** Pattern-based detection +**Speed:** Fast (seconds per server) +**API Key Required:** No +**Best For:** Known threat patterns, common vulnerabilities + +The YARA analyzer uses signature-based detection rules to identify known security threats including: +- SQL injection patterns +- Command injection vulnerabilities +- Cross-site scripting (XSS) vectors +- Path traversal attempts +- Hardcoded credentials +- Malicious code patterns + +YARA scanning is ideal for automated workflows and continuous integration pipelines due to its speed and zero-configuration requirements. + +### LLM Analyzer + +**Type:** AI-powered semantic analysis +**Speed:** Slower (requires API calls) +**API Key Required:** Yes (OpenAI-compatible API) +**Best For:** Sophisticated threats, zero-day vulnerabilities, context-aware analysis + +The LLM analyzer uses large language models to perform deep semantic analysis of tool code and descriptions. 
It can detect: +- Subtle logic vulnerabilities +- Context-dependent security issues +- Novel attack patterns +- Business logic flaws +- Privacy concerns in data handling + +LLM scanning is recommended for high-value or high-risk MCP servers where comprehensive security analysis justifies the additional time and API costs. + +### Analyzer Comparison + +| Feature | YARA | LLM | Both | +|---------|------|-----|------| +| **Speed** | Fast (seconds) | Slower (minutes) | Slower | +| **API Key** | Not required | Required | Required | +| **Detection Type** | Pattern-based | Semantic analysis | Comprehensive | +| **False Positives** | Low | Medium | Low | +| **Coverage** | Known threats | Known + novel threats | Maximum | +| **Use Case** | Automated scans, CI/CD | Critical servers, deep analysis | High-security environments | + +### Configuring LLM Analyzer + +To use the LLM analyzer, set the API key in your `.env` file: + +```bash +# Add to .env file +MCP_SCANNER_LLM_API_KEY=sk-your-openai-api-key +``` + +**Using Both Analyzers:** + +```bash +# During server addition +./cli/service_mgmt.sh add config.json yara,llm + +# During periodic scans +uv run cli/scan_all_servers.py --base-url https://mcpgateway.example.com --analyzers yara,llm +``` + +### Recommendation + +- **Default (YARA only):** Suitable for most use cases, provides fast scanning with no API costs +- **Add LLM:** For critical production servers, sensitive data environments, or when unknown threats are a concern +- **Both analyzers:** Recommended for maximum security coverage in high-stakes deployments + +The combination of both analyzers provides defense-in-depth, with YARA catching known threats quickly and LLM performing deeper analysis for sophisticated attacks. 
+ +## Prerequisites + +### Set LLM API Key (Optional) + +Only required if using the LLM analyzer: + +```bash +# Add to .env file (recommended) +echo "MCP_SCANNER_LLM_API_KEY=sk-your-api-key" >> .env +``` + +## Troubleshooting + +### API Key Issues + +If you see errors about missing API keys when using the LLM analyzer: + +```bash +# Verify the key is set +echo $MCP_SCANNER_LLM_API_KEY + +# Add to .env file +echo "MCP_SCANNER_LLM_API_KEY=sk-your-key" >> .env +``` + +Note: The scanner uses `MCP_SCANNER_LLM_API_KEY`, not `OPENAI_API_KEY`. + +### Permission Issues + +Ensure the `security_scans/` directory is writable: + +```bash +mkdir -p security_scans +chmod 755 security_scans +``` + +## Additional Resources + +- **Cisco AI Defence MCP Scanner:** https://github.com/cisco-ai-defense/mcp-scanner +- **Service Management Script:** `cli/service_mgmt.sh` +- **Security Scanner CLI:** `cli/mcp_security_scanner.py` +- **Periodic Scan Script:** `cli/scan_all_servers.py` +- **Example Report:** [scan_report_example.md](scan_report_example.md) diff --git a/frontend/src/components/ServerCard.tsx b/frontend/src/components/ServerCard.tsx index 0fb7783..10fc87d 100644 --- a/frontend/src/components/ServerCard.tsx +++ b/frontend/src/components/ServerCard.tsx @@ -292,6 +292,9 @@ const ServerCard: React.FC = ({ server, onToggle, onEdit, canMo // Check if this is an Anthropic registry server const isAnthropicServer = server.tags?.includes('anthropic-registry'); + // Check if this server has security pending + const isSecurityPending = server.tags?.includes('security-pending'); + console.log('isSecurityPending', isSecurityPending) return ( <>
= ({ server, onToggle, onEdit, canMo ANTHROPIC )} + {isSecurityPending && ( + + SECURITY PENDING + + )}
diff --git a/pyproject.toml b/pyproject.toml index bdd7ea5..0008b09 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -38,6 +38,7 @@ dependencies = [ "aiohttp>=3.8.0", "rich>=13.0.0", "requests>=2.31.0", + "cisco-ai-mcp-scanner>=3.0.1", ] [project.optional-dependencies] diff --git a/registry/core/mcp_client.py b/registry/core/mcp_client.py index d3beb2b..2e24fa3 100644 --- a/registry/core/mcp_client.py +++ b/registry/core/mcp_client.py @@ -59,15 +59,19 @@ def normalize_sse_endpoint_url(endpoint_url: str) -> str: def _build_headers_for_server(server_info: dict = None) -> Dict[str, str]: """ Build HTTP headers for server requests by merging server-specific headers. - + Args: server_info: Server configuration dictionary - + Returns: Headers dictionary with server-specific headers """ - headers = {} - + # Start with default MCP headers (required by some servers like Cloudflare) + headers = { + 'Accept': 'application/json, text/event-stream', + 'Content-Type': 'application/json' + } + # Merge server-specific headers if present if server_info: server_headers = server_info.get("headers", []) @@ -76,7 +80,7 @@ def _build_headers_for_server(server_info: dict = None) -> Dict[str, str]: if isinstance(header_dict, dict): headers.update(header_dict) logger.debug(f"Added server headers to MCP client: {header_dict}") - + return headers @@ -228,9 +232,8 @@ async def _get_tools_streamable_http(base_url: str, server_info: dict = None) -> # If URL already has MCP endpoint, use it directly if base_url.endswith('/mcp') or '/mcp/' in base_url: mcp_url = base_url - if not mcp_url.endswith('/'): - mcp_url += '/' - + # Don't add trailing slash - some servers like Cloudflare reject it + # Handle streamable-http and sse servers imported from anthropinc by adding required query parameter if server_info and 'tags' in server_info and 'anthropic-registry' in server_info.get('tags', []): if '?' 
not in mcp_url: diff --git a/registry/health/service.py b/registry/health/service.py index 5825178..322862b 100644 --- a/registry/health/service.py +++ b/registry/health/service.py @@ -368,10 +368,30 @@ async def _check_single_service(self, client: httpx.AsyncClient, service_path: s if is_healthy: new_status = status_detail # Could be "healthy" or "healthy-auth-expired" - - # If service transitioned to healthy (including auth-expired), fetch tool list (but don't block) + + # Fetch tools in these cases: + # 1. First health check (previous_status == UNKNOWN) + # 2. Service transitioned to healthy from unhealthy + # 3. Service is healthy but has no tools yet (tool_list is empty) # Only do this for fully healthy status, not auth-expired - if previous_status != HealthStatus.HEALTHY and status_detail == HealthStatus.HEALTHY: + should_fetch_tools = False + if status_detail == HealthStatus.HEALTHY: + if previous_status == HealthStatus.UNKNOWN: + # First health check - always fetch tools + should_fetch_tools = True + logger.info(f"First health check for {service_path} - will fetch tools") + elif previous_status != HealthStatus.HEALTHY: + # Transitioned to healthy - fetch tools + should_fetch_tools = True + logger.info(f"Service {service_path} transitioned to healthy - will fetch tools") + else: + # Already healthy - only fetch if we don't have tools + current_tool_list = server_info.get("tool_list", []) + if not current_tool_list: + should_fetch_tools = True + logger.info(f"Service {service_path} is healthy but has no tools - will fetch tools") + + if should_fetch_tools: asyncio.create_task(self._update_tools_background(service_path, proxy_pass_url)) else: new_status = status_detail # Detailed error message from transport check @@ -391,22 +411,35 @@ async def _check_single_service(self, client: httpx.AsyncClient, service_path: s return previous_status != new_status - def _build_headers_for_server(self, server_info: Dict) -> Dict[str, str]: + def _build_headers_for_server( + 
self, + server_info: Dict, + include_session_id: bool = False + ) -> Dict[str, str]: """ Build HTTP headers for server requests by merging default headers with server-specific headers. - + Args: server_info: Server configuration dictionary - + include_session_id: Whether to generate and include Mcp-Session-Id header + Returns: Merged headers dictionary """ + import uuid + # Start with default headers for MCP endpoints headers = { 'Accept': 'application/json, text/event-stream', 'Content-Type': 'application/json' } - + + # Add session ID if requested (required by some MCP servers like Cloudflare) + if include_session_id: + session_id = str(uuid.uuid4()) + headers['Mcp-Session-Id'] = session_id + logger.debug(f"Generated Mcp-Session-Id: {session_id}") + # Merge server-specific headers if present server_headers = server_info.get("headers", []) if server_headers and isinstance(server_headers, list): @@ -414,33 +447,106 @@ def _build_headers_for_server(self, server_info: Dict) -> Dict[str, str]: if isinstance(header_dict, dict): headers.update(header_dict) logger.debug(f"Added server headers: {header_dict}") - + return headers + async def _initialize_mcp_session( + self, + client: httpx.AsyncClient, + endpoint: str, + headers: Dict[str, str] + ) -> Optional[str]: + """ + Initialize an MCP session and retrieve the session ID from the server. 
+ + Args: + client: httpx AsyncClient instance + endpoint: The MCP endpoint URL + headers: Headers to send with the request + + Returns: + Session ID string if successful, None otherwise + """ + import uuid + + try: + # Send initialize request without session ID + # The server will generate and return a session ID in the response header + init_headers = headers.copy() + + initialize_payload = { + "jsonrpc": "2.0", + "id": "0", + "method": "initialize", + "params": { + "protocolVersion": "2024-11-05", + "capabilities": {}, + "clientInfo": { + "name": "mcp-gateway-registry", + "version": "1.0.0" + } + } + } + + response = await client.post( + endpoint, + headers=init_headers, + json=initialize_payload, + timeout=httpx.Timeout(5.0), + follow_redirects=True + ) + + # Check if initialize succeeded + if response.status_code not in [200, 201]: + logger.warning( + f"MCP initialize failed for {endpoint}: " + f"Status {response.status_code}, Response: {response.text[:200]}" + ) + return None + + # Get session ID from response headers (server-generated) + server_session_id = response.headers.get('Mcp-Session-Id') or response.headers.get('mcp-session-id') + if server_session_id: + logger.debug(f"Server returned session ID: {server_session_id}") + return server_session_id + else: + # If server doesn't return a session ID, generate one for stateless servers + client_session_id = str(uuid.uuid4()) + logger.debug(f"Server did not return session ID, using client-generated: {client_session_id}") + return client_session_id + + except Exception as e: + logger.warning(f"MCP initialize failed for {endpoint}: {e}") + return None + + async def _try_ping_without_auth(self, client: httpx.AsyncClient, endpoint: str) -> bool: """ Try a simple ping without authentication headers. Used as fallback when auth fails to determine if server is reachable. 
- + Args: client: httpx AsyncClient instance endpoint: The MCP endpoint URL to ping - + Returns: bool: True if server responds (indicating it's reachable but auth expired) """ + import uuid + try: - # Minimal headers without auth + # Minimal headers without auth but with session ID (required by some servers) headers = { 'Accept': 'application/json', - 'Content-Type': 'application/json' + 'Content-Type': 'application/json', + 'Mcp-Session-Id': str(uuid.uuid4()) } ping_payload = '{ "jsonrpc": "2.0", "id": "0", "method": "ping" }' - + response = await client.post( - endpoint, - headers=headers, + endpoint, + headers=headers, content=ping_payload, timeout=httpx.Timeout(5.0), follow_redirects=True @@ -470,9 +576,13 @@ async def _check_server_endpoint_transport_aware(self, client: httpx.AsyncClient # Get transport information from server_info supported_transports = server_info.get("supported_transports", ["streamable-http"]) - + # If URL already has transport endpoint, use it directly - if proxy_pass_url.endswith('/mcp') or proxy_pass_url.endswith('/sse') or '/mcp/' in proxy_pass_url or '/sse/' in proxy_pass_url: + # BUT skip this shortcut for streamable-http to ensure proper POST ping is used + has_transport_in_url = (proxy_pass_url.endswith('/mcp') or proxy_pass_url.endswith('/sse') or + '/mcp/' in proxy_pass_url or '/sse/' in proxy_pass_url) + + if has_transport_in_url and "streamable-http" not in supported_transports: logger.info(f"[TRACE] Found transport endpoint in URL: {proxy_pass_url}") logger.info(f"[TRACE] URL contains /mcp: {'/mcp' in proxy_pass_url}, URL contains /sse: {'/sse' in proxy_pass_url}") try: @@ -530,21 +640,39 @@ async def _check_server_endpoint_transport_aware(self, client: httpx.AsyncClient # Try streamable-http first (default preference) if "streamable-http" in supported_transports: logger.info(f"[TRACE] Trying streamable-http transport") - headers = self._build_headers_for_server(server_info) - + # Build base headers without session ID + 
headers = self._build_headers_for_server(server_info, include_session_id=False) + # Only try /mcp endpoint for streamable-http transport - if base_url.endswith('/mcp'): + # Don't append /mcp if URL already has a transport endpoint (/mcp or /sse) + if base_url.endswith('/mcp') or base_url.endswith('/sse') or '/mcp/' in base_url or '/sse/' in base_url: endpoint = f"{base_url}" else: endpoint = f"{base_url}/mcp" - ping_payload = '{ "jsonrpc": "2.0", "id": "0", "method": "ping" }' - + try: - logger.info(f"[TRACE] Trying endpoint: {endpoint}") + # Step 1: Initialize session to get session ID + logger.info(f"[TRACE] Initializing MCP session for endpoint: {endpoint}") + session_id = await self._initialize_mcp_session(client, endpoint, headers) + + # If initialize failed, check if it was due to auth (401/403) + # Try ping without auth before giving up + if not session_id: + logger.warning(f"Failed to initialize MCP session for {endpoint}, trying ping without auth") + if await self._try_ping_without_auth(client, endpoint): + return True, HealthStatus.HEALTHY + else: + return False, "unhealthy: session initialization failed and ping without auth failed" + + # Step 2: Add session ID to headers for ping + headers['Mcp-Session-Id'] = session_id + ping_payload = '{ "jsonrpc": "2.0", "id": "0", "method": "ping" }' + + logger.info(f"[TRACE] Sending ping to endpoint: {endpoint}") logger.info(f"[TRACE] Headers being sent: {headers}") response = await client.post(endpoint, headers=headers, content=ping_payload, follow_redirects=True) logger.info(f"[TRACE] Response status: {response.status_code}") - + # Check for auth failures first if response.status_code in [401, 403]: logger.info(f"[TRACE] Auth failure detected ({response.status_code}) for {endpoint}, trying ping without auth") diff --git a/uv.lock b/uv.lock index 337621c..9630deb 100644 --- a/uv.lock +++ b/uv.lock @@ -241,6 +241,25 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/20/94/c5790835a017658cbfabd07f3bfb549140c3ac458cfc196323996b10095a/charset_normalizer-3.4.2-py3-none-any.whl", hash = "sha256:7f56930ab0abd1c45cd15be65cc741c28b1c9a34876ce8c17a2fa107810c0af0", size = 52626, upload-time = "2025-05-02T08:34:40.053Z" }, ] +[[package]] +name = "cisco-ai-mcp-scanner" +version = "3.0.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "fastapi" }, + { name = "httpx" }, + { name = "litellm" }, + { name = "mcp", extra = ["cli"] }, + { name = "pydantic" }, + { name = "python-dotenv" }, + { name = "uvicorn" }, + { name = "yara-python" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/64/52/9a7a9123100e7d4745c55109222dedc9dc0ad0f8565513d5d912769ec896/cisco_ai_mcp_scanner-3.0.1.tar.gz", hash = "sha256:c53442251441eb6fd5f3d7b413d097297ac1aad670010a299e881903e4d8bed6", size = 146935, upload-time = "2025-10-21T04:35:52.081Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/dc/bb/76deb9e00c1a46dd1c73bde7ad16d08d43b39cc3312404d8c1ffea019bde/cisco_ai_mcp_scanner-3.0.1-py3-none-any.whl", hash = "sha256:bfb5b95304cbf576cc6ce8e3427ec94cc04935e497429443dc57eeeb3e771712", size = 168864, upload-time = "2025-10-21T04:35:50.399Z" }, +] + [[package]] name = "click" version = "8.2.1" @@ -421,6 +440,25 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/50/b3/b51f09c2ba432a576fe63758bddc81f78f0c6309d9e5c10d194313bf021e/fastapi-0.115.12-py3-none-any.whl", hash = "sha256:e94613d6c05e27be7ffebdd6ea5f388112e5e430c8f7d6494a9d1d88d43e814d", size = 95164, upload-time = "2025-03-23T22:55:42.101Z" }, ] +[[package]] +name = "fastuuid" +version = "0.14.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/c3/7d/d9daedf0f2ebcacd20d599928f8913e9d2aea1d56d2d355a93bfa2b611d7/fastuuid-0.14.0.tar.gz", hash = "sha256:178947fc2f995b38497a74172adee64fdeb8b7ec18f2a5934d037641ba265d26", size = 18232, upload-time = 
"2025-10-19T22:19:22.402Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/02/a2/e78fcc5df65467f0d207661b7ef86c5b7ac62eea337c0c0fcedbeee6fb13/fastuuid-0.14.0-cp312-cp312-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:77e94728324b63660ebf8adb27055e92d2e4611645bf12ed9d88d30486471d0a", size = 510164, upload-time = "2025-10-19T22:31:45.635Z" }, + { url = "https://files.pythonhosted.org/packages/2b/b3/c846f933f22f581f558ee63f81f29fa924acd971ce903dab1a9b6701816e/fastuuid-0.14.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:caa1f14d2102cb8d353096bc6ef6c13b2c81f347e6ab9d6fbd48b9dea41c153d", size = 261837, upload-time = "2025-10-19T22:38:38.53Z" }, + { url = "https://files.pythonhosted.org/packages/54/ea/682551030f8c4fa9a769d9825570ad28c0c71e30cf34020b85c1f7ee7382/fastuuid-0.14.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d23ef06f9e67163be38cece704170486715b177f6baae338110983f99a72c070", size = 251370, upload-time = "2025-10-19T22:40:26.07Z" }, + { url = "https://files.pythonhosted.org/packages/14/dd/5927f0a523d8e6a76b70968e6004966ee7df30322f5fc9b6cdfb0276646a/fastuuid-0.14.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0c9ec605ace243b6dbe3bd27ebdd5d33b00d8d1d3f580b39fdd15cd96fd71796", size = 277766, upload-time = "2025-10-19T22:37:23.779Z" }, + { url = "https://files.pythonhosted.org/packages/16/6e/c0fb547eef61293153348f12e0f75a06abb322664b34a1573a7760501336/fastuuid-0.14.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:808527f2407f58a76c916d6aa15d58692a4a019fdf8d4c32ac7ff303b7d7af09", size = 278105, upload-time = "2025-10-19T22:26:56.821Z" }, + { url = "https://files.pythonhosted.org/packages/2d/b1/b9c75e03b768f61cf2e84ee193dc18601aeaf89a4684b20f2f0e9f52b62c/fastuuid-0.14.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2fb3c0d7fef6674bbeacdd6dbd386924a7b60b26de849266d1ff6602937675c8", size = 301564, upload-time = 
"2025-10-19T22:30:31.604Z" }, + { url = "https://files.pythonhosted.org/packages/fc/fa/f7395fdac07c7a54f18f801744573707321ca0cee082e638e36452355a9d/fastuuid-0.14.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ab3f5d36e4393e628a4df337c2c039069344db5f4b9d2a3c9cea48284f1dd741", size = 459659, upload-time = "2025-10-19T22:31:32.341Z" }, + { url = "https://files.pythonhosted.org/packages/66/49/c9fd06a4a0b1f0f048aacb6599e7d96e5d6bc6fa680ed0d46bf111929d1b/fastuuid-0.14.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:b9a0ca4f03b7e0b01425281ffd44e99d360e15c895f1907ca105854ed85e2057", size = 478430, upload-time = "2025-10-19T22:26:22.962Z" }, + { url = "https://files.pythonhosted.org/packages/be/9c/909e8c95b494e8e140e8be6165d5fc3f61fdc46198c1554df7b3e1764471/fastuuid-0.14.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3acdf655684cc09e60fb7e4cf524e8f42ea760031945aa8086c7eae2eeeabeb8", size = 450894, upload-time = "2025-10-19T22:27:01.647Z" }, + { url = "https://files.pythonhosted.org/packages/90/eb/d29d17521976e673c55ef7f210d4cdd72091a9ec6755d0fd4710d9b3c871/fastuuid-0.14.0-cp312-cp312-win32.whl", hash = "sha256:9579618be6280700ae36ac42c3efd157049fe4dd40ca49b021280481c78c3176", size = 154374, upload-time = "2025-10-19T22:29:19.879Z" }, + { url = "https://files.pythonhosted.org/packages/cc/fc/f5c799a6ea6d877faec0472d0b27c079b47c86b1cdc577720a5386483b36/fastuuid-0.14.0-cp312-cp312-win_amd64.whl", hash = "sha256:d9e4332dc4ba054434a9594cbfaf7823b57993d7d8e7267831c3e059857cf397", size = 156550, upload-time = "2025-10-19T22:27:49.658Z" }, +] + [[package]] name = "filelock" version = "3.18.0" @@ -784,6 +822,33 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/71/92/5e77f98553e9e75130c78900d000368476aed74276eb8ae8796f65f00918/jsonpointer-3.0.0-py2.py3-none-any.whl", hash = "sha256:13e088adc14fca8b6aa8177c044e12701e6ad4b28ff10e65f2267a90109c9942", size = 7595, upload-time = "2024-06-10T19:24:40.698Z" }, ] +[[package]] +name = "jsonschema" +version 
= "4.25.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "attrs" }, + { name = "jsonschema-specifications" }, + { name = "referencing" }, + { name = "rpds-py" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/74/69/f7185de793a29082a9f3c7728268ffb31cb5095131a9c139a74078e27336/jsonschema-4.25.1.tar.gz", hash = "sha256:e4a9655ce0da0c0b67a085847e00a3a51449e1157f4f75e9fb5aa545e122eb85", size = 357342, upload-time = "2025-08-18T17:03:50.038Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bf/9c/8c95d856233c1f82500c2450b8c68576b4cf1c871db3afac5c34ff84e6fd/jsonschema-4.25.1-py3-none-any.whl", hash = "sha256:3fba0169e345c7175110351d456342c364814cfcf3b964ba4587f22915230a63", size = 90040, upload-time = "2025-08-18T17:03:48.373Z" }, +] + +[[package]] +name = "jsonschema-specifications" +version = "2025.9.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "referencing" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/19/74/a633ee74eb36c44aa6d1095e7cc5569bebf04342ee146178e2d36600708b/jsonschema_specifications-2025.9.1.tar.gz", hash = "sha256:b540987f239e745613c7a9176f3edb72b832a4ac465cf02712288397832b5e8d", size = 32855, upload-time = "2025-09-08T01:34:59.186Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/41/45/1a4ed80516f02155c51f51e8cedb3c1902296743db0bbc66608a0db2814f/jsonschema_specifications-2025.9.1-py3-none-any.whl", hash = "sha256:98802fee3a11ee76ecaca44429fda8a41bff98b00a0f2838151b113f210cc6fe", size = 18437, upload-time = "2025-09-08T01:34:57.871Z" }, +] + [[package]] name = "kiwisolver" version = "1.4.9" @@ -939,6 +1004,29 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/c8/10/ad3107b666c3203b7938d10ea6b8746b9735c399cf737a51386d58e41d34/langsmith-0.4.5-py3-none-any.whl", hash = "sha256:4167717a2cccc4dff5809dbddc439628e836f6fd13d4fdb31ea013bc8d5cfaf5", size = 367795, upload-time = "2025-07-10T22:08:02.548Z" }, ] 
+[[package]] +name = "litellm" +version = "1.76.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiohttp" }, + { name = "click" }, + { name = "fastuuid" }, + { name = "httpx" }, + { name = "importlib-metadata" }, + { name = "jinja2" }, + { name = "jsonschema" }, + { name = "openai" }, + { name = "pydantic" }, + { name = "python-dotenv" }, + { name = "tiktoken" }, + { name = "tokenizers" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/72/46/57b6539365616452bb6f4401487448ce62e62755738fce55d8222d7a557e/litellm-1.76.3.tar.gz", hash = "sha256:fc81219c59b17b26cc81276ce32582f3715612877ab11c1ea2c26e4853ac67e8", size = 10210403, upload-time = "2025-09-07T01:59:19.55Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d0/d9/5f8ed27241b487f51f04573b8ba06d4460ebed9f792ff5cc148649fbf862/litellm-1.76.3-py3-none-any.whl", hash = "sha256:d62e3ff2a80ec5e551c6d7a0fe199ffe718ecb6cbaa43fc9250dd8d7c0944352", size = 9000797, upload-time = "2025-09-07T01:59:16.261Z" }, +] + [[package]] name = "markdown" version = "3.8.2" @@ -1006,22 +1094,30 @@ wheels = [ [[package]] name = "mcp" -version = "1.9.3" +version = "1.18.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, { name = "httpx" }, { name = "httpx-sse" }, + { name = "jsonschema" }, { name = "pydantic" }, { name = "pydantic-settings" }, { name = "python-multipart" }, + { name = "pywin32", marker = "sys_platform == 'win32'" }, { name = "sse-starlette" }, { name = "starlette" }, { name = "uvicorn", marker = "sys_platform != 'emscripten'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/f2/df/8fefc0c6c7a5c66914763e3ff3893f9a03435628f6625d5e3b0dc45d73db/mcp-1.9.3.tar.gz", hash = "sha256:587ba38448e81885e5d1b84055cfcc0ca56d35cd0c58f50941cab01109405388", size = 333045, upload-time = "2025-06-05T15:48:25.681Z" } +sdist = { url = 
"https://files.pythonhosted.org/packages/1a/e0/fe34ce16ea2bacce489ab859abd1b47ae28b438c3ef60b9c5eee6c02592f/mcp-1.18.0.tar.gz", hash = "sha256:aa278c44b1efc0a297f53b68df865b988e52dd08182d702019edcf33a8e109f6", size = 482926, upload-time = "2025-10-16T19:19:55.125Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/79/45/823ad05504bea55cb0feb7470387f151252127ad5c72f8882e8fe6cf5c0e/mcp-1.9.3-py3-none-any.whl", hash = "sha256:69b0136d1ac9927402ed4cf221d4b8ff875e7132b0b06edd446448766f34f9b9", size = 131063, upload-time = "2025-06-05T15:48:24.171Z" }, + { url = "https://files.pythonhosted.org/packages/1b/44/f5970e3e899803823826283a70b6003afd46f28e082544407e24575eccd3/mcp-1.18.0-py3-none-any.whl", hash = "sha256:42f10c270de18e7892fdf9da259029120b1ea23964ff688248c69db9d72b1d0a", size = 168762, upload-time = "2025-10-16T19:19:53.2Z" }, +] + +[package.optional-dependencies] +cli = [ + { name = "python-dotenv" }, + { name = "typer" }, ] [[package]] @@ -1031,6 +1127,7 @@ source = { virtual = "." 
} dependencies = [ { name = "aiohttp" }, { name = "bandit" }, + { name = "cisco-ai-mcp-scanner" }, { name = "faiss-cpu" }, { name = "fastapi" }, { name = "httpcore", extra = ["asyncio"] }, @@ -1091,6 +1188,7 @@ docs = [ requires-dist = [ { name = "aiohttp", specifier = ">=3.8.0" }, { name = "bandit", specifier = ">=1.8.3" }, + { name = "cisco-ai-mcp-scanner", specifier = ">=3.0.1" }, { name = "coverage", extras = ["toml"], marker = "extra == 'dev'", specifier = ">=7.4.0" }, { name = "factory-boy", marker = "extra == 'dev'", specifier = ">=3.3.0" }, { name = "faiss-cpu", specifier = ">=1.7.4" }, @@ -1456,6 +1554,25 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/9e/4e/0d0c945463719429b7bd21dece907ad0bde437a2ff12b9b12fee94722ab0/nvidia_nvtx_cu12-12.6.77-py3-none-manylinux2014_x86_64.whl", hash = "sha256:6574241a3ec5fdc9334353ab8c479fe75841dbe8f4532a8fc97ce63503330ba1", size = 89265, upload-time = "2024-10-01T17:00:38.172Z" }, ] +[[package]] +name = "openai" +version = "2.6.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "distro" }, + { name = "httpx" }, + { name = "jiter" }, + { name = "pydantic" }, + { name = "sniffio" }, + { name = "tqdm" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ee/c7/e42bcd89dfd47fec8a30b9e20f93e512efdbfbb3391b05bbb79a2fb295fa/openai-2.6.0.tar.gz", hash = "sha256:f119faf7fc07d7e558c1e7c32c873e241439b01bd7480418234291ee8c8f4b9d", size = 592904, upload-time = "2025-10-20T17:17:24.588Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c0/0a/58e9dcd34abe273eaeac3807a8483073767b5609d01bb78ea2f048e515a0/openai-2.6.0-py3-none-any.whl", hash = "sha256:f33fa12070fe347b5787a7861c8dd397786a4a17e1c3186e239338dac7e2e743", size = 1005403, upload-time = "2025-10-20T17:17:22.091Z" }, +] + [[package]] name = "opentelemetry-api" version = "1.33.1" @@ -1975,6 +2092,16 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/81/c4/34e93fe5f5429d7570ec1fa436f1986fb1f00c3e0f43a589fe2bbcd22c3f/pytz-2025.2-py2.py3-none-any.whl", hash = "sha256:5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00", size = 509225, upload-time = "2025-03-25T02:24:58.468Z" }, ] +[[package]] +name = "pywin32" +version = "311" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e7/ab/01ea1943d4eba0f850c3c61e78e8dd59757ff815ff3ccd0a84de5f541f42/pywin32-311-cp312-cp312-win32.whl", hash = "sha256:750ec6e621af2b948540032557b10a2d43b0cee2ae9758c54154d711cc852d31", size = 8706543, upload-time = "2025-07-14T20:13:20.765Z" }, + { url = "https://files.pythonhosted.org/packages/d1/a8/a0e8d07d4d051ec7502cd58b291ec98dcc0c3fff027caad0470b72cfcc2f/pywin32-311-cp312-cp312-win_amd64.whl", hash = "sha256:b8c095edad5c211ff31c05223658e71bf7116daa0ecf3ad85f3201ea3190d067", size = 9495040, upload-time = "2025-07-14T20:13:22.543Z" }, + { url = "https://files.pythonhosted.org/packages/ba/3a/2ae996277b4b50f17d61f0603efd8253cb2d79cc7ae159468007b586396d/pywin32-311-cp312-cp312-win_arm64.whl", hash = "sha256:e286f46a9a39c4a18b319c28f59b61de793654af2f395c102b4f819e584b5852", size = 8710102, upload-time = "2025-07-14T20:13:24.682Z" }, +] + [[package]] name = "pyyaml" version = "6.0.2" @@ -2004,6 +2131,20 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/04/11/432f32f8097b03e3cd5fe57e88efb685d964e2e5178a48ed61e841f7fdce/pyyaml_env_tag-1.1-py3-none-any.whl", hash = "sha256:17109e1a528561e32f026364712fee1264bc2ea6715120891174ed1b980d2e04", size = 4722, upload-time = "2025-05-13T15:23:59.629Z" }, ] +[[package]] +name = "referencing" +version = "0.37.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "attrs" }, + { name = "rpds-py" }, + { name = "typing-extensions" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/22/f5/df4e9027acead3ecc63e50fe1e36aca1523e1719559c499951bb4b53188f/referencing-0.37.0.tar.gz", hash = "sha256:44aefc3142c5b842538163acb373e24cce6632bd54bdb01b21ad5863489f50d8", size = 78036, upload-time = "2025-10-13T15:30:48.871Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2c/58/ca301544e1fa93ed4f80d724bf5b194f6e4b945841c5bfd555878eea9fcb/referencing-0.37.0-py3-none-any.whl", hash = "sha256:381329a9f99628c9069361716891d34ad94af76e461dcb0335825aecc7692231", size = 26766, upload-time = "2025-10-13T15:30:47.625Z" }, +] + [[package]] name = "regex" version = "2024.11.6" @@ -2067,6 +2208,29 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/0d/9b/63f4c7ebc259242c89b3acafdb37b41d1185c07ff0011164674e9076b491/rich-14.0.0-py3-none-any.whl", hash = "sha256:1c9491e1951aac09caffd42f448ee3d04e58923ffe14993f6e83068dc395d7e0", size = 243229, upload-time = "2025-03-30T14:15:12.283Z" }, ] +[[package]] +name = "rpds-py" +version = "0.27.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e9/dd/2c0cbe774744272b0ae725f44032c77bdcab6e8bcf544bffa3b6e70c8dba/rpds_py-0.27.1.tar.gz", hash = "sha256:26a1c73171d10b7acccbded82bf6a586ab8203601e565badc74bbbf8bc5a10f8", size = 27479, upload-time = "2025-08-27T12:16:36.024Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bd/fe/38de28dee5df58b8198c743fe2bea0c785c6d40941b9950bac4cdb71a014/rpds_py-0.27.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:ae2775c1973e3c30316892737b91f9283f9908e3cc7625b9331271eaaed7dc90", size = 361887, upload-time = "2025-08-27T12:13:10.233Z" }, + { url = "https://files.pythonhosted.org/packages/7c/9a/4b6c7eedc7dd90986bf0fab6ea2a091ec11c01b15f8ba0a14d3f80450468/rpds_py-0.27.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2643400120f55c8a96f7c9d858f7be0c88d383cd4653ae2cf0d0c88f668073e5", size = 345795, upload-time = "2025-08-27T12:13:11.65Z" }, + { url = 
"https://files.pythonhosted.org/packages/6f/0e/e650e1b81922847a09cca820237b0edee69416a01268b7754d506ade11ad/rpds_py-0.27.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:16323f674c089b0360674a4abd28d5042947d54ba620f72514d69be4ff64845e", size = 385121, upload-time = "2025-08-27T12:13:13.008Z" }, + { url = "https://files.pythonhosted.org/packages/1b/ea/b306067a712988e2bff00dcc7c8f31d26c29b6d5931b461aa4b60a013e33/rpds_py-0.27.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9a1f4814b65eacac94a00fc9a526e3fdafd78e439469644032032d0d63de4881", size = 398976, upload-time = "2025-08-27T12:13:14.368Z" }, + { url = "https://files.pythonhosted.org/packages/2c/0a/26dc43c8840cb8fe239fe12dbc8d8de40f2365e838f3d395835dde72f0e5/rpds_py-0.27.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ba32c16b064267b22f1850a34051121d423b6f7338a12b9459550eb2096e7ec", size = 525953, upload-time = "2025-08-27T12:13:15.774Z" }, + { url = "https://files.pythonhosted.org/packages/22/14/c85e8127b573aaf3a0cbd7fbb8c9c99e735a4a02180c84da2a463b766e9e/rpds_py-0.27.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e5c20f33fd10485b80f65e800bbe5f6785af510b9f4056c5a3c612ebc83ba6cb", size = 407915, upload-time = "2025-08-27T12:13:17.379Z" }, + { url = "https://files.pythonhosted.org/packages/ed/7b/8f4fee9ba1fb5ec856eb22d725a4efa3deb47f769597c809e03578b0f9d9/rpds_py-0.27.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:466bfe65bd932da36ff279ddd92de56b042f2266d752719beb97b08526268ec5", size = 386883, upload-time = "2025-08-27T12:13:18.704Z" }, + { url = "https://files.pythonhosted.org/packages/86/47/28fa6d60f8b74fcdceba81b272f8d9836ac0340570f68f5df6b41838547b/rpds_py-0.27.1-cp312-cp312-manylinux_2_31_riscv64.whl", hash = "sha256:41e532bbdcb57c92ba3be62c42e9f096431b4cf478da9bc3bc6ce5c38ab7ba7a", size = 405699, upload-time = "2025-08-27T12:13:20.089Z" }, + { url = 
"https://files.pythonhosted.org/packages/d0/fd/c5987b5e054548df56953a21fe2ebed51fc1ec7c8f24fd41c067b68c4a0a/rpds_py-0.27.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f149826d742b406579466283769a8ea448eed82a789af0ed17b0cd5770433444", size = 423713, upload-time = "2025-08-27T12:13:21.436Z" }, + { url = "https://files.pythonhosted.org/packages/ac/ba/3c4978b54a73ed19a7d74531be37a8bcc542d917c770e14d372b8daea186/rpds_py-0.27.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:80c60cfb5310677bd67cb1e85a1e8eb52e12529545441b43e6f14d90b878775a", size = 562324, upload-time = "2025-08-27T12:13:22.789Z" }, + { url = "https://files.pythonhosted.org/packages/b5/6c/6943a91768fec16db09a42b08644b960cff540c66aab89b74be6d4a144ba/rpds_py-0.27.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:7ee6521b9baf06085f62ba9c7a3e5becffbc32480d2f1b351559c001c38ce4c1", size = 593646, upload-time = "2025-08-27T12:13:24.122Z" }, + { url = "https://files.pythonhosted.org/packages/11/73/9d7a8f4be5f4396f011a6bb7a19fe26303a0dac9064462f5651ced2f572f/rpds_py-0.27.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a512c8263249a9d68cac08b05dd59d2b3f2061d99b322813cbcc14c3c7421998", size = 558137, upload-time = "2025-08-27T12:13:25.557Z" }, + { url = "https://files.pythonhosted.org/packages/6e/96/6772cbfa0e2485bcceef8071de7821f81aeac8bb45fbfd5542a3e8108165/rpds_py-0.27.1-cp312-cp312-win32.whl", hash = "sha256:819064fa048ba01b6dadc5116f3ac48610435ac9a0058bbde98e569f9e785c39", size = 221343, upload-time = "2025-08-27T12:13:26.967Z" }, + { url = "https://files.pythonhosted.org/packages/67/b6/c82f0faa9af1c6a64669f73a17ee0eeef25aff30bb9a1c318509efe45d84/rpds_py-0.27.1-cp312-cp312-win_amd64.whl", hash = "sha256:d9199717881f13c32c4046a15f024971a3b78ad4ea029e8da6b86e5aa9cf4594", size = 232497, upload-time = "2025-08-27T12:13:28.326Z" }, + { url = 
"https://files.pythonhosted.org/packages/e1/96/2817b44bd2ed11aebacc9251da03689d56109b9aba5e311297b6902136e2/rpds_py-0.27.1-cp312-cp312-win_arm64.whl", hash = "sha256:33aa65b97826a0e885ef6e278fbd934e98cdcfed80b63946025f01e2f5b29502", size = 222790, upload-time = "2025-08-27T12:13:29.71Z" }, +] + [[package]] name = "s3transfer" version = "0.13.0" @@ -2168,6 +2332,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/a3/dc/17031897dae0efacfea57dfd3a82fdd2a2aeb58e0ff71b77b87e44edc772/setuptools-80.9.0-py3-none-any.whl", hash = "sha256:062d34222ad13e0cc312a4c02d73f059e86a4acbfbdea8f8f76b28c99f306922", size = 1201486, upload-time = "2025-05-27T00:56:49.664Z" }, ] +[[package]] +name = "shellingham" +version = "1.5.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/58/15/8b3609fd3830ef7b27b655beb4b4e9c62313a4e8da8c676e142cc210d58e/shellingham-1.5.4.tar.gz", hash = "sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de", size = 10310, upload-time = "2023-10-24T04:13:40.426Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e0/f9/0595336914c5619e5f28a1fb793285925a8cd4b432c9da0a987836c7f822/shellingham-1.5.4-py2.py3-none-any.whl", hash = "sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686", size = 9755, upload-time = "2023-10-24T04:13:38.866Z" }, +] + [[package]] name = "six" version = "1.17.0" @@ -2327,6 +2500,25 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/32/d5/f9a850d79b0851d1d4ef6456097579a9005b31fea68726a4ae5f2d82ddd9/threadpoolctl-3.6.0-py3-none-any.whl", hash = "sha256:43a0b8fd5a2928500110039e43a5eed8480b918967083ea48dc3ab9f13c4a7fb", size = 18638, upload-time = "2025-03-13T13:49:21.846Z" }, ] +[[package]] +name = "tiktoken" +version = "0.12.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "regex" }, + { name = "requests" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/7d/ab/4d017d0f76ec3171d469d80fc03dfbb4e48a4bcaddaa831b31d526f05edc/tiktoken-0.12.0.tar.gz", hash = "sha256:b18ba7ee2b093863978fcb14f74b3707cdc8d4d4d3836853ce7ec60772139931", size = 37806, upload-time = "2025-10-06T20:22:45.419Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a4/85/be65d39d6b647c79800fd9d29241d081d4eeb06271f383bb87200d74cf76/tiktoken-0.12.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b97f74aca0d78a1ff21b8cd9e9925714c15a9236d6ceacf5c7327c117e6e21e8", size = 1050728, upload-time = "2025-10-06T20:21:52.756Z" }, + { url = "https://files.pythonhosted.org/packages/4a/42/6573e9129bc55c9bf7300b3a35bef2c6b9117018acca0dc760ac2d93dffe/tiktoken-0.12.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2b90f5ad190a4bb7c3eb30c5fa32e1e182ca1ca79f05e49b448438c3e225a49b", size = 994049, upload-time = "2025-10-06T20:21:53.782Z" }, + { url = "https://files.pythonhosted.org/packages/66/c5/ed88504d2f4a5fd6856990b230b56d85a777feab84e6129af0822f5d0f70/tiktoken-0.12.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:65b26c7a780e2139e73acc193e5c63ac754021f160df919add909c1492c0fb37", size = 1129008, upload-time = "2025-10-06T20:21:54.832Z" }, + { url = "https://files.pythonhosted.org/packages/f4/90/3dae6cc5436137ebd38944d396b5849e167896fc2073da643a49f372dc4f/tiktoken-0.12.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:edde1ec917dfd21c1f2f8046b86348b0f54a2c0547f68149d8600859598769ad", size = 1152665, upload-time = "2025-10-06T20:21:56.129Z" }, + { url = "https://files.pythonhosted.org/packages/a3/fe/26df24ce53ffde419a42f5f53d755b995c9318908288c17ec3f3448313a3/tiktoken-0.12.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:35a2f8ddd3824608b3d650a000c1ef71f730d0c56486845705a8248da00f9fe5", size = 1194230, upload-time = "2025-10-06T20:21:57.546Z" }, + { url = 
"https://files.pythonhosted.org/packages/20/cc/b064cae1a0e9fac84b0d2c46b89f4e57051a5f41324e385d10225a984c24/tiktoken-0.12.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:83d16643edb7fa2c99eff2ab7733508aae1eebb03d5dfc46f5565862810f24e3", size = 1254688, upload-time = "2025-10-06T20:21:58.619Z" }, + { url = "https://files.pythonhosted.org/packages/81/10/b8523105c590c5b8349f2587e2fdfe51a69544bd5a76295fc20f2374f470/tiktoken-0.12.0-cp312-cp312-win_amd64.whl", hash = "sha256:ffc5288f34a8bc02e1ea7047b8d041104791d2ddbf42d1e5fa07822cbffe16bd", size = 878694, upload-time = "2025-10-06T20:21:59.876Z" }, +] + [[package]] name = "tokenizers" version = "0.21.1" @@ -2431,6 +2623,21 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/11/53/ce18470914ab6cfbec9384ee565d23c4d1c55f0548160b1c7b33000b11fd/triton-3.3.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b68c778f6c4218403a6bd01be7484f6dc9e20fe2083d22dd8aef33e3b87a10a3", size = 156504509, upload-time = "2025-04-09T20:27:40.413Z" }, ] +[[package]] +name = "typer" +version = "0.20.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "rich" }, + { name = "shellingham" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/8f/28/7c85c8032b91dbe79725b6f17d2fffc595dff06a35c7a30a37bef73a1ab4/typer-0.20.0.tar.gz", hash = "sha256:1aaf6494031793e4876fb0bacfa6a912b551cf43c1e63c800df8b1a866720c37", size = 106492, upload-time = "2025-10-20T17:03:49.445Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/78/64/7713ffe4b5983314e9d436a90d5bd4f63b6054e2aca783a3cfc44cb95bbf/typer-0.20.0-py3-none-any.whl", hash = "sha256:5b463df6793ec1dca6213a3cf4c0f03bc6e322ac5e16e13ddd622a889489784a", size = 47028, upload-time = "2025-10-20T17:03:47.617Z" }, +] + [[package]] name = "typing-extensions" version = "4.13.2" @@ -2625,6 +2832,26 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/0f/eb/04405305f290173acc0350eba6d2f1a794b57925df0398861a20fbafa415/xxhash-3.5.0-cp312-cp312-win_arm64.whl", hash = "sha256:586886c7e89cb9828bcd8a5686b12e161368e0064d040e225e72607b43858ba2", size = 26796, upload-time = "2024-08-17T09:18:45.29Z" }, ] +[[package]] +name = "yara-python" +version = "4.5.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/51/38/347d1fcde4edabd338d5872ca5759ccfb95ff1cf5207dafded981fd08c4f/yara_python-4.5.4.tar.gz", hash = "sha256:4c682170f3d5cb3a73aa1bd0dc9ab1c0957437b937b7a83ff6d7ffd366415b9c", size = 551142, upload-time = "2025-05-27T14:15:49.035Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5a/cc/deaf10b6b31ee81842176affce79d57c3b6df50e894cf6cdbb1f0eb12af2/yara_python-4.5.4-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:ade234700c492bce0efda96c1cdcd763425016e40df4a8d30c4c4e6897be5ace", size = 2471771, upload-time = "2025-05-27T14:14:47.381Z" }, + { url = "https://files.pythonhosted.org/packages/b1/47/9227c56450be00db6a3a50ccf88ba201945ae14a34b80b3aae4607954159/yara_python-4.5.4-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:e1dedd149be61992781f085b592d169d1d813f9b5ffc7c8c2b74e429b443414c", size = 208991, upload-time = "2025-05-27T14:14:48.832Z" }, + { url = "https://files.pythonhosted.org/packages/5b/0d/1f2e054f7ddf9fd4c873fccf63a08f2647b205398e11ea75cf44c161e702/yara_python-4.5.4-cp312-cp312-macosx_15_0_arm64.whl", hash = "sha256:92b233aae320ee9e59728ee23f9faf4a423ae407d4768b47c8f0e472a34dbae2", size = 2478413, upload-time = "2025-05-27T14:14:50.205Z" }, + { url = "https://files.pythonhosted.org/packages/10/ab/96e2d06c909ba07941d6d303f23624e46751a54d6fd358069275f982168c/yara_python-4.5.4-cp312-cp312-macosx_15_0_x86_64.whl", hash = "sha256:1f238f10d26e4701559f73a69b22e1e192a6fa20abdd76f57a7054566780aa89", size = 209401, upload-time = "2025-05-27T14:14:52.017Z" }, + { url = 
"https://files.pythonhosted.org/packages/df/7d/e51ecb0db87094976904a52eb521e3faf9c18f7c889b9d5cf996ae3bb680/yara_python-4.5.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d29d0e137e0d77dd110186369276e88381f784bdc45b5932a2fb3463e2a1b1c7", size = 2245326, upload-time = "2025-05-27T14:14:53.338Z" }, + { url = "https://files.pythonhosted.org/packages/4c/5e/fe93d8609b8470148ebbae111f209c6f208bb834cee64046fce0532fcc70/yara_python-4.5.4-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e7d0039d734705b123494acad7a00b67df171dd5b1c16ff7b18ff07578efd4cd", size = 2327041, upload-time = "2025-05-27T14:14:54.915Z" }, + { url = "https://files.pythonhosted.org/packages/52/06/104c4daa22e34a7edb49051798126c37f6280d4f1ea7e8888b043314e72d/yara_python-4.5.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e5eae935b05a9f8dc71df55a79c38f52abd93f8840310fe4e0d75fbd78284f24", size = 2332343, upload-time = "2025-05-27T14:14:56.424Z" }, + { url = "https://files.pythonhosted.org/packages/cd/38/4b788b8fe15faca08e4a52c0b3dc8787953115ce1811e7bf9439914b6a5b/yara_python-4.5.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:30fc7959394532c6e3f48faf59337f5da124f1630668258276b6cfa54e555a6e", size = 2949346, upload-time = "2025-05-27T14:14:57.924Z" }, + { url = "https://files.pythonhosted.org/packages/53/3a/c1d97172aa9672f381df78b4d3a9f60378f35431ff48f4a6c45037057e07/yara_python-4.5.4-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:e6d8c2acaf33931338fdb78aba8a68462b0151d833b2eeda712db87713ac2abf", size = 2421057, upload-time = "2025-05-27T14:15:00.118Z" }, + { url = "https://files.pythonhosted.org/packages/ba/cc/c6366d6d047f73594badd5444f6a32501e8b20ab1a7124837d305c08b42b/yara_python-4.5.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a3c7bc8cd0db5fb87ab579c755de83723030522f3c0cd5b3374044055a8ce6c6", size = 2639871, upload-time = "2025-05-27T14:15:02.63Z" }, + { url = 
"https://files.pythonhosted.org/packages/e6/5c/edacbd11db432ac04a37c9e3a7c569986256d9659d1859c6f828268dfeed/yara_python-4.5.4-cp312-cp312-win32.whl", hash = "sha256:d12e57101683e9270738a1bccf676747f93e86b5bc529e7a7fb7adf94f20bd77", size = 1447403, upload-time = "2025-05-27T14:15:04.193Z" }, + { url = "https://files.pythonhosted.org/packages/ad/e1/f6a72c155f3241360da890c218911d09bf63329eca9cfa1af64b1498339b/yara_python-4.5.4-cp312-cp312-win_amd64.whl", hash = "sha256:bf14a8af06b2b980a889bdc3f9e8ccd6e703d2b3fa1c98da5fd3a1c3b551eb47", size = 1825743, upload-time = "2025-05-27T14:15:05.678Z" }, +] + [[package]] name = "yarl" version = "1.20.1"