From 01c87a5425025a68a57b43c42219cb687d81da80 Mon Sep 17 00:00:00 2001 From: Edwin Okonkwo Date: Mon, 22 Dec 2025 06:10:13 +0100 Subject: [PATCH 01/12] feat: Add OpenAI and Vercel provider packages --- .github/workflows/ci.yml | 106 ++++ .github/workflows/release-please.yml | 114 ++++ .release-please-manifest.json | 4 +- Makefile | 42 ++ .../ai-providers/server-ai-openai/Makefile | 30 + .../ai-providers/server-ai-openai/README.md | 72 +++ .../server-ai-openai/pyproject.toml | 60 ++ .../ai-providers/server-ai-openai/setup.cfg | 7 + .../src/ldai_openai/__init__.py | 6 + .../src/ldai_openai/openai_provider.py | 253 +++++++++ .../server-ai-openai/tests/__init__.py | 2 + .../tests/test_openai_provider.py | 354 ++++++++++++ .../ai-providers/server-ai-vercel/Makefile | 30 + .../ai-providers/server-ai-vercel/README.md | 97 ++++ .../server-ai-vercel/pyproject.toml | 60 ++ .../ai-providers/server-ai-vercel/setup.cfg | 7 + .../src/ldai_vercel/__init__.py | 24 + .../server-ai-vercel/src/ldai_vercel/types.py | 118 ++++ .../src/ldai_vercel/vercel_provider.py | 394 +++++++++++++ .../server-ai-vercel/tests/__init__.py | 2 + .../tests/test_vercel_provider.py | 528 ++++++++++++++++++ release-please-config.json | 16 + 22 files changed, 2325 insertions(+), 1 deletion(-) create mode 100644 packages/ai-providers/server-ai-openai/Makefile create mode 100644 packages/ai-providers/server-ai-openai/README.md create mode 100644 packages/ai-providers/server-ai-openai/pyproject.toml create mode 100644 packages/ai-providers/server-ai-openai/setup.cfg create mode 100644 packages/ai-providers/server-ai-openai/src/ldai_openai/__init__.py create mode 100644 packages/ai-providers/server-ai-openai/src/ldai_openai/openai_provider.py create mode 100644 packages/ai-providers/server-ai-openai/tests/__init__.py create mode 100644 packages/ai-providers/server-ai-openai/tests/test_openai_provider.py create mode 100644 packages/ai-providers/server-ai-vercel/Makefile create mode 100644 packages/ai-providers/server-ai-vercel/README.md create mode 100644 packages/ai-providers/server-ai-vercel/pyproject.toml create mode 100644 packages/ai-providers/server-ai-vercel/setup.cfg create mode 100644 packages/ai-providers/server-ai-vercel/src/ldai_vercel/__init__.py create mode 100644 packages/ai-providers/server-ai-vercel/src/ldai_vercel/types.py create mode 100644 packages/ai-providers/server-ai-vercel/src/ldai_vercel/vercel_provider.py create mode 100644 packages/ai-providers/server-ai-vercel/tests/__init__.py create mode 100644 packages/ai-providers/server-ai-vercel/tests/test_vercel_provider.py diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 3584e65..e4ee0f0 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -111,3 +111,109 @@ jobs: - name: Run tests run: make -C packages/ai-providers/server-ai-langchain test + + server-ai-openai-linux: + runs-on: ubuntu-latest + strategy: + matrix: + python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"] + + steps: + - uses: actions/checkout@v4 + + - uses: ./.github/actions/ci + with: + workspace_path: packages/ai-providers/server-ai-openai + python_version: ${{ matrix.python-version }} + + - uses: ./.github/actions/build + with: + workspace_path: packages/ai-providers/server-ai-openai + + server-ai-openai-windows: + runs-on: windows-latest + defaults: + run: + shell: powershell + + strategy: + matrix: + python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"] + + steps: + - uses: actions/checkout@v4 + + - name: Set up Python ${{ matrix.python-version }} + 
uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + + - name: Install poetry + uses: abatilo/actions-poetry@7b6d33e44b4f08d7021a1dee3c044e9c253d6439 + + - name: Configure poetry for local virtualenvs + run: poetry config virtualenvs.in-project true + + - name: Install server-ai dependency first + working-directory: packages/sdk/server-ai + run: poetry install + + - name: Install requirements + working-directory: packages/ai-providers/server-ai-openai + run: poetry install + + - name: Run tests + run: make -C packages/ai-providers/server-ai-openai test + + server-ai-vercel-linux: + runs-on: ubuntu-latest + strategy: + matrix: + python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"] + + steps: + - uses: actions/checkout@v4 + + - uses: ./.github/actions/ci + with: + workspace_path: packages/ai-providers/server-ai-vercel + python_version: ${{ matrix.python-version }} + + - uses: ./.github/actions/build + with: + workspace_path: packages/ai-providers/server-ai-vercel + + server-ai-vercel-windows: + runs-on: windows-latest + defaults: + run: + shell: powershell + + strategy: + matrix: + python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"] + + steps: + - uses: actions/checkout@v4 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + + - name: Install poetry + uses: abatilo/actions-poetry@7b6d33e44b4f08d7021a1dee3c044e9c253d6439 + + - name: Configure poetry for local virtualenvs + run: poetry config virtualenvs.in-project true + + - name: Install server-ai dependency first + working-directory: packages/sdk/server-ai + run: poetry install + + - name: Install requirements + working-directory: packages/ai-providers/server-ai-vercel + run: poetry install + + - name: Run tests + run: make -C packages/ai-providers/server-ai-vercel test diff --git a/.github/workflows/release-please.yml b/.github/workflows/release-please.yml index 051f969..ce0bf51 100644 --- a/.github/workflows/release-please.yml +++ b/.github/workflows/release-please.yml @@ -26,6 +26,8 @@ on: options: - packages/sdk/server-ai - packages/ai-providers/server-ai-langchain + - packages/ai-providers/server-ai-openai + - packages/ai-providers/server-ai-vercel dry_run: description: 'Is this a dry run. If so no package will be published.' 
type: boolean @@ -43,6 +45,10 @@ jobs: package-server-ai-tag-name: ${{ steps.release.outputs['packages/sdk/server-ai--tag_name'] }} package-server-ai-langchain-released: ${{ steps.release.outputs['packages/ai-providers/server-ai-langchain--release_created'] }} package-server-ai-langchain-tag-name: ${{ steps.release.outputs['packages/ai-providers/server-ai-langchain--tag_name'] }} + package-server-ai-openai-released: ${{ steps.release.outputs['packages/ai-providers/server-ai-openai--release_created'] }} + package-server-ai-openai-tag-name: ${{ steps.release.outputs['packages/ai-providers/server-ai-openai--tag_name'] }} + package-server-ai-vercel-released: ${{ steps.release.outputs['packages/ai-providers/server-ai-vercel--release_created'] }} + package-server-ai-vercel-tag-name: ${{ steps.release.outputs['packages/ai-providers/server-ai-vercel--tag_name'] }} steps: - uses: googleapis/release-please-action@v4 id: release @@ -193,3 +199,111 @@ jobs: base64-subjects: "${{ needs.release-server-ai-langchain.outputs.package-hashes }}" upload-assets: true upload-tag-name: ${{ needs.release-please.outputs.package-server-ai-langchain-tag-name }} + + release-server-ai-openai: + runs-on: ubuntu-latest + needs: ['release-please'] + permissions: + id-token: write # Needed for OIDC to get release secrets from AWS. + if: ${{ needs.release-please.outputs.package-server-ai-openai-released == 'true' }} + outputs: + package-hashes: ${{ steps.build.outputs.package-hashes }} + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - uses: actions/setup-python@v5 + with: + python-version: '3.11' + + - name: Install poetry + uses: abatilo/actions-poetry@7b6d33e44b4f08d7021a1dee3c044e9c253d6439 + + - uses: ./.github/actions/ci + with: + workspace_path: packages/ai-providers/server-ai-openai + + - uses: ./.github/actions/build + id: build + with: + workspace_path: packages/ai-providers/server-ai-openai + + - uses: launchdarkly/gh-actions/actions/release-secrets@release-secrets-v1.2.0 + name: 'Get PyPI token' + with: + aws_assume_role: ${{ vars.AWS_ROLE_ARN }} + ssm_parameter_pairs: '/production/common/releasing/pypi/token = PYPI_AUTH_TOKEN' + + - name: Publish to PyPI + uses: pypa/gh-action-pypi-publish@ed0c53931b1dc9bd32cbe73a98c7f6766f8a527e # v1.13.0 + with: + password: ${{ env.PYPI_AUTH_TOKEN }} + packages-dir: packages/ai-providers/server-ai-openai/dist/ + + release-server-ai-openai-provenance: + needs: ['release-please', 'release-server-ai-openai'] + if: ${{ needs.release-please.outputs.package-server-ai-openai-released == 'true' }} + permissions: + actions: read # Needed for detecting the GitHub Actions environment. + id-token: write # Needed for provenance signing. + contents: write # Needed for uploading assets to the release. + uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.0.0 + with: + base64-subjects: "${{ needs.release-server-ai-openai.outputs.package-hashes }}" + upload-assets: true + upload-tag-name: ${{ needs.release-please.outputs.package-server-ai-openai-tag-name }} + + release-server-ai-vercel: + runs-on: ubuntu-latest + needs: ['release-please'] + permissions: + id-token: write # Needed for OIDC to get release secrets from AWS. 
+ if: ${{ needs.release-please.outputs.package-server-ai-vercel-released == 'true' }} + outputs: + package-hashes: ${{ steps.build.outputs.package-hashes }} + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - uses: actions/setup-python@v5 + with: + python-version: '3.11' + + - name: Install poetry + uses: abatilo/actions-poetry@7b6d33e44b4f08d7021a1dee3c044e9c253d6439 + + - uses: ./.github/actions/ci + with: + workspace_path: packages/ai-providers/server-ai-vercel + + - uses: ./.github/actions/build + id: build + with: + workspace_path: packages/ai-providers/server-ai-vercel + + - uses: launchdarkly/gh-actions/actions/release-secrets@release-secrets-v1.2.0 + name: 'Get PyPI token' + with: + aws_assume_role: ${{ vars.AWS_ROLE_ARN }} + ssm_parameter_pairs: '/production/common/releasing/pypi/token = PYPI_AUTH_TOKEN' + + - name: Publish to PyPI + uses: pypa/gh-action-pypi-publish@ed0c53931b1dc9bd32cbe73a98c7f6766f8a527e # v1.13.0 + with: + password: ${{ env.PYPI_AUTH_TOKEN }} + packages-dir: packages/ai-providers/server-ai-vercel/dist/ + + release-server-ai-vercel-provenance: + needs: ['release-please', 'release-server-ai-vercel'] + if: ${{ needs.release-please.outputs.package-server-ai-vercel-released == 'true' }} + permissions: + actions: read # Needed for detecting the GitHub Actions environment. + id-token: write # Needed for provenance signing. + contents: write # Needed for uploading assets to the release. + uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.0.0 + with: + base64-subjects: "${{ needs.release-server-ai-vercel.outputs.package-hashes }}" + upload-assets: true + upload-tag-name: ${{ needs.release-please.outputs.package-server-ai-vercel-tag-name }} diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 9c01a32..0317cff 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,4 +1,6 @@ { "packages/sdk/server-ai": "0.11.0", - "packages/ai-providers/server-ai-langchain": "0.2.0" + "packages/ai-providers/server-ai-langchain": "0.2.0", + "packages/ai-providers/server-ai-openai": "0.1.0", + "packages/ai-providers/server-ai-vercel": "0.1.0" } diff --git a/Makefile b/Makefile index e895105..f56fbf9 100644 --- a/Makefile +++ b/Makefile @@ -9,6 +9,8 @@ BUILDDIR = $(SOURCEDIR)/build # Package paths SERVER_AI_PKG = packages/sdk/server-ai LANGCHAIN_PKG = packages/ai-providers/server-ai-langchain +OPENAI_PKG = packages/ai-providers/server-ai-openai +VERCEL_PKG = packages/ai-providers/server-ai-vercel .PHONY: help help: #! Show this help message @@ -25,6 +27,8 @@ help: #! Show this help message install: #! Install all packages $(MAKE) install-server-ai $(MAKE) install-langchain + $(MAKE) install-openai + $(MAKE) install-vercel .PHONY: install-server-ai install-server-ai: #! Install server-ai package @@ -34,6 +38,14 @@ install-server-ai: #! Install server-ai package install-langchain: #! Install langchain provider package $(MAKE) -C $(LANGCHAIN_PKG) install +.PHONY: install-openai +install-openai: #! Install openai provider package + $(MAKE) -C $(OPENAI_PKG) install + +.PHONY: install-vercel +install-vercel: #! Install vercel provider package + $(MAKE) -C $(VERCEL_PKG) install + # # Quality control checks # @@ -42,6 +54,8 @@ install-langchain: #! Install langchain provider package test: #! Run unit tests for all packages $(MAKE) test-server-ai $(MAKE) test-langchain + $(MAKE) test-openai + $(MAKE) test-vercel .PHONY: test-server-ai test-server-ai: #! 
Run unit tests for server-ai package @@ -51,10 +65,20 @@ test-server-ai: #! Run unit tests for server-ai package test-langchain: #! Run unit tests for langchain provider package $(MAKE) -C $(LANGCHAIN_PKG) test +.PHONY: test-openai +test-openai: #! Run unit tests for openai provider package + $(MAKE) -C $(OPENAI_PKG) test + +.PHONY: test-vercel +test-vercel: #! Run unit tests for vercel provider package + $(MAKE) -C $(VERCEL_PKG) test + .PHONY: lint lint: #! Run type analysis and linting checks for all packages $(MAKE) lint-server-ai $(MAKE) lint-langchain + $(MAKE) lint-openai + $(MAKE) lint-vercel .PHONY: lint-server-ai lint-server-ai: #! Run type analysis and linting checks for server-ai package @@ -64,6 +88,14 @@ lint-server-ai: #! Run type analysis and linting checks for server-ai package lint-langchain: #! Run type analysis and linting checks for langchain provider package $(MAKE) -C $(LANGCHAIN_PKG) lint +.PHONY: lint-openai +lint-openai: #! Run type analysis and linting checks for openai provider package + $(MAKE) -C $(OPENAI_PKG) lint + +.PHONY: lint-vercel +lint-vercel: #! Run type analysis and linting checks for vercel provider package + $(MAKE) -C $(VERCEL_PKG) lint + # # Build targets # @@ -72,6 +104,8 @@ lint-langchain: #! Run type analysis and linting checks for langchain provider p build: #! Build all packages $(MAKE) build-server-ai $(MAKE) build-langchain + $(MAKE) build-openai + $(MAKE) build-vercel .PHONY: build-server-ai build-server-ai: #! Build server-ai package @@ -81,6 +115,14 @@ build-server-ai: #! Build server-ai package build-langchain: #! Build langchain provider package $(MAKE) -C $(LANGCHAIN_PKG) build +.PHONY: build-openai +build-openai: #! Build openai provider package + $(MAKE) -C $(OPENAI_PKG) build + +.PHONY: build-vercel +build-vercel: #! Build vercel provider package + $(MAKE) -C $(VERCEL_PKG) build + # # Documentation generation # diff --git a/packages/ai-providers/server-ai-openai/Makefile b/packages/ai-providers/server-ai-openai/Makefile new file mode 100644 index 0000000..b14dfd9 --- /dev/null +++ b/packages/ai-providers/server-ai-openai/Makefile @@ -0,0 +1,30 @@ +PYTEST_FLAGS=-W error::SyntaxWarning + +.PHONY: help +help: #! Show this help message + @echo 'Usage: make [target] ... ' + @echo '' + @echo 'Targets:' + @grep -h -F '#!' $(MAKEFILE_LIST) | grep -v grep | sed 's/:.*#!/:/' | column -t -s":" + +.PHONY: install +install: #! Install package dependencies + poetry install + +.PHONY: test +test: #! Run unit tests +test: install + poetry run pytest $(PYTEST_FLAGS) + +.PHONY: lint +lint: #! Run type analysis and linting checks +lint: install + poetry run mypy src/ldai_openai + poetry run isort --check --atomic src/ldai_openai + poetry run pycodestyle src/ldai_openai + +.PHONY: build +build: #! Build distribution files +build: install + poetry build + diff --git a/packages/ai-providers/server-ai-openai/README.md b/packages/ai-providers/server-ai-openai/README.md new file mode 100644 index 0000000..5f7619b --- /dev/null +++ b/packages/ai-providers/server-ai-openai/README.md @@ -0,0 +1,72 @@ +# LaunchDarkly AI SDK OpenAI Provider + +[![PyPI](https://img.shields.io/pypi/v/launchdarkly-server-sdk-ai-openai-dev.svg?style=flat-square)](https://pypi.org/project/launchdarkly-server-sdk-ai-openai-dev/) + +This package provides an OpenAI integration for the LaunchDarkly AI SDK. 
+ +## Installation + +```bash +pip install launchdarkly-server-sdk-ai-openai-dev +``` + +## Quick Start + +```python +import asyncio +from ldai import AIClient +from ldai_openai import OpenAIProvider + +async def main(): + # Initialize the AI client + ai_client = AIClient(ld_client) + + # Get AI config + ai_config = ai_client.config( + "my-ai-config-key", + context, + default_value + ) + + # Create an OpenAI provider from the config + provider = await OpenAIProvider.create(ai_config) + + # Invoke the model + response = await provider.invoke_model(ai_config.messages) + print(response.message.content) + +asyncio.run(main()) +``` + +## Features + +- Full integration with OpenAI's chat completions API +- Automatic token usage tracking +- Support for structured output (JSON schema) +- Static utility methods for custom integrations + +## API Reference + +### OpenAIProvider + +#### Constructor + +```python +OpenAIProvider(client: OpenAI, model_name: str, parameters: Dict[str, Any], logger: Optional[Any] = None) +``` + +#### Static Methods + +- `create(ai_config: AIConfigKind, logger: Optional[Any] = None) -> OpenAIProvider` - Factory method to create a provider from an AI config +- `get_ai_metrics_from_response(response: Any) -> LDAIMetrics` - Extract metrics from an OpenAI response + +#### Instance Methods + +- `invoke_model(messages: List[LDMessage]) -> ChatResponse` - Invoke the model with messages +- `invoke_structured_model(messages: List[LDMessage], response_structure: Dict[str, Any]) -> StructuredResponse` - Invoke the model with structured output +- `get_client() -> OpenAI` - Get the underlying OpenAI client + +## License + +Apache-2.0 + diff --git a/packages/ai-providers/server-ai-openai/pyproject.toml b/packages/ai-providers/server-ai-openai/pyproject.toml new file mode 100644 index 0000000..715ee4e --- /dev/null +++ b/packages/ai-providers/server-ai-openai/pyproject.toml @@ -0,0 +1,60 @@ +[tool.poetry] +# TODO: Rename before official release +name = "launchdarkly-server-sdk-ai-openai-dev" +version = "0.1.0" +description = "LaunchDarkly AI SDK OpenAI Provider" +authors = ["LaunchDarkly "] +license = "Apache-2.0" +readme = "README.md" +homepage = "https://docs.launchdarkly.com/sdk/ai/python" +repository = "https://github.com/launchdarkly/python-server-sdk-ai" +classifiers = [ + "Intended Audience :: Developers", + "License :: OSI Approved :: Apache Software License", + "Operating System :: OS Independent", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", + "Topic :: Software Development", + "Topic :: Software Development :: Libraries", +] +packages = [{ include = "ldai_openai", from = "src" }] + +[tool.poetry.dependencies] +python = ">=3.9,<4" +launchdarkly-server-sdk-ai = ">=0.11.0" +openai = ">=1.0.0" + +[tool.poetry.group.dev.dependencies] +pytest = ">=2.8" +pytest-cov = ">=2.4.0" +pytest-asyncio = ">=0.21.0" +mypy = "==1.18.2" +pycodestyle = ">=2.11.0" +isort = ">=5.12.0" + +[tool.mypy] +python_version = "3.9" +ignore_missing_imports = true +install_types = true +non_interactive = true + +[tool.isort] +profile = "black" +known_third_party = ["openai", "ldai"] +sections = ["FUTURE", "STDLIB", "THIRDPARTY", "FIRSTPARTY", "LOCALFOLDER"] + + +[tool.pytest.ini_options] +addopts = ["-ra"] +testpaths = ["tests"] +asyncio_mode = "auto" + + +[build-system] +requires = ["poetry-core"] 
+build-backend = "poetry.core.masonry.api" + diff --git a/packages/ai-providers/server-ai-openai/setup.cfg b/packages/ai-providers/server-ai-openai/setup.cfg new file mode 100644 index 0000000..3217550 --- /dev/null +++ b/packages/ai-providers/server-ai-openai/setup.cfg @@ -0,0 +1,7 @@ +[pycodestyle] +count = True +# E501 - max line length - will be left to the developer to determine if something is too long +# W503 - line break before binary operator - is not PEP8 compliant +ignore = E501, W503 +max-line-length = 200 + diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/__init__.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/__init__.py new file mode 100644 index 0000000..666a68f --- /dev/null +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/__init__.py @@ -0,0 +1,6 @@ +"""LaunchDarkly AI SDK OpenAI Provider.""" + +from ldai_openai.openai_provider import OpenAIProvider + +__all__ = ['OpenAIProvider'] + diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_provider.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_provider.py new file mode 100644 index 0000000..244d708 --- /dev/null +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_provider.py @@ -0,0 +1,253 @@ +"""OpenAI implementation of AIProvider for LaunchDarkly AI SDK.""" + +import json +import os +from typing import Any, Dict, Iterable, List, Optional, cast + +from openai import AsyncOpenAI +from openai.types.chat import ChatCompletionMessageParam + +from ldai import LDMessage +from ldai.models import AIConfigKind +from ldai.providers import AIProvider +from ldai.providers.types import ChatResponse, LDAIMetrics, StructuredResponse +from ldai.tracker import TokenUsage + + +class OpenAIProvider(AIProvider): + """ + OpenAI implementation of AIProvider. + + This provider integrates OpenAI's chat completions API with LaunchDarkly's tracking capabilities. + """ + + def __init__( + self, + client: AsyncOpenAI, + model_name: str, + parameters: Dict[str, Any], + logger: Optional[Any] = None + ): + """ + Initialize the OpenAI provider. + + :param client: An AsyncOpenAI client instance + :param model_name: The name of the model to use + :param parameters: Additional model parameters + :param logger: Optional logger for logging provider operations + """ + super().__init__(logger) + self._client = client + self._model_name = model_name + self._parameters = parameters + + # ============================================================================= + # MAIN FACTORY METHOD + # ============================================================================= + + @staticmethod + async def create(ai_config: AIConfigKind, logger: Optional[Any] = None) -> 'OpenAIProvider': + """ + Static factory method to create an OpenAI AIProvider from an AI configuration. 
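+        The underlying AsyncOpenAI client is created using the OPENAI_API_KEY environment variable.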
+ + :param ai_config: The LaunchDarkly AI configuration + :param logger: Optional logger for the provider + :return: Configured OpenAIProvider instance + """ + client = AsyncOpenAI( + api_key=os.environ.get('OPENAI_API_KEY'), + ) + + config_dict = ai_config.to_dict() + model_dict = config_dict.get('model') or {} + model_name = model_dict.get('name', '') + parameters = model_dict.get('parameters') or {} + + return OpenAIProvider(client, model_name, parameters, logger) + + # ============================================================================= + # INSTANCE METHODS (AIProvider Implementation) + # ============================================================================= + + async def invoke_model(self, messages: List[LDMessage]) -> ChatResponse: + """ + Invoke the OpenAI model with an array of messages. + + :param messages: Array of LDMessage objects representing the conversation + :return: ChatResponse containing the model's response and metrics + """ + try: + # Convert LDMessage to OpenAI message format + openai_messages: Iterable[ChatCompletionMessageParam] = cast( + Iterable[ChatCompletionMessageParam], + [{'role': msg.role, 'content': msg.content} for msg in messages] + ) + + response = await self._client.chat.completions.create( + model=self._model_name, + messages=openai_messages, + **self._parameters, + ) + + # Generate metrics early (assumes success by default) + metrics = OpenAIProvider.get_ai_metrics_from_response(response) + + # Safely extract the first choice content + content = '' + if response.choices and len(response.choices) > 0: + message = response.choices[0].message + if message and message.content: + content = message.content + + if not content: + if self.logger: + self.logger.warn('OpenAI response has no content available') + metrics = LDAIMetrics(success=False, usage=metrics.usage) + + return ChatResponse( + message=LDMessage(role='assistant', content=content), + metrics=metrics, + ) + except Exception as error: + if self.logger: + self.logger.warn(f'OpenAI model invocation failed: {error}') + + return ChatResponse( + message=LDMessage(role='assistant', content=''), + metrics=LDAIMetrics(success=False, usage=None), + ) + + async def invoke_structured_model( + self, + messages: List[LDMessage], + response_structure: Dict[str, Any], + ) -> StructuredResponse: + """ + Invoke the OpenAI model with structured output support. 
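+        The request uses OpenAI's json_schema response format with strict mode, and the returned content is parsed as JSON.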
+ + :param messages: Array of LDMessage objects representing the conversation + :param response_structure: Dictionary defining the JSON schema for output structure + :return: StructuredResponse containing the structured data + """ + try: + # Convert LDMessage to OpenAI message format + openai_messages: Iterable[ChatCompletionMessageParam] = cast( + Iterable[ChatCompletionMessageParam], + [{'role': msg.role, 'content': msg.content} for msg in messages] + ) + + response = await self._client.chat.completions.create( + model=self._model_name, + messages=openai_messages, + response_format={ # type: ignore[arg-type] + 'type': 'json_schema', + 'json_schema': { + 'name': 'structured_output', + 'schema': response_structure, + 'strict': True, + }, + }, + **self._parameters, + ) + + # Generate metrics early (assumes success by default) + metrics = OpenAIProvider.get_ai_metrics_from_response(response) + + # Safely extract the first choice content + content = '' + if response.choices and len(response.choices) > 0: + message = response.choices[0].message + if message and message.content: + content = message.content + + if not content: + if self.logger: + self.logger.warn('OpenAI structured response has no content available') + metrics = LDAIMetrics(success=False, usage=metrics.usage) + return StructuredResponse( + data={}, + raw_response='', + metrics=metrics, + ) + + try: + data = json.loads(content) + return StructuredResponse( + data=data, + raw_response=content, + metrics=metrics, + ) + except json.JSONDecodeError as parse_error: + if self.logger: + self.logger.warn(f'OpenAI structured response contains invalid JSON: {parse_error}') + metrics = LDAIMetrics(success=False, usage=metrics.usage) + return StructuredResponse( + data={}, + raw_response=content, + metrics=metrics, + ) + except Exception as error: + if self.logger: + self.logger.warn(f'OpenAI structured model invocation failed: {error}') + + return StructuredResponse( + data={}, + raw_response='', + metrics=LDAIMetrics(success=False, usage=None), + ) + + def get_client(self) -> AsyncOpenAI: + """ + Get the underlying OpenAI client instance. + + :return: The underlying AsyncOpenAI client + """ + return self._client + + # ============================================================================= + # STATIC UTILITY METHODS + # ============================================================================= + + @staticmethod + def get_ai_metrics_from_response(response: Any) -> LDAIMetrics: + """ + Get AI metrics from an OpenAI response. + + This method extracts token usage information and success status from OpenAI responses + and returns a LaunchDarkly AIMetrics object. + + :param response: The response from OpenAI chat completions API + :return: LDAIMetrics with success status and token usage + + Example: + response = await tracker.track_metrics_of( + lambda: client.chat.completions.create(config), + OpenAIProvider.get_ai_metrics_from_response + ) + """ + # Extract token usage if available + usage: Optional[TokenUsage] = None + if hasattr(response, 'usage') and response.usage: + usage = TokenUsage( + total=response.usage.total_tokens or 0, + input=response.usage.prompt_tokens or 0, + output=response.usage.completion_tokens or 0, + ) + + # OpenAI responses that complete successfully are considered successful by default + return LDAIMetrics(success=True, usage=usage) + + @staticmethod + def create_ai_metrics(openai_response: Any) -> LDAIMetrics: + """ + Create AI metrics information from an OpenAI response. 
+ + This method extracts token usage information and success status from OpenAI responses + and returns a LaunchDarkly AIMetrics object. + + :deprecated: Use `get_ai_metrics_from_response()` instead. + :param openai_response: The response from OpenAI chat completions API + :return: LDAIMetrics with success status and token usage + """ + return OpenAIProvider.get_ai_metrics_from_response(openai_response) + diff --git a/packages/ai-providers/server-ai-openai/tests/__init__.py b/packages/ai-providers/server-ai-openai/tests/__init__.py new file mode 100644 index 0000000..4a4a397 --- /dev/null +++ b/packages/ai-providers/server-ai-openai/tests/__init__.py @@ -0,0 +1,2 @@ +"""Tests for LaunchDarkly AI SDK OpenAI Provider.""" + diff --git a/packages/ai-providers/server-ai-openai/tests/test_openai_provider.py b/packages/ai-providers/server-ai-openai/tests/test_openai_provider.py new file mode 100644 index 0000000..457f9b0 --- /dev/null +++ b/packages/ai-providers/server-ai-openai/tests/test_openai_provider.py @@ -0,0 +1,354 @@ +"""Tests for OpenAI Provider.""" + +import pytest +from unittest.mock import AsyncMock, MagicMock, patch + +from ldai import LDMessage + +from ldai_openai import OpenAIProvider + + +class TestGetAIMetricsFromResponse: + """Tests for get_ai_metrics_from_response static method.""" + + def test_creates_metrics_with_success_true_and_token_usage(self): + """Should create metrics with success=True and token usage.""" + mock_response = MagicMock() + mock_response.usage = MagicMock() + mock_response.usage.prompt_tokens = 50 + mock_response.usage.completion_tokens = 50 + mock_response.usage.total_tokens = 100 + + result = OpenAIProvider.get_ai_metrics_from_response(mock_response) + + assert result.success is True + assert result.usage is not None + assert result.usage.total == 100 + assert result.usage.input == 50 + assert result.usage.output == 50 + + def test_creates_metrics_with_success_true_and_no_usage_when_usage_missing(self): + """Should create metrics with success=True and no usage when usage is missing.""" + mock_response = MagicMock() + mock_response.usage = None + + result = OpenAIProvider.get_ai_metrics_from_response(mock_response) + + assert result.success is True + assert result.usage is None + + def test_handles_partial_usage_data(self): + """Should handle partial usage data.""" + mock_response = MagicMock() + mock_response.usage = MagicMock() + mock_response.usage.prompt_tokens = 30 + mock_response.usage.completion_tokens = None + mock_response.usage.total_tokens = None + + result = OpenAIProvider.get_ai_metrics_from_response(mock_response) + + assert result.success is True + assert result.usage is not None + assert result.usage.total == 0 + assert result.usage.input == 30 + assert result.usage.output == 0 + + +class TestInvokeModel: + """Tests for invoke_model instance method.""" + + @pytest.fixture + def mock_client(self): + """Create a mock OpenAI client.""" + return MagicMock() + + @pytest.fixture + def mock_logger(self): + """Create a mock logger.""" + return MagicMock() + + @pytest.mark.asyncio + async def test_invokes_openai_chat_completions_and_returns_response(self, mock_client, mock_logger): + """Should invoke OpenAI chat completions and return response.""" + mock_response = MagicMock() + mock_response.choices = [MagicMock()] + mock_response.choices[0].message = MagicMock() + mock_response.choices[0].message.content = 'Hello! How can I help you today?' 
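+        # Usage metadata supplies the token counts asserted on the returned metrics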
+ mock_response.usage = MagicMock() + mock_response.usage.prompt_tokens = 10 + mock_response.usage.completion_tokens = 15 + mock_response.usage.total_tokens = 25 + + mock_client.chat = MagicMock() + mock_client.chat.completions = MagicMock() + mock_client.chat.completions.create = AsyncMock(return_value=mock_response) + + provider = OpenAIProvider(mock_client, 'gpt-3.5-turbo', {}, mock_logger) + messages = [LDMessage(role='user', content='Hello!')] + result = await provider.invoke_model(messages) + + mock_client.chat.completions.create.assert_called_once_with( + model='gpt-3.5-turbo', + messages=[{'role': 'user', 'content': 'Hello!'}], + ) + + assert result.message.role == 'assistant' + assert result.message.content == 'Hello! How can I help you today?' + assert result.metrics.success is True + assert result.metrics.usage is not None + assert result.metrics.usage.total == 25 + assert result.metrics.usage.input == 10 + assert result.metrics.usage.output == 15 + + @pytest.mark.asyncio + async def test_returns_unsuccessful_response_when_no_content(self, mock_client, mock_logger): + """Should return unsuccessful response when no content in response.""" + mock_response = MagicMock() + mock_response.choices = [MagicMock()] + mock_response.choices[0].message = MagicMock() + mock_response.choices[0].message.content = None + mock_response.usage = None + + mock_client.chat = MagicMock() + mock_client.chat.completions = MagicMock() + mock_client.chat.completions.create = AsyncMock(return_value=mock_response) + + provider = OpenAIProvider(mock_client, 'gpt-3.5-turbo', {}, mock_logger) + messages = [LDMessage(role='user', content='Hello!')] + result = await provider.invoke_model(messages) + + assert result.message.role == 'assistant' + assert result.message.content == '' + assert result.metrics.success is False + + @pytest.mark.asyncio + async def test_returns_unsuccessful_response_when_choices_empty(self, mock_client, mock_logger): + """Should return unsuccessful response when choices array is empty.""" + mock_response = MagicMock() + mock_response.choices = [] + mock_response.usage = None + + mock_client.chat = MagicMock() + mock_client.chat.completions = MagicMock() + mock_client.chat.completions.create = AsyncMock(return_value=mock_response) + + provider = OpenAIProvider(mock_client, 'gpt-3.5-turbo', {}, mock_logger) + messages = [LDMessage(role='user', content='Hello!')] + result = await provider.invoke_model(messages) + + assert result.message.role == 'assistant' + assert result.message.content == '' + assert result.metrics.success is False + + @pytest.mark.asyncio + async def test_returns_unsuccessful_response_when_exception_thrown(self, mock_client, mock_logger): + """Should return unsuccessful response when exception is thrown.""" + mock_client.chat = MagicMock() + mock_client.chat.completions = MagicMock() + mock_client.chat.completions.create = AsyncMock(side_effect=Exception('API Error')) + + provider = OpenAIProvider(mock_client, 'gpt-3.5-turbo', {}, mock_logger) + messages = [LDMessage(role='user', content='Hello!')] + result = await provider.invoke_model(messages) + + assert result.message.role == 'assistant' + assert result.message.content == '' + assert result.metrics.success is False + mock_logger.warn.assert_called() + + +class TestInvokeStructuredModel: + """Tests for invoke_structured_model instance method.""" + + @pytest.fixture + def mock_client(self): + """Create a mock OpenAI client.""" + return MagicMock() + + @pytest.fixture + def mock_logger(self): + """Create a mock 
logger.""" + return MagicMock() + + @pytest.mark.asyncio + async def test_invokes_openai_with_structured_output(self, mock_client, mock_logger): + """Should invoke OpenAI with structured output and return parsed response.""" + mock_response = MagicMock() + mock_response.choices = [MagicMock()] + mock_response.choices[0].message = MagicMock() + mock_response.choices[0].message.content = '{"name": "John", "age": 30, "city": "New York"}' + mock_response.usage = MagicMock() + mock_response.usage.prompt_tokens = 20 + mock_response.usage.completion_tokens = 10 + mock_response.usage.total_tokens = 30 + + mock_client.chat = MagicMock() + mock_client.chat.completions = MagicMock() + mock_client.chat.completions.create = AsyncMock(return_value=mock_response) + + provider = OpenAIProvider(mock_client, 'gpt-3.5-turbo', {}, mock_logger) + messages = [LDMessage(role='user', content='Tell me about a person')] + response_structure = { + 'type': 'object', + 'properties': { + 'name': {'type': 'string'}, + 'age': {'type': 'number'}, + 'city': {'type': 'string'}, + }, + 'required': ['name', 'age', 'city'], + } + + result = await provider.invoke_structured_model(messages, response_structure) + + assert result.data == {'name': 'John', 'age': 30, 'city': 'New York'} + assert result.raw_response == '{"name": "John", "age": 30, "city": "New York"}' + assert result.metrics.success is True + assert result.metrics.usage is not None + assert result.metrics.usage.total == 30 + assert result.metrics.usage.input == 20 + assert result.metrics.usage.output == 10 + + @pytest.mark.asyncio + async def test_returns_unsuccessful_when_no_content_in_structured_response(self, mock_client, mock_logger): + """Should return unsuccessful response when no content in structured response.""" + mock_response = MagicMock() + mock_response.choices = [MagicMock()] + mock_response.choices[0].message = MagicMock() + mock_response.choices[0].message.content = None + mock_response.usage = None + + mock_client.chat = MagicMock() + mock_client.chat.completions = MagicMock() + mock_client.chat.completions.create = AsyncMock(return_value=mock_response) + + provider = OpenAIProvider(mock_client, 'gpt-3.5-turbo', {}, mock_logger) + messages = [LDMessage(role='user', content='Tell me about a person')] + response_structure = {'type': 'object'} + + result = await provider.invoke_structured_model(messages, response_structure) + + assert result.data == {} + assert result.raw_response == '' + assert result.metrics.success is False + + @pytest.mark.asyncio + async def test_handles_json_parsing_errors(self, mock_client, mock_logger): + """Should handle JSON parsing errors gracefully.""" + mock_response = MagicMock() + mock_response.choices = [MagicMock()] + mock_response.choices[0].message = MagicMock() + mock_response.choices[0].message.content = 'invalid json content' + mock_response.usage = MagicMock() + mock_response.usage.prompt_tokens = 10 + mock_response.usage.completion_tokens = 5 + mock_response.usage.total_tokens = 15 + + mock_client.chat = MagicMock() + mock_client.chat.completions = MagicMock() + mock_client.chat.completions.create = AsyncMock(return_value=mock_response) + + provider = OpenAIProvider(mock_client, 'gpt-3.5-turbo', {}, mock_logger) + messages = [LDMessage(role='user', content='Tell me about a person')] + response_structure = {'type': 'object'} + + result = await provider.invoke_structured_model(messages, response_structure) + + assert result.data == {} + assert result.raw_response == 'invalid json content' + assert 
result.metrics.success is False + assert result.metrics.usage is not None + assert result.metrics.usage.total == 15 + mock_logger.warn.assert_called() + + @pytest.mark.asyncio + async def test_returns_unsuccessful_response_when_exception_thrown(self, mock_client, mock_logger): + """Should return unsuccessful response when exception is thrown.""" + mock_client.chat = MagicMock() + mock_client.chat.completions = MagicMock() + mock_client.chat.completions.create = AsyncMock(side_effect=Exception('API Error')) + + provider = OpenAIProvider(mock_client, 'gpt-3.5-turbo', {}, mock_logger) + messages = [LDMessage(role='user', content='Tell me about a person')] + response_structure = {'type': 'object'} + + result = await provider.invoke_structured_model(messages, response_structure) + + assert result.data == {} + assert result.raw_response == '' + assert result.metrics.success is False + mock_logger.warn.assert_called() + + +class TestGetClient: + """Tests for get_client instance method.""" + + def test_returns_underlying_client(self): + """Should return the underlying OpenAI client.""" + mock_client = MagicMock() + provider = OpenAIProvider(mock_client, 'gpt-3.5-turbo', {}) + + assert provider.get_client() is mock_client + + +class TestCreate: + """Tests for create static factory method.""" + + @pytest.mark.asyncio + async def test_creates_provider_with_correct_model_and_parameters(self): + """Should create OpenAIProvider with correct model and parameters.""" + mock_ai_config = MagicMock() + mock_ai_config.to_dict.return_value = { + 'model': { + 'name': 'gpt-4', + 'parameters': { + 'temperature': 0.7, + 'max_tokens': 1000, + }, + }, + 'provider': {'name': 'openai'}, + } + + with patch('ldai_openai.openai_provider.AsyncOpenAI') as mock_openai_class: + mock_client = MagicMock() + mock_openai_class.return_value = mock_client + + result = await OpenAIProvider.create(mock_ai_config) + + assert isinstance(result, OpenAIProvider) + assert result._model_name == 'gpt-4' + assert result._parameters == {'temperature': 0.7, 'max_tokens': 1000} + + @pytest.mark.asyncio + async def test_handles_missing_model_config(self): + """Should handle missing model configuration.""" + mock_ai_config = MagicMock() + mock_ai_config.to_dict.return_value = {} + + with patch('ldai_openai.openai_provider.AsyncOpenAI') as mock_openai_class: + mock_client = MagicMock() + mock_openai_class.return_value = mock_client + + result = await OpenAIProvider.create(mock_ai_config) + + assert isinstance(result, OpenAIProvider) + assert result._model_name == '' + assert result._parameters == {} + + +class TestCreateAIMetrics: + """Tests for deprecated create_ai_metrics static method.""" + + def test_delegates_to_get_ai_metrics_from_response(self): + """Should delegate to get_ai_metrics_from_response.""" + mock_response = MagicMock() + mock_response.usage = MagicMock() + mock_response.usage.prompt_tokens = 50 + mock_response.usage.completion_tokens = 50 + mock_response.usage.total_tokens = 100 + + result = OpenAIProvider.create_ai_metrics(mock_response) + + assert result.success is True + assert result.usage is not None + assert result.usage.total == 100 + diff --git a/packages/ai-providers/server-ai-vercel/Makefile b/packages/ai-providers/server-ai-vercel/Makefile new file mode 100644 index 0000000..805aedb --- /dev/null +++ b/packages/ai-providers/server-ai-vercel/Makefile @@ -0,0 +1,30 @@ +PYTEST_FLAGS=-W error::SyntaxWarning + +.PHONY: help +help: #! Show this help message + @echo 'Usage: make [target] ... 
' + @echo '' + @echo 'Targets:' + @grep -h -F '#!' $(MAKEFILE_LIST) | grep -v grep | sed 's/:.*#!/:/' | column -t -s":" + +.PHONY: install +install: #! Install package dependencies + poetry install + +.PHONY: test +test: #! Run unit tests +test: install + poetry run pytest $(PYTEST_FLAGS) + +.PHONY: lint +lint: #! Run type analysis and linting checks +lint: install + poetry run mypy src/ldai_vercel + poetry run isort --check --atomic src/ldai_vercel + poetry run pycodestyle src/ldai_vercel + +.PHONY: build +build: #! Build distribution files +build: install + poetry build + diff --git a/packages/ai-providers/server-ai-vercel/README.md b/packages/ai-providers/server-ai-vercel/README.md new file mode 100644 index 0000000..80f0580 --- /dev/null +++ b/packages/ai-providers/server-ai-vercel/README.md @@ -0,0 +1,97 @@ +# LaunchDarkly AI SDK Vercel Provider + +[![PyPI](https://img.shields.io/pypi/v/launchdarkly-server-sdk-ai-vercel-dev.svg?style=flat-square)](https://pypi.org/project/launchdarkly-server-sdk-ai-vercel-dev/) + +This package provides a multi-provider integration for the LaunchDarkly AI SDK, similar to the Vercel AI SDK in JavaScript. It uses [LiteLLM](https://github.com/BerriAI/litellm) under the hood to support 100+ LLM providers. + +## Installation + +```bash +pip install launchdarkly-server-sdk-ai-vercel-dev +``` + +## Supported Providers + +This provider supports all LiteLLM-compatible providers, including: + +- OpenAI +- Anthropic +- Google (Gemini) +- Cohere +- Mistral +- Azure OpenAI +- AWS Bedrock +- And many more... + +## Quick Start + +```python +import asyncio +from ldai import AIClient +from ldai_vercel import VercelProvider + +async def main(): + # Initialize the AI client + ai_client = AIClient(ld_client) + + # Get AI config + ai_config = ai_client.config( + "my-ai-config-key", + context, + default_value + ) + + # Create a Vercel provider from the config + provider = await VercelProvider.create(ai_config) + + # Invoke the model + response = await provider.invoke_model(ai_config.messages) + print(response.message.content) + +asyncio.run(main()) +``` + +## Features + +- Multi-provider support through LiteLLM +- Automatic token usage tracking +- Support for structured output (JSON schema) +- Parameter mapping between LaunchDarkly and LiteLLM formats +- Static utility methods for custom integrations + +## API Reference + +### VercelProvider + +#### Constructor + +```python +VercelProvider(model_name: str, parameters: VercelModelParameters, logger: Optional[Any] = None) +``` + +#### Static Methods + +- `create(ai_config: AIConfigKind, logger: Optional[Any] = None) -> VercelProvider` - Factory method to create a provider from an AI config +- `get_ai_metrics_from_response(response: Any) -> LDAIMetrics` - Extract metrics from a LiteLLM response +- `map_provider(ld_provider_name: str) -> str` - Map LD provider names to LiteLLM format +- `map_parameters(parameters: Dict) -> VercelModelParameters` - Map LD parameters to LiteLLM format + +#### Instance Methods + +- `invoke_model(messages: List[LDMessage]) -> ChatResponse` - Invoke the model with messages +- `invoke_structured_model(messages: List[LDMessage], response_structure: Dict[str, Any]) -> StructuredResponse` - Invoke the model with structured output + +## Environment Variables + +Make sure to set the appropriate API key environment variables for your chosen provider: + +- `OPENAI_API_KEY` - For OpenAI +- `ANTHROPIC_API_KEY` - For Anthropic +- `GOOGLE_API_KEY` - For Google/Gemini +- `COHERE_API_KEY` - For Cohere +- 
`MISTRAL_API_KEY` - For Mistral + +## License + +Apache-2.0 + diff --git a/packages/ai-providers/server-ai-vercel/pyproject.toml b/packages/ai-providers/server-ai-vercel/pyproject.toml new file mode 100644 index 0000000..e19274a --- /dev/null +++ b/packages/ai-providers/server-ai-vercel/pyproject.toml @@ -0,0 +1,60 @@ +[tool.poetry] +# TODO: Rename before official release +name = "launchdarkly-server-sdk-ai-vercel-dev" +version = "0.1.0" +description = "LaunchDarkly AI SDK Vercel Provider (Multi-Provider Support via LiteLLM)" +authors = ["LaunchDarkly "] +license = "Apache-2.0" +readme = "README.md" +homepage = "https://docs.launchdarkly.com/sdk/ai/python" +repository = "https://github.com/launchdarkly/python-server-sdk-ai" +classifiers = [ + "Intended Audience :: Developers", + "License :: OSI Approved :: Apache Software License", + "Operating System :: OS Independent", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", + "Topic :: Software Development", + "Topic :: Software Development :: Libraries", +] +packages = [{ include = "ldai_vercel", from = "src" }] + +[tool.poetry.dependencies] +python = ">=3.9,<4" +launchdarkly-server-sdk-ai = ">=0.11.0" +litellm = ">=1.0.0" + +[tool.poetry.group.dev.dependencies] +pytest = ">=2.8" +pytest-cov = ">=2.4.0" +pytest-asyncio = ">=0.21.0" +mypy = "==1.18.2" +pycodestyle = ">=2.11.0" +isort = ">=5.12.0" + +[tool.mypy] +python_version = "3.9" +ignore_missing_imports = true +install_types = true +non_interactive = true + +[tool.isort] +profile = "black" +known_third_party = ["litellm", "ldai"] +sections = ["FUTURE", "STDLIB", "THIRDPARTY", "FIRSTPARTY", "LOCALFOLDER"] + + +[tool.pytest.ini_options] +addopts = ["-ra"] +testpaths = ["tests"] +asyncio_mode = "auto" + + +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" + diff --git a/packages/ai-providers/server-ai-vercel/setup.cfg b/packages/ai-providers/server-ai-vercel/setup.cfg new file mode 100644 index 0000000..3217550 --- /dev/null +++ b/packages/ai-providers/server-ai-vercel/setup.cfg @@ -0,0 +1,7 @@ +[pycodestyle] +count = True +# E501 - max line length - will be left to the developer to determine if something is too long +# W503 - line break before binary operator - is not PEP8 compliant +ignore = E501, W503 +max-line-length = 200 + diff --git a/packages/ai-providers/server-ai-vercel/src/ldai_vercel/__init__.py b/packages/ai-providers/server-ai-vercel/src/ldai_vercel/__init__.py new file mode 100644 index 0000000..8409dfe --- /dev/null +++ b/packages/ai-providers/server-ai-vercel/src/ldai_vercel/__init__.py @@ -0,0 +1,24 @@ +"""LaunchDarkly AI SDK Vercel Provider (Multi-Provider Support via LiteLLM).""" + +from ldai_vercel.vercel_provider import VercelProvider +from ldai_vercel.types import ( + VercelModelParameters, + VercelSDKConfig, + VercelSDKMapOptions, + VercelProviderFunction, + ModelUsageTokens, + TextResponse, + StreamResponse, +) + +__all__ = [ + 'VercelProvider', + 'VercelModelParameters', + 'VercelSDKConfig', + 'VercelSDKMapOptions', + 'VercelProviderFunction', + 'ModelUsageTokens', + 'TextResponse', + 'StreamResponse', +] + diff --git a/packages/ai-providers/server-ai-vercel/src/ldai_vercel/types.py b/packages/ai-providers/server-ai-vercel/src/ldai_vercel/types.py new file mode 100644 index 0000000..22674a1 --- /dev/null +++ 
b/packages/ai-providers/server-ai-vercel/src/ldai_vercel/types.py @@ -0,0 +1,118 @@ +"""Types for Vercel AI provider.""" + +from dataclasses import dataclass, field +from typing import Any, Callable, Dict, List, Optional + +from ldai import LDMessage + + +# Type alias for provider function +VercelProviderFunction = Callable[[str], Any] + + +@dataclass +class VercelModelParameters: + """ + Vercel/LiteLLM model parameters. + + These are the parameters that can be passed to LiteLLM methods. + """ + max_tokens: Optional[int] = None + temperature: Optional[float] = None + top_p: Optional[float] = None + top_k: Optional[int] = None + presence_penalty: Optional[float] = None + frequency_penalty: Optional[float] = None + stop: Optional[List[str]] = None + seed: Optional[int] = None + + def to_dict(self) -> Dict[str, Any]: + """Convert to dictionary, excluding None values.""" + result: Dict[str, Any] = {} + if self.max_tokens is not None: + result['max_tokens'] = self.max_tokens + if self.temperature is not None: + result['temperature'] = self.temperature + if self.top_p is not None: + result['top_p'] = self.top_p + if self.top_k is not None: + result['top_k'] = self.top_k + if self.presence_penalty is not None: + result['presence_penalty'] = self.presence_penalty + if self.frequency_penalty is not None: + result['frequency_penalty'] = self.frequency_penalty + if self.stop is not None: + result['stop'] = self.stop + if self.seed is not None: + result['seed'] = self.seed + return result + + +@dataclass +class VercelSDKMapOptions: + """Options for mapping to Vercel/LiteLLM SDK configuration.""" + non_interpolated_messages: Optional[List[LDMessage]] = None + + +@dataclass +class VercelSDKConfig: + """Configuration format compatible with LiteLLM's completion methods.""" + model: str + messages: Optional[List[LDMessage]] = None + max_tokens: Optional[int] = None + temperature: Optional[float] = None + top_p: Optional[float] = None + top_k: Optional[int] = None + presence_penalty: Optional[float] = None + frequency_penalty: Optional[float] = None + stop: Optional[List[str]] = None + seed: Optional[int] = None + + def to_dict(self) -> Dict[str, Any]: + """Convert to dictionary, excluding None values.""" + result: Dict[str, Any] = {'model': self.model} + if self.messages is not None: + result['messages'] = [{'role': m.role, 'content': m.content} for m in self.messages] + if self.max_tokens is not None: + result['max_tokens'] = self.max_tokens + if self.temperature is not None: + result['temperature'] = self.temperature + if self.top_p is not None: + result['top_p'] = self.top_p + if self.top_k is not None: + result['top_k'] = self.top_k + if self.presence_penalty is not None: + result['presence_penalty'] = self.presence_penalty + if self.frequency_penalty is not None: + result['frequency_penalty'] = self.frequency_penalty + if self.stop is not None: + result['stop'] = self.stop + if self.seed is not None: + result['seed'] = self.seed + return result + + +@dataclass +class ModelUsageTokens: + """ + Token usage information from LiteLLM operations. 
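+    Field names mirror LiteLLM's usage object (prompt_tokens, completion_tokens, total_tokens).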
+ """ + prompt_tokens: Optional[int] = None + completion_tokens: Optional[int] = None + total_tokens: Optional[int] = None + + +@dataclass +class TextResponse: + """Response type for non-streaming LiteLLM operations.""" + finish_reason: Optional[str] = None + usage: Optional[ModelUsageTokens] = None + + +@dataclass +class StreamResponse: + """Response type for streaming LiteLLM operations.""" + # Note: In async streaming, these would be resolved after the stream completes + finish_reason: Optional[str] = None + usage: Optional[ModelUsageTokens] = None + diff --git a/packages/ai-providers/server-ai-vercel/src/ldai_vercel/vercel_provider.py b/packages/ai-providers/server-ai-vercel/src/ldai_vercel/vercel_provider.py new file mode 100644 index 0000000..cbe483a --- /dev/null +++ b/packages/ai-providers/server-ai-vercel/src/ldai_vercel/vercel_provider.py @@ -0,0 +1,394 @@ +"""Vercel AI implementation of AIProvider for LaunchDarkly AI SDK using LiteLLM.""" + +import json +from typing import Any, Callable, Dict, List, Optional, Union + +import litellm +from litellm import acompletion + +from ldai import LDMessage +from ldai.models import AIConfigKind +from ldai.providers import AIProvider +from ldai.providers.types import ChatResponse, LDAIMetrics, StructuredResponse +from ldai.tracker import TokenUsage + +from ldai_vercel.types import ( + ModelUsageTokens, + TextResponse, + VercelModelParameters, + VercelProviderFunction, + VercelSDKConfig, + VercelSDKMapOptions, +) + + +class VercelProvider(AIProvider): + """ + Vercel AI implementation of AIProvider using LiteLLM. + + This provider integrates multiple AI providers (OpenAI, Anthropic, Google, etc.) + with LaunchDarkly's tracking capabilities through LiteLLM. + """ + + def __init__( + self, + model_name: str, + parameters: VercelModelParameters, + logger: Optional[Any] = None + ): + """ + Initialize the Vercel provider. + + :param model_name: The full model name in LiteLLM format (e.g., 'openai/gpt-4', 'anthropic/claude-3-opus') + :param parameters: Model parameters + :param logger: Optional logger for logging provider operations + """ + super().__init__(logger) + self._model_name = model_name + self._parameters = parameters + + # ============================================================================= + # MAIN FACTORY METHODS + # ============================================================================= + + @staticmethod + async def create(ai_config: AIConfigKind, logger: Optional[Any] = None) -> 'VercelProvider': + """ + Static factory method to create a Vercel AIProvider from an AI configuration. + This method auto-detects the provider and creates the model. + + :param ai_config: The LaunchDarkly AI configuration + :param logger: Optional logger + :return: A configured VercelProvider + """ + model_name = VercelProvider.create_model_name(ai_config) + parameters = VercelProvider.map_parameters(ai_config.to_dict().get('model', {}).get('parameters')) + return VercelProvider(model_name, parameters, logger) + + # ============================================================================= + # INSTANCE METHODS (AIProvider Implementation) + # ============================================================================= + + async def invoke_model(self, messages: List[LDMessage]) -> ChatResponse: + """ + Invoke the AI model with an array of messages. 
+ + :param messages: Array of LDMessage objects representing the conversation + :return: ChatResponse containing the model's response and metrics + """ + try: + # Convert LDMessage to LiteLLM message format + litellm_messages = [ + {'role': msg.role, 'content': msg.content} + for msg in messages + ] + + # Call LiteLLM acompletion + response = await acompletion( + model=self._model_name, + messages=litellm_messages, + **self._parameters.to_dict(), + ) + + # Extract metrics including token usage and success status + metrics = VercelProvider.get_ai_metrics_from_response(response) + + # Create the assistant message + content = '' + if response.choices and len(response.choices) > 0: + message = response.choices[0].message + if message and message.content: + content = message.content + + return ChatResponse( + message=LDMessage(role='assistant', content=content), + metrics=metrics, + ) + except Exception as error: + if self.logger: + self.logger.warn(f'Vercel AI model invocation failed: {error}') + + return ChatResponse( + message=LDMessage(role='assistant', content=''), + metrics=LDAIMetrics(success=False, usage=None), + ) + + async def invoke_structured_model( + self, + messages: List[LDMessage], + response_structure: Dict[str, Any], + ) -> StructuredResponse: + """ + Invoke the AI model with structured output support. + + :param messages: Array of LDMessage objects representing the conversation + :param response_structure: Dictionary defining the JSON schema for output structure + :return: StructuredResponse containing the structured data + """ + try: + # Convert LDMessage to LiteLLM message format + litellm_messages = [ + {'role': msg.role, 'content': msg.content} + for msg in messages + ] + + # Call LiteLLM acompletion with JSON response format + response = await acompletion( + model=self._model_name, + messages=litellm_messages, + response_format={'type': 'json_object'}, + **self._parameters.to_dict(), + ) + + # Extract metrics + metrics = VercelProvider.get_ai_metrics_from_response(response) + + # Safely extract the content + content = '' + if response.choices and len(response.choices) > 0: + message = response.choices[0].message + if message and message.content: + content = message.content + + if not content: + if self.logger: + self.logger.warn('Vercel AI structured response has no content available') + metrics = LDAIMetrics(success=False, usage=metrics.usage) + return StructuredResponse( + data={}, + raw_response='', + metrics=metrics, + ) + + try: + data = json.loads(content) + return StructuredResponse( + data=data, + raw_response=content, + metrics=metrics, + ) + except json.JSONDecodeError as parse_error: + if self.logger: + self.logger.warn(f'Vercel AI structured response contains invalid JSON: {parse_error}') + metrics = LDAIMetrics(success=False, usage=metrics.usage) + return StructuredResponse( + data={}, + raw_response=content, + metrics=metrics, + ) + except Exception as error: + if self.logger: + self.logger.warn(f'Vercel AI structured model invocation failed: {error}') + + return StructuredResponse( + data={}, + raw_response='', + metrics=LDAIMetrics(success=False, usage=None), + ) + + def get_model_name(self) -> str: + """ + Get the model name. 
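+        The name is returned in LiteLLM format (for example, 'openai/gpt-4').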
+ + :return: The model name + """ + return self._model_name + + # ============================================================================= + # STATIC UTILITY METHODS + # ============================================================================= + + @staticmethod + def map_provider(ld_provider_name: str) -> str: + """ + Map LaunchDarkly provider names to LiteLLM provider prefixes. + + This method enables seamless integration between LaunchDarkly's standardized + provider naming and LiteLLM's naming conventions. + + :param ld_provider_name: LaunchDarkly provider name + :return: LiteLLM-compatible provider prefix + """ + lowercased_name = ld_provider_name.lower() + + mapping: Dict[str, str] = { + 'gemini': 'gemini', + 'google': 'gemini', + 'openai': 'openai', + 'anthropic': 'anthropic', + 'cohere': 'cohere', + 'mistral': 'mistral', + 'azure': 'azure', + 'bedrock': 'bedrock', + } + + return mapping.get(lowercased_name, lowercased_name) + + @staticmethod + def map_usage_data_to_ld_token_usage(usage_data: Any) -> TokenUsage: + """ + Map LiteLLM usage data to LaunchDarkly token usage. + + :param usage_data: Usage data from LiteLLM + :return: TokenUsage + """ + if not usage_data: + return TokenUsage(total=0, input=0, output=0) + + total_tokens = getattr(usage_data, 'total_tokens', None) or 0 + prompt_tokens = getattr(usage_data, 'prompt_tokens', None) or 0 + completion_tokens = getattr(usage_data, 'completion_tokens', None) or 0 + + return TokenUsage( + total=total_tokens, + input=prompt_tokens, + output=completion_tokens, + ) + + @staticmethod + def get_ai_metrics_from_response(response: Any) -> LDAIMetrics: + """ + Get AI metrics from a LiteLLM response. + + This method extracts token usage information and success status from LiteLLM responses + and returns a LaunchDarkly AIMetrics object. + + :param response: The response from LiteLLM + :return: LDAIMetrics with success status and token usage + + Example: + response = await tracker.track_metrics_of( + lambda: acompletion(config), + VercelProvider.get_ai_metrics_from_response + ) + """ + # Check finish reason for error + finish_reason = 'unknown' + if response and hasattr(response, 'choices') and response.choices: + choice = response.choices[0] + if hasattr(choice, 'finish_reason'): + finish_reason = choice.finish_reason or 'unknown' + + # Extract token usage if available + usage: Optional[TokenUsage] = None + if hasattr(response, 'usage') and response.usage: + usage = VercelProvider.map_usage_data_to_ld_token_usage(response.usage) + + success = finish_reason != 'error' + + return LDAIMetrics(success=success, usage=usage) + + @staticmethod + def create_ai_metrics(response: Any) -> LDAIMetrics: + """ + Create AI metrics information from a LiteLLM response. + + :deprecated: Use `get_ai_metrics_from_response()` instead. + :param response: The response from LiteLLM + :return: LDAIMetrics with success status and token usage + """ + return VercelProvider.get_ai_metrics_from_response(response) + + @staticmethod + def map_parameters(parameters: Optional[Dict[str, Any]]) -> VercelModelParameters: + """ + Map LaunchDarkly model parameters to LiteLLM parameters. 
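+
+ For example, a parameter dictionary such as ``{'max_completion_tokens': 256, 'temperature': 0.2}``
+ maps to ``VercelModelParameters(max_tokens=256, temperature=0.2)``.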
+ + Parameter mappings: + - max_tokens → max_tokens + - max_completion_tokens → max_tokens + - temperature → temperature + - top_p → top_p + - top_k → top_k + - presence_penalty → presence_penalty + - frequency_penalty → frequency_penalty + - stop → stop + - seed → seed + + :param parameters: The LaunchDarkly model parameters to map + :return: VercelModelParameters + """ + if not parameters: + return VercelModelParameters() + + return VercelModelParameters( + max_tokens=parameters.get('max_tokens') or parameters.get('max_completion_tokens'), + temperature=parameters.get('temperature'), + top_p=parameters.get('top_p'), + top_k=parameters.get('top_k'), + presence_penalty=parameters.get('presence_penalty'), + frequency_penalty=parameters.get('frequency_penalty'), + stop=parameters.get('stop'), + seed=parameters.get('seed'), + ) + + @staticmethod + def to_litellm_config( + ai_config: AIConfigKind, + options: Optional[VercelSDKMapOptions] = None, + ) -> VercelSDKConfig: + """ + Convert an AI configuration to LiteLLM configuration. + + :param ai_config: The LaunchDarkly AI configuration + :param options: Optional mapping options + :return: A configuration directly usable in LiteLLM + """ + config_dict = ai_config.to_dict() + model_dict = config_dict.get('model') or {} + provider_dict = config_dict.get('provider') or {} + + # Build full model name + provider_name = VercelProvider.map_provider(provider_dict.get('name', '')) + model_name = model_dict.get('name', '') + + full_model_name = f'{provider_name}/{model_name}' if provider_name else model_name + + # Merge messages from config and options + messages: Optional[List[LDMessage]] = None + config_messages = config_dict.get('messages') + if config_messages or (options and options.non_interpolated_messages): + messages = [] + if config_messages: + for msg in config_messages: + messages.append(LDMessage(role=msg['role'], content=msg['content'])) + if options and options.non_interpolated_messages: + messages.extend(options.non_interpolated_messages) + + # Map parameters using the shared mapping method + params = VercelProvider.map_parameters(model_dict.get('parameters')) + + # Build and return the LiteLLM configuration + return VercelSDKConfig( + model=full_model_name, + messages=messages, + max_tokens=params.max_tokens, + temperature=params.temperature, + top_p=params.top_p, + top_k=params.top_k, + presence_penalty=params.presence_penalty, + frequency_penalty=params.frequency_penalty, + stop=params.stop, + seed=params.seed, + ) + + @staticmethod + def create_model_name(ai_config: AIConfigKind) -> str: + """ + Create a LiteLLM model name from an AI configuration. 
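+
+ For example, a configuration with provider ``anthropic`` and model ``claude-3-opus``
+ yields ``'anthropic/claude-3-opus'``.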
+ + :param ai_config: The LaunchDarkly AI configuration + :return: A LiteLLM-compatible model name + """ + config_dict = ai_config.to_dict() + model_dict = config_dict.get('model') or {} + provider_dict = config_dict.get('provider') or {} + + provider_name = VercelProvider.map_provider(provider_dict.get('name', '')) + model_name = model_dict.get('name', '') + + # LiteLLM uses provider/model format + if provider_name: + return f'{provider_name}/{model_name}' + return model_name + diff --git a/packages/ai-providers/server-ai-vercel/tests/__init__.py b/packages/ai-providers/server-ai-vercel/tests/__init__.py new file mode 100644 index 0000000..8bf5209 --- /dev/null +++ b/packages/ai-providers/server-ai-vercel/tests/__init__.py @@ -0,0 +1,2 @@ +"""Tests for LaunchDarkly AI SDK Vercel Provider.""" + diff --git a/packages/ai-providers/server-ai-vercel/tests/test_vercel_provider.py b/packages/ai-providers/server-ai-vercel/tests/test_vercel_provider.py new file mode 100644 index 0000000..0e9c935 --- /dev/null +++ b/packages/ai-providers/server-ai-vercel/tests/test_vercel_provider.py @@ -0,0 +1,528 @@ +"""Tests for Vercel Provider.""" + +import pytest +from unittest.mock import AsyncMock, MagicMock, patch + +from ldai import LDMessage + +from ldai_vercel import VercelProvider, VercelModelParameters, VercelSDKMapOptions + + +class TestGetAIMetricsFromResponse: + """Tests for get_ai_metrics_from_response static method.""" + + def test_creates_metrics_with_success_true_and_token_usage(self): + """Should create metrics with success=True and token usage.""" + mock_response = MagicMock() + mock_response.choices = [MagicMock()] + mock_response.choices[0].finish_reason = 'stop' + mock_response.usage = MagicMock() + mock_response.usage.prompt_tokens = 50 + mock_response.usage.completion_tokens = 50 + mock_response.usage.total_tokens = 100 + + result = VercelProvider.get_ai_metrics_from_response(mock_response) + + assert result.success is True + assert result.usage is not None + assert result.usage.total == 100 + assert result.usage.input == 50 + assert result.usage.output == 50 + + def test_creates_metrics_with_success_true_and_no_usage_when_usage_missing(self): + """Should create metrics with success=True and no usage when usage is missing.""" + mock_response = MagicMock() + mock_response.choices = [MagicMock()] + mock_response.choices[0].finish_reason = 'stop' + mock_response.usage = None + + result = VercelProvider.get_ai_metrics_from_response(mock_response) + + assert result.success is True + assert result.usage is None + + def test_handles_partial_usage_data(self): + """Should handle partial usage data.""" + mock_response = MagicMock() + mock_response.choices = [MagicMock()] + mock_response.choices[0].finish_reason = 'stop' + mock_response.usage = MagicMock() + mock_response.usage.prompt_tokens = 30 + mock_response.usage.completion_tokens = None + mock_response.usage.total_tokens = None + + result = VercelProvider.get_ai_metrics_from_response(mock_response) + + assert result.success is True + assert result.usage is not None + assert result.usage.total == 0 + assert result.usage.input == 30 + assert result.usage.output == 0 + + def test_returns_success_false_for_error_finish_reason(self): + """Should return success=False for error finish reason.""" + mock_response = MagicMock() + mock_response.choices = [MagicMock()] + mock_response.choices[0].finish_reason = 'error' + mock_response.usage = MagicMock() + mock_response.usage.prompt_tokens = 50 + mock_response.usage.completion_tokens = 50 + 
mock_response.usage.total_tokens = 100 + + result = VercelProvider.get_ai_metrics_from_response(mock_response) + + assert result.success is False + assert result.usage is not None + assert result.usage.total == 100 + + +class TestInvokeModel: + """Tests for invoke_model instance method.""" + + @pytest.fixture + def mock_logger(self): + """Create a mock logger.""" + return MagicMock() + + @pytest.mark.asyncio + async def test_invokes_litellm_and_returns_response(self, mock_logger): + """Should invoke LiteLLM and return response.""" + mock_response = MagicMock() + mock_response.choices = [MagicMock()] + mock_response.choices[0].message = MagicMock() + mock_response.choices[0].message.content = 'Hello! How can I help you today?' + mock_response.choices[0].finish_reason = 'stop' + mock_response.usage = MagicMock() + mock_response.usage.prompt_tokens = 10 + mock_response.usage.completion_tokens = 15 + mock_response.usage.total_tokens = 25 + + with patch('ldai_vercel.vercel_provider.acompletion', new_callable=AsyncMock) as mock_acompletion: + mock_acompletion.return_value = mock_response + + provider = VercelProvider('openai/gpt-3.5-turbo', VercelModelParameters(), mock_logger) + messages = [LDMessage(role='user', content='Hello!')] + result = await provider.invoke_model(messages) + + mock_acompletion.assert_called_once_with( + model='openai/gpt-3.5-turbo', + messages=[{'role': 'user', 'content': 'Hello!'}], + ) + + assert result.message.role == 'assistant' + assert result.message.content == 'Hello! How can I help you today?' + assert result.metrics.success is True + assert result.metrics.usage is not None + assert result.metrics.usage.total == 25 + assert result.metrics.usage.input == 10 + assert result.metrics.usage.output == 15 + + @pytest.mark.asyncio + async def test_handles_response_without_usage_data(self, mock_logger): + """Should handle response without usage data.""" + mock_response = MagicMock() + mock_response.choices = [MagicMock()] + mock_response.choices[0].message = MagicMock() + mock_response.choices[0].message.content = 'Hello! How can I help you today?' + mock_response.choices[0].finish_reason = 'stop' + mock_response.usage = None + + with patch('ldai_vercel.vercel_provider.acompletion', new_callable=AsyncMock) as mock_acompletion: + mock_acompletion.return_value = mock_response + + provider = VercelProvider('openai/gpt-3.5-turbo', VercelModelParameters(), mock_logger) + messages = [LDMessage(role='user', content='Hello!')] + result = await provider.invoke_model(messages) + + assert result.message.role == 'assistant' + assert result.message.content == 'Hello! How can I help you today?' 
+ assert result.metrics.success is True + assert result.metrics.usage is None + + @pytest.mark.asyncio + async def test_handles_errors_and_returns_failure_metrics(self, mock_logger): + """Should handle errors and return failure metrics.""" + with patch('ldai_vercel.vercel_provider.acompletion', new_callable=AsyncMock) as mock_acompletion: + mock_acompletion.side_effect = Exception('API call failed') + + provider = VercelProvider('openai/gpt-3.5-turbo', VercelModelParameters(), mock_logger) + messages = [LDMessage(role='user', content='Hello!')] + result = await provider.invoke_model(messages) + + mock_logger.warn.assert_called() + assert result.message.role == 'assistant' + assert result.message.content == '' + assert result.metrics.success is False + + +class TestInvokeStructuredModel: + """Tests for invoke_structured_model instance method.""" + + @pytest.fixture + def mock_logger(self): + """Create a mock logger.""" + return MagicMock() + + @pytest.mark.asyncio + async def test_invokes_litellm_with_structured_output(self, mock_logger): + """Should invoke LiteLLM with structured output and return parsed response.""" + mock_response = MagicMock() + mock_response.choices = [MagicMock()] + mock_response.choices[0].message = MagicMock() + mock_response.choices[0].message.content = '{"name": "John Doe", "age": 30, "isActive": true}' + mock_response.choices[0].finish_reason = 'stop' + mock_response.usage = MagicMock() + mock_response.usage.prompt_tokens = 10 + mock_response.usage.completion_tokens = 15 + mock_response.usage.total_tokens = 25 + + with patch('ldai_vercel.vercel_provider.acompletion', new_callable=AsyncMock) as mock_acompletion: + mock_acompletion.return_value = mock_response + + provider = VercelProvider('openai/gpt-3.5-turbo', VercelModelParameters(), mock_logger) + messages = [LDMessage(role='user', content='Generate user data')] + response_structure = {'name': 'string', 'age': 0, 'isActive': True} + + result = await provider.invoke_structured_model(messages, response_structure) + + assert result.data == {'name': 'John Doe', 'age': 30, 'isActive': True} + assert result.raw_response == '{"name": "John Doe", "age": 30, "isActive": true}' + assert result.metrics.success is True + assert result.metrics.usage is not None + assert result.metrics.usage.total == 25 + + @pytest.mark.asyncio + async def test_handles_structured_response_without_usage_data(self, mock_logger): + """Should handle structured response without usage data.""" + mock_response = MagicMock() + mock_response.choices = [MagicMock()] + mock_response.choices[0].message = MagicMock() + mock_response.choices[0].message.content = '{"result": "success"}' + mock_response.choices[0].finish_reason = 'stop' + mock_response.usage = None + + with patch('ldai_vercel.vercel_provider.acompletion', new_callable=AsyncMock) as mock_acompletion: + mock_acompletion.return_value = mock_response + + provider = VercelProvider('openai/gpt-3.5-turbo', VercelModelParameters(), mock_logger) + messages = [LDMessage(role='user', content='Generate result')] + response_structure = {'result': 'string'} + + result = await provider.invoke_structured_model(messages, response_structure) + + assert result.data == {'result': 'success'} + assert result.metrics.success is True + assert result.metrics.usage is None + + @pytest.mark.asyncio + async def test_handles_errors_and_returns_failure_metrics(self, mock_logger): + """Should handle errors and return failure metrics.""" + with patch('ldai_vercel.vercel_provider.acompletion', new_callable=AsyncMock) as 
mock_acompletion: + mock_acompletion.side_effect = Exception('API call failed') + + provider = VercelProvider('openai/gpt-3.5-turbo', VercelModelParameters(), mock_logger) + messages = [LDMessage(role='user', content='Generate result')] + response_structure = {'result': 'string'} + + result = await provider.invoke_structured_model(messages, response_structure) + + mock_logger.warn.assert_called() + assert result.data == {} + assert result.raw_response == '' + assert result.metrics.success is False + + @pytest.mark.asyncio + async def test_handles_invalid_json_response(self, mock_logger): + """Should handle invalid JSON response gracefully.""" + mock_response = MagicMock() + mock_response.choices = [MagicMock()] + mock_response.choices[0].message = MagicMock() + mock_response.choices[0].message.content = 'invalid json content' + mock_response.choices[0].finish_reason = 'stop' + mock_response.usage = MagicMock() + mock_response.usage.prompt_tokens = 10 + mock_response.usage.completion_tokens = 5 + mock_response.usage.total_tokens = 15 + + with patch('ldai_vercel.vercel_provider.acompletion', new_callable=AsyncMock) as mock_acompletion: + mock_acompletion.return_value = mock_response + + provider = VercelProvider('openai/gpt-3.5-turbo', VercelModelParameters(), mock_logger) + messages = [LDMessage(role='user', content='Generate result')] + response_structure = {'result': 'string'} + + result = await provider.invoke_structured_model(messages, response_structure) + + assert result.data == {} + assert result.raw_response == 'invalid json content' + assert result.metrics.success is False + mock_logger.warn.assert_called() + + +class TestGetModelName: + """Tests for get_model_name instance method.""" + + def test_returns_model_name(self): + """Should return the model name.""" + provider = VercelProvider('openai/gpt-4', VercelModelParameters()) + assert provider.get_model_name() == 'openai/gpt-4' + + +class TestMapProvider: + """Tests for map_provider static method.""" + + def test_maps_gemini_to_gemini(self): + """Should map gemini to gemini.""" + assert VercelProvider.map_provider('gemini') == 'gemini' + assert VercelProvider.map_provider('Gemini') == 'gemini' + assert VercelProvider.map_provider('GEMINI') == 'gemini' + + def test_maps_google_to_gemini(self): + """Should map google to gemini.""" + assert VercelProvider.map_provider('google') == 'gemini' + + def test_returns_provider_name_unchanged_for_standard_providers(self): + """Should return provider name unchanged for standard providers.""" + assert VercelProvider.map_provider('openai') == 'openai' + assert VercelProvider.map_provider('anthropic') == 'anthropic' + assert VercelProvider.map_provider('cohere') == 'cohere' + assert VercelProvider.map_provider('mistral') == 'mistral' + + def test_returns_provider_name_unchanged_for_unmapped_providers(self): + """Should return provider name unchanged for unmapped providers.""" + assert VercelProvider.map_provider('unknown') == 'unknown' + + +class TestMapParameters: + """Tests for map_parameters static method.""" + + def test_maps_parameters_correctly(self): + """Should map parameters correctly.""" + parameters = { + 'max_tokens': 100, + 'temperature': 0.7, + 'top_p': 0.9, + 'top_k': 50, + 'presence_penalty': 0.1, + 'frequency_penalty': 0.2, + 'stop': ['stop1', 'stop2'], + 'seed': 42, + } + + result = VercelProvider.map_parameters(parameters) + + assert result.max_tokens == 100 + assert result.temperature == 0.7 + assert result.top_p == 0.9 + assert result.top_k == 50 + assert 
result.presence_penalty == 0.1 + assert result.frequency_penalty == 0.2 + assert result.stop == ['stop1', 'stop2'] + assert result.seed == 42 + + def test_handles_max_completion_tokens(self): + """Should use max_completion_tokens if max_tokens is not present.""" + parameters = { + 'max_completion_tokens': 200, + } + + result = VercelProvider.map_parameters(parameters) + + assert result.max_tokens == 200 + + def test_prefers_max_tokens_over_max_completion_tokens(self): + """Should prefer max_tokens over max_completion_tokens.""" + parameters = { + 'max_tokens': 100, + 'max_completion_tokens': 200, + } + + result = VercelProvider.map_parameters(parameters) + + assert result.max_tokens == 100 + + def test_returns_empty_parameters_for_none_input(self): + """Should return empty parameters for None input.""" + result = VercelProvider.map_parameters(None) + + assert result.max_tokens is None + assert result.temperature is None + + +class TestToLitellmConfig: + """Tests for to_litellm_config static method.""" + + def test_creates_config_with_correct_model_name(self): + """Should create config with correct model name.""" + mock_ai_config = MagicMock() + mock_ai_config.to_dict.return_value = { + 'model': {'name': 'gpt-4'}, + 'provider': {'name': 'openai'}, + } + + result = VercelProvider.to_litellm_config(mock_ai_config) + + assert result.model == 'openai/gpt-4' + + def test_handles_missing_provider(self): + """Should handle missing provider.""" + mock_ai_config = MagicMock() + mock_ai_config.to_dict.return_value = { + 'model': {'name': 'gpt-4'}, + } + + result = VercelProvider.to_litellm_config(mock_ai_config) + + assert result.model == 'gpt-4' + + def test_merges_messages_and_non_interpolated_messages(self): + """Should merge messages and non_interpolated_messages.""" + mock_ai_config = MagicMock() + mock_ai_config.to_dict.return_value = { + 'model': {'name': 'gpt-4'}, + 'provider': {'name': 'openai'}, + 'messages': [{'role': 'user', 'content': 'Hello'}], + } + + options = VercelSDKMapOptions( + non_interpolated_messages=[LDMessage(role='assistant', content='Hi there')] + ) + + result = VercelProvider.to_litellm_config(mock_ai_config, options) + + assert len(result.messages) == 2 + assert result.messages[0].role == 'user' + assert result.messages[0].content == 'Hello' + assert result.messages[1].role == 'assistant' + assert result.messages[1].content == 'Hi there' + + def test_maps_parameters(self): + """Should map parameters correctly.""" + mock_ai_config = MagicMock() + mock_ai_config.to_dict.return_value = { + 'model': { + 'name': 'gpt-4', + 'parameters': { + 'max_tokens': 100, + 'temperature': 0.7, + }, + }, + 'provider': {'name': 'openai'}, + } + + result = VercelProvider.to_litellm_config(mock_ai_config) + + assert result.max_tokens == 100 + assert result.temperature == 0.7 + + +class TestCreateModelName: + """Tests for create_model_name static method.""" + + def test_creates_model_name_with_provider(self): + """Should create model name with provider.""" + mock_ai_config = MagicMock() + mock_ai_config.to_dict.return_value = { + 'model': {'name': 'gpt-4'}, + 'provider': {'name': 'openai'}, + } + + result = VercelProvider.create_model_name(mock_ai_config) + + assert result == 'openai/gpt-4' + + def test_creates_model_name_without_provider(self): + """Should create model name without provider.""" + mock_ai_config = MagicMock() + mock_ai_config.to_dict.return_value = { + 'model': {'name': 'gpt-4'}, + } + + result = VercelProvider.create_model_name(mock_ai_config) + + assert result == 'gpt-4' + + 
def test_maps_provider_name(self): + """Should map provider name.""" + mock_ai_config = MagicMock() + mock_ai_config.to_dict.return_value = { + 'model': {'name': 'claude-3-opus'}, + 'provider': {'name': 'anthropic'}, + } + + result = VercelProvider.create_model_name(mock_ai_config) + + assert result == 'anthropic/claude-3-opus' + + +class TestCreate: + """Tests for create static factory method.""" + + @pytest.mark.asyncio + async def test_creates_provider_with_correct_model_and_parameters(self): + """Should create VercelProvider with correct model and parameters.""" + mock_ai_config = MagicMock() + mock_ai_config.to_dict.return_value = { + 'model': { + 'name': 'gpt-4', + 'parameters': { + 'temperature': 0.7, + 'max_tokens': 1000, + }, + }, + 'provider': {'name': 'openai'}, + } + + result = await VercelProvider.create(mock_ai_config) + + assert isinstance(result, VercelProvider) + assert result.get_model_name() == 'openai/gpt-4' + assert result._parameters.temperature == 0.7 + assert result._parameters.max_tokens == 1000 + + +class TestCreateAIMetrics: + """Tests for deprecated create_ai_metrics static method.""" + + def test_delegates_to_get_ai_metrics_from_response(self): + """Should delegate to get_ai_metrics_from_response.""" + mock_response = MagicMock() + mock_response.choices = [MagicMock()] + mock_response.choices[0].finish_reason = 'stop' + mock_response.usage = MagicMock() + mock_response.usage.prompt_tokens = 50 + mock_response.usage.completion_tokens = 50 + mock_response.usage.total_tokens = 100 + + result = VercelProvider.create_ai_metrics(mock_response) + + assert result.success is True + assert result.usage is not None + assert result.usage.total == 100 + + +class TestVercelModelParameters: + """Tests for VercelModelParameters dataclass.""" + + def test_to_dict_excludes_none_values(self): + """Should exclude None values from dict.""" + params = VercelModelParameters( + max_tokens=100, + temperature=0.7, + ) + + result = params.to_dict() + + assert result == { + 'max_tokens': 100, + 'temperature': 0.7, + } + + def test_to_dict_returns_empty_for_all_none(self): + """Should return empty dict for all None values.""" + params = VercelModelParameters() + + result = params.to_dict() + + assert result == {} + diff --git a/release-please-config.json b/release-please-config.json index 9852902..b83fca1 100644 --- a/release-please-config.json +++ b/release-please-config.json @@ -17,6 +17,22 @@ "include-v-in-tag": false, "extra-files": ["src/ldai_langchain/__init__.py"], "component": "launchdarkly-server-sdk-ai-langchain" + }, + "packages/ai-providers/server-ai-openai": { + "release-type": "python", + "versioning": "default", + "bump-minor-pre-major": true, + "include-v-in-tag": false, + "extra-files": ["src/ldai_openai/__init__.py"], + "component": "launchdarkly-server-sdk-ai-openai-dev" + }, + "packages/ai-providers/server-ai-vercel": { + "release-type": "python", + "versioning": "default", + "bump-minor-pre-major": true, + "include-v-in-tag": false, + "extra-files": ["src/ldai_vercel/__init__.py"], + "component": "launchdarkly-server-sdk-ai-vercel-dev" } } } From 8cfabc3e0bb7d5529ed1ed7f2f693be5456b18b2 Mon Sep 17 00:00:00 2001 From: Edwin Okonkwo Date: Mon, 22 Dec 2025 06:19:16 +0100 Subject: [PATCH 02/12] fix lint --- packages/ai-providers/server-ai-openai/setup.cfg | 7 +------ .../server-ai-openai/src/ldai_openai/__init__.py | 1 - .../src/ldai_openai/openai_provider.py | 6 ++---- packages/ai-providers/server-ai-vercel/setup.cfg | 7 +------ 
.../server-ai-vercel/src/ldai_vercel/__init__.py | 11 +++++------ .../server-ai-vercel/src/ldai_vercel/types.py | 2 -- .../src/ldai_vercel/vercel_provider.py | 4 +--- 7 files changed, 10 insertions(+), 28 deletions(-) diff --git a/packages/ai-providers/server-ai-openai/setup.cfg b/packages/ai-providers/server-ai-openai/setup.cfg index 3217550..6224f31 100644 --- a/packages/ai-providers/server-ai-openai/setup.cfg +++ b/packages/ai-providers/server-ai-openai/setup.cfg @@ -1,7 +1,2 @@ [pycodestyle] -count = True -# E501 - max line length - will be left to the developer to determine if something is too long -# W503 - line break before binary operator - is not PEP8 compliant -ignore = E501, W503 -max-line-length = 200 - +max-line-length = 120 diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/__init__.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/__init__.py index 666a68f..5d5120f 100644 --- a/packages/ai-providers/server-ai-openai/src/ldai_openai/__init__.py +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/__init__.py @@ -3,4 +3,3 @@ from ldai_openai.openai_provider import OpenAIProvider __all__ = ['OpenAIProvider'] - diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_provider.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_provider.py index 244d708..9fb1fbe 100644 --- a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_provider.py +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_provider.py @@ -4,14 +4,13 @@ import os from typing import Any, Dict, Iterable, List, Optional, cast -from openai import AsyncOpenAI -from openai.types.chat import ChatCompletionMessageParam - from ldai import LDMessage from ldai.models import AIConfigKind from ldai.providers import AIProvider from ldai.providers.types import ChatResponse, LDAIMetrics, StructuredResponse from ldai.tracker import TokenUsage +from openai import AsyncOpenAI +from openai.types.chat import ChatCompletionMessageParam class OpenAIProvider(AIProvider): @@ -250,4 +249,3 @@ def create_ai_metrics(openai_response: Any) -> LDAIMetrics: :return: LDAIMetrics with success status and token usage """ return OpenAIProvider.get_ai_metrics_from_response(openai_response) - diff --git a/packages/ai-providers/server-ai-vercel/setup.cfg b/packages/ai-providers/server-ai-vercel/setup.cfg index 3217550..6224f31 100644 --- a/packages/ai-providers/server-ai-vercel/setup.cfg +++ b/packages/ai-providers/server-ai-vercel/setup.cfg @@ -1,7 +1,2 @@ [pycodestyle] -count = True -# E501 - max line length - will be left to the developer to determine if something is too long -# W503 - line break before binary operator - is not PEP8 compliant -ignore = E501, W503 -max-line-length = 200 - +max-line-length = 120 diff --git a/packages/ai-providers/server-ai-vercel/src/ldai_vercel/__init__.py b/packages/ai-providers/server-ai-vercel/src/ldai_vercel/__init__.py index 8409dfe..f470479 100644 --- a/packages/ai-providers/server-ai-vercel/src/ldai_vercel/__init__.py +++ b/packages/ai-providers/server-ai-vercel/src/ldai_vercel/__init__.py @@ -1,15 +1,15 @@ """LaunchDarkly AI SDK Vercel Provider (Multi-Provider Support via LiteLLM).""" -from ldai_vercel.vercel_provider import VercelProvider from ldai_vercel.types import ( + ModelUsageTokens, + StreamResponse, + TextResponse, VercelModelParameters, + VercelProviderFunction, VercelSDKConfig, VercelSDKMapOptions, - VercelProviderFunction, - ModelUsageTokens, - TextResponse, - StreamResponse, ) +from 
ldai_vercel.vercel_provider import VercelProvider __all__ = [ 'VercelProvider', @@ -21,4 +21,3 @@ 'TextResponse', 'StreamResponse', ] - diff --git a/packages/ai-providers/server-ai-vercel/src/ldai_vercel/types.py b/packages/ai-providers/server-ai-vercel/src/ldai_vercel/types.py index 22674a1..1a59205 100644 --- a/packages/ai-providers/server-ai-vercel/src/ldai_vercel/types.py +++ b/packages/ai-providers/server-ai-vercel/src/ldai_vercel/types.py @@ -5,7 +5,6 @@ from ldai import LDMessage - # Type alias for provider function VercelProviderFunction = Callable[[str], Any] @@ -115,4 +114,3 @@ class StreamResponse: # Note: In async streaming, these would be resolved after the stream completes finish_reason: Optional[str] = None usage: Optional[ModelUsageTokens] = None - diff --git a/packages/ai-providers/server-ai-vercel/src/ldai_vercel/vercel_provider.py b/packages/ai-providers/server-ai-vercel/src/ldai_vercel/vercel_provider.py index cbe483a..994fea8 100644 --- a/packages/ai-providers/server-ai-vercel/src/ldai_vercel/vercel_provider.py +++ b/packages/ai-providers/server-ai-vercel/src/ldai_vercel/vercel_provider.py @@ -4,13 +4,12 @@ from typing import Any, Callable, Dict, List, Optional, Union import litellm -from litellm import acompletion - from ldai import LDMessage from ldai.models import AIConfigKind from ldai.providers import AIProvider from ldai.providers.types import ChatResponse, LDAIMetrics, StructuredResponse from ldai.tracker import TokenUsage +from litellm import acompletion from ldai_vercel.types import ( ModelUsageTokens, @@ -391,4 +390,3 @@ def create_model_name(ai_config: AIConfigKind) -> str: if provider_name: return f'{provider_name}/{model_name}' return model_name - From 2ea03ac3ac74c38977a046a6aa4c9c94a309acd5 Mon Sep 17 00:00:00 2001 From: Edwin Okonkwo Date: Mon, 22 Dec 2025 06:24:38 +0100 Subject: [PATCH 03/12] fixes --- packages/ai-providers/server-ai-openai/pyproject.toml | 2 +- packages/ai-providers/server-ai-vercel/pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/ai-providers/server-ai-openai/pyproject.toml b/packages/ai-providers/server-ai-openai/pyproject.toml index 715ee4e..df7392f 100644 --- a/packages/ai-providers/server-ai-openai/pyproject.toml +++ b/packages/ai-providers/server-ai-openai/pyproject.toml @@ -31,7 +31,7 @@ openai = ">=1.0.0" [tool.poetry.group.dev.dependencies] pytest = ">=2.8" pytest-cov = ">=2.4.0" -pytest-asyncio = ">=0.21.0" +pytest-asyncio = ">=0.21.0,<1.0.0" mypy = "==1.18.2" pycodestyle = ">=2.11.0" isort = ">=5.12.0" diff --git a/packages/ai-providers/server-ai-vercel/pyproject.toml b/packages/ai-providers/server-ai-vercel/pyproject.toml index e19274a..775e198 100644 --- a/packages/ai-providers/server-ai-vercel/pyproject.toml +++ b/packages/ai-providers/server-ai-vercel/pyproject.toml @@ -31,7 +31,7 @@ litellm = ">=1.0.0" [tool.poetry.group.dev.dependencies] pytest = ">=2.8" pytest-cov = ">=2.4.0" -pytest-asyncio = ">=0.21.0" +pytest-asyncio = ">=0.21.0,<1.0.0" mypy = "==1.18.2" pycodestyle = ">=2.11.0" isort = ">=5.12.0" From 820b202f9743bf68a48ddbf207959001045c68ee Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Wed, 24 Dec 2025 23:26:02 +0000 Subject: [PATCH 04/12] remove vercel package --- .github/workflows/ci.yml | 53 -- .github/workflows/release-please.yml | 57 -- .release-please-manifest.json | 3 +- Makefile | 21 - .../ai-providers/server-ai-vercel/Makefile | 30 - .../ai-providers/server-ai-vercel/README.md | 97 ---- .../server-ai-vercel/pyproject.toml | 60 -- 
.../ai-providers/server-ai-vercel/setup.cfg | 2 - .../src/ldai_vercel/__init__.py | 23 - .../server-ai-vercel/src/ldai_vercel/types.py | 116 ---- .../src/ldai_vercel/vercel_provider.py | 392 ------------- .../server-ai-vercel/tests/__init__.py | 2 - .../tests/test_vercel_provider.py | 528 ------------------ release-please-config.json | 8 - 14 files changed, 1 insertion(+), 1391 deletions(-) delete mode 100644 packages/ai-providers/server-ai-vercel/Makefile delete mode 100644 packages/ai-providers/server-ai-vercel/README.md delete mode 100644 packages/ai-providers/server-ai-vercel/pyproject.toml delete mode 100644 packages/ai-providers/server-ai-vercel/setup.cfg delete mode 100644 packages/ai-providers/server-ai-vercel/src/ldai_vercel/__init__.py delete mode 100644 packages/ai-providers/server-ai-vercel/src/ldai_vercel/types.py delete mode 100644 packages/ai-providers/server-ai-vercel/src/ldai_vercel/vercel_provider.py delete mode 100644 packages/ai-providers/server-ai-vercel/tests/__init__.py delete mode 100644 packages/ai-providers/server-ai-vercel/tests/test_vercel_provider.py diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e4ee0f0..9ee818d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -164,56 +164,3 @@ jobs: - name: Run tests run: make -C packages/ai-providers/server-ai-openai test - - server-ai-vercel-linux: - runs-on: ubuntu-latest - strategy: - matrix: - python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"] - - steps: - - uses: actions/checkout@v4 - - - uses: ./.github/actions/ci - with: - workspace_path: packages/ai-providers/server-ai-vercel - python_version: ${{ matrix.python-version }} - - - uses: ./.github/actions/build - with: - workspace_path: packages/ai-providers/server-ai-vercel - - server-ai-vercel-windows: - runs-on: windows-latest - defaults: - run: - shell: powershell - - strategy: - matrix: - python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"] - - steps: - - uses: actions/checkout@v4 - - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v5 - with: - python-version: ${{ matrix.python-version }} - - - name: Install poetry - uses: abatilo/actions-poetry@7b6d33e44b4f08d7021a1dee3c044e9c253d6439 - - - name: Configure poetry for local virtualenvs - run: poetry config virtualenvs.in-project true - - - name: Install server-ai dependency first - working-directory: packages/sdk/server-ai - run: poetry install - - - name: Install requirements - working-directory: packages/ai-providers/server-ai-vercel - run: poetry install - - - name: Run tests - run: make -C packages/ai-providers/server-ai-vercel test diff --git a/.github/workflows/release-please.yml b/.github/workflows/release-please.yml index ce0bf51..eb6d555 100644 --- a/.github/workflows/release-please.yml +++ b/.github/workflows/release-please.yml @@ -27,7 +27,6 @@ on: - packages/sdk/server-ai - packages/ai-providers/server-ai-langchain - packages/ai-providers/server-ai-openai - - packages/ai-providers/server-ai-vercel dry_run: description: 'Is this a dry run. If so no package will be published.' 
type: boolean @@ -47,8 +46,6 @@ jobs: package-server-ai-langchain-tag-name: ${{ steps.release.outputs['packages/ai-providers/server-ai-langchain--tag_name'] }} package-server-ai-openai-released: ${{ steps.release.outputs['packages/ai-providers/server-ai-openai--release_created'] }} package-server-ai-openai-tag-name: ${{ steps.release.outputs['packages/ai-providers/server-ai-openai--tag_name'] }} - package-server-ai-vercel-released: ${{ steps.release.outputs['packages/ai-providers/server-ai-vercel--release_created'] }} - package-server-ai-vercel-tag-name: ${{ steps.release.outputs['packages/ai-providers/server-ai-vercel--tag_name'] }} steps: - uses: googleapis/release-please-action@v4 id: release @@ -253,57 +250,3 @@ jobs: base64-subjects: "${{ needs.release-server-ai-openai.outputs.package-hashes }}" upload-assets: true upload-tag-name: ${{ needs.release-please.outputs.package-server-ai-openai-tag-name }} - - release-server-ai-vercel: - runs-on: ubuntu-latest - needs: ['release-please'] - permissions: - id-token: write # Needed for OIDC to get release secrets from AWS. - if: ${{ needs.release-please.outputs.package-server-ai-vercel-released == 'true' }} - outputs: - package-hashes: ${{ steps.build.outputs.package-hashes }} - steps: - - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - - uses: actions/setup-python@v5 - with: - python-version: '3.11' - - - name: Install poetry - uses: abatilo/actions-poetry@7b6d33e44b4f08d7021a1dee3c044e9c253d6439 - - - uses: ./.github/actions/ci - with: - workspace_path: packages/ai-providers/server-ai-vercel - - - uses: ./.github/actions/build - id: build - with: - workspace_path: packages/ai-providers/server-ai-vercel - - - uses: launchdarkly/gh-actions/actions/release-secrets@release-secrets-v1.2.0 - name: 'Get PyPI token' - with: - aws_assume_role: ${{ vars.AWS_ROLE_ARN }} - ssm_parameter_pairs: '/production/common/releasing/pypi/token = PYPI_AUTH_TOKEN' - - - name: Publish to PyPI - uses: pypa/gh-action-pypi-publish@ed0c53931b1dc9bd32cbe73a98c7f6766f8a527e # v1.13.0 - with: - password: ${{ env.PYPI_AUTH_TOKEN }} - packages-dir: packages/ai-providers/server-ai-vercel/dist/ - - release-server-ai-vercel-provenance: - needs: ['release-please', 'release-server-ai-vercel'] - if: ${{ needs.release-please.outputs.package-server-ai-vercel-released == 'true' }} - permissions: - actions: read # Needed for detecting the GitHub Actions environment. - id-token: write # Needed for provenance signing. - contents: write # Needed for uploading assets to the release. 
- uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.0.0 - with: - base64-subjects: "${{ needs.release-server-ai-vercel.outputs.package-hashes }}" - upload-assets: true - upload-tag-name: ${{ needs.release-please.outputs.package-server-ai-vercel-tag-name }} diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 0317cff..0897b74 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,6 +1,5 @@ { "packages/sdk/server-ai": "0.11.0", "packages/ai-providers/server-ai-langchain": "0.2.0", - "packages/ai-providers/server-ai-openai": "0.1.0", - "packages/ai-providers/server-ai-vercel": "0.1.0" + "packages/ai-providers/server-ai-openai": "0.1.0" } diff --git a/Makefile b/Makefile index f56fbf9..34ddfeb 100644 --- a/Makefile +++ b/Makefile @@ -10,7 +10,6 @@ BUILDDIR = $(SOURCEDIR)/build SERVER_AI_PKG = packages/sdk/server-ai LANGCHAIN_PKG = packages/ai-providers/server-ai-langchain OPENAI_PKG = packages/ai-providers/server-ai-openai -VERCEL_PKG = packages/ai-providers/server-ai-vercel .PHONY: help help: #! Show this help message @@ -28,7 +27,6 @@ install: #! Install all packages $(MAKE) install-server-ai $(MAKE) install-langchain $(MAKE) install-openai - $(MAKE) install-vercel .PHONY: install-server-ai install-server-ai: #! Install server-ai package @@ -42,10 +40,6 @@ install-langchain: #! Install langchain provider package install-openai: #! Install openai provider package $(MAKE) -C $(OPENAI_PKG) install -.PHONY: install-vercel -install-vercel: #! Install vercel provider package - $(MAKE) -C $(VERCEL_PKG) install - # # Quality control checks # @@ -55,7 +49,6 @@ test: #! Run unit tests for all packages $(MAKE) test-server-ai $(MAKE) test-langchain $(MAKE) test-openai - $(MAKE) test-vercel .PHONY: test-server-ai test-server-ai: #! Run unit tests for server-ai package @@ -69,16 +62,11 @@ test-langchain: #! Run unit tests for langchain provider package test-openai: #! Run unit tests for openai provider package $(MAKE) -C $(OPENAI_PKG) test -.PHONY: test-vercel -test-vercel: #! Run unit tests for vercel provider package - $(MAKE) -C $(VERCEL_PKG) test - .PHONY: lint lint: #! Run type analysis and linting checks for all packages $(MAKE) lint-server-ai $(MAKE) lint-langchain $(MAKE) lint-openai - $(MAKE) lint-vercel .PHONY: lint-server-ai lint-server-ai: #! Run type analysis and linting checks for server-ai package @@ -92,10 +80,6 @@ lint-langchain: #! Run type analysis and linting checks for langchain provider p lint-openai: #! Run type analysis and linting checks for openai provider package $(MAKE) -C $(OPENAI_PKG) lint -.PHONY: lint-vercel -lint-vercel: #! Run type analysis and linting checks for vercel provider package - $(MAKE) -C $(VERCEL_PKG) lint - # # Build targets # @@ -105,7 +89,6 @@ build: #! Build all packages $(MAKE) build-server-ai $(MAKE) build-langchain $(MAKE) build-openai - $(MAKE) build-vercel .PHONY: build-server-ai build-server-ai: #! Build server-ai package @@ -119,10 +102,6 @@ build-langchain: #! Build langchain provider package build-openai: #! Build openai provider package $(MAKE) -C $(OPENAI_PKG) build -.PHONY: build-vercel -build-vercel: #! 
Build vercel provider package - $(MAKE) -C $(VERCEL_PKG) build - # # Documentation generation # diff --git a/packages/ai-providers/server-ai-vercel/Makefile b/packages/ai-providers/server-ai-vercel/Makefile deleted file mode 100644 index 805aedb..0000000 --- a/packages/ai-providers/server-ai-vercel/Makefile +++ /dev/null @@ -1,30 +0,0 @@ -PYTEST_FLAGS=-W error::SyntaxWarning - -.PHONY: help -help: #! Show this help message - @echo 'Usage: make [target] ... ' - @echo '' - @echo 'Targets:' - @grep -h -F '#!' $(MAKEFILE_LIST) | grep -v grep | sed 's/:.*#!/:/' | column -t -s":" - -.PHONY: install -install: #! Install package dependencies - poetry install - -.PHONY: test -test: #! Run unit tests -test: install - poetry run pytest $(PYTEST_FLAGS) - -.PHONY: lint -lint: #! Run type analysis and linting checks -lint: install - poetry run mypy src/ldai_vercel - poetry run isort --check --atomic src/ldai_vercel - poetry run pycodestyle src/ldai_vercel - -.PHONY: build -build: #! Build distribution files -build: install - poetry build - diff --git a/packages/ai-providers/server-ai-vercel/README.md b/packages/ai-providers/server-ai-vercel/README.md deleted file mode 100644 index 80f0580..0000000 --- a/packages/ai-providers/server-ai-vercel/README.md +++ /dev/null @@ -1,97 +0,0 @@ -# LaunchDarkly AI SDK Vercel Provider - -[![PyPI](https://img.shields.io/pypi/v/launchdarkly-server-sdk-ai-vercel-dev.svg?style=flat-square)](https://pypi.org/project/launchdarkly-server-sdk-ai-vercel-dev/) - -This package provides a multi-provider integration for the LaunchDarkly AI SDK, similar to the Vercel AI SDK in JavaScript. It uses [LiteLLM](https://github.com/BerriAI/litellm) under the hood to support 100+ LLM providers. - -## Installation - -```bash -pip install launchdarkly-server-sdk-ai-vercel-dev -``` - -## Supported Providers - -This provider supports all LiteLLM-compatible providers, including: - -- OpenAI -- Anthropic -- Google (Gemini) -- Cohere -- Mistral -- Azure OpenAI -- AWS Bedrock -- And many more... 
- -## Quick Start - -```python -import asyncio -from ldai import AIClient -from ldai_vercel import VercelProvider - -async def main(): - # Initialize the AI client - ai_client = AIClient(ld_client) - - # Get AI config - ai_config = ai_client.config( - "my-ai-config-key", - context, - default_value - ) - - # Create a Vercel provider from the config - provider = await VercelProvider.create(ai_config) - - # Invoke the model - response = await provider.invoke_model(ai_config.messages) - print(response.message.content) - -asyncio.run(main()) -``` - -## Features - -- Multi-provider support through LiteLLM -- Automatic token usage tracking -- Support for structured output (JSON schema) -- Parameter mapping between LaunchDarkly and LiteLLM formats -- Static utility methods for custom integrations - -## API Reference - -### VercelProvider - -#### Constructor - -```python -VercelProvider(model_name: str, parameters: VercelModelParameters, logger: Optional[Any] = None) -``` - -#### Static Methods - -- `create(ai_config: AIConfigKind, logger: Optional[Any] = None) -> VercelProvider` - Factory method to create a provider from an AI config -- `get_ai_metrics_from_response(response: Any) -> LDAIMetrics` - Extract metrics from a LiteLLM response -- `map_provider(ld_provider_name: str) -> str` - Map LD provider names to LiteLLM format -- `map_parameters(parameters: Dict) -> VercelModelParameters` - Map LD parameters to LiteLLM format - -#### Instance Methods - -- `invoke_model(messages: List[LDMessage]) -> ChatResponse` - Invoke the model with messages -- `invoke_structured_model(messages: List[LDMessage], response_structure: Dict[str, Any]) -> StructuredResponse` - Invoke the model with structured output - -## Environment Variables - -Make sure to set the appropriate API key environment variables for your chosen provider: - -- `OPENAI_API_KEY` - For OpenAI -- `ANTHROPIC_API_KEY` - For Anthropic -- `GOOGLE_API_KEY` - For Google/Gemini -- `COHERE_API_KEY` - For Cohere -- `MISTRAL_API_KEY` - For Mistral - -## License - -Apache-2.0 - diff --git a/packages/ai-providers/server-ai-vercel/pyproject.toml b/packages/ai-providers/server-ai-vercel/pyproject.toml deleted file mode 100644 index 775e198..0000000 --- a/packages/ai-providers/server-ai-vercel/pyproject.toml +++ /dev/null @@ -1,60 +0,0 @@ -[tool.poetry] -# TODO: Rename before official release -name = "launchdarkly-server-sdk-ai-vercel-dev" -version = "0.1.0" -description = "LaunchDarkly AI SDK Vercel Provider (Multi-Provider Support via LiteLLM)" -authors = ["LaunchDarkly "] -license = "Apache-2.0" -readme = "README.md" -homepage = "https://docs.launchdarkly.com/sdk/ai/python" -repository = "https://github.com/launchdarkly/python-server-sdk-ai" -classifiers = [ - "Intended Audience :: Developers", - "License :: OSI Approved :: Apache Software License", - "Operating System :: OS Independent", - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.9", - "Programming Language :: Python :: 3.10", - "Programming Language :: Python :: 3.11", - "Programming Language :: Python :: 3.12", - "Programming Language :: Python :: 3.13", - "Topic :: Software Development", - "Topic :: Software Development :: Libraries", -] -packages = [{ include = "ldai_vercel", from = "src" }] - -[tool.poetry.dependencies] -python = ">=3.9,<4" -launchdarkly-server-sdk-ai = ">=0.11.0" -litellm = ">=1.0.0" - -[tool.poetry.group.dev.dependencies] -pytest = ">=2.8" -pytest-cov = ">=2.4.0" -pytest-asyncio = ">=0.21.0,<1.0.0" -mypy = "==1.18.2" -pycodestyle = ">=2.11.0" 
-isort = ">=5.12.0" - -[tool.mypy] -python_version = "3.9" -ignore_missing_imports = true -install_types = true -non_interactive = true - -[tool.isort] -profile = "black" -known_third_party = ["litellm", "ldai"] -sections = ["FUTURE", "STDLIB", "THIRDPARTY", "FIRSTPARTY", "LOCALFOLDER"] - - -[tool.pytest.ini_options] -addopts = ["-ra"] -testpaths = ["tests"] -asyncio_mode = "auto" - - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" - diff --git a/packages/ai-providers/server-ai-vercel/setup.cfg b/packages/ai-providers/server-ai-vercel/setup.cfg deleted file mode 100644 index 6224f31..0000000 --- a/packages/ai-providers/server-ai-vercel/setup.cfg +++ /dev/null @@ -1,2 +0,0 @@ -[pycodestyle] -max-line-length = 120 diff --git a/packages/ai-providers/server-ai-vercel/src/ldai_vercel/__init__.py b/packages/ai-providers/server-ai-vercel/src/ldai_vercel/__init__.py deleted file mode 100644 index f470479..0000000 --- a/packages/ai-providers/server-ai-vercel/src/ldai_vercel/__init__.py +++ /dev/null @@ -1,23 +0,0 @@ -"""LaunchDarkly AI SDK Vercel Provider (Multi-Provider Support via LiteLLM).""" - -from ldai_vercel.types import ( - ModelUsageTokens, - StreamResponse, - TextResponse, - VercelModelParameters, - VercelProviderFunction, - VercelSDKConfig, - VercelSDKMapOptions, -) -from ldai_vercel.vercel_provider import VercelProvider - -__all__ = [ - 'VercelProvider', - 'VercelModelParameters', - 'VercelSDKConfig', - 'VercelSDKMapOptions', - 'VercelProviderFunction', - 'ModelUsageTokens', - 'TextResponse', - 'StreamResponse', -] diff --git a/packages/ai-providers/server-ai-vercel/src/ldai_vercel/types.py b/packages/ai-providers/server-ai-vercel/src/ldai_vercel/types.py deleted file mode 100644 index 1a59205..0000000 --- a/packages/ai-providers/server-ai-vercel/src/ldai_vercel/types.py +++ /dev/null @@ -1,116 +0,0 @@ -"""Types for Vercel AI provider.""" - -from dataclasses import dataclass, field -from typing import Any, Callable, Dict, List, Optional - -from ldai import LDMessage - -# Type alias for provider function -VercelProviderFunction = Callable[[str], Any] - - -@dataclass -class VercelModelParameters: - """ - Vercel/LiteLLM model parameters. - - These are the parameters that can be passed to LiteLLM methods. 
- """ - max_tokens: Optional[int] = None - temperature: Optional[float] = None - top_p: Optional[float] = None - top_k: Optional[int] = None - presence_penalty: Optional[float] = None - frequency_penalty: Optional[float] = None - stop: Optional[List[str]] = None - seed: Optional[int] = None - - def to_dict(self) -> Dict[str, Any]: - """Convert to dictionary, excluding None values.""" - result: Dict[str, Any] = {} - if self.max_tokens is not None: - result['max_tokens'] = self.max_tokens - if self.temperature is not None: - result['temperature'] = self.temperature - if self.top_p is not None: - result['top_p'] = self.top_p - if self.top_k is not None: - result['top_k'] = self.top_k - if self.presence_penalty is not None: - result['presence_penalty'] = self.presence_penalty - if self.frequency_penalty is not None: - result['frequency_penalty'] = self.frequency_penalty - if self.stop is not None: - result['stop'] = self.stop - if self.seed is not None: - result['seed'] = self.seed - return result - - -@dataclass -class VercelSDKMapOptions: - """Options for mapping to Vercel/LiteLLM SDK configuration.""" - non_interpolated_messages: Optional[List[LDMessage]] = None - - -@dataclass -class VercelSDKConfig: - """Configuration format compatible with LiteLLM's completion methods.""" - model: str - messages: Optional[List[LDMessage]] = None - max_tokens: Optional[int] = None - temperature: Optional[float] = None - top_p: Optional[float] = None - top_k: Optional[int] = None - presence_penalty: Optional[float] = None - frequency_penalty: Optional[float] = None - stop: Optional[List[str]] = None - seed: Optional[int] = None - - def to_dict(self) -> Dict[str, Any]: - """Convert to dictionary, excluding None values.""" - result: Dict[str, Any] = {'model': self.model} - if self.messages is not None: - result['messages'] = [{'role': m.role, 'content': m.content} for m in self.messages] - if self.max_tokens is not None: - result['max_tokens'] = self.max_tokens - if self.temperature is not None: - result['temperature'] = self.temperature - if self.top_p is not None: - result['top_p'] = self.top_p - if self.top_k is not None: - result['top_k'] = self.top_k - if self.presence_penalty is not None: - result['presence_penalty'] = self.presence_penalty - if self.frequency_penalty is not None: - result['frequency_penalty'] = self.frequency_penalty - if self.stop is not None: - result['stop'] = self.stop - if self.seed is not None: - result['seed'] = self.seed - return result - - -@dataclass -class ModelUsageTokens: - """ - Token usage information from LiteLLM operations. 
- """ - prompt_tokens: Optional[int] = None - completion_tokens: Optional[int] = None - total_tokens: Optional[int] = None - - -@dataclass -class TextResponse: - """Response type for non-streaming LiteLLM operations.""" - finish_reason: Optional[str] = None - usage: Optional[ModelUsageTokens] = None - - -@dataclass -class StreamResponse: - """Response type for streaming LiteLLM operations.""" - # Note: In async streaming, these would be resolved after the stream completes - finish_reason: Optional[str] = None - usage: Optional[ModelUsageTokens] = None diff --git a/packages/ai-providers/server-ai-vercel/src/ldai_vercel/vercel_provider.py b/packages/ai-providers/server-ai-vercel/src/ldai_vercel/vercel_provider.py deleted file mode 100644 index 994fea8..0000000 --- a/packages/ai-providers/server-ai-vercel/src/ldai_vercel/vercel_provider.py +++ /dev/null @@ -1,392 +0,0 @@ -"""Vercel AI implementation of AIProvider for LaunchDarkly AI SDK using LiteLLM.""" - -import json -from typing import Any, Callable, Dict, List, Optional, Union - -import litellm -from ldai import LDMessage -from ldai.models import AIConfigKind -from ldai.providers import AIProvider -from ldai.providers.types import ChatResponse, LDAIMetrics, StructuredResponse -from ldai.tracker import TokenUsage -from litellm import acompletion - -from ldai_vercel.types import ( - ModelUsageTokens, - TextResponse, - VercelModelParameters, - VercelProviderFunction, - VercelSDKConfig, - VercelSDKMapOptions, -) - - -class VercelProvider(AIProvider): - """ - Vercel AI implementation of AIProvider using LiteLLM. - - This provider integrates multiple AI providers (OpenAI, Anthropic, Google, etc.) - with LaunchDarkly's tracking capabilities through LiteLLM. - """ - - def __init__( - self, - model_name: str, - parameters: VercelModelParameters, - logger: Optional[Any] = None - ): - """ - Initialize the Vercel provider. - - :param model_name: The full model name in LiteLLM format (e.g., 'openai/gpt-4', 'anthropic/claude-3-opus') - :param parameters: Model parameters - :param logger: Optional logger for logging provider operations - """ - super().__init__(logger) - self._model_name = model_name - self._parameters = parameters - - # ============================================================================= - # MAIN FACTORY METHODS - # ============================================================================= - - @staticmethod - async def create(ai_config: AIConfigKind, logger: Optional[Any] = None) -> 'VercelProvider': - """ - Static factory method to create a Vercel AIProvider from an AI configuration. - This method auto-detects the provider and creates the model. - - :param ai_config: The LaunchDarkly AI configuration - :param logger: Optional logger - :return: A configured VercelProvider - """ - model_name = VercelProvider.create_model_name(ai_config) - parameters = VercelProvider.map_parameters(ai_config.to_dict().get('model', {}).get('parameters')) - return VercelProvider(model_name, parameters, logger) - - # ============================================================================= - # INSTANCE METHODS (AIProvider Implementation) - # ============================================================================= - - async def invoke_model(self, messages: List[LDMessage]) -> ChatResponse: - """ - Invoke the AI model with an array of messages. 
- - :param messages: Array of LDMessage objects representing the conversation - :return: ChatResponse containing the model's response and metrics - """ - try: - # Convert LDMessage to LiteLLM message format - litellm_messages = [ - {'role': msg.role, 'content': msg.content} - for msg in messages - ] - - # Call LiteLLM acompletion - response = await acompletion( - model=self._model_name, - messages=litellm_messages, - **self._parameters.to_dict(), - ) - - # Extract metrics including token usage and success status - metrics = VercelProvider.get_ai_metrics_from_response(response) - - # Create the assistant message - content = '' - if response.choices and len(response.choices) > 0: - message = response.choices[0].message - if message and message.content: - content = message.content - - return ChatResponse( - message=LDMessage(role='assistant', content=content), - metrics=metrics, - ) - except Exception as error: - if self.logger: - self.logger.warn(f'Vercel AI model invocation failed: {error}') - - return ChatResponse( - message=LDMessage(role='assistant', content=''), - metrics=LDAIMetrics(success=False, usage=None), - ) - - async def invoke_structured_model( - self, - messages: List[LDMessage], - response_structure: Dict[str, Any], - ) -> StructuredResponse: - """ - Invoke the AI model with structured output support. - - :param messages: Array of LDMessage objects representing the conversation - :param response_structure: Dictionary defining the JSON schema for output structure - :return: StructuredResponse containing the structured data - """ - try: - # Convert LDMessage to LiteLLM message format - litellm_messages = [ - {'role': msg.role, 'content': msg.content} - for msg in messages - ] - - # Call LiteLLM acompletion with JSON response format - response = await acompletion( - model=self._model_name, - messages=litellm_messages, - response_format={'type': 'json_object'}, - **self._parameters.to_dict(), - ) - - # Extract metrics - metrics = VercelProvider.get_ai_metrics_from_response(response) - - # Safely extract the content - content = '' - if response.choices and len(response.choices) > 0: - message = response.choices[0].message - if message and message.content: - content = message.content - - if not content: - if self.logger: - self.logger.warn('Vercel AI structured response has no content available') - metrics = LDAIMetrics(success=False, usage=metrics.usage) - return StructuredResponse( - data={}, - raw_response='', - metrics=metrics, - ) - - try: - data = json.loads(content) - return StructuredResponse( - data=data, - raw_response=content, - metrics=metrics, - ) - except json.JSONDecodeError as parse_error: - if self.logger: - self.logger.warn(f'Vercel AI structured response contains invalid JSON: {parse_error}') - metrics = LDAIMetrics(success=False, usage=metrics.usage) - return StructuredResponse( - data={}, - raw_response=content, - metrics=metrics, - ) - except Exception as error: - if self.logger: - self.logger.warn(f'Vercel AI structured model invocation failed: {error}') - - return StructuredResponse( - data={}, - raw_response='', - metrics=LDAIMetrics(success=False, usage=None), - ) - - def get_model_name(self) -> str: - """ - Get the model name. 
- - :return: The model name - """ - return self._model_name - - # ============================================================================= - # STATIC UTILITY METHODS - # ============================================================================= - - @staticmethod - def map_provider(ld_provider_name: str) -> str: - """ - Map LaunchDarkly provider names to LiteLLM provider prefixes. - - This method enables seamless integration between LaunchDarkly's standardized - provider naming and LiteLLM's naming conventions. - - :param ld_provider_name: LaunchDarkly provider name - :return: LiteLLM-compatible provider prefix - """ - lowercased_name = ld_provider_name.lower() - - mapping: Dict[str, str] = { - 'gemini': 'gemini', - 'google': 'gemini', - 'openai': 'openai', - 'anthropic': 'anthropic', - 'cohere': 'cohere', - 'mistral': 'mistral', - 'azure': 'azure', - 'bedrock': 'bedrock', - } - - return mapping.get(lowercased_name, lowercased_name) - - @staticmethod - def map_usage_data_to_ld_token_usage(usage_data: Any) -> TokenUsage: - """ - Map LiteLLM usage data to LaunchDarkly token usage. - - :param usage_data: Usage data from LiteLLM - :return: TokenUsage - """ - if not usage_data: - return TokenUsage(total=0, input=0, output=0) - - total_tokens = getattr(usage_data, 'total_tokens', None) or 0 - prompt_tokens = getattr(usage_data, 'prompt_tokens', None) or 0 - completion_tokens = getattr(usage_data, 'completion_tokens', None) or 0 - - return TokenUsage( - total=total_tokens, - input=prompt_tokens, - output=completion_tokens, - ) - - @staticmethod - def get_ai_metrics_from_response(response: Any) -> LDAIMetrics: - """ - Get AI metrics from a LiteLLM response. - - This method extracts token usage information and success status from LiteLLM responses - and returns a LaunchDarkly AIMetrics object. - - :param response: The response from LiteLLM - :return: LDAIMetrics with success status and token usage - - Example: - response = await tracker.track_metrics_of( - lambda: acompletion(config), - VercelProvider.get_ai_metrics_from_response - ) - """ - # Check finish reason for error - finish_reason = 'unknown' - if response and hasattr(response, 'choices') and response.choices: - choice = response.choices[0] - if hasattr(choice, 'finish_reason'): - finish_reason = choice.finish_reason or 'unknown' - - # Extract token usage if available - usage: Optional[TokenUsage] = None - if hasattr(response, 'usage') and response.usage: - usage = VercelProvider.map_usage_data_to_ld_token_usage(response.usage) - - success = finish_reason != 'error' - - return LDAIMetrics(success=success, usage=usage) - - @staticmethod - def create_ai_metrics(response: Any) -> LDAIMetrics: - """ - Create AI metrics information from a LiteLLM response. - - :deprecated: Use `get_ai_metrics_from_response()` instead. - :param response: The response from LiteLLM - :return: LDAIMetrics with success status and token usage - """ - return VercelProvider.get_ai_metrics_from_response(response) - - @staticmethod - def map_parameters(parameters: Optional[Dict[str, Any]]) -> VercelModelParameters: - """ - Map LaunchDarkly model parameters to LiteLLM parameters. 
- - Parameter mappings: - - max_tokens → max_tokens - - max_completion_tokens → max_tokens - - temperature → temperature - - top_p → top_p - - top_k → top_k - - presence_penalty → presence_penalty - - frequency_penalty → frequency_penalty - - stop → stop - - seed → seed - - :param parameters: The LaunchDarkly model parameters to map - :return: VercelModelParameters - """ - if not parameters: - return VercelModelParameters() - - return VercelModelParameters( - max_tokens=parameters.get('max_tokens') or parameters.get('max_completion_tokens'), - temperature=parameters.get('temperature'), - top_p=parameters.get('top_p'), - top_k=parameters.get('top_k'), - presence_penalty=parameters.get('presence_penalty'), - frequency_penalty=parameters.get('frequency_penalty'), - stop=parameters.get('stop'), - seed=parameters.get('seed'), - ) - - @staticmethod - def to_litellm_config( - ai_config: AIConfigKind, - options: Optional[VercelSDKMapOptions] = None, - ) -> VercelSDKConfig: - """ - Convert an AI configuration to LiteLLM configuration. - - :param ai_config: The LaunchDarkly AI configuration - :param options: Optional mapping options - :return: A configuration directly usable in LiteLLM - """ - config_dict = ai_config.to_dict() - model_dict = config_dict.get('model') or {} - provider_dict = config_dict.get('provider') or {} - - # Build full model name - provider_name = VercelProvider.map_provider(provider_dict.get('name', '')) - model_name = model_dict.get('name', '') - - full_model_name = f'{provider_name}/{model_name}' if provider_name else model_name - - # Merge messages from config and options - messages: Optional[List[LDMessage]] = None - config_messages = config_dict.get('messages') - if config_messages or (options and options.non_interpolated_messages): - messages = [] - if config_messages: - for msg in config_messages: - messages.append(LDMessage(role=msg['role'], content=msg['content'])) - if options and options.non_interpolated_messages: - messages.extend(options.non_interpolated_messages) - - # Map parameters using the shared mapping method - params = VercelProvider.map_parameters(model_dict.get('parameters')) - - # Build and return the LiteLLM configuration - return VercelSDKConfig( - model=full_model_name, - messages=messages, - max_tokens=params.max_tokens, - temperature=params.temperature, - top_p=params.top_p, - top_k=params.top_k, - presence_penalty=params.presence_penalty, - frequency_penalty=params.frequency_penalty, - stop=params.stop, - seed=params.seed, - ) - - @staticmethod - def create_model_name(ai_config: AIConfigKind) -> str: - """ - Create a LiteLLM model name from an AI configuration. 
- - :param ai_config: The LaunchDarkly AI configuration - :return: A LiteLLM-compatible model name - """ - config_dict = ai_config.to_dict() - model_dict = config_dict.get('model') or {} - provider_dict = config_dict.get('provider') or {} - - provider_name = VercelProvider.map_provider(provider_dict.get('name', '')) - model_name = model_dict.get('name', '') - - # LiteLLM uses provider/model format - if provider_name: - return f'{provider_name}/{model_name}' - return model_name diff --git a/packages/ai-providers/server-ai-vercel/tests/__init__.py b/packages/ai-providers/server-ai-vercel/tests/__init__.py deleted file mode 100644 index 8bf5209..0000000 --- a/packages/ai-providers/server-ai-vercel/tests/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -"""Tests for LaunchDarkly AI SDK Vercel Provider.""" - diff --git a/packages/ai-providers/server-ai-vercel/tests/test_vercel_provider.py b/packages/ai-providers/server-ai-vercel/tests/test_vercel_provider.py deleted file mode 100644 index 0e9c935..0000000 --- a/packages/ai-providers/server-ai-vercel/tests/test_vercel_provider.py +++ /dev/null @@ -1,528 +0,0 @@ -"""Tests for Vercel Provider.""" - -import pytest -from unittest.mock import AsyncMock, MagicMock, patch - -from ldai import LDMessage - -from ldai_vercel import VercelProvider, VercelModelParameters, VercelSDKMapOptions - - -class TestGetAIMetricsFromResponse: - """Tests for get_ai_metrics_from_response static method.""" - - def test_creates_metrics_with_success_true_and_token_usage(self): - """Should create metrics with success=True and token usage.""" - mock_response = MagicMock() - mock_response.choices = [MagicMock()] - mock_response.choices[0].finish_reason = 'stop' - mock_response.usage = MagicMock() - mock_response.usage.prompt_tokens = 50 - mock_response.usage.completion_tokens = 50 - mock_response.usage.total_tokens = 100 - - result = VercelProvider.get_ai_metrics_from_response(mock_response) - - assert result.success is True - assert result.usage is not None - assert result.usage.total == 100 - assert result.usage.input == 50 - assert result.usage.output == 50 - - def test_creates_metrics_with_success_true_and_no_usage_when_usage_missing(self): - """Should create metrics with success=True and no usage when usage is missing.""" - mock_response = MagicMock() - mock_response.choices = [MagicMock()] - mock_response.choices[0].finish_reason = 'stop' - mock_response.usage = None - - result = VercelProvider.get_ai_metrics_from_response(mock_response) - - assert result.success is True - assert result.usage is None - - def test_handles_partial_usage_data(self): - """Should handle partial usage data.""" - mock_response = MagicMock() - mock_response.choices = [MagicMock()] - mock_response.choices[0].finish_reason = 'stop' - mock_response.usage = MagicMock() - mock_response.usage.prompt_tokens = 30 - mock_response.usage.completion_tokens = None - mock_response.usage.total_tokens = None - - result = VercelProvider.get_ai_metrics_from_response(mock_response) - - assert result.success is True - assert result.usage is not None - assert result.usage.total == 0 - assert result.usage.input == 30 - assert result.usage.output == 0 - - def test_returns_success_false_for_error_finish_reason(self): - """Should return success=False for error finish reason.""" - mock_response = MagicMock() - mock_response.choices = [MagicMock()] - mock_response.choices[0].finish_reason = 'error' - mock_response.usage = MagicMock() - mock_response.usage.prompt_tokens = 50 - mock_response.usage.completion_tokens = 50 - 
mock_response.usage.total_tokens = 100 - - result = VercelProvider.get_ai_metrics_from_response(mock_response) - - assert result.success is False - assert result.usage is not None - assert result.usage.total == 100 - - -class TestInvokeModel: - """Tests for invoke_model instance method.""" - - @pytest.fixture - def mock_logger(self): - """Create a mock logger.""" - return MagicMock() - - @pytest.mark.asyncio - async def test_invokes_litellm_and_returns_response(self, mock_logger): - """Should invoke LiteLLM and return response.""" - mock_response = MagicMock() - mock_response.choices = [MagicMock()] - mock_response.choices[0].message = MagicMock() - mock_response.choices[0].message.content = 'Hello! How can I help you today?' - mock_response.choices[0].finish_reason = 'stop' - mock_response.usage = MagicMock() - mock_response.usage.prompt_tokens = 10 - mock_response.usage.completion_tokens = 15 - mock_response.usage.total_tokens = 25 - - with patch('ldai_vercel.vercel_provider.acompletion', new_callable=AsyncMock) as mock_acompletion: - mock_acompletion.return_value = mock_response - - provider = VercelProvider('openai/gpt-3.5-turbo', VercelModelParameters(), mock_logger) - messages = [LDMessage(role='user', content='Hello!')] - result = await provider.invoke_model(messages) - - mock_acompletion.assert_called_once_with( - model='openai/gpt-3.5-turbo', - messages=[{'role': 'user', 'content': 'Hello!'}], - ) - - assert result.message.role == 'assistant' - assert result.message.content == 'Hello! How can I help you today?' - assert result.metrics.success is True - assert result.metrics.usage is not None - assert result.metrics.usage.total == 25 - assert result.metrics.usage.input == 10 - assert result.metrics.usage.output == 15 - - @pytest.mark.asyncio - async def test_handles_response_without_usage_data(self, mock_logger): - """Should handle response without usage data.""" - mock_response = MagicMock() - mock_response.choices = [MagicMock()] - mock_response.choices[0].message = MagicMock() - mock_response.choices[0].message.content = 'Hello! How can I help you today?' - mock_response.choices[0].finish_reason = 'stop' - mock_response.usage = None - - with patch('ldai_vercel.vercel_provider.acompletion', new_callable=AsyncMock) as mock_acompletion: - mock_acompletion.return_value = mock_response - - provider = VercelProvider('openai/gpt-3.5-turbo', VercelModelParameters(), mock_logger) - messages = [LDMessage(role='user', content='Hello!')] - result = await provider.invoke_model(messages) - - assert result.message.role == 'assistant' - assert result.message.content == 'Hello! How can I help you today?' 
- assert result.metrics.success is True - assert result.metrics.usage is None - - @pytest.mark.asyncio - async def test_handles_errors_and_returns_failure_metrics(self, mock_logger): - """Should handle errors and return failure metrics.""" - with patch('ldai_vercel.vercel_provider.acompletion', new_callable=AsyncMock) as mock_acompletion: - mock_acompletion.side_effect = Exception('API call failed') - - provider = VercelProvider('openai/gpt-3.5-turbo', VercelModelParameters(), mock_logger) - messages = [LDMessage(role='user', content='Hello!')] - result = await provider.invoke_model(messages) - - mock_logger.warn.assert_called() - assert result.message.role == 'assistant' - assert result.message.content == '' - assert result.metrics.success is False - - -class TestInvokeStructuredModel: - """Tests for invoke_structured_model instance method.""" - - @pytest.fixture - def mock_logger(self): - """Create a mock logger.""" - return MagicMock() - - @pytest.mark.asyncio - async def test_invokes_litellm_with_structured_output(self, mock_logger): - """Should invoke LiteLLM with structured output and return parsed response.""" - mock_response = MagicMock() - mock_response.choices = [MagicMock()] - mock_response.choices[0].message = MagicMock() - mock_response.choices[0].message.content = '{"name": "John Doe", "age": 30, "isActive": true}' - mock_response.choices[0].finish_reason = 'stop' - mock_response.usage = MagicMock() - mock_response.usage.prompt_tokens = 10 - mock_response.usage.completion_tokens = 15 - mock_response.usage.total_tokens = 25 - - with patch('ldai_vercel.vercel_provider.acompletion', new_callable=AsyncMock) as mock_acompletion: - mock_acompletion.return_value = mock_response - - provider = VercelProvider('openai/gpt-3.5-turbo', VercelModelParameters(), mock_logger) - messages = [LDMessage(role='user', content='Generate user data')] - response_structure = {'name': 'string', 'age': 0, 'isActive': True} - - result = await provider.invoke_structured_model(messages, response_structure) - - assert result.data == {'name': 'John Doe', 'age': 30, 'isActive': True} - assert result.raw_response == '{"name": "John Doe", "age": 30, "isActive": true}' - assert result.metrics.success is True - assert result.metrics.usage is not None - assert result.metrics.usage.total == 25 - - @pytest.mark.asyncio - async def test_handles_structured_response_without_usage_data(self, mock_logger): - """Should handle structured response without usage data.""" - mock_response = MagicMock() - mock_response.choices = [MagicMock()] - mock_response.choices[0].message = MagicMock() - mock_response.choices[0].message.content = '{"result": "success"}' - mock_response.choices[0].finish_reason = 'stop' - mock_response.usage = None - - with patch('ldai_vercel.vercel_provider.acompletion', new_callable=AsyncMock) as mock_acompletion: - mock_acompletion.return_value = mock_response - - provider = VercelProvider('openai/gpt-3.5-turbo', VercelModelParameters(), mock_logger) - messages = [LDMessage(role='user', content='Generate result')] - response_structure = {'result': 'string'} - - result = await provider.invoke_structured_model(messages, response_structure) - - assert result.data == {'result': 'success'} - assert result.metrics.success is True - assert result.metrics.usage is None - - @pytest.mark.asyncio - async def test_handles_errors_and_returns_failure_metrics(self, mock_logger): - """Should handle errors and return failure metrics.""" - with patch('ldai_vercel.vercel_provider.acompletion', new_callable=AsyncMock) as 
mock_acompletion: - mock_acompletion.side_effect = Exception('API call failed') - - provider = VercelProvider('openai/gpt-3.5-turbo', VercelModelParameters(), mock_logger) - messages = [LDMessage(role='user', content='Generate result')] - response_structure = {'result': 'string'} - - result = await provider.invoke_structured_model(messages, response_structure) - - mock_logger.warn.assert_called() - assert result.data == {} - assert result.raw_response == '' - assert result.metrics.success is False - - @pytest.mark.asyncio - async def test_handles_invalid_json_response(self, mock_logger): - """Should handle invalid JSON response gracefully.""" - mock_response = MagicMock() - mock_response.choices = [MagicMock()] - mock_response.choices[0].message = MagicMock() - mock_response.choices[0].message.content = 'invalid json content' - mock_response.choices[0].finish_reason = 'stop' - mock_response.usage = MagicMock() - mock_response.usage.prompt_tokens = 10 - mock_response.usage.completion_tokens = 5 - mock_response.usage.total_tokens = 15 - - with patch('ldai_vercel.vercel_provider.acompletion', new_callable=AsyncMock) as mock_acompletion: - mock_acompletion.return_value = mock_response - - provider = VercelProvider('openai/gpt-3.5-turbo', VercelModelParameters(), mock_logger) - messages = [LDMessage(role='user', content='Generate result')] - response_structure = {'result': 'string'} - - result = await provider.invoke_structured_model(messages, response_structure) - - assert result.data == {} - assert result.raw_response == 'invalid json content' - assert result.metrics.success is False - mock_logger.warn.assert_called() - - -class TestGetModelName: - """Tests for get_model_name instance method.""" - - def test_returns_model_name(self): - """Should return the model name.""" - provider = VercelProvider('openai/gpt-4', VercelModelParameters()) - assert provider.get_model_name() == 'openai/gpt-4' - - -class TestMapProvider: - """Tests for map_provider static method.""" - - def test_maps_gemini_to_gemini(self): - """Should map gemini to gemini.""" - assert VercelProvider.map_provider('gemini') == 'gemini' - assert VercelProvider.map_provider('Gemini') == 'gemini' - assert VercelProvider.map_provider('GEMINI') == 'gemini' - - def test_maps_google_to_gemini(self): - """Should map google to gemini.""" - assert VercelProvider.map_provider('google') == 'gemini' - - def test_returns_provider_name_unchanged_for_standard_providers(self): - """Should return provider name unchanged for standard providers.""" - assert VercelProvider.map_provider('openai') == 'openai' - assert VercelProvider.map_provider('anthropic') == 'anthropic' - assert VercelProvider.map_provider('cohere') == 'cohere' - assert VercelProvider.map_provider('mistral') == 'mistral' - - def test_returns_provider_name_unchanged_for_unmapped_providers(self): - """Should return provider name unchanged for unmapped providers.""" - assert VercelProvider.map_provider('unknown') == 'unknown' - - -class TestMapParameters: - """Tests for map_parameters static method.""" - - def test_maps_parameters_correctly(self): - """Should map parameters correctly.""" - parameters = { - 'max_tokens': 100, - 'temperature': 0.7, - 'top_p': 0.9, - 'top_k': 50, - 'presence_penalty': 0.1, - 'frequency_penalty': 0.2, - 'stop': ['stop1', 'stop2'], - 'seed': 42, - } - - result = VercelProvider.map_parameters(parameters) - - assert result.max_tokens == 100 - assert result.temperature == 0.7 - assert result.top_p == 0.9 - assert result.top_k == 50 - assert 
result.presence_penalty == 0.1 - assert result.frequency_penalty == 0.2 - assert result.stop == ['stop1', 'stop2'] - assert result.seed == 42 - - def test_handles_max_completion_tokens(self): - """Should use max_completion_tokens if max_tokens is not present.""" - parameters = { - 'max_completion_tokens': 200, - } - - result = VercelProvider.map_parameters(parameters) - - assert result.max_tokens == 200 - - def test_prefers_max_tokens_over_max_completion_tokens(self): - """Should prefer max_tokens over max_completion_tokens.""" - parameters = { - 'max_tokens': 100, - 'max_completion_tokens': 200, - } - - result = VercelProvider.map_parameters(parameters) - - assert result.max_tokens == 100 - - def test_returns_empty_parameters_for_none_input(self): - """Should return empty parameters for None input.""" - result = VercelProvider.map_parameters(None) - - assert result.max_tokens is None - assert result.temperature is None - - -class TestToLitellmConfig: - """Tests for to_litellm_config static method.""" - - def test_creates_config_with_correct_model_name(self): - """Should create config with correct model name.""" - mock_ai_config = MagicMock() - mock_ai_config.to_dict.return_value = { - 'model': {'name': 'gpt-4'}, - 'provider': {'name': 'openai'}, - } - - result = VercelProvider.to_litellm_config(mock_ai_config) - - assert result.model == 'openai/gpt-4' - - def test_handles_missing_provider(self): - """Should handle missing provider.""" - mock_ai_config = MagicMock() - mock_ai_config.to_dict.return_value = { - 'model': {'name': 'gpt-4'}, - } - - result = VercelProvider.to_litellm_config(mock_ai_config) - - assert result.model == 'gpt-4' - - def test_merges_messages_and_non_interpolated_messages(self): - """Should merge messages and non_interpolated_messages.""" - mock_ai_config = MagicMock() - mock_ai_config.to_dict.return_value = { - 'model': {'name': 'gpt-4'}, - 'provider': {'name': 'openai'}, - 'messages': [{'role': 'user', 'content': 'Hello'}], - } - - options = VercelSDKMapOptions( - non_interpolated_messages=[LDMessage(role='assistant', content='Hi there')] - ) - - result = VercelProvider.to_litellm_config(mock_ai_config, options) - - assert len(result.messages) == 2 - assert result.messages[0].role == 'user' - assert result.messages[0].content == 'Hello' - assert result.messages[1].role == 'assistant' - assert result.messages[1].content == 'Hi there' - - def test_maps_parameters(self): - """Should map parameters correctly.""" - mock_ai_config = MagicMock() - mock_ai_config.to_dict.return_value = { - 'model': { - 'name': 'gpt-4', - 'parameters': { - 'max_tokens': 100, - 'temperature': 0.7, - }, - }, - 'provider': {'name': 'openai'}, - } - - result = VercelProvider.to_litellm_config(mock_ai_config) - - assert result.max_tokens == 100 - assert result.temperature == 0.7 - - -class TestCreateModelName: - """Tests for create_model_name static method.""" - - def test_creates_model_name_with_provider(self): - """Should create model name with provider.""" - mock_ai_config = MagicMock() - mock_ai_config.to_dict.return_value = { - 'model': {'name': 'gpt-4'}, - 'provider': {'name': 'openai'}, - } - - result = VercelProvider.create_model_name(mock_ai_config) - - assert result == 'openai/gpt-4' - - def test_creates_model_name_without_provider(self): - """Should create model name without provider.""" - mock_ai_config = MagicMock() - mock_ai_config.to_dict.return_value = { - 'model': {'name': 'gpt-4'}, - } - - result = VercelProvider.create_model_name(mock_ai_config) - - assert result == 'gpt-4' - - 
def test_maps_provider_name(self): - """Should map provider name.""" - mock_ai_config = MagicMock() - mock_ai_config.to_dict.return_value = { - 'model': {'name': 'claude-3-opus'}, - 'provider': {'name': 'anthropic'}, - } - - result = VercelProvider.create_model_name(mock_ai_config) - - assert result == 'anthropic/claude-3-opus' - - -class TestCreate: - """Tests for create static factory method.""" - - @pytest.mark.asyncio - async def test_creates_provider_with_correct_model_and_parameters(self): - """Should create VercelProvider with correct model and parameters.""" - mock_ai_config = MagicMock() - mock_ai_config.to_dict.return_value = { - 'model': { - 'name': 'gpt-4', - 'parameters': { - 'temperature': 0.7, - 'max_tokens': 1000, - }, - }, - 'provider': {'name': 'openai'}, - } - - result = await VercelProvider.create(mock_ai_config) - - assert isinstance(result, VercelProvider) - assert result.get_model_name() == 'openai/gpt-4' - assert result._parameters.temperature == 0.7 - assert result._parameters.max_tokens == 1000 - - -class TestCreateAIMetrics: - """Tests for deprecated create_ai_metrics static method.""" - - def test_delegates_to_get_ai_metrics_from_response(self): - """Should delegate to get_ai_metrics_from_response.""" - mock_response = MagicMock() - mock_response.choices = [MagicMock()] - mock_response.choices[0].finish_reason = 'stop' - mock_response.usage = MagicMock() - mock_response.usage.prompt_tokens = 50 - mock_response.usage.completion_tokens = 50 - mock_response.usage.total_tokens = 100 - - result = VercelProvider.create_ai_metrics(mock_response) - - assert result.success is True - assert result.usage is not None - assert result.usage.total == 100 - - -class TestVercelModelParameters: - """Tests for VercelModelParameters dataclass.""" - - def test_to_dict_excludes_none_values(self): - """Should exclude None values from dict.""" - params = VercelModelParameters( - max_tokens=100, - temperature=0.7, - ) - - result = params.to_dict() - - assert result == { - 'max_tokens': 100, - 'temperature': 0.7, - } - - def test_to_dict_returns_empty_for_all_none(self): - """Should return empty dict for all None values.""" - params = VercelModelParameters() - - result = params.to_dict() - - assert result == {} - diff --git a/release-please-config.json b/release-please-config.json index b83fca1..14a138b 100644 --- a/release-please-config.json +++ b/release-please-config.json @@ -25,14 +25,6 @@ "include-v-in-tag": false, "extra-files": ["src/ldai_openai/__init__.py"], "component": "launchdarkly-server-sdk-ai-openai-dev" - }, - "packages/ai-providers/server-ai-vercel": { - "release-type": "python", - "versioning": "default", - "bump-minor-pre-major": true, - "include-v-in-tag": false, - "extra-files": ["src/ldai_vercel/__init__.py"], - "component": "launchdarkly-server-sdk-ai-vercel-dev" } } } From a7af08b2aecac510c3f121e3122e20e0eaae4024 Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Wed, 24 Dec 2025 23:26:27 +0000 Subject: [PATCH 05/12] remove unnecessary method --- .../src/ldai_openai/openai_provider.py | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_provider.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_provider.py index 9fb1fbe..1ea0be1 100644 --- a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_provider.py +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_provider.py @@ -235,17 +235,3 @@ def get_ai_metrics_from_response(response: Any) -> LDAIMetrics: # 
OpenAI responses that complete successfully are considered successful by default return LDAIMetrics(success=True, usage=usage) - - @staticmethod - def create_ai_metrics(openai_response: Any) -> LDAIMetrics: - """ - Create AI metrics information from an OpenAI response. - - This method extracts token usage information and success status from OpenAI responses - and returns a LaunchDarkly AIMetrics object. - - :deprecated: Use `get_ai_metrics_from_response()` instead. - :param openai_response: The response from OpenAI chat completions API - :return: LDAIMetrics with success status and token usage - """ - return OpenAIProvider.get_ai_metrics_from_response(openai_response) From 0ffaea05470c29ef325f3ba2d66599e28c88c0ef Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Fri, 2 Jan 2026 16:36:34 +0000 Subject: [PATCH 06/12] improve logging --- .../src/ldai_openai/openai_provider.py | 25 +++++++------------ 1 file changed, 9 insertions(+), 16 deletions(-) diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_provider.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_provider.py index 1ea0be1..29ca58c 100644 --- a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_provider.py +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_provider.py @@ -4,6 +4,8 @@ import os from typing import Any, Dict, Iterable, List, Optional, cast +from ldclient import log + from ldai import LDMessage from ldai.models import AIConfigKind from ldai.providers import AIProvider @@ -25,7 +27,6 @@ def __init__( client: AsyncOpenAI, model_name: str, parameters: Dict[str, Any], - logger: Optional[Any] = None ): """ Initialize the OpenAI provider. @@ -33,9 +34,7 @@ def __init__( :param client: An AsyncOpenAI client instance :param model_name: The name of the model to use :param parameters: Additional model parameters - :param logger: Optional logger for logging provider operations """ - super().__init__(logger) self._client = client self._model_name = model_name self._parameters = parameters @@ -45,12 +44,11 @@ def __init__( # ============================================================================= @staticmethod - async def create(ai_config: AIConfigKind, logger: Optional[Any] = None) -> 'OpenAIProvider': + async def create(ai_config: AIConfigKind) -> 'OpenAIProvider': """ Static factory method to create an OpenAI AIProvider from an AI configuration. 
:param ai_config: The LaunchDarkly AI configuration - :param logger: Optional logger for the provider :return: Configured OpenAIProvider instance """ client = AsyncOpenAI( @@ -62,7 +60,7 @@ async def create(ai_config: AIConfigKind, logger: Optional[Any] = None) -> 'Open model_name = model_dict.get('name', '') parameters = model_dict.get('parameters') or {} - return OpenAIProvider(client, model_name, parameters, logger) + return OpenAIProvider(client, model_name, parameters) # ============================================================================= # INSTANCE METHODS (AIProvider Implementation) @@ -99,8 +97,7 @@ async def invoke_model(self, messages: List[LDMessage]) -> ChatResponse: content = message.content if not content: - if self.logger: - self.logger.warn('OpenAI response has no content available') + log.warn('OpenAI response has no content available') metrics = LDAIMetrics(success=False, usage=metrics.usage) return ChatResponse( @@ -108,8 +105,7 @@ async def invoke_model(self, messages: List[LDMessage]) -> ChatResponse: metrics=metrics, ) except Exception as error: - if self.logger: - self.logger.warn(f'OpenAI model invocation failed: {error}') + log.warn(f'OpenAI model invocation failed: {error}') return ChatResponse( message=LDMessage(role='assistant', content=''), @@ -160,8 +156,7 @@ async def invoke_structured_model( content = message.content if not content: - if self.logger: - self.logger.warn('OpenAI structured response has no content available') + log.warn('OpenAI structured response has no content available') metrics = LDAIMetrics(success=False, usage=metrics.usage) return StructuredResponse( data={}, @@ -177,8 +172,7 @@ async def invoke_structured_model( metrics=metrics, ) except json.JSONDecodeError as parse_error: - if self.logger: - self.logger.warn(f'OpenAI structured response contains invalid JSON: {parse_error}') + log.warn(f'OpenAI structured response contains invalid JSON: {parse_error}') metrics = LDAIMetrics(success=False, usage=metrics.usage) return StructuredResponse( data={}, @@ -186,8 +180,7 @@ async def invoke_structured_model( metrics=metrics, ) except Exception as error: - if self.logger: - self.logger.warn(f'OpenAI structured model invocation failed: {error}') + log.warn(f'OpenAI structured model invocation failed: {error}') return StructuredResponse( data={}, From b6b2c9609231f769508b191277cc12ef259b2321 Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Fri, 2 Jan 2026 16:49:37 +0000 Subject: [PATCH 07/12] update to final name --- packages/ai-providers/server-ai-openai/pyproject.toml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/packages/ai-providers/server-ai-openai/pyproject.toml b/packages/ai-providers/server-ai-openai/pyproject.toml index df7392f..05dbc3c 100644 --- a/packages/ai-providers/server-ai-openai/pyproject.toml +++ b/packages/ai-providers/server-ai-openai/pyproject.toml @@ -1,7 +1,6 @@ [tool.poetry] -# TODO: Rename before official release -name = "launchdarkly-server-sdk-ai-openai-dev" -version = "0.1.0" +name = "launchdarkly-server-sdk-ai-openai" +version = "0.0.0" description = "LaunchDarkly AI SDK OpenAI Provider" authors = ["LaunchDarkly "] license = "Apache-2.0" From 8fe67a33fc8ce47d9f139e78963f2ac779c722d9 Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Fri, 2 Jan 2026 20:52:36 +0000 Subject: [PATCH 08/12] use log provided by ldai --- .../server-ai-openai/src/ldai_openai/openai_provider.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git 
a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_provider.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_provider.py index 29ca58c..911624f 100644 --- a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_provider.py +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_provider.py @@ -4,9 +4,7 @@ import os from typing import Any, Dict, Iterable, List, Optional, cast -from ldclient import log - -from ldai import LDMessage +from ldai import LDMessage, log from ldai.models import AIConfigKind from ldai.providers import AIProvider from ldai.providers.types import ChatResponse, LDAIMetrics, StructuredResponse From dc76be2b32cc0e410715b16b2c54bb84f7fbf357 Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Fri, 2 Jan 2026 22:39:40 +0000 Subject: [PATCH 09/12] fixing tests --- .../server-ai-openai/pyproject.toml | 2 +- .../src/ldai_openai/openai_provider.py | 10 +-- .../tests/test_openai_provider.py | 63 +++++-------------- 3 files changed, 22 insertions(+), 53 deletions(-) diff --git a/packages/ai-providers/server-ai-openai/pyproject.toml b/packages/ai-providers/server-ai-openai/pyproject.toml index 05dbc3c..b3f305b 100644 --- a/packages/ai-providers/server-ai-openai/pyproject.toml +++ b/packages/ai-providers/server-ai-openai/pyproject.toml @@ -24,7 +24,7 @@ packages = [{ include = "ldai_openai", from = "src" }] [tool.poetry.dependencies] python = ">=3.9,<4" -launchdarkly-server-sdk-ai = ">=0.11.0" +launchdarkly-server-sdk-ai = ">=0.12.0" openai = ">=1.0.0" [tool.poetry.group.dev.dependencies] diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_provider.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_provider.py index 911624f..bd1352e 100644 --- a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_provider.py +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_provider.py @@ -95,7 +95,7 @@ async def invoke_model(self, messages: List[LDMessage]) -> ChatResponse: content = message.content if not content: - log.warn('OpenAI response has no content available') + log.warning('OpenAI response has no content available') metrics = LDAIMetrics(success=False, usage=metrics.usage) return ChatResponse( @@ -103,7 +103,7 @@ async def invoke_model(self, messages: List[LDMessage]) -> ChatResponse: metrics=metrics, ) except Exception as error: - log.warn(f'OpenAI model invocation failed: {error}') + log.warning(f'OpenAI model invocation failed: {error}') return ChatResponse( message=LDMessage(role='assistant', content=''), @@ -154,7 +154,7 @@ async def invoke_structured_model( content = message.content if not content: - log.warn('OpenAI structured response has no content available') + log.warning('OpenAI structured response has no content available') metrics = LDAIMetrics(success=False, usage=metrics.usage) return StructuredResponse( data={}, @@ -170,7 +170,7 @@ async def invoke_structured_model( metrics=metrics, ) except json.JSONDecodeError as parse_error: - log.warn(f'OpenAI structured response contains invalid JSON: {parse_error}') + log.warning(f'OpenAI structured response contains invalid JSON: {parse_error}') metrics = LDAIMetrics(success=False, usage=metrics.usage) return StructuredResponse( data={}, @@ -178,7 +178,7 @@ async def invoke_structured_model( metrics=metrics, ) except Exception as error: - log.warn(f'OpenAI structured model invocation failed: {error}') + log.warning(f'OpenAI structured model invocation failed: {error}') return StructuredResponse( data={}, diff --git 
a/packages/ai-providers/server-ai-openai/tests/test_openai_provider.py b/packages/ai-providers/server-ai-openai/tests/test_openai_provider.py index 457f9b0..ff9066b 100644 --- a/packages/ai-providers/server-ai-openai/tests/test_openai_provider.py +++ b/packages/ai-providers/server-ai-openai/tests/test_openai_provider.py @@ -62,13 +62,8 @@ def mock_client(self): """Create a mock OpenAI client.""" return MagicMock() - @pytest.fixture - def mock_logger(self): - """Create a mock logger.""" - return MagicMock() - @pytest.mark.asyncio - async def test_invokes_openai_chat_completions_and_returns_response(self, mock_client, mock_logger): + async def test_invokes_openai_chat_completions_and_returns_response(self, mock_client): """Should invoke OpenAI chat completions and return response.""" mock_response = MagicMock() mock_response.choices = [MagicMock()] @@ -83,7 +78,7 @@ async def test_invokes_openai_chat_completions_and_returns_response(self, mock_c mock_client.chat.completions = MagicMock() mock_client.chat.completions.create = AsyncMock(return_value=mock_response) - provider = OpenAIProvider(mock_client, 'gpt-3.5-turbo', {}, mock_logger) + provider = OpenAIProvider(mock_client, 'gpt-3.5-turbo', {}) messages = [LDMessage(role='user', content='Hello!')] result = await provider.invoke_model(messages) @@ -101,7 +96,7 @@ async def test_invokes_openai_chat_completions_and_returns_response(self, mock_c assert result.metrics.usage.output == 15 @pytest.mark.asyncio - async def test_returns_unsuccessful_response_when_no_content(self, mock_client, mock_logger): + async def test_returns_unsuccessful_response_when_no_content(self, mock_client): """Should return unsuccessful response when no content in response.""" mock_response = MagicMock() mock_response.choices = [MagicMock()] @@ -113,7 +108,7 @@ async def test_returns_unsuccessful_response_when_no_content(self, mock_client, mock_client.chat.completions = MagicMock() mock_client.chat.completions.create = AsyncMock(return_value=mock_response) - provider = OpenAIProvider(mock_client, 'gpt-3.5-turbo', {}, mock_logger) + provider = OpenAIProvider(mock_client, 'gpt-3.5-turbo', {}) messages = [LDMessage(role='user', content='Hello!')] result = await provider.invoke_model(messages) @@ -122,7 +117,7 @@ async def test_returns_unsuccessful_response_when_no_content(self, mock_client, assert result.metrics.success is False @pytest.mark.asyncio - async def test_returns_unsuccessful_response_when_choices_empty(self, mock_client, mock_logger): + async def test_returns_unsuccessful_response_when_choices_empty(self, mock_client): """Should return unsuccessful response when choices array is empty.""" mock_response = MagicMock() mock_response.choices = [] @@ -132,7 +127,7 @@ async def test_returns_unsuccessful_response_when_choices_empty(self, mock_clien mock_client.chat.completions = MagicMock() mock_client.chat.completions.create = AsyncMock(return_value=mock_response) - provider = OpenAIProvider(mock_client, 'gpt-3.5-turbo', {}, mock_logger) + provider = OpenAIProvider(mock_client, 'gpt-3.5-turbo', {}) messages = [LDMessage(role='user', content='Hello!')] result = await provider.invoke_model(messages) @@ -141,20 +136,19 @@ async def test_returns_unsuccessful_response_when_choices_empty(self, mock_clien assert result.metrics.success is False @pytest.mark.asyncio - async def test_returns_unsuccessful_response_when_exception_thrown(self, mock_client, mock_logger): + async def test_returns_unsuccessful_response_when_exception_thrown(self, mock_client): """Should return 
unsuccessful response when exception is thrown.""" mock_client.chat = MagicMock() mock_client.chat.completions = MagicMock() mock_client.chat.completions.create = AsyncMock(side_effect=Exception('API Error')) - provider = OpenAIProvider(mock_client, 'gpt-3.5-turbo', {}, mock_logger) + provider = OpenAIProvider(mock_client, 'gpt-3.5-turbo', {}) messages = [LDMessage(role='user', content='Hello!')] result = await provider.invoke_model(messages) assert result.message.role == 'assistant' assert result.message.content == '' assert result.metrics.success is False - mock_logger.warn.assert_called() class TestInvokeStructuredModel: @@ -165,13 +159,8 @@ def mock_client(self): """Create a mock OpenAI client.""" return MagicMock() - @pytest.fixture - def mock_logger(self): - """Create a mock logger.""" - return MagicMock() - @pytest.mark.asyncio - async def test_invokes_openai_with_structured_output(self, mock_client, mock_logger): + async def test_invokes_openai_with_structured_output(self, mock_client): """Should invoke OpenAI with structured output and return parsed response.""" mock_response = MagicMock() mock_response.choices = [MagicMock()] @@ -186,7 +175,7 @@ async def test_invokes_openai_with_structured_output(self, mock_client, mock_log mock_client.chat.completions = MagicMock() mock_client.chat.completions.create = AsyncMock(return_value=mock_response) - provider = OpenAIProvider(mock_client, 'gpt-3.5-turbo', {}, mock_logger) + provider = OpenAIProvider(mock_client, 'gpt-3.5-turbo', {}) messages = [LDMessage(role='user', content='Tell me about a person')] response_structure = { 'type': 'object', @@ -209,7 +198,7 @@ async def test_invokes_openai_with_structured_output(self, mock_client, mock_log assert result.metrics.usage.output == 10 @pytest.mark.asyncio - async def test_returns_unsuccessful_when_no_content_in_structured_response(self, mock_client, mock_logger): + async def test_returns_unsuccessful_when_no_content_in_structured_response(self, mock_client): """Should return unsuccessful response when no content in structured response.""" mock_response = MagicMock() mock_response.choices = [MagicMock()] @@ -221,7 +210,7 @@ async def test_returns_unsuccessful_when_no_content_in_structured_response(self, mock_client.chat.completions = MagicMock() mock_client.chat.completions.create = AsyncMock(return_value=mock_response) - provider = OpenAIProvider(mock_client, 'gpt-3.5-turbo', {}, mock_logger) + provider = OpenAIProvider(mock_client, 'gpt-3.5-turbo', {}) messages = [LDMessage(role='user', content='Tell me about a person')] response_structure = {'type': 'object'} @@ -232,7 +221,7 @@ async def test_returns_unsuccessful_when_no_content_in_structured_response(self, assert result.metrics.success is False @pytest.mark.asyncio - async def test_handles_json_parsing_errors(self, mock_client, mock_logger): + async def test_handles_json_parsing_errors(self, mock_client): """Should handle JSON parsing errors gracefully.""" mock_response = MagicMock() mock_response.choices = [MagicMock()] @@ -247,7 +236,7 @@ async def test_handles_json_parsing_errors(self, mock_client, mock_logger): mock_client.chat.completions = MagicMock() mock_client.chat.completions.create = AsyncMock(return_value=mock_response) - provider = OpenAIProvider(mock_client, 'gpt-3.5-turbo', {}, mock_logger) + provider = OpenAIProvider(mock_client, 'gpt-3.5-turbo', {}) messages = [LDMessage(role='user', content='Tell me about a person')] response_structure = {'type': 'object'} @@ -258,16 +247,15 @@ async def 
test_handles_json_parsing_errors(self, mock_client, mock_logger): assert result.metrics.success is False assert result.metrics.usage is not None assert result.metrics.usage.total == 15 - mock_logger.warn.assert_called() @pytest.mark.asyncio - async def test_returns_unsuccessful_response_when_exception_thrown(self, mock_client, mock_logger): + async def test_returns_unsuccessful_response_when_exception_thrown(self, mock_client): """Should return unsuccessful response when exception is thrown.""" mock_client.chat = MagicMock() mock_client.chat.completions = MagicMock() mock_client.chat.completions.create = AsyncMock(side_effect=Exception('API Error')) - provider = OpenAIProvider(mock_client, 'gpt-3.5-turbo', {}, mock_logger) + provider = OpenAIProvider(mock_client, 'gpt-3.5-turbo', {}) messages = [LDMessage(role='user', content='Tell me about a person')] response_structure = {'type': 'object'} @@ -276,7 +264,6 @@ async def test_returns_unsuccessful_response_when_exception_thrown(self, mock_cl assert result.data == {} assert result.raw_response == '' assert result.metrics.success is False - mock_logger.warn.assert_called() class TestGetClient: @@ -334,21 +321,3 @@ async def test_handles_missing_model_config(self): assert result._model_name == '' assert result._parameters == {} - -class TestCreateAIMetrics: - """Tests for deprecated create_ai_metrics static method.""" - - def test_delegates_to_get_ai_metrics_from_response(self): - """Should delegate to get_ai_metrics_from_response.""" - mock_response = MagicMock() - mock_response.usage = MagicMock() - mock_response.usage.prompt_tokens = 50 - mock_response.usage.completion_tokens = 50 - mock_response.usage.total_tokens = 100 - - result = OpenAIProvider.create_ai_metrics(mock_response) - - assert result.success is True - assert result.usage is not None - assert result.usage.total == 100 - From 07bb215c7d84ec7df0c7baf9180d6c47a4c91f35 Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Fri, 2 Jan 2026 22:41:49 +0000 Subject: [PATCH 10/12] Remove unnecessary comments --- .../src/ldai_openai/openai_provider.py | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_provider.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_provider.py index bd1352e..c62cc80 100644 --- a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_provider.py +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_provider.py @@ -37,10 +37,6 @@ def __init__( self._model_name = model_name self._parameters = parameters - # ============================================================================= - # MAIN FACTORY METHOD - # ============================================================================= - @staticmethod async def create(ai_config: AIConfigKind) -> 'OpenAIProvider': """ @@ -60,10 +56,6 @@ async def create(ai_config: AIConfigKind) -> 'OpenAIProvider': return OpenAIProvider(client, model_name, parameters) - # ============================================================================= - # INSTANCE METHODS (AIProvider Implementation) - # ============================================================================= - async def invoke_model(self, messages: List[LDMessage]) -> ChatResponse: """ Invoke the OpenAI model with an array of messages. 
@@ -194,10 +186,6 @@ def get_client(self) -> AsyncOpenAI: """ return self._client - # ============================================================================= - # STATIC UTILITY METHODS - # ============================================================================= - @staticmethod def get_ai_metrics_from_response(response: Any) -> LDAIMetrics: """ From 81fe0a8c38551f1cdebd79dfe1592c1f50b97c52 Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Fri, 2 Jan 2026 22:49:45 +0000 Subject: [PATCH 11/12] set correct version --- .release-please-manifest.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 6a40deb..00e20ec 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,5 +1,5 @@ { "packages/sdk/server-ai": "0.12.0", "packages/ai-providers/server-ai-langchain": "0.3.0", - "packages/ai-providers/server-ai-openai": "0.1.0" + "packages/ai-providers/server-ai-openai": "0.0.0" } From c9fdaaff3de84c44ccaec49b69073e009e83241c Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Fri, 2 Jan 2026 22:50:49 +0000 Subject: [PATCH 12/12] fix component name --- release-please-config.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/release-please-config.json b/release-please-config.json index 14a138b..e6b6bbd 100644 --- a/release-please-config.json +++ b/release-please-config.json @@ -24,7 +24,7 @@ "bump-minor-pre-major": true, "include-v-in-tag": false, "extra-files": ["src/ldai_openai/__init__.py"], - "component": "launchdarkly-server-sdk-ai-openai-dev" + "component": "launchdarkly-server-sdk-ai-openai" } } }
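
Usage note (not part of the patches above): a minimal sketch of how the OpenAIProvider looks to a caller once patches 05-12 are applied — the constructor and `create()` no longer take a logger, warnings go through the `log` object imported from `ldai`, and the deprecated `create_ai_metrics` wrapper is gone. The `ai_config` value (an AIConfigKind fetched from the LaunchDarkly AI SDK) and the OPENAI_API_KEY environment variable used by the AsyncOpenAI client are assumptions not shown in this series.

import asyncio

from ldai import LDMessage

from ldai_openai.openai_provider import OpenAIProvider


async def main(ai_config):
    # Async factory from the patched module: builds an AsyncOpenAI client and
    # reads the model name and parameters out of the AI configuration.
    provider = await OpenAIProvider.create(ai_config)

    messages = [LDMessage(role='user', content='Hello!')]
    result = await provider.invoke_model(messages)

    # ChatResponse carries the assistant LDMessage plus LDAIMetrics.
    print(result.message.role, result.message.content)
    print('success:', result.metrics.success)
    if result.metrics.usage is not None:
        print('total tokens:', result.metrics.usage.total)


# asyncio.run(main(ai_config)) would drive the sketch once ai_config exists.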
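
A second sketch, this time for invoke_structured_model as exercised by the updated tests: the provider asks for a JSON reply, parses it, and downgrades the metrics to success=False when the content is empty or not valid JSON. The exact `response_structure` schema below is illustrative; the patches only show it passed through as a plain dict.

from ldai import LDMessage

from ldai_openai.openai_provider import OpenAIProvider


async def structured_example(provider: OpenAIProvider):
    messages = [LDMessage(role='user', content='Tell me about a person')]
    response_structure = {
        'type': 'object',
        'properties': {
            'name': {'type': 'string'},
            'age': {'type': 'number'},
        },
    }

    result = await provider.invoke_structured_model(messages, response_structure)

    if result.metrics.success:
        # Parsed dict from the model's JSON reply.
        print(result.data)
    else:
        # Empty content or invalid JSON yields {} plus whatever raw text came back.
        print('structured call failed; raw:', result.raw_response)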
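
Finally, a sketch of the remaining static metrics helper on its own, since patch 05 removes the deprecated create_ai_metrics alias and leaves get_ai_metrics_from_response as the single entry point. Here `response` stands in for any OpenAI chat completion result that exposes a `usage` object; completed responses are treated as successful by default.

from ldai_openai.openai_provider import OpenAIProvider


def summarize(response):
    metrics = OpenAIProvider.get_ai_metrics_from_response(response)
    if metrics.usage is not None:
        # TokenUsage exposes total/input/output counts.
        return metrics.success, metrics.usage.total, metrics.usage.input, metrics.usage.output
    return metrics.success, None, None, None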