diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 3584e65..9ee818d 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -111,3 +111,56 @@ jobs:
 
       - name: Run tests
         run: make -C packages/ai-providers/server-ai-langchain test
+
+  server-ai-openai-linux:
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
+
+    steps:
+      - uses: actions/checkout@v4
+
+      - uses: ./.github/actions/ci
+        with:
+          workspace_path: packages/ai-providers/server-ai-openai
+          python_version: ${{ matrix.python-version }}
+
+      - uses: ./.github/actions/build
+        with:
+          workspace_path: packages/ai-providers/server-ai-openai
+
+  server-ai-openai-windows:
+    runs-on: windows-latest
+    defaults:
+      run:
+        shell: powershell
+
+    strategy:
+      matrix:
+        python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
+
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v5
+        with:
+          python-version: ${{ matrix.python-version }}
+
+      - name: Install poetry
+        uses: abatilo/actions-poetry@7b6d33e44b4f08d7021a1dee3c044e9c253d6439
+
+      - name: Configure poetry for local virtualenvs
+        run: poetry config virtualenvs.in-project true
+
+      - name: Install server-ai dependency first
+        working-directory: packages/sdk/server-ai
+        run: poetry install
+
+      - name: Install requirements
+        working-directory: packages/ai-providers/server-ai-openai
+        run: poetry install
+
+      - name: Run tests
+        run: make -C packages/ai-providers/server-ai-openai test
diff --git a/.github/workflows/release-please.yml b/.github/workflows/release-please.yml
index 051f969..eb6d555 100644
--- a/.github/workflows/release-please.yml
+++ b/.github/workflows/release-please.yml
@@ -26,6 +26,7 @@ on:
         options:
           - packages/sdk/server-ai
           - packages/ai-providers/server-ai-langchain
+          - packages/ai-providers/server-ai-openai
       dry_run:
         description: 'Is this a dry run. If so no package will be published.'
         type: boolean
@@ -43,6 +44,8 @@ jobs:
       package-server-ai-tag-name: ${{ steps.release.outputs['packages/sdk/server-ai--tag_name'] }}
       package-server-ai-langchain-released: ${{ steps.release.outputs['packages/ai-providers/server-ai-langchain--release_created'] }}
       package-server-ai-langchain-tag-name: ${{ steps.release.outputs['packages/ai-providers/server-ai-langchain--tag_name'] }}
+      package-server-ai-openai-released: ${{ steps.release.outputs['packages/ai-providers/server-ai-openai--release_created'] }}
+      package-server-ai-openai-tag-name: ${{ steps.release.outputs['packages/ai-providers/server-ai-openai--tag_name'] }}
     steps:
       - uses: googleapis/release-please-action@v4
         id: release
@@ -193,3 +196,57 @@ jobs:
       base64-subjects: "${{ needs.release-server-ai-langchain.outputs.package-hashes }}"
       upload-assets: true
       upload-tag-name: ${{ needs.release-please.outputs.package-server-ai-langchain-tag-name }}
+
+  release-server-ai-openai:
+    runs-on: ubuntu-latest
+    needs: ['release-please']
+    permissions:
+      id-token: write # Needed for OIDC to get release secrets from AWS.
+    if: ${{ needs.release-please.outputs.package-server-ai-openai-released == 'true' }}
+    outputs:
+      package-hashes: ${{ steps.build.outputs.package-hashes }}
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+
+      - uses: actions/setup-python@v5
+        with:
+          python-version: '3.11'
+
+      - name: Install poetry
+        uses: abatilo/actions-poetry@7b6d33e44b4f08d7021a1dee3c044e9c253d6439
+
+      - uses: ./.github/actions/ci
+        with:
+          workspace_path: packages/ai-providers/server-ai-openai
+
+      - uses: ./.github/actions/build
+        id: build
+        with:
+          workspace_path: packages/ai-providers/server-ai-openai
+
+      - uses: launchdarkly/gh-actions/actions/release-secrets@release-secrets-v1.2.0
+        name: 'Get PyPI token'
+        with:
+          aws_assume_role: ${{ vars.AWS_ROLE_ARN }}
+          ssm_parameter_pairs: '/production/common/releasing/pypi/token = PYPI_AUTH_TOKEN'
+
+      - name: Publish to PyPI
+        uses: pypa/gh-action-pypi-publish@ed0c53931b1dc9bd32cbe73a98c7f6766f8a527e # v1.13.0
+        with:
+          password: ${{ env.PYPI_AUTH_TOKEN }}
+          packages-dir: packages/ai-providers/server-ai-openai/dist/
+
+  release-server-ai-openai-provenance:
+    needs: ['release-please', 'release-server-ai-openai']
+    if: ${{ needs.release-please.outputs.package-server-ai-openai-released == 'true' }}
+    permissions:
+      actions: read # Needed for detecting the GitHub Actions environment.
+      id-token: write # Needed for provenance signing.
+      contents: write # Needed for uploading assets to the release.
+    uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.0.0
+    with:
+      base64-subjects: "${{ needs.release-server-ai-openai.outputs.package-hashes }}"
+      upload-assets: true
+      upload-tag-name: ${{ needs.release-please.outputs.package-server-ai-openai-tag-name }}
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 0184176..00e20ec 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,4 +1,5 @@
 {
   "packages/sdk/server-ai": "0.12.0",
-  "packages/ai-providers/server-ai-langchain": "0.3.0"
+  "packages/ai-providers/server-ai-langchain": "0.3.0",
+  "packages/ai-providers/server-ai-openai": "0.0.0"
 }
diff --git a/Makefile b/Makefile
index e895105..34ddfeb 100644
--- a/Makefile
+++ b/Makefile
@@ -9,6 +9,7 @@ BUILDDIR = $(SOURCEDIR)/build
 # Package paths
 SERVER_AI_PKG = packages/sdk/server-ai
 LANGCHAIN_PKG = packages/ai-providers/server-ai-langchain
+OPENAI_PKG = packages/ai-providers/server-ai-openai
 
 .PHONY: help
 help: #! Show this help message
@@ -25,6 +26,7 @@ help: #! Show this help message
 install: #! Install all packages
 	$(MAKE) install-server-ai
 	$(MAKE) install-langchain
+	$(MAKE) install-openai
 
 .PHONY: install-server-ai
 install-server-ai: #! Install server-ai package
@@ -34,6 +36,10 @@ install-server-ai: #! Install server-ai package
 install-langchain: #! Install langchain provider package
 	$(MAKE) -C $(LANGCHAIN_PKG) install
 
+.PHONY: install-openai
+install-openai: #! Install openai provider package
+	$(MAKE) -C $(OPENAI_PKG) install
+
 #
 # Quality control checks
 #
@@ -42,6 +48,7 @@ install-langchain: #! Install langchain provider package
 test: #! Run unit tests for all packages
 	$(MAKE) test-server-ai
 	$(MAKE) test-langchain
+	$(MAKE) test-openai
 
 .PHONY: test-server-ai
 test-server-ai: #! Run unit tests for server-ai package
@@ -51,10 +58,15 @@ test-server-ai: #! Run unit tests for server-ai package
 test-langchain: #! Run unit tests for langchain provider package
 	$(MAKE) -C $(LANGCHAIN_PKG) test
 
+.PHONY: test-openai
+test-openai: #! Run unit tests for openai provider package
+	$(MAKE) -C $(OPENAI_PKG) test
+
 .PHONY: lint
 lint: #! Run type analysis and linting checks for all packages
 	$(MAKE) lint-server-ai
 	$(MAKE) lint-langchain
+	$(MAKE) lint-openai
 
 .PHONY: lint-server-ai
 lint-server-ai: #! Run type analysis and linting checks for server-ai package
@@ -64,6 +76,10 @@ lint-server-ai: #! Run type analysis and linting checks for server-ai package
 lint-langchain: #! Run type analysis and linting checks for langchain provider package
 	$(MAKE) -C $(LANGCHAIN_PKG) lint
 
+.PHONY: lint-openai
+lint-openai: #! Run type analysis and linting checks for openai provider package
+	$(MAKE) -C $(OPENAI_PKG) lint
+
 #
 # Build targets
 #
@@ -72,6 +88,7 @@ lint-langchain: #! Run type analysis and linting checks for langchain provider p
 build: #! Build all packages
 	$(MAKE) build-server-ai
 	$(MAKE) build-langchain
+	$(MAKE) build-openai
 
 .PHONY: build-server-ai
 build-server-ai: #! Build server-ai package
@@ -81,6 +98,10 @@ build-server-ai: #! Build server-ai package
 build-langchain: #! Build langchain provider package
 	$(MAKE) -C $(LANGCHAIN_PKG) build
 
+.PHONY: build-openai
+build-openai: #! Build openai provider package
+	$(MAKE) -C $(OPENAI_PKG) build
+
 #
 # Documentation generation
 #
diff --git a/packages/ai-providers/server-ai-openai/Makefile b/packages/ai-providers/server-ai-openai/Makefile
new file mode 100644
index 0000000..b14dfd9
--- /dev/null
+++ b/packages/ai-providers/server-ai-openai/Makefile
@@ -0,0 +1,30 @@
+PYTEST_FLAGS=-W error::SyntaxWarning
+
+.PHONY: help
+help: #! Show this help message
+	@echo 'Usage: make [target] ... '
+	@echo ''
+	@echo 'Targets:'
+	@grep -h -F '#!' $(MAKEFILE_LIST) | grep -v grep | sed 's/:.*#!/:/' | column -t -s":"
+
+.PHONY: install
+install: #! Install package dependencies
+	poetry install
+
+.PHONY: test
+test: #! Run unit tests
+test: install
+	poetry run pytest $(PYTEST_FLAGS)
+
+.PHONY: lint
+lint: #! Run type analysis and linting checks
+lint: install
+	poetry run mypy src/ldai_openai
+	poetry run isort --check --atomic src/ldai_openai
+	poetry run pycodestyle src/ldai_openai
+
+.PHONY: build
+build: #! Build distribution files
+build: install
+	poetry build
+
diff --git a/packages/ai-providers/server-ai-openai/README.md b/packages/ai-providers/server-ai-openai/README.md
new file mode 100644
index 0000000..5f7619b
--- /dev/null
+++ b/packages/ai-providers/server-ai-openai/README.md
@@ -0,0 +1,111 @@
+# LaunchDarkly AI SDK OpenAI Provider
+
+[![PyPI](https://img.shields.io/pypi/v/launchdarkly-server-sdk-ai-openai.svg?style=flat-square)](https://pypi.org/project/launchdarkly-server-sdk-ai-openai/)
+
+This package provides an OpenAI integration for the LaunchDarkly AI SDK.
+
+## Installation
+
+```bash
+pip install launchdarkly-server-sdk-ai-openai
+```
+
+## Quick Start
+
+```python
+import asyncio
+from ldai import AIClient
+from ldai_openai import OpenAIProvider
+
+async def main():
+    # Initialize the AI client (ld_client is your configured LaunchDarkly client)
+    ai_client = AIClient(ld_client)
+
+    # Get the AI config (context and default_value come from your application)
+    ai_config = ai_client.config(
+        "my-ai-config-key",
+        context,
+        default_value
+    )
+
+    # Create an OpenAI provider from the config
+    provider = await OpenAIProvider.create(ai_config)
+
+    # Invoke the model
+    response = await provider.invoke_model(ai_config.messages)
+    print(response.message.content)
+
+asyncio.run(main())
+```
+
+## Features
+
+- Full integration with OpenAI's chat completions API
+- Automatic token usage tracking
+- Support for structured output (JSON schema)
+- Static utility methods for custom integrations (see the sketch below)
+
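+### Tracking metrics for direct OpenAI calls
+
+If you call the OpenAI client yourself instead of going through
+`invoke_model`, you can still record token usage with the static helper.
+A minimal sketch (assuming `tracker` is the tracker attached to your AI
+config and `client` is an `AsyncOpenAI` instance):
+
+```python
+response = await tracker.track_metrics_of(
+    lambda: client.chat.completions.create(
+        model="gpt-3.5-turbo",
+        messages=[{"role": "user", "content": "Hello!"}],
+    ),
+    OpenAIProvider.get_ai_metrics_from_response,
+)
+```
+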
+## API Reference
+
+### OpenAIProvider
+
+#### Constructor
+
+```python
+OpenAIProvider(client: AsyncOpenAI, model_name: str, parameters: Dict[str, Any])
+```
+
+#### Static Methods
+
+- `create(ai_config: AIConfigKind) -> OpenAIProvider` - Async factory method to create a provider from an AI config
+- `get_ai_metrics_from_response(response: Any) -> LDAIMetrics` - Extract metrics from an OpenAI response
+
+#### Instance Methods
+
+- `invoke_model(messages: List[LDMessage]) -> ChatResponse` - Invoke the model with messages
+- `invoke_structured_model(messages: List[LDMessage], response_structure: Dict[str, Any]) -> StructuredResponse` - Invoke the model with structured output (see the example below)
+- `get_client() -> AsyncOpenAI` - Get the underlying OpenAI client
+
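+### Structured output example
+
+`invoke_structured_model` asks the model to return JSON matching a schema.
+A minimal sketch (reusing `provider` and `ai_config` from the Quick Start;
+the schema fields are illustrative):
+
+```python
+schema = {
+    "type": "object",
+    "properties": {
+        "sentiment": {"type": "string"},
+        "confidence": {"type": "number"},
+    },
+    "required": ["sentiment", "confidence"],
+    "additionalProperties": False,
+}
+
+result = await provider.invoke_structured_model(ai_config.messages, schema)
+if result.metrics.success:
+    print(result.data["sentiment"], result.data["confidence"])
+```
+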
"poetry.core.masonry.api" + diff --git a/packages/ai-providers/server-ai-openai/setup.cfg b/packages/ai-providers/server-ai-openai/setup.cfg new file mode 100644 index 0000000..6224f31 --- /dev/null +++ b/packages/ai-providers/server-ai-openai/setup.cfg @@ -0,0 +1,2 @@ +[pycodestyle] +max-line-length = 120 diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/__init__.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/__init__.py new file mode 100644 index 0000000..5d5120f --- /dev/null +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/__init__.py @@ -0,0 +1,5 @@ +"""LaunchDarkly AI SDK OpenAI Provider.""" + +from ldai_openai.openai_provider import OpenAIProvider + +__all__ = ['OpenAIProvider'] diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_provider.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_provider.py new file mode 100644 index 0000000..c62cc80 --- /dev/null +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_provider.py @@ -0,0 +1,216 @@ +"""OpenAI implementation of AIProvider for LaunchDarkly AI SDK.""" + +import json +import os +from typing import Any, Dict, Iterable, List, Optional, cast + +from ldai import LDMessage, log +from ldai.models import AIConfigKind +from ldai.providers import AIProvider +from ldai.providers.types import ChatResponse, LDAIMetrics, StructuredResponse +from ldai.tracker import TokenUsage +from openai import AsyncOpenAI +from openai.types.chat import ChatCompletionMessageParam + + +class OpenAIProvider(AIProvider): + """ + OpenAI implementation of AIProvider. + + This provider integrates OpenAI's chat completions API with LaunchDarkly's tracking capabilities. + """ + + def __init__( + self, + client: AsyncOpenAI, + model_name: str, + parameters: Dict[str, Any], + ): + """ + Initialize the OpenAI provider. + + :param client: An AsyncOpenAI client instance + :param model_name: The name of the model to use + :param parameters: Additional model parameters + """ + self._client = client + self._model_name = model_name + self._parameters = parameters + + @staticmethod + async def create(ai_config: AIConfigKind) -> 'OpenAIProvider': + """ + Static factory method to create an OpenAI AIProvider from an AI configuration. + + :param ai_config: The LaunchDarkly AI configuration + :return: Configured OpenAIProvider instance + """ + client = AsyncOpenAI( + api_key=os.environ.get('OPENAI_API_KEY'), + ) + + config_dict = ai_config.to_dict() + model_dict = config_dict.get('model') or {} + model_name = model_dict.get('name', '') + parameters = model_dict.get('parameters') or {} + + return OpenAIProvider(client, model_name, parameters) + + async def invoke_model(self, messages: List[LDMessage]) -> ChatResponse: + """ + Invoke the OpenAI model with an array of messages. 
+    async def invoke_model(self, messages: List[LDMessage]) -> ChatResponse:
+        """
+        Invoke the OpenAI model with a list of messages.
+
+        :param messages: List of LDMessage objects representing the conversation
+        :return: ChatResponse containing the model's response and metrics
+        """
+        try:
+            # Convert LDMessage to OpenAI message format
+            openai_messages: Iterable[ChatCompletionMessageParam] = cast(
+                Iterable[ChatCompletionMessageParam],
+                [{'role': msg.role, 'content': msg.content} for msg in messages]
+            )
+
+            response = await self._client.chat.completions.create(
+                model=self._model_name,
+                messages=openai_messages,
+                **self._parameters,
+            )
+
+            # Generate metrics early (assumes success by default)
+            metrics = OpenAIProvider.get_ai_metrics_from_response(response)
+
+            # Safely extract the first choice content
+            content = ''
+            if response.choices and len(response.choices) > 0:
+                message = response.choices[0].message
+                if message and message.content:
+                    content = message.content
+
+            if not content:
+                log.warning('OpenAI response has no content available')
+                metrics = LDAIMetrics(success=False, usage=metrics.usage)
+
+            return ChatResponse(
+                message=LDMessage(role='assistant', content=content),
+                metrics=metrics,
+            )
+        except Exception as error:
+            log.warning(f'OpenAI model invocation failed: {error}')
+
+            return ChatResponse(
+                message=LDMessage(role='assistant', content=''),
+                metrics=LDAIMetrics(success=False, usage=None),
+            )
+
+    async def invoke_structured_model(
+        self,
+        messages: List[LDMessage],
+        response_structure: Dict[str, Any],
+    ) -> StructuredResponse:
+        """
+        Invoke the OpenAI model with structured output support.
+
+        :param messages: List of LDMessage objects representing the conversation
+        :param response_structure: Dictionary defining the JSON schema for output structure
+        :return: StructuredResponse containing the structured data
+        """
+        try:
+            # Convert LDMessage to OpenAI message format
+            openai_messages: Iterable[ChatCompletionMessageParam] = cast(
+                Iterable[ChatCompletionMessageParam],
+                [{'role': msg.role, 'content': msg.content} for msg in messages]
+            )
+
+            response = await self._client.chat.completions.create(
+                model=self._model_name,
+                messages=openai_messages,
+                response_format={  # type: ignore[arg-type]
+                    'type': 'json_schema',
+                    'json_schema': {
+                        'name': 'structured_output',
+                        'schema': response_structure,
+                        'strict': True,
+                    },
+                },
+                **self._parameters,
+            )
+
+            # Generate metrics early (assumes success by default)
+            metrics = OpenAIProvider.get_ai_metrics_from_response(response)
+
+            # Safely extract the first choice content
+            content = ''
+            if response.choices and len(response.choices) > 0:
+                message = response.choices[0].message
+                if message and message.content:
+                    content = message.content
+
+            if not content:
+                log.warning('OpenAI structured response has no content available')
+                metrics = LDAIMetrics(success=False, usage=metrics.usage)
+                return StructuredResponse(
+                    data={},
+                    raw_response='',
+                    metrics=metrics,
+                )
+
+            try:
+                data = json.loads(content)
+                return StructuredResponse(
+                    data=data,
+                    raw_response=content,
+                    metrics=metrics,
+                )
+            except json.JSONDecodeError as parse_error:
+                log.warning(f'OpenAI structured response contains invalid JSON: {parse_error}')
+                metrics = LDAIMetrics(success=False, usage=metrics.usage)
+                return StructuredResponse(
+                    data={},
+                    raw_response=content,
+                    metrics=metrics,
+                )
+        except Exception as error:
+            log.warning(f'OpenAI structured model invocation failed: {error}')
+
+            return StructuredResponse(
+                data={},
+                raw_response='',
+                metrics=LDAIMetrics(success=False, usage=None),
+            )
+
+    def get_client(self) -> AsyncOpenAI:
+        """
+        Get the underlying OpenAI client instance.
+
+        :return: The underlying AsyncOpenAI client
+        """
+        return self._client
+
+    @staticmethod
+    def get_ai_metrics_from_response(response: Any) -> LDAIMetrics:
+        """
+        Get AI metrics from an OpenAI response.
+
+        This method extracts token usage information and success status from OpenAI responses
+        and returns a LaunchDarkly LDAIMetrics object.
+
+        :param response: The response from OpenAI chat completions API
+        :return: LDAIMetrics with success status and token usage
+
+        Example:
+            response = await tracker.track_metrics_of(
+                lambda: client.chat.completions.create(**config),
+                OpenAIProvider.get_ai_metrics_from_response
+            )
+        """
+        # Extract token usage if available
+        usage: Optional[TokenUsage] = None
+        if hasattr(response, 'usage') and response.usage:
+            usage = TokenUsage(
+                total=response.usage.total_tokens or 0,
+                input=response.usage.prompt_tokens or 0,
+                output=response.usage.completion_tokens or 0,
+            )
+
+        # OpenAI responses that complete successfully are considered successful by default
+        return LDAIMetrics(success=True, usage=usage)
diff --git a/packages/ai-providers/server-ai-openai/tests/__init__.py b/packages/ai-providers/server-ai-openai/tests/__init__.py
new file mode 100644
index 0000000..4a4a397
--- /dev/null
+++ b/packages/ai-providers/server-ai-openai/tests/__init__.py
@@ -0,0 +1,2 @@
+"""Tests for LaunchDarkly AI SDK OpenAI Provider."""
+
diff --git a/packages/ai-providers/server-ai-openai/tests/test_openai_provider.py b/packages/ai-providers/server-ai-openai/tests/test_openai_provider.py
new file mode 100644
index 0000000..ff9066b
--- /dev/null
+++ b/packages/ai-providers/server-ai-openai/tests/test_openai_provider.py
@@ -0,0 +1,323 @@
+"""Tests for OpenAI Provider."""
+
+from unittest.mock import AsyncMock, MagicMock, patch
+
+import pytest
+from ldai import LDMessage
+
+from ldai_openai import OpenAIProvider
+
+
+class TestGetAIMetricsFromResponse:
+    """Tests for get_ai_metrics_from_response static method."""
+
+    def test_creates_metrics_with_success_true_and_token_usage(self):
+        """Should create metrics with success=True and token usage."""
+        mock_response = MagicMock()
+        mock_response.usage = MagicMock()
+        mock_response.usage.prompt_tokens = 50
+        mock_response.usage.completion_tokens = 50
+        mock_response.usage.total_tokens = 100
+
+        result = OpenAIProvider.get_ai_metrics_from_response(mock_response)
+
+        assert result.success is True
+        assert result.usage is not None
+        assert result.usage.total == 100
+        assert result.usage.input == 50
+        assert result.usage.output == 50
+
+    def test_creates_metrics_with_success_true_and_no_usage_when_usage_missing(self):
+        """Should create metrics with success=True and no usage when usage is missing."""
+        mock_response = MagicMock()
+        mock_response.usage = None
+
+        result = OpenAIProvider.get_ai_metrics_from_response(mock_response)
+
+        assert result.success is True
+        assert result.usage is None
+
+    def test_handles_partial_usage_data(self):
+        """Should handle partial usage data."""
+        mock_response = MagicMock()
+        mock_response.usage = MagicMock()
+        mock_response.usage.prompt_tokens = 30
+        mock_response.usage.completion_tokens = None
+        mock_response.usage.total_tokens = None
+
+        result = OpenAIProvider.get_ai_metrics_from_response(mock_response)
+
+        assert result.success is True
+        assert result.usage is not None
+        assert result.usage.total == 0
+        assert result.usage.input == 30
+        assert result.usage.output == 0
+
+
+class TestInvokeModel:
+    """Tests for invoke_model instance method."""
+
+    @pytest.fixture
+    def mock_client(self):
+        """Create a mock OpenAI client."""
+        return MagicMock()
+
+    @pytest.mark.asyncio
+    async def test_invokes_openai_chat_completions_and_returns_response(self, mock_client):
+        """Should invoke OpenAI chat completions and return response."""
+        mock_response = MagicMock()
+        mock_response.choices = [MagicMock()]
+        mock_response.choices[0].message = MagicMock()
+        mock_response.choices[0].message.content = 'Hello! How can I help you today?'
+        mock_response.usage = MagicMock()
+        mock_response.usage.prompt_tokens = 10
+        mock_response.usage.completion_tokens = 15
+        mock_response.usage.total_tokens = 25
+
+        mock_client.chat = MagicMock()
+        mock_client.chat.completions = MagicMock()
+        mock_client.chat.completions.create = AsyncMock(return_value=mock_response)
+
+        provider = OpenAIProvider(mock_client, 'gpt-3.5-turbo', {})
+        messages = [LDMessage(role='user', content='Hello!')]
+        result = await provider.invoke_model(messages)
+
+        mock_client.chat.completions.create.assert_called_once_with(
+            model='gpt-3.5-turbo',
+            messages=[{'role': 'user', 'content': 'Hello!'}],
+        )
+
+        assert result.message.role == 'assistant'
+        assert result.message.content == 'Hello! How can I help you today?'
+        assert result.metrics.success is True
+        assert result.metrics.usage is not None
+        assert result.metrics.usage.total == 25
+        assert result.metrics.usage.input == 10
+        assert result.metrics.usage.output == 15
+
+    @pytest.mark.asyncio
+    async def test_returns_unsuccessful_response_when_no_content(self, mock_client):
+        """Should return unsuccessful response when no content in response."""
+        mock_response = MagicMock()
+        mock_response.choices = [MagicMock()]
+        mock_response.choices[0].message = MagicMock()
+        mock_response.choices[0].message.content = None
+        mock_response.usage = None
+
+        mock_client.chat = MagicMock()
+        mock_client.chat.completions = MagicMock()
+        mock_client.chat.completions.create = AsyncMock(return_value=mock_response)
+
+        provider = OpenAIProvider(mock_client, 'gpt-3.5-turbo', {})
+        messages = [LDMessage(role='user', content='Hello!')]
+        result = await provider.invoke_model(messages)
+
+        assert result.message.role == 'assistant'
+        assert result.message.content == ''
+        assert result.metrics.success is False
+
+    @pytest.mark.asyncio
+    async def test_returns_unsuccessful_response_when_choices_empty(self, mock_client):
+        """Should return unsuccessful response when choices array is empty."""
+        mock_response = MagicMock()
+        mock_response.choices = []
+        mock_response.usage = None
+
+        mock_client.chat = MagicMock()
+        mock_client.chat.completions = MagicMock()
+        mock_client.chat.completions.create = AsyncMock(return_value=mock_response)
+
+        provider = OpenAIProvider(mock_client, 'gpt-3.5-turbo', {})
+        messages = [LDMessage(role='user', content='Hello!')]
+        result = await provider.invoke_model(messages)
+
+        assert result.message.role == 'assistant'
+        assert result.message.content == ''
+        assert result.metrics.success is False
+
+    @pytest.mark.asyncio
+    async def test_returns_unsuccessful_response_when_exception_thrown(self, mock_client):
+        """Should return unsuccessful response when exception is thrown."""
+        mock_client.chat = MagicMock()
+        mock_client.chat.completions = MagicMock()
+        mock_client.chat.completions.create = AsyncMock(side_effect=Exception('API Error'))
+
+        provider = OpenAIProvider(mock_client, 'gpt-3.5-turbo', {})
+        messages = [LDMessage(role='user', content='Hello!')]
+        result = await provider.invoke_model(messages)
+
+        assert result.message.role == 'assistant'
+        assert result.message.content == ''
+        assert result.metrics.success is False
+
+
+class TestInvokeStructuredModel:
+    """Tests for invoke_structured_model instance method."""
+
+    @pytest.fixture
+    def mock_client(self):
+        """Create a mock OpenAI client."""
+        return MagicMock()
+
+    @pytest.mark.asyncio
+    async def test_invokes_openai_with_structured_output(self, mock_client):
+        """Should invoke OpenAI with structured output and return parsed response."""
+        mock_response = MagicMock()
+        mock_response.choices = [MagicMock()]
+        mock_response.choices[0].message = MagicMock()
+        mock_response.choices[0].message.content = '{"name": "John", "age": 30, "city": "New York"}'
+        mock_response.usage = MagicMock()
+        mock_response.usage.prompt_tokens = 20
+        mock_response.usage.completion_tokens = 10
+        mock_response.usage.total_tokens = 30
+
+        mock_client.chat = MagicMock()
+        mock_client.chat.completions = MagicMock()
+        mock_client.chat.completions.create = AsyncMock(return_value=mock_response)
+
+        provider = OpenAIProvider(mock_client, 'gpt-3.5-turbo', {})
+        messages = [LDMessage(role='user', content='Tell me about a person')]
+        response_structure = {
+            'type': 'object',
+            'properties': {
+                'name': {'type': 'string'},
+                'age': {'type': 'number'},
+                'city': {'type': 'string'},
+            },
+            'required': ['name', 'age', 'city'],
+        }
+
+        result = await provider.invoke_structured_model(messages, response_structure)
+
+        assert result.data == {'name': 'John', 'age': 30, 'city': 'New York'}
+        assert result.raw_response == '{"name": "John", "age": 30, "city": "New York"}'
+        assert result.metrics.success is True
+        assert result.metrics.usage is not None
+        assert result.metrics.usage.total == 30
+        assert result.metrics.usage.input == 20
+        assert result.metrics.usage.output == 10
+
+    @pytest.mark.asyncio
+    async def test_returns_unsuccessful_when_no_content_in_structured_response(self, mock_client):
+        """Should return unsuccessful response when no content in structured response."""
+        mock_response = MagicMock()
+        mock_response.choices = [MagicMock()]
+        mock_response.choices[0].message = MagicMock()
+        mock_response.choices[0].message.content = None
+        mock_response.usage = None
+
+        mock_client.chat = MagicMock()
+        mock_client.chat.completions = MagicMock()
+        mock_client.chat.completions.create = AsyncMock(return_value=mock_response)
+
+        provider = OpenAIProvider(mock_client, 'gpt-3.5-turbo', {})
+        messages = [LDMessage(role='user', content='Tell me about a person')]
+        response_structure = {'type': 'object'}
+
+        result = await provider.invoke_structured_model(messages, response_structure)
+
+        assert result.data == {}
+        assert result.raw_response == ''
+        assert result.metrics.success is False
+
+    @pytest.mark.asyncio
+    async def test_handles_json_parsing_errors(self, mock_client):
+        """Should handle JSON parsing errors gracefully."""
+        mock_response = MagicMock()
+        mock_response.choices = [MagicMock()]
+        mock_response.choices[0].message = MagicMock()
+        mock_response.choices[0].message.content = 'invalid json content'
+        mock_response.usage = MagicMock()
+        mock_response.usage.prompt_tokens = 10
+        mock_response.usage.completion_tokens = 5
+        mock_response.usage.total_tokens = 15
+
+        mock_client.chat = MagicMock()
+        mock_client.chat.completions = MagicMock()
+        mock_client.chat.completions.create = AsyncMock(return_value=mock_response)
+
+        provider = OpenAIProvider(mock_client, 'gpt-3.5-turbo', {})
+        messages = [LDMessage(role='user', content='Tell me about a person')]
+        response_structure = {'type': 'object'}
+
+        result = await provider.invoke_structured_model(messages, response_structure)
+
+        assert result.data == {}
+        assert result.raw_response == 'invalid json content'
+        assert result.metrics.success is False
+        assert result.metrics.usage is not None
+        assert result.metrics.usage.total == 15
+
+    @pytest.mark.asyncio
+    async def test_returns_unsuccessful_response_when_exception_thrown(self, mock_client):
+        """Should return unsuccessful response when exception is thrown."""
+        mock_client.chat = MagicMock()
+        mock_client.chat.completions = MagicMock()
+        mock_client.chat.completions.create = AsyncMock(side_effect=Exception('API Error'))
+
+        provider = OpenAIProvider(mock_client, 'gpt-3.5-turbo', {})
+        messages = [LDMessage(role='user', content='Tell me about a person')]
+        response_structure = {'type': 'object'}
+
+        result = await provider.invoke_structured_model(messages, response_structure)
+
+        assert result.data == {}
+        assert result.raw_response == ''
+        assert result.metrics.success is False
+
+
+class TestGetClient:
+    """Tests for get_client instance method."""
+
+    def test_returns_underlying_client(self):
+        """Should return the underlying OpenAI client."""
+        mock_client = MagicMock()
+        provider = OpenAIProvider(mock_client, 'gpt-3.5-turbo', {})
+
+        assert provider.get_client() is mock_client
+
+
+class TestCreate:
+    """Tests for create static factory method."""
+
+    @pytest.mark.asyncio
+    async def test_creates_provider_with_correct_model_and_parameters(self):
+        """Should create OpenAIProvider with correct model and parameters."""
+        mock_ai_config = MagicMock()
+        mock_ai_config.to_dict.return_value = {
+            'model': {
+                'name': 'gpt-4',
+                'parameters': {
+                    'temperature': 0.7,
+                    'max_tokens': 1000,
+                },
+            },
+            'provider': {'name': 'openai'},
+        }
+
+        with patch('ldai_openai.openai_provider.AsyncOpenAI') as mock_openai_class:
+            mock_client = MagicMock()
+            mock_openai_class.return_value = mock_client
+
+            result = await OpenAIProvider.create(mock_ai_config)
+
+            assert isinstance(result, OpenAIProvider)
+            assert result._model_name == 'gpt-4'
+            assert result._parameters == {'temperature': 0.7, 'max_tokens': 1000}
+
+    @pytest.mark.asyncio
+    async def test_handles_missing_model_config(self):
+        """Should handle missing model configuration."""
+        mock_ai_config = MagicMock()
+        mock_ai_config.to_dict.return_value = {}
+
+        with patch('ldai_openai.openai_provider.AsyncOpenAI') as mock_openai_class:
+            mock_client = MagicMock()
+            mock_openai_class.return_value = mock_client
+
+            result = await OpenAIProvider.create(mock_ai_config)
+
+            assert isinstance(result, OpenAIProvider)
+            assert result._model_name == ''
+            assert result._parameters == {}
+
diff --git a/release-please-config.json b/release-please-config.json
index 9852902..e6b6bbd 100644
--- a/release-please-config.json
+++ b/release-please-config.json
@@ -17,6 +17,14 @@
       "include-v-in-tag": false,
       "extra-files": ["src/ldai_langchain/__init__.py"],
       "component": "launchdarkly-server-sdk-ai-langchain"
-    }
+    },
+    "packages/ai-providers/server-ai-openai": {
+      "release-type": "python",
+      "versioning": "default",
+      "bump-minor-pre-major": true,
+      "include-v-in-tag": false,
+      "extra-files": ["src/ldai_openai/__init__.py"],
+      "component": "launchdarkly-server-sdk-ai-openai"
+    }
   }
 }