From 0dd719575186a4ecb537033bf921124194ea32e2 Mon Sep 17 00:00:00 2001
From: Alem Tuzlak
Date: Wed, 24 Dec 2025 15:19:07 +0100
Subject: [PATCH] feat: extend adapter and create custom model utilities

---
 docs/guides/extend-adapter.md                 | 170 ++++++++++++
 packages/typescript/ai-anthropic/src/index.ts |   1 +
 packages/typescript/ai-gemini/src/index.ts    |   1 +
 packages/typescript/ai-openai/src/index.ts    |   1 +
 packages/typescript/ai/src/extend-adapter.ts  | 182 +++++++++++++
 packages/typescript/ai/src/index.ts           |   4 +
 .../ai/tests/extend-adapter.test.ts           | 253 ++++++++++++++++++
 7 files changed, 612 insertions(+)
 create mode 100644 docs/guides/extend-adapter.md
 create mode 100644 packages/typescript/ai/src/extend-adapter.ts
 create mode 100644 packages/typescript/ai/tests/extend-adapter.test.ts

diff --git a/docs/guides/extend-adapter.md b/docs/guides/extend-adapter.md
new file mode 100644
index 00000000..894c4839
--- /dev/null
+++ b/docs/guides/extend-adapter.md
@@ -0,0 +1,170 @@
+# Extending Adapters with Custom Models
+
+The `extendAdapter` utility lets you extend an existing adapter factory (such as `openaiText` or `anthropicText`) with custom model names while keeping full type safety for input modalities and provider options.
+
+## Basic Usage
+
+```typescript
+import { chat, createModel, extendAdapter } from '@tanstack/ai'
+import { openaiText } from '@tanstack/ai-openai'
+
+// Define your custom models using the createModel helper
+const myOpenaiModel = createModel('my-fine-tuned-gpt4', ['text', 'image'])
+const myOpenaiModelButCooler = createModel('my-fine-tuned-gpt5', ['text', 'image'])
+
+// Create an extended adapter factory - simple API, no type parameters needed
+const myOpenai = extendAdapter(openaiText, [
+  myOpenaiModel,
+  myOpenaiModelButCooler,
+])
+
+// Use with original models - full type inference preserved
+const gpt4Adapter = myOpenai('gpt-4o')
+
+// Use with custom models - your custom types are applied
+const customAdapter = myOpenai('my-fine-tuned-gpt4')
+
+// Works seamlessly with chat()
+const stream = chat({
+  adapter: myOpenai('my-fine-tuned-gpt4'),
+  messages: [{ role: 'user', content: 'Hello!' }]
+})
+```
+
+## The `createModel` Helper
+
+The `createModel` function provides a clean way to define custom models with full type inference:
+
+```typescript
+import { createModel } from '@tanstack/ai'
+
+// Arguments define the model name and its input modalities
+const model = createModel(
+  'my-model',       // model name (literal type inferred)
+  ['text', 'image'] // input modalities (tuple type inferred)
+)
+```
+
+## Model Definition Structure
+
+Each custom model definition has three properties: `name` (the model identifier), `input` (the supported input modalities), and `modelOptions` (a type-level brand that `createModel` fills in for you).
+
+### Defining Input Modalities
+
+The `input` array specifies which content types your model supports:
+
+```typescript
+const models = [
+  createModel('text-only-model', ['text']),
+  createModel('multimodal-model', ['text', 'image', 'audio']),
+] as const
+```
+
+Available modalities: `'text'`, `'image'`, `'audio'`, `'video'`, `'document'`
+
+## Preserving Original Factory Behavior
+
+`extendAdapter` fully preserves the original factory's signature, including any configuration parameters:
+
+```typescript
+import { createModel, extendAdapter } from '@tanstack/ai'
+import { openaiText } from '@tanstack/ai-openai'
+
+const customModels = [createModel('my-fine-tuned-gpt4', ['text', 'image'])] as const
+
+const myOpenai = extendAdapter(openaiText, customModels)
+
+// Config parameter is preserved
+const adapter = myOpenai('my-fine-tuned-gpt4', {
+  baseURL: 'https://my-proxy.com/v1',
+  timeout: 30000
+})
+```
+
+## Type Safety
+
+The extended adapter provides full type safety:
+
+```typescript
+import { extendAdapter, createModel } from '@tanstack/ai'
+import { openaiText } from '@tanstack/ai-openai'
+
+const myOpenai = extendAdapter(openaiText, [createModel('custom-model', ['text'])])
+
+// ✅ Original models work with their original types
+const a1 = myOpenai('gpt-4o')
+
+// ✅ Custom models work with your defined types
+const a2 = myOpenai('custom-model')
+
+// ❌ Type error: invalid model name
+// (assign the result to a variable so TypeScript fully checks the argument)
+const invalid = myOpenai('nonexistent-model') // TypeScript error!
+```
+
+## Runtime Behavior
+
+At runtime, `extendAdapter` simply passes through to the original factory:
+
+- No validation is performed on custom model names
+- The original factory receives exactly what you pass
+- This lets the original provider's API handle the model name
+
+This design is intentional. It allows you to:
+
+- Use fine-tuned model names that the provider accepts but TypeScript doesn't know about
+- Proxy requests to different backends that accept custom model identifiers
+- Add type safety without runtime overhead (see the sketch below)
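+
+As an illustration, here is a minimal sketch of that passthrough. The model name `my-proxy-model` is hypothetical, and the snippet assumes an OpenAI-compatible backend as in the rest of this guide:
+
+```typescript
+import { createModel, extendAdapter } from '@tanstack/ai'
+import { openaiText } from '@tanstack/ai-openai'
+
+const myOpenai = extendAdapter(openaiText, [createModel('my-proxy-model', ['text'])])
+
+// No runtime wrapping happens: the extended factory is the original factory,
+// returned with a wider model-name type.
+const adapter = myOpenai('my-proxy-model')
+
+// The model string is forwarded untouched; the provider (or your proxy)
+// decides whether it is actually valid.
+console.log(adapter.model) // 'my-proxy-model'
+```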
+
+## Example: OpenAI-Compatible Proxy
+
+A common use case is typing models for an OpenAI-compatible proxy:
+
+```typescript
+import { extendAdapter, createModel } from '@tanstack/ai'
+import { openaiText } from '@tanstack/ai-openai'
+
+// Models available through your proxy
+const proxyModels = [
+  createModel('llama-3.1-70b', ['text']),
+  createModel('mixtral-8x7b', ['text']),
+] as const
+
+const proxyAdapter = extendAdapter(openaiText, proxyModels)
+
+// Use with your proxy's base URL
+const adapter = proxyAdapter('llama-3.1-70b', {
+  baseURL: 'https://my-llm-proxy.com/v1'
+})
+```
+
+## Example: Fine-tuned Models
+
+Adding type safety for your fine-tuned models:
+
+```typescript
+import { chat, createModel, extendAdapter } from '@tanstack/ai'
+import { anthropicText } from '@tanstack/ai-anthropic'
+
+const fineTunedModels = [
+  createModel('ft:claude-3-opus:my-org:custom-task:abc123', ['text', 'image']),
+] as const
+
+const myAnthropic = extendAdapter(anthropicText, fineTunedModels)
+
+chat({
+  adapter: myAnthropic('ft:claude-3-opus:my-org:custom-task:abc123'),
+  messages: [{ role: 'user', content: 'Analyze this...' }]
+})
+```
diff --git a/packages/typescript/ai-anthropic/src/index.ts b/packages/typescript/ai-anthropic/src/index.ts
index 4bca2e4b..24cbed69 100644
--- a/packages/typescript/ai-anthropic/src/index.ts
+++ b/packages/typescript/ai-anthropic/src/index.ts
@@ -28,6 +28,7 @@ export type {
   AnthropicChatModelProviderOptionsByName,
   AnthropicModelInputModalitiesByName,
 } from './model-meta'
+export { ANTHROPIC_MODELS } from './model-meta'
 export type {
   AnthropicTextMetadata,
   AnthropicImageMetadata,
diff --git a/packages/typescript/ai-gemini/src/index.ts b/packages/typescript/ai-gemini/src/index.ts
index c60ce075..78901ec3 100644
--- a/packages/typescript/ai-gemini/src/index.ts
+++ b/packages/typescript/ai-gemini/src/index.ts
@@ -52,6 +52,7 @@ export {
 } from './adapters/tts'
 
 // Re-export models from model-meta for convenience
+export { GEMINI_MODELS } from './model-meta'
 export { GEMINI_MODELS as GeminiTextModels } from './model-meta'
 export { GEMINI_IMAGE_MODELS as GeminiImageModels } from './model-meta'
 export { GEMINI_TTS_MODELS as GeminiTTSModels } from './model-meta'
diff --git a/packages/typescript/ai-openai/src/index.ts b/packages/typescript/ai-openai/src/index.ts
index a0073b74..0a402337 100644
--- a/packages/typescript/ai-openai/src/index.ts
+++ b/packages/typescript/ai-openai/src/index.ts
@@ -80,6 +80,7 @@ export type {
   OpenAIModelInputModalitiesByName,
 } from './model-meta'
 export {
+  OPENAI_CHAT_MODELS,
   OPENAI_IMAGE_MODELS,
   OPENAI_TTS_MODELS,
   OPENAI_TRANSCRIPTION_MODELS,
diff --git a/packages/typescript/ai/src/extend-adapter.ts b/packages/typescript/ai/src/extend-adapter.ts
new file mode 100644
index 00000000..8547ebef
--- /dev/null
+++ b/packages/typescript/ai/src/extend-adapter.ts
@@ -0,0 +1,182 @@
+import type { Modality } from './types'
+
+// ===========================
+// Extended Model Definition
+// ===========================
+
+/**
+ * Definition for a custom model to add to an adapter.
+ *
+ * @template TName - The model name as a literal string type
+ * @template TInput - Array of supported input modalities
+ * @template TOptions - Provider options type for this model
+ *
+ * @example
+ * ```typescript
+ * const customModels = [
+ *   createModel('my-custom-model', ['text', 'image']),
+ * ] as const
+ * ```
+ */
+export interface ExtendedModelDef<
+  TName extends string = string,
+  TInput extends ReadonlyArray<Modality> = ReadonlyArray<Modality>,
+  TOptions = unknown,
+> {
+  /** The model name identifier */
+  name: TName
+  /** Supported input modalities for this model */
+  input: TInput
+  /** Type brand for provider options - use `{} as YourOptionsType` */
+  modelOptions: TOptions
+}
+
+/**
+ * Creates a custom model definition for use with `extendAdapter`.
+ *
+ * This is a helper function that provides proper type inference without
+ * requiring manual `as const` casts on individual properties.
+ *
+ * @template TName - The model name (inferred from the argument)
+ * @template TInput - The input modalities array (inferred from the argument)
+ *
+ * @param name - The model name identifier (literal string)
+ * @param input - Array of supported input modalities
+ * @returns A properly typed model definition for use with `extendAdapter`
+ *
+ * @example
+ * ```typescript
+ * import { extendAdapter, createModel } from '@tanstack/ai'
+ * import { openaiText } from '@tanstack/ai-openai'
+ *
+ * // Define custom models with full type inference
+ * const customModels = [
+ *   createModel('my-fine-tuned-gpt4', ['text', 'image']),
+ *   createModel('local-llama', ['text']),
+ * ] as const
+ *
+ * const myOpenai = extendAdapter(openaiText, customModels)
+ * ```
+ */
+export function createModel<
+  const TName extends string,
+  const TInput extends ReadonlyArray<Modality>,
+>(name: TName, input: TInput): ExtendedModelDef<TName, TInput> {
+  return {
+    name,
+    input,
+    modelOptions: {} as unknown,
+  }
+}
+
+// ===========================
+// Type Extraction Utilities
+// ===========================
+
+/**
+ * Extract the model name union from an array of model definitions.
+ */
+type ExtractCustomModelNames<TDefs extends ReadonlyArray<ExtendedModelDef>> =
+  TDefs[number]['name']
+
+// ===========================
+// Factory Type Inference
+// ===========================
+
+/**
+ * Infer the model parameter type from an adapter factory function.
+ * For a generic factory such as `<T extends string>(model: T)`, this gets `T`,
+ * which TypeScript treats as the constraint union when used in parameter position.
+ */
+type InferFactoryModels<TFactory> = TFactory extends (
+  model: infer TModel,
+  ...args: Array<any>
+) => any
+  ? TModel extends string
+    ? TModel
+    : string
+  : string
+
+/**
+ * Infer the config parameter type from an adapter factory function.
+ */
+type InferConfig<TFactory> = TFactory extends (
+  model: any,
+  config?: infer TConfig,
+) => any
+  ? TConfig
+  : undefined
+
+/**
+ * Infer the adapter return type from a factory function.
+ */
+type InferAdapterReturn<TFactory> = TFactory extends (
+  ...args: Array<any>
+) => infer TReturn
+  ? TReturn
+  : never
+
+// ===========================
+// extendAdapter Function
+// ===========================
+
+/**
+ * Extends an existing adapter factory with additional custom models.
+ *
+ * The extended adapter accepts both original models (with full original type inference)
+ * and custom models (with types from your definitions).
+ *
+ * At runtime, this simply passes through to the original factory - no validation is performed.
+ * The original factory's signature is fully preserved, including any config parameters.
+ *
+ * @param factory - The original adapter factory function (e.g., `openaiText`, `anthropicText`)
+ * @param _customModels - Array of custom model definitions with `name` and `input`
+ * @returns A new factory function that accepts both original and custom models
+ *
+ * @example
+ * ```typescript
+ * import { extendAdapter, createModel } from '@tanstack/ai'
+ * import { openaiText } from '@tanstack/ai-openai'
+ *
+ * // Define custom models
+ * const customModels = [
+ *   createModel('my-fine-tuned-gpt4', ['text', 'image']),
+ *   createModel('local-llama', ['text']),
+ * ] as const
+ *
+ * // Create extended adapter
+ * const myOpenai = extendAdapter(openaiText, customModels)
+ *
+ * // Use with original models - full type inference preserved
+ * const gpt4 = myOpenai('gpt-4o')
+ *
+ * // Use with custom models
+ * const custom = myOpenai('my-fine-tuned-gpt4')
+ *
+ * // Type error: 'invalid-model' is not a valid model
+ * // myOpenai('invalid-model')
+ *
+ * // Works with chat()
+ * chat({
+ *   adapter: myOpenai('my-fine-tuned-gpt4'),
+ *   messages: [...]
+ * })
+ * ```
+ */
+export function extendAdapter<
+  TFactory extends (...args: Array<any>) => any,
+  const TDefs extends ReadonlyArray<ExtendedModelDef>,
+>(
+  factory: TFactory,
+  _customModels: TDefs,
+): (
+  model: InferFactoryModels<TFactory> | ExtractCustomModelNames<TDefs>,
+  ...args: InferConfig<TFactory> extends undefined
+    ? []
+    : [config?: InferConfig<TFactory>]
+) => InferAdapterReturn<TFactory> {
+  // At runtime, we simply pass through to the original factory.
+  // The _customModels parameter is only used for type inference.
+  // No runtime validation - users are trusted to pass valid model names.
+  return factory as any
+}
diff --git a/packages/typescript/ai/src/index.ts b/packages/typescript/ai/src/index.ts
index 0476457d..63f07780 100644
--- a/packages/typescript/ai/src/index.ts
+++ b/packages/typescript/ai/src/index.ts
@@ -112,3 +112,7 @@ export type {
   ToolResultState,
   JSONParser,
 } from './activities/chat/stream/index'
+
+// Adapter extension utilities
+export { createModel, extendAdapter } from './extend-adapter'
+export type { ExtendedModelDef } from './extend-adapter'
diff --git a/packages/typescript/ai/tests/extend-adapter.test.ts b/packages/typescript/ai/tests/extend-adapter.test.ts
new file mode 100644
index 00000000..96e42ea6
--- /dev/null
+++ b/packages/typescript/ai/tests/extend-adapter.test.ts
@@ -0,0 +1,253 @@
+/**
+ * Tests for the extendAdapter utility.
+ *
+ * Verifies that extendAdapter:
+ * 1. Preserves original model type inference exactly
+ * 2. Adds custom models with correct types
+ * 3. Preserves the factory signature (including the config parameter)
+ * 4. Inherits message metadata from the original adapter
+ */
+import { describe, expect, expectTypeOf, it } from 'vitest'
+import { createModel, extendAdapter } from '../src/extend-adapter'
+import { BaseTextAdapter } from '../src/activities/chat/adapter'
+import { chat } from '../src/activities/chat'
+import type { StreamChunk, TextOptions } from '../src/types'
+import type {
+  StructuredOutputOptions,
+  StructuredOutputResult,
+} from '../src/activities/chat/adapter'
+
+// ===========================
+// Mock Adapter Setup (mimics OpenAI adapter structure)
+// ===========================
+
+const MOCK_MODELS = ['mock-gpt-4', 'mock-gpt-3.5'] as const
+type MockModel = (typeof MOCK_MODELS)[number]
+
+interface MockBaseOptions {
+  temperature?: number
+  maxTokens?: number
+}
+
+interface MockAdvancedOptions extends MockBaseOptions {
+  reasoning?: { effort?: 'low' | 'high' }
+}
+
+// Per-model options map (like OpenAI does)
+type MockProviderOptionsByModel = {
+  'mock-gpt-4': MockAdvancedOptions
+  'mock-gpt-3.5': MockBaseOptions
+}
+
+// Per-model input modalities (like OpenAI does)
+type MockInputModalitiesByModel = {
+  'mock-gpt-4': readonly ['text', 'image']
+  'mock-gpt-3.5': readonly ['text']
+}
+
+type ResolveProviderOptions<TModel extends string> =
+  TModel extends keyof MockProviderOptionsByModel
+    ? MockProviderOptionsByModel[TModel]
+    : MockBaseOptions
+
+type ResolveInputModalities<TModel extends string> =
+  TModel extends keyof MockInputModalitiesByModel
+    ? MockInputModalitiesByModel[TModel]
+    : readonly ['text']
+
+// Mock message metadata
+interface MockMessageMetadataByModality {
+  text: { encoding?: string }
+  image: { detail?: 'auto' | 'low' | 'high' }
+  audio: unknown
+  video: unknown
+  document: unknown
+}
+
+// Mock adapter config
+interface MockAdapterConfig {
+  baseURL?: string
+  timeout?: number
+}
+
+/**
+ * Mock Text Adapter class
+ */
+class MockTextAdapter<TModel extends MockModel> extends BaseTextAdapter<
+  TModel,
+  ResolveProviderOptions<TModel>,
+  ResolveInputModalities<TModel>,
+  MockMessageMetadataByModality
+> {
+  readonly kind = 'text' as const
+  readonly name = 'mock' as const
+
+  constructor(model: TModel, _config?: MockAdapterConfig) {
+    super({}, model)
+  }
+
+  /* eslint-disable @typescript-eslint/require-await */
+  async *chatStream(
+    _options: TextOptions<ResolveProviderOptions<TModel>>,
+  ): AsyncIterable<StreamChunk> {
+    yield {
+      type: 'content',
+      model: this.model,
+      id: 'mock-id',
+      timestamp: Date.now(),
+      delta: 'Hello',
+      content: 'Hello',
+      role: 'assistant',
+    }
+    yield {
+      type: 'done',
+      model: this.model,
+      id: 'mock-id',
+      timestamp: Date.now(),
+      finishReason: 'stop',
+    }
+  }
+  /* eslint-enable @typescript-eslint/require-await */
+
+  /* eslint-disable @typescript-eslint/require-await */
+  async structuredOutput(
+    _options: StructuredOutputOptions<ResolveProviderOptions<TModel>>,
+  ): Promise<StructuredOutputResult<unknown>> {
+    return { data: {}, rawText: '{}' }
+  }
+  /* eslint-enable @typescript-eslint/require-await */
+}
+
+/**
+ * Mock adapter factory function (mimics the openaiText signature)
+ */
+function mockText<TModel extends MockModel>(
+  model: TModel,
+  config?: MockAdapterConfig,
+): MockTextAdapter<TModel> {
+  return new MockTextAdapter(model, config)
+}
+
+// Using the createModel helper - model options fall back to the adapter defaults
+const customModels = [
+  createModel('my-fine-tuned-model', ['text', 'image']),
+  createModel('local-llm', ['text']),
+] as const
+
+// ===========================
+// Tests
+// ===========================
+
+describe('extendAdapter', () => {
+  describe('Basic functionality', () => {
+    it('should create an extended adapter factory', () => {
+      const extendedMock = extendAdapter(mockText, customModels)
+      expect(typeof extendedMock).toBe('function')
+    })
+
+    it('should call the original factory for original models', () => {
+      const extendedMock = extendAdapter(mockText, customModels)
+      const adapter = extendedMock('mock-gpt-4')
+
+      expect(adapter.name).toBe('mock')
+      expect(adapter.model).toBe('mock-gpt-4')
+      expect(adapter.kind).toBe('text')
+    })
+
+    it('should call the original factory for custom models', () => {
+      const extendedMock = extendAdapter(mockText, customModels)
+      // Runtime: passes through to the original factory.
+      // This works at runtime because the original factory accepts any string;
+      // type safety is enforced at compile time.
+      const adapter = extendedMock('my-fine-tuned-model')
+
+      expect(adapter.name).toBe('mock')
+      expect(adapter.model).toBe('my-fine-tuned-model')
+    })
+
+    it('should preserve config parameter passthrough', () => {
+      const extendedMock = extendAdapter(mockText, customModels)
+      const adapter = extendedMock('mock-gpt-4', {
+        baseURL: 'https://custom.api.com',
+      })
+
+      expect(adapter.model).toBe('mock-gpt-4')
+    })
+  })
+
+  describe('Type inference for original models', () => {
+    it('should preserve original model type inference', () => {
+      const extendedMock = extendAdapter(mockText, customModels)
+      const adapter = extendedMock('mock-gpt-4')
+
+      // Model type should be one of the original models (union preserved from the factory return)
+      expectTypeOf(adapter.model).toExtend<MockModel>()
+    })
+
+    it('should allow original models in chat()', () => {
+      const extendedMock = extendAdapter(mockText, customModels)
+
+      // This should compile without errors
+      chat({
+        adapter: extendedMock('mock-gpt-4'),
+        messages: [{ role: 'user', content: 'Hello' }],
+      })
+    })
+  })
+
+  describe('Type inference for custom models', () => {
+    it('should infer the custom model name type', () => {
+      const extendedMock = extendAdapter(mockText, customModels)
+      const adapter = extendedMock('my-fine-tuned-model')
+
+      // The adapter model type is the union of all possible models from the factory return
+      // (expected, since extendAdapter returns the factory's return type)
+      expectTypeOf(adapter.model).toExtend<MockModel>()
+    })
+
+    it('should allow custom models in chat()', () => {
+      const extendedMock = extendAdapter(mockText, customModels)
+
+      // This should compile without errors
+      chat({
+        adapter: extendedMock('my-fine-tuned-model'),
+        messages: [{ role: 'user', content: 'Hello' }],
+      })
+
+      chat({
+        adapter: extendedMock('local-llm'),
+        messages: [{ role: 'user', content: 'Hello' }],
+      })
+    })
+  })
+
+  describe('Model union type', () => {
+    it('should accept both original and custom model names', () => {
+      const extendedMock = extendAdapter(mockText, customModels)
+
+      // All of these should be valid - using void to suppress unused variable warnings
+      void extendedMock('mock-gpt-4')
+      void extendedMock('mock-gpt-3.5')
+      void extendedMock('my-fine-tuned-model')
+      void extendedMock('local-llm')
+    })
+
+    it('should reject invalid model names at the type level', () => {
+      const extendedMock = extendAdapter(mockText, customModels)
+
+      // Using assignment to force TypeScript to fully type-check the argument
+      // @ts-expect-error - 'invalid-model' is not a valid model name
+      const _invalid = extendedMock('invalid-model')
+    })
+  })
+
+  describe('Empty custom models', () => {
+    it('should work with an empty custom models array', () => {
+      const extendedMock = extendAdapter(mockText, [] as const)
+
+      // Should still work with original models
+      const adapter = extendedMock('mock-gpt-4')
+      expect(adapter.model).toBe('mock-gpt-4')
+    })
+  })
+})
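+
+// A minimal passthrough sanity check (sketch): extendAdapter does no runtime
+// work, so the extended factory should produce the same adapter shape as
+// calling mockText directly.
+describe('extendAdapter runtime passthrough (sketch)', () => {
+  it('should match the adapter produced by the original factory', () => {
+    const extendedMock = extendAdapter(mockText, customModels)
+    const direct = mockText('mock-gpt-4')
+    const extended = extendedMock('mock-gpt-4')
+
+    expect(extended.name).toBe(direct.name)
+    expect(extended.model).toBe(direct.model)
+    expect(extended.kind).toBe(direct.kind)
+  })
+})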