@@ -1,9 +1,16 @@
 import { ANTHROPIC } from '../../globals';
 import { Params } from '../../types/requestBody';
 import { CompletionResponse, ErrorResponse, ProviderConfig } from '../types';
-import { generateInvalidProviderResponseError } from '../utils';
+import {
+  generateInvalidProviderResponseError,
+  transformFinishReason,
+} from '../utils';
+import {
+  ANTHROPIC_STOP_REASON,
+  AnthropicStreamState,
+  AnthropicErrorResponse,
+} from './types';
 import { AnthropicErrorResponseTransform } from './utils';
-import { AnthropicErrorResponse } from './types';
 
 // TODO: this configuration does not enforce the maximum token limit for the input parameter. If you want to enforce this, you might need to add a custom validation function or a max property to the ParameterConfig interface, and then use it in the input configuration. However, this might be complex because the token count is not a simple length check, but depends on the specific tokenization method used by the model.
 
@@ -57,7 +64,7 @@ export const AnthropicCompleteConfig: ProviderConfig = {
 
 interface AnthropicCompleteResponse {
   completion: string;
-  stop_reason: string;
+  stop_reason: ANTHROPIC_STOP_REASON;
   model: string;
   truncated: boolean;
   stop: null | string;
@@ -68,10 +75,20 @@ interface AnthropicCompleteResponse {
 // TODO: The token calculation is wrong atm
 export const AnthropicCompleteResponseTransform: (
   response: AnthropicCompleteResponse | AnthropicErrorResponse,
-  responseStatus: number
-) => CompletionResponse | ErrorResponse = (response, responseStatus) => {
-  if (responseStatus !== 200 && 'error' in response) {
-    return AnthropicErrorResponseTransform(response);
+  responseStatus: number,
+  responseHeaders: Headers,
+  strictOpenAiCompliance: boolean
+) => CompletionResponse | ErrorResponse = (
+  response,
+  responseStatus,
+  _responseHeaders,
+  strictOpenAiCompliance
+) => {
+  if (responseStatus !== 200) {
+    const errorResponse = AnthropicErrorResponseTransform(
+      response as AnthropicErrorResponse
+    );
+    if (errorResponse) return errorResponse;
   }
 
   if ('completion' in response) {
@@ -86,7 +103,10 @@ export const AnthropicCompleteResponseTransform: (
           text: response.completion,
           index: 0,
           logprobs: null,
-          finish_reason: response.stop_reason,
+          finish_reason: transformFinishReason(
+            response.stop_reason,
+            strictOpenAiCompliance
+          ),
         },
       ],
     };
@@ -96,8 +116,16 @@ export const AnthropicCompleteResponseTransform: (
 };
 
 export const AnthropicCompleteStreamChunkTransform: (
-  response: string
-) => string | undefined = (responseChunk) => {
+  response: string,
+  fallbackId: string,
+  streamState: AnthropicStreamState,
+  strictOpenAiCompliance: boolean
+) => string | undefined = (
+  responseChunk,
+  fallbackId,
+  streamState,
+  strictOpenAiCompliance
+) => {
   let chunk = responseChunk.trim();
   if (chunk.startsWith('event: ping')) {
     return;
@@ -110,6 +138,9 @@ export const AnthropicCompleteStreamChunkTransform: (
     return chunk;
   }
   const parsedChunk: AnthropicCompleteResponse = JSON.parse(chunk);
+  const finishReason = parsedChunk.stop_reason
+    ? transformFinishReason(parsedChunk.stop_reason, strictOpenAiCompliance)
+    : null;
   return (
     `data: ${JSON.stringify({
       id: parsedChunk.log_id,
@@ -122,7 +153,7 @@ export const AnthropicCompleteStreamChunkTransform: (
           text: parsedChunk.completion,
           index: 0,
           logprobs: null,
-          finish_reason: parsedChunk.stop_reason,
+          finish_reason: finishReason,
         },
       ],
     })}` + '\n\n'
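
For reference, a minimal sketch of the kind of mapping a helper like transformFinishReason could perform. The type name, mapping table, and fallback below are illustrative assumptions, not the actual implementation in ../utils:

// Illustrative sketch only: assumes transformFinishReason normalizes
// provider-native stop reasons to OpenAI's finish_reason enum when
// strictOpenAiCompliance is set, and passes them through otherwise.
type OpenAiFinishReason = 'stop' | 'length' | 'tool_calls' | 'content_filter';

// Hypothetical mapping from Anthropic stop reasons to OpenAI finish reasons.
const FINISH_REASON_MAP: Record<string, OpenAiFinishReason> = {
  end_turn: 'stop',
  stop_sequence: 'stop',
  max_tokens: 'length',
  tool_use: 'tool_calls',
};

function transformFinishReasonSketch(
  stopReason: string,
  strictOpenAiCompliance: boolean
): string {
  if (!strictOpenAiCompliance) return stopReason;
  return FINISH_REASON_MAP[stopReason] ?? 'stop';
}

Under this reading, the null guard added in the stream transform makes sense: intermediate completion events carry stop_reason: null, so only the final chunk resolves to a concrete finish_reason.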
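
On the first TODO above (enforcing an input token ceiling): a hedged sketch of the validation hook the comment itself suggests. ParameterConfig has no validate or max property today, and the four-characters-per-token heuristic is a rough stand-in for the model's real tokenizer:

// Hypothetical only: a validate hook on a parameter config, per the TODO's
// own suggestion. MAX_INPUT_TOKENS and the chars/4 estimate are assumptions.
const MAX_INPUT_TOKENS = 100_000;

const promptParamSketch = {
  param: 'prompt',
  required: true,
  // Rough token estimate; an accurate check would depend on the specific
  // tokenization method used by the model, as the TODO notes.
  validate: (value: string): boolean =>
    Math.ceil(value.length / 4) <= MAX_INPUT_TOKENS,
};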