|
| 1 | +use std::pin::Pin; |
| 2 | + |
| 3 | +/// Gemini types (Generally user defined types) for Gemini API |
| 4 | +use async_openai::{ |
| 5 | + error::OpenAIError, |
| 6 | + types::{ChatChoice, ChatChoiceStream, CompletionUsage, Image}, |
| 7 | +}; |
| 8 | +use futures::Stream; |
| 9 | +use serde::{Deserialize, Serialize}; |
| 10 | + |
| 11 | +#[derive(Debug, Serialize, Deserialize)] |
| 12 | +pub struct GeminiModel { |
| 13 | + pub id: String, |
| 14 | + pub object: String, |
| 15 | + pub owned_by: String, |
| 16 | +} |
| 17 | + |
| 18 | +#[derive(Debug, Serialize, Deserialize)] |
| 19 | +pub struct ListGeminiModelResponse { |
| 20 | + pub data: Vec<GeminiModel>, |
| 21 | + pub object: String, |
| 22 | +} |
| 23 | + |
/// Represents a streamed chunk of a chat completion response returned by the model,
/// based on the provided input.
#[derive(Debug, Deserialize, Clone, PartialEq, Serialize)]
pub struct GeminiCreateChatCompletionStreamResponse {
    /// A list of chat completion choices. Can contain more than one element if `n`
    /// is greater than 1. Can also be empty for the last chunk if you set
    /// `stream_options: {"include_usage": true}`.
    pub choices: Vec<ChatChoiceStream>,

    /// The Unix timestamp (in seconds) of when the chat completion was created.
    /// Each chunk has the same timestamp.
    pub created: u32,

    /// The model used to generate the completion.
    pub model: String,

    /// The object type, which is always `chat.completion.chunk`.
    pub object: String,

    /// An optional field that will only be present when you set
    /// `stream_options: {"include_usage": true}` in your request. When present, it
    /// contains a null value except for the last chunk, which contains the token
    /// usage statistics for the entire request.
    pub usage: Option<CompletionUsage>,
}
| 42 | + |
/// A pinned, boxed stream of chat completion responses.
///
/// Each item is either a [`GeminiCreateChatCompletionStreamResponse`] chunk or an
/// [`OpenAIError`]. The `Send` bound allows the stream to be moved across threads
/// (e.g. polled from a spawned task).
pub type GeminiChatCompletionResponseStream = Pin<
    Box<dyn Stream<Item = Result<GeminiCreateChatCompletionStreamResponse, OpenAIError>> + Send>,
>;
| 47 | + |
/// Represents a chat completion response returned by the model, based on the
/// provided input.
#[derive(Debug, Deserialize, Clone, PartialEq, Serialize)]
pub struct GeminiCreateChatCompletionResponse {
    /// A list of chat completion choices. Can be more than one if `n` is greater than 1.
    pub choices: Vec<ChatChoice>,
    /// The Unix timestamp (in seconds) of when the chat completion was created.
    pub created: u32,
    /// The model used for the chat completion.
    pub model: String,
    /// The object type, which is always `chat.completion`.
    pub object: String,
    /// Token usage statistics for the entire request, when provided by the API.
    pub usage: Option<CompletionUsage>,
}
| 62 | + |
/// Response payload for an image-generation request.
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
pub struct GeminiImagesResponse {
    /// The generated images. Each entry is wrapped in `Arc`, so cloning the
    /// response only bumps reference counts rather than copying image payloads.
    pub data: Vec<std::sync::Arc<Image>>,
}
| 67 | + |
/// Represents an embedding vector returned by the embedding endpoint.
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
pub struct GeminiEmbedding {
    /// The object type, which is always "embedding".
    pub object: String,
    /// The embedding vector, which is a list of floats. The length of the vector
    /// is presumably model-dependent — confirm against the embedding model's docs.
    pub embedding: Vec<f32>,
}
| 76 | + |
/// Response returned by the embedding endpoint.
#[derive(Debug, Deserialize, Clone, PartialEq, Serialize)]
pub struct GeminiCreateEmbeddingResponse {
    /// The object type (presumably `"list"` — confirm against the API).
    pub object: String,
    /// The name of the model used to generate the embedding.
    pub model: String,
    /// The list of embeddings generated by the model.
    pub data: Vec<GeminiEmbedding>,
}
0 commit comments