Skip to content

Commit 9d199fd

Browse files
removing google dependency
1 parent e894a8d commit 9d199fd

File tree

6 files changed: +24 additions, -133 deletions

Package.resolved

Lines changed: 2 additions & 11 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

Package.swift

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,6 @@ let package = Package(
1919
// Dependencies declare other packages that this package depends on.
2020
.package(url: "https://github.com/jamesrochabrun/SwiftOpenAI", branch: "main"),
2121
.package(url: "https://github.com/jamesrochabrun/SwiftAnthropic", branch: "main"),
22-
.package(url: "https://github.com/google/generative-ai-swift", branch: "main"),
2322
],
2423
targets: [
2524
// Targets are the basic building blocks of a package, defining a module or a test suite.
@@ -29,7 +28,6 @@ let package = Package(
2928
dependencies: [
3029
.product(name: "SwiftOpenAI", package: "SwiftOpenAI"),
3130
.product(name: "SwiftAnthropic", package: "SwiftAnthropic"),
32-
.product(name: "GoogleGenerativeAI", package: "generative-ai-swift")
3331
]),
3432
.testTarget(
3533
name: "PolyAITests",

Sources/PolyAI/Interfaces/Parameters/LLMParameter.swift

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@ public enum LLMParameter {
3131
/// - model: The specific model of Gemini to use.
3232
/// - messages: An array of messages to send to the model.
3333
/// - maxTokens: The maximum number of tokens to generate.
34-
case gemini(model: String, messages: [LLMMessage], maxTokens: Int)
34+
case gemini(model: String, messages: [LLMMessage], maxTokens: Int? = nil)
3535

3636
/// Represents a configuration for interacting with Gemini's models.
3737
/// - Parameters:

Sources/PolyAI/Interfaces/Response/Message/LLMMessageResponse+Gemini.swift

Lines changed: 0 additions & 45 deletions
This file was deleted.

Sources/PolyAI/Interfaces/Response/Stream/LLMMessageStreamResponse+Gemini.swift

Lines changed: 0 additions & 18 deletions
This file was deleted.

Sources/PolyAI/Service/DefaultPolyAIService.swift

Lines changed: 21 additions & 56 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,6 @@
66
//
77

88
import Foundation
9-
import GoogleGenerativeAI
109
import SwiftAnthropic
1110
import SwiftOpenAI
1211

@@ -16,20 +15,12 @@ enum PolyAIError: Error {
1615
case missingLLMConfiguration(String)
1716
}
1817

19-
// MARK: Gemini specific
20-
21-
extension GenerativeModel {
22-
public struct Configuration {
23-
let apiKey: String
24-
}
25-
}
26-
2718
struct DefaultPolyAIService: PolyAIService {
2819

2920
private var openAIService: OpenAIService?
3021
private var ollamaOpenAIServiceCompatible: OpenAIService?
3122
private var anthropicService: AnthropicService?
32-
private var gemini: GenerativeModel.Configuration?
23+
private var gemini: OpenAIService?
3324

3425
init(configurations: [LLMConfiguration])
3526
{
@@ -51,7 +42,14 @@ struct DefaultPolyAIService: PolyAIService {
5142
anthropicService = AnthropicServiceFactory.service(apiKey: apiKey, betaHeaders: betaHeaders, configuration: configuration)
5243

5344
case .gemini(let apiKey):
54-
gemini = .init(apiKey: apiKey)
45+
let baseURL = "https://generativelanguage.googleapis.com"
46+
let version = "v1beta"
47+
48+
let service = OpenAIServiceFactory.service(
49+
apiKey: apiKey,
50+
overrideBaseURL: baseURL,
51+
overrideVersion: version)
52+
gemini = service
5553

5654
case .ollama(let url):
5755
ollamaOpenAIServiceCompatible = OpenAIServiceFactory.service(baseURL: url)
@@ -93,37 +91,13 @@ struct DefaultPolyAIService: PolyAIService {
9391
return try await anthropicService.createMessage(messageParameter)
9492

9593
case .gemini(let model, let messages, let maxTokens):
96-
guard let gemini else {
97-
throw PolyAIError.missingLLMConfiguration("You Must provide a valid configuration for the \(parameter.llmService) API")
98-
}
99-
100-
// Extract system message if present
101-
let systemInstruction: ModelContent?
102-
if let systemMessage = messages.first(where: { $0.role == "system" })?.content {
103-
systemInstruction = ModelContent(parts: [.text(systemMessage)])
104-
} else {
105-
systemInstruction = nil
106-
}
107-
108-
// Create the model with system instruction
109-
let generativeModel = GenerativeModel(
110-
name: model,
111-
apiKey: gemini.apiKey,
112-
generationConfig: .init(GenerationConfig(maxOutputTokens: maxTokens)),
113-
systemInstruction: systemInstruction
114-
)
115-
116-
// Convert messages to ModelContent array for chat history
117-
let chatHistory = messages.filter { $0.role != "system" }.map { message in
118-
ModelContent(
119-
role: message.role,
120-
parts: [.text(message.content)]
121-
)
122-
}
123-
124-
// Create chat with history and send the last message
125-
let chat = generativeModel.startChat(history: chatHistory)
126-
return try await chat.sendMessage("") // Empty message since history contains everything
94+
guard let gemini else {
95+
throw PolyAIError.missingLLMConfiguration("You Must provide a valid configuration for the \(parameter.llmService) API")
96+
}
97+
let messageParams: [SwiftOpenAI.ChatCompletionParameters.Message] = messages.map { .init(role: .init(rawValue: $0.role) ?? .user, content: .text($0.content)) }
98+
let messageParameter = ChatCompletionParameters(messages: messageParams, model: .custom(model), maxTokens: maxTokens)
99+
return try await gemini.startChat(parameters: messageParameter)
100+
127101
case .ollama(let model, let messages, let maxTokens):
128102
guard let ollamaOpenAIServiceCompatible else {
129103
throw PolyAIError.missingLLMConfiguration("You Must provide a valid configuration for the \(parameter.llmService) API")
@@ -146,7 +120,6 @@ struct DefaultPolyAIService: PolyAIService {
146120
let messageParams: [SwiftOpenAI.ChatCompletionParameters.Message] = messages.map { .init(role: .init(rawValue: $0.role) ?? .user, content: .text($0.content)) }
147121
let messageParameter = ChatCompletionParameters(messages: messageParams, model: model, maxTokens: maxTokens)
148122

149-
150123
let stream = try await openAIService.startStreamedChat(parameters: messageParameter)
151124
return try mapToLLMMessageStreamResponse(stream: stream)
152125

@@ -173,20 +146,12 @@ struct DefaultPolyAIService: PolyAIService {
173146
guard let gemini else {
174147
throw PolyAIError.missingLLMConfiguration("You Must provide a valid configuration for the \(parameter.llmService) API")
175148
}
176-
let systemInstruction: ModelContent?
177-
if let systemMessage = messages.first(where: { message in
178-
message.role == "system"
179-
})?.content {
180-
systemInstruction = ModelContent(parts: [.text(systemMessage)])
181-
} else {
182-
systemInstruction = nil
183-
}
184-
let generativeModel = GenerativeModel(name: model, apiKey: gemini.apiKey, generationConfig: .init(GenerationConfig(maxOutputTokens: maxTokens)), systemInstruction: systemInstruction)
185-
let userMessage = messages.first { message in
186-
message.role == "user"
187-
}?.content ?? ""
188-
let stream = generativeModel.generateContentStream(userMessage)
149+
let messageParams: [SwiftOpenAI.ChatCompletionParameters.Message] = messages.map { .init(role: .init(rawValue: $0.role) ?? .user, content: .text($0.content)) }
150+
let messageParameter = ChatCompletionParameters(messages: messageParams, model: .custom(model), maxTokens: maxTokens)
151+
152+
let stream = try await gemini.startStreamedChat(parameters: messageParameter)
189153
return try mapToLLMMessageStreamResponse(stream: stream)
154+
190155
case .ollama(let model, let messages, let maxTokens):
191156
guard let ollamaOpenAIServiceCompatible else {
192157
throw PolyAIError.missingLLMConfiguration("You Must provide a valid configuration for the \(parameter.llmService) API")

0 commit comments

Comments (0)