improve: adds better error handling (#12)
kevinhermawan authored Oct 26, 2024
1 parent f964c6d commit 3ec2afb
Showing 5 changed files with 271 additions and 50 deletions.
29 changes: 29 additions & 0 deletions README.md
@@ -236,6 +236,35 @@ Task {

To learn more about structured outputs, check out the [OpenAI documentation](https://platform.openai.com/docs/guides/structured-outputs/introduction).

### Error Handling

`LLMChatOpenAI` provides structured error handling through the `LLMChatOpenAIError` enum. This enum contains three cases that represent different types of errors you might encounter:

```swift
let messages = [
    ChatMessage(role: .system, content: "You are a helpful assistant."),
    ChatMessage(role: .user, content: "What is the capital of Indonesia?")
]

do {
    let completion = try await chat.send(model: "gpt-4o", messages: messages)

    print(completion.choices.first?.message.content ?? "No response")
} catch let error as LLMChatOpenAIError {
    switch error {
    case .serverError(let message):
        // Handle server-side errors (e.g., invalid API key, rate limits)
        print("Server Error: \(message)")
    case .networkError(let error):
        // Handle network-related errors (e.g., no internet connection)
        print("Network Error: \(error.localizedDescription)")
    case .badServerResponse:
        // Handle invalid server responses
        print("Invalid response received from server")
    }
}
```
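
The same pattern applies when streaming. Below is a minimal sketch (assuming each `ChatCompletionChunk` exposes its incremental text via `choices.first?.delta.content`):

```swift
do {
    for try await chunk in chat.stream(model: "gpt-4o", messages: messages) {
        // Print each incremental piece of the response as it arrives.
        if let content = chunk.choices.first?.delta.content {
            print(content, terminator: "")
        }
    }
} catch let error as LLMChatOpenAIError {
    switch error {
    case .serverError(let message):
        print("Server Error: \(message)")
    case .networkError(let error):
        print("Network Error: \(error.localizedDescription)")
    case .badServerResponse:
        print("Invalid response received from server")
    }
}
```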

## Related Packages

- [swift-ai-model-retriever](https://github.com/kevinhermawan/swift-ai-model-retriever)
29 changes: 29 additions & 0 deletions Sources/LLMChatOpenAI/Documentation.docc/Documentation.md
@@ -207,6 +207,35 @@ Task {

To learn more about structured outputs, check out the [OpenAI documentation](https://platform.openai.com/docs/guides/structured-outputs/introduction).

### Error Handling

``LLMChatOpenAI`` provides structured error handling through the ``LLMChatOpenAIError`` enum. This enum contains three cases that represent different types of errors you might encounter:

```swift
let messages = [
    ChatMessage(role: .system, content: "You are a helpful assistant."),
    ChatMessage(role: .user, content: "What is the capital of Indonesia?")
]

do {
    let completion = try await chat.send(model: "gpt-4o", messages: messages)

    print(completion.choices.first?.message.content ?? "No response")
} catch let error as LLMChatOpenAIError {
    switch error {
    case .serverError(let message):
        // Handle server-side errors (e.g., invalid API key, rate limits)
        print("Server Error: \(message)")
    case .networkError(let error):
        // Handle network-related errors (e.g., no internet connection)
        print("Network Error: \(error.localizedDescription)")
    case .badServerResponse:
        // Handle invalid server responses
        print("Invalid response received from server")
    }
}
```
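
The same pattern applies when streaming. Below is a minimal sketch (assuming each ``ChatCompletionChunk`` exposes its incremental text via `choices.first?.delta.content`):

```swift
do {
    for try await chunk in chat.stream(model: "gpt-4o", messages: messages) {
        // Print each incremental piece of the response as it arrives.
        if let content = chunk.choices.first?.delta.content {
            print(content, terminator: "")
        }
    }
} catch let error as LLMChatOpenAIError {
    switch error {
    case .serverError(let message):
        print("Server Error: \(message)")
    case .networkError(let error):
        print("Network Error: \(error.localizedDescription)")
    case .badServerResponse:
        print("Invalid response received from server")
    }
}
```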

## Related Packages

- [swift-ai-model-retriever](https://github.com/kevinhermawan/swift-ai-model-retriever)
124 changes: 78 additions & 46 deletions Sources/LLMChatOpenAI/LLMChatOpenAI.swift
@@ -30,9 +30,23 @@ public struct LLMChatOpenAI {
self.endpoint = endpoint ?? URL(string: "https://api.openai.com/v1/chat/completions")!
self.headers = headers
}

var allHeaders: [String: String] {
var defaultHeaders = [
"Content-Type": "application/json",
"Authorization": "Bearer \(apiKey)"
]

if let headers {
defaultHeaders.merge(headers) { _, new in new }
}

return defaultHeaders
}
}

extension LLMChatOpenAI {
// MARK: - Send
public extension LLMChatOpenAI {
/// Sends a chat completion request.
///
/// - Parameters:
@@ -41,7 +55,7 @@ extension LLMChatOpenAI {
/// - options: Optional ``ChatOptions`` that customize the completion request.
///
/// - Returns: A ``ChatCompletion`` object that contains the API's response.
public func send(model: String, messages: [ChatMessage], options: ChatOptions? = nil) async throws -> ChatCompletion {
func send(model: String, messages: [ChatMessage], options: ChatOptions? = nil) async throws -> ChatCompletion {
let body = RequestBody(stream: false, model: model, messages: messages, options: options)

return try await performRequest(with: body)
@@ -57,7 +71,7 @@ extension LLMChatOpenAI {
/// - Returns: A ``ChatCompletion`` object that contains the API's response.
///
/// - Note: This method enables fallback functionality when using OpenRouter. For other providers, only the first model in the array will be used.
public func send(models: [String], messages: [ChatMessage], options: ChatOptions? = nil) async throws -> ChatCompletion {
func send(models: [String], messages: [ChatMessage], options: ChatOptions? = nil) async throws -> ChatCompletion {
let body: RequestBody

if isSupportFallbackModel {
@@ -68,7 +82,10 @@

return try await performRequest(with: body)
}
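
// Usage sketch (hypothetical model IDs; the fallback order only takes
// effect on OpenRouter-compatible endpoints):
//
// let completion = try await chat.send(
//     models: ["openai/gpt-4o", "openai/gpt-4o-mini"],
//     messages: messages
// )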

}

// MARK: - Stream
public extension LLMChatOpenAI {
/// Streams a chat completion request.
///
/// - Parameters:
@@ -77,7 +94,7 @@ extension LLMChatOpenAI {
/// - options: Optional ``ChatOptions`` that customize the completion request.
///
/// - Returns: An `AsyncThrowingStream` of ``ChatCompletionChunk`` objects.
public func stream(model: String, messages: [ChatMessage], options: ChatOptions? = nil) -> AsyncThrowingStream<ChatCompletionChunk, Error> {
func stream(model: String, messages: [ChatMessage], options: ChatOptions? = nil) -> AsyncThrowingStream<ChatCompletionChunk, Error> {
let body = RequestBody(stream: true, model: model, messages: messages, options: options)

return performStreamRequest(with: body)
@@ -93,7 +110,7 @@ extension LLMChatOpenAI {
/// - Returns: An `AsyncThrowingStream` of ``ChatCompletionChunk`` objects.
///
/// - Note: This method enables fallback functionality when using OpenRouter. For other providers, only the first model in the array will be used.
public func stream(models: [String], messages: [ChatMessage], options: ChatOptions? = nil) -> AsyncThrowingStream<ChatCompletionChunk, Error> {
func stream(models: [String], messages: [ChatMessage], options: ChatOptions? = nil) -> AsyncThrowingStream<ChatCompletionChunk, Error> {
let body: RequestBody

if isSupportFallbackModel {
@@ -104,22 +121,58 @@

return performStreamRequest(with: body)
}

private func performRequest(with body: RequestBody) async throws -> ChatCompletion {
let request = try createRequest(for: endpoint, with: body)
let (data, response) = try await URLSession.shared.data(for: request)
try validateHTTPResponse(response)
}

// MARK: - Helpers
private extension LLMChatOpenAI {
func createRequest(for url: URL, with body: RequestBody) throws -> URLRequest {
var request = URLRequest(url: url)
request.httpMethod = "POST"
request.httpBody = try JSONEncoder().encode(body)
request.allHTTPHeaderFields = allHeaders

return try JSONDecoder().decode(ChatCompletion.self, from: data)
return request
}

func performRequest(with body: RequestBody) async throws -> ChatCompletion {
do {
let request = try createRequest(for: endpoint, with: body)
let (data, response) = try await URLSession.shared.data(for: request)

// If the body decodes as a structured error payload, surface the server's
// message first, regardless of the HTTP status code.
if let errorResponse = try? JSONDecoder().decode(ChatCompletionError.self, from: data) {
throw LLMChatOpenAIError.serverError(errorResponse.error.message)
}

guard let httpResponse = response as? HTTPURLResponse, 200...299 ~= httpResponse.statusCode else {
throw LLMChatOpenAIError.badServerResponse
}

return try JSONDecoder().decode(ChatCompletion.self, from: data)
} catch let error as LLMChatOpenAIError {
throw error
} catch {
throw LLMChatOpenAIError.networkError(error)
}
}

private func performStreamRequest(with body: RequestBody) -> AsyncThrowingStream<ChatCompletionChunk, Error> {
func performStreamRequest(with body: RequestBody) -> AsyncThrowingStream<ChatCompletionChunk, Error> {
AsyncThrowingStream { continuation in
Task {
do {
let request = try createRequest(for: endpoint, with: body)
let (bytes, response) = try await URLSession.shared.bytes(for: request)
try validateHTTPResponse(response)

// On a non-2xx status, check the first line of the body for a structured
// error message before falling back to badServerResponse.
guard let httpResponse = response as? HTTPURLResponse, 200...299 ~= httpResponse.statusCode else {
for try await line in bytes.lines {
if let data = line.data(using: .utf8), let errorResponse = try? JSONDecoder().decode(ChatCompletionError.self, from: data) {
throw LLMChatOpenAIError.serverError(errorResponse.error.message)
}

break
}

throw LLMChatOpenAIError.badServerResponse
}

for try await line in bytes.lines {
if line.hasPrefix("data: ") {
@@ -138,45 +191,16 @@ extension LLMChatOpenAI {
}

continuation.finish()
} catch {
} catch let error as LLMChatOpenAIError {
continuation.finish(throwing: error)
} catch {
continuation.finish(throwing: LLMChatOpenAIError.networkError(error))
}
}
}
}
}

// MARK: - Helper Methods
private extension LLMChatOpenAI {
var allHeaders: [String: String] {
var defaultHeaders = [
"Content-Type": "application/json",
"Authorization": "Bearer \(apiKey)"
]

if let headers {
defaultHeaders.merge(headers) { _, new in new }
}

return defaultHeaders
}

func createRequest(for url: URL, with body: RequestBody) throws -> URLRequest {
var request = URLRequest(url: url)
request.httpMethod = "POST"
request.httpBody = try JSONEncoder().encode(body)
request.allHTTPHeaderFields = allHeaders

return request
}

func validateHTTPResponse(_ response: URLResponse) throws {
guard let httpResponse = response as? HTTPURLResponse, 200...299 ~= httpResponse.statusCode else {
throw URLError(.badServerResponse)
}
}
}

// MARK: - Supporting Types
private extension LLMChatOpenAI {
struct RequestBody: Encodable {
@@ -228,4 +252,12 @@ private extension LLMChatOpenAI {
case streamOptions = "stream_options"
}
}

struct ChatCompletionError: Codable {
let error: Error

struct Error: Codable {
public let message: String
}
}
}
36 changes: 36 additions & 0 deletions Sources/LLMChatOpenAI/LLMChatOpenAIError.swift
@@ -0,0 +1,36 @@
//
// LLMChatOpenAIError.swift
// LLMChatOpenAI
//
// Created by Kevin Hermawan on 10/27/24.
//

import Foundation

/// An enum that represents errors from the chat completion request.
public enum LLMChatOpenAIError: LocalizedError {
    /// A case that represents a server-side error response.
    ///
    /// - Parameter message: The error message from the server.
    case serverError(String)

    /// A case that represents a network-related error.
    ///
    /// - Parameter error: The underlying network error.
    case networkError(Error)

    /// A case that represents an invalid server response.
    case badServerResponse

    /// A localized message that describes the error.
    public var errorDescription: String? {
        switch self {
        case .serverError(let error):
            return error
        case .networkError(let error):
            return error.localizedDescription
        case .badServerResponse:
            return "Invalid response received from server"
        }
    }
}
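
Since the enum conforms to `LocalizedError`, callers that don't need to distinguish the cases can rely on `localizedDescription` in a plain `catch`; a minimal sketch:

```swift
do {
    let completion = try await chat.send(model: "gpt-4o", messages: messages)
    print(completion.choices.first?.message.content ?? "No response")
} catch {
    // errorDescription feeds localizedDescription for LLMChatOpenAIError values.
    print("Chat failed: \(error.localizedDescription)")
}
```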
