diff --git a/Playground/Playground.xcodeproj/project.xcworkspace/xcshareddata/swiftpm/Package.resolved b/Playground/Playground.xcodeproj/project.xcworkspace/xcshareddata/swiftpm/Package.resolved index e5c66c9..0314ba2 100644 --- a/Playground/Playground.xcodeproj/project.xcworkspace/xcshareddata/swiftpm/Package.resolved +++ b/Playground/Playground.xcodeproj/project.xcworkspace/xcshareddata/swiftpm/Package.resolved @@ -1,5 +1,5 @@ { - "originHash" : "9babdbe0d420e7da0e4b1fdca252d3d06b03638f979b3d4ce55ccda4d14b84d3", + "originHash" : "53903b86839844f0235d6409aeff93ce3322d067fe255f83676414c60e9c1cd7", "pins" : [ { "identity" : "swift-ai-model-retriever", @@ -7,7 +7,7 @@ "location" : "https://github.com/kevinhermawan/swift-ai-model-retriever.git", "state" : { "branch" : "main", - "revision" : "5d22906f1bedcb53452257c784ebffa72e9ad1cb" + "revision" : "585de8246341cf0b715357ecfd57c20aea52545c" } }, { diff --git a/README.md b/README.md index 55a8dd2..ead6327 100644 --- a/README.md +++ b/README.md @@ -63,7 +63,7 @@ let messages = [ let task = Task { do { - let completion = try await chat.send(model: "claude-3-5-sonnet-20240620", messages: messages) + let completion = try await chat.send(model: "claude-3-5-sonnet", messages: messages) print(completion.content.first?.text ?? "No response") } catch { @@ -85,7 +85,7 @@ let messages = [ let task = Task { do { - for try await chunk in chat.stream(model: "claude-3-5-sonnet-20240620", messages: messages) { + for try await chunk in chat.stream(model: "claude-3-5-sonnet", messages: messages) { if let text = chunk.delta?.text { print(text, terminator: "") } @@ -116,7 +116,7 @@ let messages = [ Task { do { - let completion = try await chat.send(model: "claude-3-5-sonnet-20240620", messages: messages) + let completion = try await chat.send(model: "claude-3-5-sonnet", messages: messages) print(completion.content.first?.text ?? "") } catch { @@ -154,7 +154,7 @@ let options = ChatOptions(tools: [recommendBookTool]) Task { do { - let completion = try await chat.send(model: "claude-3-5-sonnet-20240620", messages: messages, options: options) + let completion = try await chat.send(model: "claude-3-5-sonnet", messages: messages, options: options) if let toolInput = completion.content.first(where: { $0.type == "tool_use" })?.toolInput { print(toolInput) @@ -182,7 +182,7 @@ let messages = [ let task = Task { do { - let completion = try await chat.send(model: "claude-3-5-sonnet-20240620", messages: messages) + let completion = try await chat.send(model: "claude-3-5-sonnet", messages: messages) print(completion.content.first?.text ?? "No response") } catch { @@ -193,6 +193,35 @@ let task = Task { To learn more about prompt caching, check out the [Anthropic documentation](https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching). +### Error Handling + +`LLMChatAnthropic` provides structured error handling through the `LLMChatAnthropicError` enum. This enum contains three cases that represent different types of errors you might encounter: + +```swift +let messages = [ + ChatMessage(role: .system, content: "You are a helpful assistant."), + ChatMessage(role: .user, content: "What is the capital of Indonesia?") +] + +do { + let completion = try await chat.send(model: "claude-3-5-sonnet", messages: messages) + + print(completion.content.first?.text ?? 
"No response") +} catch let error as LLMChatAnthropicError { + switch error { + case .serverError(let message): + // Handle server-side errors (e.g., invalid API key, rate limits) + print("Server Error: \(message)") + case .networkError(let error): + // Handle network-related errors (e.g., no internet connection) + print("Network Error: \(error.localizedDescription)") + case .badServerResponse: + // Handle invalid server responses + print("Invalid response received from server") + } +} +``` + ## Related Packages - [swift-ai-model-retriever](https://github.com/kevinhermawan/swift-ai-model-retriever) diff --git a/Sources/LLMChatAnthropic/Documentation.docc/Documentation.md b/Sources/LLMChatAnthropic/Documentation.docc/Documentation.md index 593799d..972ccce 100644 --- a/Sources/LLMChatAnthropic/Documentation.docc/Documentation.md +++ b/Sources/LLMChatAnthropic/Documentation.docc/Documentation.md @@ -34,7 +34,7 @@ let messages = [ let task = Task { do { - let completion = try await chat.send(model: "claude-3-5-sonnet-20240620", messages: messages) + let completion = try await chat.send(model: "claude-3-5-sonnet", messages: messages) print(completion.content.first?.text ?? "No response") } catch { @@ -56,7 +56,7 @@ let messages = [ let task = Task { do { - for try await chunk in chat.stream(model: "claude-3-5-sonnet-20240620", messages: messages) { + for try await chunk in chat.stream(model: "claude-3-5-sonnet", messages: messages) { if let text = chunk.delta?.text { print(text, terminator: "") } @@ -87,7 +87,7 @@ let messages = [ Task { do { - let completion = try await chat.send(model: "claude-3-5-sonnet-20240620", messages: messages) + let completion = try await chat.send(model: "claude-3-5-sonnet", messages: messages) print(completion.content.first?.text ?? "") } catch { @@ -125,7 +125,7 @@ let options = ChatOptions(tools: [recommendBookTool]) Task { do { - let completion = try await chat.send(model: "claude-3-5-sonnet-20240620", messages: messages, options: options) + let completion = try await chat.send(model: "claude-3-5-sonnet", messages: messages, options: options) if let toolInput = completion.content.first(where: { $0.type == "tool_use" })?.toolInput { print(toolInput) @@ -153,7 +153,7 @@ let messages = [ let task = Task { do { - let completion = try await chat.send(model: "claude-3-5-sonnet-20240620", messages: messages) + let completion = try await chat.send(model: "claude-3-5-sonnet", messages: messages) print(completion.content.first?.text ?? "No response") } catch { @@ -164,6 +164,35 @@ let task = Task { To learn more about prompt caching, check out the [Anthropic documentation](https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching). +### Error Handling + +``LLMChatAnthropic`` provides structured error handling through the ``LLMChatAnthropicError`` enum. This enum contains three cases that represent different types of errors you might encounter: + +```swift +let messages = [ + ChatMessage(role: .system, content: "You are a helpful assistant."), + ChatMessage(role: .user, content: "What is the capital of Indonesia?") +] + +do { + let completion = try await chat.send(model: "claude-3-5-sonnet", messages: messages) + + print(completion.content.first?.text ?? 
"No response") +} catch let error as LLMChatAnthropicError { + switch error { + case .serverError(let message): + // Handle server-side errors (e.g., invalid API key, rate limits) + print("Server Error: \(message)") + case .networkError(let error): + // Handle network-related errors (e.g., no internet connection) + print("Network Error: \(error.localizedDescription)") + case .badServerResponse: + // Handle invalid server responses + print("Invalid response received from server") + } +} +``` + ## Related Packages - [swift-ai-model-retriever](https://github.com/kevinhermawan/swift-ai-model-retriever) diff --git a/Sources/LLMChatAnthropic/LLMChatAnthropic.swift b/Sources/LLMChatAnthropic/LLMChatAnthropic.swift index ed75791..33d12de 100644 --- a/Sources/LLMChatAnthropic/LLMChatAnthropic.swift +++ b/Sources/LLMChatAnthropic/LLMChatAnthropic.swift @@ -26,9 +26,24 @@ public struct LLMChatAnthropic { self.endpoint = endpoint ?? URL(string: "https://api.anthropic.com/v1/messages")! self.headers = headers } + + private var allHeaders: [String: String] { + var defaultHeaders = [ + "Anthropic-Version": "2023-06-01", + "Content-Type": "application/json", + "X-Api-Key": apiKey + ] + + if let headers { + defaultHeaders.merge(headers) { _, new in new } + } + + return defaultHeaders + } } -extension LLMChatAnthropic { +// MARK: - Send +public extension LLMChatAnthropic { /// Sends a chat completion request. /// /// - Parameters: @@ -37,14 +52,10 @@ extension LLMChatAnthropic { /// - options: Optional ``ChatOptions`` that customize the completion request. /// /// - Returns: A ``ChatCompletion`` object that contains the API's response. - public func send(model: String, messages: [ChatMessage], options: ChatOptions? = nil) async throws -> ChatCompletion { + func send(model: String, messages: [ChatMessage], options: ChatOptions? = nil) async throws -> ChatCompletion { let body = RequestBody(stream: false, model: model, messages: messages, options: options) - let request = try createRequest(for: endpoint, with: body) - - let (data, response) = try await URLSession.shared.data(for: request) - try validateHTTPResponse(response) - return try JSONDecoder().decode(ChatCompletion.self, from: data) + return try await performRequest(with: body) } /// Streams a chat completion request. @@ -55,15 +66,63 @@ extension LLMChatAnthropic { /// - options: Optional ``ChatOptions`` that customize the completion request. /// /// - Returns: An `AsyncThrowingStream` of ``ChatCompletionChunk`` objects. - public func stream(model: String, messages: [ChatMessage], options: ChatOptions? = nil) -> AsyncThrowingStream { + func stream(model: String, messages: [ChatMessage], options: ChatOptions? = nil) -> AsyncThrowingStream { + let body = RequestBody(stream: true, model: model, messages: messages, options: options) + + return performStreamRequest(with: body) + } +} + +// MARK: - Helpers +private extension LLMChatAnthropic { + func createRequest(for url: URL, with body: RequestBody) throws -> URLRequest { + var request = URLRequest(url: url) + request.httpMethod = "POST" + request.httpBody = try JSONEncoder().encode(body) + request.allHTTPHeaderFields = allHeaders + + return request + } + + func performRequest(with body: RequestBody) async throws -> ChatCompletion { + do { + let request = try createRequest(for: endpoint, with: body) + let (data, response) = try await URLSession.shared.data(for: request) + + if let errorResponse = try? 
JSONDecoder().decode(ChatCompletionError.self, from: data) { + throw LLMChatAnthropicError.serverError(errorResponse.error.message) + } + + guard let httpResponse = response as? HTTPURLResponse, 200...299 ~= httpResponse.statusCode else { + throw LLMChatAnthropicError.badServerResponse + } + + return try JSONDecoder().decode(ChatCompletion.self, from: data) + } catch let error as LLMChatAnthropicError { + throw error + } catch { + throw LLMChatAnthropicError.networkError(error) + } + } + + func performStreamRequest(with body: RequestBody) -> AsyncThrowingStream { AsyncThrowingStream { continuation in Task { do { - let body = RequestBody(stream: true, model: model, messages: messages, options: options) let request = try createRequest(for: endpoint, with: body) - let (bytes, response) = try await URLSession.shared.bytes(for: request) - try validateHTTPResponse(response) + + guard let httpResponse = response as? HTTPURLResponse, 200...299 ~= httpResponse.statusCode else { + for try await line in bytes.lines { + if let data = line.data(using: .utf8), let errorResponse = try? JSONDecoder().decode(ChatCompletionError.self, from: data) { + throw LLMChatAnthropicError.serverError(errorResponse.error.message) + } + + break + } + + throw LLMChatAnthropicError.badServerResponse + } var currentChunk = ChatCompletionChunk(id: "", model: "", role: "") @@ -123,46 +182,16 @@ extension LLMChatAnthropic { } continuation.finish() - } catch { + } catch let error as LLMChatAnthropicError { continuation.finish(throwing: error) + } catch { + continuation.finish(throwing: LLMChatAnthropicError.networkError(error)) } } } } } -// MARK: - Helper Methods -private extension LLMChatAnthropic { - var allHeaders: [String: String] { - var defaultHeaders = [ - "Anthropic-Version": "2023-06-01", - "Content-Type": "application/json", - "X-Api-Key": apiKey - ] - - if let headers { - defaultHeaders.merge(headers) { _, new in new } - } - - return defaultHeaders - } - - func createRequest(for url: URL, with body: RequestBody) throws -> URLRequest { - var request = URLRequest(url: url) - request.httpMethod = "POST" - request.httpBody = try JSONEncoder().encode(body) - request.allHTTPHeaderFields = allHeaders - - return request - } - - func validateHTTPResponse(_ response: URLResponse) throws { - guard let httpResponse = response as? HTTPURLResponse, 200...299 ~= httpResponse.statusCode else { - throw URLError(.badServerResponse) - } - } -} - // MARK: - Supporting Types private extension LLMChatAnthropic { struct RequestBody: Encodable { @@ -171,34 +200,47 @@ private extension LLMChatAnthropic { let messages: [ChatMessage] let options: ChatOptions? + private struct SystemMessage: Encodable { + let type: String + let text: String + let cacheControl: CacheControl? + + private enum CodingKeys: String, CodingKey { + case type, text + case cacheControl = "cache_control" + } + + struct CacheControl: Encodable { + let type: String + } + } + func encode(to encoder: Encoder) throws { var container = encoder.container(keyedBy: CodingKeys.self) try container.encode(options?.maxTokens ?? 
4096, forKey: .maxTokens) try container.encode(stream, forKey: .stream) try container.encode(model, forKey: .model) - var systemMessages: [[String: String]] = [] - var nonSystemMessages: [ChatMessage] = [] - - for message in messages { - if message.role == .system { - for content in message.content { - if case .text(let text) = content { - var encodedContent: [String: String] = ["type": "text", "text": text] - - if let cacheControl = message.cacheControl { - encodedContent["cacheControl"] = cacheControl.type.rawValue + let systemMessages: [SystemMessage] = messages + .filter { $0.role == .system } + .flatMap { message in + message.content.compactMap { content -> SystemMessage? in + guard case .text(let text) = content else { return nil } + + return SystemMessage( + type: "text", + text: text, + cacheControl: message.cacheControl.map { + SystemMessage.CacheControl(type: $0.type.rawValue) } - - systemMessages.append(encodedContent) - } + ) } - } else { - nonSystemMessages.append(message) } - } - if systemMessages.isEmpty == false { + let nonSystemMessages = messages + .filter { $0.role != .system } + + if !systemMessages.isEmpty { try container.encode(systemMessages, forKey: .system) } @@ -214,15 +256,6 @@ private extension LLMChatAnthropic { case maxTokens = "max_tokens" case system, messages } - - enum ContentCodingKeys: String, CodingKey { - case type, text - case cacheControl = "cache_control" - } - - enum CacheControlCodingKeys: String, CodingKey { - case type - } } struct RawChatCompletionChunk: Decodable { @@ -276,4 +309,12 @@ private extension LLMChatAnthropic { case delta, usage } } + + struct ChatCompletionError: Codable { + let error: Error + + struct Error: Codable { + let message: String + } + } } diff --git a/Sources/LLMChatAnthropic/LLMChatAnthropicError.swift b/Sources/LLMChatAnthropic/LLMChatAnthropicError.swift new file mode 100644 index 0000000..0e6e065 --- /dev/null +++ b/Sources/LLMChatAnthropic/LLMChatAnthropicError.swift @@ -0,0 +1,36 @@ +// +// LLMChatAnthropicError.swift +// LLMChatAnthropic +// +// Created by Kevin Hermawan on 10/27/24. +// + +import Foundation + +/// An enum that represents errors from the chat completion request. +public enum LLMChatAnthropicError: LocalizedError { + /// A case that represents a server-side error response. + /// + /// - Parameter message: The error message from the server. + case serverError(String) + + /// A case that represents a network-related error. + /// + /// - Parameter error: The underlying network error. + case networkError(Error) + + /// A case that represents an invalid server response. + case badServerResponse + + /// A localized message that describes the error. + public var errorDescription: String? 
{ + switch self { + case .serverError(let error): + return error + case .networkError(let error): + return error.localizedDescription + case .badServerResponse: + return "Invalid response received from server" + } + } +} diff --git a/Tests/LLMChatAnthropicTests/ChatCompletionTests.swift b/Tests/LLMChatAnthropicTests/ChatCompletionTests.swift index 5ce6435..df7b78e 100644 --- a/Tests/LLMChatAnthropicTests/ChatCompletionTests.swift +++ b/Tests/LLMChatAnthropicTests/ChatCompletionTests.swift @@ -133,3 +133,97 @@ final class ChatCompletionTests: XCTestCase { XCTAssertEqual(receivedUsage?.totalTokens, 15) } } +// MARK: - Error Handling +extension ChatCompletionTests { + func testServerError() async throws { + let mockErrorResponse = """ + { + "error": { + "message": "Invalid API key provided" + } + } + """ + + URLProtocolMock.mockData = mockErrorResponse.data(using: .utf8) + + do { + _ = try await chat.send(model: "claude-3-5-sonnet", messages: messages) + + XCTFail("Expected serverError to be thrown") + } catch let error as LLMChatAnthropicError { + switch error { + case .serverError(let message): + XCTAssertEqual(message, "Invalid API key provided") + default: + XCTFail("Expected serverError but got \(error)") + } + } + } + + func testNetworkError() async throws { + URLProtocolMock.mockError = NSError( + domain: NSURLErrorDomain, + code: NSURLErrorNotConnectedToInternet, + userInfo: [NSLocalizedDescriptionKey: "The Internet connection appears to be offline."] + ) + + do { + _ = try await chat.send(model: "claude-3-5-sonnet", messages: messages) + + XCTFail("Expected networkError to be thrown") + } catch let error as LLMChatAnthropicError { + switch error { + case .networkError(let underlyingError): + XCTAssertEqual((underlyingError as NSError).code, NSURLErrorNotConnectedToInternet) + default: + XCTFail("Expected networkError but got \(error)") + } + } + } + + func testStreamServerError() async throws { + let mockErrorResponse = """ + { + "error": { + "message": "Rate limit exceeded" + } + } + """ + + URLProtocolMock.mockStreamData = [mockErrorResponse] + + do { + for try await _ in chat.stream(model: "claude-3-5-sonnet", messages: messages) { + XCTFail("Expected serverError to be thrown") + } + } catch let error as LLMChatAnthropicError { + switch error { + case .serverError(let message): + XCTAssertEqual(message, "Rate limit exceeded") + default: + XCTFail("Expected serverError but got \(error)") + } + } + } + + func testStreamNetworkError() async throws { + URLProtocolMock.mockError = NSError( + domain: NSURLErrorDomain, + code: NSURLErrorNotConnectedToInternet, + userInfo: [NSLocalizedDescriptionKey: "The Internet connection appears to be offline."] + ) + + do { + for try await _ in chat.stream(model: "claude-3-5-sonnet", messages: messages) { + XCTFail("Expected networkError to be thrown") + } + } catch let error as LLMChatAnthropicError { + switch error { + case .networkError(let underlyingError): + XCTAssertEqual((underlyingError as NSError).code, NSURLErrorNotConnectedToInternet) + default: + XCTFail("Expected networkError but got \(error)") + } + } + } +}
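
A minimal usage sketch of the `LLMChatAnthropicError` surface this patch introduces, assuming the `chat` and `messages` values configured as in the README snippets above; the `sendWithRetry` helper and its `maxAttempts` parameter are illustrative, not part of the package:

```swift
import Foundation

// Hypothetical helper (not part of this patch): retries only transient
// network failures and rethrows server-side errors immediately.
// Assumes `chat: LLMChatAnthropic` and `messages: [ChatMessage]` exist,
// configured as in the README examples above.
func sendWithRetry(maxAttempts: Int = 3) async throws -> String {
    var lastError: Error = LLMChatAnthropicError.badServerResponse

    for attempt in 1...maxAttempts {
        do {
            let completion = try await chat.send(model: "claude-3-5-sonnet", messages: messages)

            return completion.content.first?.text ?? "No response"
        } catch let error as LLMChatAnthropicError {
            lastError = error

            switch error {
            case .serverError, .badServerResponse:
                // Invalid API keys and malformed responses are not
                // recoverable by retrying here, so fail fast.
                throw error
            case .networkError:
                // Transient connectivity issue: back off briefly, then retry.
                try await Task.sleep(nanoseconds: UInt64(attempt) * 500_000_000)
            }
        }
    }

    throw lastError
}
```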