Commit d1b5bee: Merge pull request #119 from rhx/main

buhe authored Jul 6, 2024
2 parents 3513093 + 68cf719
Showing 6 changed files with 138 additions and 57 deletions.
2 changes: 1 addition & 1 deletion .swiftpm/xcode/xcshareddata/xcschemes/LangChain.xcscheme
@@ -1,6 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<Scheme
-   LastUpgradeVersion = "1500"
+   LastUpgradeVersion = "1540"
version = "1.7">
<BuildAction
parallelizeBuildables = "YES"
(Two further Xcode scheme files receive the same one-line change, bumping LastUpgradeVersion from "1500" to "1540"; their filenames are not captured in this view.)
30 changes: 15 additions & 15 deletions Sources/LangChain/llms/ChatOllama.swift
@@ -15,7 +15,7 @@ public class ChatOllama: Ollama {
///
/// This array contains the chat history
/// of the conversation so far.
-    var history = [ChatGLMMessage]()
+    public var history = [ChatGLMMessage]()

/// Create a new Ollama chat instance.
///
@@ -60,26 +60,26 @@ public extension ChatOllama {
/// This is a streaming endpoint, so there can be a series of responses.
/// Streaming can be disabled using "stream": false.
struct ChatRequest: Codable, Sendable {
-        let model: String
-        let options: [String: String]?
-        let format: String
-        let stream: Bool
-        let messages: [ChatGLMMessage]
+        public let model: String
+        public let options: [String: String]?
+        public let format: String
+        public let stream: Bool
+        public let messages: [ChatGLMMessage]
}
/// Ollama response to a `ChatRequest`.
///
/// This response object includes the next message in a chat conversation.
/// The final response object will include statistics and additional data from the request.
struct ChatResponse: Codable, Sendable {
-        let message: ChatGLMMessage
-        let model: String
-        let done: Bool
-        let totalDuration: Int?
-        let loadDuration: Int?
-        let promptEvalDuration: Int?
-        let evalDuration: Int?
-        let promptEvalCount: Int?
-        let evalCount: Int?
+        public let message: ChatGLMMessage
+        public let model: String
+        public let done: Bool
+        public let totalDuration: Int?
+        public let loadDuration: Int?
+        public let promptEvalDuration: Int?
+        public let evalDuration: Int?
+        public let promptEvalCount: Int?
+        public let evalCount: Int?

/// Return the message content.
public var content: String { message.content }
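With `ChatResponse` and its fields now public, code outside the package can decode a raw reply from the chat endpoint directly. A minimal sketch, assuming the package's module is imported as `LangChain`; the JSON payload is illustrative, and decoding goes through the `Decodable` requirement, so it does not need the still-internal memberwise initializer:

import Foundation
import LangChain

// Illustrative payload in the shape the fields above imply.
let json = """
{"message": {"role": "assistant", "content": "Hi there!"},
 "model": "llama3", "done": true}
""".data(using: .utf8)!

do {
    let response = try JSONDecoder().decode(ChatOllama.ChatResponse.self, from: json)
    print(response.content)  // "Hi there!"
    print(response.done)     // true
} catch {
    print("decode failed:", error)
}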
147 changes: 111 additions & 36 deletions Sources/LangChain/llms/Ollama.swift
@@ -14,10 +14,10 @@ import OpenAIKit
/// This class is a wrapper around the Ollama API.
public class Ollama: LLM {

-    let baseURL: String
-    let model: String
-    let options: [String: String]?
-    let requestTimeout: Int
+    public let baseURL: String
+    public let model: String
+    public let options: [String: String]?
+    public let requestTimeout: Int

/// Images encoded as base64 strings.
public var images: [String]?
@@ -100,6 +100,7 @@ public class Ollama: LLM {
return LLMResult()
}
let llmResponse = try JSONDecoder().decode(GenerateResponse.self, from: data)
+        context = llmResponse.context
return LLMResult(llm_output: llmResponse.response)
}

@@ -163,17 +164,17 @@ public extension Ollama {
/// This message is sent to the `generate` API
/// endpoint to generate a response.
struct GenerateRequest: Codable {
-        let model: String
-        let prompt: String
-        let images: [String]?
-        let system: String?
-        let template: String?
-        let context: [Int]?
-        let options: [String: String]?
-        let keepAlive: String?
-        let format: String?
-        let raw: Bool?
-        let stream: Bool?
+        public let model: String
+        public let prompt: String
+        public let images: [String]?
+        public let system: String?
+        public let template: String?
+        public let context: [Int]?
+        public let options: [String: String]?
+        public let keepAlive: String?
+        public let format: String?
+        public let raw: Bool?
+        public let stream: Bool?

enum CodingKeys: String, CodingKey {
case model
@@ -188,23 +189,55 @@ public extension Ollama {
case raw
case stream
}

+        /// Create a new `GenerateRequest`.
+        ///
+        /// This initializer creates a new `GenerateRequest`.
+        ///
+        /// - Parameters:
+        ///   - model: The model to use for generation.
+        ///   - prompt: The prompt to use for generation.
+        ///   - images: An array of images to use for generation.
+        ///   - system: The system prompt to use for generation.
+        ///   - template: The template to use for generation.
+        ///   - context: The context to use for generation.
+        ///   - options: The options to use for generation.
+        ///   - keepAlive: The keep-alive time for the generation.
+        ///   - format: The format to use for the response.
+        ///   - raw: Whether to return the raw response.
+        ///   - stream: Whether to stream the response.
+        /// - Returns: A new `GenerateRequest`.
+        @inlinable
+        public init(model: String, prompt: String, images: [String]? = nil, system: String? = nil, template: String? = nil, context: [Int]? = nil, options: [String: String]? = nil, keepAlive: String? = nil, format: String? = nil, raw: Bool? = nil, stream: Bool? = nil) {
+            self.model = model
+            self.prompt = prompt
+            self.images = images
+            self.system = system
+            self.template = template
+            self.context = context
+            self.options = options
+            self.keepAlive = keepAlive
+            self.format = format
+            self.raw = raw
+            self.stream = stream
+        }
}
/// Ollama generation response.
///
/// This response object contains the response
/// generated by the Ollama `generate` API endpoint.
struct GenerateResponse: Codable {
-        let response: String
-        let createdAt: String
-        let model: String
-        let done: Bool
-        let context: [Int]?
-        let totalDuration: Int?
-        let loadDuration: Int?
-        let promptEvalDuration: Int?
-        let evalDuration: Int?
-        let promptEvalCount: Int?
-        let evalCount: Int?
+        public let response: String
+        public let createdAt: String
+        public let model: String
+        public let done: Bool
+        public let context: [Int]?
+        public let totalDuration: Int?
+        public let loadDuration: Int?
+        public let promptEvalDuration: Int?
+        public let evalDuration: Int?
+        public let promptEvalCount: Int?
+        public let evalCount: Int?

enum CodingKeys: String, CodingKey {
case response
@@ -219,6 +252,38 @@ public extension Ollama {
case promptEvalCount = "prompt_eval_count"
case evalCount = "eval_count"
}

+        /// Create a new `GenerateResponse`.
+        ///
+        /// This initializer creates a new `GenerateResponse`.
+        ///
+        /// - Parameters:
+        ///   - response: The generated response string.
+        ///   - createdAt: The creation time of the response.
+        ///   - model: The model used for generation.
+        ///   - done: Whether generation is complete.
+        ///   - context: The context used for generation.
+        ///   - totalDuration: The total duration of the generation.
+        ///   - loadDuration: The load duration of the generation.
+        ///   - promptEvalDuration: The duration of the prompt evaluation.
+        ///   - evalDuration: The duration of the evaluation.
+        ///   - promptEvalCount: The prompt evaluation count.
+        ///   - evalCount: The evaluation count.
+        /// - Returns: A new `GenerateResponse`.
+        @inlinable
+        public init(response: String, createdAt: String, model: String, done: Bool, context: [Int]? = nil, totalDuration: Int? = nil, loadDuration: Int? = nil, promptEvalDuration: Int? = nil, evalDuration: Int? = nil, promptEvalCount: Int? = nil, evalCount: Int? = nil) {
+            self.response = response
+            self.createdAt = createdAt
+            self.model = model
+            self.done = done
+            self.context = context
+            self.totalDuration = totalDuration
+            self.loadDuration = loadDuration
+            self.promptEvalDuration = promptEvalDuration
+            self.evalDuration = evalDuration
+            self.promptEvalCount = promptEvalCount
+            self.evalCount = evalCount
+        }
}
/// Generate the next message in a chat with a provided model.
///
@@ -270,11 +335,16 @@ public extension Ollama {
/// This struct represents a single Ollama model
/// that is available via the API.
struct Model: Codable {
-        let name: String
-        let modifiedAt: String
-        let digest: String
-        let size: Int
-        let details: ModelDetails
+        /// The name of the model.
+        public let name: String
+        /// The last modification date of the model.
+        public let modifiedAt: String
+        /// The digest of the model.
+        public let digest: String
+        /// The size of the model.
+        public let size: Int
+        /// The details of the model.
+        public let details: ModelDetails

/// JSON coding keys for the `Model` struct.
enum CodingKeys: String, CodingKey {
@@ -289,11 +359,16 @@ public extension Ollama {
///
/// This struct represents the details of an Ollama model.
struct ModelDetails: Codable {
-        let format: String
-        let family: String
-        let families: [String]?
-        let parameterSize: String
-        let quantizationLevel: String
+        /// The format of the model.
+        public let format: String
+        /// The family of the model.
+        public let family: String
+        /// The families of the model.
+        public let families: [String]?
+        /// The size of the model parameters.
+        public let parameterSize: String
+        /// The quantization level of the model.
+        public let quantizationLevel: String

/// JSON coding keys for the `ModelDetails` struct.
enum CodingKeys: String, CodingKey {
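With the `Model` and `ModelDetails` fields public, callers can inspect a decoded model listing (Ollama's `/api/tags` endpoint). A decoding sketch, assuming the CodingKeys hidden in this view map snake_case names the same way the visible ones in this file do; the payload itself is made up:

import Foundation
import LangChain

let json = """
{"name": "llama3:latest",
 "modified_at": "2024-07-06T00:00:00Z",
 "digest": "sha256:abc123",
 "size": 4661224676,
 "details": {"format": "gguf", "family": "llama", "families": ["llama"],
             "parameter_size": "8B", "quantization_level": "Q4_0"}}
""".data(using: .utf8)!

do {
    let model = try JSONDecoder().decode(Ollama.Model.self, from: json)
    print(model.name, model.details.parameterSize)  // llama3:latest 8B
} catch {
    print("decode failed:", error)
}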
12 changes: 9 additions & 3 deletions Sources/LangChain/utilities/chatglm/ChatGLMAPIWrapper.swift
@@ -9,9 +9,15 @@ import AsyncHTTPClient
import Foundation
import NIOPosix

-struct ChatGLMMessage: Codable {
-    let role: String
-    let content: String
+/// A ChatGLM message.
+///
+/// This structure represents a message in a chat.
+public struct ChatGLMMessage: Codable, Sendable {
+    /// The role of the entity sending the message,
+    /// such as "user", "assistant", or "developer".
+    public let role: String
+    /// The content of the message.
+    public let content: String
}
struct ChatGLMPayload: Codable {
let prompt: [ChatGLMMessage]
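Making `ChatGLMMessage` public and `Sendable` lets chat transcripts cross module and concurrency boundaries. A sketch decoding a stored transcript (the payload is illustrative; external code decodes rather than constructs messages, since the memberwise initializer stays internal):

import Foundation
import LangChain

let transcript = """
[{"role": "user", "content": "Hello"},
 {"role": "assistant", "content": "Hi! How can I help?"}]
""".data(using: .utf8)!

do {
    let messages = try JSONDecoder().decode([ChatGLMMessage].self, from: transcript)
    // Sendable conformance means this array can now be handed to another
    // task or actor without compiler diagnostics.
    print(messages.map(\.role))  // ["user", "assistant"]
} catch {
    print("decode failed:", error)
}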
