Merge pull request #236 from sobelio/workspace-deps
Workspace deps
williamhogman authored Nov 15, 2023
2 parents 0ab38b1 + 233ae1e commit 31f5fb7
Showing 32 changed files with 443 additions and 494 deletions.
412 changes: 197 additions & 215 deletions Cargo.lock

Large diffs are not rendered by default.

10 changes: 10 additions & 0 deletions Cargo.toml
@@ -1,9 +1,19 @@
[workspace]
members = ["crates/*"]
resolver = "2"

[workspace.metadata.release]
shared-version = true

[patch.crates-io]
hnsw_rs = { git = "https://github.com/ruqqq/hnswlib-rs" }

+[workspace.dependencies]
+serde = { version = "1.0.163", features = ["derive"] }
+serde_json = "1.0.96"
+thiserror = "1.0.40"
+tokio = "1.28.0"
+async-trait = "0.1.68"
+anyhow = "1.0.71"
+lazy_static = "1.4.0"
+serde_yaml = "0.9.21"
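The new `[workspace.dependencies]` table is the core of this PR: each shared dependency version is declared once at the workspace root, and member crates opt in instead of pinning their own copies, which is exactly what the per-crate diffs below do. The `shared-version = true` key under `[workspace.metadata.release]` is cargo-release configuration that keeps all member crates on a single version number, and `[patch.crates-io]` redirects every `hnsw_rs` dependency in the graph to the named git fork. As a minimal sketch of how a member manifest consumes the shared table (the crate path is hypothetical):

    # Hypothetical member manifest, e.g. crates/some-crate/Cargo.toml
    [dependencies]
    # Short form: inherit the version pinned in [workspace.dependencies].
    thiserror.workspace = true
    # Long form: same inheritance, plus crate-specific features; features
    # listed here are unioned with any features in the workspace entry.
    serde = { workspace = true, features = ["derive"] }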
12 changes: 6 additions & 6 deletions crates/llm-chain-hnsw/Cargo.toml
@@ -13,13 +13,13 @@ repository = "https://github.com/sobelio/llm-chain/"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
-async-trait = "0.1.68"
+async-trait.workspace = true
hnsw_rs = "0.1.19"
-llm-chain = { path = "../llm-chain", version = "0.12.3", default-features = false }
-serde = "1.0.164"
-serde_json = "1.0.99"
-thiserror = "1.0.40"
-tokio = "1.28.2"
+llm-chain = { path = "../llm-chain", version = "0.12.0", default-features = false }
+serde.workspace = true
+serde_json.workspace = true
+thiserror.workspace = true
+tokio.workspace = true

[dev-dependencies]
llm-chain-openai = { path = "../llm-chain-openai" }
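Note that the intra-workspace dependency on `llm-chain` keeps both `path` and `version`; this is deliberate. An annotated restatement of that line:

    # `path` makes local builds resolve to the sibling crate in this repository;
    # `version` is the requirement written into the package uploaded to
    # crates.io, where path dependencies are stripped on publish.
    llm-chain = { path = "../llm-chain", version = "0.12.0", default-features = false }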
17 changes: 0 additions & 17 deletions crates/llm-chain-llama-sys/CHANGELOG.md

This file was deleted.

2 changes: 1 addition & 1 deletion crates/llm-chain-llama-sys/llama.cpp
Submodule llama.cpp updated 134 files
17 changes: 0 additions & 17 deletions crates/llm-chain-llama/CHANGELOG.md

This file was deleted.

12 changes: 6 additions & 6 deletions crates/llm-chain-llama/Cargo.toml
@@ -14,14 +14,14 @@ readme = "./README.md"
repository = "https://github.com/sobelio/llm-chain/"

[dependencies]
-anyhow = "1.0.72"
-async-trait = "0.1.68"
+anyhow.workspace = true
+async-trait.workspace = true
llm-chain-llama-sys = { path = "../llm-chain-llama-sys", version = "0.12" }
-llm-chain = { path = "../llm-chain", version = "0.12.3" }
-serde = { version = "1.0.164", features = ["derive"] }
-thiserror = "1.0.40"
+llm-chain = { path = "../llm-chain", version = "0.12.0" }
+serde = { version = "1.0.163", features = ["derive"] }
+thiserror.workspace = true
lazy_static = "1.4.0"
-tokio = "1.28.2"
+tokio.workspace = true

[dev-dependencies]
tokio = { version = "1.28.2", features = ["macros", "rt"] }
(file name not rendered; diff of a Rust example file follows)
@@ -1,7 +1,7 @@
use llm_chain::options;
-use llm_chain::options::{ModelRef, Options};
+use llm_chain::options::ModelRef;
use llm_chain::{executor, parameters, prompt};
-use std::{env::args, error::Error};
+use std::env::args;
/// This example demonstrates how to use the llm-chain-llama crate to generate text using a
/// LLaMA model.
///
35 changes: 4 additions & 31 deletions crates/llm-chain-llama/src/context.rs
@@ -22,23 +22,14 @@ pub struct LLAMACPPErrorCode(i32);
// Represents the configuration parameters for a LLamaContext.
#[derive(Debug, Clone)]
pub struct ContextParams {
+pub n_parts: i32,
pub n_ctx: i32,
-pub n_batch: i32,
-pub n_gpu_layers: i32,
-pub main_gpu: i32,
-pub tensor_split: *const f32,
-pub seed: u32,
+pub seed: i32,
pub f16_kv: bool,
pub vocab_only: bool,
pub use_mlock: bool,
pub use_mmap: bool,
pub embedding: bool,
-pub low_vram: bool,
-pub rope_freq_base: f32,
-pub rope_freq_scale: f32,
-pub mul_mat_q: bool,
-pub n_gqa: i32,
-pub rms_norm_eps: f32,
}

unsafe impl Sync for ContextParams {}
@@ -66,11 +57,8 @@ impl Default for ContextParams {
impl From<ContextParams> for llama_context_params {
fn from(params: ContextParams) -> Self {
llama_context_params {
+n_parts: params.n_parts,
n_ctx: params.n_ctx,
-n_batch: params.n_batch,
-n_gpu_layers: params.n_gpu_layers,
-main_gpu: params.main_gpu,
-tensor_split: params.tensor_split,
seed: params.seed,
f16_kv: params.f16_kv,
logits_all: false,
@@ -80,12 +68,6 @@ impl From<ContextParams> for llama_context_params {
embedding: params.embedding,
progress_callback: None,
progress_callback_user_data: null_mut(),
-low_vram: params.low_vram,
-rope_freq_base: params.rope_freq_base,
-rope_freq_scale: params.rope_freq_scale,
-mul_mat_q: params.mul_mat_q,
-n_gqa: params.n_gqa,
-rms_norm_eps: params.rms_norm_eps,
}
}
}
@@ -94,22 +76,13 @@ impl From<llama_context_params> for ContextParams {
fn from(params: llama_context_params) -> Self {
ContextParams {
n_ctx: params.n_ctx,
-n_batch: params.n_batch,
-n_gpu_layers: params.n_gpu_layers,
-main_gpu: params.main_gpu,
-tensor_split: params.tensor_split,
+n_parts: params.n_parts,
seed: params.seed,
f16_kv: params.f16_kv,
vocab_only: params.vocab_only,
use_mlock: params.use_mlock,
use_mmap: params.use_mmap,
embedding: params.embedding,
-low_vram: params.low_vram,
-rope_freq_base: params.rope_freq_base,
-rope_freq_scale: params.rope_freq_scale,
-mul_mat_q: params.mul_mat_q,
-n_gqa: params.n_gqa,
-rms_norm_eps: params.rms_norm_eps,
}
}
}
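With these deletions, `ContextParams` returns to the smaller parameter set without GPU offload or RoPE scaling fields, and the paired `From` impls keep it convertible to and from the raw FFI struct in both directions. A minimal usage sketch under that assumption (field values are illustrative):

    // Start from the defaults provided by the Default impl above, then
    // override the fields that matter for this run.
    let mut params = ContextParams::default();
    params.n_ctx = 2048;    // context window size, in tokens
    params.use_mmap = true; // memory-map the model file rather than copying it

    // Cross the FFI boundary through the From impl shown in this diff.
    let raw: llama_context_params = params.into();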
11 changes: 1 addition & 10 deletions crates/llm-chain-llama/src/options.rs
@@ -111,10 +111,7 @@ lazy_static! {
MirostatTau: 5.0,
MirostatEta: 0.1,
PenalizeNl: true,
-StopSequence: vec!["\n\n".to_string()],
-NumGpuLayers: 0,
-RopeFrequencyBase: 10000.0,
-RopeFrequencyScale: 1.0
+StopSequence: vec!["\n\n".to_string()]
);
}

@@ -123,15 +120,9 @@ pub(crate) fn get_executor_initial_opts(
) -> Result<(String, ContextParams), ExecutorCreationError> {
let model = opt_extract!(opt, model, Model)?;
let max_context_size = opt_extract!(opt, max_context_size, MaxContextSize)?;
-let num_gpu_layers = opt_extract!(opt, num_gpu_layers, NumGpuLayers)?;
-let rope_freq_base = opt_extract!(opt, rope_freq_base, RopeFrequencyBase)?;
-let rope_freq_scale = opt_extract!(opt, rope_freq_scale, RopeFrequencyScale)?;

let mut cp = ContextParams::new();
cp.n_ctx = *max_context_size as i32;
-cp.n_gpu_layers = *num_gpu_layers;
-cp.rope_freq_base = *rope_freq_base;
-cp.rope_freq_scale = *rope_freq_scale;

Ok((model.to_path(), cp))
}
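After this change, `get_executor_initial_opts` consults only the model path and the maximum context size; GPU layer count and RoPE frequency settings are no longer read. A hedged sketch of the calling side, assuming llm-chain's `options!` macro accepts the two remaining fields in `Name: value` form (the model path is made up):

    // Hypothetical caller inside the crate: build an option set carrying
    // the two fields that get_executor_initial_opts still extracts.
    let opts = options!(
        Model: ModelRef::from_path("./models/llama-model.bin"),
        MaxContextSize: 2048_usize
    );
    let (model_path, context_params) = get_executor_initial_opts(&opts)?;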
17 changes: 0 additions & 17 deletions crates/llm-chain-local/CHANGELOG.md

This file was deleted.

8 changes: 4 additions & 4 deletions crates/llm-chain-local/Cargo.toml
@@ -12,13 +12,13 @@ repository = "https://github.com/sobelio/llm-chain/"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
-async-trait = "0.1.68"
-lazy_static = "1.4.0"
+async-trait.workspace = true
+lazy_static.workspace = true
llm = "0.1.1"
llm-chain = { path = "../llm-chain", version = "0.12.3", default-features = false }
rand = "0.8.5"
-serde = { version = "1.0.164", features = ["derive"] }
-thiserror = "1.0.40"
+serde.workspace = true
+thiserror.workspace = true

[dev-dependencies]
tokio = { version = "1.28.2", features = ["macros", "rt"] }
File renamed without changes.
2 changes: 1 addition & 1 deletion crates/llm-chain-milvus/src/lib.rs
@@ -9,7 +9,7 @@ use milvus::{
collection::SearchOption,
data::FieldColumn,
proto::{milvus::MutationResult, schema::i_ds::IdField},
-value::{Value, ValueVec},
+value::ValueVec,
};
use serde::{de::DeserializeOwned, Serialize};
use std::{collections::HashMap, marker::PhantomData, sync::Arc};
File renamed without changes.
12 changes: 0 additions & 12 deletions crates/llm-chain-openai/CHANGELOG.md

This file was deleted.

23 changes: 11 additions & 12 deletions crates/llm-chain-openai/Cargo.toml
@@ -14,20 +14,19 @@ repository = "https://github.com/sobelio/llm-chain/"

[dependencies]
futures = "0.3.28"
-async-openai = "0.10.3"
-async-trait = "0.1.68"
-llm-chain = { path = "../llm-chain", version = "0.12.3", default-features = false }
-serde = { version = "1.0.164" }
-strum = "0.25"
-strum_macros = "0.25"
-thiserror = "1.0.40"
-tiktoken-rs = { version = "0.4.2", features = ["async-openai"] }
-tokio = "1.28.2"
+async-openai = "0.16.2"
+async-trait.workspace = true
+llm-chain = { path = "../llm-chain", version = "0.12.0", default-features = false }
+serde.workspace = true
+strum = "0.24"
+strum_macros = "0.24"
+thiserror.workspace = true
+tiktoken-rs = { version = "0.5.7" }
+tokio.workspace = true

[dev-dependencies]
tokio = "1.28.2"
qdrant-client = "1.3.0"
llm-chain = { path = "../llm-chain" }
-anyhow = "1.0.72"
-serde_yaml = "0.9.27"
-
+anyhow = "1.0.70"
+serde_yaml = "0.9.21"
12 changes: 12 additions & 0 deletions crates/llm-chain-openai/src/chatgpt/error.rs
@@ -0,0 +1,12 @@
+use async_openai::error::OpenAIError;
+use llm_chain::prompt::StringTemplateError;
+use thiserror::Error;
+
+#[derive(Debug, Error)]
+#[error(transparent)]
+pub enum OpenAIInnerError {
+#[error(transparent)]
+OpenAIError(#[from] OpenAIError),
+#[error(transparent)]
+StringTemplateError(#[from] StringTemplateError),
+}
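The new module funnels both failure sources into one enum: `#[error(transparent)]` forwards `Display` and `source()` to the wrapped error, and `#[from]` derives the conversions that let `?` lift either library error into `OpenAIInnerError`. A small sketch of the effect (the helper function is hypothetical):

    // Hypothetical helper: a StringTemplateError escapes through `?` because
    // thiserror generated `impl From<StringTemplateError> for OpenAIInnerError`.
    fn lift(step: Result<String, StringTemplateError>) -> Result<String, OpenAIInnerError> {
        let rendered = step?; // StringTemplateError -> OpenAIInnerError
        Ok(rendered)
    }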
