update the model from gpt-4-1106-preview to gpt-4-turbo
anastasiya1155 committed Apr 23, 2024
1 parent: 7d7d583 · commit: 222a131
Showing 3 changed files with 6 additions and 6 deletions.
server/bleep/src/agent/model.rs (2 changes: 1 addition & 1 deletion)
@@ -41,7 +41,7 @@ const HEADROOM_CORRECTION: usize = GPT_4_TURBO_MAX_TOKENS - ACTUAL_MAX_TOKENS;
 // PS: when we want to fully utilize the model max context window, the correction is 0
 pub const GPT_4_TURBO_24K: LLMModel = LLMModel {
     tokenizer: "gpt-4-1106-preview",
-    model_name: "gpt-4-1106-preview",
+    model_name: "gpt-4-turbo",
     answer_headroom: 1024 + HEADROOM_CORRECTION,
     prompt_headroom: 2500 + HEADROOM_CORRECTION,
     history_headroom: 2048 + HEADROOM_CORRECTION,
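For context on the headroom constants this hunk touches: each headroom is defined against the full gpt-4-turbo context window, and HEADROOM_CORRECTION shrinks the effective budget down to the 24K window this config targets. A minimal sketch of that arithmetic follows, with illustrative values, since the two max-token constants are defined outside this diff:

```rust
// Illustrative sketch only: the real GPT_4_TURBO_MAX_TOKENS and
// ACTUAL_MAX_TOKENS live elsewhere in model.rs; these values are assumed.
const GPT_4_TURBO_MAX_TOKENS: usize = 128_000; // hypothetical full window
const ACTUAL_MAX_TOKENS: usize = 24_576; // hypothetical 24K target
const HEADROOM_CORRECTION: usize = GPT_4_TURBO_MAX_TOKENS - ACTUAL_MAX_TOKENS;

fn main() {
    // Adding the correction to every headroom reserves the difference between
    // the full window and the 24K target, so the usable budget stays at 24K.
    let answer_headroom = 1024 + HEADROOM_CORRECTION;
    let usable = GPT_4_TURBO_MAX_TOKENS - answer_headroom;
    println!("answer headroom: {answer_headroom}, usable: {usable}");
    assert_eq!(usable, ACTUAL_MAX_TOKENS - 1024); // 23_552 with these values
}
```

Note that only model_name changes in this hunk; the tokenizer field stays at "gpt-4-1106-preview", which is harmless as long as both names resolve to the same encoding (see the token-counting sketch after the studio.rs hunks below).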
server/bleep/src/llm/call.rs (2 changes: 1 addition & 1 deletion)
@@ -82,7 +82,7 @@ pub async fn llm_call(
 ) -> anyhow::Result<impl Stream<Item = Result<Delta, api::Error>>> {
     let model = match req.model.as_deref() {
         Some(model) => model.to_owned(),
-        None => "gpt-4-turbo-preview".into(),
+        None => "gpt-4-turbo".into(),
     };
 
     let builder = {
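The hunk above only changes the fallback string, but the pattern is worth isolating: an explicitly requested model wins, otherwise the gateway default applies. A small sketch under assumed types (LlmCallRequest and its model field are hypothetical stand-ins for whatever `req` actually is):

```rust
// Stand-in request type; the real `req` in llm_call has more fields.
struct LlmCallRequest {
    model: Option<String>, // hypothetical field, mirroring `req.model` in the diff
}

// Equivalent to the match in the hunk: an explicit model wins,
// otherwise fall back to the new default, "gpt-4-turbo".
fn resolve_model(req: &LlmCallRequest) -> String {
    req.model.as_deref().unwrap_or("gpt-4-turbo").to_owned()
}

fn main() {
    let explicit = LlmCallRequest { model: Some("gpt-3.5-turbo".into()) };
    let default = LlmCallRequest { model: None };
    assert_eq!(resolve_model(&explicit), "gpt-3.5-turbo");
    assert_eq!(resolve_model(&default), "gpt-4-turbo");
}
```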
server/bleep/src/webserver/studio.rs (8 changes: 4 additions & 4 deletions)
@@ -31,7 +31,7 @@ use crate::{
 
 mod diff;
 
-const LLM_GATEWAY_MODEL: &str = "gpt-4-1106-preview";
+const LLM_GATEWAY_MODEL: &str = "gpt-4-turbo";
 
 fn studio_not_found() -> Error {
     Error::not_found("unknown code studio ID")
@@ -491,7 +491,7 @@ async fn token_counts(
         })
         .collect::<Vec<_>>();
 
-    let core_bpe = tiktoken_rs::get_bpe_from_model("gpt-4-1106-preview").unwrap();
+    let core_bpe = tiktoken_rs::get_bpe_from_model("gpt-4-turbo").unwrap();
     let per_doc_file = stream::iter(doc_context)
         .map(|file| async {
             if file.hidden {
@@ -652,14 +652,14 @@ pub async fn get_doc_file_token_count(
         .map(|sr| sr.text)
         .collect::<String>();
 
-    let core_bpe = tiktoken_rs::get_bpe_from_model("gpt-4-1106-preview").unwrap();
+    let core_bpe = tiktoken_rs::get_bpe_from_model("gpt-4-turbo").unwrap();
     let token_count = core_bpe.encode_ordinary(&content).len();
 
     Ok(Json(token_count))
 }
 
 fn count_tokens_for_file(path: &str, body: &str, ranges: &[Range<usize>]) -> usize {
-    let core_bpe = tiktoken_rs::get_bpe_from_model("gpt-4-1106-preview").unwrap();
+    let core_bpe = tiktoken_rs::get_bpe_from_model("gpt-4-turbo").unwrap();
 
     let mut chunks = Vec::new();
 
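All four studio.rs changes touch the same pattern: build a BPE for the model name, encode the text, count tokens. Here is a hedged sketch of that pattern with the tiktoken-rs crate, using made-up input. As far as I can tell, get_bpe_from_model resolves names by prefix, so "gpt-4-turbo" and "gpt-4-1106-preview" should both land on the cl100k_base encoding, which would make this commit a model-name cleanup rather than a tokenizer change:

```rust
// Sketch of the counting pattern repeated in studio.rs; input text is made up.
use tiktoken_rs::get_bpe_from_model;

fn count_tokens(model: &str, text: &str) -> anyhow::Result<usize> {
    // Resolve the model name to its byte-pair encoding ("gpt-4-turbo"
    // should match the "gpt-4-" prefix and map to cl100k_base).
    let core_bpe = get_bpe_from_model(model)?;
    // encode_ordinary skips special-token handling, as in the diff.
    Ok(core_bpe.encode_ordinary(text).len())
}

fn main() -> anyhow::Result<()> {
    let content = "fn studio_not_found() -> Error { ... }";
    println!("tokens: {}", count_tokens("gpt-4-turbo", content)?);
    Ok(())
}
```

Since the same BPE is constructed in three separate places in this file, routing them all through the existing LLM_GATEWAY_MODEL constant (or building the CoreBPE once and sharing it) would keep the tokenizer and model name from drifting apart; that is an observation on the diff, not something this commit does.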
