Commit 7a4e847

add model and embedding customization on runtime
1 parent 647abf2 commit 7a4e847

4 files changed: +22 -1 lines changed

README.md (+2)

@@ -132,6 +132,8 @@ const runtime = new BgentRuntime({
   evaluators: [
     /* your custom evaluators */
   ],
+  model: "gpt-3.5-turbo", // whatever model you want to use
+  embeddingModel: "text-embedding-3-large", // whatever model you want to use
 });
 ```
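
For reference, a self-contained sketch of a full runtime configuration using the new options, based on the constructor signature in src/lib/runtime.ts below; the import path, server URL, and Supabase credentials are placeholders, not values from this commit:

```ts
import { createClient } from "@supabase/supabase-js";
import { BgentRuntime } from "bgent"; // import path assumed

const runtime = new BgentRuntime({
  debugMode: true,
  serverUrl: "https://api.openai.com/v1", // placeholder endpoint
  supabase: createClient("https://your-project.supabase.co", "your-anon-key"), // placeholder credentials
  actions: [
    /* your custom actions */
  ],
  evaluators: [
    /* your custom evaluators */
  ],
  model: "gpt-3.5-turbo", // completion model; the class default is "gpt-3.5-turbo-0125"
  embeddingModel: "text-embedding-3-large", // embedding model; matches the class default
});
```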

docs/docs/classes/BgentRuntime.md (+9)

@@ -24,6 +24,7 @@ Creates an instance of BgentRuntime.
 | `opts` | `Object` | The options for configuring the BgentRuntime. |
 | `opts.actions?` | [`Action`](../interfaces/Action.md)[] | Optional custom actions. |
 | `opts.debugMode?` | `boolean` | If true, debug messages will be logged. |
+| `opts.embeddingModel?` | `string` | The model to use for embedding. |
 | `opts.evaluators?` | [`Evaluator`](../interfaces/Evaluator.md)[] | Optional custom evaluators. |
 | `opts.model?` | `string` | The model to use for completion. |
 | `opts.providers?` | [`Provider`](../interfaces/Provider.md)[] | Optional context providers. |
@@ -62,6 +63,14 @@ Store and recall descriptions of users based on conversations.
 
 ___
 
+### embeddingModel
+
+**embeddingModel**: `string` = `"text-embedding-3-large"`
+
+The model to use for embedding.
+
+___
+
 ### evaluators
 
 **evaluators**: [`Evaluator`](../interfaces/Evaluator.md)[] = `[]`

docs/docs/index.md (+2)

@@ -140,6 +140,8 @@ const runtime = new BgentRuntime({
   evaluators: [
     /* your custom evaluators */
   ],
+  model: "gpt-3.5-turbo", // whatever model you want to use
+  embeddingModel: "text-embedding-3-large", // whatever model you want to use
 });
 ```

src/lib/runtime.ts (+9 -1)

@@ -85,6 +85,11 @@ export class BgentRuntime {
    */
   model = "gpt-3.5-turbo-0125";
 
+  /**
+   * The model to use for embedding.
+   */
+  embeddingModel = "text-embedding-3-large";
+
   /**
    * Store messages that are sent and received by the agent.
    */
@@ -129,6 +134,7 @@ export class BgentRuntime {
    * @param opts.evaluators - Optional custom evaluators.
    * @param opts.providers - Optional context providers.
    * @param opts.model - The model to use for completion.
+   * @param opts.embeddingModel - The model to use for embedding.
    */
   constructor(opts: {
     recentMessageCount?: number; // number of messages to hold in the recent message cache
@@ -140,13 +146,15 @@ export class BgentRuntime {
     evaluators?: Evaluator[]; // Optional custom evaluators
     providers?: Provider[];
     model?: string; // The model to use for completion
+    embeddingModel?: string; // The model to use for embedding
   }) {
     this.#recentMessageCount =
       opts.recentMessageCount ?? this.#recentMessageCount;
     this.debugMode = opts.debugMode ?? false;
     this.supabase = opts.supabase;
     this.serverUrl = opts.serverUrl ?? this.serverUrl;
     this.model = opts.model ?? this.model;
+    this.embeddingModel = opts.embeddingModel ?? this.embeddingModel;
     if (!this.serverUrl) {
       console.warn("No serverUrl provided, defaulting to localhost");
     }
@@ -269,7 +277,7 @@ export class BgentRuntime {
    * @returns The embedding of the input.
    */
   async embed(input: string) {
-    const embeddingModel = "text-embedding-3-large";
+    const embeddingModel = this.embeddingModel;
     const requestOptions = {
       method: "POST",
       headers: {
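
To illustrate how the new option is consumed, a minimal sketch (the helper name and sample text are hypothetical): `embed()` now reads the public `embeddingModel` field, which the constructor sets from `opts.embeddingModel ?? this.embeddingModel`, so omitting the option keeps the `"text-embedding-3-large"` default.

```ts
import { BgentRuntime } from "bgent"; // import path assumed, as in the earlier sketch

// Hypothetical helper: works with any configured runtime instance.
async function embedWithConfiguredModel(runtime: BgentRuntime, text: string) {
  // `embeddingModel` is a public field (see docs/docs/classes/BgentRuntime.md above),
  // so callers can inspect which model embed() will request.
  console.log(`Embedding with ${runtime.embeddingModel}`);
  return runtime.embed(text); // no longer hard-coded to "text-embedding-3-large"
}
```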
