feat: allow to change ollama port (#811)
himself65 authored May 6, 2024
1 parent 456d3fb commit b6a6606
Showing 7 changed files with 94 additions and 101 deletions.
5 changes: 5 additions & 0 deletions .changeset/brown-suits-thank.md
@@ -0,0 +1,5 @@
+---
+"llamaindex": patch
+---
+
+feat: allow change host of ollama
5 changes: 5 additions & 0 deletions .changeset/itchy-paws-love.md
@@ -0,0 +1,5 @@
+---
+"llamaindex": patch
+---
+
+chore: export ollama in default js runtime
7 changes: 6 additions & 1 deletion examples/ollama.ts
@@ -2,7 +2,12 @@ import { OllamaEmbedding } from "llamaindex";
 import { Ollama } from "llamaindex/llm/ollama";
 
 (async () => {
-  const llm = new Ollama({ model: "llama3" });
+  const llm = new Ollama({
+    model: "llama3",
+    config: {
+      host: "http://localhost:11434",
+    },
+  });
   const embedModel = new OllamaEmbedding({ model: "nomic-embed-text" });
   {
     const response = await llm.chat({
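
Note: the new config object is passed straight through to the bundled ollama client (see new OllamaBase(params.config) in the ollama.ts diff below), so chat, completion, and embedding calls can all target a server on a non-default host or port. A minimal sketch, assuming a server started with a custom port (e.g. OLLAMA_HOST=127.0.0.1:11435 ollama serve) and llama3 already pulled:

import { Ollama } from "llamaindex/llm/ollama";

(async () => {
  // Assumed host/port; match it to wherever `ollama serve` is listening.
  const llm = new Ollama({
    model: "llama3",
    config: {
      host: "http://127.0.0.1:11435",
    },
  });
  const response = await llm.complete({ prompt: "Say hello in one word." });
  console.log(response.text);
})();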
1 change: 1 addition & 0 deletions packages/core/src/embeddings/index.ts
@@ -2,6 +2,7 @@ export * from "./GeminiEmbedding.js";
 export * from "./JinaAIEmbedding.js";
 export * from "./MistralAIEmbedding.js";
 export * from "./MultiModalEmbedding.js";
+export { OllamaEmbedding } from "./OllamaEmbedding.js";
 export * from "./OpenAIEmbedding.js";
 export { FireworksEmbedding } from "./fireworks.js";
 export { TogetherEmbedding } from "./together.js";
2 changes: 0 additions & 2 deletions packages/core/src/index.ts
@@ -10,5 +10,3 @@ export {
   HuggingFaceEmbedding,
   HuggingFaceEmbeddingModelType,
 } from "./embeddings/HuggingFaceEmbedding.js";
-export { OllamaEmbedding } from "./embeddings/OllamaEmbedding.js";
-export { Ollama, type OllamaParams } from "./llm/ollama.js";
1 change: 1 addition & 0 deletions packages/core/src/llm/index.ts
@@ -17,6 +17,7 @@ export * from "./openai.js";
 export { Portkey } from "./portkey.js";
 export * from "./replicate_ai.js";
 // Note: The type aliases for replicate are to simplify usage for Llama 2 (we're using replicate for Llama 2 support)
+export { Ollama, type OllamaParams } from "./ollama.js";
 export {
   ALL_AVAILABLE_REPLICATE_MODELS,
   DeuceChatStrategy,
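
Note: combined with the changeset above ("chore: export ollama in default js runtime"), this re-export should make the classes resolvable from the package root rather than only from the llamaindex/llm/ollama subpath. A sketch under that assumption (model names are placeholders):

import { Ollama, OllamaEmbedding } from "llamaindex";

const llm = new Ollama({ model: "llama3" });
const embedModel = new OllamaEmbedding({ model: "nomic-embed-text" });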
174 changes: 76 additions & 98 deletions packages/core/src/llm/ollama.ts
@@ -1,11 +1,23 @@
 import { BaseEmbedding } from "../embeddings/types.js";
-import ollama, {
+import {
+  Ollama as OllamaBase,
+  type Config,
+  type CopyRequest,
   type CreateRequest,
+  type DeleteRequest,
+  type EmbeddingsRequest,
+  type EmbeddingsResponse,
+  type GenerateRequest,
+  type ListResponse,
   type ChatResponse as OllamaChatResponse,
   type GenerateResponse as OllamaGenerateResponse,
   type Options,
   type ProgressResponse,
+  type PullRequest,
+  type PushRequest,
   type ShowRequest,
+  type ShowResponse,
+  type StatusResponse,
 } from "../internal/deps/ollama.js";
 import type {
   ChatResponse,
@@ -35,15 +47,21 @@ const completionAccessor = (
 
 export type OllamaParams = {
   model: string;
+  config?: Partial<Config>;
   options?: Partial<Options>;
 };
 
 /**
  * This class both implements the LLM and Embedding interfaces.
  */
-export class Ollama extends BaseEmbedding implements LLM {
+export class Ollama
+  extends BaseEmbedding
+  implements LLM, Omit<OllamaBase, "chat">
+{
   readonly hasStreaming = true;
 
+  ollama: OllamaBase;
+
   // https://ollama.ai/library
   model: string;
 
@@ -57,6 +75,7 @@ export class Ollama extends BaseEmbedding implements LLM {
   constructor(params: OllamaParams) {
     super();
     this.model = params.model;
+    this.ollama = new OllamaBase(params.config);
     if (params.options) {
       this.options = {
         ...this.options,
@@ -97,7 +116,7 @@
       },
     };
     if (!stream) {
-      const chatResponse = await ollama.chat({
+      const chatResponse = await this.ollama.chat({
         ...payload,
         stream: false,
       });
@@ -110,7 +129,7 @@
         raw: chatResponse,
       };
     } else {
-      const stream = await ollama.chat({
+      const stream = await this.ollama.chat({
         ...payload,
         stream: true,
       });
@@ -137,7 +156,7 @@
       },
     };
     if (!stream) {
-      const response = await ollama.generate({
+      const response = await this.ollama.generate({
         ...payload,
         stream: false,
       });
@@ -146,7 +165,7 @@
         raw: response,
       };
     } else {
-      const stream = await ollama.generate({
+      const stream = await this.ollama.generate({
         ...payload,
         stream: true,
       });
@@ -162,7 +181,7 @@
         ...this.options,
       },
     };
-    const response = await ollama.embeddings({
+    const response = await this.ollama.embeddings({
       ...payload,
     });
     return response.embedding;
@@ -176,104 +195,63 @@
     return this.getEmbedding(query);
   }
 
-  // ollama specific methods, inherited from the ollama library
-  static async list() {
-    const { models } = await ollama.list();
-    return models;
-  }
-
-  static async detail(modelName: string, options?: Omit<ShowRequest, "model">) {
-    return ollama.show({
-      model: modelName,
-      ...options,
-    });
-  }
+  // Inherited from OllamaBase
 
-  static async create(
-    modelName: string,
-    options?: Omit<CreateRequest, "model"> & {
-      stream: false;
-    },
+  push(
+    request: PushRequest & { stream: true },
+  ): Promise<AsyncGenerator<ProgressResponse, any, unknown>>;
+  push(
+    request: PushRequest & { stream?: false | undefined },
   ): Promise<ProgressResponse>;
-  static async create(
-    modelName: string,
-    options: Omit<CreateRequest, "model"> & {
-      stream: true;
-    },
-  ): Promise<AsyncGenerator<ProgressResponse>>;
-  static async create(
-    modelName: string,
-    options?: Omit<CreateRequest, "model"> & {
-      stream: boolean;
-    },
-  ) {
-    return ollama.create({
-      model: modelName,
-      ...options,
-      stream: (options ? !!options.stream : false) as never,
-    }) as Promise<ProgressResponse> | Promise<AsyncGenerator<ProgressResponse>>;
+  push(request: any): any {
+    return this.ollama.push(request);
   }
-
-  static async delete(modelName: string) {
-    return ollama.delete({
-      model: modelName,
-    });
+  abort(): void {
+    return this.ollama.abort();
   }
-
-  static async copy(source: string, destination: string) {
-    return ollama.copy({
-      source,
-      destination,
-    });
+  encodeImage(image: string | Uint8Array): Promise<string> {
+    return this.ollama.encodeImage(image);
  }
-
-  static async pull(
-    modelName: string,
-    options?: Omit<CreateRequest, "model"> & {
-      stream: false;
-    },
-  ): Promise<ProgressResponse>;
-  static async pull(
-    modelName: string,
-    options: Omit<CreateRequest, "model"> & {
-      stream: true;
-    },
-  ): Promise<AsyncGenerator<ProgressResponse>>;
-  static async pull(
-    modelName: string,
-    options?: Omit<CreateRequest, "model"> & {
-      stream: boolean;
-    },
-  ) {
-    return ollama.pull({
-      model: modelName,
-      ...options,
-      stream: (options ? !!options.stream : false) as never,
-    }) as Promise<ProgressResponse> | Promise<AsyncGenerator<ProgressResponse>>;
+  generate(
+    request: GenerateRequest & { stream: true },
+  ): Promise<AsyncGenerator<OllamaGenerateResponse>>;
+  generate(
+    request: GenerateRequest & { stream?: false | undefined },
+  ): Promise<OllamaGenerateResponse>;
+  generate(request: any): any {
+    return this.ollama.generate(request);
   }
-
-  static async push(
-    modelName: string,
-    options?: Omit<CreateRequest, "model"> & {
-      stream: false;
-    },
+  create(
+    request: CreateRequest & { stream: true },
+  ): Promise<AsyncGenerator<ProgressResponse>>;
+  create(
+    request: CreateRequest & { stream?: false | undefined },
   ): Promise<ProgressResponse>;
-  static async push(
-    modelName: string,
-    options: Omit<CreateRequest, "model"> & {
-      stream: true;
-    },
+  create(request: any): any {
+    return this.ollama.create(request);
+  }
+  pull(
+    request: PullRequest & { stream: true },
   ): Promise<AsyncGenerator<ProgressResponse>>;
-  static async push(
-    modelName: string,
-    options?: Omit<CreateRequest, "model"> & {
-      stream: boolean;
-    },
-  ) {
-    return ollama.push({
-      model: modelName,
-      ...options,
-      stream: (options ? !!options.stream : false) as never,
-    }) as Promise<ProgressResponse> | Promise<AsyncGenerator<ProgressResponse>>;
+  pull(
+    request: PullRequest & { stream?: false | undefined },
+  ): Promise<ProgressResponse>;
+  pull(request: any): any {
+    return this.ollama.pull(request);
   }
+  delete(request: DeleteRequest): Promise<StatusResponse> {
+    return this.ollama.delete(request);
+  }
+  copy(request: CopyRequest): Promise<StatusResponse> {
+    return this.ollama.copy(request);
+  }
+  list(): Promise<ListResponse> {
+    return this.ollama.list();
+  }
+  show(request: ShowRequest): Promise<ShowResponse> {
+    return this.ollama.show(request);
  }
+  embeddings(request: EmbeddingsRequest): Promise<EmbeddingsResponse> {
+    return this.ollama.embeddings(request);
+  }
 }
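
Note: the static helpers (Ollama.list(), Ollama.create(), Ollama.pull(), ...) are replaced by instance methods that proxy to the configured OllamaBase client, so model management now goes through the same host set in the constructor. A short sketch of the new call shape, assuming a reachable server with llama3 available:

import { Ollama } from "llamaindex/llm/ollama";

(async () => {
  const llm = new Ollama({ model: "llama3" });

  // Replaces the old static Ollama.list(); note it now resolves to the full
  // ListResponse instead of just the models array.
  const { models } = await llm.list();
  console.log(models.map((m) => m.name));

  // With stream: true, pull resolves to an async generator of progress updates.
  const progress = await llm.pull({ model: "llama3", stream: true });
  for await (const update of progress) {
    console.log(update.status);
  }
})();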
