From 6387bc6a2b2dc74535fd98257988683646a7f780 Mon Sep 17 00:00:00 2001
From: Marcus Schiesser
Date: Mon, 2 Dec 2024 12:29:43 +0700
Subject: [PATCH] fix: simplify examples

---
 examples/vercel/README.md             | 50 +++++++++++++++++++++++++++
 examples/vercel/llamacloud.ts         | 26 ++++++++------
 examples/vercel/vector-store.ts       | 27 +++++++++------
 packages/providers/vercel/src/tool.ts |  6 ++--
 4 files changed, 84 insertions(+), 25 deletions(-)
 create mode 100644 examples/vercel/README.md

diff --git a/examples/vercel/README.md b/examples/vercel/README.md
new file mode 100644
index 0000000000..edb1f11f24
--- /dev/null
+++ b/examples/vercel/README.md
@@ -0,0 +1,50 @@
+# Vercel Examples
+
+These examples demonstrate how to integrate LlamaIndexTS with Vercel's AI SDK, using LlamaIndex for search and retrieval with both a local vector store and LlamaCloud.
+
+## Setup
+
+To run these examples, first install the required dependencies from the parent folder `examples`:
+
+```bash
+npm i
+```
+
+## Running the Examples
+
+Run all commands below from the parent folder `examples`. The following examples are available:
+
+### Vector Store Example
+
+Run the local vector store example with:
+
+```bash
+npx tsx vercel/vector-store.ts
+```
+
+This example demonstrates:
+
+- Creating a vector store index from one document
+- Using Vercel's AI SDK with LlamaIndex for streaming responses
+
+### LlamaCloud Example
+
+To run the LlamaCloud example:
+
+```bash
+npx tsx vercel/llamacloud.ts
+```
+
+This example requires a LlamaCloud API key set in your environment and an embedding model set in the `EMBEDDING_MODEL` environment variable:
+
+```bash
+export LLAMA_CLOUD_API_KEY=your_api_key_here
+export EMBEDDING_MODEL="text-embedding-3-small"
+```
+
+This example demonstrates:
+
+- Creating a LlamaCloud index from one document
+- Streaming responses using Vercel's AI SDK
+
+For more detailed information about the Vercel integration, check out [the documentation](https://ts.llamaindex.ai/docs/llamaindex/integration/vercel).
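Both example scripts changed below reduce to the same flow: build an index, wrap it in a `llamaindex` tool, and hand that tool to `streamText`. The following is a condensed sketch of that flow, distilled from the two example diffs that follow (the document text and prompt are placeholders):

```typescript
import { openai } from "@ai-sdk/openai";
import { llamaindex } from "@llamaindex/vercel";
import { streamText } from "ai";
import { Document, VectorStoreIndex } from "llamaindex";

async function main() {
  // Index a single document; any text source works here.
  const document = new Document({ text: "Some text to search.", id_: "doc-1" });
  const index = await VectorStoreIndex.fromDocuments([document]);

  // Expose the index to the AI SDK as a tool and stream the model's answer.
  const result = streamText({
    model: openai("gpt-4o"),
    prompt: "What do the documents say?",
    tools: { queryTool: llamaindex({ index }) },
    maxSteps: 5, // give the model room to call the tool, then answer
  });

  for await (const textPart of result.textStream) {
    process.stdout.write(textPart);
  }
}

main().catch(console.error);
```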
diff --git a/examples/vercel/llamacloud.ts b/examples/vercel/llamacloud.ts
index 04f8d6fa02..2fac09863d 100644
--- a/examples/vercel/llamacloud.ts
+++ b/examples/vercel/llamacloud.ts
@@ -15,20 +15,24 @@ async function main() {
     projectName: "Default",
     apiKey: process.env.LLAMA_CLOUD_API_KEY,
   });
-  const queryTool = llamaindex({
-    index,
-    description: "Search through the documents", // optional description
-  });
-  console.log("Successfully created index and queryTool");
+  console.log("Successfully created index");
 
-  streamText({
-    tools: { queryTool },
-    prompt: "Cost of moving cat from Russia to UK?",
+  const result = streamText({
     model: openai("gpt-4o"),
-    onFinish({ response }) {
-      console.log("Response:", JSON.stringify(response.messages, null, 2));
+    prompt: "Cost of moving cat from Russia to UK?",
+    tools: {
+      queryTool: llamaindex({
+        index,
+        description:
+          "get information from your knowledge base to answer questions.", // optional description
+      }),
     },
-  }).toDataStream();
+    maxSteps: 5,
+  });
+
+  for await (const textPart of result.textStream) {
+    process.stdout.write(textPart);
+  }
 }
 
 main().catch(console.error);
diff --git a/examples/vercel/vector-store.ts b/examples/vercel/vector-store.ts
index 383be6827f..c61291ba38 100644
--- a/examples/vercel/vector-store.ts
+++ b/examples/vercel/vector-store.ts
@@ -2,6 +2,7 @@ import { openai } from "@ai-sdk/openai";
 import { llamaindex } from "@llamaindex/vercel";
 import { streamText } from "ai";
 import { Document, VectorStoreIndex } from "llamaindex";
+
 import fs from "node:fs/promises";
 
 async function main() {
@@ -10,20 +11,24 @@
   const document = new Document({ text: essay, id_: path });
   const index = await VectorStoreIndex.fromDocuments([document]);
 
-  const queryTool = llamaindex({
-    index,
-    description: "Search through the documents", // optional description
-  });
-  console.log("Successfully created index and queryTool");
+  console.log("Successfully created index");
 
-  streamText({
-    tools: { queryTool },
-    prompt: "Cost of moving cat from Russia to UK?",
+  const result = streamText({
     model: openai("gpt-4o"),
-    onFinish({ response }) {
-      console.log("Response:", JSON.stringify(response.messages, null, 2));
+    prompt: "Cost of moving cat from Russia to UK?",
+    tools: {
+      queryTool: llamaindex({
+        index,
+        description:
+          "get information from your knowledge base to answer questions.", // optional description
+      }),
     },
-  }).toDataStream();
+    maxSteps: 5,
+  });
+
+  for await (const textPart of result.textStream) {
+    process.stdout.write(textPart);
+  }
 }
 
 main().catch(console.error);
diff --git a/packages/providers/vercel/src/tool.ts b/packages/providers/vercel/src/tool.ts
index efdce13f7b..a0720d94fc 100644
--- a/packages/providers/vercel/src/tool.ts
+++ b/packages/providers/vercel/src/tool.ts
@@ -1,5 +1,5 @@
 import type { BaseQueryEngine } from "@llamaindex/core/query-engine";
-import type { CoreTool } from "ai";
+import { type CoreTool, tool } from "ai";
 import { z } from "zod";
 
 interface DatasourceIndex {
@@ -14,7 +14,7 @@ export function llamaindex({
   description?: string;
 }): CoreTool {
   const queryEngine = index.asQueryEngine();
-  return {
+  return tool({
     description: description ?? "Get information about your documents.",
     parameters: z.object({
       query: z
@@ -25,5 +25,5 @@
       const result = await queryEngine?.query({ query });
       return result?.message.content ?? "No result found in documents.";
     },
-  };
+  });
 }
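The `tool.ts` change is worth a note: wrapping the returned object in the AI SDK's `tool()` helper is essentially a no-op at runtime, but it lets TypeScript infer the `execute` argument types from the zod `parameters` schema. A minimal sketch of the same pattern outside this package (the echo tool is hypothetical, for illustration only):

```typescript
import { tool } from "ai";
import { z } from "zod";

// Hypothetical example tool: `tool()` ties the zod schema to the type of
// `execute`'s arguments, so `query` is inferred as `string` here.
const echoTool = tool({
  description: "Echo the query back to the caller.",
  parameters: z.object({
    query: z.string().describe("Text to echo back."),
  }),
  execute: async ({ query }) => `You said: ${query}`,
});
```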