diff --git a/apps/next/src/content/docs/llamaindex/_static/concepts/indexing.jpg b/apps/next/src/content/docs/llamaindex/_static/concepts/indexing.jpg new file mode 100644 index 0000000000..8672967213 Binary files /dev/null and b/apps/next/src/content/docs/llamaindex/_static/concepts/indexing.jpg differ diff --git a/apps/next/src/content/docs/llamaindex/_static/concepts/querying.jpg b/apps/next/src/content/docs/llamaindex/_static/concepts/querying.jpg new file mode 100644 index 0000000000..3c241bdda5 Binary files /dev/null and b/apps/next/src/content/docs/llamaindex/_static/concepts/querying.jpg differ diff --git a/apps/next/src/content/docs/llamaindex/_static/concepts/rag.jpg b/apps/next/src/content/docs/llamaindex/_static/concepts/rag.jpg new file mode 100644 index 0000000000..b68eca2564 Binary files /dev/null and b/apps/next/src/content/docs/llamaindex/_static/concepts/rag.jpg differ diff --git a/apps/next/src/content/docs/llamaindex/examples/agent.mdx b/apps/next/src/content/docs/llamaindex/examples/agent.mdx new file mode 100644 index 0000000000..84b4fb29f3 --- /dev/null +++ b/apps/next/src/content/docs/llamaindex/examples/agent.mdx @@ -0,0 +1,12 @@ +--- +title: Agents +--- + +A built-in agent that can take decisions and reasoning based on the tools provided to it. + +## OpenAI Agent + +import { DynamicCodeBlock } from 'fumadocs-ui/components/dynamic-codeblock'; +import CodeSource from "!raw-loader!../../../../../../../examples/agent/openai"; + + diff --git a/apps/next/src/content/docs/llamaindex/examples/agent_gemini.mdx b/apps/next/src/content/docs/llamaindex/examples/agent_gemini.mdx new file mode 100644 index 0000000000..38bc4ef72d --- /dev/null +++ b/apps/next/src/content/docs/llamaindex/examples/agent_gemini.mdx @@ -0,0 +1,8 @@ +--- +title: Gemini Agent +--- + +import { DynamicCodeBlock } from 'fumadocs-ui/components/dynamic-codeblock'; +import CodeSourceGemini from "!raw-loader!../../../../../../../examples/gemini/agent.ts"; + + diff --git a/apps/next/src/content/docs/llamaindex/examples/chat_engine.mdx b/apps/next/src/content/docs/llamaindex/examples/chat_engine.mdx new file mode 100644 index 0000000000..ac4528951a --- /dev/null +++ b/apps/next/src/content/docs/llamaindex/examples/chat_engine.mdx @@ -0,0 +1,10 @@ +--- +title: Chat Engine +--- + +import { DynamicCodeBlock } from 'fumadocs-ui/components/dynamic-codeblock'; +import CodeSource from "!raw-loader!../../../../../../../examples/chatEngine"; + +Chat Engine is a class that allows you to create a chatbot from a retriever. It is a wrapper around a retriever that allows you to chat with it in a conversational manner. + + diff --git a/apps/next/src/content/docs/llamaindex/examples/context_aware_agent.mdx b/apps/next/src/content/docs/llamaindex/examples/context_aware_agent.mdx new file mode 100644 index 0000000000..6aa5c5a4fb --- /dev/null +++ b/apps/next/src/content/docs/llamaindex/examples/context_aware_agent.mdx @@ -0,0 +1,61 @@ +--- +title: Context-Aware Agent +--- + +The Context-Aware Agent enhances the capabilities of standard LLM agents by incorporating relevant context from a retriever for each query. This allows the agent to provide more informed and specific responses based on the available information. 
+ +## Usage + +Here's a simple example of how to use the Context-Aware Agent: + +```typescript +import { + Document, + VectorStoreIndex, + OpenAIContextAwareAgent, + OpenAI, +} from "llamaindex"; + +async function createContextAwareAgent() { + // Create and index some documents + const documents = [ + new Document({ + text: "LlamaIndex is a data framework for LLM applications.", + id_: "doc1", + }), + new Document({ + text: "The Eiffel Tower is located in Paris, France.", + id_: "doc2", + }), + ]; + + const index = await VectorStoreIndex.fromDocuments(documents); + const retriever = index.asRetriever({ similarityTopK: 1 }); + + // Create the Context-Aware Agent + const agent = new OpenAIContextAwareAgent({ + llm: new OpenAI({ model: "gpt-3.5-turbo" }), + contextRetriever: retriever, + }); + + // Use the agent to answer queries + const response = await agent.chat({ + message: "What is LlamaIndex used for?", + }); + + console.log("Agent Response:", response.response); +} + +createContextAwareAgent().catch(console.error); +``` + +In this example, the Context-Aware Agent uses the retriever to fetch relevant context for each query, allowing it to provide more accurate and informed responses based on the indexed documents. + +## Key Components + +- `contextRetriever`: A retriever (e.g., from a VectorStoreIndex) that fetches relevant documents or passages for each query. + +## Available Context-Aware Agents + +- `OpenAIContextAwareAgent`: A context-aware agent using OpenAI's models. +- `AnthropicContextAwareAgent`: A context-aware agent using Anthropic's models. diff --git a/apps/next/src/content/docs/llamaindex/examples/local_llm.mdx b/apps/next/src/content/docs/llamaindex/examples/local_llm.mdx new file mode 100644 index 0000000000..031019fabe --- /dev/null +++ b/apps/next/src/content/docs/llamaindex/examples/local_llm.mdx @@ -0,0 +1,79 @@ +--- +title: Local LLMs +--- + +LlamaIndex.TS supports OpenAI and [other remote LLM APIs](other_llms). You can also run a local LLM on your machine! + +## Using a local model via Ollama + +The easiest way to run a local LLM is via the great work of our friends at [Ollama](https://ollama.com/), who provide a simple to use client that will download, install and run a [growing range of models](https://ollama.com/library) for you. + +### Install Ollama + +They provide a one-click installer for Mac, Linux and Windows on their [home page](https://ollama.com/). + +### Pick and run a model + +Since we're going to be doing agentic work, we'll need a very capable model, but the largest models are hard to run on a laptop. We think `mixtral 8x7b` is a good balance between power and resources, but `llama3` is another great option. You can run Mixtral by running + +```bash +ollama run mixtral:8x7b +``` + +The first time you run it will also automatically download and install the model for you. + +### Switch the LLM in your code + +To tell LlamaIndex to use a local LLM, use the `Settings` object: + +```javascript +Settings.llm = new Ollama({ + model: "mixtral:8x7b", +}); +``` + +### Use local embeddings + +If you're doing retrieval-augmented generation, LlamaIndex.TS will also call out to OpenAI to index and embed your data. To be entirely local, you can use a local embedding model like this: + +```javascript +Settings.embedModel = new HuggingFaceEmbedding({ + modelType: "BAAI/bge-small-en-v1.5", + quantized: false, +}); +``` + +The first time this runs it will download the embedding model to run it. 
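Both of the snippets above assume the classes come from the main `llamaindex` package, as in the other examples in these docs. A minimal sketch of the complete local setup (the import line is an assumption based on those examples):

```typescript
import { HuggingFaceEmbedding, Ollama, Settings } from "llamaindex";

// Send all LLM calls to the local Ollama server
// (assumes you have already pulled and started `mixtral:8x7b`)
Settings.llm = new Ollama({
  model: "mixtral:8x7b",
});

// Compute embeddings locally instead of calling OpenAI
Settings.embedModel = new HuggingFaceEmbedding({
  modelType: "BAAI/bge-small-en-v1.5",
  quantized: false,
});
```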
+ +### Try it out + +With a local LLM and local embeddings in place, you can perform RAG as usual and everything will happen on your machine without calling an API: + +```typescript +async function main() { + // Load essay from abramov.txt in Node + const path = "node_modules/llamaindex/examples/abramov.txt"; + + const essay = await fs.readFile(path, "utf-8"); + + // Create Document object with essay + const document = new Document({ text: essay, id_: path }); + + // Split text and create embeddings. Store them in a VectorStoreIndex + const index = await VectorStoreIndex.fromDocuments([document]); + + // Query the index + const queryEngine = index.asQueryEngine(); + + const response = await queryEngine.query({ + query: "What did the author do in college?", + }); + + // Output response + console.log(response.toString()); +} + +main().catch(console.error); +``` + +You can see the [full example file](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndexLocal.ts). diff --git a/apps/next/src/content/docs/llamaindex/examples/meta.json b/apps/next/src/content/docs/llamaindex/examples/meta.json new file mode 100644 index 0000000000..f432a3b772 --- /dev/null +++ b/apps/next/src/content/docs/llamaindex/examples/meta.json @@ -0,0 +1,15 @@ +{ + "title": "Examples", + "pages": [ + "more_examples", + "chat_engine", + "vector_index", + "summary_index", + "save_load_index", + "context_aware_agent", + "agent", + "agent_gemini", + "local_llm", + "other_llms" + ] +} diff --git a/apps/next/src/content/docs/llamaindex/examples/more_examples.mdx b/apps/next/src/content/docs/llamaindex/examples/more_examples.mdx new file mode 100644 index 0000000000..ef7a02754d --- /dev/null +++ b/apps/next/src/content/docs/llamaindex/examples/more_examples.mdx @@ -0,0 +1,21 @@ +--- +title: See all examples +--- + +Our GitHub repository has a wealth of examples to explore and try out. You can check out our [examples folder](https://github.com/run-llama/LlamaIndexTS/tree/main/examples) to see them all at once, or browse the pages in this section for some selected highlights. + +## Check out all examples + +It may be useful to check out all the examples at once so you can try them out locally. To do this into a folder called `my-new-project`, run these commands: + +```bash npm2yarn +npx degit run-llama/LlamaIndexTS/examples my-new-project +cd my-new-project +npm install +``` + +Then you can run any example in the folder with `tsx`, e.g.: + +```bash npm2yarn +npx tsx ./vectorIndex.ts +``` diff --git a/apps/next/src/content/docs/llamaindex/examples/other_llms.mdx b/apps/next/src/content/docs/llamaindex/examples/other_llms.mdx new file mode 100644 index 0000000000..5fdc6bbaa0 --- /dev/null +++ b/apps/next/src/content/docs/llamaindex/examples/other_llms.mdx @@ -0,0 +1,43 @@ +--- +title: Using other LLM APIs +--- + +import { DynamicCodeBlock } from 'fumadocs-ui/components/dynamic-codeblock'; +import CodeSource from "!raw-loader!../../../../../../../examples/mistral"; + +By default LlamaIndex.TS uses OpenAI's LLMs and embedding models, but we support [lots of other LLMs](../modules/llms) including models from Mistral (Mistral, Mixtral), Anthropic (Claude) and Google (Gemini). 
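Each of these follows the same pattern shown for Mistral below: construct the provider's LLM class and assign it to `Settings.llm`. For example, a sketch for Anthropic's Claude (assuming the `Anthropic` class is exported from `llamaindex`; the model name is illustrative):

```typescript
import { Anthropic, Settings } from "llamaindex";

// Use Claude instead of the default OpenAI model
Settings.llm = new Anthropic({
  apiKey: process.env.ANTHROPIC_API_KEY,
  model: "claude-3-5-sonnet",
});
```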
+ +If you don't want to use an API at all you can [run a local model](../../examples/local_llm) + +## Using another LLM + +You can specify what LLM LlamaIndex.TS will use on the `Settings` object, like this: + +```typescript +import { MistralAI, Settings } from "llamaindex"; + +Settings.llm = new MistralAI({ + model: "mistral-tiny", + apiKey: "", +}); +``` + +You can see examples of other APIs we support by checking out "Available LLMs" in the sidebar of our [LLMs section](../modules/llms). + +## Using another embedding model + +A frequent gotcha when trying to use a different API as your LLM is that LlamaIndex will also by default index and embed your data using OpenAI's embeddings. To completely switch away from OpenAI you will need to set your embedding model as well, for example: + +```typescript +import { MistralAIEmbedding, Settings } from "llamaindex"; + +Settings.embedModel = new MistralAIEmbedding(); +``` + +We support [many different embeddings](../modules/embeddings). + +## Full example + +This example uses Mistral's `mistral-tiny` model as the LLM and Mistral for embeddings as well. + + diff --git a/apps/next/src/content/docs/llamaindex/examples/save_load_index.mdx b/apps/next/src/content/docs/llamaindex/examples/save_load_index.mdx new file mode 100644 index 0000000000..bce10b9db6 --- /dev/null +++ b/apps/next/src/content/docs/llamaindex/examples/save_load_index.mdx @@ -0,0 +1,8 @@ +--- +title: Save/Load an Index +--- + +import { DynamicCodeBlock } from 'fumadocs-ui/components/dynamic-codeblock'; +import CodeSource from "!raw-loader!../../../../../../../examples/storageContext"; + + diff --git a/apps/next/src/content/docs/llamaindex/examples/summary_index.mdx b/apps/next/src/content/docs/llamaindex/examples/summary_index.mdx new file mode 100644 index 0000000000..344ce6fe84 --- /dev/null +++ b/apps/next/src/content/docs/llamaindex/examples/summary_index.mdx @@ -0,0 +1,8 @@ +--- +title: Summary Index +--- + +import { DynamicCodeBlock } from 'fumadocs-ui/components/dynamic-codeblock'; +import CodeSource from "!raw-loader!../../../../../../../examples/summaryIndex"; + + diff --git a/apps/next/src/content/docs/llamaindex/examples/vector_index.mdx b/apps/next/src/content/docs/llamaindex/examples/vector_index.mdx new file mode 100644 index 0000000000..03c16fd956 --- /dev/null +++ b/apps/next/src/content/docs/llamaindex/examples/vector_index.mdx @@ -0,0 +1,8 @@ +--- +title: Vector Index +--- + +import { DynamicCodeBlock } from 'fumadocs-ui/components/dynamic-codeblock'; +import CodeSource from "!raw-loader!../../../../../../../examples/vectorIndex"; + + diff --git a/apps/next/src/content/docs/llamaindex/getting_started/concepts.mdx b/apps/next/src/content/docs/llamaindex/getting_started/concepts.mdx new file mode 100644 index 0000000000..4189c3204b --- /dev/null +++ b/apps/next/src/content/docs/llamaindex/getting_started/concepts.mdx @@ -0,0 +1,76 @@ +--- +title: Concepts +--- + +LlamaIndex.TS helps you build LLM-powered applications (e.g. Q&A, chatbot) over custom data. + +In this high-level concepts guide, you will learn: + +- how an LLM can answer questions using your own data. +- key concepts and modules in LlamaIndex.TS for composing your own query pipeline. + +## Answering Questions Across Your Data + +LlamaIndex uses a two stage method when using an LLM with your data: + +1. **indexing stage**: preparing a knowledge base, and +2. 
**querying stage**: retrieving relevant context from the knowledge base to assist the LLM in responding to a question + +![](../_static/concepts/rag.jpg) + +This process is also known as Retrieval Augmented Generation (RAG). + +LlamaIndex.TS provides the essential toolkit for making both steps super easy. + +Let's explore each stage in detail. + +### Indexing Stage + +LlamaIndex.TS helps you prepare the knowledge base with a suite of data connectors and indexes. + +![](../_static/concepts/indexing.jpg) + +[**Data Loaders**](/docs/llamaindex/modules/data_loaders/index): +A data connector (i.e. `Reader`) ingests data from different data sources and data formats into a simple `Document` representation (text and simple metadata). + +[**Documents / Nodes**](/docs/llamaindex/modules/documents_and_nodes/index): A `Document` is a generic container around any data source - for instance, a PDF, an API output, or retrieved data from a database. A `Node` is the atomic unit of data in LlamaIndex and represents a "chunk" of a source `Document`. It's a rich representation that includes metadata and relationships (to other nodes) to enable accurate and expressive retrieval operations. + +[**Data Indexes**](/docs/llamaindex/modules/data_index): +Once you've ingested your data, LlamaIndex helps you index data into a format that's easy to retrieve. + +Under the hood, LlamaIndex parses the raw documents into intermediate representations, calculates vector embeddings, and stores your data in memory or on disk. + +### Querying Stage + +In the querying stage, the query pipeline retrieves the most relevant context given a user query, +and passes it to the LLM (along with the query) to synthesize a response. + +This gives the LLM up-to-date knowledge that is not in its original training data, +which also reduces hallucination. + +The key challenges in the querying stage are retrieval, orchestration, and reasoning over (potentially many) knowledge bases. + +LlamaIndex provides composable modules that help you build and integrate RAG pipelines for Q&A (query engine), chatbot (chat engine), or as part of an agent. + +These building blocks can be customized to reflect ranking preferences, as well as composed to reason over multiple knowledge bases in a structured way. + +![](../_static/concepts/querying.jpg) + +#### Building Blocks + +[**Retrievers**](/docs/llamaindex/modules/retriever): +A retriever defines how to efficiently retrieve relevant context from a knowledge base (i.e. index) when given a query. +The specific retrieval logic differs for different indices, the most popular being dense retrieval against a vector index. + +[**Response Synthesizers**](/docs/llamaindex/modules/response_synthesizer): +A response synthesizer generates a response from an LLM, using a user query and a given set of retrieved text chunks. + +#### Pipelines + +[**Query Engines**](/docs/llamaindex/modules/query_engines): +A query engine is an end-to-end pipeline that allows you to ask questions over your data. +It takes in a natural language query and returns a response, along with reference context retrieved and passed to the LLM. + +[**Chat Engines**](/docs/llamaindex/modules/chat_engine): +A chat engine is an end-to-end pipeline for having a conversation with your data +(multiple back-and-forth exchanges instead of a single question & answer).
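To make the two stages concrete, here is a minimal sketch that mirrors the vector index example used elsewhere in these docs: it wraps some text in a `Document`, indexes it, and then queries it (the sample text and question are illustrative only):

```typescript
import { Document, VectorStoreIndex } from "llamaindex";

async function main() {
  // Indexing stage: wrap raw text in a Document and build a vector index
  // (parsing into Nodes and embedding happen under the hood)
  const document = new Document({
    text: "LlamaIndex is a data framework for LLM applications.",
  });
  const index = await VectorStoreIndex.fromDocuments([document]);

  // Querying stage: the query engine retrieves the most relevant chunks
  // and passes them to the LLM to synthesize a response
  const queryEngine = index.asQueryEngine();
  const response = await queryEngine.query({
    query: "What is LlamaIndex?",
  });

  console.log(response.toString());
}

main().catch(console.error);
```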
diff --git a/apps/next/src/content/docs/llamaindex/getting_started/environments.mdx b/apps/next/src/content/docs/llamaindex/getting_started/environments.mdx new file mode 100644 index 0000000000..87530ffde6 --- /dev/null +++ b/apps/next/src/content/docs/llamaindex/getting_started/environments.mdx @@ -0,0 +1,20 @@ +--- +title: Environments +--- + +We support Node.JS versions 18, 20 and 22, with experimental support for Deno, Bun and Vercel Edge functions. + +## NextJS + +If you're using NextJS you'll need to add `withLlamaIndex` to your `next.config.js` file. This will add the necessary configuration for included 3rd-party libraries to your build: + +```js +// next.config.js +const withLlamaIndex = require("llamaindex/next"); + +module.exports = withLlamaIndex({ + // your next.js config +}); +``` + +For details, check the latest [withLlamaIndex](https://github.com/run-llama/LlamaIndexTS/blob/main/packages/llamaindex/src/next.ts) implementation. diff --git a/apps/next/src/content/docs/llamaindex/getting_started/index.mdx b/apps/next/src/content/docs/llamaindex/getting_started/index.mdx new file mode 100644 index 0000000000..f77fa845a0 --- /dev/null +++ b/apps/next/src/content/docs/llamaindex/getting_started/index.mdx @@ -0,0 +1,35 @@ +--- +title: Getting Started with LlamaIndex.TS +description: Install llamaindex by running a single command. +--- + +import { Tab, Tabs } from "fumadocs-ui/components/tabs"; + + + ```shell tab="npm" + npm install llamaindex + ``` + + ```shell tab="yarn" + yarn add llamaindex + ``` + + ```shell tab="pnpm" + pnpm add llamaindex + ``` + + +## What's next? + + + + + diff --git a/apps/next/src/content/docs/llamaindex/getting_started/meta.json b/apps/next/src/content/docs/llamaindex/getting_started/meta.json new file mode 100644 index 0000000000..ed2c8903e1 --- /dev/null +++ b/apps/next/src/content/docs/llamaindex/getting_started/meta.json @@ -0,0 +1,4 @@ +{ + "title": "Getting Started", + "pages": ["index", "setup", "starter_tutorial", "environments", "concepts"] +} diff --git a/apps/next/src/content/docs/llamaindex/setup/cloudflare.mdx b/apps/next/src/content/docs/llamaindex/getting_started/setup/cloudflare.mdx similarity index 100% rename from apps/next/src/content/docs/llamaindex/setup/cloudflare.mdx rename to apps/next/src/content/docs/llamaindex/getting_started/setup/cloudflare.mdx diff --git a/apps/next/src/content/docs/llamaindex/setup/getting-started.mdx b/apps/next/src/content/docs/llamaindex/getting_started/setup/getting-started.mdx similarity index 100% rename from apps/next/src/content/docs/llamaindex/setup/getting-started.mdx rename to apps/next/src/content/docs/llamaindex/getting_started/setup/getting-started.mdx diff --git a/apps/next/src/content/docs/llamaindex/setup/meta.json b/apps/next/src/content/docs/llamaindex/getting_started/setup/meta.json similarity index 100% rename from apps/next/src/content/docs/llamaindex/setup/meta.json rename to apps/next/src/content/docs/llamaindex/getting_started/setup/meta.json diff --git a/apps/next/src/content/docs/llamaindex/setup/next.mdx b/apps/next/src/content/docs/llamaindex/getting_started/setup/next.mdx similarity index 100% rename from apps/next/src/content/docs/llamaindex/setup/next.mdx rename to apps/next/src/content/docs/llamaindex/getting_started/setup/next.mdx diff --git a/apps/next/src/content/docs/llamaindex/setup/node.mdx b/apps/next/src/content/docs/llamaindex/getting_started/setup/node.mdx similarity index 100% rename from apps/next/src/content/docs/llamaindex/setup/node.mdx rename to 
apps/next/src/content/docs/llamaindex/getting_started/setup/node.mdx diff --git a/apps/next/src/content/docs/llamaindex/setup/typescript.mdx b/apps/next/src/content/docs/llamaindex/getting_started/setup/typescript.mdx similarity index 100% rename from apps/next/src/content/docs/llamaindex/setup/typescript.mdx rename to apps/next/src/content/docs/llamaindex/getting_started/setup/typescript.mdx diff --git a/apps/next/src/content/docs/llamaindex/setup/vite.mdx b/apps/next/src/content/docs/llamaindex/getting_started/setup/vite.mdx similarity index 100% rename from apps/next/src/content/docs/llamaindex/setup/vite.mdx rename to apps/next/src/content/docs/llamaindex/getting_started/setup/vite.mdx diff --git a/apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/agent.mdx b/apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/agent.mdx new file mode 100644 index 0000000000..ffa84aa94c --- /dev/null +++ b/apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/agent.mdx @@ -0,0 +1,47 @@ +--- +title: Agent tutorial +--- + +import { DynamicCodeBlock } from 'fumadocs-ui/components/dynamic-codeblock'; +import CodeSource from "!raw-loader!../../../../../../../../examples/agent/openai"; + +We have a comprehensive, step-by-step [guide to building agents in LlamaIndex.TS](../../guides/agents/setup) that we recommend to learn what agents are and how to build them for production. But building a basic agent is simple: + +## Set up + +In a new folder: + +```bash npm2yarn +npm init +npm install -D typescript @types/node +``` + +## Run agent + +Create the file `example.ts`. This code will: + +- Create two tools for use by the agent: + - A `sumNumbers` tool that adds two numbers + - A `divideNumbers` tool that divides numbers +- Create an agent that can use both tools +- Ask it to add 5 + 5 and then divide the result by 2 +- Log the agent's response + + + +To run the code: + +```bash +npx tsx example.ts +``` + +You should expect output something like: + +``` +{ + content: 'The sum of 5 + 5 is 10. When you divide 10 by 2, you get 5.', + role: 'assistant', + options: {} +} +Done +``` diff --git a/apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/chatbot.mdx b/apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/chatbot.mdx new file mode 100644 index 0000000000..8672118933 --- /dev/null +++ b/apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/chatbot.mdx @@ -0,0 +1,25 @@ +--- +title: Chatbot tutorial +--- + +Once you've mastered basic [retrieval-augmented generation](retrieval_augmented_generation) you may want to create an interface to chat with your data. You can do this step-by-step, but we recommend getting started quickly using `create-llama`. + +## Using create-llama + +`create-llama` is a powerful but easy-to-use command-line tool that generates a working, full-stack web application that allows you to chat with your data. You can learn more about it on [the `create-llama` README page](https://www.npmjs.com/package/create-llama). + +Run it once and it will ask you a series of questions about the kind of application you want to generate. Then you can customize your application to suit your use-case. To get started, run: + +```bash npm2yarn +npx create-llama@latest +``` + +Once your app is generated, `cd` into your app directory and run + +```bash npm2yarn +npm run dev +``` + +to start the development server.
You can then visit [http://localhost:3000](http://localhost:3000) to see your app, which should look something like this: + +![create-llama interface](./images/create_llama.png) diff --git a/apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/images/create_llama.png b/apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/images/create_llama.png new file mode 100644 index 0000000000..0dd4daddb7 Binary files /dev/null and b/apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/images/create_llama.png differ diff --git a/apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/meta.json b/apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/meta.json new file mode 100644 index 0000000000..1ea6d9295c --- /dev/null +++ b/apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/meta.json @@ -0,0 +1,9 @@ +{ + "title": "Starter Tutorials", + "pages": [ + "retrieval_augmented_generation", + "chatbot", + "structured_data_extraction", + "agent" + ] +} diff --git a/apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/retrieval_augmented_generation.mdx b/apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/retrieval_augmented_generation.mdx new file mode 100644 index 0000000000..2da654d1e8 --- /dev/null +++ b/apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/retrieval_augmented_generation.mdx @@ -0,0 +1,56 @@ +--- +title: Retrieval Augmented Generation (RAG) Tutorial +--- + +import { DynamicCodeBlock } from 'fumadocs-ui/components/dynamic-codeblock'; +import CodeSource from "!raw-loader!../../../../../../../../examples/vectorIndex"; +import TSConfigSource from "!!raw-loader!../../../../../../../../examples/tsconfig.json"; + +One of the most common use-cases for LlamaIndex is Retrieval-Augmented Generation or RAG, in which your data is indexed and selectively retrieved to be given to an LLM as source material for responding to a query. You can learn more about the [concepts behind RAG](../concepts). + +## Set up the project + +In a new folder, run: + +```bash npm2yarn +npm init +npm install -D typescript @types/node +``` + +Then, check out the [installation](../installation) steps to install LlamaIndex.TS and prepare an OpenAI key. + +You can use [other LLMs](../../examples/other_llms) via their APIs; if you would prefer to use local models check out our [local LLM example](../../examples/local_llm). + +## Run queries + +Create the file `example.ts`. This code will + +- load an example file +- convert it into a Document object +- index it (which creates embeddings using OpenAI) +- create a query engine to answer questions about the data + + + +Create a `tsconfig.json` file in the same folder: + + + +Now you can run the code with + +```bash +npx tsx example.ts +``` + +You should expect output something like: + +``` +In college, the author studied subjects like linear algebra and physics, but did not find them particularly interesting. They started slacking off, skipping lectures, and eventually stopped attending classes altogether. They also had a negative experience with their English classes, where they were required to pay for catch-up training despite getting verbal approval to skip most of the classes. Ultimately, the author lost motivation for college due to their job as a software developer and stopped attending classes, only returning years later to pick up their papers. 
+ +0: Score: 0.8305309270895813 - I started this decade as a first-year college stud... + + +1: Score: 0.8286388215713089 - A short digression. I’m not saying colleges are wo... +``` + +Once you've mastered basic RAG, you may want to consider [chatting with your data](chatbot). diff --git a/apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/structured_data_extraction.mdx b/apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/structured_data_extraction.mdx new file mode 100644 index 0000000000..e925192cd1 --- /dev/null +++ b/apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/structured_data_extraction.mdx @@ -0,0 +1,50 @@ +--- +title: Structured data extraction tutorial +--- + +import { DynamicCodeBlock } from 'fumadocs-ui/components/dynamic-codeblock'; +import CodeSource from "!raw-loader!../../../../../../../../examples/jsonExtract"; + +Make sure you have installed LlamaIndex.TS and have an OpenAI key. If you haven't, check out the [installation](../installation) guide. + +You can use [other LLMs](../../examples/other_llms) via their APIs; if you would prefer to use local models check out our [local LLM example](../../examples/local_llm). + +## Set up + +In a new folder: + +```bash npm2yarn +npm init +npm install -D typescript @types/node +``` + +## Extract data + +Create the file `example.ts`. This code will: + +- Set up an LLM connection to GPT-4 +- Give an example of the data structure we wish to generate +- Prompt the LLM with instructions and the example, plus a sample transcript + + + +To run the code: + +```bash +npx tsx example.ts +``` + +You should expect output something like: + +```json +{ + "summary": "Sarah from XYZ Company called John to introduce the XYZ Widget, a tool designed to automate tasks and improve productivity. John expressed interest and requested case studies and a product demo. Sarah agreed to send the information and follow up to schedule the demo.", + "products": ["XYZ Widget"], + "rep_name": "Sarah", + "prospect_name": "John", + "action_items": [ + "Send case studies and additional product information to John", + "Follow up with John to schedule a product demo" + ] +} +``` diff --git a/apps/next/src/content/docs/llamaindex/guide/agents/1_setup.mdx b/apps/next/src/content/docs/llamaindex/guide/agents/1_setup.mdx new file mode 100644 index 0000000000..770fb26c87 --- /dev/null +++ b/apps/next/src/content/docs/llamaindex/guide/agents/1_setup.mdx @@ -0,0 +1,39 @@ +--- +title: Agent tutorial +--- + +In this guide we'll walk you through the process of building an Agent in JavaScript using the LlamaIndex.TS library, starting from nothing and adding complexity in stages. + +## What is an Agent? + +In LlamaIndex, an agent is a semi-autonomous piece of software powered by an LLM that is given a task and executes a series of steps towards solving that task. It is given a set of tools, which can be anything from arbitrary functions up to full LlamaIndex query engines, and it selects the best available tool to complete each step. When each step is completed, the agent judges whether the task is now complete, in which case it returns a result to the user, or whether it needs to take another step, in which case it loops back to the start. + +![agent flow](./images/agent_flow.png) + +## Install LlamaIndex.TS + +You'll need to have a recent version of [Node.js](https://nodejs.org/en) installed. 
Then you can install LlamaIndex.TS by running + +```bash +npm install llamaindex +``` + +## Choose your model + +By default we'll be using OpenAI with GPT-4, as it's a powerful model and easy to get started with. If you'd prefer to run a local model, see [using a local model](local_model). + +## Get an OpenAI API key + +If you don't already have one, you can sign up for an [OpenAI API key](https://platform.openai.com/api-keys). You should then put the key in a `.env` file in the root of the project; the file should look like + +``` +OPENAI_API_KEY=sk-XXXXXXXXXXXXXXXXXXXXXXXX +``` + +We'll use `dotenv` to pull the API key out of that .env file, so also run: + +```bash +npm install dotenv +``` + +Now you're ready to [create your agent](create_agent). diff --git a/apps/next/src/content/docs/llamaindex/guide/agents/2_create_agent.mdx b/apps/next/src/content/docs/llamaindex/guide/agents/2_create_agent.mdx new file mode 100644 index 0000000000..909250a2b2 --- /dev/null +++ b/apps/next/src/content/docs/llamaindex/guide/agents/2_create_agent.mdx @@ -0,0 +1,181 @@ +--- +title: Create a basic agent +--- + +We want to use `await` so we're going to wrap all of our code in a `main` function, like this: + +```typescript +// Your imports go here + +async function main() { + // the rest of your code goes here +} + +main().catch(console.error); +``` + +For the rest of this guide we'll assume your code is wrapped like this so we can use `await`. You can run the code this way: + +```bash +npx tsx example.ts +``` + +### Load your dependencies + +First we'll need to pull in our dependencies. These are: + +- The OpenAI class to use the OpenAI LLM +- FunctionTool to provide tools to our agent +- OpenAIAgent to create the agent itself +- Settings to define some global settings for the library +- Dotenv to load our API key from the .env file + +```javascript +import { OpenAI, FunctionTool, OpenAIAgent, Settings } from "llamaindex"; +import "dotenv/config"; +``` + +### Initialize your LLM + +We need to tell our OpenAI class where its API key is, and which of OpenAI's models to use. We'll be using `gpt-4o`, which is capable while still being pretty cheap. This is a global setting, so anywhere an LLM is needed will use the same model. + +```javascript +Settings.llm = new OpenAI({ + apiKey: process.env.OPENAI_API_KEY, + model: "gpt-4o", +}); +``` + +### Turn on logging + +We want to see what our agent is up to, so we're going to hook into some events that the library generates and print them out. There are several events possible, but we'll specifically tune in to `llm-tool-call` (when a tool is called) and `llm-tool-result` (when it responds). + +```javascript +Settings.callbackManager.on("llm-tool-call", (event) => { + console.log(event.detail); +}); +Settings.callbackManager.on("llm-tool-result", (event) => { + console.log(event.detail); +}); +``` + +### Create a function + +We're going to create a very simple function that adds two numbers together. This will be the tool we ask our agent to use. + +```javascript +const sumNumbers = ({ a, b }) => { + return `${a + b}`; +}; +``` + +Note that we're passing in an object with two named parameters, `a` and `b`. This is a little unusual, but important for defining a tool that an LLM can use. + +### Turn the function into a tool for the agent + +This is the most complicated part of creating an agent. We need to define a `FunctionTool`. 
We have to pass in: + +- The function itself (`sumNumbers`) +- A name for the function, which the LLM will use to call it +- A description of the function. The LLM will read this description to figure out what the tool does, and if it needs to call it +- A schema for function. We tell the LLM that the parameter is an `object`, and we tell it about the two named parameters we gave it, `a` and `b`. We describe each parameter as a `number`, and we say that both are required. +- You can see [more examples of function schemas](https://cookbook.openai.com/examples/how_to_call_functions_with_chat_models). + +```javascript +const tool = FunctionTool.from(sumNumbers, { + name: "sumNumbers", + description: "Use this function to sum two numbers", + parameters: { + type: "object", + properties: { + a: { + type: "number", + description: "First number to sum", + }, + b: { + type: "number", + description: "Second number to sum", + }, + }, + required: ["a", "b"], + }, +}); +``` + +We then wrap up the tools into an array. We could provide lots of tools this way, but for this example we're just using the one. + +```javascript +const tools = [tool]; +``` + +### Create the agent + +With your LLM already set up and your tools defined, creating an agent is simple: + +```javascript +const agent = new OpenAIAgent({ tools }); +``` + +### Ask the agent a question + +We can use the `chat` interface to ask our agent a question, and it will use the tools we've defined to find an answer. + +```javascript +let response = await agent.chat({ + message: "Add 101 and 303", +}); + +console.log(response); +``` + +Let's see what running this looks like using `npx tsx agent.ts` + +**_Output_** + +```javascript +{ + toolCall: { + id: 'call_ze6A8C3mOUBG4zmXO8Z4CPB5', + name: 'sumNumbers', + input: { a: 101, b: 303 } + }, + toolResult: { + tool: FunctionTool { _fn: [Function: sumNumbers], _metadata: [Object] }, + input: { a: 101, b: 303 }, + output: '404', + isError: false + } +} +``` + +```javascript +{ + response: { + raw: { + id: 'chatcmpl-9KwauZku3QOvH78MNvxJs81mDvQYK', + object: 'chat.completion', + created: 1714778824, + model: 'gpt-4-turbo-2024-04-09', + choices: [Array], + usage: [Object], + system_fingerprint: 'fp_ea6eb70039' + }, + message: { + content: 'The sum of 101 and 303 is 404.', + role: 'assistant', + options: {} + } + }, + sources: [Getter] +} +``` + +We're seeing two pieces of output here. The first is our callback firing when the tool is called. You can see in `toolResult` that the LLM has correctly passed `101` and `303` to our `sumNumbers` function, which adds them up and returns `404`. + +The second piece of output is the response from the LLM itself, where the `message.content` key is giving us the answer. + +Great! We've built an agent with tool use! Next you can: + +- [See the full code](https://github.com/run-llama/ts-agents/blob/main/1_agent/agent.ts) +- [Switch to a local LLM](local_model) +- Move on to [add Retrieval-Augmented Generation to your agent](agentic_rag) diff --git a/apps/next/src/content/docs/llamaindex/guide/agents/3_local_model.mdx b/apps/next/src/content/docs/llamaindex/guide/agents/3_local_model.mdx new file mode 100644 index 0000000000..0c649dfe37 --- /dev/null +++ b/apps/next/src/content/docs/llamaindex/guide/agents/3_local_model.mdx @@ -0,0 +1,92 @@ +--- +title: Using a local model via Ollama +--- + +If you're happy using OpenAI, you can skip this section, but many people are interested in using models they run themselves. 
The easiest way to do this is via the great work of our friends at [Ollama](https://ollama.com/), who provide a simple to use client that will download, install and run a [growing range of models](https://ollama.com/library) for you. + +### Install Ollama + +They provide a one-click installer for Mac, Linux and Windows on their [home page](https://ollama.com/). + +### Pick and run a model + +Since we're going to be doing agentic work, we'll need a very capable model, but the largest models are hard to run on a laptop. We think `mixtral 8x7b` is a good balance between power and resources, but `llama3` is another great option. You can run it simply by running + +```bash +ollama run mixtral:8x7b +``` + +The first time you run it will also automatically download and install the model for you. + +### Switch the LLM in your code + +There are two changes you need to make to the code we already wrote in `1_agent` to get Mixtral 8x7b to work. First, you need to switch to that model. Replace the call to `Settings.llm` with this: + +```javascript +Settings.llm = new Ollama({ + model: "mixtral:8x7b", +}); +``` + +### Swap to a ReActAgent + +In our original code we used a specific OpenAIAgent, so we'll need to switch to a more generic agent pattern, the ReAct pattern. This is simple: change the `const agent` line in your code to read + +```javascript +const agent = new ReActAgent({ tools }); +``` + +(You will also need to bring in `Ollama` and `ReActAgent` in your imports) + +### Run your totally local agent + +Because your embeddings were already local, your agent can now run entirely locally without making any API calls. + +```bash +node agent.mjs +``` + +Note that your model will probably run a lot slower than OpenAI, so be prepared to wait a while! + +**_Output_** + +```javascript +{ + response: { + message: { + role: 'assistant', + content: ' Thought: I need to use a tool to add the numbers 101 and 303.\n' + + 'Action: sumNumbers\n' + + 'Action Input: {"a": 101, "b": 303}\n' + + '\n' + + 'Observation: 404\n' + + '\n' + + 'Thought: I can answer without using any more tools.\n' + + 'Answer: The sum of 101 and 303 is 404.' + }, + raw: { + model: 'mixtral:8x7b', + created_at: '2024-05-09T00:24:30.339473Z', + message: [Object], + done: true, + total_duration: 64678371209, + load_duration: 57394551334, + prompt_eval_count: 475, + prompt_eval_duration: 4163981000, + eval_count: 94, + eval_duration: 3116692000 + } + }, + sources: [Getter] +} +``` + +Tada! You can see all of this in the folder `1a_mixtral`. + +### Extending to other examples + +You can use a ReActAgent instead of an OpenAIAgent in any of the further examples below, but keep in mind that GPT-4 is a lot more capable than Mixtral 8x7b, so you may see more errors or failures in reasoning if you are using an entirely local setup. + +### Next steps + +Now you've got a local agent, you can [add Retrieval-Augmented Generation to your agent](agentic_rag). diff --git a/apps/next/src/content/docs/llamaindex/guide/agents/4_agentic_rag.mdx b/apps/next/src/content/docs/llamaindex/guide/agents/4_agentic_rag.mdx new file mode 100644 index 0000000000..f5f4432afb --- /dev/null +++ b/apps/next/src/content/docs/llamaindex/guide/agents/4_agentic_rag.mdx @@ -0,0 +1,156 @@ +--- +title: Adding Retrieval-Augmented Generation (RAG) +--- + +While an agent that can perform math is nifty (LLMs are usually not very good at math), LLM-based applications are always more interesting when they work with large amounts of data. 
In this case, we're going to use a 200-page PDF of the proposed budget of the city of San Francisco for fiscal years 2024-2024 and 2024-2025. It's a great example because it's extremely wordy and full of tables of figures, which present a challenge for humans and LLMs alike. + +To learn more about RAG, we recommend this [introduction](https://docs.llamaindex.ai/en/stable/getting_started/concepts/) from our Python docs. We'll assume you know the basics: + +- Parse your source data into chunks of text. +- Encode that text as numbers, called embeddings. +- Search your embeddings for the most relevant chunks of text. +- Use the relevant chunks along with a query to ask an LLM to generate an answer. + +We're going to start with the same agent we [built in step 1](https://github.com/run-llama/ts-agents/blob/main/1_agent/agent.ts), but make a few changes. You can find the finished version [in the repository](https://github.com/run-llama/ts-agents/blob/main/2_agentic_rag/agent.ts). + +### New dependencies + +We'll be bringing in `SimpleDirectoryReader`, `HuggingFaceEmbedding`, `VectorStoreIndex`, and `QueryEngineTool`, `OpenAIContextAwareAgent` from LlamaIndex.TS, as well as the dependencies we previously used. + +```javascript +import { + OpenAI, + FunctionTool, + OpenAIAgent, + OpenAIContextAwareAgent, + Settings, + SimpleDirectoryReader, + HuggingFaceEmbedding, + VectorStoreIndex, + QueryEngineTool, +} from "llamaindex"; +``` + +### Add an embedding model + +To encode our text into embeddings, we'll need an embedding model. We could use OpenAI for this but to save on API calls we're going to use a local embedding model from HuggingFace. + +```javascript +Settings.embedModel = new HuggingFaceEmbedding({ + modelType: "BAAI/bge-small-en-v1.5", + quantized: false, +}); +``` + +### Load data using SimpleDirectoryReader + +`SimpleDirectoryReader` is a flexible tool that can read various file formats. We will point it at our data directory, which contains a single PDF file, and retrieve a set of documents. + +```javascript +const reader = new SimpleDirectoryReader(); +const documents = await reader.loadData("../data"); +``` + +### Index our data + +We will convert our text into embeddings using the `VectorStoreIndex` class through the `fromDocuments` method, which utilizes the embedding model defined earlier in `Settings`. + +```javascript +const index = await VectorStoreIndex.fromDocuments(documents); +``` + +### Configure a retriever + +Before LlamaIndex can send a query to the LLM, it needs to find the most relevant chunks to send. That's the purpose of a `Retriever`. We're going to get `VectorStoreIndex` to act as a retriever for us + +```javascript +const retriever = await index.asRetriever(); +``` + +### Configure how many documents to retrieve + +By default LlamaIndex will retrieve just the 2 most relevant chunks of text. This document is complex though, so we'll ask for more context. + +```javascript +retriever.similarityTopK = 10; +``` + +### Approach 1: Create a Context-Aware Agent + +With the retriever ready, you can create a **context-aware agent**. + +```javascript +const agent = new OpenAIContextAwareAgent({ + contextRetriever: retriever, +}); + +// Example query to the context-aware agent +let response = await agent.chat({ + message: `What's the budget of San Francisco in 2023-2024?`, +}); + +console.log(response); +``` + +**Expected Output:** + +```md +The total budget for the City and County of San Francisco for the fiscal year 2023-2024 is $14.6 billion. 
This represents a $611.8 million, or 4.4 percent, increase over the previous fiscal year's budget. The budget covers various expenditures across different departments and services, including significant allocations to public works, transportation, commerce, public protection, and health services. +``` + +### Approach 2: Using QueryEngineTool (Alternative Approach) + +If you prefer more flexibility and don't mind additional complexity, you can create a `QueryEngineTool`. This approach allows you to define the query logic, providing a more tailored way to interact with the data, but note that it introduces a delay due to the extra tool call. + +```javascript +const queryEngine = await index.asQueryEngine({ retriever }); +const tools = [ + new QueryEngineTool({ + queryEngine: queryEngine, + metadata: { + name: "san_francisco_budget_tool", + description: `This tool can answer detailed questions about the individual components of the budget of San Francisco in 2023-2024.`, + }, + }), +]; + +// Create an agent using the tools array +const agent = new OpenAIAgent({ tools }); + +let toolResponse = await agent.chat({ + message: "What's the budget of San Francisco in 2023-2024?", +}); + +console.log(toolResponse); +``` + +**Expected Output:** + +```javascript +{ + toolCall: { + id: 'call_iNo6rTK4pOpOBbO8FanfWLI9', + name: 'san_francisco_budget_tool', + input: { query: 'total budget' } + }, + toolResult: { + tool: QueryEngineTool { + queryEngine: [RetrieverQueryEngine], + metadata: [Object] + }, + input: { query: 'total budget' }, + output: 'The total budget for the City and County of San Francisco for Fiscal Year (FY) 2023-24 is $14.6 billion, which represents a $611.8 million, or 4.4 percent, increase over the FY 2022-23 budget. For FY 2024-25, the total budget is also projected to be $14.6 billion, reflecting a $40.5 million, or 0.3 percent, decrease from the FY 2023-24 proposed budget. This budget includes various expenditures across different departments and services, with significant allocations to public works, transportation, commerce, public protection, and health services.', + isError: false + } +} +``` + +Once again we see a `toolResult`. You can see the query the LLM decided to send to the query engine ("total budget"), and the output the engine returned. In `response.message` you see that the LLM has returned the output from the tool almost verbatim, although it trimmed out the bit about 2024-2025 since we didn't ask about that year. + +### Comparison of Approaches + +The `OpenAIContextAwareAgent` approach simplifies the setup by allowing you to directly link the retriever to the agent, making it straightforward to access relevant context for your queries. This is ideal for situations where you want easy integration with existing data sources, like a context chat engine. + +On the other hand, using the `QueryEngineTool` offers more flexibility and power. This method allows for customization in how queries are constructed and executed, enabling you to query data from various storages and process them in different ways. However, this added flexibility comes with increased complexity and response time due to the separate tool call and queryEngine generating tool output by LLM that is then passed to the agent. + +So now we have an agent that can index complicated documents and answer questions about them. Let's [combine our math agent and our RAG agent](rag_and_tools)! 
diff --git a/apps/next/src/content/docs/llamaindex/guide/agents/5_rag_and_tools.mdx b/apps/next/src/content/docs/llamaindex/guide/agents/5_rag_and_tools.mdx new file mode 100644 index 0000000000..0f95857d2f --- /dev/null +++ b/apps/next/src/content/docs/llamaindex/guide/agents/5_rag_and_tools.mdx @@ -0,0 +1,130 @@ +--- +title: A RAG agent that does math +--- + +In [our third iteration of the agent](https://github.com/run-llama/ts-agents/blob/main/3_rag_and_tools/agent.ts) we've combined the two previous agents, so we've defined both `sumNumbers` and a `QueryEngineTool` and created an array of two tools: + +```javascript +// define the query engine as a tool +const tools = [ + new QueryEngineTool({ + queryEngine: queryEngine, + metadata: { + name: "san_francisco_budget_tool", + description: `This tool can answer detailed questions about the individual components of the budget of San Francisco in 2023-2024.`, + }, + }), + FunctionTool.from(sumNumbers, { + name: "sumNumbers", + description: "Use this function to sum two numbers", + parameters: { + type: "object", + properties: { + a: { + type: "number", + description: "First number to sum", + }, + b: { + type: "number", + description: "Second number to sum", + }, + }, + required: ["a", "b"], + }, + }), +]; +``` + +These tool descriptions are identical to the ones we previously defined. Now let's ask it 3 questions in a row: + +```javascript +let response = await agent.chat({ + message: + "What's the budget of San Francisco for community health in 2023-24?", +}); +console.log(response); + +let response2 = await agent.chat({ + message: + "What's the budget of San Francisco for public protection in 2023-24?", +}); +console.log(response2); + +let response3 = await agent.chat({ + message: + "What's the combined budget of San Francisco for community health and public protection in 2023-24?", +}); +console.log(response3); +``` + +We'll abbreviate the output, but here are the important things to spot: + +```javascript +{ + toolCall: { + id: 'call_ZA1LPx03gO4ABre1r6XowLWq', + name: 'san_francisco_budget_tool', + input: { query: 'community health budget 2023-2024' } + }, + toolResult: { + tool: QueryEngineTool { + queryEngine: [RetrieverQueryEngine], + metadata: [Object] + }, + input: { query: 'community health budget 2023-2024' }, + output: 'The proposed Fiscal Year (FY) 2023-24 budget for the Department of Public Health is $3.2 billion + } +} +``` + +This is the first tool call, where it used the query engine to get the public health budget. + +```javascript +{ + toolCall: { + id: 'call_oHu1KjEvA47ER6HYVfFIq9yp', + name: 'san_francisco_budget_tool', + input: { query: 'public protection budget 2023-2024' } + }, + toolResult: { + tool: QueryEngineTool { + queryEngine: [RetrieverQueryEngine], + metadata: [Object] + }, + input: { query: 'public protection budget 2023-2024' }, + output: "The budget for Public Protection in San Francisco for Fiscal Year (FY) 2023-24 is $2,012.5 million." + } +} +``` + +In the second tool call, it got the police budget also from the query engine. + +```javascript +{ + toolCall: { + id: 'call_SzG4yGUnLbv1T7IyaLAOqg3t', + name: 'sumNumbers', + input: { a: 3200, b: 2012.5 } + }, + toolResult: { + tool: FunctionTool { _fn: [Function: sumNumbers], _metadata: [Object] }, + input: { a: 3200, b: 2012.5 }, + output: '5212.5', + isError: false + } +} +``` + +In the final tool call, it used the `sumNumbers` function to add the two budgets together. Perfect! 
This leads to the final answer: + +```javascript +{ + message: { + content: 'The combined budget of San Francisco for community health and public protection in Fiscal Year (FY) 2023-24 is $5,212.5 million.', + role: 'assistant', + options: {} + } +} +``` + +Great! Now let's improve accuracy by improving our parsing with [LlamaParse](llamaparse). diff --git a/apps/next/src/content/docs/llamaindex/guide/agents/6_llamaparse.mdx b/apps/next/src/content/docs/llamaindex/guide/agents/6_llamaparse.mdx new file mode 100644 index 0000000000..dc0047addf --- /dev/null +++ b/apps/next/src/content/docs/llamaindex/guide/agents/6_llamaparse.mdx @@ -0,0 +1,20 @@ +--- +title: Adding LlamaParse +--- + +Complicated PDFs can be very tricky for LLMs to understand. To help with this, LlamaIndex provides LlamaParse, a hosted service that parses complex documents including PDFs. To use it, get a `LLAMA_CLOUD_API_KEY` by [signing up for LlamaCloud](https://cloud.llamaindex.ai/) (it's free for up to 1000 pages/day) and adding it to your `.env` file just as you did for your OpenAI key: + +```bash +LLAMA_CLOUD_API_KEY=llx-XXXXXXXXXXXXXXXX +``` + +Then replace `SimpleDirectoryReader` with `LlamaParseReader`: + +```javascript +const reader = new LlamaParseReader({ resultType: "markdown" }); +const documents = await reader.loadData("../data/sf_budget_2023_2024.pdf"); +``` + +Now you will be able to ask more complicated questions of the same PDF and get better results. You can find this code [in our repo](https://github.com/run-llama/ts-agents/blob/main/4_llamaparse/agent.ts). + +Next up, let's persist our embedded data so we don't have to re-parse every time by [using a vector store](qdrant). diff --git a/apps/next/src/content/docs/llamaindex/guide/agents/7_qdrant.mdx b/apps/next/src/content/docs/llamaindex/guide/agents/7_qdrant.mdx new file mode 100644 index 0000000000..d6154c580d --- /dev/null +++ b/apps/next/src/content/docs/llamaindex/guide/agents/7_qdrant.mdx @@ -0,0 +1,77 @@ +--- +title: Adding persistent vector storage +--- + +In the previous examples, we've been loading our data into memory each time we run the agent. This is fine for small datasets, but for larger datasets you'll want to store your embeddings in a database. LlamaIndex.TS provides a `VectorStore` class that can store your embeddings in a variety of databases. We're going to use [Qdrant](https://qdrant.tech/), a popular vector store, for this example. 
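The code below omits its setup lines; a rough sketch of what it assumes is shown here (the import names follow the other examples in these docs, and the `PARSING_CACHE` path is illustrative):

```typescript
import * as fs from "node:fs/promises";
import {
  LlamaParseReader,
  QdrantVectorStore,
  VectorStoreIndex,
} from "llamaindex";

// JSON file used as a naive cache of already-parsed files (illustrative path)
const PARSING_CACHE = "./cache.json";
```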
+ +We can get a local instance of Qdrant running very simply with Docker (make sure you [install Docker](https://www.docker.com/products/docker-desktop/) first): + +```bash +docker pull qdrant/qdrant +docker run -p 6333:6333 qdrant/qdrant +``` + +And in our code we initialize a `VectorStore` with the Qdrant URL: + +```javascript +// initialize qdrant vector store +const vectorStore = new QdrantVectorStore({ + url: "http://localhost:6333", +}); +``` + +Now once we have loaded our documents, we can instantiate an index with the vector store: + +```javascript +// create a query engine from our documents +const index = await VectorStoreIndex.fromDocuments(documents, { vectorStore }); +``` + +In [the final iteration](https://github.com/run-llama/ts-agents/blob/main/5_qdrant/agent.ts) you can see that we have also implemented a very naive caching mechanism to avoid re-parsing the PDF each time we run the agent: + +```javascript +// load cache.json and parse it +let cache = {}; +let cacheExists = false; +try { + await fs.access(PARSING_CACHE, fs.constants.F_OK); + cacheExists = true; +} catch (e) { + console.log("No cache found"); +} +if (cacheExists) { + cache = JSON.parse(await fs.readFile(PARSING_CACHE, "utf-8")); +} + +const filesToParse = ["../data/sf_budget_2023_2024.pdf"]; + +// load our data, reading only files we haven't seen before +let documents = []; +const reader = new LlamaParseReader({ resultType: "markdown" }); +for (let file of filesToParse) { + if (!cache[file]) { + documents = documents.concat(await reader.loadData(file)); + cache[file] = true; + } +} + +// write the cache back to disk +await fs.writeFile(PARSING_CACHE, JSON.stringify(cache)); +``` + +Since parsing a PDF can be slow, especially a large one, using the pre-parsed chunks in Qdrant can significantly speed up your agent. + +## Next steps + +In this guide you've learned how to + +- [Create an agent](create_agent) +- Use remote LLMs like GPT-4 +- [Use local LLMs like Mixtral](local_model) +- [Create a RAG query engine](agentic_rag) +- [Turn functions and query engines into agent tools](rag_and_tools) +- Combine those tools +- [Enhance your parsing with LlamaParse](llamaparse) +- Persist your data in a vector store + +The next steps are up to you! Try creating more complex functions and query engines, and set your agent loose on the world. 
diff --git a/apps/next/src/content/docs/llamaindex/guide/agents/images/agent_flow.png b/apps/next/src/content/docs/llamaindex/guide/agents/images/agent_flow.png new file mode 100644 index 0000000000..ad0456397b Binary files /dev/null and b/apps/next/src/content/docs/llamaindex/guide/agents/images/agent_flow.png differ diff --git a/apps/next/src/content/docs/llamaindex/guide/agents/meta.json b/apps/next/src/content/docs/llamaindex/guide/agents/meta.json new file mode 100644 index 0000000000..0812922414 --- /dev/null +++ b/apps/next/src/content/docs/llamaindex/guide/agents/meta.json @@ -0,0 +1,12 @@ +{ + "title": "Agents", + "pages": [ + "1_setup", + "2_create_agent", + "3_local_model", + "4_agentic_rag", + "5_rag_and_tools", + "6_llamaparse", + "7_qdrant" + ] +} diff --git a/apps/next/src/content/docs/llamaindex/guide/meta.json b/apps/next/src/content/docs/llamaindex/guide/meta.json index 801ed8d57b..b95c3a6110 100644 --- a/apps/next/src/content/docs/llamaindex/guide/meta.json +++ b/apps/next/src/content/docs/llamaindex/guide/meta.json @@ -1,5 +1,5 @@ { "title": "Guide", "description": "See our guide", - "pages": ["workflow", "chat"] + "pages": ["workflow", "chat", "agents"] } diff --git a/apps/next/src/content/docs/llamaindex/index.mdx b/apps/next/src/content/docs/llamaindex/index.mdx index f77fa845a0..b673ccb392 100644 --- a/apps/next/src/content/docs/llamaindex/index.mdx +++ b/apps/next/src/content/docs/llamaindex/index.mdx @@ -1,35 +1,24 @@ --- -title: Getting Started with LlamaIndex.TS -description: Install llamaindex by running a single command. +title: What is LlamaIndex.TS +description: LlamaIndex is the leading data framework for building LLM applications --- -import { Tab, Tabs } from "fumadocs-ui/components/tabs"; +import { + SiNodedotjs, + SiDeno, + SiBun, + SiCloudflareworkers, +} from "@icons-pack/react-simple-icons"; - - ```shell tab="npm" - npm install llamaindex - ``` +LlamaIndex is a framework for building context-augmented generative AI applications with LLMs including agents and workflows. - ```shell tab="yarn" - yarn add llamaindex - ``` +The TypeScript implementation is designed for JavaScript server side applications using Node.js, Deno, Bun, Cloudflare Workers, and more. - ```shell tab="pnpm" - pnpm add llamaindex - ``` - +LlamaIndex.TS provides tools for beginners, advanced users, and everyone in between. -## What's next? - - - - - +