diff --git a/README.md b/README.md
index b83d08daa5..171499c197 100644
--- a/README.md
+++ b/README.md
@@ -1,12 +1,16 @@
-# LlamaIndex.TS
+
+
+
+LlamaIndex.TS
+
+ Data framework for your LLM application.
+
[![NPM Version](https://img.shields.io/npm/v/llamaindex)](https://www.npmjs.com/package/llamaindex)
[![NPM License](https://img.shields.io/npm/l/llamaindex)](https://www.npmjs.com/package/llamaindex)
[![NPM Downloads](https://img.shields.io/npm/dm/llamaindex)](https://www.npmjs.com/package/llamaindex)
[![Discord](https://img.shields.io/discord/1059199217496772688)](https://discord.com/invite/eN6D2HQ4aX)
-LlamaIndex is a data framework for your LLM application.
-
Use your own data with large language models (LLMs, OpenAI ChatGPT and others) in JS runtime environments with TypeScript support.
Documentation: https://ts.llamaindex.ai/
diff --git a/apps/docs/CHANGELOG.md b/apps/docs/CHANGELOG.md
index ec283d3fc8..cdc28ec296 100644
--- a/apps/docs/CHANGELOG.md
+++ b/apps/docs/CHANGELOG.md
@@ -1,5 +1,115 @@
# docs
+## 0.0.128
+
+### Patch Changes
+
+- llamaindex@0.8.23
+
+## 0.0.127
+
+### Patch Changes
+
+- Updated dependencies [819af45]
+ - llamaindex@0.8.22
+
+## 0.0.126
+
+### Patch Changes
+
+- Updated dependencies [83c3897]
+- Updated dependencies [efa2211]
+ - llamaindex@0.8.21
+
+## 0.0.125
+
+### Patch Changes
+
+- Updated dependencies [02b22da]
+ - llamaindex@0.8.20
+
+## 0.0.124
+
+### Patch Changes
+
+- Updated dependencies [90d265c]
+ - llamaindex@0.8.19
+
+## 0.0.123
+
+### Patch Changes
+
+- Updated dependencies [d17450f]
+ - llamaindex@0.8.18
+
+## 0.0.122
+
+### Patch Changes
+
+- llamaindex@0.8.17
+
+## 0.0.121
+
+### Patch Changes
+
+- llamaindex@0.8.16
+
+## 0.0.120
+
+### Patch Changes
+
+- Updated dependencies [3d503cb]
+- Updated dependencies [5dae534]
+ - llamaindex@0.8.15
+
+## 0.0.119
+
+### Patch Changes
+
+- Updated dependencies [630b425]
+ - llamaindex@0.8.14
+
+## 0.0.118
+
+### Patch Changes
+
+- llamaindex@0.8.13
+- @llamaindex/examples@0.0.16
+
+## 0.0.117
+
+### Patch Changes
+
+- @llamaindex/examples@0.0.15
+
+## 0.0.116
+
+### Patch Changes
+
+- llamaindex@0.8.12
+
+## 0.0.115
+
+### Patch Changes
+
+- llamaindex@0.8.11
+
+## 0.0.114
+
+### Patch Changes
+
+- Updated dependencies [f066e50]
+ - llamaindex@0.8.10
+ - @llamaindex/examples@0.0.14
+
+## 0.0.113
+
+### Patch Changes
+
+- Updated dependencies [4fc001c]
+- Updated dependencies [4d4cd8a]
+ - llamaindex@0.8.9
+
## 0.0.112
### Patch Changes
diff --git a/apps/docs/docusaurus.config.js b/apps/docs/docusaurus.config.js
index c520156a71..b1c9218de4 100644
--- a/apps/docs/docusaurus.config.js
+++ b/apps/docs/docusaurus.config.js
@@ -62,6 +62,12 @@ const config = {
({
// Replace with your project's social card
image: "img/favicon.png", // TODO change this
+ announcementBar: {
+ id: "migrate_to_next",
+ content:
+ 'We are migrating to Next.js based documentation. Check it out here!',
+ isCloseable: false,
+ },
navbar: {
title: "LlamaIndex.TS",
logo: {
diff --git a/apps/docs/package.json b/apps/docs/package.json
index 19daa28f45..a60cdca8a1 100644
--- a/apps/docs/package.json
+++ b/apps/docs/package.json
@@ -1,6 +1,6 @@
{
"name": "docs",
- "version": "0.0.112",
+ "version": "0.0.128",
"private": true,
"scripts": {
"docusaurus": "docusaurus",
@@ -15,23 +15,23 @@
"typecheck": "tsc"
},
"dependencies": {
- "@docusaurus/core": "3.6.0",
- "@docusaurus/remark-plugin-npm2yarn": "3.6.0",
+ "@docusaurus/core": "3.6.1",
+ "@docusaurus/remark-plugin-npm2yarn": "3.6.1",
"@llamaindex/examples": "workspace:*",
"@mdx-js/react": "^3.1.0",
"clsx": "^2.1.1",
"llamaindex": "workspace:*",
- "postcss": "^8.4.47",
+ "postcss": "^8.4.49",
"prism-react-renderer": "^2.4.0",
"raw-loader": "^4.0.2",
"react": "^18.3.1",
"react-dom": "18.3.1"
},
"devDependencies": {
- "@docusaurus/module-type-aliases": "3.6.0",
- "@docusaurus/preset-classic": "3.6.0",
- "@docusaurus/theme-classic": "3.6.0",
- "@docusaurus/types": "3.6.0",
+ "@docusaurus/module-type-aliases": "3.6.1",
+ "@docusaurus/preset-classic": "3.6.1",
+ "@docusaurus/theme-classic": "3.6.1",
+ "@docusaurus/types": "3.6.1",
"@tsconfig/docusaurus": "2.0.3",
"@types/node": "^22.9.0",
"docusaurus-plugin-typedoc": "1.0.5",
diff --git a/apps/next/CHANGELOG.md b/apps/next/CHANGELOG.md
index 470f606b94..86662f13f8 100644
--- a/apps/next/CHANGELOG.md
+++ b/apps/next/CHANGELOG.md
@@ -1,5 +1,179 @@
# @llamaindex/doc
+## 0.0.26
+
+### Patch Changes
+
+- @llamaindex/cloud@2.0.15
+- @llamaindex/core@0.4.15
+- llamaindex@0.8.23
+- @llamaindex/node-parser@0.0.16
+- @llamaindex/openai@0.1.40
+- @llamaindex/readers@1.0.17
+
+## 0.0.25
+
+### Patch Changes
+
+- Updated dependencies [819af45]
+ - llamaindex@0.8.22
+ - @llamaindex/cloud@2.0.14
+ - @llamaindex/core@0.4.14
+ - @llamaindex/node-parser@0.0.15
+ - @llamaindex/openai@0.1.39
+ - @llamaindex/readers@1.0.16
+
+## 0.0.24
+
+### Patch Changes
+
+- Updated dependencies [83c3897]
+- Updated dependencies [efa2211]
+ - llamaindex@0.8.21
+
+## 0.0.23
+
+### Patch Changes
+
+- Updated dependencies [02b22da]
+ - llamaindex@0.8.20
+
+## 0.0.22
+
+### Patch Changes
+
+- Updated dependencies [90d265c]
+ - @llamaindex/cloud@2.0.13
+ - @llamaindex/core@0.4.13
+ - llamaindex@0.8.19
+ - @llamaindex/node-parser@0.0.14
+ - @llamaindex/readers@1.0.15
+ - @llamaindex/openai@0.1.38
+
+## 0.0.21
+
+### Patch Changes
+
+- Updated dependencies [d17450f]
+- Updated dependencies [ef4f63d]
+ - llamaindex@0.8.18
+ - @llamaindex/core@0.4.12
+ - @llamaindex/cloud@2.0.12
+ - @llamaindex/node-parser@0.0.13
+ - @llamaindex/openai@0.1.37
+ - @llamaindex/readers@1.0.14
+
+## 0.0.20
+
+### Patch Changes
+
+- Updated dependencies [6d22fa2]
+ - @llamaindex/core@0.4.11
+ - @llamaindex/cloud@2.0.11
+ - llamaindex@0.8.17
+ - @llamaindex/node-parser@0.0.12
+ - @llamaindex/openai@0.1.36
+ - @llamaindex/readers@1.0.13
+
+## 0.0.19
+
+### Patch Changes
+
+- Updated dependencies [e60328b]
+ - @llamaindex/readers@1.0.12
+ - llamaindex@0.8.16
+
+## 0.0.18
+
+### Patch Changes
+
+- Updated dependencies [3d503cb]
+- Updated dependencies [5dae534]
+ - llamaindex@0.8.15
+
+## 0.0.17
+
+### Patch Changes
+
+- Updated dependencies [630b425]
+ - llamaindex@0.8.14
+
+## 0.0.16
+
+### Patch Changes
+
+- Updated dependencies [a7b0ac3]
+- Updated dependencies [ee20c44]
+- Updated dependencies [c69605f]
+ - @llamaindex/core@0.4.10
+ - @llamaindex/workflow@0.0.6
+ - llamaindex@0.8.13
+ - @llamaindex/cloud@2.0.10
+ - @llamaindex/node-parser@0.0.11
+ - @llamaindex/openai@0.1.35
+ - @llamaindex/readers@1.0.11
+
+## 0.0.15
+
+### Patch Changes
+
+- Updated dependencies [ea92b69]
+- Updated dependencies [fadc8b8]
+ - @llamaindex/workflow@0.0.5
+
+## 0.0.14
+
+### Patch Changes
+
+- Updated dependencies [7ae6eaa]
+ - @llamaindex/core@0.4.9
+ - @llamaindex/openai@0.1.34
+ - @llamaindex/cloud@2.0.9
+ - llamaindex@0.8.12
+ - @llamaindex/node-parser@0.0.10
+ - @llamaindex/readers@1.0.10
+
+## 0.0.13
+
+### Patch Changes
+
+- Updated dependencies [f865c98]
+ - @llamaindex/core@0.4.8
+ - @llamaindex/cloud@2.0.8
+ - llamaindex@0.8.11
+ - @llamaindex/node-parser@0.0.9
+ - @llamaindex/openai@0.1.33
+ - @llamaindex/readers@1.0.9
+
+## 0.0.12
+
+### Patch Changes
+
+- Updated dependencies [f066e50]
+- Updated dependencies [d89ebe0]
+- Updated dependencies [fd8c882]
+- Updated dependencies [fd8c882]
+ - llamaindex@0.8.10
+ - @llamaindex/core@0.4.7
+ - @llamaindex/workflow@0.0.4
+ - @llamaindex/cloud@2.0.7
+ - @llamaindex/node-parser@0.0.8
+ - @llamaindex/openai@0.1.32
+ - @llamaindex/readers@1.0.8
+
+## 0.0.11
+
+### Patch Changes
+
+- Updated dependencies [4fc001c]
+- Updated dependencies [4d4cd8a]
+ - llamaindex@0.8.9
+ - @llamaindex/cloud@2.0.6
+ - @llamaindex/core@0.4.6
+ - @llamaindex/node-parser@0.0.7
+ - @llamaindex/openai@0.1.31
+ - @llamaindex/readers@1.0.7
+
## 0.0.10
### Patch Changes
diff --git a/apps/next/next.config.mjs b/apps/next/next.config.mjs
index 3248e75aec..4634aec95a 100644
--- a/apps/next/next.config.mjs
+++ b/apps/next/next.config.mjs
@@ -6,6 +6,7 @@ const withMDX = createMDX();
const config = {
reactStrictMode: true,
transpilePackages: ["monaco-editor"],
+ serverExternalPackages: ["@huggingface/transformers"],
webpack: (config, { isServer }) => {
if (Array.isArray(config.target) && config.target.includes("web")) {
config.target = ["web", "es2020"];
@@ -26,6 +27,7 @@ const config = {
}),
);
}
+ config.resolve.alias["replicate"] = false;
return config;
},
};
diff --git a/apps/next/package.json b/apps/next/package.json
index eb6f2b3386..6b5942f55d 100644
--- a/apps/next/package.json
+++ b/apps/next/package.json
@@ -1,6 +1,6 @@
{
"name": "@llamaindex/doc",
- "version": "0.0.10",
+ "version": "0.0.26",
"private": true,
"scripts": {
"build": "pnpm run build:docs && next build",
@@ -12,7 +12,7 @@
},
"dependencies": {
"@icons-pack/react-simple-icons": "^10.1.0",
- "@llamaindex/chat-ui": "0.0.5",
+ "@llamaindex/chat-ui": "0.0.9",
"@llamaindex/cloud": "workspace:*",
"@llamaindex/core": "workspace:*",
"@llamaindex/node-parser": "workspace:*",
@@ -20,31 +20,31 @@
"@llamaindex/readers": "workspace:*",
"@llamaindex/workflow": "workspace:*",
"@mdx-js/mdx": "^3.1.0",
- "@number-flow/react": "^0.3.0",
+ "@number-flow/react": "^0.3.4",
"@radix-ui/react-dialog": "^1.1.2",
- "@radix-ui/react-icons": "^1.3.1",
+ "@radix-ui/react-icons": "^1.3.2",
"@radix-ui/react-label": "^2.1.0",
"@radix-ui/react-slider": "^1.2.1",
"@radix-ui/react-slot": "^1.1.0",
- "@radix-ui/react-tooltip": "^1.1.3",
+ "@radix-ui/react-tooltip": "^1.1.4",
"@vercel/functions": "^1.5.0",
- "ai": "^3.4.31",
+ "ai": "^3.4.33",
"class-variance-authority": "^0.7.0",
"clsx": "2.1.1",
- "foxact": "^0.2.40",
- "framer-motion": "^11.11.11",
- "fumadocs-core": "14.2.0",
- "fumadocs-docgen": "^1.3.1",
+ "foxact": "^0.2.41",
+ "framer-motion": "^11.11.17",
+ "fumadocs-core": "14.4.2",
+ "fumadocs-docgen": "^1.3.2",
"fumadocs-mdx": "^11.1.1",
- "fumadocs-openapi": "^5.5.6",
+ "fumadocs-openapi": "^5.7.0",
"fumadocs-twoslash": "^2.0.1",
- "fumadocs-typescript": "^3.0.1",
- "fumadocs-ui": "14.2.0",
+ "fumadocs-typescript": "^3.0.2",
+ "fumadocs-ui": "14.4.2",
"hast-util-to-jsx-runtime": "^2.3.2",
"llamaindex": "workspace:*",
- "lucide-react": "^0.454.0",
- "next": "15.0.2",
- "next-themes": "^0.3.0",
+ "lucide-react": "^0.460.0",
+ "next": "15.0.3",
+ "next-themes": "^0.4.3",
"react": "^18.3.1",
"react-dom": "^18.3.1",
"react-icons": "^5.3.0",
@@ -54,19 +54,19 @@
"rehype-katex": "^7.0.1",
"remark-math": "^6.0.0",
"rimraf": "^6.0.1",
- "shiki": "^1.22.2",
+ "shiki": "^1.23.1",
"shiki-magic-move": "^0.5.0",
"swr": "^2.2.5",
"tailwind-merge": "^2.5.2",
"tailwindcss-animate": "^1.0.7",
- "tree-sitter": "^0.22.0",
- "tree-sitter-typescript": "^0.23.0",
- "use-stick-to-bottom": "^1.0.41",
- "web-tree-sitter": "^0.24.3",
+ "tree-sitter": "^0.22.1",
+ "tree-sitter-typescript": "^0.23.2",
+ "use-stick-to-bottom": "^1.0.42",
+ "web-tree-sitter": "^0.24.4",
"zod": "^3.23.8"
},
"devDependencies": {
- "@next/env": "^15.0.2",
+ "@next/env": "^15.0.3",
"@types/mdx": "^2.0.13",
"@types/node": "22.9.0",
"@types/react": "^18.3.12",
@@ -75,12 +75,12 @@
"fast-glob": "^3.3.2",
"gray-matter": "^4.0.3",
"monaco-editor-webpack-plugin": "^7.1.0",
- "postcss": "^8.4.47",
+ "postcss": "^8.4.49",
"remark": "^15.0.1",
"remark-gfm": "^4.0.0",
"remark-mdx": "^3.1.0",
"remark-stringify": "^11.0.0",
- "tailwindcss": "^3.4.14",
+ "tailwindcss": "^3.4.15",
"tsx": "^4.19.2",
"typescript": "^5.6.3"
}
diff --git a/apps/next/public/square.svg b/apps/next/public/square.svg
new file mode 100644
index 0000000000..fdcc834c6f
--- /dev/null
+++ b/apps/next/public/square.svg
@@ -0,0 +1,18 @@
+
+
diff --git a/apps/next/src/app/api/chat/route.ts b/apps/next/src/app/api/chat/route.ts
index 94a65fc535..0eb9e3133c 100644
--- a/apps/next/src/app/api/chat/route.ts
+++ b/apps/next/src/app/api/chat/route.ts
@@ -1,7 +1,10 @@
+import { MockLLM } from "@llamaindex/core/utils";
import { LlamaIndexAdapter, type Message } from "ai";
-import { SimpleChatEngine, type ChatMessage } from "llamaindex";
+import { Settings, SimpleChatEngine, type ChatMessage } from "llamaindex";
import { NextResponse, type NextRequest } from "next/server";
+Settings.llm = new MockLLM(); // config your LLM here
+
export async function POST(request: NextRequest) {
try {
const { messages } = (await request.json()) as { messages: Message[] };
diff --git a/apps/next/src/components/demo/chat.tsx b/apps/next/src/components/demo/chat.tsx
deleted file mode 100644
index b816ffd6e8..0000000000
--- a/apps/next/src/components/demo/chat.tsx
+++ /dev/null
@@ -1,8 +0,0 @@
-"use client";
-import { ChatSection } from "@llamaindex/chat-ui";
-import { useChat } from "ai/react";
-
-export const ChatDemo = () => {
- const handler = useChat();
- return <ChatSection handler={handler} />;
-};
diff --git a/apps/next/src/components/demo/chat/api/demo.tsx b/apps/next/src/components/demo/chat/api/demo.tsx
new file mode 100644
index 0000000000..85bb750e12
--- /dev/null
+++ b/apps/next/src/components/demo/chat/api/demo.tsx
@@ -0,0 +1,16 @@
+"use client";
+import { ChatInput, ChatMessages, ChatSection } from "@llamaindex/chat-ui";
+import { useChat } from "ai/react";
+
+export const ChatDemo = () => {
+ const handler = useChat();
+ return (
+    <ChatSection handler={handler}>
+      <ChatMessages />
+      <ChatInput />
+    </ChatSection>
+ );
+};
diff --git a/apps/next/src/components/demo/chat/rsc/ai-action.tsx b/apps/next/src/components/demo/chat/rsc/ai-action.tsx
new file mode 100644
index 0000000000..169ba827ae
--- /dev/null
+++ b/apps/next/src/components/demo/chat/rsc/ai-action.tsx
@@ -0,0 +1,57 @@
+import { Markdown } from "@llamaindex/chat-ui/widgets";
+import { MockLLM } from "@llamaindex/core/utils";
+import { generateId, Message } from "ai";
+import { createAI, createStreamableUI, getMutableAIState } from "ai/rsc";
+import { type ChatMessage, Settings, SimpleChatEngine } from "llamaindex";
+import { ReactNode } from "react";
+
+type ServerState = Message[];
+type FrontendState = Array<Message & { display: ReactNode }>;
+type Actions = {
+  chat: (message: Message) => Promise<Message & { display: ReactNode }>;
+};
+
+Settings.llm = new MockLLM(); // config your LLM here
+
+export const AI = createAI<ServerState, FrontendState, Actions>({
+ initialAIState: [],
+ initialUIState: [],
+ actions: {
+ chat: async (message: Message) => {
+ "use server";
+
+ const aiState = getMutableAIState();
+ aiState.update((prev) => [...prev, message]);
+
+ const uiStream = createStreamableUI();
+ const chatEngine = new SimpleChatEngine();
+ const assistantMessage: Message = {
+ id: generateId(),
+ role: "assistant",
+ content: "",
+ };
+
+ // run the async function without blocking
+ (async () => {
+ const chatResponse = await chatEngine.chat({
+ stream: true,
+ message: message.content,
+ chatHistory: aiState.get() as ChatMessage[],
+ });
+
+ for await (const chunk of chatResponse) {
+ assistantMessage.content += chunk.delta;
+        uiStream.update(<Markdown content={assistantMessage.content} />);
+ }
+
+ aiState.done([...aiState.get(), assistantMessage]);
+ uiStream.done();
+ })();
+
+ return {
+ ...assistantMessage,
+ display: uiStream.value,
+ };
+ },
+ },
+});
diff --git a/apps/next/src/components/demo/chat/rsc/chat-section.tsx b/apps/next/src/components/demo/chat/rsc/chat-section.tsx
new file mode 100644
index 0000000000..384cdae3b0
--- /dev/null
+++ b/apps/next/src/components/demo/chat/rsc/chat-section.tsx
@@ -0,0 +1,33 @@
+"use client";
+
+import {
+ ChatInput,
+ ChatMessage,
+ ChatMessages,
+ ChatSection as ChatSectionUI,
+} from "@llamaindex/chat-ui";
+import { useChatRSC } from "./use-chat-rsc";
+
+export const ChatSectionRSC = () => {
+ const handler = useChatRSC();
+ return (
+    <ChatSectionUI handler={handler}>
+      <ChatMessages>
+        {handler.messages.map((message, index) => (
+          <ChatMessage key={index} message={message}>
+            {message.display}
+          </ChatMessage>
+        ))}
+      </ChatMessages>
+      <ChatInput />
+    </ChatSectionUI>
+ );
+};
diff --git a/apps/next/src/components/demo/chat/rsc/demo.tsx b/apps/next/src/components/demo/chat/rsc/demo.tsx
new file mode 100644
index 0000000000..76a2cfaa8a
--- /dev/null
+++ b/apps/next/src/components/demo/chat/rsc/demo.tsx
@@ -0,0 +1,8 @@
+import { AI } from "./ai-action";
+import { ChatSectionRSC } from "./chat-section";
+
+export const ChatDemoRSC = () => (
+  <AI>
+    <ChatSectionRSC />
+  </AI>
+);
diff --git a/apps/next/src/components/demo/chat/rsc/use-chat-rsc.tsx b/apps/next/src/components/demo/chat/rsc/use-chat-rsc.tsx
new file mode 100644
index 0000000000..f7d1f5e77d
--- /dev/null
+++ b/apps/next/src/components/demo/chat/rsc/use-chat-rsc.tsx
@@ -0,0 +1,41 @@
+"use client";
+
+import { useActions } from "ai/rsc";
+
+import { generateId, Message } from "ai";
+import { useUIState } from "ai/rsc";
+import { useState } from "react";
+import { AI } from "./ai-action";
+
+export function useChatRSC() {
+ const [input, setInput] = useState("");
+ const [isLoading, setIsLoading] = useState(false);
+ const [messages, setMessages] = useUIState();
+ const { chat } = useActions();
+
+  const append = async (message: Omit<Message, "id">) => {
+ const newMsg: Message = { ...message, id: generateId() };
+
+ setIsLoading(true);
+ try {
+ setMessages((prev) => [...prev, { ...newMsg, display: message.content }]);
+ const assistantMsg = await chat(newMsg);
+ setMessages((prev) => [...prev, assistantMsg]);
+ } catch (error) {
+ console.error(error);
+ }
+ setIsLoading(false);
+ setInput("");
+
+ return message.content;
+ };
+
+ return {
+ input,
+ setInput,
+ isLoading,
+ messages,
+ setMessages,
+ append,
+ };
+}
diff --git a/apps/next/src/components/website/Footer.tsx b/apps/next/src/components/website/Footer.tsx
index a317e59d24..38146e2d2b 100644
--- a/apps/next/src/components/website/Footer.tsx
+++ b/apps/next/src/components/website/Footer.tsx
@@ -85,6 +85,33 @@ const Footer = () => {
SharePoint
+
+
+ AWS S3
+
+
+
+
+ Azure Blob Storage
+
+
+
+
+ Google Drive
+
+
@@ -171,11 +198,6 @@ const Footer = () => {
SEC Insights
-
-
- Chat LlamaIndex
-
-
LlamaBot
diff --git a/apps/next/src/content/docs/llamaindex/guide/chat.mdx b/apps/next/src/content/docs/llamaindex/guide/chat/chat.mdx
similarity index 85%
rename from apps/next/src/content/docs/llamaindex/guide/chat.mdx
rename to apps/next/src/content/docs/llamaindex/guide/chat/chat.mdx
index 8a5ef245e2..4cbaea1572 100644
--- a/apps/next/src/content/docs/llamaindex/guide/chat.mdx
+++ b/apps/next/src/content/docs/llamaindex/guide/chat/chat.mdx
@@ -1,8 +1,8 @@
---
-title: Chat-UI
-description: Use chat-ui to add a chat interface to your LlamaIndexTS application.
+title: Using API Route
+description: Chat interface for your LlamaIndexTS application using API Route
---
-import { ChatDemo } from '../../../../components/demo/chat';
+import { ChatDemo } from '../../../../../components/demo/chat/api/demo';
import "@llamaindex/chat-ui/styles/code.css";
import "@llamaindex/chat-ui/styles/katex.css";
@@ -26,7 +26,7 @@ This is the simplest way to add a chat interface to your application. Copy the f
```json doc-gen:file
{
- "file": "./src/components/demo/chat.tsx",
+ "file": "./src/components/demo/chat/api/demo.tsx",
"codeblock": true
}
```
@@ -37,6 +37,7 @@ Combining both, you're getting a fully functional chat interface:
+
## Next Steps
The steps above are the bare minimum to get a chat interface working. From here, you can go two ways:
diff --git a/apps/next/src/content/docs/llamaindex/guide/chat/meta.json b/apps/next/src/content/docs/llamaindex/guide/chat/meta.json
new file mode 100644
index 0000000000..7550c3cc9a
--- /dev/null
+++ b/apps/next/src/content/docs/llamaindex/guide/chat/meta.json
@@ -0,0 +1,6 @@
+{
+ "title": "Chat-UI",
+ "description": "Use chat-ui to add a chat interface to your LlamaIndexTS application.",
+ "defaultOpen": false,
+ "pages": ["chat", "rsc"]
+}
diff --git a/apps/next/src/content/docs/llamaindex/guide/chat/rsc.mdx b/apps/next/src/content/docs/llamaindex/guide/chat/rsc.mdx
new file mode 100644
index 0000000000..d0a8ebc262
--- /dev/null
+++ b/apps/next/src/content/docs/llamaindex/guide/chat/rsc.mdx
@@ -0,0 +1,68 @@
+---
+title: Using Next.js RSC
+description: Chat interface for your LlamaIndexTS application using Next.js RSC
+---
+import { ChatDemoRSC } from '../../../../../components/demo/chat/rsc/demo';
+import "@llamaindex/chat-ui/styles/code.css";
+import "@llamaindex/chat-ui/styles/katex.css";
+
+Using [chat-ui](https://github.com/run-llama/chat-ui), it's easy to add a chat interface to your LlamaIndexTS application using [Next.js RSC](https://nextjs.org/docs/app/building-your-application/rendering/server-components) and [Vercel AI RSC](https://sdk.vercel.ai/docs/ai-sdk-rsc/overview).
+
+With RSC, the chat messages are not returned as JSON from the server (as when using an [API route](./chat)); instead, the chat message components are rendered on the server side.
+This is useful, for example, for rendering a whole chat history on the server before sending it to the client. [Check here](https://sdk.vercel.ai/docs/getting-started/navigating-the-library#when-to-use-ai-sdk-rsc) for a discussion of when to use RSC.
+
+To implement a chat interface with RSC, you need to create an AI action and then connect the chat interface to it.
+
+## Create an AI action
+
+First, define an [AI context provider](https://sdk.vercel.ai/examples/rsc/state-management/ai-ui-states) with a chat server action:
+
+```json doc-gen:file
+{
+ "file": "./src/components/demo/chat/rsc/ai-action.tsx",
+ "codeblock": true
+}
+```
+
+The chat server action uses LlamaIndexTS to generate a response based on the chat history and the user input.
+
+## Create the chat UI
+
+The entry point of the application initializes the AI provider and adds a `ChatSection` component:
+
+```json doc-gen:file
+{
+ "file": "./src/components/demo/chat/rsc/demo.tsx",
+ "codeblock": true
+}
+```
+
+The `ChatSection` component is composed of chat components from `@llamaindex/chat-ui`:
+
+```json doc-gen:file
+{
+ "file": "./src/components/demo/chat/rsc/chat-section.tsx",
+ "codeblock": true
+}
+```
+
+It uses the `useChatRSC` hook to connect the chat interface to the `chat` AI action that we defined earlier:
+
+```json doc-gen:file
+{
+ "file": "./src/components/demo/chat/rsc/use-chat-rsc.tsx",
+ "codeblock": true
+}
+```
+
+## Try RSC Chat ⬇️
+
+<ChatDemoRSC />
+
+## Next Steps
+
+The steps above are the bare minimum to get a chat interface working with RSC. From here, you can go two ways:
+
+1. Use our [full-stack RSC example](https://github.com/run-llama/nextjs-rsc) based on [create-llama](https://github.com/run-llama/create-llama) to get started quickly with a fully working chat interface or
+2. Learn more about [AI RSC](https://sdk.vercel.ai/examples/rsc), [chat-ui](https://github.com/run-llama/chat-ui) and [LlamaIndexTS](https://github.com/run-llama/llamaindex-ts) to customize the chat interface and AI actions to your needs.
+
diff --git a/apps/next/src/content/docs/llamaindex/setup/cloudflare.mdx b/apps/next/src/content/docs/llamaindex/setup/cloudflare.mdx
index b377a65f18..5ed244b912 100644
--- a/apps/next/src/content/docs/llamaindex/setup/cloudflare.mdx
+++ b/apps/next/src/content/docs/llamaindex/setup/cloudflare.mdx
@@ -37,6 +37,33 @@ Then, you need create `.dev.vars` and add LLM api keys for the local development
Do not commit the api key to git repository.
+## Integrating with Hono
+
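+To use LlamaIndex.TS from a Hono app, forward the Worker bindings by calling `setEnvs` from `@llamaindex/env` at the start of each request:
+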
+```ts
+import { Hono } from "hono";
+
+type Bindings = {
+ OPENAI_API_KEY: string;
+};
+
+const app = new Hono<{
+ Bindings: Bindings;
+}>();
+
+app.post("/llm", async (c) => {
+ const { setEnvs } = await import("@llamaindex/env");
+ setEnvs(c.env);
+
+ // ...
+
+ return new Response('Hello, world!');
+})
+
+export default {
+ fetch: app.fetch,
+};
+```
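+
+During local development with `wrangler dev`, the `OPENAI_API_KEY` binding comes from the `.dev.vars` file described above; for a deployed Worker you would typically set it with `wrangler secret put OPENAI_API_KEY`.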
+
## Difference between Node.js and Cloudflare Worker
In Cloudflare Worker and similar serverless JS environment, you need to be aware of the following differences:
@@ -46,3 +73,7 @@ In Cloudflare Worker and similar serverless JS environment, you need to be aware
- Some of LlamaIndex.TS modules are not available in Cloudflare Worker, for example `SimpleDirectoryReader` (requires `node:fs`), Some multimodal API that relies on [`onnxruntime-node`](https://www.npmjs.com/package/onnxruntime-node)(we might port to HTTP based module in the future).
- `@llamaindex/core` is designed to work in all JavaScript environment, including Cloudflare Worker. If you find any issue, please report to us.
- `@llamaindex/env` is a JS environment binding module, which polyfill some Node.js/Modern Web API (for example, we have a memory based `fs` module, and Crypto API polyfill). It is designed to work in all JavaScript environment, including Cloudflare Worker.
+
+## Known issues
+
+- `llamaindex` does not work perfectly in Cloudflare Workers: the bundle size will be larger than 1 MB, which is the Cloudflare Worker limit. You will need to import submodules instead of the whole `llamaindex` module, as sketched below.
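+
+As a rough sketch (the exact subpaths depend on which modules you need; check each package's exports):
+
+```ts
+// Import from the scoped sub-packages instead of the top-level "llamaindex"
+// entry point to keep the Worker bundle small.
+import { extractText } from "@llamaindex/core/utils";
+import { OpenAI, OpenAIEmbedding } from "@llamaindex/openai";
+```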
diff --git a/apps/next/turbo.json b/apps/next/turbo.json
index b98ab04da1..618d16c57e 100644
--- a/apps/next/turbo.json
+++ b/apps/next/turbo.json
@@ -1,6 +1,14 @@
{
"extends": ["//"],
"tasks": {
+ "build": {
+ "outputs": [
+ ".next",
+ ".source",
+ "next-env.d.ts",
+ "src/content/docs/cloud/api/**"
+ ]
+ },
"dev": {
"dependsOn": ["^build"]
}
diff --git a/e2e/.env.ci b/e2e/.env.ci
index 080df233ef..25f35a7bdb 100644
--- a/e2e/.env.ci
+++ b/e2e/.env.ci
@@ -1 +1,4 @@
POSTGRES_USER=runner
+PINECONE_API_KEY=
+PINECONE_INDEX_NAME=
+PINECONE_NAMESPACE=
diff --git a/e2e/examples/cloudflare-hono/.gitignore b/e2e/examples/cloudflare-hono/.gitignore
new file mode 100644
index 0000000000..3b0fe33c47
--- /dev/null
+++ b/e2e/examples/cloudflare-hono/.gitignore
@@ -0,0 +1,172 @@
+# Logs
+
+logs
+_.log
+npm-debug.log_
+yarn-debug.log*
+yarn-error.log*
+lerna-debug.log*
+.pnpm-debug.log*
+
+# Diagnostic reports (https://nodejs.org/api/report.html)
+
+report.[0-9]_.[0-9]_.[0-9]_.[0-9]_.json
+
+# Runtime data
+
+pids
+_.pid
+_.seed
+\*.pid.lock
+
+# Directory for instrumented libs generated by jscoverage/JSCover
+
+lib-cov
+
+# Coverage directory used by tools like istanbul
+
+coverage
+\*.lcov
+
+# nyc test coverage
+
+.nyc_output
+
+# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
+
+.grunt
+
+# Bower dependency directory (https://bower.io/)
+
+bower_components
+
+# node-waf configuration
+
+.lock-wscript
+
+# Compiled binary addons (https://nodejs.org/api/addons.html)
+
+build/Release
+
+# Dependency directories
+
+node_modules/
+jspm_packages/
+
+# Snowpack dependency directory (https://snowpack.dev/)
+
+web_modules/
+
+# TypeScript cache
+
+\*.tsbuildinfo
+
+# Optional npm cache directory
+
+.npm
+
+# Optional eslint cache
+
+.eslintcache
+
+# Optional stylelint cache
+
+.stylelintcache
+
+# Microbundle cache
+
+.rpt2_cache/
+.rts2_cache_cjs/
+.rts2_cache_es/
+.rts2_cache_umd/
+
+# Optional REPL history
+
+.node_repl_history
+
+# Output of 'npm pack'
+
+\*.tgz
+
+# Yarn Integrity file
+
+.yarn-integrity
+
+# dotenv environment variable files
+
+.env
+.env.development.local
+.env.test.local
+.env.production.local
+.env.local
+
+# parcel-bundler cache (https://parceljs.org/)
+
+.cache
+.parcel-cache
+
+# Next.js build output
+
+.next
+out
+
+# Nuxt.js build / generate output
+
+.nuxt
+dist
+
+# Gatsby files
+
+.cache/
+
+# Comment in the public line in if your project uses Gatsby and not Next.js
+
+# https://nextjs.org/blog/next-9-1#public-directory-support
+
+# public
+
+# vuepress build output
+
+.vuepress/dist
+
+# vuepress v2.x temp and cache directory
+
+.temp
+.cache
+
+# Docusaurus cache and generated files
+
+.docusaurus
+
+# Serverless directories
+
+.serverless/
+
+# FuseBox cache
+
+.fusebox/
+
+# DynamoDB Local files
+
+.dynamodb/
+
+# TernJS port file
+
+.tern-port
+
+# Stores VSCode versions used for testing VSCode extensions
+
+.vscode-test
+
+# yarn v2
+
+.yarn/cache
+.yarn/unplugged
+.yarn/build-state.yml
+.yarn/install-state.gz
+.pnp.\*
+
+# wrangler project
+
+.dev.vars
+.wrangler/
diff --git a/e2e/examples/cloudflare-hono/package.json b/e2e/examples/cloudflare-hono/package.json
new file mode 100644
index 0000000000..fc1864e282
--- /dev/null
+++ b/e2e/examples/cloudflare-hono/package.json
@@ -0,0 +1,20 @@
+{
+ "name": "@llamaindex/cloudflare-hono",
+ "version": "0.0.0",
+ "private": true,
+ "scripts": {
+ "deploy": "wrangler deploy",
+ "build": "wrangler deploy --dry-run --outdir dist",
+ "dev": "wrangler dev",
+ "start": "wrangler dev",
+ "cf-typegen": "wrangler types"
+ },
+ "devDependencies": {
+ "@cloudflare/workers-types": "^4.20241112.0",
+ "typescript": "^5.5.2",
+ "wrangler": "^3.89.0"
+ },
+ "dependencies": {
+ "hono": "^4.6.11"
+ }
+}
diff --git a/e2e/examples/cloudflare-hono/src/index.ts b/e2e/examples/cloudflare-hono/src/index.ts
new file mode 100644
index 0000000000..ab0c317d9e
--- /dev/null
+++ b/e2e/examples/cloudflare-hono/src/index.ts
@@ -0,0 +1,91 @@
+import { Hono } from "hono";
+
+type Bindings = {
+ OPENAI_API_KEY: string;
+ PINECONE_API_KEY: string;
+};
+
+const app = new Hono<{
+ Bindings: Bindings;
+}>();
+
+app.post("/llm", async (c) => {
+ //#region init envs
+ const { setEnvs } = await import("@llamaindex/env");
+ setEnvs(c.env);
+ //#endregion
+
+ const { message } = await c.req.json();
+
+ const { extractText } = await import("@llamaindex/core/utils");
+
+ const {
+ QueryEngineTool,
+ serviceContextFromDefaults,
+ VectorStoreIndex,
+ OpenAIAgent,
+ Settings,
+ OpenAI,
+ OpenAIEmbedding,
+ } = await import("llamaindex");
+
+ const { PineconeVectorStore } = await import(
+ "llamaindex/vector-store/PineconeVectorStore"
+ );
+
+ const llm = new OpenAI({
+ model: "gpt-4o-mini",
+ apiKey: c.env.OPENAI_API_KEY,
+ });
+
+ Settings.embedModel = new OpenAIEmbedding({
+ model: "text-embedding-3-small",
+ apiKey: c.env.OPENAI_API_KEY,
+ });
+
+ const serviceContext = serviceContextFromDefaults({
+ llm,
+ chunkSize: 8191,
+ chunkOverlap: 0,
+ });
+
+ const store = new PineconeVectorStore({
+ namespace: "8xolsn4ulEQGdhnhP76yCzfLHdOZ",
+ });
+
+ const index = await VectorStoreIndex.fromVectorStore(store, serviceContext);
+
+ const retriever = index.asRetriever({
+ similarityTopK: 3,
+ });
+
+ // Create a query engine
+ const queryEngine = index.asQueryEngine({
+ retriever,
+ });
+
+ const tools = [
+ new QueryEngineTool({
+ queryEngine: queryEngine,
+ metadata: {
+ name: "business_info_tool",
+ description:
+ "This tool can answer questions based " +
+ "on business information. Return not found if you" +
+ " can't find the answer in the documents.",
+ },
+ }),
+ ];
+
+ const agent = new OpenAIAgent({ tools });
+
+ const response = await agent.chat({
+ message: message,
+ });
+
+ return new Response(extractText(response.message.content));
+});
+
+export default {
+ fetch: app.fetch,
+};
diff --git a/e2e/examples/cloudflare-hono/tsconfig.json b/e2e/examples/cloudflare-hono/tsconfig.json
new file mode 100644
index 0000000000..2ea4edc7bc
--- /dev/null
+++ b/e2e/examples/cloudflare-hono/tsconfig.json
@@ -0,0 +1,39 @@
+{
+ "extends": "../../tsconfig.json",
+ "compilerOptions": {
+ /* Visit https://aka.ms/tsconfig.json to read more about this file */
+
+ /* Set the JavaScript language version for emitted JavaScript and include compatible library declarations. */
+ "target": "es2021",
+ /* Specify a set of bundled library declaration files that describe the target runtime environment. */
+ "lib": ["es2021", "DOM.AsyncIterable"],
+ /* Specify what JSX code is generated. */
+ "jsx": "react-jsx",
+ /* Specify what module code is generated. */
+ "module": "es2022",
+ /* Specify how TypeScript looks up a file from a given module specifier. */
+ "moduleResolution": "Bundler",
+ /* Specify type package names to be included without being referenced in a source file. */
+ "types": ["@cloudflare/workers-types/2023-07-01"],
+ /* Enable importing .json files */
+ "resolveJsonModule": true,
+ /* Allow JavaScript files to be a part of your program. Use the `checkJS` option to get errors from these files. */
+ "allowJs": true,
+ /* Enable error reporting in type-checked JavaScript files. */
+ "checkJs": false,
+ /* Disable emitting files from a compilation. */
+ "noEmit": true,
+ /* Ensure that each file can be safely transpiled without relying on other imports. */
+ "isolatedModules": true,
+ /* Allow 'import x from y' when a module doesn't have a default export. */
+ "allowSyntheticDefaultImports": true,
+ /* Ensure that casing is correct in imports. */
+ "forceConsistentCasingInFileNames": true,
+ /* Enable all strict type-checking options. */
+ "strict": true,
+ /* Skip type checking all .d.ts files. */
+ "skipLibCheck": true
+ },
+ "exclude": ["test"],
+ "include": ["vitest.config.mts", "worker-configuration.d.ts", "src/**/*.ts"]
+}
diff --git a/e2e/examples/cloudflare-hono/worker-configuration.d.ts b/e2e/examples/cloudflare-hono/worker-configuration.d.ts
new file mode 100644
index 0000000000..a093a4fc61
--- /dev/null
+++ b/e2e/examples/cloudflare-hono/worker-configuration.d.ts
@@ -0,0 +1,4 @@
+// Generated by Wrangler by running `wrangler types`
+
+// eslint-disable-next-line @typescript-eslint/no-empty-object-type
+interface Env {}
diff --git a/e2e/examples/cloudflare-hono/wrangler.toml b/e2e/examples/cloudflare-hono/wrangler.toml
new file mode 100644
index 0000000000..b9e36d4d82
--- /dev/null
+++ b/e2e/examples/cloudflare-hono/wrangler.toml
@@ -0,0 +1,7 @@
+name = "llamaindex-cloudflare-hono-example"
+main = "src/index.ts"
+compatibility_date = "2024-11-12"
+compatibility_flags = ["nodejs_als"]
+
+[observability]
+enabled = true
diff --git a/e2e/examples/cloudflare-worker-agent/CHANGELOG.md b/e2e/examples/cloudflare-worker-agent/CHANGELOG.md
index 44b7a214bf..8a663ed5e1 100644
--- a/e2e/examples/cloudflare-worker-agent/CHANGELOG.md
+++ b/e2e/examples/cloudflare-worker-agent/CHANGELOG.md
@@ -1,5 +1,107 @@
# @llamaindex/cloudflare-worker-agent-test
+## 0.0.119
+
+### Patch Changes
+
+- llamaindex@0.8.23
+
+## 0.0.118
+
+### Patch Changes
+
+- Updated dependencies [819af45]
+ - llamaindex@0.8.22
+
+## 0.0.117
+
+### Patch Changes
+
+- Updated dependencies [83c3897]
+- Updated dependencies [efa2211]
+ - llamaindex@0.8.21
+
+## 0.0.116
+
+### Patch Changes
+
+- Updated dependencies [02b22da]
+ - llamaindex@0.8.20
+
+## 0.0.115
+
+### Patch Changes
+
+- Updated dependencies [90d265c]
+ - llamaindex@0.8.19
+
+## 0.0.114
+
+### Patch Changes
+
+- Updated dependencies [d17450f]
+ - llamaindex@0.8.18
+
+## 0.0.113
+
+### Patch Changes
+
+- llamaindex@0.8.17
+
+## 0.0.112
+
+### Patch Changes
+
+- llamaindex@0.8.16
+
+## 0.0.111
+
+### Patch Changes
+
+- Updated dependencies [3d503cb]
+- Updated dependencies [5dae534]
+ - llamaindex@0.8.15
+
+## 0.0.110
+
+### Patch Changes
+
+- Updated dependencies [630b425]
+ - llamaindex@0.8.14
+
+## 0.0.109
+
+### Patch Changes
+
+- llamaindex@0.8.13
+
+## 0.0.108
+
+### Patch Changes
+
+- llamaindex@0.8.12
+
+## 0.0.107
+
+### Patch Changes
+
+- llamaindex@0.8.11
+
+## 0.0.106
+
+### Patch Changes
+
+- Updated dependencies [f066e50]
+ - llamaindex@0.8.10
+
+## 0.0.105
+
+### Patch Changes
+
+- Updated dependencies [4fc001c]
+- Updated dependencies [4d4cd8a]
+ - llamaindex@0.8.9
+
## 0.0.104
### Patch Changes
diff --git a/e2e/examples/cloudflare-worker-agent/package.json b/e2e/examples/cloudflare-worker-agent/package.json
index 9d1c906378..aa95d54f5e 100644
--- a/e2e/examples/cloudflare-worker-agent/package.json
+++ b/e2e/examples/cloudflare-worker-agent/package.json
@@ -1,6 +1,6 @@
{
"name": "@llamaindex/cloudflare-worker-agent-test",
- "version": "0.0.104",
+ "version": "0.0.119",
"type": "module",
"private": true,
"scripts": {
@@ -12,13 +12,13 @@
"cf-typegen": "wrangler types"
},
"devDependencies": {
- "@cloudflare/vitest-pool-workers": "^0.5.8",
- "@cloudflare/workers-types": "^4.20240924.0",
- "@vitest/runner": "2.1.4",
- "@vitest/snapshot": "2.1.4",
+ "@cloudflare/vitest-pool-workers": "^0.5.28",
+ "@cloudflare/workers-types": "^4.20241112.0",
+ "@vitest/runner": "2.1.5",
+ "@vitest/snapshot": "2.1.5",
"typescript": "^5.6.3",
- "vitest": "2.1.4",
- "wrangler": "^3.78.8"
+ "vitest": "2.1.5",
+ "wrangler": "^3.87.0"
},
"dependencies": {
"llamaindex": "workspace:*"
diff --git a/e2e/examples/llama-parse-browser/CHANGELOG.md b/e2e/examples/llama-parse-browser/CHANGELOG.md
index dcccdaa2ff..aab6413637 100644
--- a/e2e/examples/llama-parse-browser/CHANGELOG.md
+++ b/e2e/examples/llama-parse-browser/CHANGELOG.md
@@ -1,5 +1,66 @@
# @llamaindex/llama-parse-browser-test
+## 0.0.35
+
+### Patch Changes
+
+- @llamaindex/cloud@2.0.15
+
+## 0.0.34
+
+### Patch Changes
+
+- @llamaindex/cloud@2.0.14
+
+## 0.0.33
+
+### Patch Changes
+
+- Updated dependencies [90d265c]
+ - @llamaindex/cloud@2.0.13
+
+## 0.0.32
+
+### Patch Changes
+
+- @llamaindex/cloud@2.0.12
+
+## 0.0.31
+
+### Patch Changes
+
+- @llamaindex/cloud@2.0.11
+
+## 0.0.30
+
+### Patch Changes
+
+- @llamaindex/cloud@2.0.10
+
+## 0.0.29
+
+### Patch Changes
+
+- @llamaindex/cloud@2.0.9
+
+## 0.0.28
+
+### Patch Changes
+
+- @llamaindex/cloud@2.0.8
+
+## 0.0.27
+
+### Patch Changes
+
+- @llamaindex/cloud@2.0.7
+
+## 0.0.26
+
+### Patch Changes
+
+- @llamaindex/cloud@2.0.6
+
## 0.0.25
### Patch Changes
diff --git a/e2e/examples/llama-parse-browser/package.json b/e2e/examples/llama-parse-browser/package.json
index 934c5e5957..b0b10e8201 100644
--- a/e2e/examples/llama-parse-browser/package.json
+++ b/e2e/examples/llama-parse-browser/package.json
@@ -1,7 +1,7 @@
{
"name": "@llamaindex/llama-parse-browser-test",
"private": true,
- "version": "0.0.25",
+ "version": "0.0.35",
"type": "module",
"scripts": {
"dev": "vite",
@@ -10,7 +10,7 @@
},
"devDependencies": {
"typescript": "^5.6.3",
- "vite": "^5.4.1",
+ "vite": "^5.4.11",
"vite-plugin-wasm": "^3.3.0"
},
"dependencies": {
diff --git a/e2e/examples/nextjs-agent/CHANGELOG.md b/e2e/examples/nextjs-agent/CHANGELOG.md
index 5649cbd9bf..ac17353b4d 100644
--- a/e2e/examples/nextjs-agent/CHANGELOG.md
+++ b/e2e/examples/nextjs-agent/CHANGELOG.md
@@ -1,5 +1,107 @@
# @llamaindex/next-agent-test
+## 0.1.119
+
+### Patch Changes
+
+- llamaindex@0.8.23
+
+## 0.1.118
+
+### Patch Changes
+
+- Updated dependencies [819af45]
+ - llamaindex@0.8.22
+
+## 0.1.117
+
+### Patch Changes
+
+- Updated dependencies [83c3897]
+- Updated dependencies [efa2211]
+ - llamaindex@0.8.21
+
+## 0.1.116
+
+### Patch Changes
+
+- Updated dependencies [02b22da]
+ - llamaindex@0.8.20
+
+## 0.1.115
+
+### Patch Changes
+
+- Updated dependencies [90d265c]
+ - llamaindex@0.8.19
+
+## 0.1.114
+
+### Patch Changes
+
+- Updated dependencies [d17450f]
+ - llamaindex@0.8.18
+
+## 0.1.113
+
+### Patch Changes
+
+- llamaindex@0.8.17
+
+## 0.1.112
+
+### Patch Changes
+
+- llamaindex@0.8.16
+
+## 0.1.111
+
+### Patch Changes
+
+- Updated dependencies [3d503cb]
+- Updated dependencies [5dae534]
+ - llamaindex@0.8.15
+
+## 0.1.110
+
+### Patch Changes
+
+- Updated dependencies [630b425]
+ - llamaindex@0.8.14
+
+## 0.1.109
+
+### Patch Changes
+
+- llamaindex@0.8.13
+
+## 0.1.108
+
+### Patch Changes
+
+- llamaindex@0.8.12
+
+## 0.1.107
+
+### Patch Changes
+
+- llamaindex@0.8.11
+
+## 0.1.106
+
+### Patch Changes
+
+- Updated dependencies [f066e50]
+ - llamaindex@0.8.10
+
+## 0.1.105
+
+### Patch Changes
+
+- Updated dependencies [4fc001c]
+- Updated dependencies [4d4cd8a]
+ - llamaindex@0.8.9
+
## 0.1.104
### Patch Changes
diff --git a/e2e/examples/nextjs-agent/package.json b/e2e/examples/nextjs-agent/package.json
index ea61953756..11d74331e9 100644
--- a/e2e/examples/nextjs-agent/package.json
+++ b/e2e/examples/nextjs-agent/package.json
@@ -1,6 +1,6 @@
{
"name": "@llamaindex/next-agent-test",
- "version": "0.1.104",
+ "version": "0.1.119",
"private": true,
"scripts": {
"dev": "next dev",
@@ -8,9 +8,9 @@
"start": "next start"
},
"dependencies": {
- "ai": "^3.3.21",
+ "ai": "^4.0.0",
"llamaindex": "workspace:*",
- "next": "15.0.2",
+ "next": "15.0.3",
"react": "18.3.1",
"react-dom": "18.3.1"
},
@@ -18,10 +18,10 @@
"@types/node": "^22.9.0",
"@types/react": "^18.3.12",
"@types/react-dom": "^18.3.1",
- "eslint": "9.14.0",
- "eslint-config-next": "15.0.2",
- "postcss": "^8.4.41",
- "tailwindcss": "^3.4.10",
+ "eslint": "9.15.0",
+ "eslint-config-next": "15.0.3",
+ "postcss": "^8.4.49",
+ "tailwindcss": "^3.4.15",
"typescript": "^5.6.3"
}
}
diff --git a/e2e/examples/nextjs-edge-runtime/CHANGELOG.md b/e2e/examples/nextjs-edge-runtime/CHANGELOG.md
index 21e2fc079b..664a5bec7a 100644
--- a/e2e/examples/nextjs-edge-runtime/CHANGELOG.md
+++ b/e2e/examples/nextjs-edge-runtime/CHANGELOG.md
@@ -1,5 +1,107 @@
# test-edge-runtime
+## 0.1.118
+
+### Patch Changes
+
+- llamaindex@0.8.23
+
+## 0.1.117
+
+### Patch Changes
+
+- Updated dependencies [819af45]
+ - llamaindex@0.8.22
+
+## 0.1.116
+
+### Patch Changes
+
+- Updated dependencies [83c3897]
+- Updated dependencies [efa2211]
+ - llamaindex@0.8.21
+
+## 0.1.115
+
+### Patch Changes
+
+- Updated dependencies [02b22da]
+ - llamaindex@0.8.20
+
+## 0.1.114
+
+### Patch Changes
+
+- Updated dependencies [90d265c]
+ - llamaindex@0.8.19
+
+## 0.1.113
+
+### Patch Changes
+
+- Updated dependencies [d17450f]
+ - llamaindex@0.8.18
+
+## 0.1.112
+
+### Patch Changes
+
+- llamaindex@0.8.17
+
+## 0.1.111
+
+### Patch Changes
+
+- llamaindex@0.8.16
+
+## 0.1.110
+
+### Patch Changes
+
+- Updated dependencies [3d503cb]
+- Updated dependencies [5dae534]
+ - llamaindex@0.8.15
+
+## 0.1.109
+
+### Patch Changes
+
+- Updated dependencies [630b425]
+ - llamaindex@0.8.14
+
+## 0.1.108
+
+### Patch Changes
+
+- llamaindex@0.8.13
+
+## 0.1.107
+
+### Patch Changes
+
+- llamaindex@0.8.12
+
+## 0.1.106
+
+### Patch Changes
+
+- llamaindex@0.8.11
+
+## 0.1.105
+
+### Patch Changes
+
+- Updated dependencies [f066e50]
+ - llamaindex@0.8.10
+
+## 0.1.104
+
+### Patch Changes
+
+- Updated dependencies [4fc001c]
+- Updated dependencies [4d4cd8a]
+ - llamaindex@0.8.9
+
## 0.1.103
### Patch Changes
diff --git a/e2e/examples/nextjs-edge-runtime/package.json b/e2e/examples/nextjs-edge-runtime/package.json
index fb516662fb..362d9fcff0 100644
--- a/e2e/examples/nextjs-edge-runtime/package.json
+++ b/e2e/examples/nextjs-edge-runtime/package.json
@@ -1,6 +1,6 @@
{
"name": "@llamaindex/nextjs-edge-runtime-test",
- "version": "0.1.103",
+ "version": "0.1.118",
"private": true,
"scripts": {
"dev": "next dev",
@@ -9,7 +9,7 @@
},
"dependencies": {
"llamaindex": "workspace:*",
- "next": "15.0.2",
+ "next": "15.0.3",
"react": "^18.3.1",
"react-dom": "^18.3.1"
},
diff --git a/e2e/examples/nextjs-node-runtime/CHANGELOG.md b/e2e/examples/nextjs-node-runtime/CHANGELOG.md
index 9421cea50e..0f488130bd 100644
--- a/e2e/examples/nextjs-node-runtime/CHANGELOG.md
+++ b/e2e/examples/nextjs-node-runtime/CHANGELOG.md
@@ -1,5 +1,107 @@
# @llamaindex/next-node-runtime
+## 0.0.100
+
+### Patch Changes
+
+- llamaindex@0.8.23
+
+## 0.0.99
+
+### Patch Changes
+
+- Updated dependencies [819af45]
+ - llamaindex@0.8.22
+
+## 0.0.98
+
+### Patch Changes
+
+- Updated dependencies [83c3897]
+- Updated dependencies [efa2211]
+ - llamaindex@0.8.21
+
+## 0.0.97
+
+### Patch Changes
+
+- Updated dependencies [02b22da]
+ - llamaindex@0.8.20
+
+## 0.0.96
+
+### Patch Changes
+
+- Updated dependencies [90d265c]
+ - llamaindex@0.8.19
+
+## 0.0.95
+
+### Patch Changes
+
+- Updated dependencies [d17450f]
+ - llamaindex@0.8.18
+
+## 0.0.94
+
+### Patch Changes
+
+- llamaindex@0.8.17
+
+## 0.0.93
+
+### Patch Changes
+
+- llamaindex@0.8.16
+
+## 0.0.92
+
+### Patch Changes
+
+- Updated dependencies [3d503cb]
+- Updated dependencies [5dae534]
+ - llamaindex@0.8.15
+
+## 0.0.91
+
+### Patch Changes
+
+- Updated dependencies [630b425]
+ - llamaindex@0.8.14
+
+## 0.0.90
+
+### Patch Changes
+
+- llamaindex@0.8.13
+
+## 0.0.89
+
+### Patch Changes
+
+- llamaindex@0.8.12
+
+## 0.0.88
+
+### Patch Changes
+
+- llamaindex@0.8.11
+
+## 0.0.87
+
+### Patch Changes
+
+- Updated dependencies [f066e50]
+ - llamaindex@0.8.10
+
+## 0.0.86
+
+### Patch Changes
+
+- Updated dependencies [4fc001c]
+- Updated dependencies [4d4cd8a]
+ - llamaindex@0.8.9
+
## 0.0.85
### Patch Changes
diff --git a/e2e/examples/nextjs-node-runtime/package.json b/e2e/examples/nextjs-node-runtime/package.json
index 48cdd09a65..6abcbe1390 100644
--- a/e2e/examples/nextjs-node-runtime/package.json
+++ b/e2e/examples/nextjs-node-runtime/package.json
@@ -1,6 +1,6 @@
{
"name": "@llamaindex/next-node-runtime-test",
- "version": "0.0.85",
+ "version": "0.0.100",
"private": true,
"scripts": {
"dev": "next dev",
@@ -9,7 +9,7 @@
},
"dependencies": {
"llamaindex": "workspace:*",
- "next": "15.0.2",
+ "next": "15.0.3",
"react": "18.3.1",
"react-dom": "18.3.1"
},
@@ -17,10 +17,10 @@
"@types/node": "^22.9.0",
"@types/react": "^18.3.12",
"@types/react-dom": "^18.3.1",
- "eslint": "9.14.0",
- "eslint-config-next": "15.0.2",
- "postcss": "^8.4.41",
- "tailwindcss": "^3.4.10",
+ "eslint": "9.15.0",
+ "eslint-config-next": "15.0.3",
+ "postcss": "^8.4.49",
+ "tailwindcss": "^3.4.15",
"typescript": "^5.6.3"
}
}
diff --git a/e2e/examples/nextjs-node-runtime/src/actions/openai.ts b/e2e/examples/nextjs-node-runtime/src/actions/openai.ts
index a5643bb928..28d9fe9111 100644
--- a/e2e/examples/nextjs-node-runtime/src/actions/openai.ts
+++ b/e2e/examples/nextjs-node-runtime/src/actions/openai.ts
@@ -15,7 +15,6 @@ Settings.llm = new OpenAI({
});
Settings.embedModel = new HuggingFaceEmbedding({
modelType: "BAAI/bge-small-en-v1.5",
- quantized: false,
});
Settings.callbackManager.on("llm-tool-call", (event) => {
console.log(event.detail);
diff --git a/e2e/examples/waku-query-engine/CHANGELOG.md b/e2e/examples/waku-query-engine/CHANGELOG.md
index 9d235fbfeb..ee910e171d 100644
--- a/e2e/examples/waku-query-engine/CHANGELOG.md
+++ b/e2e/examples/waku-query-engine/CHANGELOG.md
@@ -1,5 +1,107 @@
# @llamaindex/waku-query-engine-test
+## 0.0.119
+
+### Patch Changes
+
+- llamaindex@0.8.23
+
+## 0.0.118
+
+### Patch Changes
+
+- Updated dependencies [819af45]
+ - llamaindex@0.8.22
+
+## 0.0.117
+
+### Patch Changes
+
+- Updated dependencies [83c3897]
+- Updated dependencies [efa2211]
+ - llamaindex@0.8.21
+
+## 0.0.116
+
+### Patch Changes
+
+- Updated dependencies [02b22da]
+ - llamaindex@0.8.20
+
+## 0.0.115
+
+### Patch Changes
+
+- Updated dependencies [90d265c]
+ - llamaindex@0.8.19
+
+## 0.0.114
+
+### Patch Changes
+
+- Updated dependencies [d17450f]
+ - llamaindex@0.8.18
+
+## 0.0.113
+
+### Patch Changes
+
+- llamaindex@0.8.17
+
+## 0.0.112
+
+### Patch Changes
+
+- llamaindex@0.8.16
+
+## 0.0.111
+
+### Patch Changes
+
+- Updated dependencies [3d503cb]
+- Updated dependencies [5dae534]
+ - llamaindex@0.8.15
+
+## 0.0.110
+
+### Patch Changes
+
+- Updated dependencies [630b425]
+ - llamaindex@0.8.14
+
+## 0.0.109
+
+### Patch Changes
+
+- llamaindex@0.8.13
+
+## 0.0.108
+
+### Patch Changes
+
+- llamaindex@0.8.12
+
+## 0.0.107
+
+### Patch Changes
+
+- llamaindex@0.8.11
+
+## 0.0.106
+
+### Patch Changes
+
+- Updated dependencies [f066e50]
+ - llamaindex@0.8.10
+
+## 0.0.105
+
+### Patch Changes
+
+- Updated dependencies [4fc001c]
+- Updated dependencies [4d4cd8a]
+ - llamaindex@0.8.9
+
## 0.0.104
### Patch Changes
diff --git a/e2e/examples/waku-query-engine/package.json b/e2e/examples/waku-query-engine/package.json
index cfd9bc2e3a..2e900d0966 100644
--- a/e2e/examples/waku-query-engine/package.json
+++ b/e2e/examples/waku-query-engine/package.json
@@ -1,6 +1,6 @@
{
"name": "@llamaindex/waku-query-engine-test",
- "version": "0.0.104",
+ "version": "0.0.119",
"type": "module",
"private": true,
"scripts": {
@@ -10,16 +10,16 @@
},
"dependencies": {
"llamaindex": "workspace:*",
- "react": "19.0.0-rc-bf7e210c-20241017",
- "react-dom": "19.0.0-rc-bf7e210c-20241017",
- "react-server-dom-webpack": "19.0.0-rc-bf7e210c-20241017",
- "waku": "0.21.4"
+ "react": "19.0.0-rc-5c56b873-20241107",
+ "react-dom": "19.0.0-rc-5c56b873-20241107",
+ "react-server-dom-webpack": "19.0.0-rc-5c56b873-20241107",
+ "waku": "0.21.6"
},
"devDependencies": {
"@types/react": "18.3.12",
"@types/react-dom": "18.3.1",
"autoprefixer": "^10.4.20",
- "tailwindcss": "^3.4.14",
+ "tailwindcss": "^3.4.15",
"typescript": "5.6.3"
}
}
diff --git a/e2e/fixtures/llm/ollama.ts b/e2e/fixtures/llm/ollama.ts
new file mode 100644
index 0000000000..b16fea1333
--- /dev/null
+++ b/e2e/fixtures/llm/ollama.ts
@@ -0,0 +1,3 @@
+import { OpenAI } from "./openai.js";
+
+export class Ollama extends OpenAI {}
diff --git a/e2e/mock-module.js b/e2e/mock-module.js
index 28f55f3aa6..7c664a8efa 100644
--- a/e2e/mock-module.js
+++ b/e2e/mock-module.js
@@ -15,7 +15,17 @@ export async function resolve(specifier, context, nextResolve) {
const targetUrl = fileURLToPath(result.url).replace(/\.js$/, ".ts");
let relativePath = relative(packageDistDir, targetUrl);
// todo: make it more generic if we have more sub modules fixtures in the future
- if (relativePath.startsWith("../../llm/openai")) {
+ if (relativePath.startsWith("../../llm/anthropic")) {
+ relativePath = relativePath.replace(
+ "../../llm/ollama/dist/index.ts",
+ "llm/anthropic.ts",
+ );
+ } else if (relativePath.startsWith("../../llm/ollama")) {
+ relativePath = relativePath.replace(
+ "../../llm/ollama/dist/index.ts",
+ "llm/ollama.ts",
+ );
+ } else if (relativePath.startsWith("../../llm/openai")) {
relativePath = relativePath.replace(
"../../llm/openai/dist/index.ts",
"llm/openai.ts",
diff --git a/e2e/node/embedding/clip.e2e.ts b/e2e/node/embedding/clip.e2e.ts
index e21c9fe16c..3b6a8c2a74 100644
--- a/e2e/node/embedding/clip.e2e.ts
+++ b/e2e/node/embedding/clip.e2e.ts
@@ -64,7 +64,7 @@ await test("clip embedding", async (t) => {
});
await t.test("custom transformer", async () => {
- const transformers = await import("@xenova/transformers");
+ const transformers = await import("@huggingface/transformers");
const getter = test.mock.fn((t, k, r) => {
return Reflect.get(t, k, r);
});
diff --git a/e2e/node/ollama.e2e.ts b/e2e/node/ollama.e2e.ts
new file mode 100644
index 0000000000..9aadcdb4cf
--- /dev/null
+++ b/e2e/node/ollama.e2e.ts
@@ -0,0 +1,35 @@
+import { Ollama } from "@llamaindex/ollama";
+import assert from "node:assert";
+import { test } from "node:test";
+import { getWeatherTool } from "./fixtures/tools.js";
+import { mockLLMEvent } from "./utils.js";
+
+await test("ollama", async (t) => {
+ await mockLLMEvent(t, "ollama");
+ await t.test("ollama function call", async (t) => {
+ const llm = new Ollama({
+ model: "llama3.2",
+ });
+ const chatResponse = await llm.chat({
+ messages: [
+ {
+ role: "user",
+ content: "What is the weather in Paris?",
+ },
+ ],
+ tools: [getWeatherTool],
+ });
+ if (
+ chatResponse.message.options &&
+ "toolCall" in chatResponse.message.options
+ ) {
+ assert.equal(chatResponse.message.options.toolCall.length, 1);
+ assert.equal(
+ chatResponse.message.options.toolCall[0]!.name,
+ getWeatherTool.metadata.name,
+ );
+ } else {
+ throw new Error("Expected tool calls in response");
+ }
+ });
+});
diff --git a/e2e/node/openai.e2e.ts b/e2e/node/openai.e2e.ts
index 4019390854..1513810cee 100644
--- a/e2e/node/openai.e2e.ts
+++ b/e2e/node/openai.e2e.ts
@@ -167,6 +167,7 @@ For questions about more specific sections, please use the vector_tool.`,
const mockCall = t.mock.fn(({ query }: { query: string }) => {
return originalCall({ query });
});
+ // @ts-expect-error what?
queryEngineTools[1]!.call = mockCall;
const toolMapping = SimpleToolNodeMapping.fromObjects(queryEngineTools);
diff --git a/e2e/node/snapshot/ollama.snap b/e2e/node/snapshot/ollama.snap
new file mode 100644
index 0000000000..b2f4b07655
--- /dev/null
+++ b/e2e/node/snapshot/ollama.snap
@@ -0,0 +1,37 @@
+{
+ "llmEventStart": [
+ {
+ "id": "PRESERVE_0",
+ "messages": [
+ {
+ "role": "user",
+ "content": "What is the weather in Paris?"
+ }
+ ]
+ }
+ ],
+ "llmEventEnd": [
+ {
+ "id": "PRESERVE_0",
+ "response": {
+ "message": {
+ "role": "assistant",
+ "content": "",
+ "options": {
+ "toolCall": [
+ {
+ "name": "getWeather",
+ "input": {
+ "city": "Paris"
+ },
+ "id": "5d198775-5268-4552-993b-9ecb4425385b"
+ }
+ ]
+ }
+ },
+ "raw": null
+ }
+ }
+ ],
+ "llmEventStream": []
+}
\ No newline at end of file
diff --git a/e2e/node/vector-store/pinecone.e2e.ts b/e2e/node/vector-store/pinecone.e2e.ts
new file mode 100644
index 0000000000..e15b08e5e1
--- /dev/null
+++ b/e2e/node/vector-store/pinecone.e2e.ts
@@ -0,0 +1,66 @@
+import { Document, MetadataMode } from "@llamaindex/core/schema";
+import { config } from "dotenv";
+import {
+ OpenAIEmbedding,
+ PineconeVectorStore,
+ VectorStoreIndex,
+} from "llamaindex";
+import assert from "node:assert";
+import { test } from "node:test";
+
+config({ path: [".env.local", ".env", ".env.ci"] });
+
+await test("pinecone", async (t) => {
+ if (
+ !process.env.PINECONE_API_KEY ||
+ !process.env.PINECONE_NAMESPACE ||
+ !process.env.PINECONE_INDEX_NAME
+ ) {
+ return t.skip(
+ "PINECONE_API_KEY, PINECONE_NAMESPACE, and PINECONE_INDEX_NAME must be set to run this test",
+ );
+ }
+ const openaiEmbedding = new OpenAIEmbedding({
+ model: "text-embedding-3-large",
+ });
+
+ const vectorStore = new PineconeVectorStore({
+ embeddingModel: openaiEmbedding,
+ });
+
+ t.after(async () => {
+ await vectorStore.clearIndex();
+ });
+
+ const index = await VectorStoreIndex.fromVectorStore(vectorStore);
+
+ const retriever = index.asRetriever({
+ similarityTopK: 3,
+ });
+ const text = "We are open from 9am to 5pm";
+
+ await vectorStore.add([
+ new Document({
+ text,
+ embedding: await openaiEmbedding.getTextEmbedding(text),
+ }),
+ ]);
+
+ const results = await retriever.retrieve({
+ query: "When are you open?",
+ });
+ results.every((result) => {
+ assert.ok(result.node.embedding instanceof Array);
+ result.node.embedding.every((embedding, idx) =>
+ assert.ok(
+ typeof embedding === "number",
+ `Embedding at index ${idx} should be a number`,
+ ),
+ );
+ assert.ok(typeof result.score === "number", "Score should be a number");
+ assert.ok(
+ result.node.getContent(MetadataMode.NONE).length > 0,
+ "Content should not be empty",
+ );
+ });
+});
diff --git a/e2e/package.json b/e2e/package.json
index 65a797cb3d..55808e1e24 100644
--- a/e2e/package.json
+++ b/e2e/package.json
@@ -12,10 +12,11 @@
"@faker-js/faker": "^9.2.0",
"@llamaindex/core": "workspace:*",
"@llamaindex/env": "workspace:*",
+ "@llamaindex/ollama": "workspace:*",
"@llamaindex/openai": "workspace:*",
"@types/node": "^22.9.0",
"@types/pg": "^8.11.8",
- "@xenova/transformers": "^2.17.2",
+ "@huggingface/transformers": "^3.0.2",
"consola": "^3.2.3",
"dotenv": "^16.4.5",
"llamaindex": "workspace:*",
diff --git a/examples/CHANGELOG.md b/examples/CHANGELOG.md
index 7817ae84a3..6bcb469f64 100644
--- a/examples/CHANGELOG.md
+++ b/examples/CHANGELOG.md
@@ -1,5 +1,38 @@
# examples
+## 0.0.16
+
+### Patch Changes
+
+- Updated dependencies [a7b0ac3]
+- Updated dependencies [ee20c44]
+- Updated dependencies [c69605f]
+ - @llamaindex/core@0.4.10
+ - @llamaindex/workflow@0.0.6
+ - llamaindex@0.8.13
+ - @llamaindex/readers@1.0.11
+
+## 0.0.15
+
+### Patch Changes
+
+- Updated dependencies [ea92b69]
+- Updated dependencies [fadc8b8]
+ - @llamaindex/workflow@0.0.5
+
+## 0.0.14
+
+### Patch Changes
+
+- Updated dependencies [f066e50]
+- Updated dependencies [d89ebe0]
+- Updated dependencies [fd8c882]
+- Updated dependencies [fd8c882]
+ - llamaindex@0.8.10
+ - @llamaindex/core@0.4.7
+ - @llamaindex/workflow@0.0.4
+ - @llamaindex/readers@1.0.8
+
## 0.0.13
### Patch Changes
diff --git a/examples/anthropic/prompt-caching.ts b/examples/anthropic/prompt-caching.ts
new file mode 100644
index 0000000000..2f9182e9a6
--- /dev/null
+++ b/examples/anthropic/prompt-caching.ts
@@ -0,0 +1,38 @@
+import { Anthropic } from "llamaindex";
+
+async function main() {
+ const anthropic = new Anthropic({
+ model: "claude-3-5-sonnet-20241022",
+ });
+
+ const entireBook = await fetch(
+ "https://www.gutenberg.org/files/1342/1342-0.txt",
+ ).then((response) => response.text());
+
+ const response = await anthropic.chat({
+ messages: [
+ {
+ content:
+ "You are an AI assistant tasked with analyzing literary works. Your goal is to provide insightful commentary on themes, characters, and writing style.\n",
+ role: "system",
+ },
+ {
+ content: entireBook,
+ role: "system",
+ options: {
+ cache_control: {
+ type: "ephemeral",
+ },
+ },
+ },
+ {
+ content: "analyze the major themes in Pride and Prejudice.",
+ role: "user",
+ },
+ ],
+ });
+
+ console.log(response.message.content);
+}
+
+main().catch(console.error);
diff --git a/examples/cosmosdb/queryVectorData.ts b/examples/cosmosdb/queryVectorData.ts
index 29f875b516..13e9604831 100644
--- a/examples/cosmosdb/queryVectorData.ts
+++ b/examples/cosmosdb/queryVectorData.ts
@@ -83,14 +83,6 @@ async function query() {
});
}
- // configure the Azure CosmosDB NoSQL Vector Store
- const dbConfig: AzureCosmosDBNoSQLConfig = {
- client: cosmosClient,
- databaseName,
- containerName,
- flatMetadata: false,
- };
-
// use Azure CosmosDB as a vectorStore, docStore, and indexStore
const { vectorStore, docStore, indexStore } = await initializeStores();
diff --git a/examples/package.json b/examples/package.json
index 9a45ad5dde..31c1ba0697 100644
--- a/examples/package.json
+++ b/examples/package.json
@@ -1,24 +1,24 @@
{
"name": "@llamaindex/examples",
"private": true,
- "version": "0.0.13",
+ "version": "0.0.16",
"dependencies": {
"@aws-crypto/sha256-js": "^5.2.0",
"@azure/cosmos": "^4.1.1",
"@azure/identity": "^4.4.1",
"@datastax/astra-db-ts": "^1.4.1",
- "@llamaindex/core": "^0.4.5",
- "@llamaindex/readers": "^1.0.6",
- "@llamaindex/workflow": "^0.0.3",
+ "@llamaindex/core": "^0.4.10",
+ "@llamaindex/readers": "^1.0.11",
+ "@llamaindex/workflow": "^0.0.6",
"@notionhq/client": "^2.2.15",
- "@pinecone-database/pinecone": "^3.0.2",
+ "@pinecone-database/pinecone": "^4.0.0",
"@vercel/postgres": "^0.10.0",
"@zilliz/milvus2-sdk-node": "^2.4.6",
"chromadb": "^1.8.1",
"commander": "^12.1.0",
"dotenv": "^16.4.5",
"js-tiktoken": "^1.0.14",
- "llamaindex": "^0.8.8",
+ "llamaindex": "^0.8.13",
"mongodb": "^6.7.0",
"pathe": "^1.1.2",
"postgres": "^3.4.4"
diff --git a/examples/vectorIndexLocal.ts b/examples/vectorIndexLocal.ts
index 859f21e0aa..9d19e68ced 100644
--- a/examples/vectorIndexLocal.ts
+++ b/examples/vectorIndexLocal.ts
@@ -14,7 +14,6 @@ Settings.llm = new Ollama({
Settings.embedModel = new HuggingFaceEmbedding({
modelType: "BAAI/bge-small-en-v1.5",
- quantized: false,
});
async function main() {
diff --git a/examples/vllm.ts b/examples/vllm.ts
new file mode 100644
index 0000000000..f65cd375ea
--- /dev/null
+++ b/examples/vllm.ts
@@ -0,0 +1,16 @@
+import { VLLM } from "llamaindex";
+
+const llm = new VLLM({
+ model: "NousResearch/Meta-Llama-3-8B-Instruct",
+});
+
+const response = await llm.chat({
+ messages: [
+ {
+ role: "user",
+ content: "Hello?",
+ },
+ ],
+});
+
+console.log(response.message.content);
diff --git a/examples/workflow/app-creator.ts b/examples/workflow/app-creator.ts
index 6cfc5b95eb..8a6448cb1c 100644
--- a/examples/workflow/app-creator.ts
+++ b/examples/workflow/app-creator.ts
@@ -1,14 +1,19 @@
import {
- Context,
+ HandlerContext,
StartEvent,
StopEvent,
Workflow,
WorkflowEvent,
-} from "@llamaindex/core/workflow";
+} from "@llamaindex/workflow";
import { OpenAI } from "llamaindex";
const MAX_REVIEWS = 3;
+type Context = {
+ specification: string;
+ numberReviews: number;
+};
+
// Using the o1-preview model (see https://platform.openai.com/docs/guides/reasoning?reasoning-prompt-examples=coding-planning)
const llm = new OpenAI({ model: "o1-preview", temperature: 1 });
@@ -20,7 +25,9 @@ stores the question/answer pair in the database.`;
// Create custom event types
export class MessageEvent extends WorkflowEvent<{ msg: string }> {}
+
export class CodeEvent extends WorkflowEvent<{ code: string }> {}
+
export class ReviewEvent extends WorkflowEvent<{
review: string;
code: string;
@@ -34,12 +41,13 @@ const truncate = (str: string) => {
};
// the architect is responsible for writing the structure and the initial code based on the specification
-const architect = async (context: Context, ev: StartEvent) => {
- // get the specification from the start event and save it to context
- context.set("specification", ev.data.input);
- const spec = context.get("specification");
+const architect = async (
+ context: HandlerContext,
+ _: StartEvent,
+) => {
+ const spec = context.data.specification;
// write a message to send an update to the user
- context.writeEventToStream(
+ context.sendEvent(
new MessageEvent({
msg: `Writing app using this specification: ${truncate(spec)}`,
}),
@@ -50,13 +58,13 @@ const architect = async (context: Context, ev: StartEvent) => {
};
// the coder is responsible for updating the code based on the review
-const coder = async (context: Context, ev: ReviewEvent) => {
+const coder = async (context: HandlerContext, ev: ReviewEvent) => {
// get the specification from the context
- const spec = context.get("specification");
+ const spec = context.data.specification;
// get the latest review and code
const { review, code } = ev.data;
// write a message to send an update to the user
- context.writeEventToStream(
+ context.sendEvent(
new MessageEvent({
msg: `Update code based on review: ${truncate(review)}`,
}),
@@ -67,32 +75,35 @@ const coder = async (context: Context, ev: ReviewEvent) => {
};
// the reviewer is responsible for reviewing the code and providing feedback
-const reviewer = async (context: Context, ev: CodeEvent) => {
+const reviewer = async (context: HandlerContext, ev: CodeEvent) => {
// get the specification from the context
- const spec = context.get("specification");
+ const spec = context.data.specification;
// get latest code from the event
const { code } = ev.data;
// update and check the number of reviews
- const numberReviews = context.get("numberReviews", 0) + 1;
- context.set("numberReviews", numberReviews);
- if (numberReviews > MAX_REVIEWS) {
+ context.data.numberReviews++;
+ if (context.data.numberReviews > MAX_REVIEWS) {
      // we've done this too many times - return the code
- context.writeEventToStream(
+ context.sendEvent(
new MessageEvent({
- msg: `Already reviewed ${numberReviews - 1} times, stopping!`,
+ msg: `Already reviewed ${
+ context.data.numberReviews - 1
+ } times, stopping!`,
}),
);
return new StopEvent({ result: code });
}
// write a message to send an update to the user
- context.writeEventToStream(
- new MessageEvent({ msg: `Review #${numberReviews}: ${truncate(code)}` }),
+ context.sendEvent(
+ new MessageEvent({
+ msg: `Review #${context.data.numberReviews}: ${truncate(code)}`,
+ }),
);
const prompt = `Review this code: ${code}
  . Check the code quality and whether it correctly implements this specification: ${spec}. If you're satisfied, just return 'Looks great', nothing else. If not, return a review with a list of changes you'd like to see.`;
const review = (await llm.complete({ prompt })).text;
if (review.includes("Looks great")) {
// the reviewer is satisfied with the code, let's return the review
- context.writeEventToStream(
+ context.sendEvent(
new MessageEvent({
msg: `Reviewer says: ${review}`,
}),
@@ -103,20 +114,44 @@ const reviewer = async (context: Context, ev: CodeEvent) => {
return new ReviewEvent({ review, code });
};
-const codeAgent = new Workflow({ validate: true });
-codeAgent.addStep(StartEvent, architect, { outputs: CodeEvent });
-codeAgent.addStep(ReviewEvent, coder, { outputs: CodeEvent });
-codeAgent.addStep(CodeEvent, reviewer, { outputs: ReviewEvent });
+const codeAgent = new Workflow();
+codeAgent.addStep(
+ {
+ inputs: [StartEvent],
+ outputs: [CodeEvent],
+ },
+ architect,
+);
+codeAgent.addStep(
+ {
+ inputs: [ReviewEvent],
+ outputs: [CodeEvent],
+ },
+ coder,
+);
+codeAgent.addStep(
+ {
+ inputs: [CodeEvent],
+ outputs: [ReviewEvent, StopEvent],
+ },
+ reviewer,
+);
// Usage
async function main() {
- const run = codeAgent.run(specification);
- for await (const event of codeAgent.streamEvents()) {
- const msg = (event as MessageEvent).data.msg;
- console.log(`${msg}\n`);
+ const run = codeAgent.run(specification).with({
+ specification,
+ numberReviews: 0,
+ });
+ for await (const event of run) {
+ if (event instanceof MessageEvent) {
+ const msg = (event as MessageEvent).data.msg;
+ console.log(`${msg}\n`);
+ } else if (event instanceof StopEvent) {
+ const result = (event as StopEvent).data;
+ console.log("Final code:\n", result);
+ }
}
- const result = await run;
- console.log("Final code:\n", result.data.result);
}
main().catch(console.error);
diff --git a/examples/workflow/collect-events.ts b/examples/workflow/conditional.ts
similarity index 50%
rename from examples/workflow/collect-events.ts
rename to examples/workflow/conditional.ts
index 4c68317a4a..c8b6cf0401 100644
--- a/examples/workflow/collect-events.ts
+++ b/examples/workflow/conditional.ts
@@ -1,10 +1,10 @@
import {
- Context,
+ HandlerContext,
StartEvent,
StopEvent,
Workflow,
WorkflowEvent,
-} from "@llamaindex/core/workflow";
+} from "@llamaindex/workflow";
import { OpenAI } from "llamaindex";
// Create LLM instance
@@ -12,59 +12,77 @@ const llm = new OpenAI();
// Create custom event types
export class JokeEvent extends WorkflowEvent<{ joke: string }> {}
+
export class CritiqueEvent extends WorkflowEvent<{ critique: string }> {}
+
export class AnalysisEvent extends WorkflowEvent<{ analysis: string }> {}
-const generateJoke = async (_context: Context, ev: StartEvent) => {
- const prompt = `Write your best joke about ${ev.data.input}.`;
+const generateJoke = async (_: unknown, ev: StartEvent) => {
+ const prompt = `Write your best joke about ${ev.data}.`;
const response = await llm.complete({ prompt });
return new JokeEvent({ joke: response.text });
};
-const critiqueJoke = async (_context: Context, ev: JokeEvent) => {
+const critiqueJoke = async (_: unknown, ev: JokeEvent) => {
const prompt = `Give a thorough critique of the following joke: ${ev.data.joke}`;
const response = await llm.complete({ prompt });
return new CritiqueEvent({ critique: response.text });
};
-const analyzeJoke = async (_context: Context, ev: JokeEvent) => {
+const analyzeJoke = async (_: unknown, ev: JokeEvent) => {
const prompt = `Give a thorough analysis of the following joke: ${ev.data.joke}`;
const response = await llm.complete({ prompt });
return new AnalysisEvent({ analysis: response.text });
};
const reportJoke = async (
- context: Context,
- ev: AnalysisEvent | CritiqueEvent,
+ context: HandlerContext,
+ ev1: AnalysisEvent,
+ ev2: CritiqueEvent,
) => {
- const events = context.collectEvents(ev, [AnalysisEvent, CritiqueEvent]);
- if (!events) {
- return;
- }
- const subPrompts = events.map((event) => {
- if (event instanceof AnalysisEvent) {
- return `Analysis: ${event.data.analysis}`;
- } else if (event instanceof CritiqueEvent) {
- return `Critique: ${event.data.critique}`;
- }
- return "";
- });
+ const subPrompts = [ev1.data.analysis, ev2.data.critique];
- const prompt = `Based on the following information about a joke:\n${subPrompts.join("\n")}\nProvide a comprehensive report on the joke's quality and impact.`;
+ const prompt = `Based on the following information about a joke:\n${subPrompts.join(
+ "\n",
+ )}\nProvide a comprehensive report on the joke's quality and impact.`;
const response = await llm.complete({ prompt });
- return new StopEvent({ result: response.text });
+ return new StopEvent(response.text);
};
-const jokeFlow = new Workflow();
-jokeFlow.addStep(StartEvent, generateJoke);
-jokeFlow.addStep(JokeEvent, critiqueJoke);
-jokeFlow.addStep(JokeEvent, analyzeJoke);
-jokeFlow.addStep([AnalysisEvent, CritiqueEvent], reportJoke);
+const jokeFlow = new Workflow();
+jokeFlow.addStep(
+ {
+ inputs: [StartEvent],
+ outputs: [JokeEvent],
+ },
+ generateJoke,
+);
+jokeFlow.addStep(
+ {
+ inputs: [JokeEvent],
+ outputs: [CritiqueEvent],
+ },
+ critiqueJoke,
+);
+jokeFlow.addStep(
+ {
+ inputs: [JokeEvent],
+ outputs: [AnalysisEvent],
+ },
+ analyzeJoke,
+);
+jokeFlow.addStep(
+ {
+ inputs: [AnalysisEvent, CritiqueEvent],
+ outputs: [StopEvent],
+ },
+ reportJoke,
+);
// Usage
async function main() {
const result = await jokeFlow.run("pirates");
- console.log(result.data.result);
+ console.log(result.data);
}
main().catch(console.error);
diff --git a/examples/workflow/joke.ts b/examples/workflow/joke.ts
index ca83f3cae4..310761d77a 100644
--- a/examples/workflow/joke.ts
+++ b/examples/workflow/joke.ts
@@ -1,10 +1,9 @@
import {
- Context,
StartEvent,
StopEvent,
Workflow,
WorkflowEvent,
-} from "@llamaindex/core/workflow";
+} from "@llamaindex/workflow";
import { OpenAI } from "llamaindex";
// Create LLM instance
@@ -13,26 +12,38 @@ const llm = new OpenAI();
// Create a custom event type
export class JokeEvent extends WorkflowEvent<{ joke: string }> {}
-const generateJoke = async (_context: Context, ev: StartEvent) => {
- const prompt = `Write your best joke about ${ev.data.input}.`;
+const generateJoke = async (_: unknown, ev: StartEvent) => {
+ const prompt = `Write your best joke about ${ev.data}.`;
const response = await llm.complete({ prompt });
return new JokeEvent({ joke: response.text });
};
-const critiqueJoke = async (_context: Context, ev: JokeEvent) => {
+const critiqueJoke = async (_: unknown, ev: JokeEvent) => {
const prompt = `Give a thorough critique of the following joke: ${ev.data.joke}`;
const response = await llm.complete({ prompt });
- return new StopEvent({ result: response.text });
+ return new StopEvent(response.text);
};
-const jokeFlow = new Workflow({ verbose: true });
-jokeFlow.addStep(StartEvent, generateJoke);
-jokeFlow.addStep(JokeEvent, critiqueJoke);
+const jokeFlow = new Workflow();
+jokeFlow.addStep(
+ {
+ inputs: [StartEvent],
+ outputs: [JokeEvent],
+ },
+ generateJoke,
+);
+jokeFlow.addStep(
+ {
+ inputs: [JokeEvent],
+ outputs: [StopEvent],
+ },
+ critiqueJoke,
+);
// Usage
async function main() {
const result = await jokeFlow.run("pirates");
- console.log(result.data.result);
+ console.log(result.data);
}
main().catch(console.error);
diff --git a/examples/workflow/stream-events.ts b/examples/workflow/stream-events.ts
index 28d584c8e2..2fc1f107df 100644
--- a/examples/workflow/stream-events.ts
+++ b/examples/workflow/stream-events.ts
@@ -1,10 +1,10 @@
import {
- Context,
+ HandlerContext,
StartEvent,
StopEvent,
Workflow,
WorkflowEvent,
-} from "@llamaindex/core/workflow";
+} from "@llamaindex/workflow";
import { OpenAI } from "llamaindex";
// Create LLM instance
@@ -12,38 +12,55 @@ const llm = new OpenAI();
// Create custom event types
export class JokeEvent extends WorkflowEvent<{ joke: string }> {}
+
export class MessageEvent extends WorkflowEvent<{ msg: string }> {}
-const generateJoke = async (context: Context, ev: StartEvent) => {
- context.writeEventToStream(
- new MessageEvent({ msg: `Generating a joke about: ${ev.data.input}` }),
+const generateJoke = async (context: HandlerContext, ev: StartEvent) => {
+ context.sendEvent(
+ new MessageEvent({ msg: `Generating a joke about: ${ev.data}` }),
);
- const prompt = `Write your best joke about ${ev.data.input}.`;
+ const prompt = `Write your best joke about ${ev.data}.`;
const response = await llm.complete({ prompt });
return new JokeEvent({ joke: response.text });
};
-const critiqueJoke = async (context: Context, ev: JokeEvent) => {
- context.writeEventToStream(
+const critiqueJoke = async (context: HandlerContext, ev: JokeEvent) => {
+ context.sendEvent(
new MessageEvent({ msg: `Write a critique of this joke: ${ev.data.joke}` }),
);
const prompt = `Give a thorough critique of the following joke: ${ev.data.joke}`;
const response = await llm.complete({ prompt });
- return new StopEvent({ result: response.text });
+ return new StopEvent(response.text);
};
const jokeFlow = new Workflow();
-jokeFlow.addStep(StartEvent, generateJoke);
-jokeFlow.addStep(JokeEvent, critiqueJoke);
+jokeFlow.addStep(
+ {
+ inputs: [StartEvent],
+ outputs: [JokeEvent],
+ },
+ generateJoke,
+);
+jokeFlow.addStep(
+ {
+ inputs: [JokeEvent],
+ outputs: [StopEvent],
+ },
+ critiqueJoke,
+);
// Usage
async function main() {
const run = jokeFlow.run("pirates");
- for await (const event of jokeFlow.streamEvents()) {
- console.log((event as MessageEvent).data.msg);
+ for await (const event of run) {
+ if (event instanceof MessageEvent) {
+ console.log("Message:");
+ console.log((event as MessageEvent).data.msg);
+ } else if (event instanceof StopEvent) {
+ console.log("Result:");
+ console.log((event as StopEvent).data);
+ }
}
- const result = await run;
- console.log(result.data.result);
}
main().catch(console.error);
diff --git a/examples/workflow/timeout.ts b/examples/workflow/timeout.ts
index c10fd595c9..261fb39808 100644
--- a/examples/workflow/timeout.ts
+++ b/examples/workflow/timeout.ts
@@ -1,19 +1,21 @@
-import {
- Context,
- StartEvent,
- StopEvent,
- Workflow,
-} from "@llamaindex/core/workflow";
+import { StartEvent, StopEvent, Workflow } from "@llamaindex/workflow";
-const longRunning = async (_context: Context, ev: StartEvent) => {
+const longRunning = async (_: unknown, ev: StartEvent) => {
await new Promise((resolve) => setTimeout(resolve, 2000)); // Wait for 2 seconds
- return new StopEvent({ result: "We waited 2 seconds" });
+ return new StopEvent("We waited 2 seconds");
};
async function timeout() {
- const workflow = new Workflow({ verbose: true, timeout: 1 });
- workflow.addStep(StartEvent, longRunning);
- // This will timeout
+ const workflow = new Workflow({
+ timeout: 1,
+ });
+ workflow.addStep(
+ {
+ inputs: [StartEvent],
+ outputs: [StopEvent],
+ },
+ longRunning,
+ );
try {
await workflow.run("Let's start");
} catch (error) {
@@ -23,14 +25,23 @@ async function timeout() {
async function notimeout() {
// Increase timeout to 3 seconds - no timeout
- const workflow = new Workflow({ verbose: true, timeout: 3 });
- workflow.addStep(StartEvent, longRunning);
+ const workflow = new Workflow({
+ timeout: 3,
+ });
+ workflow.addStep(
+ {
+ inputs: [StartEvent],
+ outputs: [StopEvent],
+ },
+ longRunning,
+ );
const result = await workflow.run("Let's start");
- console.log(result.data.result);
+ console.log(result.data);
}
async function main() {
await timeout();
+ console.log("---");
await notimeout();
}
diff --git a/examples/workflow/validation.ts b/examples/workflow/validation.ts
index f344af9edc..7be348cfcd 100644
--- a/examples/workflow/validation.ts
+++ b/examples/workflow/validation.ts
@@ -1,10 +1,9 @@
import {
- Context,
StartEvent,
StopEvent,
Workflow,
WorkflowEvent,
-} from "@llamaindex/core/workflow";
+} from "@llamaindex/workflow";
import { OpenAI } from "llamaindex";
// Create LLM instance
@@ -13,40 +12,66 @@ const llm = new OpenAI();
// Create a custom event type
export class JokeEvent extends WorkflowEvent<{ joke: string }> {}
-const generateJoke = async (_context: Context, ev: StartEvent) => {
- const prompt = `Write your best joke about ${ev.data.input}.`;
+const generateJoke = async (_: unknown, ev: StartEvent) => {
+ const prompt = `Write your best joke about ${ev.data}.`;
const response = await llm.complete({ prompt });
return new JokeEvent({ joke: response.text });
};
-const critiqueJoke = async (_context: Context, ev: JokeEvent) => {
+const critiqueJoke = async (_: unknown, ev: JokeEvent) => {
const prompt = `Give a thorough critique of the following joke: ${ev.data.joke}`;
const response = await llm.complete({ prompt });
- return new StopEvent({ result: response.text });
+ return new StopEvent(response.text);
};
async function validateFails() {
try {
- const jokeFlow = new Workflow({ verbose: true, validate: true });
- jokeFlow.addStep(StartEvent, generateJoke, { outputs: StopEvent });
- jokeFlow.addStep(JokeEvent, critiqueJoke, { outputs: StopEvent });
- await jokeFlow.run("pirates");
+ const jokeFlow = new Workflow();
+ jokeFlow.addStep(
+ {
+ inputs: [StartEvent],
+ outputs: [StopEvent],
+ },
+ // @ts-expect-error outputs should be JokeEvent
+ generateJoke,
+ );
+ jokeFlow.addStep(
+ {
+ inputs: [JokeEvent],
+ outputs: [StopEvent],
+ },
+ critiqueJoke,
+ );
+ await jokeFlow.run("pirates").strict();
} catch (e) {
console.error("Validation failed:", e);
}
}
async function validate() {
- const jokeFlow = new Workflow({ verbose: true, validate: true });
- jokeFlow.addStep(StartEvent, generateJoke, { outputs: JokeEvent });
- jokeFlow.addStep(JokeEvent, critiqueJoke, { outputs: StopEvent });
- const result = await jokeFlow.run("pirates");
- console.log(result.data.result);
+ const jokeFlow = new Workflow();
+ jokeFlow.addStep(
+ {
+ inputs: [StartEvent],
+ outputs: [JokeEvent],
+ },
+ generateJoke,
+ );
+ jokeFlow.addStep(
+ {
+ inputs: [JokeEvent],
+ outputs: [StopEvent],
+ },
+ critiqueJoke,
+ );
+ const result = await jokeFlow.run("pirates").strict();
+ console.log(result.data);
}
// Usage
async function main() {
await validateFails();
+ console.log("---");
await validate();
}
diff --git a/package.json b/package.json
index 2ac6d8c68f..6c2afe8d4d 100644
--- a/package.json
+++ b/package.json
@@ -19,28 +19,22 @@
},
"devDependencies": {
"@changesets/cli": "^2.27.5",
- "eslint": "9.14.0",
- "eslint-config-next": "^15.0.2",
+ "eslint": "9.15.0",
+ "eslint-config-next": "^15.0.3",
"eslint-config-prettier": "^9.1.0",
- "eslint-config-turbo": "^2.2.3",
+ "eslint-config-turbo": "^2.3.0",
"eslint-plugin-react": "7.37.2",
"globals": "^15.12.0",
- "husky": "^9.1.6",
+ "husky": "^9.1.7",
"lint-staged": "^15.2.10",
"madge": "^8.0.0",
"prettier": "^3.3.3",
"prettier-plugin-organize-imports": "^4.1.0",
- "turbo": "^2.2.3",
+ "turbo": "^2.3.0",
"typescript": "^5.6.3",
- "typescript-eslint": "^8.13.0"
+ "typescript-eslint": "^8.15.0"
},
"packageManager": "pnpm@9.12.3",
- "pnpm": {
- "overrides": {
- "trim": "1.0.1",
- "protobufjs": "7.2.6"
- }
- },
"lint-staged": {
"(!apps/docs/i18n/**/docusaurus-plugin-content-docs/current/api/*).{js,jsx,ts,tsx,md}": "prettier --write"
}
diff --git a/packages/autotool/CHANGELOG.md b/packages/autotool/CHANGELOG.md
index 96585dd28b..3e58caa9c6 100644
--- a/packages/autotool/CHANGELOG.md
+++ b/packages/autotool/CHANGELOG.md
@@ -1,5 +1,108 @@
# @llamaindex/autotool
+## 5.0.23
+
+### Patch Changes
+
+- llamaindex@0.8.23
+
+## 5.0.22
+
+### Patch Changes
+
+- Updated dependencies [819af45]
+ - llamaindex@0.8.22
+
+## 5.0.21
+
+### Patch Changes
+
+- Updated dependencies [83c3897]
+- Updated dependencies [efa2211]
+ - llamaindex@0.8.21
+
+## 5.0.20
+
+### Patch Changes
+
+- Updated dependencies [02b22da]
+ - llamaindex@0.8.20
+
+## 5.0.19
+
+### Patch Changes
+
+- 90d265c: chore: bump version
+- Updated dependencies [90d265c]
+ - llamaindex@0.8.19
+
+## 5.0.18
+
+### Patch Changes
+
+- Updated dependencies [d17450f]
+ - llamaindex@0.8.18
+
+## 5.0.17
+
+### Patch Changes
+
+- llamaindex@0.8.17
+
+## 5.0.16
+
+### Patch Changes
+
+- llamaindex@0.8.16
+
+## 5.0.15
+
+### Patch Changes
+
+- Updated dependencies [3d503cb]
+- Updated dependencies [5dae534]
+ - llamaindex@0.8.15
+
+## 5.0.14
+
+### Patch Changes
+
+- Updated dependencies [630b425]
+ - llamaindex@0.8.14
+
+## 5.0.13
+
+### Patch Changes
+
+- llamaindex@0.8.13
+
+## 5.0.12
+
+### Patch Changes
+
+- llamaindex@0.8.12
+
+## 5.0.11
+
+### Patch Changes
+
+- llamaindex@0.8.11
+
+## 5.0.10
+
+### Patch Changes
+
+- Updated dependencies [f066e50]
+ - llamaindex@0.8.10
+
+## 5.0.9
+
+### Patch Changes
+
+- Updated dependencies [4fc001c]
+- Updated dependencies [4d4cd8a]
+ - llamaindex@0.8.9
+
## 5.0.8
### Patch Changes
diff --git a/packages/autotool/examples/01_node/CHANGELOG.md b/packages/autotool/examples/01_node/CHANGELOG.md
index b325c686f4..318ed3e305 100644
--- a/packages/autotool/examples/01_node/CHANGELOG.md
+++ b/packages/autotool/examples/01_node/CHANGELOG.md
@@ -1,5 +1,122 @@
# @llamaindex/autotool-01-node-example
+## 0.0.66
+
+### Patch Changes
+
+- llamaindex@0.8.23
+- @llamaindex/autotool@5.0.23
+
+## 0.0.65
+
+### Patch Changes
+
+- Updated dependencies [819af45]
+ - llamaindex@0.8.22
+ - @llamaindex/autotool@5.0.22
+
+## 0.0.64
+
+### Patch Changes
+
+- Updated dependencies [83c3897]
+- Updated dependencies [efa2211]
+ - llamaindex@0.8.21
+ - @llamaindex/autotool@5.0.21
+
+## 0.0.63
+
+### Patch Changes
+
+- Updated dependencies [02b22da]
+ - llamaindex@0.8.20
+ - @llamaindex/autotool@5.0.20
+
+## 0.0.62
+
+### Patch Changes
+
+- Updated dependencies [90d265c]
+ - @llamaindex/autotool@5.0.19
+ - llamaindex@0.8.19
+
+## 0.0.61
+
+### Patch Changes
+
+- Updated dependencies [d17450f]
+ - llamaindex@0.8.18
+ - @llamaindex/autotool@5.0.18
+
+## 0.0.60
+
+### Patch Changes
+
+- llamaindex@0.8.17
+- @llamaindex/autotool@5.0.17
+
+## 0.0.59
+
+### Patch Changes
+
+- llamaindex@0.8.16
+- @llamaindex/autotool@5.0.16
+
+## 0.0.58
+
+### Patch Changes
+
+- Updated dependencies [3d503cb]
+- Updated dependencies [5dae534]
+ - llamaindex@0.8.15
+ - @llamaindex/autotool@5.0.15
+
+## 0.0.57
+
+### Patch Changes
+
+- Updated dependencies [630b425]
+ - llamaindex@0.8.14
+ - @llamaindex/autotool@5.0.14
+
+## 0.0.56
+
+### Patch Changes
+
+- llamaindex@0.8.13
+- @llamaindex/autotool@5.0.13
+
+## 0.0.55
+
+### Patch Changes
+
+- llamaindex@0.8.12
+- @llamaindex/autotool@5.0.12
+
+## 0.0.54
+
+### Patch Changes
+
+- llamaindex@0.8.11
+- @llamaindex/autotool@5.0.11
+
+## 0.0.53
+
+### Patch Changes
+
+- Updated dependencies [f066e50]
+ - llamaindex@0.8.10
+ - @llamaindex/autotool@5.0.10
+
+## 0.0.52
+
+### Patch Changes
+
+- Updated dependencies [4fc001c]
+- Updated dependencies [4d4cd8a]
+ - llamaindex@0.8.9
+ - @llamaindex/autotool@5.0.9
+
## 0.0.51
### Patch Changes
diff --git a/packages/autotool/examples/01_node/package.json b/packages/autotool/examples/01_node/package.json
index 7d277da9f0..69c5c9f45e 100644
--- a/packages/autotool/examples/01_node/package.json
+++ b/packages/autotool/examples/01_node/package.json
@@ -5,7 +5,7 @@
"dependencies": {
"@llamaindex/autotool": "workspace:*",
"llamaindex": "workspace:*",
- "openai": "^4.57.0"
+ "openai": "^4.72.0"
},
"devDependencies": {
"tsx": "^4.19.0"
@@ -13,5 +13,5 @@
"scripts": {
"start": "node --import tsx --import @llamaindex/autotool/node ./src/index.ts"
},
- "version": "0.0.51"
+ "version": "0.0.66"
}
diff --git a/packages/autotool/examples/02_nextjs/CHANGELOG.md b/packages/autotool/examples/02_nextjs/CHANGELOG.md
index a49a78c448..c275f31aa8 100644
--- a/packages/autotool/examples/02_nextjs/CHANGELOG.md
+++ b/packages/autotool/examples/02_nextjs/CHANGELOG.md
@@ -1,5 +1,122 @@
# @llamaindex/autotool-02-next-example
+## 0.1.110
+
+### Patch Changes
+
+- llamaindex@0.8.23
+- @llamaindex/autotool@5.0.23
+
+## 0.1.109
+
+### Patch Changes
+
+- Updated dependencies [819af45]
+ - llamaindex@0.8.22
+ - @llamaindex/autotool@5.0.22
+
+## 0.1.108
+
+### Patch Changes
+
+- Updated dependencies [83c3897]
+- Updated dependencies [efa2211]
+ - llamaindex@0.8.21
+ - @llamaindex/autotool@5.0.21
+
+## 0.1.107
+
+### Patch Changes
+
+- Updated dependencies [02b22da]
+ - llamaindex@0.8.20
+ - @llamaindex/autotool@5.0.20
+
+## 0.1.106
+
+### Patch Changes
+
+- Updated dependencies [90d265c]
+ - @llamaindex/autotool@5.0.19
+ - llamaindex@0.8.19
+
+## 0.1.105
+
+### Patch Changes
+
+- Updated dependencies [d17450f]
+ - llamaindex@0.8.18
+ - @llamaindex/autotool@5.0.18
+
+## 0.1.104
+
+### Patch Changes
+
+- llamaindex@0.8.17
+- @llamaindex/autotool@5.0.17
+
+## 0.1.103
+
+### Patch Changes
+
+- llamaindex@0.8.16
+- @llamaindex/autotool@5.0.16
+
+## 0.1.102
+
+### Patch Changes
+
+- Updated dependencies [3d503cb]
+- Updated dependencies [5dae534]
+ - llamaindex@0.8.15
+ - @llamaindex/autotool@5.0.15
+
+## 0.1.101
+
+### Patch Changes
+
+- Updated dependencies [630b425]
+ - llamaindex@0.8.14
+ - @llamaindex/autotool@5.0.14
+
+## 0.1.100
+
+### Patch Changes
+
+- llamaindex@0.8.13
+- @llamaindex/autotool@5.0.13
+
+## 0.1.99
+
+### Patch Changes
+
+- llamaindex@0.8.12
+- @llamaindex/autotool@5.0.12
+
+## 0.1.98
+
+### Patch Changes
+
+- llamaindex@0.8.11
+- @llamaindex/autotool@5.0.11
+
+## 0.1.97
+
+### Patch Changes
+
+- Updated dependencies [f066e50]
+ - llamaindex@0.8.10
+ - @llamaindex/autotool@5.0.10
+
+## 0.1.96
+
+### Patch Changes
+
+- Updated dependencies [4fc001c]
+- Updated dependencies [4d4cd8a]
+ - llamaindex@0.8.9
+ - @llamaindex/autotool@5.0.9
+
## 0.1.95
### Patch Changes
diff --git a/packages/autotool/examples/02_nextjs/package.json b/packages/autotool/examples/02_nextjs/package.json
index 86e3109432..c68e814ddc 100644
--- a/packages/autotool/examples/02_nextjs/package.json
+++ b/packages/autotool/examples/02_nextjs/package.json
@@ -1,7 +1,7 @@
{
"name": "@llamaindex/autotool-02-next-example",
"private": true,
- "version": "0.1.95",
+ "version": "0.1.110",
"scripts": {
"dev": "next dev",
"build": "next build",
@@ -10,17 +10,17 @@
"dependencies": {
"@llamaindex/autotool": "workspace:*",
"@radix-ui/react-slot": "^1.1.0",
- "ai": "^3.3.21",
+ "ai": "^4.0.0",
"class-variance-authority": "^0.7.0",
"dotenv": "^16.3.1",
"llamaindex": "workspace:*",
- "lucide-react": "^0.436.0",
- "next": "15.0.2",
+ "lucide-react": "^0.460.0",
+ "next": "15.0.3",
"react": "^18.3.1",
"react-dom": "^18.3.1",
"react-markdown": "^9.0.1",
"react-syntax-highlighter": "^15.5.0",
- "sonner": "^1.5.0",
+ "sonner": "^1.7.0",
"tailwind-merge": "^2.5.2"
},
"devDependencies": {
@@ -30,8 +30,8 @@
"@types/react-syntax-highlighter": "^15.5.11",
"autoprefixer": "^10.4.20",
"cross-env": "^7.0.3",
- "postcss": "^8.4.41",
- "tailwindcss": "^3.4.10",
+ "postcss": "^8.4.49",
+ "tailwindcss": "^3.4.15",
"typescript": "^5.6.3"
}
}
diff --git a/packages/autotool/package.json b/packages/autotool/package.json
index b1bdbb36c2..2d796342fe 100644
--- a/packages/autotool/package.json
+++ b/packages/autotool/package.json
@@ -1,7 +1,7 @@
{
"name": "@llamaindex/autotool",
"type": "module",
- "version": "5.0.8",
+ "version": "5.0.23",
"description": "auto transpile your JS function to LLM Agent compatible",
"files": [
"dist",
@@ -45,10 +45,10 @@
"dev": "bunchee --watch"
},
"dependencies": {
- "@swc/core": "^1.7.22",
- "jotai": "2.8.4",
+ "@swc/core": "^1.9.2",
+ "jotai": "2.10.2",
"typedoc": "^0.26.11",
- "unplugin": "^1.12.2"
+ "unplugin": "^1.16.0"
},
"peerDependencies": {
"llamaindex": "workspace:*",
@@ -72,11 +72,11 @@
"@types/node": "^22.9.0",
"bunchee": "5.6.1",
"llamaindex": "workspace:*",
- "next": "15.0.2",
- "rollup": "^4.24.4",
+ "next": "15.0.3",
+ "rollup": "^4.27.3",
"tsx": "^4.19.0",
"typescript": "^5.6.3",
- "vitest": "^2.1.4",
+ "vitest": "^2.1.5",
"webpack": "^5.94.0"
}
}
diff --git a/packages/autotool/src/internal/index.ts b/packages/autotool/src/internal/index.ts
index ee58526ab9..b9accbb72e 100644
--- a/packages/autotool/src/internal/index.ts
+++ b/packages/autotool/src/internal/index.ts
@@ -22,7 +22,7 @@ export type InfoString = {
  parameterMapping: Record<string, string>;
};
-export const store = createStore();
+export const store: ReturnType<typeof createStore> = createStore();
export const toolMetadataAtom = atom<[ToolMetadata, Info][]>([]);
// eslint-disable-next-line @typescript-eslint/no-explicit-any
 export const toolsAtom = atom<Record<string, (...args: any[]) => any>>({});
diff --git a/packages/cloud/CHANGELOG.md b/packages/cloud/CHANGELOG.md
index d8f0207701..e37590afcd 100644
--- a/packages/cloud/CHANGELOG.md
+++ b/packages/cloud/CHANGELOG.md
@@ -1,5 +1,82 @@
# @llamaindex/cloud
+## 2.0.15
+
+### Patch Changes
+
+- Updated dependencies [d2b2722]
+ - @llamaindex/env@0.1.23
+ - @llamaindex/core@0.4.15
+
+## 2.0.14
+
+### Patch Changes
+
+- Updated dependencies [969365c]
+ - @llamaindex/env@0.1.22
+ - @llamaindex/core@0.4.14
+
+## 2.0.13
+
+### Patch Changes
+
+- 90d265c: chore: bump version
+- Updated dependencies [90d265c]
+ - @llamaindex/core@0.4.13
+ - @llamaindex/env@0.1.21
+
+## 2.0.12
+
+### Patch Changes
+
+- Updated dependencies [ef4f63d]
+ - @llamaindex/core@0.4.12
+
+## 2.0.11
+
+### Patch Changes
+
+- Updated dependencies [6d22fa2]
+ - @llamaindex/core@0.4.11
+
+## 2.0.10
+
+### Patch Changes
+
+- Updated dependencies [a7b0ac3]
+- Updated dependencies [c69605f]
+ - @llamaindex/core@0.4.10
+
+## 2.0.9
+
+### Patch Changes
+
+- Updated dependencies [7ae6eaa]
+ - @llamaindex/core@0.4.9
+
+## 2.0.8
+
+### Patch Changes
+
+- Updated dependencies [f865c98]
+ - @llamaindex/core@0.4.8
+
+## 2.0.7
+
+### Patch Changes
+
+- Updated dependencies [d89ebe0]
+- Updated dependencies [fd8c882]
+ - @llamaindex/core@0.4.7
+
+## 2.0.6
+
+### Patch Changes
+
+- Updated dependencies [4fc001c]
+ - @llamaindex/env@0.1.20
+ - @llamaindex/core@0.4.6
+
## 2.0.5
### Patch Changes
diff --git a/packages/cloud/package.json b/packages/cloud/package.json
index c8549a2381..8cb4231115 100644
--- a/packages/cloud/package.json
+++ b/packages/cloud/package.json
@@ -1,6 +1,6 @@
{
"name": "@llamaindex/cloud",
- "version": "2.0.5",
+ "version": "2.0.15",
"type": "module",
"license": "MIT",
"scripts": {
@@ -50,8 +50,8 @@
"directory": "packages/cloud"
},
"devDependencies": {
- "@hey-api/client-fetch": "^0.4.2",
- "@hey-api/openapi-ts": "^0.54.3",
+ "@hey-api/client-fetch": "^0.4.4",
+ "@hey-api/openapi-ts": "^0.56.0",
"@llamaindex/core": "workspace:*",
"@llamaindex/env": "workspace:*",
"bunchee": "5.6.1"
diff --git a/packages/community/CHANGELOG.md b/packages/community/CHANGELOG.md
index d37c5557d7..0aadf3d08c 100644
--- a/packages/community/CHANGELOG.md
+++ b/packages/community/CHANGELOG.md
@@ -1,5 +1,82 @@
# @llamaindex/community
+## 0.0.73
+
+### Patch Changes
+
+- Updated dependencies [d2b2722]
+ - @llamaindex/env@0.1.23
+ - @llamaindex/core@0.4.15
+
+## 0.0.72
+
+### Patch Changes
+
+- Updated dependencies [969365c]
+ - @llamaindex/env@0.1.22
+ - @llamaindex/core@0.4.14
+
+## 0.0.71
+
+### Patch Changes
+
+- 90d265c: chore: bump version
+- Updated dependencies [90d265c]
+ - @llamaindex/core@0.4.13
+ - @llamaindex/env@0.1.21
+
+## 0.0.70
+
+### Patch Changes
+
+- Updated dependencies [ef4f63d]
+ - @llamaindex/core@0.4.12
+
+## 0.0.69
+
+### Patch Changes
+
+- Updated dependencies [6d22fa2]
+ - @llamaindex/core@0.4.11
+
+## 0.0.68
+
+### Patch Changes
+
+- Updated dependencies [a7b0ac3]
+- Updated dependencies [c69605f]
+ - @llamaindex/core@0.4.10
+
+## 0.0.67
+
+### Patch Changes
+
+- Updated dependencies [7ae6eaa]
+ - @llamaindex/core@0.4.9
+
+## 0.0.66
+
+### Patch Changes
+
+- Updated dependencies [f865c98]
+ - @llamaindex/core@0.4.8
+
+## 0.0.65
+
+### Patch Changes
+
+- Updated dependencies [d89ebe0]
+- Updated dependencies [fd8c882]
+ - @llamaindex/core@0.4.7
+
+## 0.0.64
+
+### Patch Changes
+
+- Updated dependencies [4fc001c]
+ - @llamaindex/env@0.1.20
+ - @llamaindex/core@0.4.6
+
## 0.0.63
### Patch Changes
diff --git a/packages/community/package.json b/packages/community/package.json
index c842588480..2999ac7d8b 100644
--- a/packages/community/package.json
+++ b/packages/community/package.json
@@ -1,7 +1,7 @@
{
"name": "@llamaindex/community",
"description": "Community package for LlamaIndexTS",
- "version": "0.0.63",
+ "version": "0.0.73",
"type": "module",
"types": "dist/type/index.d.ts",
"main": "dist/cjs/index.js",
@@ -46,8 +46,8 @@
"bunchee": "5.6.1"
},
"dependencies": {
- "@aws-sdk/client-bedrock-agent-runtime": "^3.642.0",
- "@aws-sdk/client-bedrock-runtime": "^3.642.0",
+ "@aws-sdk/client-bedrock-agent-runtime": "^3.693.0",
+ "@aws-sdk/client-bedrock-runtime": "^3.693.0",
"@llamaindex/core": "workspace:*",
"@llamaindex/env": "workspace:*"
}
diff --git a/packages/core/CHANGELOG.md b/packages/core/CHANGELOG.md
index ea562042a2..df3280da9a 100644
--- a/packages/core/CHANGELOG.md
+++ b/packages/core/CHANGELOG.md
@@ -1,5 +1,72 @@
# @llamaindex/core
+## 0.4.15
+
+### Patch Changes
+
+- Updated dependencies [d2b2722]
+ - @llamaindex/env@0.1.23
+
+## 0.4.14
+
+### Patch Changes
+
+- Updated dependencies [969365c]
+ - @llamaindex/env@0.1.22
+
+## 0.4.13
+
+### Patch Changes
+
+- 90d265c: chore: bump version
+- Updated dependencies [90d265c]
+ - @llamaindex/env@0.1.21
+
+## 0.4.12
+
+### Patch Changes
+
+- ef4f63d: refactor: move mockLLM to core
+
+## 0.4.11
+
+### Patch Changes
+
+- 6d22fa2: Get PromptTemplate template variables at run-time
+
+## 0.4.10
+
+### Patch Changes
+
+- a7b0ac3: fix: update tool call llm type
+- c69605f: feat: add async support to BaseChatStore and BaseChatStoreMemory
+
+## 0.4.9
+
+### Patch Changes
+
+- 7ae6eaa: feat: allow pass `additionalChatOptions` to agent
+
+## 0.4.8
+
+### Patch Changes
+
+- f865c98: feat: async get message on chat store
+
+## 0.4.7
+
+### Patch Changes
+
+- d89ebe0: feat: better support for zod schema
+- fd8c882: chore: add warning on legacy workflow API
+
+## 0.4.6
+
+### Patch Changes
+
+- Updated dependencies [4fc001c]
+ - @llamaindex/env@0.1.20
+
## 0.4.5
### Patch Changes
diff --git a/packages/core/package.json b/packages/core/package.json
index a9de6b66c4..363fcc8ff0 100644
--- a/packages/core/package.json
+++ b/packages/core/package.json
@@ -1,7 +1,7 @@
{
"name": "@llamaindex/core",
"type": "module",
- "version": "0.4.5",
+ "version": "0.4.15",
"description": "LlamaIndex Core Module",
"exports": {
"./agent": {
@@ -389,10 +389,10 @@
"url": "https://github.com/run-llama/LlamaIndexTS.git"
},
"devDependencies": {
- "@edge-runtime/vm": "^4.0.3",
+ "@edge-runtime/vm": "^4.0.4",
"ajv": "^8.17.1",
"bunchee": "5.6.1",
- "happy-dom": "^15.11.0",
+ "happy-dom": "^15.11.6",
"natural": "^8.0.1"
},
"dependencies": {
diff --git a/packages/core/src/agent/base.ts b/packages/core/src/agent/base.ts
index 7ee77c55a5..23ed979596 100644
--- a/packages/core/src/agent/base.ts
+++ b/packages/core/src/agent/base.ts
@@ -106,11 +106,17 @@ export type AgentRunnerParams<
>
? AdditionalMessageOptions
: never,
+ AdditionalChatOptions extends object = object,
> = {
llm: AI;
chatHistory: ChatMessage[];
systemPrompt: MessageContent | null;
- runner: AgentWorker<AI, Store, AdditionalMessageOptions>;
+ runner: AgentWorker<
+ AI,
+ Store,
+ AdditionalMessageOptions,
+ AdditionalChatOptions
+ >;
tools:
| BaseToolWithCall[]
 | ((query: MessageContent) => Promise<BaseToolWithCall[]>);
@@ -125,6 +131,7 @@ export type AgentParamsBase<
>
? AdditionalMessageOptions
: never,
+ AdditionalChatOptions extends object = object,
> =
| {
llm?: AI;
@@ -132,6 +139,7 @@ export type AgentParamsBase<
systemPrompt?: MessageContent;
verbose?: boolean;
tools: BaseToolWithCall[];
+ additionalChatOptions?: AdditionalChatOptions;
}
| {
llm?: AI;
@@ -139,6 +147,7 @@ export type AgentParamsBase<
systemPrompt?: MessageContent;
verbose?: boolean;
 toolRetriever: ObjectRetriever<BaseToolWithCall>;
+ additionalChatOptions?: AdditionalChatOptions;
};
/**
@@ -153,21 +162,36 @@ export abstract class AgentWorker<
>
? AdditionalMessageOptions
: never,
+ AdditionalChatOptions extends object = object,
> {
- #taskSet = new Set<TaskStep<AI, Store, AdditionalMessageOptions>>();
- abstract taskHandler: TaskHandler<AI, Store, AdditionalMessageOptions>;
+ #taskSet = new Set<
+ TaskStep<AI, Store, AdditionalMessageOptions, AdditionalChatOptions>
+ >();
+ abstract taskHandler: TaskHandler<
+ AI,
+ Store,
+ AdditionalMessageOptions,
+ AdditionalChatOptions
+ >;
public createTask(
query: MessageContent,
- context: AgentTaskContext<AI, Store, AdditionalMessageOptions>,
- ): ReadableStream<TaskStepOutput<AI, Store, AdditionalMessageOptions>> {
+ context: AgentTaskContext<
+ AI,
+ Store,
+ AdditionalMessageOptions,
+ AdditionalChatOptions
+ >,
+ ): ReadableStream<
+ TaskStepOutput<AI, Store, AdditionalMessageOptions, AdditionalChatOptions>
+ > {
context.store.messages.push({
role: "user",
content: query,
});
const taskOutputStream = createTaskOutputStream(this.taskHandler, context);
return new ReadableStream<
- TaskStepOutput<AI, Store, AdditionalMessageOptions>
+ TaskStepOutput<AI, Store, AdditionalMessageOptions, AdditionalChatOptions>
>({
start: async (controller) => {
for await (const stepOutput of taskOutputStream) {
@@ -176,7 +200,8 @@ export abstract class AgentWorker<
let currentStep: TaskStep<
AI,
Store,
- AdditionalMessageOptions
+ AdditionalMessageOptions,
+ AdditionalChatOptions
> | null = stepOutput.taskStep;
while (currentStep) {
this.#taskSet.delete(currentStep);
@@ -227,6 +252,7 @@ export abstract class AgentRunner<
>
? AdditionalMessageOptions
: never,
+ AdditionalChatOptions extends object = object,
> extends BaseChatEngine {
readonly #llm: AI;
readonly #tools:
@@ -234,7 +260,12 @@ export abstract class AgentRunner<
 | ((query: MessageContent) => Promise<BaseToolWithCall[]>);
readonly #systemPrompt: MessageContent | null = null;
#chatHistory: ChatMessage[];
- readonly #runner: AgentWorker<AI, Store, AdditionalMessageOptions>;
+ readonly #runner: AgentWorker<
+ AI,
+ Store,
+ AdditionalMessageOptions,
+ AdditionalChatOptions
+ >;
readonly #verbose: boolean;
// create extra store
@@ -245,7 +276,7 @@ export abstract class AgentRunner<
}
static defaultTaskHandler: TaskHandler = async (step, enqueueOutput) => {
- const { llm, getTools, stream } = step.context;
+ const { llm, getTools, stream, additionalChatOptions } = step.context;
const lastMessage = step.context.store.messages.at(-1)!.content;
const tools = await getTools(lastMessage);
if (!stream) {
@@ -253,8 +284,9 @@ export abstract class AgentRunner<
stream,
tools,
messages: [...step.context.store.messages],
+ additionalChatOptions,
});
- await stepTools({
+ await stepTools({
response,
tools,
step,
@@ -265,6 +297,7 @@ export abstract class AgentRunner<
stream,
tools,
messages: [...step.context.store.messages],
+ additionalChatOptions,
});
await stepToolsStreaming({
response,
@@ -276,7 +309,12 @@ export abstract class AgentRunner<
};
protected constructor(
- params: AgentRunnerParams,
+ params: AgentRunnerParams<
+ AI,
+ Store,
+ AdditionalMessageOptions,
+ AdditionalChatOptions
+ >,
) {
super();
const { llm, chatHistory, systemPrompt, runner, tools, verbose } = params;
@@ -330,6 +368,7 @@ export abstract class AgentRunner<
stream: boolean = false,
verbose: boolean | undefined = undefined,
chatHistory?: ChatMessage[],
+ additionalChatOptions?: AdditionalChatOptions,
) {
const initialMessages = [...(chatHistory ?? this.#chatHistory)];
if (this.#systemPrompt !== null) {
@@ -348,6 +387,7 @@ export abstract class AgentRunner<
stream,
toolCallCount: 0,
llm: this.#llm,
+ additionalChatOptions: additionalChatOptions ?? {},
getTools: (message) => this.getTools(message),
store: {
...this.createStore(),
@@ -365,13 +405,29 @@ export abstract class AgentRunner<
});
}
- async chat(params: NonStreamingChatEngineParams): Promise<EngineResponse>;
async chat(
- params: StreamingChatEngineParams,
+ params: NonStreamingChatEngineParams<
+ AdditionalMessageOptions,
+ AdditionalChatOptions
+ >,
+ ): Promise<EngineResponse>;
+ async chat(
+ params: StreamingChatEngineParams<
+ AdditionalMessageOptions,
+ AdditionalChatOptions
+ >,
 ): Promise<ReadableStream<EngineResponse>>;
@wrapEventCaller
async chat(
- params: NonStreamingChatEngineParams | StreamingChatEngineParams,
+ params:
+ | NonStreamingChatEngineParams<
+ AdditionalMessageOptions,
+ AdditionalChatOptions
+ >
+ | StreamingChatEngineParams<
+ AdditionalMessageOptions,
+ AdditionalChatOptions
+ >,
 ): Promise<EngineResponse | ReadableStream<EngineResponse>> {
let chatHistory: ChatMessage[] = [];
@@ -388,6 +444,7 @@ export abstract class AgentRunner<
!!params.stream,
false,
chatHistory,
+ params.chatOptions,
);
for await (const stepOutput of task) {
// update chat history for each round
@@ -398,7 +455,12 @@ export abstract class AgentRunner<
return output.pipeThrough(
new TransformStream({
transform(chunk, controller) {
- controller.enqueue(EngineResponse.fromChatResponseChunk(chunk));
+ controller.enqueue(
+ EngineResponse.fromChatResponseChunk(
+ chunk,
+ chunk.sourceNodes,
+ ),
+ );
},
}),
);
diff --git a/packages/core/src/agent/llm.ts b/packages/core/src/agent/llm.ts
index 5050ee2a8f..a04604e217 100644
--- a/packages/core/src/agent/llm.ts
+++ b/packages/core/src/agent/llm.ts
@@ -4,24 +4,66 @@ import { ObjectRetriever } from "../objects";
import { AgentRunner, AgentWorker, type AgentParamsBase } from "./base.js";
import { validateAgentParams } from "./utils.js";
-type LLMParamsBase = AgentParamsBase;
+type LLMParamsBase<
+ AI extends LLM,
+ AdditionalMessageOptions extends object = AI extends LLM<
+ object,
+ infer AdditionalMessageOptions
+ >
+ ? AdditionalMessageOptions
+ : never,
+ AdditionalChatOptions extends object = object,
+> = AgentParamsBase<AI, AdditionalMessageOptions, AdditionalChatOptions>;
-type LLMParamsWithTools = LLMParamsBase & {
+type LLMParamsWithTools<
+ AI extends LLM,
+ AdditionalMessageOptions extends object = AI extends LLM<
+ object,
+ infer AdditionalMessageOptions
+ >
+ ? AdditionalMessageOptions
+ : never,
+ AdditionalChatOptions extends object = object,
+> = LLMParamsBase<AI, AdditionalMessageOptions, AdditionalChatOptions> & {
tools: BaseToolWithCall[];
};
-type LLMParamsWithToolRetriever = LLMParamsBase & {
+type LLMParamsWithToolRetriever<
+ AI extends LLM,
+ AdditionalMessageOptions extends object = AI extends LLM<
+ object,
+ infer AdditionalMessageOptions
+ >
+ ? AdditionalMessageOptions
+ : never,
+ AdditionalChatOptions extends object = object,
+> = LLMParamsBase<AI, AdditionalMessageOptions, AdditionalChatOptions> & {
 toolRetriever: ObjectRetriever<BaseToolWithCall>;
};
-export type LLMAgentParams = LLMParamsWithTools | LLMParamsWithToolRetriever;
+export type LLMAgentParams<
+ AI extends LLM,
+ AdditionalMessageOptions extends object = AI extends LLM<
+ object,
+ infer AdditionalMessageOptions
+ >
+ ? AdditionalMessageOptions
+ : never,
+ AdditionalChatOptions extends object = object,
+> =
+ | LLMParamsWithTools<AI, AdditionalMessageOptions, AdditionalChatOptions>
+ | LLMParamsWithToolRetriever<
+ AI,
+ AdditionalMessageOptions,
+ AdditionalChatOptions
+ >;
 export class LLMAgentWorker extends AgentWorker<LLM> {
taskHandler = AgentRunner.defaultTaskHandler;
}
 export class LLMAgent extends AgentRunner<LLM> {
- constructor(params: LLMAgentParams) {
+ constructor(params: LLMAgentParams<LLM>) {
validateAgentParams(params);
const llm = params.llm ?? (Settings.llm ? (Settings.llm as LLM) : null);
if (!llm)
diff --git a/packages/core/src/agent/types.ts b/packages/core/src/agent/types.ts
index c6e7a78d14..d5063c5e16 100644
--- a/packages/core/src/agent/types.ts
+++ b/packages/core/src/agent/types.ts
@@ -19,6 +19,7 @@ export type AgentTaskContext<
>
? AdditionalMessageOptions
: never,
+ AdditionalChatOptions extends object = object,
> = {
readonly stream: boolean;
readonly toolCallCount: number;
@@ -26,6 +27,7 @@ export type AgentTaskContext<
readonly getTools: (
input: MessageContent,
 ) => BaseToolWithCall[] | Promise<BaseToolWithCall[]>;
+ readonly additionalChatOptions: Partial<AdditionalChatOptions>;
shouldContinue: (
 taskStep: Readonly<TaskStep<Model, Store>>,
) => boolean;
@@ -45,13 +47,26 @@ export type TaskStep<
>
? AdditionalMessageOptions
: never,
+ AdditionalChatOptions extends object = object,
> = {
id: UUID;
- context: AgentTaskContext<Model, Store, AdditionalMessageOptions>;
+ context: AgentTaskContext<
+ Model,
+ Store,
+ AdditionalMessageOptions,
+ AdditionalChatOptions
+ >;
// linked list
- prevStep: TaskStep<Model, Store, AdditionalMessageOptions> | null;
- nextSteps: Set<TaskStep<Model, Store, AdditionalMessageOptions>>;
+ prevStep: TaskStep<
+ Model,
+ Store,
+ AdditionalMessageOptions,
+ AdditionalChatOptions
+ > | null;
+ nextSteps: Set<
+ TaskStep<Model, Store, AdditionalMessageOptions, AdditionalChatOptions>
+ >;
};
export type TaskStepOutput<
@@ -63,8 +78,14 @@ export type TaskStepOutput<
>
? AdditionalMessageOptions
: never,
+ AdditionalChatOptions extends object = object,
> = {
- taskStep: TaskStep<Model, Store, AdditionalMessageOptions>;
+ taskStep: TaskStep<
+ Model,
+ Store,
+ AdditionalMessageOptions,
+ AdditionalChatOptions
+ >;
// output shows the response to the user
output:
 | ChatResponse<AdditionalMessageOptions>
@@ -81,10 +102,16 @@ export type TaskHandler<
>
? AdditionalMessageOptions
: never,
+ AdditionalChatOptions extends object = object,
> = (
- step: TaskStep<Model, Store, AdditionalMessageOptions>,
+ step: TaskStep<Model, Store, AdditionalMessageOptions, AdditionalChatOptions>,
enqueueOutput: (
- taskOutput: TaskStepOutput<Model, Store, AdditionalMessageOptions>,
+ taskOutput: TaskStepOutput<
+ Model,
+ Store,
+ AdditionalMessageOptions,
+ AdditionalChatOptions
+ >,
) => void,
 ) => Promise<void>;
diff --git a/packages/core/src/chat-engine/base.ts b/packages/core/src/chat-engine/base.ts
index b4bd4cf3b1..77bc735001 100644
--- a/packages/core/src/chat-engine/base.ts
+++ b/packages/core/src/chat-engine/base.ts
@@ -16,14 +16,18 @@ export interface BaseChatEngineParams<
export interface StreamingChatEngineParams<
AdditionalMessageOptions extends object = object,
+ AdditionalChatOptions extends object = object,
 > extends BaseChatEngineParams<AdditionalMessageOptions> {
stream: true;
+ chatOptions?: AdditionalChatOptions;
}
export interface NonStreamingChatEngineParams<
AdditionalMessageOptions extends object = object,
+ AdditionalChatOptions extends object = object,
 > extends BaseChatEngineParams<AdditionalMessageOptions> {
stream?: false;
+ chatOptions?: AdditionalChatOptions;
}
export abstract class BaseChatEngine {
diff --git a/packages/core/src/llms/base.ts b/packages/core/src/llms/base.ts
index b04defc65d..46306bfece 100644
--- a/packages/core/src/llms/base.ts
+++ b/packages/core/src/llms/base.ts
@@ -1,5 +1,4 @@
-import { streamConverter } from "../utils";
-import { extractText } from "../utils/llms";
+import { extractText, streamConverter } from "../utils";
import type {
ChatResponse,
ChatResponseChunk,
@@ -67,6 +66,8 @@ export abstract class BaseLLM<
export abstract class ToolCallLLM<
AdditionalChatOptions extends object = object,
-> extends BaseLLM<AdditionalChatOptions, ToolCallLLMMessageOptions> {
+ AdditionalMessageOptions extends
+ ToolCallLLMMessageOptions = ToolCallLLMMessageOptions,
+> extends BaseLLM<AdditionalChatOptions, AdditionalMessageOptions> {
abstract supportToolCall: boolean;
}
diff --git a/packages/core/src/memory/base.ts b/packages/core/src/memory/base.ts
index e89e4822df..6e2af2afca 100644
--- a/packages/core/src/memory/base.ts
+++ b/packages/core/src/memory/base.ts
@@ -65,19 +65,21 @@ export abstract class BaseChatStoreMemory<
super();
}
- getAllMessages(): ChatMessage<AdditionalMessageOptions>[] {
+ getAllMessages():
+ | ChatMessage<AdditionalMessageOptions>[]
+ | Promise<ChatMessage<AdditionalMessageOptions>[]> {
return this.chatStore.getMessages(this.chatStoreKey);
}
- put(messages: ChatMessage<AdditionalMessageOptions>) {
+ put(messages: ChatMessage<AdditionalMessageOptions>): void | Promise<void> {
this.chatStore.addMessage(this.chatStoreKey, messages);
}
- set(messages: ChatMessage<AdditionalMessageOptions>[]) {
+ set(messages: ChatMessage<AdditionalMessageOptions>[]): void | Promise<void> {
this.chatStore.setMessages(this.chatStoreKey, messages);
}
- reset() {
+ reset(): void | Promise<void> {
this.chatStore.deleteMessages(this.chatStoreKey);
}
}
diff --git a/packages/core/src/memory/chat-memory-buffer.ts b/packages/core/src/memory/chat-memory-buffer.ts
index 9fd0673681..68b0b6e2bb 100644
--- a/packages/core/src/memory/chat-memory-buffer.ts
+++ b/packages/core/src/memory/chat-memory-buffer.ts
@@ -33,11 +33,11 @@ export class ChatMemoryBuffer<
}
}
- getMessages(
+ async getMessages(
transientMessages?: ChatMessage[] | undefined,
initialTokenCount: number = 0,
) {
- const messages = this.getAllMessages();
+ const messages = await this.getAllMessages();
if (initialTokenCount > this.tokenLimit) {
throw new Error("Initial token count exceeds token limit");
diff --git a/packages/core/src/prompts/base.ts b/packages/core/src/prompts/base.ts
index 2aacdf3404..b17b11316c 100644
--- a/packages/core/src/prompts/base.ts
+++ b/packages/core/src/prompts/base.ts
@@ -32,6 +32,10 @@ export abstract class BasePromptTemplate<
const Vars extends readonly string[] = string[],
> {
metadata: Metadata = {};
+ /**
+ * Set of template variables used in the prompt template. Used for type hints only.
+ * To get the list of template variables used in the prompt at run-time, use the `vars` method.
+ */
 templateVars: Set<string> = new Set();
options: Partial> = {};
outputParser: BaseOutputParser | undefined;
@@ -223,4 +227,13 @@ export class PromptTemplate<
get template(): Template {
return this.#template;
}
+
+ /**
+ * Returns all the template variables used in the prompt template.
+ */
+ vars(): string[] {
+ const template = this.template;
+ const matches = template.match(/\{([^}]+)\}/g) || [];
+ return [...new Set(matches.map((match) => match.slice(1, -1)))];
+ }
}
diff --git a/packages/core/src/storage/chat-store/base-chat-store.ts b/packages/core/src/storage/chat-store/base-chat-store.ts
index be19928f26..7cbefdbb99 100644
--- a/packages/core/src/storage/chat-store/base-chat-store.ts
+++ b/packages/core/src/storage/chat-store/base-chat-store.ts
@@ -7,7 +7,11 @@ export abstract class BaseChatStore<
key: string,
 messages: ChatMessage<AdditionalMessageOptions>[],
): void;
- abstract getMessages(key: string): ChatMessage<AdditionalMessageOptions>[];
+ abstract getMessages(
+ key: string,
+ ):
+ | ChatMessage<AdditionalMessageOptions>[]
+ | Promise<ChatMessage<AdditionalMessageOptions>[]>;
abstract addMessage(
key: string,
 message: ChatMessage<AdditionalMessageOptions>,
@@ -15,5 +19,7 @@ export abstract class BaseChatStore<
): void;
abstract deleteMessages(key: string): void;
abstract deleteMessage(key: string, idx: number): void;
- abstract getKeys(): IterableIterator<string>;
+ abstract getKeys():
+ | IterableIterator<string>
+ | Promise<IterableIterator<string>>;
}
diff --git a/packages/core/src/tools/function-tool.ts b/packages/core/src/tools/function-tool.ts
index 53010641dd..31334725f7 100644
--- a/packages/core/src/tools/function-tool.ts
+++ b/packages/core/src/tools/function-tool.ts
@@ -4,18 +4,12 @@ import { zodToJsonSchema } from "zod-to-json-schema";
import type { JSONValue } from "../global";
import type { BaseTool, ToolMetadata } from "../llms";
-const kOriginalFn = Symbol("originalFn");
-
 export class FunctionTool<T, R extends JSONValue | Promise<JSONValue>>
 implements BaseTool<T>
{
- [kOriginalFn]?: (input: T) => R;
-
#fn: (input: T) => R;
- #metadata: ToolMetadata<JSONSchemaType<T>>;
- // todo: for the future, we can use zod to validate the input parameters
- // eslint-disable-next-line no-unused-private-class-members
- #zodType: z.ZodType | null = null;
+ readonly #metadata: ToolMetadata<JSONSchemaType<T>>;
+ readonly #zodType: z.ZodType | null = null;
constructor(
fn: (input: T) => R,
 metadata: ToolMetadata<JSONSchemaType<T>>,
@@ -32,6 +26,12 @@ export class FunctionTool<T, R extends JSONValue | Promise<JSONValue>>
 fn: (input: T) => JSONValue | Promise<JSONValue>,
 schema: ToolMetadata<JSONSchemaType<T>>,
 ): FunctionTool<T, JSONValue | Promise<JSONValue>>;
+ static from<R extends z.ZodType>(
+ fn: (input: z.infer<R>) => JSONValue | Promise<JSONValue>,
+ schema: Omit<ToolMetadata, "parameters"> & {
+ parameters: R;
+ },
+ ): FunctionTool<z.infer<R>, JSONValue | Promise<JSONValue>>;
 static from<T, R extends JSONSchemaType<T>>(
 fn: (input: T) => JSONValue | Promise<JSONValue>,
 schema: Omit<ToolMetadata, "parameters"> & {
@@ -40,15 +40,15 @@ export class FunctionTool<T, R extends JSONValue | Promise<JSONValue>>
): FunctionTool;
// eslint-disable-next-line @typescript-eslint/no-explicit-any
static from(fn: any, schema: any): any {
- if (schema.parameter instanceof z.ZodSchema) {
- const jsonSchema = zodToJsonSchema(schema.parameter);
+ if (schema.parameters instanceof z.ZodSchema) {
+ const jsonSchema = zodToJsonSchema(schema.parameters);
return new FunctionTool(
fn,
{
...schema,
parameters: jsonSchema,
},
- schema.parameter,
+ schema.parameters,
);
}
return new FunctionTool(fn, schema);
@@ -58,7 +58,15 @@ export class FunctionTool<T, R extends JSONValue | Promise<JSONValue>>
return this.#metadata as BaseTool["metadata"];
}
- call(input: T) {
+ call = (input: T) => {
+ if (this.#zodType) {
+ const result = this.#zodType.safeParse(input);
+ if (result.success) {
+ return this.#fn.call(null, result.data);
+ } else {
+ console.warn(result.error.errors);
+ }
+ }
return this.#fn.call(null, input);
- }
+ };
}
diff --git a/packages/core/src/utils/index.ts b/packages/core/src/utils/index.ts
index d040f010ab..a682739194 100644
--- a/packages/core/src/utils/index.ts
+++ b/packages/core/src/utils/index.ts
@@ -76,6 +76,7 @@ export {
extractText,
imageToDataUrl,
messagesToHistory,
+ MockLLM,
toToolDescriptions,
} from "./llms";
diff --git a/packages/core/src/utils/llms.ts b/packages/core/src/utils/llms.ts
index 255b82b918..c089336671 100644
--- a/packages/core/src/utils/llms.ts
+++ b/packages/core/src/utils/llms.ts
@@ -2,6 +2,15 @@ import { fs } from "@llamaindex/env";
import { filetypemime } from "magic-bytes.js";
import type {
ChatMessage,
+ ChatResponse,
+ ChatResponseChunk,
+ CompletionResponse,
+ LLM,
+ LLMChatParamsNonStreaming,
+ LLMChatParamsStreaming,
+ LLMCompletionParamsNonStreaming,
+ LLMCompletionParamsStreaming,
+ LLMMetadata,
MessageContent,
MessageContentDetail,
MessageContentTextDetail,
@@ -143,3 +152,82 @@ export async function imageToDataUrl(
}
return await blobToDataUrl(input);
}
+
+export class MockLLM implements LLM {
+ metadata: LLMMetadata;
+ options: {
+ timeBetweenToken: number;
+ responseMessage: string;
+ };
+
+ constructor(options?: {
+ timeBetweenToken?: number;
+ responseMessage?: string;
+ metadata?: LLMMetadata;
+ }) {
+ this.options = {
+ timeBetweenToken: options?.timeBetweenToken ?? 20,
+ responseMessage: options?.responseMessage ?? "This is a mock response",
+ };
+ this.metadata = options?.metadata ?? {
+ model: "MockLLM",
+ temperature: 0.5,
+ topP: 0.5,
+ contextWindow: 1024,
+ tokenizer: undefined,
+ };
+ }
+
+ chat(
+ params: LLMChatParamsStreaming