diff --git a/.changeset/tasty-rules-camp.md b/.changeset/tasty-rules-camp.md new file mode 100644 index 00000000..da66379c --- /dev/null +++ b/.changeset/tasty-rules-camp.md @@ -0,0 +1,9 @@ +--- +'@llamaindex/chat-ui': minor +'@llamaindex/server': minor +'llamaindex-server-examples': patch +'web': patch +'@llamaindex/chat-ui-docs': patch +--- + +support vercel ai sdk ver 5 diff --git a/README.md b/README.md index 98628269..0b6fe3fd 100644 --- a/README.md +++ b/README.md @@ -83,7 +83,7 @@ Components are designed to be composable. You can use them as is: ```tsx import { ChatSection } from '@llamaindex/chat-ui' -import { useChat } from 'ai/react' +import { useChat } from '@ai-sdk/react' const ChatExample = () => { const handler = useChat() @@ -96,7 +96,7 @@ Or you can extend them with your own children components: ```tsx import { ChatSection, ChatMessages, ChatInput } from '@llamaindex/chat-ui' import LlamaCloudSelector from './components/LlamaCloudSelector' // your custom component -import { useChat } from 'ai/react' +import { useChat } from '@ai-sdk/react' const ChatExample = () => { const handler = useChat() @@ -158,7 +158,7 @@ Additionally, you can also override each component's styles by setting custom cl ```tsx import { ChatSection, ChatMessages, ChatInput } from '@llamaindex/chat-ui' -import { useChat } from 'ai/react' +import { useChat } from '@ai-sdk/react' const ChatExample = () => { const handler = useChat() diff --git a/apps/web/app/api/chat/route.ts b/apps/web/app/api/chat/route.ts index 33822095..92fd9745 100644 --- a/apps/web/app/api/chat/route.ts +++ b/apps/web/app/api/chat/route.ts @@ -1,13 +1,13 @@ -import { Message, LlamaIndexAdapter, StreamData } from 'ai' +import { fakeStreamText, TextChunk, writeStream } from '@/app/utils' +import { UIMessage as Message } from '@ai-sdk/react' import { - ChatMessage, + MessageContentDetail, OpenAI, OpenAIEmbedding, Settings, SimpleChatEngine, } from 'llamaindex' import { NextResponse, type NextRequest } from 
'next/server' -import { fakeStreamText } from '@/app/utils' export const runtime = 'nodejs' export const dynamic = 'force-dynamic' @@ -24,13 +24,11 @@ export async function POST(request: NextRequest) { const messages = body.messages const lastMessage = messages[messages.length - 1] - const vercelStreamData = new StreamData() - if (!process.env.OPENAI_API_KEY) { // Return fake stream if API key is not set return new Response(fakeStreamText(), { headers: { - 'Content-Type': 'text/plain', + 'Content-Type': 'text/event-stream', Connection: 'keep-alive', }, }) @@ -38,18 +36,49 @@ export async function POST(request: NextRequest) { const chatEngine = new SimpleChatEngine() + const messageContent = (lastMessage.parts[0] as { text: string }).text + const response = await chatEngine.chat({ - message: lastMessage.content, - chatHistory: messages as ChatMessage[], + message: messageContent, + chatHistory: messages.map(message => ({ + role: message.role, + content: message.parts as MessageContentDetail[], + })), stream: true, }) - return LlamaIndexAdapter.toDataStreamResponse(response, { - data: vercelStreamData, - callbacks: { - onCompletion: async () => { - await vercelStreamData.close() - }, + const sseStream = new ReadableStream({ + async start(controller) { + // Generate a unique message id + const messageId = crypto.randomUUID() + + // Start the text chunk + const startChunk: TextChunk = { id: messageId, type: 'text-start' } + writeStream(controller, startChunk) + + // Consume the response and write the chunks to the controller + for await (const chunk of response) { + writeStream(controller, { + id: messageId, + type: 'text-delta', + delta: chunk.delta, + }) + } + + // End the text chunk + const endChunk: TextChunk = { id: messageId, type: 'text-end' } + writeStream(controller, endChunk) + + controller.close() + }, + }) + + return new Response(sseStream, { + status: 200, + statusText: 'OK', + headers: { + 'content-type': 'text/event-stream', + connection: 'keep-alive', 
}, }) } catch (error) { diff --git a/apps/web/app/demo/canvas/code-preview/page.tsx b/apps/web/app/demo/canvas/code-preview/page.tsx index 8ca3beb5..863cc7b6 100644 --- a/apps/web/app/demo/canvas/code-preview/page.tsx +++ b/apps/web/app/demo/canvas/code-preview/page.tsx @@ -9,19 +9,35 @@ import { useChatCanvas, } from '@llamaindex/chat-ui' import { DynamicComponent } from '@llamaindex/dynamic-ui' -import { Message, useChat } from 'ai/react' +import { UIMessage as Message, useChat } from '@ai-sdk/react' const initialMessages: Message[] = [ { id: 'code-gen1', role: 'user', - content: 'Generate a simple calculator', + parts: [{ type: 'text', text: 'Generate a simple calculator' }], }, { id: 'code-gen2', role: 'assistant', - content: - '\n```annotation\n{"type":"artifact","data":{"type":"code","created_at":1752124365106,"data":{"language":"typescript","file_name":"calculator.tsx","code":"import React, { useState } from \\"react\\"\\nimport { Button } from \\"@/components/ui/button\\"\\nimport { Card } from \\"@/components/ui/card\\"\\nimport { cn } from \\"@/lib/utils\\"\\n\\nconst buttons = [\\n [\\"7\\", \\"8\\", \\"9\\", \\"/\\"],\\n [\\"4\\", \\"5\\", \\"6\\", \\"*\\"],\\n [\\"1\\", \\"2\\", \\"3\\", \\"-\\"],\\n [\\"0\\", \\"C\\", \\"=\\", \\"+\\"],\\n]\\n\\nexport default function Calculator() {\\n const [input, setInput] = useState(\\"\\")\\n const [result, setResult] = useState(null)\\n\\n const handleButtonClick = (value: string) => {\\n if (value === \\"C\\") {\\n setInput(\\"\\")\\n setResult(null)\\n return\\n }\\n if (value === \\"=\\") {\\n try {\\n // eslint-disable-next-line no-eval\\n const evalResult = eval(input)\\n setResult(evalResult.toString())\\n } catch {\\n setResult(\\"Error\\")\\n }\\n return\\n }\\n if (result !== null) {\\n setInput(value.match(/[0-9.]/) ? value : result + value)\\n setResult(null)\\n } else {\\n setInput((prev) => prev + value)\\n }\\n }\\n\\n return (\\n
\\n \\n
\\n {result !== null ? result : input || \\"0\\"}\\n
\\n
\\n {buttons.flat().map((btn, idx) => (\\n handleButtonClick(btn)}\\n >\\n {btn}\\n \\n ))}\\n
\\n
\\n
\\n )\\n}"}}}\n```\nHere\'s how the simple calculator works:\n\n- The calculator displays the current input or the result at the top.\n- You can click the number buttons (0-9) and the operators (+, -, *, /) to build your calculation.\n- Pressing the = button evaluates the expression and shows the result.\n- Pressing the C button clears the input and resets the calculator.\n- If you get an error (like dividing by zero or entering an invalid expression), "Error" will be displayed.\n\nYou can further customize the calculator\'s appearance or add more features as needed! If you have any questions about how the code works or want to add more functionality, let me know!', + parts: [ + { + type: 'text', + text: "Here's the simple calculator:", + }, + { + type: 'data-artifact', + data: { + type: 'code', + created_at: 1752124365106, + data: { + language: 'typescript', + file_name: 'calculator.tsx', + code: 'import React, { useState } from "react"\nimport { Button } from "@/components/ui/button"\nimport { Card } from "@/components/ui/card"\nimport { cn } from "@/lib/utils"\n\nconst buttons = [\n ["7", "8", "9", "/"],\n ["4", "5", "6", "*"],\n ["1", "2", "3", "-"],\n ["0", "C", "=", "+"],\n]\n\nexport default function Calculator() {\n const [input, setInput] = useState("")\n const [result, setResult] = useState(null)\n\n const handleButtonClick = (value: string) => {\n if (value === "C") {\n setInput("")\n setResult(null)\n return\n }\n if (value === "=") {\n try {\n // eslint-disable-next-line no-eval\n const evalResult = eval(input)\n setResult(evalResult.toString())\n } catch {\n setResult("Error")\n }\n return\n }\n if (result !== null) {\n setInput(value.match(/[0-9.]/) ? value : result + value)\n setResult(null)\n } else {\n setInput((prev) => prev + value)\n }\n }\n\n return (\n
\n \n
\n {result !== null ? result : input || "0"}\n
\n
\n {buttons.flat().map((btn, idx) => (\n handleButtonClick(btn)}\n >\n {btn}\n \n ))}\n
\n
\n
\n )\n}', + }, + }, + }, + ], }, ] @@ -30,7 +46,7 @@ export default function Page(): JSX.Element { } function CustomChat() { - const handler = useChat({ initialMessages }) + const handler = useChat({ messages: initialMessages }) return ( - + + @@ -116,32 +113,24 @@ function CustomChatMessages() { ) } - -// custom artifact card for image artifacts -function CustomArtifactCard({ data }: { data: Artifact }) { - return ( - (artifact as ImageArtifact).data.caption} - iconMap={{ image: Image }} - /> - ) -} ` const initialMessages: Message[] = [ { id: '1', role: 'user', - content: 'Generate an image of a cat', + parts: [{ type: 'text', text: 'Generate an image of a cat' }], }, { id: '2', role: 'assistant', - content: - 'Here is a cat image named Millie.' + - `\n\`\`\`annotation\n${JSON.stringify({ - type: 'artifact', + parts: [ + { + type: 'text', + text: 'Here is a cat image named Millie.', + }, + { + type: 'data-artifact', data: { type: 'image', data: { @@ -150,21 +139,24 @@ const initialMessages: Message[] = [ }, created_at: 1745480281756, }, - })} - \n\`\`\`\n`, + }, + ], }, { id: '3', role: 'user', - content: 'Please generate a black cat image', + parts: [{ type: 'text', text: 'Please generate a black cat image' }], }, { id: '4', role: 'assistant', - content: - 'Here is a black cat image named Poppy.' 
+ - `\n\`\`\`annotation\n${JSON.stringify({ - type: 'artifact', + parts: [ + { + type: 'text', + text: 'Here is a black cat image named Poppy.', + }, + { + type: 'data-artifact', data: { type: 'image', data: { @@ -173,8 +165,8 @@ const initialMessages: Message[] = [ }, created_at: 1745480281999, }, - })} - \n\`\`\`\n`, + }, + ], }, ] @@ -184,7 +176,7 @@ export default function Page(): JSX.Element { function CustomChat() { const { copyToClipboard, isCopied } = useCopyToClipboard({ timeout: 2000 }) - const handler = useChat({ initialMessages }) + const handler = useChat({ messages: initialMessages }) return ( - + + @@ -292,14 +281,3 @@ function CustomChatMessages() { ) } - -// custom artifact card for image artifacts -function CustomArtifactCard({ data }: { data: Artifact }) { - return ( - (artifact as ImageArtifact).data.caption} - iconMap={{ image: Image }} - /> - ) -} diff --git a/apps/web/app/demo/canvas/page.tsx b/apps/web/app/demo/canvas/page.tsx index eb855893..dfcc1276 100644 --- a/apps/web/app/demo/canvas/page.tsx +++ b/apps/web/app/demo/canvas/page.tsx @@ -7,7 +7,7 @@ import { ChatMessages, ChatSection, } from '@llamaindex/chat-ui' -import { Message, useChat } from 'ai/react' +import { UIMessage as Message, useChat } from '@ai-sdk/react' import { ArrowRightIcon, Code } from 'lucide-react' import Link from 'next/link' @@ -18,10 +18,10 @@ import { ChatMessages, ChatSection, } from '@llamaindex/chat-ui' -import { useChat } from 'ai/react' +import { useChat } from '@ai-sdk/react' export function CustomChat() { - const handler = useChat({ initialMessages: [] }) + const handler = useChat({ messages: [] }) return ( { try { await uploadFile(file) @@ -37,18 +36,22 @@ export function CustomChat() { console.error(error) } } + const attachments = getAttachments() return ( - +
- {imageUrl ? ( + {image ? ( uploaded ) : null} @@ -56,6 +59,7 @@ export function CustomChat() { @@ -66,7 +70,7 @@ export function CustomChat() { } function CustomChatMessages() { - const { messages, isLoading, append } = useChatUI() + const { messages } = useChatUI() return ( @@ -91,10 +95,9 @@ function CustomChatMessages() { src="/llama.png" /> - - - - + + + @@ -110,18 +113,22 @@ function CustomChatMessages() { const initialMessages: Message[] = [ { id: '1', - content: 'Generate a logo for LlamaIndex', + parts: [{ type: 'text', text: 'Generate a logo for LlamaIndex' }], role: 'user', }, { id: '2', role: 'assistant', - content: - 'Got it! Here is the logo for LlamaIndex. The logo features a friendly llama mascot that represents our AI-powered document indexing and chat capabilities.', - annotations: [ + parts: [ + { + type: 'text', + text: 'Got it! Here is the logo for LlamaIndex. The logo features a friendly llama mascot that represents our AI-powered document indexing and chat capabilities.', + }, { - type: 'image', + type: 'data-file', data: { + filename: 'llama.png', + mediaType: 'image/png', url: '/llama.png', }, }, @@ -130,24 +137,22 @@ const initialMessages: Message[] = [ { id: '3', role: 'user', - content: 'Show me a pdf file', + parts: [{ type: 'text', text: 'Show me a pdf file' }], }, { id: '4', role: 'assistant', - content: - 'Got it! Here is a sample PDF file that demonstrates PDF handling capabilities. This PDF contains some basic text and formatting examples that you can use to test PDF viewing functionality.', - annotations: [ + parts: [ + { + type: 'text', + text: 'Got it! Here is a sample PDF file that demonstrates PDF handling capabilities. 
This PDF contains some basic text and formatting examples that you can use to test PDF viewing functionality.', + }, { - type: 'document_file', + type: 'data-file', data: { - files: [ - { - id: '1', - name: 'sample.pdf', - url: 'https://pdfobject.com/pdf/sample.pdf', - }, - ], + filename: 'sample.pdf', + mediaType: 'application/pdf', + url: 'https://pdfobject.com/pdf/sample.pdf', }, }, ], @@ -172,11 +177,10 @@ export default function Page(): JSX.Element { } function CustomChat() { - const handler = useChat({ initialMessages }) - const { imageUrl, getAnnotations, uploadFile, reset } = useFile({ + const handler = useChat({ messages: initialMessages }) + const { image, uploadFile, reset, getAttachments } = useFile({ uploadAPI: '/chat/upload', }) - const annotations = getAnnotations() const handleUpload = async (file: File) => { try { await uploadFile(file) @@ -184,18 +188,19 @@ function CustomChat() { console.error(error) } } + const attachments = getAttachments() return ( - +
- {imageUrl ? ( + {image ? ( uploaded ) : null} @@ -214,7 +219,7 @@ function CustomChat() { } function CustomChatMessages() { - const { messages, isLoading, append } = useChatUI() + const { messages } = useChatUI() return ( @@ -239,10 +244,9 @@ function CustomChatMessages() { src="/llama.png" /> - - - - + + + diff --git a/apps/web/app/demo/latex/page.tsx b/apps/web/app/demo/latex/page.tsx index 8f0aca7b..e766bfca 100644 --- a/apps/web/app/demo/latex/page.tsx +++ b/apps/web/app/demo/latex/page.tsx @@ -1,13 +1,13 @@ 'use client' -import { Message, useChat } from 'ai/react' +import { UIMessage as Message, useChat } from '@ai-sdk/react' import { ChatSection } from '@llamaindex/chat-ui' import { Code } from '@/components/code' const code = ` import { ChatSection } from '@llamaindex/chat-ui' import '@llamaindex/chat-ui/styles/markdown.css' -import { useChat } from 'ai/react' +import { useChat } from '@ai-sdk/react' function DemoLatexChat() { const handler = useChat() @@ -18,65 +18,109 @@ function DemoLatexChat() { const initialMessages: Message[] = [ { role: 'user', - content: 'The product costs $10 and the discount is $5', + parts: [ + { + type: 'text', + text: 'The product costs $10 and the discount is $5', + }, + ], id: 'DQXPGjYiCEK1MlXg', }, { id: '0wR35AGp8GEDoHZu', role: 'assistant', - content: - 'If the product costs $10 and there is a discount of $5, you can calculate the final price by subtracting the discount from the original price:\n\nFinal Price = Original Price - Discount \nFinal Price = $10 - $5 \nFinal Price = $5\n\nSo, after applying the discount, the product will cost $5.', + parts: [ + { + type: 'text', + text: 'If the product costs $10 and there is a discount of $5, you can calculate the final price by subtracting the discount from the original price:\n\nFinal Price = Original Price - Discount \nFinal Price = $10 - $5 \nFinal Price = $5\n\nSo, after applying the discount, the product will cost $5.', + }, + ], }, { role: 'user', - content: - 'Write js 
code that accept a location and console log Hello from location', + parts: [ + { + type: 'text', + text: 'Write js code that accept a location and console log Hello from location', + }, + ], id: '2VH8xx07DxwibdFX', }, { id: 'Jb1Xs8w8p2RBTdUQ', role: 'assistant', - content: - 'You can create a simple JavaScript function that accepts a location as an argument and logs a message to the console. Here\'s an example of how you can do this:\n\n```javascript\nfunction greetFromLocation(location) {\n console.log(`Hello from ${location}`);\n}\n\n// Example usage:\ngreetFromLocation("New York");\ngreetFromLocation("Tokyo");\ngreetFromLocation("Paris");\n```\n\nIn this code:\n\n- The `greetFromLocation` function takes one parameter, `location`.\n- It uses template literals (the backticks ``) to create a string that includes the location.\n- The `console.log` function is used to print the message to the console.\n\nYou can call the function with different locations to see the output.', + parts: [ + { + type: 'text', + text: 'You can create a simple JavaScript function that accepts a location as an argument and logs a message to the console. 
Here\'s an example of how you can do this:\n\n```javascript\nfunction greetFromLocation(location) {\n console.log(`Hello from ${location}`);\n}\n\n// Example usage:\ngreetFromLocation("New York");\ngreetFromLocation("Tokyo");\ngreetFromLocation("Paris");\n```\n\nIn this code:\n\n- The `greetFromLocation` function takes one parameter, `location`.\n- It uses template literals (the backticks ``) to create a string that includes the location.\n- The `console.log` function is used to print the message to the console.\n\nYou can call the function with different locations to see the output.', + }, + ], }, { role: 'user', - content: 'Formula to caculate triangle', + parts: [ + { + type: 'text', + text: 'Formula to caculate triangle', + }, + ], id: 'G7MEUgkjwqq0RDLk', }, { id: 'aonMZaAcoUglAjka', role: 'assistant', - content: - "To calculate various properties of a triangle, you can use different formulas depending on what you want to find. Here are some common calculations:\n\n1. **Area of a Triangle**:\n - Using base and height: \n \\[\n \\text{Area} = \\frac{1}{2} \\times \\text{base} \\times \\text{height}\n \\]\n - Using Heron's formula (when you know all three sides \\(a\\), \\(b\\), and \\(c\\)):\n \\[\n s = \\frac{a + b + c}{2} \\quad \\text{(semi-perimeter)}\n \\]\n \\[\n \\text{Area} = \\sqrt{s(s-a)(s-b)(s-c)}\n \\]\n\n2. **Perimeter of a Triangle**:\n - If you know the lengths of all three sides \\(a\\), \\(b\\), and \\(c\\):\n \\[\n \\text{Perimeter} = a + b + c\n \\]\n\n3. **Pythagorean Theorem** (for right triangles):\n - If \\(c\\) is the length of the hypotenuse and \\(a\\) and \\(b\\) are the lengths of the other two sides:\n \\[\n c^2 = a^2 + b^2\n \\]\n\n4. 
**Angles**:\n - To find angles using the sides (Law of Cosines):\n \\[\n c^2 = a^2 + b^2 - 2ab \\cdot \\cos(C)\n \\]\n - Rearranging gives:\n \\[\n \\cos(C) = \\frac{a^2 + b^2 - c^2}{2ab}\n \\]\n\nThese formulas can help you calculate the area, perimeter, and angles of a triangle based on the information you have.", + parts: [ + { + type: 'text', + text: "To calculate various properties of a triangle, you can use different formulas depending on what you want to find. Here are some common calculations:\n\n1. **Area of a Triangle**:\n - Using base and height: \n \\[\n \\text{Area} = \\frac{1}{2} \\times \\text{base} \\times \\text{height}\n \\]\n - Using Heron's formula (when you know all three sides \\(a\\), \\(b\\), and \\(c\\)):\n \\[\n s = \\frac{a + b + c}{2} \\quad \\text{(semi-perimeter)}\n \\]\n \\[\n \\text{Area} = \\sqrt{s(s-a)(s-b)(s-c)}\n \\]\n\n2. **Perimeter of a Triangle**:\n - If you know the lengths of all three sides \\(a\\), \\(b\\), and \\(c\\):\n \\[\n \\text{Perimeter} = a + b + c\n \\]\n\n3. **Pythagorean Theorem** (for right triangles):\n - If \\(c\\) is the length of the hypotenuse and \\(a\\) and \\(b\\) are the lengths of the other two sides:\n \\[\n c^2 = a^2 + b^2\n \\]\n\n4. 
**Angles**:\n - To find angles using the sides (Law of Cosines):\n \\[\n c^2 = a^2 + b^2 - 2ab \\cdot \\cos(C)\n \\]\n - Rearranging gives:\n \\[\n \\cos(C) = \\frac{a^2 + b^2 - c^2}{2ab}\n \\]\n\nThese formulas can help you calculate the area, perimeter, and angles of a triangle based on the information you have.", + }, + ], }, { id: 'aonMZaA22oU7lAj222', role: 'user', - content: 'Implement calculate triangle area in js', + parts: [ + { + type: 'text', + text: 'Implement calculate triangle area in js', + }, + ], }, { id: 'a2nMZaA22oU7lAj222', role: 'assistant', - content: - 'To calculate the area of a triangle in JavaScript, you can use the formula:\n\n\\[\n\\text{Area} = \\frac{1}{2} \\times \\text{base} \\times \\text{height}\n\\]\n\nHere\'s a simple implementation in JavaScript:\n\n```javascript\nfunction calculateTriangleArea(base, height) {\n if (base <= 0 || height <= 0) {\n throw new Error("Base and height must be positive numbers.");\n }\n return 0.5 * base * height;\n}\n\n// Example usage:\nconst base = 5; // Example base length\nconst height = 10; // Example height length\n\ntry {\n const area = calculateTriangleArea(base, height);\n console.log(`The area of the triangle is: ${area}`);\n} catch (error) {\n console.error(error.message);\n}\n```\n\n### Explanation:\n1. **Function Definition**: The function `calculateTriangleArea` takes two parameters: `base` and `height`.\n2. **Input Validation**: It checks if the base and height are positive numbers. If not, it throws an error.\n3. **Area Calculation**: It calculates the area using the formula and returns the result.\n4. 
**Example Usage**: The example shows how to call the function and log the result to the console.\n\nYou can modify the `base` and `height` variables to test with different values.', + parts: [ + { + type: 'text', + text: 'To calculate the area of a triangle in JavaScript, you can use the formula:\n\n\\[\n\\text{Area} = \\frac{1}{2} \\times \\text{base} \\times \\text{height}\n\\]\n\nHere\'s a simple implementation in JavaScript:\n\n```javascript\nfunction calculateTriangleArea(base, height) {\n if (base <= 0 || height <= 0) {\n throw new Error("Base and height must be positive numbers.");\n }\n return 0.5 * base * height;\n}\n\n// Example usage:\nconst base = 5; // Example base length\nconst height = 10; // Example height length\n\ntry {\n const area = calculateTriangleArea(base, height);\n console.log(`The area of the triangle is: ${area}`);\n} catch (error) {\n console.error(error.message);\n}\n```\n\n### Explanation:\n1. **Function Definition**: The function `calculateTriangleArea` takes two parameters: `base` and `height`.\n2. **Input Validation**: It checks if the base and height are positive numbers. If not, it throws an error.\n3. **Area Calculation**: It calculates the area using the formula and returns the result.\n4. **Example Usage**: The example shows how to call the function and log the result to the console.\n\nYou can modify the `base` and `height` variables to test with different values.', + }, + ], }, { id: 'aonMZaAcoU7lAjka', role: 'user', - content: 'Popupar formulas in Math', + parts: [ + { + type: 'text', + text: 'Popupar formulas in Math', + }, + ], }, { id: 'aonMZaA22oU7lAjka', role: 'assistant', - content: - "Here are some popular mathematical formulas across various branches of mathematics:\n\n### Algebra\n1. **Quadratic Formula**: \n \\[\n x = \\frac{-b \\pm \\sqrt{b^2 - 4ac}}{2a}\n \\]\n (Used to find the roots of a quadratic equation \\( ax^2 + bx + c = 0 \\))\n\n2. 
**Difference of Squares**: \n \\[\n a^2 - b^2 = (a - b)(a + b)\n \\]\n\n3. **Factoring a Perfect Square**: \n \\[\n a^2 + 2ab + b^2 = (a + b)^2\n \\]\n \\[\n a^2 - 2ab + b^2 = (a - b)^2\n \\]\n\n### Geometry\n1. **Area of a Circle**: \n \\[\n A = \\pi r^2\n \\]\n\n2. **Circumference of a Circle**: \n \\[\n C = 2\\pi r\n \\]\n\n3. **Pythagorean Theorem**: \n \\[\n a^2 + b^2 = c^2\n \\]\n (In a right triangle, where \\( c \\) is the hypotenuse)\n\n4. **Area of a Triangle**: \n \\[\n A = \\frac{1}{2} \\times \\text{base} \\times \\text{height}\n \\]\n\n### Trigonometry\n1. **Sine, Cosine, and Tangent**: \n \\[\n \\sin(\\theta) = \\frac{\\text{opposite}}{\\text{hypotenuse}}, \\quad \\cos(\\theta) = \\frac{\\text{adjacent}}{\\text{hypotenuse}}, \\quad \\tan(\\theta) = \\frac{\\text{opposite}}{\\text{adjacent}}\n \\]\n\n2. **Pythagorean Identity**: \n \\[\n \\sin^2(\\theta) + \\cos^2(\\theta) = 1\n \\]\n\n### Calculus\n1. **Derivative of a Function**: \n \\[\n \\frac{d}{dx}(x^n) = nx^{n-1}\n \\]\n\n2. **Integral of a Function**: \n \\[\n \\int x^n \\, dx = \\frac{x^{n+1}}{n+1} + C \\quad (n \\neq -1)\n \\]\n\n3. **Fundamental Theorem of Calculus**: \n \\[\n \\int_a^b f(x) \\, dx = F(b) - F(a)\n \\]\n (Where \\( F \\) is an antiderivative of \\( f \\))\n\n### Statistics\n1. **Mean**: \n \\[\n \\text{Mean} = \\frac{\\sum_{i=1}^{n} x_i}{n}\n \\]\n\n2. **Variance**: \n \\[\n \\sigma^2 = \\frac{\\sum_{i=1}^{n} (x_i - \\mu)^2}{n}\n \\]\n (Where \\( \\mu \\) is the mean)\n\n3. **Standard Deviation**: \n \\[\n \\sigma = \\sqrt{\\sigma^2}\n \\]\n\n### Probability\n1. **Probability of an Event**: \n \\[\n P(A) = \\frac{\\text{Number of favorable outcomes}}{\\text{Total number of outcomes}}\n \\]\n\n2. 
**Bayes' Theorem**: \n \\[\n P(A|B) = \\frac{P(B|A)P(A)}{P(B)}\n \\]\n\nThese formulas are foundational in their respective areas and are widely used in various applications of mathematics.", + parts: [ + { + type: 'text', + text: "Here are some popular mathematical formulas across various branches of mathematics:\n\n### Algebra\n1. **Quadratic Formula**: \n \\[\n x = \\frac{-b \\pm \\sqrt{b^2 - 4ac}}{2a}\n \\]\n (Used to find the roots of a quadratic equation \\( ax^2 + bx + c = 0 \\))\n\n2. **Difference of Squares**: \n \\[\n a^2 - b^2 = (a - b)(a + b)\n \\]\n\n3. **Factoring a Perfect Square**: \n \\[\n a^2 + 2ab + b^2 = (a + b)^2\n \\]\n \\[\n a^2 - 2ab + b^2 = (a - b)^2\n \\]\n\n### Geometry\n1. **Area of a Circle**: \n \\[\n A = \\pi r^2\n \\]\n\n2. **Circumference of a Circle**: \n \\[\n C = 2\\pi r\n \\]\n\n3. **Pythagorean Theorem**: \n \\[\n a^2 + b^2 = c^2\n \\]\n (In a right triangle, where \\( c \\) is the hypotenuse)\n\n4. **Area of a Triangle**: \n \\[\n A = \\frac{1}{2} \\times \\text{base} \\times \\text{height}\n \\]\n\n### Trigonometry\n1. **Sine, Cosine, and Tangent**: \n \\[\n \\sin(\\theta) = \\frac{\\text{opposite}}{\\text{hypotenuse}}, \\quad \\cos(\\theta) = \\frac{\\text{adjacent}}{\\text{hypotenuse}}, \\quad \\tan(\\theta) = \\frac{\\text{opposite}}{\\text{adjacent}}\n \\]\n\n2. **Pythagorean Identity**: \n \\[\n \\sin^2(\\theta) + \\cos^2(\\theta) = 1\n \\]\n\n### Calculus\n1. **Derivative of a Function**: \n \\[\n \\frac{d}{dx}(x^n) = nx^{n-1}\n \\]\n\n2. **Integral of a Function**: \n \\[\n \\int x^n \\, dx = \\frac{x^{n+1}}{n+1} + C \\quad (n \\neq -1)\n \\]\n\n3. **Fundamental Theorem of Calculus**: \n \\[\n \\int_a^b f(x) \\, dx = F(b) - F(a)\n \\]\n (Where \\( F \\) is an antiderivative of \\( f \\))\n\n### Statistics\n1. **Mean**: \n \\[\n \\text{Mean} = \\frac{\\sum_{i=1}^{n} x_i}{n}\n \\]\n\n2. **Variance**: \n \\[\n \\sigma^2 = \\frac{\\sum_{i=1}^{n} (x_i - \\mu)^2}{n}\n \\]\n (Where \\( \\mu \\) is the mean)\n\n3. 
**Standard Deviation**: \n \\[\n \\sigma = \\sqrt{\\sigma^2}\n \\]\n\n### Probability\n1. **Probability of an Event**: \n \\[\n P(A) = \\frac{\\text{Number of favorable outcomes}}{\\text{Total number of outcomes}}\n \\]\n\n2. **Bayes' Theorem**: \n \\[\n P(A|B) = \\frac{P(B|A)P(A)}{P(B)}\n \\]\n\nThese formulas are foundational in their respective areas and are widely used in various applications of mathematics.", + }, + ], }, ] export default function Page(): JSX.Element { - const handler = useChat({ initialMessages }) + const handler = useChat({ messages: initialMessages }) return (
diff --git a/apps/web/app/demo/mermaid/page.tsx b/apps/web/app/demo/mermaid/page.tsx index d173e783..9e5fcca2 100644 --- a/apps/web/app/demo/mermaid/page.tsx +++ b/apps/web/app/demo/mermaid/page.tsx @@ -7,13 +7,13 @@ import { ChatSection, useChatUI, } from '@llamaindex/chat-ui' -import { Message, useChat } from 'ai/react' +import { UIMessage as Message, useChat } from '@ai-sdk/react' import { Code } from '@/components/code' import MermaidDiagram from './mermaid-diagram' const code = ` import { ChatSection, ChatInput, ChatMessage, ChatMessages, useChatUI } from '@llamaindex/chat-ui' -import { useChat } from 'ai/react' +import { useChat } from '@ai-sdk/react' // This demo requires mermaid to be installed in your project: // pnpm add mermaid @@ -53,31 +53,41 @@ const initialMessages: Message[] = [ { id: '1', role: 'user', - content: 'Show me a system architecture diagram', + parts: [ + { + type: 'text', + text: 'Show me a system architecture diagram', + }, + ], }, { id: '2', role: 'assistant', - content: [ - 'Here is a system architecture diagram showing how LlamaIndex ChatUI components interact:', - '', - '```mermaid', - 'graph TD', - ' A[User] -->|Input| B[ChatInput]', - ' B -->|Process| C[ChatSection]', - ' C -->|Render| D[ChatMessages]', - ' D -->|Display| E[ChatMessage]', - ' E -->|Show| F[Content]', - ' F -->|Render| G[Markdown]', - ' F -->|Render| H[Images]', - ' F -->|Render| I[Documents]', - ' F -->|Render| J[Mermaid]', - ' style A fill:#f9f,stroke:#333,stroke-width:2px', - ' style B fill:#bbf,stroke:#333,stroke-width:2px', - ' style C fill:#dfd,stroke:#333,stroke-width:2px', - '```', - '', - ].join('\n'), + parts: [ + { + type: 'text', + text: [ + 'Here is a system architecture diagram showing how LlamaIndex ChatUI components interact:', + '', + '```mermaid', + 'graph TD', + ' A[User] -->|Input| B[ChatInput]', + ' B -->|Process| C[ChatSection]', + ' C -->|Render| D[ChatMessages]', + ' D -->|Display| E[ChatMessage]', + ' E -->|Show| F[Content]', + ' F 
-->|Render| G[Markdown]', + ' F -->|Render| H[Images]', + ' F -->|Render| I[Documents]', + ' F -->|Render| J[Mermaid]', + ' style A fill:#f9f,stroke:#333,stroke-width:2px', + ' style B fill:#bbf,stroke:#333,stroke-width:2px', + ' style C fill:#dfd,stroke:#333,stroke-width:2px', + '```', + '', + ].join('\n'), + }, + ], }, ] @@ -96,7 +106,7 @@ export default function MermaidDemoPage(): JSX.Element { } function MermaidChat() { - const handler = useChat({ initialMessages }) + const handler = useChat({ messages: initialMessages }) return ( @@ -106,7 +116,7 @@ function MermaidChat() { } function MermaidChatMessages() { - const { messages, isLoading, append } = useChatUI() + const { messages } = useChatUI() return ( @@ -118,8 +128,8 @@ function MermaidChatMessages() { className="items-start" > - - + diff --git a/apps/web/app/demo/simple/page.tsx b/apps/web/app/demo/simple/page.tsx index 1a730b4f..a5fe58b6 100644 --- a/apps/web/app/demo/simple/page.tsx +++ b/apps/web/app/demo/simple/page.tsx @@ -1,12 +1,12 @@ 'use client' -import { useChat } from 'ai/react' +import { useChat } from '@ai-sdk/react' import { ChatSection } from '@llamaindex/chat-ui' import { Code } from '@/components/code' const code = ` import { ChatSection } from '@llamaindex/chat-ui' -import { useChat } from 'ai/react' +import { useChat } from "@ai-sdk/react"; function SimpleChat() { const handler = useChat() diff --git a/apps/web/app/utils.ts b/apps/web/app/utils.ts index a2b3e55e..bda867bb 100644 --- a/apps/web/app/utils.ts +++ b/apps/web/app/utils.ts @@ -1,12 +1,36 @@ import { faker } from '@faker-js/faker' +const DATA_PREFIX = 'data: ' // SSE format prefix +const TOKEN_DELAY = 30 // 30ms delay between tokens + +export type TextChunk = { + type: 'text-delta' | 'text-start' | 'text-end' + id: string + delta?: string +} + +export type DataChunk = { + type: `data-${string}` // requires `data-` prefix when sending data parts + data: Record +} + +const encoder = new TextEncoder() + +export const writeStream 
= ( + controller: ReadableStreamDefaultController, + chunk: TextChunk | DataChunk +) => { + controller.enqueue( + encoder.encode(`${DATA_PREFIX}${JSON.stringify(chunk)}\n\n`) + ) +} + export const fakeStreamText = ({ chunkCount = 10, - streamProtocol = 'data', }: { chunkCount?: number - streamProtocol?: 'data' | 'text' } = {}) => { + // Generate sample text blocks const blocks = [ Array.from({ length: chunkCount }, () => ({ delay: faker.number.int({ max: 100, min: 30 }), @@ -18,49 +42,47 @@ export const fakeStreamText = ({ })), ] - const encoder = new TextEncoder() - return new ReadableStream({ async start(controller) { - for (let i = 0; i < blocks.length; i++) { - const block = blocks[i] + async function writeTextMessage(content: string) { + // Generate a unique message id + const messageId = crypto.randomUUID() - for (const chunk of block) { - await new Promise(resolve => setTimeout(resolve, chunk.delay)) + // Start the text chunk + const startChunk: TextChunk = { id: messageId, type: 'text-start' } + writeStream(controller, startChunk) - if (streamProtocol === 'text') { - controller.enqueue(encoder.encode(chunk.texts)) - } else { - controller.enqueue( - encoder.encode(`0:${JSON.stringify(chunk.texts)}\n`) - ) + // Stream tokens one by one + for (const token of content.split(' ')) { + if (token.trim()) { + const deltaChunk: TextChunk = { + id: messageId, + type: 'text-delta', + delta: `${token} `, + } + writeStream(controller, deltaChunk) + await new Promise(resolve => setTimeout(resolve, TOKEN_DELAY)) } } - if (i < blocks.length - 1) { - if (streamProtocol === 'text') { - controller.enqueue(encoder.encode('\n\n')) - } else { - controller.enqueue(encoder.encode(`0:${JSON.stringify('\n\n')}\n`)) - } - } + // End the text chunk + const endChunk: TextChunk = { id: messageId, type: 'text-end' } + writeStream(controller, endChunk) } - if (streamProtocol === 'data') { - controller.enqueue( - encoder.encode( - `d:${JSON.stringify({ - finishReason: 'stop', - usage: { - 
promptTokens: 0, - completionTokens: blocks.reduce( - (sum, block) => sum + block.length, - 0 - ), - }, - })}\n` - ) - ) + // Stream each block as a separate message + for (let i = 0; i < blocks.length; i++) { + const block = blocks[i] + + // Combine all texts in the block into one message + const blockText = block.map(chunk => chunk.texts).join('') + + await writeTextMessage(blockText) + + // Add paragraph break between blocks + if (i < blocks.length - 1) { + await writeTextMessage('\n\n') + } } controller.close() diff --git a/apps/web/package.json b/apps/web/package.json index 93d2e2f8..2c5ac2d1 100644 --- a/apps/web/package.json +++ b/apps/web/package.json @@ -18,7 +18,8 @@ "@llamaindex/dynamic-ui": "workspace:*", "@radix-ui/react-slot": "^1.0.2", "@radix-ui/react-tabs": "^1.1.1", - "ai": "4.0.0", + "@ai-sdk/react": "^2.0.4", + "ai": "^5.0.4", "class-variance-authority": "^0.7.0", "clsx": "^2.1.1", "highlight.js": "^11.10.0", diff --git a/apps/web/public/r/chat.json b/apps/web/public/r/chat.json index bd85f810..657adff9 100644 --- a/apps/web/public/r/chat.json +++ b/apps/web/public/r/chat.json @@ -11,7 +11,7 @@ "files": [ { "path": "registry/chat/chat.tsx", - "content": "'use client'\r\n\r\nimport {\r\n ChatHandler,\r\n ChatSection as ChatSectionUI,\r\n Message,\r\n} from '@llamaindex/chat-ui'\r\n\r\nimport '@llamaindex/chat-ui/styles/markdown.css'\r\nimport '@llamaindex/chat-ui/styles/pdf.css'\r\nimport '@llamaindex/chat-ui/styles/editor.css'\r\nimport { useState } from 'react'\r\n\r\nconst initialMessages: Message[] = [\r\n {\r\n content: 'Write simple Javascript hello world code',\r\n role: 'user',\r\n },\r\n {\r\n role: 'assistant',\r\n content:\r\n 'Got it! Here\\'s the simplest JavaScript code to print \"Hello, World!\" to the console:\\n\\n```javascript\\nconsole.log(\"Hello, World!\");\\n```\\n\\nYou can run this code in any JavaScript environment, such as a web browser\\'s console or a Node.js environment. 
Just paste the code and execute it to see the output.',\r\n },\r\n {\r\n content: 'Write a simple math equation',\r\n role: 'user',\r\n },\r\n {\r\n role: 'assistant',\r\n content:\r\n \"Let's explore a simple mathematical equation using LaTeX:\\n\\n The quadratic formula is: $$x = \\\\frac{-b \\\\pm \\\\sqrt{b^2 - 4ac}}{2a}$$\\n\\nThis formula helps us solve quadratic equations in the form $ax^2 + bx + c = 0$. The solution gives us the x-values where the parabola intersects the x-axis.\",\r\n },\r\n]\r\n\r\nexport function ChatSection() {\r\n // You can replace the handler with a useChat hook from Vercel AI SDK\r\n const handler = useMockChat(initialMessages)\r\n return (\r\n
\r\n \r\n
\r\n )\r\n}\r\n\r\nfunction useMockChat(initMessages: Message[]): ChatHandler {\r\n const [messages, setMessages] = useState(initMessages)\r\n const [input, setInput] = useState('')\r\n const [isLoading, setIsLoading] = useState(false)\r\n\r\n const append = async (message: Message) => {\r\n setIsLoading(true)\r\n\r\n const mockResponse: Message = {\r\n role: 'assistant',\r\n content: '',\r\n }\r\n setMessages(prev => [...prev, message, mockResponse])\r\n\r\n const mockContent =\r\n 'This is a mock response. In a real implementation, this would be replaced with an actual AI response.'\r\n\r\n let streamedContent = ''\r\n const words = mockContent.split(' ')\r\n\r\n for (const word of words) {\r\n await new Promise(resolve => setTimeout(resolve, 100))\r\n streamedContent += (streamedContent ? ' ' : '') + word\r\n setMessages(prev => {\r\n return [\r\n ...prev.slice(0, -1),\r\n {\r\n role: 'assistant',\r\n content: streamedContent,\r\n },\r\n ]\r\n })\r\n }\r\n\r\n setIsLoading(false)\r\n return mockContent\r\n }\r\n\r\n return {\r\n messages,\r\n input,\r\n setInput,\r\n isLoading,\r\n append,\r\n }\r\n}\r\n", + "content": "'use client'\n\nimport {\n ChatHandler,\n ChatSection as ChatSectionUI,\n Message,\n} from '@llamaindex/chat-ui'\n\nimport '@llamaindex/chat-ui/styles/markdown.css'\nimport '@llamaindex/chat-ui/styles/pdf.css'\nimport '@llamaindex/chat-ui/styles/editor.css'\nimport { useState } from 'react'\n\nconst initialMessages: Message[] = [\n {\n id: '1',\n parts: [{ type: 'text', text: 'Write simple Javascript hello world code' }],\n role: 'user',\n },\n {\n id: '2',\n role: 'assistant',\n parts: [\n {\n type: 'text',\n text: 'Got it! Here\\'s the simplest JavaScript code to print \"Hello, World!\" to the console:\\n\\n```javascript\\nconsole.log(\"Hello, World!\");\\n```\\n\\nYou can run this code in any JavaScript environment, such as a web browser\\'s console or a Node.js environment. 
Just paste the code and execute it to see the output.',\n },\n ],\n },\n {\n id: '3',\n parts: [{ type: 'text', text: 'Write a simple math equation' }],\n role: 'user',\n },\n {\n id: '4',\n role: 'assistant',\n parts: [\n {\n type: 'text',\n text: \"Let's explore a simple mathematical equation using LaTeX:\\n\\n The quadratic formula is: $$x = \\\\frac{-b \\\\pm \\\\sqrt{b^2 - 4ac}}{2a}$$\\n\\nThis formula helps us solve quadratic equations in the form $ax^2 + bx + c = 0$. The solution gives us the x-values where the parabola intersects the x-axis.\",\n },\n ],\n },\n]\n\nexport function ChatSection() {\n // You can replace the handler with a useChat hook from Vercel AI SDK\n const handler = useMockChat(initialMessages)\n return (\n
\n \n
\n )\n}\n\nfunction useMockChat(initMessages: Message[]): ChatHandler {\n const [messages, setMessages] = useState(initMessages)\n const [status, setStatus] = useState<\n 'streaming' | 'ready' | 'error' | 'submitted'\n >('ready')\n\n const append = async (message: Message) => {\n const mockResponse: Message = {\n id: '5',\n role: 'assistant',\n parts: [{ type: 'text', text: '' }],\n }\n setMessages(prev => [...prev, message, mockResponse])\n\n const mockContent =\n 'This is a mock response. In a real implementation, this would be replaced with an actual AI response.'\n\n let streamedContent = ''\n const words = mockContent.split(' ')\n\n for (const word of words) {\n await new Promise(resolve => setTimeout(resolve, 100))\n streamedContent += (streamedContent ? ' ' : '') + word\n setMessages(prev => {\n return [\n ...prev.slice(0, -1),\n {\n id: '6',\n role: 'assistant',\n parts: [{ type: 'text', text: streamedContent }],\n },\n ]\n })\n }\n\n return mockContent\n }\n\n return {\n messages,\n status,\n sendMessage: async (message: Message) => {\n setStatus('submitted')\n await append(message)\n setStatus('ready')\n },\n }\n}\n", "type": "registry:block" } ], diff --git a/apps/web/registry/chat/chat.tsx b/apps/web/registry/chat/chat.tsx index f82be7af..1d4e40ba 100644 --- a/apps/web/registry/chat/chat.tsx +++ b/apps/web/registry/chat/chat.tsx @@ -13,22 +13,34 @@ import { useState } from 'react' const initialMessages: Message[] = [ { - content: 'Write simple Javascript hello world code', + id: '1', + parts: [{ type: 'text', text: 'Write simple Javascript hello world code' }], role: 'user', }, { + id: '2', role: 'assistant', - content: - 'Got it! Here\'s the simplest JavaScript code to print "Hello, World!" to the console:\n\n```javascript\nconsole.log("Hello, World!");\n```\n\nYou can run this code in any JavaScript environment, such as a web browser\'s console or a Node.js environment. 
Just paste the code and execute it to see the output.', + parts: [ + { + type: 'text', + text: 'Got it! Here\'s the simplest JavaScript code to print "Hello, World!" to the console:\n\n```javascript\nconsole.log("Hello, World!");\n```\n\nYou can run this code in any JavaScript environment, such as a web browser\'s console or a Node.js environment. Just paste the code and execute it to see the output.', + }, + ], }, { - content: 'Write a simple math equation', + id: '3', + parts: [{ type: 'text', text: 'Write a simple math equation' }], role: 'user', }, { + id: '4', role: 'assistant', - content: - "Let's explore a simple mathematical equation using LaTeX:\n\n The quadratic formula is: $$x = \\frac{-b \\pm \\sqrt{b^2 - 4ac}}{2a}$$\n\nThis formula helps us solve quadratic equations in the form $ax^2 + bx + c = 0$. The solution gives us the x-values where the parabola intersects the x-axis.", + parts: [ + { + type: 'text', + text: "Let's explore a simple mathematical equation using LaTeX:\n\n The quadratic formula is: $$x = \\frac{-b \\pm \\sqrt{b^2 - 4ac}}{2a}$$\n\nThis formula helps us solve quadratic equations in the form $ax^2 + bx + c = 0$. 
The solution gives us the x-values where the parabola intersects the x-axis.", + }, + ], }, ] @@ -44,15 +56,15 @@ export function ChatSection() { function useMockChat(initMessages: Message[]): ChatHandler { const [messages, setMessages] = useState(initMessages) - const [input, setInput] = useState('') - const [isLoading, setIsLoading] = useState(false) + const [status, setStatus] = useState< + 'streaming' | 'ready' | 'error' | 'submitted' + >('ready') const append = async (message: Message) => { - setIsLoading(true) - const mockResponse: Message = { + id: '5', role: 'assistant', - content: '', + parts: [{ type: 'text', text: '' }], } setMessages(prev => [...prev, message, mockResponse]) @@ -69,22 +81,24 @@ function useMockChat(initMessages: Message[]): ChatHandler { return [ ...prev.slice(0, -1), { + id: '6', role: 'assistant', - content: streamedContent, + parts: [{ type: 'text', text: streamedContent }], }, ] }) } - setIsLoading(false) return mockContent } return { messages, - input, - setInput, - isLoading, - append, + status, + sendMessage: async (message: Message) => { + setStatus('submitted') + await append(message) + setStatus('ready') + }, } } diff --git a/docs/chat-ui/annotations.mdx b/docs/chat-ui/annotations.mdx deleted file mode 100644 index 40c59277..00000000 --- a/docs/chat-ui/annotations.mdx +++ /dev/null @@ -1,435 +0,0 @@ ---- -title: Annotations -description: Working with rich content annotations for multimedia and interactive chat experiences ---- - -Annotations are the key to creating rich, interactive chat experiences beyond simple text. They allow you to embed images, files, sources, events, and custom content types directly into chat messages. - -## Annotation System Overview - -Annotations are structured data attached to messages that widgets can render as rich content. The system supports both built-in annotation types and custom annotations for domain-specific content. 
- -### Message Structure with Annotations - -```typescript -interface Message { - id: string - role: 'user' | 'assistant' | 'system' - content: string - annotations?: JSONValue[] -} -``` - -### Built-in Annotation Types - -The library provides several built-in annotation types: - -- **IMAGE** - Image data with URLs -- **DOCUMENT_FILE** - File attachments and metadata -- **SOURCES** - Citation and source references -- **EVENTS** - Process events and function calls -- **AGENT_EVENTS** - Agent-specific events with progress -- **ARTIFACT** - Interactive code and document artifacts -- **SUGGESTED_QUESTIONS** - Follow-up question suggestions - -## Using Annotations - -Annotations automatically render when using the `annotations` property on a message. Here's an example of how to render an image annotation: - -```tsx -const handler = useChat({ - initialMessages: [ - { - role: 'assistant', - content: 'Here is an image', - annotations: [ - { - type: 'image', - data: { - url: '/llama.png', - }, - }, - ], - } -}) - -return ( - - - - - {' '} - {/* Automatically renders IMAGE annotations */} - - - -) -``` - -In the example above, the `ChatMessage.Content.Image` component automatically renders the image annotation retrieved from the `annotations` property on the message which is retrieved by the `useChatMessage` hook. -The annotation is then passed to the `ChatImage` component which renders the image. - -## File Annotations - -Display file attachments with download links and preview capabilities. 
- -### Document File Annotations - -```typescript -const fileAnnotation = { - type: 'DOCUMENT_FILE', - data: { - files: [ - { - id: 'doc1', - name: 'quarterly-report.pdf', - type: 'application/pdf', - url: '/files/quarterly-report.pdf', - size: 2048576, // 2MB in bytes - metadata: { - title: 'Q4 2024 Quarterly Report', - author: 'Finance Team', - pages: 25, - }, - }, - { - id: 'doc2', - name: 'data-analysis.csv', - type: 'text/csv', - url: '/files/data-analysis.csv', - size: 1024000, - metadata: { - rows: 5000, - columns: 12, - }, - }, - ], - }, -} -``` - -## Source Annotations - -Display citations and source references with document grouping. - -### Creating Source Annotations - -```typescript -const sourceAnnotation = { - type: 'sources', - data: { - nodes: [ - { - id: 'source1', - url: '/documents/research-paper.pdf', - metadata: { - title: 'Machine Learning in Healthcare', - author: 'Dr. Jane Smith', - page_number: 15, - section: 'Methodology', - published_date: '2024-01-15', - }, - }, - { - id: 'source2', - url: '/documents/clinical-study.pdf', - metadata: { - title: 'Clinical Trial Results', - author: 'Medical Research Institute', - page_number: 8, - figure: 'Figure 3.2', - }, - }, - ], - }, -} -``` - -### Citation in Content - -Reference sources directly in your content using citation syntax: - -```typescript -const content = ` -Based on recent research [^1], machine learning shows promising results -in medical diagnosis. The clinical trial data [^2] supports these findings -with a 95% accuracy rate. - -[^1]: Machine Learning in Healthcare, p. 15 -[^2]: Clinical Trial Results, Figure 3.2 -` - -return { - role: 'assistant', - content, - annotations: [sourceAnnotation], -} -``` - -## Event Annotations - -Display process events, function calls, and system activities. 
- -### Basic Events - -```typescript -const eventAnnotation = { - type: 'events', - data: [ - { - type: 'function_call', - name: 'search_database', - args: { - query: 'machine learning papers', - limit: 10, - }, - result: 'Found 8 relevant papers', - timestamp: '2024-01-15T10:30:00Z', - }, - { - type: 'tool_use', - name: 'calculate_statistics', - args: { - dataset: 'user_engagement', - }, - result: { - mean: 4.2, - median: 4.1, - std_dev: 0.8, - }, - }, - ], -} -``` - -### Agent Events with Progress - -```typescript -const agentEventAnnotation = { - type: 'agent_events', - data: { - agent_name: 'Research Assistant', - total_steps: 4, - current_step: 2, - progress: 50, - events: [ - { - step: 1, - name: 'Search Documents', - status: 'completed', - result: 'Found 15 relevant documents', - }, - { - step: 2, - name: 'Analyze Content', - status: 'in_progress', - progress: 75, - }, - { - step: 3, - name: 'Generate Summary', - status: 'pending', - }, - { - step: 4, - name: 'Create Recommendations', - status: 'pending', - }, - ], - }, -} -``` - -## Artifact Annotations - -Create interactive code and document artifacts that users can edit. 
- -### Code Artifacts - -```typescript -const codeArtifact = { - type: 'artifact', - data: { - type: 'code', - data: { - title: 'Data Analysis Script', - file_name: 'analyze_data.py', - language: 'python', - code: ` -import pandas as pd -import matplotlib.pyplot as plt - -def analyze_sales_data(file_path): - # Load data - df = pd.read_csv(file_path) - - # Calculate monthly totals - monthly_sales = df.groupby('month')['sales'].sum() - - # Create visualization - plt.figure(figsize=(10, 6)) - monthly_sales.plot(kind='bar') - plt.title('Monthly Sales Analysis') - plt.ylabel('Sales ($)') - plt.show() - - return monthly_sales - -# Usage -sales_data = analyze_sales_data('sales.csv') -print(sales_data) - `, - }, - }, -} -``` - -### Document Artifacts - -```typescript -const documentArtifact = { - type: 'artifact', - data: { - type: 'document', - data: { - title: 'Project Proposal', - content: ` -# AI-Powered Analytics Platform - -## Executive Summary - -This proposal outlines the development of an AI-powered analytics platform -designed to help businesses make data-driven decisions. - -## Key Features - -- **Real-time Data Processing**: Stream analytics with sub-second latency -- **Machine Learning Models**: Automated insight generation -- **Interactive Dashboards**: Self-service analytics for business users - -## Implementation Timeline - -### Phase 1 (Months 1-3) -- Core platform development -- Basic ML model integration - -### Phase 2 (Months 4-6) -- Advanced analytics features -- Dashboard creation tools - -## Budget Estimate - -Total project cost: $250,000 - `, - }, - }, -} -``` - -## Suggested Questions - -Provide interactive follow-up questions to guide the conversation. 
- -```typescript -const suggestedQuestionsAnnotation = { - type: 'suggested_questions', - data: { - questions: [ - 'Can you explain the methodology in more detail?', - 'What are the potential limitations of this approach?', - 'How does this compare to traditional methods?', - 'What would be the next steps for implementation?', - ], - }, -} -``` - -## Custom Annotations - -Create domain-specific annotations for specialized content. - -### Weather Widget Example - -```typescript -// Define custom annotation type -interface WeatherAnnotation { - type: 'weather' - data: { - location: string - temperature: number - condition: string - humidity: number - windSpeed: number - forecast?: Array<{ - day: string - high: number - low: number - condition: string - }> - } -} - -// Create annotation -const weatherAnnotation: WeatherAnnotation = { - type: 'weather', - data: { - location: 'San Francisco, CA', - temperature: 22, - condition: 'sunny', - humidity: 65, - windSpeed: 12, - forecast: [ - { day: 'Tomorrow', high: 24, low: 18, condition: 'cloudy' }, - { day: 'Wednesday', high: 26, low: 20, condition: 'sunny' }, - ], - }, -} -``` - -### Custom Widget Implementation - -```tsx -import { useChatMessage, getAnnotationData } from '@llamaindex/chat-ui' - -interface WeatherData { - location: string - temperature: number - condition: string - humidity: number - windSpeed: number -} - -function WeatherWidget() { - const { message } = useChatMessage() - - const weatherData = getAnnotationData(message, 'weather') - - if (!weatherData?.[0]) return null - - const data = weatherData[0] - // Render weather data... 
-} -``` - -## Annotation Utilities - -### getAnnotationData - -Extract annotation data by type from messages: - -```tsx -import { getAnnotationData } from '@llamaindex/chat-ui' - -// Usage -return getAnnotationData(message, 'weather') -``` - -## Next Steps - -- [Artifacts](./artifacts.mdx) - Learn about interactive code and document artifacts -- [Widgets](./widgets.mdx) - Explore widget implementation details -- [Examples](./examples.mdx) - See complete annotation examples -- [Customization](./customization.mdx) - Style and customize annotation appearance diff --git a/docs/chat-ui/artifacts.mdx b/docs/chat-ui/artifacts.mdx index 80208d74..839a22cc 100644 --- a/docs/chat-ui/artifacts.mdx +++ b/docs/chat-ui/artifacts.mdx @@ -28,9 +28,9 @@ Code artifacts provide interactive code editing with full syntax highlighting an ### Creating Code Artifacts ```typescript -// Server-side: Create code artifact annotation +// Server-side: Create code artifact part const codeArtifact = { - type: 'artifact', + type: 'data-artifact', data: { type: 'code', data: { @@ -106,7 +106,7 @@ export async function POST(request: Request) { // Send code artifact const artifact = { - type: 'artifact', + type: 'data-artifact', data: { type: 'code', data: { @@ -118,11 +118,8 @@ export async function POST(request: Request) { }, } - // wrap the annotation in a code block with the language key is 'annotation' - const codeBlock = `\n\`\`\`annotation\n${JSON.stringify(codeArtifact)}\n\`\`\`\n` - - // send the artifact with the 0: prefix to make it inline - controller.enqueue(encoder.encode(`0:${JSON.stringify(codeBlock)}\\n`)) + // send the artifact with the data: prefix for SSE format + controller.enqueue(encoder.encode(`data: ${JSON.stringify(artifact)}\\n`)) // Send follow-up text controller.enqueue( @@ -137,8 +134,8 @@ export async function POST(request: Request) { return new Response(stream, { headers: { - 'Content-Type': 'text/plain; charset=utf-8', - 'X-Vercel-AI-Data-Stream': 'v1', + 
'Content-Type': 'text/event-stream', + 'Connection': 'keep-alive', }, }) } @@ -178,7 +175,7 @@ Document artifacts provide rich text editing with markdown support and real-time ```typescript const documentArtifact = { - type: 'artifact', + type: 'data-artifact', data: { type: 'document', data: { @@ -372,7 +369,11 @@ Document artifacts provide: import { ChatSection, ChatCanvas } from '@llamaindex/chat-ui' function ChatWithCanvas() { - const handler = useChat({ api: '/api/chat' }) + const handler = useChat({ + transport: new DefaultChatTransport({ + api: '/api/chat', + }), + }) return ( @@ -469,7 +470,7 @@ function CustomChat() { } ``` -You can also custom ArtifactCard for your artifact type. +You can also customize ArtifactCard for your artifact type: ```tsx import { Image } from 'lucide-react' @@ -484,37 +485,27 @@ function CustomArtifactCard({ data }: { data: Artifact }) { /> ) } - -// update markdown annotation renderers to use your custom artifact card - - - ``` -To trigger your custom artifact viewer, the AI response should include an annotation with the matching artifact type: +To trigger your custom artifact viewer, the AI response should include a part with the matching artifact type: -```tsx -// Example of how to create an artifact in AI response -const response = `Here is your image! 
- -\`\`\`annotation -${JSON.stringify({ - type: 'artifact', - data: { - type: 'image', // This matches your viewer's check - data: { - imageUrl: 'https://example.com/image.jpg', - caption: 'A beautiful landscape' - }, - created_at: Date.now(), - }, -})} -\`\`\` -` +```ts +message = { + parts: [ + { + type: 'data-artifact', + data: { + type: 'code', + data: { + title: 'Data Visualization Script', + file_name: 'visualize_data.py', + language: 'python', + code: 'import matplotlib.pyplot as plt\n# Code...', + }, + } + } + ] +} ``` You can create multiple custom artifact viewers for different content types: @@ -536,7 +527,7 @@ For a complete working example of custom artifact viewers, check out the demo im - Custom `ImageArtifactViewer` implementation - Integration with existing chat components -- Sample messages with artifact annotations +- Sample messages with artifact parts - Copy-to-clipboard functionality for the code ### Canvas Auto-Show @@ -547,7 +538,7 @@ The canvas automatically appears when artifacts are present: // Canvas appears automatically when message contains artifacts - + ``` @@ -732,4 +723,4 @@ function CopyArtifact() { - [Examples](./examples.mdx) - See complete artifact implementations - [Customization](./customization.mdx) - Style and customize artifact appearance - [Widgets](./widgets.mdx) - Explore related widget functionality -- [Annotations](./annotations.mdx) - Understand the annotation system +- [Parts](./parts.mdx) - Understand the message parts system diff --git a/docs/chat-ui/core-components.mdx b/docs/chat-ui/core-components.mdx index 1fda83c1..16a4b53b 100644 --- a/docs/chat-ui/core-components.mdx +++ b/docs/chat-ui/core-components.mdx @@ -13,10 +13,14 @@ The `ChatSection` is the root component that provides context and layout for all ```tsx import { ChatSection } from '@llamaindex/chat-ui' -import { useChat } from 'ai/react' +import { useChat } from '@ai-sdk/react' function MyChat() { - const handler = useChat({ api: '/api/chat' }) + 
const handler = useChat({ + transport: new DefaultChatTransport({ + api: '/api/chat', + }), + }) return } ``` @@ -120,7 +124,7 @@ Action buttons for the message list: ```tsx - + ``` @@ -134,7 +138,7 @@ Action buttons for the message list: ## ChatMessage -Individual message component with full annotation support and role-based rendering. +Individual message component which renders, the avatar, the content and actions of a message. ### Basic Usage @@ -148,7 +152,7 @@ function CustomMessage({ message, isLast }) {
- + @@ -167,6 +171,18 @@ interface ChatMessageProps { } ``` +### Message Structure + +The Message data structure stores the content of a message in so called parts: + +```typescript +interface Message { + id: string + role: 'user' | 'assistant' | 'system' + parts: MessagePart[] +} +``` + ### Sub-components #### ChatMessage.Avatar @@ -183,17 +199,16 @@ User/assistant avatar display: #### ChatMessage.Content -Main content area with annotation support: +This is the main content area which configures a couple of renders for specific message parts: ```tsx - - - - - - - - + + + + + + + ``` @@ -208,18 +223,16 @@ Message-level actions (copy, regenerate, etc.): ``` -### Content Types +### Message Parts -The content system supports multiple annotation types: +There are different renderers available for each part in the message: -- **Markdown** - Rich text with LaTeX support -- **Image** - Image display with preview -- **Artifact** - Interactive code/document editing -- **Source** - Citation and source links -- **Event** - Process events and status -- **AgentEvent** - Agent-specific events with progress -- **DocumentFile** - File attachments -- **SuggestedQuestions** - Follow-up question suggestions +- **TextPart** - Rich text with Markdown and LaTeX support +- **ArtifactPart** - Interactive code/document editing +- **SourcesPart** - Citation and source links +- **EventPart** - Process events and status updates +- **FilePart** - File attachments and uploads +- **SuggestedQuestionsPart** - Follow-up question suggestions ## ChatInput @@ -364,7 +377,11 @@ function ChatWithCanvas() { ```tsx function AdvancedChat() { - const handler = useChat({ api: '/api/chat' }) + const handler = useChat({ + transport: new DefaultChatTransport({ + api: '/api/chat', + }), + }) return ( @@ -407,16 +424,52 @@ All components have access to chat context through hooks: import { useChatUI, useChatMessage } from '@llamaindex/chat-ui' function CustomComponent() { - const { messages, isLoading, append } = 
useChatUI() + const { messages, status, sendMessage } = useChatUI() const { message } = useChatMessage() // Only in message context // Component logic } ``` +## Message Parts System + +Each message can have multiple parts. Parts are rendered in the order they are received. +For more information on parts, see [Message Parts](./parts.mdx). + +### Creating Custom Parts + +```tsx +import { usePart } from '@llamaindex/chat-ui' + +function CustomPart() { + const { data } = usePart() + + return ( +
+ {/* Custom part rendering */} +
+ ) +} +``` + +### Backend Integration + +Parts can be sent from the backend via SSE protocol: + +```typescript +// Backend example +const parts = [ + { type: 'weather', data: weatherInfo }, + { type: 'sources', data: sourceNodes } +] + +// Send as SSE +response.write(`data: ${JSON.stringify({ parts })}\n\n`) +``` + ## Next Steps - [Widgets](./widgets.mdx) - Learn about specialized content widgets -- [Annotations](./annotations.mdx) - Implement rich content support +- [Message Parts](./message-parts.mdx) - Implement rich content support with parts - [Hooks](./hooks.mdx) - Understand the hook system - [Customization](./customization.mdx) - Style and theme the components \ No newline at end of file diff --git a/docs/chat-ui/customization.mdx b/docs/chat-ui/customization.mdx index 02dbad02..fcc89c3d 100644 --- a/docs/chat-ui/customization.mdx +++ b/docs/chat-ui/customization.mdx @@ -108,9 +108,9 @@ function CustomMessageLayout() {
- - - + + + @@ -168,7 +168,7 @@ function RoleBasedMessage() {
- +
@@ -514,7 +514,11 @@ export const useTheme = () => useContext(ThemeContext) ```tsx function ThemedChatSection() { const { theme } = useTheme() - const handler = useChat({ api: '/api/chat' }) + const handler = useChat({ + transport: new DefaultChatTransport({ + api: '/api/chat', + }), + }) const themeClasses = { light: 'bg-white text-gray-900', diff --git a/docs/chat-ui/getting-started.mdx b/docs/chat-ui/getting-started.mdx index dae6d99d..44e58e0a 100644 --- a/docs/chat-ui/getting-started.mdx +++ b/docs/chat-ui/getting-started.mdx @@ -121,7 +121,7 @@ The `markdown.css` file includes styling for code blocks using [highlight.js](ht ### 1. Create a Chat API Route -Set up an API route to handle chat requests. Here's an example using Next.js: +Set up an API route to handle chat requests. Here's an example using Next.js with LlamaIndex: ```typescript // app/api/chat/route.ts @@ -130,13 +130,14 @@ import { NextResponse } from 'next/server' export async function POST(request: Request) { const { messages } = await request.json() - // Your chat logic here + // Your LlamaIndex chat logic here const response = await generateChatResponse(messages) + // Return streaming response in LlamaIndex format return new Response(response, { headers: { - 'Content-Type': 'text/plain; charset=utf-8', - 'X-Vercel-AI-Data-Stream': 'v1', + 'Content-Type': 'text/event-stream', + 'Connection': 'keep-alive', }, }) } @@ -144,17 +145,21 @@ export async function POST(request: Request) { ### 2. 
Create Your Chat Component -The easiest way to get started is to connect the whole `ChatSection` component with `useChat` hook from [vercel/ai](https://github.com/vercel/ai): +The easiest way to get started is to connect the whole `ChatSection` component with `useChat` hook from `@ai-sdk/react`: ```tsx 'use client' import { ChatSection } from '@llamaindex/chat-ui' -import { useChat } from 'ai/react' +import { useChat } from '@ai-sdk/react' export default function Chat() { const handler = useChat({ - api: '/api/chat', + // use transport to specify the chat API endpoint + // https://ai-sdk.dev/docs/migration-guides/migration-guide-5-0#chat-transport-architecture + transport: new DefaultChatTransport({ + api: '/api/chat', + }), }) return ( @@ -172,7 +177,7 @@ Components are designed to be composable. You can use them as is with the simple ```tsx import { ChatSection, ChatMessages, ChatInput } from '@llamaindex/chat-ui' import LlamaCloudSelector from './components/LlamaCloudSelector' // your custom component -import { useChat } from 'ai/react' +import { useChat } from '@ai-sdk/react' const ChatExample = () => { const handler = useChat() @@ -234,7 +239,7 @@ Additionally, you can also override each component's styles by setting custom cl ```tsx import { ChatSection, ChatMessages, ChatInput } from '@llamaindex/chat-ui' -import { useChat } from 'ai/react' +import { useChat } from '@ai-sdk/react' const ChatExample = () => { const handler = useChat() @@ -269,7 +274,11 @@ import { } from '@llamaindex/chat-ui' function CustomChat() { - const handler = useChat({ api: '/api/chat' }) + const handler = useChat({ + transport: new DefaultChatTransport({ + api: '/api/chat', + }), + }) return ( @@ -290,7 +299,7 @@ Provide initial context or welcome messages: ```tsx const handler = useChat({ api: '/api/chat', - initialMessages: [ + messages: [ { id: '1', role: 'assistant', @@ -309,7 +318,7 @@ For any language that the LLM generates, you can specify a custom renderer to re Now that you 
have a basic chat interface running: 1. **Explore Components** - Learn about [Core Components](./core-components.mdx) for customization -2. **Add Rich Content** - Implement [Annotations](./annotations.mdx) for images, files, and sources +2. **Add Rich Content** - Implement [Parts](./parts.mdx) for images, files, and sources 3. **Enable Artifacts** - Set up [Artifacts](./artifacts.mdx) for interactive code and documents 4. **Customize Styling** - Read the [Customization](./customization.mdx) guide for theming @@ -323,7 +332,7 @@ Now that you have a basic chat interface running: **Build errors**: Check that your bundler supports the package's export conditions. -**Chat not working**: Verify your API route is returning the correct response format for the Vercel AI SDK. +**Chat not working**: Verify your API route is returning the correct response format for the LlamaIndex streaming protocol. ### Getting Help diff --git a/docs/chat-ui/hooks.mdx b/docs/chat-ui/hooks.mdx index 783f36af..ad87714a 100644 --- a/docs/chat-ui/hooks.mdx +++ b/docs/chat-ui/hooks.mdx @@ -20,9 +20,8 @@ function CustomChatComponent() { input, setInput, isLoading, - error, - append, - reload, + sendMessage, + regenerate, stop, setMessages, requestData, @@ -30,9 +29,10 @@ function CustomChatComponent() { } = useChatUI() const handleSendMessage = async () => { - await append({ + await sendMessage({ + id: 'user-msg-1', role: 'user', - content: input, + parts: [{ type: 'text', text: input }], }) } @@ -48,14 +48,14 @@ function CustomChatComponent() { **Returned Properties:** -- `messages` - Array of chat messages +- `messages` - Array of chat messages with parts - `input` - Current input value - `setInput` - Function to update input -- `isLoading` - Loading state boolean -- `error` - Error object if any -- `append` - Function to add message -- `reload` - Function to reload last message +- `isLoading` - Loading state boolean (computed from status) +- `status` - Current chat status ('submitted' | 
'streaming' | 'ready' | 'error') +- `sendMessage` - Function to send a message - `stop` - Function to stop current generation +- `regenerate` - Function to regenerate a message - `setMessages` - Function to update message array - `requestData` - Additional request data - `setRequestData` - Function to update request data @@ -73,11 +73,14 @@ function CustomMessageContent() { return (

Role: {message.role}

-

Content: {message.content}

+

Parts: {message.parts.length}

Is last message: {isLast ? 'Yes' : 'No'}

- {message.annotations && ( -

Has annotations: {message.annotations.length}

- )} + + {message.parts.map((part, index) => ( +
+

Part {index + 1}: {part.type}

+
+ ))}
) } @@ -85,9 +88,63 @@ function CustomMessageContent() { **Returned Properties:** -- `message` - Current message object +- `message` - Current message object with parts array - `isLast` - Boolean indicating if this is the last message +### usePart + +Access the current message content within part components. This hook provides type-safe access to specific part types in the current message. + +```tsx +import { usePart } from '@llamaindex/chat-ui' + +function TextPartComponent() { + const textPart = usePart('text') + + if (!textPart) return null + + return

{textPart.text}

+} + +function ArtifactPartComponent() { + const artifactPart = usePart('data-artifact') + + if (!artifactPart) return null + + return ( +
+

{artifactPart.title}

+

Type: {artifactPart.data.type}

+
+ ) +} + +function CustomPartComponent() { + const customPart = usePart('data-custom-type') + + if (!customPart) return null + + return +} +``` + +**Function Overloads:** + +The hook provides automatic type inference for built-in part types: + +- `usePart('text')` → `TextPart | null` +- `usePart('data-file')` → `FilePart | null` +- `usePart('data-artifact')` → `ArtifactPart | null` +- `usePart('data-event')` → `EventPart | null` +- `usePart('data-sources')` → `SourcesPart | null` +- `usePart('data-suggestion')` → `SuggestionPart | null` + +**Usage Notes:** + +- Must be used within a `ChatPartProvider` context +- Returns `null` if the part type doesn't match the current part +- For custom part types, use the generic parameter: `usePart('custom-type')` + ### useChatInput Access input form state and handlers. @@ -136,21 +193,34 @@ Access messages list state and handlers. import { useChatMessages } from '@llamaindex/chat-ui' function CustomMessageList() { - const { messages, isLoading, reload, stop, isEmpty, scrollToBottom } = + const { messages, isLoading, regenerate, stop, isEmpty, scrollToBottom } = useChatMessages() return (
{messages.map((msg, i) => ( -
{msg.content}
+
+

Role: {msg.role}

+
+ {msg.parts.map((part, index) => ( +
+ {part.type === 'text' ? ( +

{part.text}

+ ) : ( +

{JSON.stringify(part.data)}

+ )} +
+ ))} +
+
))}
{isEmpty &&

No messages yet

} - @@ -163,7 +233,7 @@ function CustomMessageList() { - `messages` - Array of messages - `isLoading` - Loading state -- `reload` - Reload last message +- `regenerate` - Regenerate last message - `stop` - Stop generation - `isEmpty` - Boolean if no messages - `scrollToBottom` - Function to scroll to bottom @@ -326,13 +396,11 @@ function useCustomChat() { const messages = useChatMessages() const sendMessageWithMetadata = async (content: string, metadata: any) => { - await chatUI.append( - { - role: 'user', - content, - }, - { data: metadata } - ) + await chatUI.sendMessage({ + id: `user-${Date.now()}`, + role: 'user', + parts: [{ type: 'text', text: content }] + }, { data: metadata }) } const getLastAssistantMessage = () => { @@ -398,7 +466,7 @@ function SafeHookUsage() { ## Next Steps -- [Annotations](./annotations.mdx) - Learn how hooks work with annotation data +- [Parts](./parts.mdx) - Learn how hooks work with message parts - [Customization](./customization.mdx) - Use hooks for custom styling and behavior - [Examples](./examples.mdx) - See complete examples using hooks - [Core Components](./core-components.mdx) - Understand component-hook relationships @@ -620,10 +688,10 @@ function WorkflowChatApp() { > - - - {/* Custom annotations for UIEvents */} - + + + {/* Renderer for custom message parts */} + @@ -664,7 +732,7 @@ def handle_start_event(ev: StartEvent) -> MyNextEvent: #### Built-in Workflow Events -The hook automatically processes workflow events (using `useWorkflow`) and renders them as annotations in the chat interface, making it easy to build rich conversational experiences with LlamaDeploy workflows. +The hook automatically processes workflow events (using `useWorkflow`) and renders them as parts in the chat interface, making it easy to build rich conversational experiences with LlamaDeploy workflows. Your LlamaDeploy workflows can send three main types of events to enhance the chat experience: ##### 1. 
SourceNodesEvent - Citations and References @@ -693,7 +761,7 @@ ctx.write_event_to_stream( ) ``` -> Note: Your `ChatMessage.Content` component needs to have the `ChatMessage.Content.Source` component as a child to display the citations and references (as shown in the example above). +> Note: Your `ChatMessage.Content` component needs to have the `ChatMessage.Part.Source` component as a child to display the citations and references (as shown in the example above). ##### 2. ArtifactEvent - Code and Artifacts @@ -720,11 +788,11 @@ ctx.write_event_to_stream( ) ``` -> Note: Your `ChatMessage.Content` component needs to have the `ChatMessage.Content.Markdown` component as a child to display the artifacts inline in the markdown component (as shown in the example above). +> Note: Your `ChatMessage.Content` component needs to have the `ChatMessage.Part.Markdown` component as a child to display the artifacts inline in the markdown component (as shown in the example above). ##### 3. UIEvent - Custom UI Components -Send custom data to render it in a specialized UI components. In your workflow code, you can send `UIEvent`s for this - for example to render a weather annotation in the chat interface: +Send custom data to render it in a specialized UI components. In your workflow code, you can send `UIEvent`s for this - for example to render a weather part in the chat interface: ```python from llama_index.core.chat_ui.events import ( @@ -752,4 +820,4 @@ ctx.write_event_to_stream( ) ``` -To render this custom UI component, you need to add it as child to your `ChatMessage.Content` component. The example above will render the [`WeatherAnnotation`](../../examples/llamadeploy/chat/ui/components/custom/custom-weather.tsx) component in the chat interface. +To render this custom UI component, you need to add it as child to your `ChatMessage.Content` component. 
The example above will render the [`WeatherPart`](../../examples/llamadeploy/chat/ui/components/custom/custom-weather.tsx) component in the chat interface. diff --git a/docs/chat-ui/index.mdx b/docs/chat-ui/index.mdx index 4bad661d..28b9010c 100644 --- a/docs/chat-ui/index.mdx +++ b/docs/chat-ui/index.mdx @@ -8,7 +8,7 @@ LlamaIndex Chat UI is a comprehensive React component library designed for build ## Key Features - **Complete Chat Interface** - Full-featured chat components with message history, input, and OpenAI-style canvas -- **Rich Annotations** - Support for images, files, sources, events, and custom annotations +- **Rich Parts** - Support for images, files, sources, events, and custom parts - **Interactive Artifacts** - Code and document artifacts with editing and version management - **File Upload Support** - Built-in handling for multiple file types (PDF, images, documents) - **Beautiful** - Built on shadcn/ui for beautiful UI @@ -29,7 +29,7 @@ LlamaIndex Chat UI is a comprehensive React component library designed for build The library includes a comprehensive widget system for handling various content types: - **Content Widgets** - Markdown, code blocks, image display -- **Annotation Widgets** - Sources, events, suggested questions +- **Widgets for Part Rendering** - Sources, events, suggested questions - **Interactive Widgets** - File upload, document editing, code editing ## Getting Started @@ -46,7 +46,7 @@ For more information on configuration, please see detailed guide in [Getting Sta ```tsx import { ChatSection } from '@llamaindex/chat-ui' -import { useChat } from 'ai/react' +import { useChat } from '@ai-sdk/react' export default function MyChat() { const handler = useChat({ @@ -62,7 +62,7 @@ This creates a complete chat interface with: - Message history display - User input with file upload - Loading states and error handling -- Support for rich content and annotations +- Support for rich content using renderers for message parts ## Architecture 
@@ -71,7 +71,7 @@ The library follows a composable architecture where you can: 1. **Use the complete solution** - `ChatSection` provides everything out of the box 2. **Compose custom layouts** - Mix and match components for custom designs 3. **Extend with widgets** - Add specialized content handling -4. **Create custom annotations** - Build domain-specific content types +4. **Create custom parts** - Build domain-specific content types ## Integration diff --git a/docs/chat-ui/meta.json b/docs/chat-ui/meta.json index b89fb86e..d7fed6ff 100644 --- a/docs/chat-ui/meta.json +++ b/docs/chat-ui/meta.json @@ -9,7 +9,7 @@ "core-components", "widgets", "hooks", - "annotations", + "parts", "artifacts", "customization", "examples" diff --git a/docs/chat-ui/parts.mdx b/docs/chat-ui/parts.mdx new file mode 100644 index 00000000..5782c013 --- /dev/null +++ b/docs/chat-ui/parts.mdx @@ -0,0 +1,515 @@ +--- +title: Message Parts +description: Working with rich content parts for multimedia and interactive chat experiences +--- + +Message parts are the building blocks for creating rich, interactive chat experiences beyond simple text. They allow you to embed text, files, sources, events, artifacts, and custom content types directly into chat messages as structured components. + +## Parts System Overview + +Chat-UI supports two fundamental types of parts that make up chat messages: + +### 1. Text Parts +Text parts contain markdown content that gets rendered as formatted text. They use the `text` type and are the primary way to display textual content. + +### 2. Data Parts +Data parts contain structured data for rich interactive components like weather widgets, file attachments, sources, and more. +Both built-in and custom parts are both using the `data-` prefix. We're using here the convention from Vercel AI SDK 5 to use the `data-` prefix to detect data parts in messages. 
+ +## Message Structure with Parts + +```typescript +interface Message { + id: string + role: 'user' | 'assistant' | 'system' + parts: MessagePart[] +} + +// Two types of parts +type MessagePart = TextPart | DataPart + +// Text parts for markdown content +interface TextPart { + type: 'text' + text: string +} + +// Data parts for rich components +interface DataPart { + id?: string // if provided, only the last part with same id is kept + type: string // should use 'data-' prefix for data parts + data?: any +} +``` + +## How Chat-UI Renders Parts + +Parts are automatically rendered when using the `ChatMessage.Content` component. Each part type has a corresponding component that checks if the current part matches its type: + +```tsx + + + {/* Built-in part components */} + + + + + + + + {/* Custom part components */} + + + + +``` + +The rendering system: +1. Iterates through each part in `message.parts` +2. Provides each part to all child components via `ChatPartProvider` +3. Each component uses `usePart(partType)` to check if it should render +4. Only the matching component renders, others return `null` + +## Built-in Parts + +Chat-UI provides several built-in part types for common use cases: + +### Text Parts (`text`) +Display markdown content with syntax highlighting, links, and formatting. + +```typescript +const textPart = { + type: 'text', + text: ` +# Heading +This is **bold** and *italic* text. + +\`\`\`javascript +console.log('Hello, world!') +\`\`\` + ` +} +``` + +### File Parts (`data-file`) +Display file attachments with download links and metadata. + +```typescript +const filePart = { + type: 'data-file', + data: { + name: 'quarterly-report.pdf', + type: 'application/pdf', + url: '/files/quarterly-report.pdf', + size: 2048576 // bytes + } +} +``` + +### Source Parts (`data-sources`) +Display citations and source references with document grouping. 
+ +```typescript +const sourcesPart = { + type: 'data-sources', + data: { + nodes: [ + { + id: 'source1', + url: '/documents/research-paper.pdf', + metadata: { + title: 'Machine Learning in Healthcare', + author: 'Dr. Jane Smith', + page_number: 15, + section: 'Methodology' + } + } + ] + } +} +``` + +### Event Parts (`data-event`) +Display process events, function calls, and system activities with status updates. + +```typescript +const eventPart = { + id: 'search_event', // Same ID will update previous event + type: 'data-event', + data: { + title: 'Calling tool `search_database`', + status: 'success', + data: { + query: 'machine learning papers', + result: 'Found 8 relevant papers' + } + } +} +``` + +### Artifact Parts (`data-artifact`) +Create interactive code and document artifacts that users can edit. + +```typescript +const artifactPart = { + type: 'data-artifact', + data: { + type: 'code', + data: { + title: 'Data Analysis Script', + file_name: 'analyze_data.py', + language: 'python', + code: ` +import pandas as pd +import matplotlib.pyplot as plt + +def analyze_sales_data(file_path): + df = pd.read_csv(file_path) + monthly_sales = df.groupby('month')['sales'].sum() + return monthly_sales + ` + } + } +} +``` + +### Suggestion Parts (`data-suggested_questions`) +Provide interactive follow-up questions to guide conversation. + +```typescript +const suggestionPart = { + type: 'data-suggested_questions', + data: [ + 'Can you explain the methodology in more detail?', + 'What are the potential limitations?', + 'How does this compare to traditional methods?' + ] +} +``` + +## Creating Custom Parts + +Create domain-specific parts for specialized content by implementing a custom render component: + +### 1. Define the Part Type and Data Interface + +```typescript +const WeatherPartType = 'data-weather' + +type WeatherData = { + location: string + temperature: number + condition: string + humidity: number + windSpeed: number +} +``` + +### 2. 
Create the Component + +```tsx +import { usePart } from '@llamaindex/chat-ui' + +export function WeatherPart() { + // usePart returns data only if current part matches the type + const weatherData = usePart(WeatherPartType) + + if (!weatherData) return null + + return ( +
+

{weatherData.location}

+
{weatherData.temperature}°C
+
{weatherData.condition}
+
+ Humidity: {weatherData.humidity}% + Wind: {weatherData.windSpeed} km/h +
+
+ ) +} +``` + +### 3. Add to Message Rendering + +```tsx + + + + + {/* Add your custom component */} + + + +``` + +## Adding Parts from Backend via SSE Protocol + +Parts are streamed using the **Server-Sent Events (SSE)** protocol, which provides real-time communication between the server and client. +Read more about SSE protocol in [Vercel AI SDK 5](https://ai-sdk.dev/docs/migration-guides/migration-guide-5-0#proprietary-protocol---server-sent-events) documentation. + +Here's how the streaming implementation works in the backend: + +### Response Headers + +The server must set specific headers for SSE streaming: + +```typescript +return new Response(stream, { + headers: { + 'Content-Type': 'text/event-stream', + 'Connection': 'keep-alive', + }, +}) +``` + +### Stream Format + +Each chunk sent to the client must follow the SSE format with a `data:` prefix: + +```typescript +const DATA_PREFIX = 'data: ' + +function writeStream(chunk: TextChunk | DataChunk) { + controller.enqueue( + encoder.encode(`${DATA_PREFIX}${JSON.stringify(chunk)}\n\n`) + ) +} +``` + +### Chunk Types + +The streaming protocol supports two types of chunks: + +#### Text Chunks (for streaming text content) +```typescript +interface TextChunk { + type: 'text-start' | 'text-delta' | 'text-end' + id: string + delta?: string // only for text-delta +} + +// Example sequence: +// data: {"type":"text-start","id":"msg-123"} +// data: {"type":"text-delta","id":"msg-123","delta":"Hello "} +// data: {"type":"text-delta","id":"msg-123","delta":"world!"} +// data: {"type":"text-end","id":"msg-123"} +``` + +#### Data Chunks (for rich components) +```typescript +interface DataChunk { + id?: string // optional - same ID replaces previous parts + type: `data-${string}` // requires 'data-' prefix + data: Record +} + +// Example: +// data: {"type":"data-weather","data":{"location":"SF","temp":22}} +``` + +### Implementation Example + +```typescript +const fakeChatStream = (parts: (string | MessagePart)[]): 
ReadableStream => { + return new ReadableStream({ + async start(controller) { + const encoder = new TextEncoder() + + function writeStream(chunk: TextChunk | DataChunk) { + controller.enqueue( + encoder.encode(`${DATA_PREFIX}${JSON.stringify(chunk)}\n\n`) + ) + } + + async function writeText(content: string) { + const messageId = crypto.randomUUID() + + // Start text stream + writeStream({ id: messageId, type: 'text-start' }) + + // Stream tokens + for (const token of content.split(' ')) { + writeStream({ + id: messageId, + type: 'text-delta', + delta: token + ' ' + }) + await new Promise(resolve => setTimeout(resolve, 30)) + } + + // End text stream + writeStream({ id: messageId, type: 'text-end' }) + } + + async function writeData(data: MessagePart) { + writeStream({ + id: data.id, + type: `data-${data.type}`, + data: data.data + }) + } + + // Stream all parts + for (const item of parts) { + if (typeof item === 'string') { + await writeText(item) + } else { + await writeData(item) + } + } + + controller.close() + }, + }) +} +``` + +### Important ID Behavior for Data Parts + +When data parts have the same `id`, only the **last** data part with that ID will exist in `message.parts`. This is useful for: + +- **Single data display**: Show only the final result (e.g., hide loading, show final weather data) +- **Progressive updates**: Update the same component as new data arrives (e.g., streaming events) + +If you want multiple parts of the same type, **don't provide an ID** or use different IDs. + +Example: + +1. When calling a tool, send an event with tool call information: + +```typescript +part1 = { + id: 'demo_sample_event_id', + type: 'data-event', + data: { + title: 'Calling tool `get_weather` with input `San Francisco, CA`', + status: 'pending', + }, +} +``` + +2. When the tool call is completed, send an event with the tool call result. The previous event with the same id will be replaced by the new one. 
+```typescript
+part2 = {
+  id: 'demo_sample_event_id',
+  type: 'data-event',
+  data: {
+    title: 'Got response from tool `get_weather` with input `San Francisco, CA`',
+    status: 'success',
+  },
+}
+```
+
+When checking `message.parts`, you will only see the last event with the final result.
+
+
+### Important Notes
+
+- **SSE Format**: Each message must be prefixed with `data: ` and end with `\n\n`
+- **JSON Encoding**: All chunks are JSON-encoded objects
+- **Text Streaming**: Text content requires start/delta/end sequence for proper rendering
+- **Data Parts**: Must use `data-` prefix in the type field
+- **ID Behavior**: Same IDs in data parts will replace previous parts with that ID
+
+
+## Complete Message Example
+
+```typescript
+const message = {
+  id: 'msg-123',
+  role: 'assistant',
+  parts: [
+    {
+      type: 'text',
+      text: 'I\'ve analyzed your data and here are the results:'
+    },
+    {
+      type: 'data-artifact',
+      data: {
+        type: 'code',
+        data: {
+          title: 'Sales Analysis',
+          file_name: 'analysis.py',
+          language: 'python',
+          code: 'import pandas as pd\n# Analysis code...'
+        }
+      }
+    },
+    {
+      type: 'data-sources',
+      data: {
+        nodes: [
+          {
+            id: '1',
+            url: '/data/sales.csv',
+            metadata: { title: 'Sales Data Q4 2024' }
+          }
+        ]
+      }
+    },
+    {
+      type: 'data-suggested_questions',
+      data: [
+        'Can you explain the quarterly trends?',
+        'What about the seasonal patterns?',
+        'How can we improve performance?'
+      ]
+    }
+  ]
+}
+```
+
+## Utility Functions
+
+### usePart Hook
+
+Extract part data by type within part components:
+
+```tsx
+import { usePart } from '@llamaindex/chat-ui'
+
+function CustomPartComponent() {
+  // Returns data only if current part matches type, null otherwise
+  const weatherData = usePart('data-weather')
+  const textContent = usePart('text')
+
+  // Component logic...
+} +``` + +### getParts Function + +Extract all parts of a specific type from a message: + +```tsx +import { getParts } from '@llamaindex/chat-ui' + +// Get all text content from a message +const allTextParts = getParts(message, 'text') + +// Get all weather data parts +const allWeatherData = getParts(message, 'data-weather') +``` + +This function is useful for: +- Aggregating data from multiple parts +- Building summaries or indexes +- Processing historical data + +## Best Practices + +1. **Use the `data-` prefix** for all custom part types +2. **Provide IDs** only when you want parts to replace each other +3. **Keep data structures simple** and serializable +4. **Handle null cases** in custom components when data doesn't match +5. **Mix text and data parts** to create rich, contextual experiences +6. **Stream progressively** to improve perceived performance + +## Next Steps + +- [Artifacts](./artifacts.mdx) - Learn about interactive code and document artifacts +- [Widgets](./widgets.mdx) - Explore widget implementation details +- [Examples](./examples.mdx) - See complete implementation examples +- [Customization](./customization.mdx) - Style and customize part appearance diff --git a/docs/chat-ui/widgets.mdx b/docs/chat-ui/widgets.mdx index 979c185f..03b72df9 100644 --- a/docs/chat-ui/widgets.mdx +++ b/docs/chat-ui/widgets.mdx @@ -3,13 +3,13 @@ title: Widgets description: Comprehensive guide to specialized content widgets for rich chat experiences --- -Widgets are specialized components for displaying and interacting with rich content in chat messages. They provide functionality beyond simple text, enabling multimedia, interactive elements, and custom annotations. +Widgets are specialized components for displaying and interacting with rich content in chat messages. They provide functionality beyond simple text, enabling multimedia, interactive elements, and custom parts. This section describes how to use them standalone. 
-## Content Widgets +## ChatUI Widgets -### Markdown +## Markdown Renders rich text with LaTeX math support, syntax highlighting, and citations. @@ -155,70 +155,26 @@ export default function Home() { - **Image Support** - Embed images - **Live Preview** - Real-time markdown preview -## Annotation Widgets +### ChatFile -Used for rendering additional rich content in a chat message. See [Annotations](./annotations.mdx) for more information on how to add annotations to a message. - -### ChatImage - -Displays images with preview and zoom functionality. - -```tsx -import { ChatImage } from '@llamaindex/chat-ui/widgets' - -function ImageDisplay() { - return ( - - ) -} -``` - -**Features:** - -- **Zoom & Pan** - Interactive image viewing -- **Lazy Loading** - Performance optimization -- **Alt Text** - Accessibility support -- **Error Handling** - Graceful fallback for broken images - -### ChatFiles - -Displays file attachments with download and preview. +Displays a file attachment like image, pdf, etc. ```tsx -import { ChatFiles } from '@llamaindex/chat-ui/widgets' +import { ChatFile } from '@llamaindex/chat-ui/widgets' function FileDisplay() { return ( - ) } ``` -**Supported File Types:** - -- **PDF** - Inline viewer -- **Images** - Thumbnail preview -- **Text Files** - Content preview -- **CSV** - Data table preview -- **Word Documents** - Document preview - ### ChatSources Displays source citations with document grouping. @@ -253,52 +209,36 @@ function SourceDisplay() { - **Click to View** - Opens source documents - **Metadata Display** - Shows title, author, date -### ChatEvents +### ChatEvent -Displays collapsible process events and status updates. +Displays collapsible process event with status updates. 
```tsx -import { ChatEvents } from '@llamaindex/chat-ui/widgets' +import { ChatEvent } from '@llamaindex/chat-ui/widgets' -function EventDisplay() { +// When Event with loading status +function SearchDocumentsEvent() { return ( - ) } -``` - -### ChatAgentEvents - -Displays agent-specific events with progress tracking. - -```tsx -import { ChatAgentEvents } from '@llamaindex/chat-ui/widgets' -function AgentEventDisplay() { +// When Event with success status +function SearchDocumentsResult() { return ( - ) } @@ -311,7 +251,7 @@ Interactive follow-up question suggestions. ```tsx import { SuggestedQuestions } from '@llamaindex/chat-ui/widgets' -function QuestionSuggestions({ append, requestData }) { +function QuestionSuggestions({ regenerate, requestData }) { return ( ) @@ -350,8 +290,6 @@ function ChatStarters() { } ``` -## Utility Widgets - ### FileUploader Drag-and-drop file upload with validation. @@ -427,7 +365,7 @@ function DocInfo({ document }) { } ``` -### Citation +## Citation Individual citation component with linking. @@ -451,7 +389,7 @@ function CitationLink({ source, index }) { ### Automatic Rendering -Annotation widgets render based on message annotations through dedicated annotation components: +Other widgets render based on message parts through dedicated components: ```tsx import { ChatMessage } from '@llamaindex/chat-ui' @@ -460,17 +398,17 @@ function MessageWithWidgets({ message }) { return ( - - {/* Renders ChatImage */} - {/* Renders ChatSources */} - {/* Renders ChatEvents */} + + {/* Renders ChatImage */} + {/* Renders ChatSources */} + {/* Renders ChatEvents */} ) } ``` -The `ChatMessage.Content.*` components internally use the annotation pattern described in [Annotations](./annotations.mdx), extracting data with `getAnnotationData` and passing it to the respective widgets. 
+The `ChatMessage.Part.*` components internally use the parts pattern described in [Parts](./parts.mdx), extracting data with `usePart` and passing it to the respective widgets. ### Manual Widget Usage @@ -506,7 +444,7 @@ function CustomMessageLayout({ message }) { Create custom widgets by following this pattern: ```tsx -interface WeatherData { +type WeatherData = { location: string temperature: number condition: string @@ -542,7 +480,7 @@ function App() { ## Next Steps -- [Annotations](./annotations.mdx) - Learn how to create and send annotation data +- [Parts](./parts.mdx) - Learn how to create and send parts - [Artifacts](./artifacts.mdx) - Implement interactive code and document artifacts - [Hooks](./hooks.mdx) - Understand the widget hook system - [Customization](./customization.mdx) - Style and customize widget appearance diff --git a/examples/fastapi/backend/app/chat.py b/examples/fastapi/backend/app/chat.py index b06c5aad..22be2a45 100644 --- a/examples/fastapi/backend/app/chat.py +++ b/examples/fastapi/backend/app/chat.py @@ -1,7 +1,6 @@ from fastapi import APIRouter, Request from fastapi.responses import StreamingResponse - -from app.vercel import VercelStreamResponse +from .vercel import SSEStreamResponse, get_text router = APIRouter(prefix="/chat") @@ -10,49 +9,92 @@ async def chat(request: Request) -> StreamingResponse: data = await request.json() messages = data.get("messages", []) - last_message = messages[-1] if messages else {"content": ""} + last_message = messages[-1] if messages else {} + content = get_text(last_message) - query_text = f'User query: "{last_message.get("content", "")}".\\n' + query_text = f'User query: "{content}".\n' - sample_text = """ -Welcome to the demo of @llamaindex/chat-ui. Let me show you the different types of components that can be triggered from the server. + # Advanced sample parts matching the Next.js advanced route + sample_parts = [ + "Welcome to the demo of @llamaindex/chat-ui. 
Let me show you the different types of components that can be triggered from the server.", + + """ +### Text Part +Text part is used to display text in the chat. It is in markdown format. +You can use markdown syntax to format the text. Some examples: + +- **bold** -> this is bold text +- *italic* -> this is italic text +- [link](https://www.google.com) -> this is a link -### Markdown with code block +You can also display a code block inside markdown. ```js const a = 1 const b = 2 const c = a + b console.log(c) -``` +```""", -### Annotations + """ +### Parts -""" - - text_tokens = sample_text.split(' ') - - sample_annotations = [ +Beside text, you can also display parts in the chat. Parts can be displayed before or after the text. + +**Built-in parts** + +@llamaindex/chat-ui provides some built-in parts for you to use + +- **file** -> display a file with name and url +- **event** -> display an event with title, status, and data +- **artifact** -> display a code artifact +- **sources** -> display a list of sources +- **suggested_questions** -> display a list of suggested questions + +**Custom parts** + +You can also create your own custom parts. + +- **weather** -> display a weather card +- **wiki** -> display a wiki card +""", + + "**file**: Here is the demo of a file part", { - "type": "sources", + "type": "file", "data": { - "nodes": [ - {"id": "1", "url": "/sample.pdf"}, - {"id": "2", "url": "/sample.pdf"}, - ], - }, + "filename": "upload.pdf", + "mediaType": "application/pdf", + "url": "https://pdfobject.com/pdf/sample.pdf" + } }, + + "**event**: Here is the demo of event parts. 
The second event part will override the first one because they have the same id", { - "type": "artifact", + "id": "demo_sample_event_id", + "type": "event", "data": { - "type": "code", + "title": "Calling tool `get_weather` with input `San Francisco, CA`", + "status": "pending" + } + }, + { + "id": "demo_sample_event_id", # Same id to override previous part + "type": "event", + "data": { + "title": "Got response from tool `get_weather` with input `San Francisco, CA`", + "status": "success", "data": { - "file_name": "sample.ts", - "language": "typescript", - "code": 'console.log("Hello, world!");', - }, - }, + "location": "San Francisco, CA", + "temperature": 22, + "condition": "sunny", + "humidity": 65, + "windSpeed": 12 + } + } }, + + "**weather**: Here is the demo of a weather part. It is a custom part", { "type": "weather", "data": { @@ -60,15 +102,55 @@ async def chat(request: Request) -> StreamingResponse: "temperature": 22, "condition": "sunny", "humidity": 65, - "windSpeed": 12, - }, + "windSpeed": 12 + } }, - ] - - events = [ - query_text, - *[f"{token} " for token in text_tokens], - *sample_annotations, + + "**wiki**: Here is the demo of a wiki part", + { + "type": "wiki", + "data": { + "title": "LlamaIndex", + "summary": "LlamaIndex is a framework for building AI applications.", + "url": "https://www.llamaindex.ai", + "category": "AI", + "lastUpdated": "2025-06-02" + } + }, + + "**artifact**: Here is the demo of an artifact part", + { + "type": "artifact", + "data": { + "type": "code", + "data": { + "file_name": "code.py", + "code": 'print("Hello, world!")', + "language": "python" + } + } + }, + + "**sources**: Here is the demo of a sources part", + { + "type": "sources", + "data": { + "nodes": [ + {"id": "1", "url": "/sample.pdf"}, + {"id": "2", "url": "/sample.pdf"} + ] + } + }, + + "**suggested_questions**: Here is the demo of a suggested_questions part", + { + "type": "suggested_questions", + "data": [ + "I think you should go to the beach", + "I think 
you should go to the mountains", + "I think you should go to the city" + ] + } ] - return VercelStreamResponse(events=events) + return SSEStreamResponse(parts=sample_parts, query=query_text) diff --git a/examples/fastapi/backend/app/vercel.py b/examples/fastapi/backend/app/vercel.py index 5f0a91b9..84f4c302 100644 --- a/examples/fastapi/backend/app/vercel.py +++ b/examples/fastapi/backend/app/vercel.py @@ -1,58 +1,86 @@ import asyncio import json -from typing import Any, AsyncGenerator, Iterable, Union - +import uuid +from typing import Any, AsyncGenerator, Dict, Union from fastapi.responses import StreamingResponse +DATA_PREFIX = "data: " +TOKEN_DELAY = 0.03 # 30ms delay between tokens +PART_DELAY = 1.0 # 1s delay between parts + -class VercelStreamResponse(StreamingResponse): +class SSEStreamResponse(StreamingResponse): """ - Converts preprocessed events into Vercel-compatible streaming response format. + New SSE format compatible with Vercel/AI SDK 5 useChat """ - TEXT_PREFIX = "0:" - DATA_PREFIX = "8:" - ERROR_PREFIX = "3:" - - def __init__( - self, - events: Iterable[Any], - *args: Any, - **kwargs: Any, - ): - stream = self._stream_event(events=events) - super().__init__(stream, *args, **kwargs) - - async def _stream_event(self, events: Iterable[Any]) -> AsyncGenerator[str, None]: - stream_started = False - for event in events: - if not stream_started: - yield self.convert_text("") - stream_started = True - # Simulate a small delay between events - await asyncio.sleep(0.1) - if isinstance(event, str): - yield self.convert_text(event) - elif isinstance(event, dict): - yield self.convert_data(event) - else: - raise ValueError(f"Unknown event type: {type(event)}") - - @classmethod - def convert_text(cls, token: str) -> str: - """Convert text event to Vercel format.""" - # Escape newlines and double quotes to avoid breaking the stream - token = json.dumps(token) - return f"{cls.TEXT_PREFIX}{token}\n" - - @classmethod - def convert_data(cls, data: Union[dict, str]) 
-> str: - """Convert data event to Vercel format.""" - data_str = json.dumps(data) if isinstance(data, dict) else data - return f"{cls.DATA_PREFIX}[{data_str}]\n" - - @classmethod - def convert_error(cls, error: str) -> str: - """Convert error event to Vercel format.""" - error_str = json.dumps(error) - return f"{cls.ERROR_PREFIX}{error_str}\n" + def __init__(self, parts: list[Union[str, Dict[str, Any]]], query: str = "", **kwargs): + stream = self._create_stream(query, parts) + super().__init__( + stream, + media_type="text/event-stream", + headers={"Connection": "keep-alive"}, + **kwargs + ) + + async def _create_stream(self, query: str, parts: list[Union[str, Dict[str, Any]]]) -> AsyncGenerator[str, None]: + """Create SSE stream with new format""" + + async def write_text(content: str) -> AsyncGenerator[str, None]: + """Write text content with token-by-token streaming""" + # Generate unique message id + message_id = str(uuid.uuid4()) + + # Start text chunk + start_chunk = {"id": message_id, "type": "text-start"} + yield f"{DATA_PREFIX}{json.dumps(start_chunk)}\n\n" + + # Stream tokens + for token in content.split(' '): + if token: # Skip empty tokens + delta_chunk = { + "id": message_id, + "type": "text-delta", + "delta": token + " " + } + yield f"{DATA_PREFIX}{json.dumps(delta_chunk)}\n\n" + await asyncio.sleep(TOKEN_DELAY) + + # End text chunk + end_chunk = {"id": message_id, "type": "text-end"} + yield f"{DATA_PREFIX}{json.dumps(end_chunk)}\n\n" + + async def write_data(data: Dict[str, Any]) -> AsyncGenerator[str, None]: + """Write data part""" + chunk = { + "type": f"data-{data['type']}", # Add data- prefix + "data": data.get("data", {}) + } + + # Only include id if it exists + if data.get("id"): + chunk["id"] = data["id"] + + yield f"{DATA_PREFIX}{json.dumps(chunk)}\n\n" + await asyncio.sleep(PART_DELAY) + + # Stream the query first + if query: + async for chunk in write_text(query): + yield chunk + + # Stream all parts + for item in parts: + if 
isinstance(item, str): + async for chunk in write_text(item): + yield chunk + elif isinstance(item, dict): + async for chunk in write_data(item): + yield chunk + +def get_text(message: Any) -> str: + return "\n\n".join( + part["text"] + for part in message["parts"] + if part.get("type") == "text" and "text" in part + ) \ No newline at end of file diff --git a/examples/fastapi/frontend/app/globals.css b/examples/fastapi/frontend/app/globals.css index 057cfb1f..cca3888e 100644 --- a/examples/fastapi/frontend/app/globals.css +++ b/examples/fastapi/frontend/app/globals.css @@ -244,116 +244,5 @@ } body { @apply bg-background text-foreground antialiased; - font-feature-settings: 'cv11', 'ss01'; - font-variation-settings: 'opsz' 32; - } - - html { - scroll-behavior: smooth; - } - - /* Custom scrollbar */ - ::-webkit-scrollbar { - width: 6px; - height: 6px; - } - - ::-webkit-scrollbar-track { - background: transparent; - } - - ::-webkit-scrollbar-thumb { - background: rgba(255, 255, 255, 0.2); - border-radius: 3px; - } - - ::-webkit-scrollbar-thumb:hover { - background: rgba(255, 255, 255, 0.3); - } - - /* Selection styling */ - ::selection { - background: rgba(186, 186, 233, 0.3); - color: inherit; - } - - ::-moz-selection { - background: rgba(186, 186, 233, 0.3); - color: inherit; - } - - /* Utility classes for animations */ - .animate-fade-in { - animation: var(--animate-fade-in); - } - - .animate-fade-in-up { - animation: var(--animate-fade-in-up); - } - - .animate-scale-in { - animation: var(--animate-scale-in); - } - - .animate-slide-in-right { - animation: var(--animate-slide-in-right); - } - - .animate-pulse-glow { - animation: var(--animate-pulse-glow); - } - - .animate-float { - animation: var(--animate-float); - } - - /* Glass morphism utilities */ - .glass { - background: var(--glass-gradient); - backdrop-filter: blur(20px); - -webkit-backdrop-filter: blur(20px); - border: 1px solid rgba(255, 255, 255, 0.1); - } - - .glass-border { - border: 1px solid; - 
border-image: var(--glass-border) 1; - } - - /* Shimmer effect */ - .shimmer { - background: linear-gradient( - 90deg, - rgba(255, 255, 255, 0) 0%, - rgba(255, 255, 255, 0.1) 50%, - rgba(255, 255, 255, 0) 100% - ); - background-size: 1000px 100%; - animation: var(--animate-shimmer); - } - - /* Text gradient utilities */ - .text-gradient-purple { - background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); - -webkit-background-clip: text; - -webkit-text-fill-color: transparent; - background-clip: text; - } - - .text-gradient-rainbow { - background: linear-gradient( - 90deg, - #ff006e, - #8338ec, - #3a86ff, - #06ffa5, - #ffbe0b, - #fb5607 - ); - -webkit-background-clip: text; - -webkit-text-fill-color: transparent; - background-clip: text; - background-size: 300% 100%; - animation: var(--animate-shimmer); } } diff --git a/examples/fastapi/frontend/app/layout.tsx b/examples/fastapi/frontend/app/layout.tsx index 4f5724c6..564a3c62 100644 --- a/examples/fastapi/frontend/app/layout.tsx +++ b/examples/fastapi/frontend/app/layout.tsx @@ -8,8 +8,8 @@ import { Inter } from 'next/font/google' const inter = Inter({ subsets: ['latin'] }) export const metadata: Metadata = { - title: 'LlamaIndex Chat UI - Next.js Example', - description: 'A simple Next.js application using @llamaindex/chat-ui', + title: 'LlamaIndex Chat UI - FastAPI Example', + description: 'A simple interface using @llamaindex/chat-ui', } export default function RootLayout({ diff --git a/examples/fastapi/frontend/app/page.tsx b/examples/fastapi/frontend/app/page.tsx index efdc051e..b0f06fec 100644 --- a/examples/fastapi/frontend/app/page.tsx +++ b/examples/fastapi/frontend/app/page.tsx @@ -8,14 +8,21 @@ import { ChatSection, useChatUI, } from '@llamaindex/chat-ui' -import { Message, useChat } from 'ai/react' -import { CustomWeatherAnnotation } from '../components/custom-weather-annotation' +import { UIMessage, useChat } from '@ai-sdk/react' +import { WeatherPart } from '../components/custom-weather' +import 
{ DefaultChatTransport } from 'ai' +import { WikiPart } from '../components/custom-wiki' -const initialMessages: Message[] = [ +const initialMessages: UIMessage[] = [ { id: '1', - content: 'Hello! How can I help you today?', role: 'assistant', + parts: [ + { + type: 'text', + text: 'Hello! How can I help you today?', + }, + ], }, ] @@ -39,8 +46,10 @@ export default function Page(): JSX.Element { function ChatExample() { const handler = useChat({ - api: 'http://localhost:8000/api/chat', - initialMessages, + transport: new DefaultChatTransport({ + api: 'http://localhost:8000/api/chat', + }), + messages: initialMessages, }) return ( @@ -69,7 +78,7 @@ function ChatExample() { } function CustomChatMessages() { - const { messages, isLoading, append } = useChatUI() + const { messages } = useChatUI() return ( <> @@ -85,10 +94,15 @@ function CustomChatMessages() { {message.role === 'user' ? 'U' : 'AI'}
- - - - + + + + + + + + + diff --git a/examples/fastapi/frontend/components/custom-weather-annotation.tsx b/examples/fastapi/frontend/components/custom-weather.tsx similarity index 64% rename from examples/fastapi/frontend/components/custom-weather-annotation.tsx rename to examples/fastapi/frontend/components/custom-weather.tsx index e50b5eb4..b2b0ef24 100644 --- a/examples/fastapi/frontend/components/custom-weather-annotation.tsx +++ b/examples/fastapi/frontend/components/custom-weather.tsx @@ -1,8 +1,8 @@ 'use client' -import { useChatMessage, getAnnotationData } from '@llamaindex/chat-ui' +import { usePart } from '@llamaindex/chat-ui' -interface WeatherData { +type WeatherData = { location: string temperature: number condition: string @@ -10,20 +10,36 @@ interface WeatherData { windSpeed: number } -export function CustomWeatherAnnotation() { - const { message } = useChatMessage() +const WeatherPartType = 'data-weather' - const weatherData = getAnnotationData(message, 'weather') +type WeatherPart = { + type: typeof WeatherPartType + data: WeatherData +} - if (weatherData.length === 0) return null +// A custom part component that is used to display weather information in a chat message +export function WeatherPart() { + const weatherData = usePart(WeatherPartType)?.data + if (!weatherData) return null + return +} - const data = weatherData[0] +function WeatherCard({ data }: { data: WeatherData }) { + const iconMap: Record = { + sunny: '☀️', + cloudy: '☁️', + rainy: '🌧️', + snowy: '❄️', + stormy: '⛈️', + } return ( -
+
- + + {iconMap[data.condition.toLowerCase()] || '🌤️'} +

{data.location}

@@ -46,17 +62,3 @@ export function CustomWeatherAnnotation() {
) } - -function WeatherIcon({ condition }: { condition: string }) { - const iconMap: Record = { - sunny: '☀️', - cloudy: '☁️', - rainy: '🌧️', - snowy: '❄️', - stormy: '⛈️', - } - - return ( - {iconMap[condition.toLowerCase()] || '🌤️'} - ) -} diff --git a/examples/fastapi/frontend/components/custom-wiki.tsx b/examples/fastapi/frontend/components/custom-wiki.tsx new file mode 100644 index 00000000..da8400c0 --- /dev/null +++ b/examples/fastapi/frontend/components/custom-wiki.tsx @@ -0,0 +1,114 @@ +'use client' + +import { usePart } from '@llamaindex/chat-ui' + +type WikiData = { + title: string + summary: string + url: string + category: string + lastUpdated: string +} + +const WikiPartType = 'data-wiki' + +type WikiPart = { + type: typeof WikiPartType + data: WikiData +} + +export function WikiPart() { + const wikiData = usePart(WikiPartType)?.data + if (!wikiData) return null + return +} + +// A UI widget that displays wiki information, it can be used inline with markdown text +function WikiCard({ data }: { data: WikiData }) { + const iconMap: Record = { + science: '🧪', + history: '📜', + technology: '💻', + biology: '🧬', + geography: '🌍', + literature: '📚', + art: '🎨', + music: '🎵', + } + + const getCategoryColor = (category: string) => { + const colors: Record = { + science: 'from-blue-50 to-blue-100 border-blue-200 text-blue-900', + history: 'from-amber-50 to-amber-100 border-amber-200 text-amber-900', + technology: + 'from-purple-50 to-purple-100 border-purple-200 text-purple-900', + biology: 'from-green-50 to-green-100 border-green-200 text-green-900', + geography: 'from-teal-50 to-teal-100 border-teal-200 text-teal-900', + literature: + 'from-indigo-50 to-indigo-100 border-indigo-200 text-indigo-900', + art: 'from-pink-50 to-pink-100 border-pink-200 text-pink-900', + music: 'from-violet-50 to-violet-100 border-violet-200 text-violet-900', + } + return ( + colors[category.toLowerCase()] || + 'from-gray-50 to-gray-100 border-gray-200 text-gray-900' + ) + } + + 
const categoryColorClass = getCategoryColor(data.category) + + return ( +
+
+
+ + {iconMap[data.category.toLowerCase()] || '📖'} + +
+
+

{data.title}

+

+ {data.summary} +

+
+
+ +
+
+ 📂 + {data.category} +
+
+ 📅 + {data.lastUpdated} +
+
+ + +
+ ) +} diff --git a/examples/fastapi/frontend/package.json b/examples/fastapi/frontend/package.json index fa87800d..22ed1b6e 100644 --- a/examples/fastapi/frontend/package.json +++ b/examples/fastapi/frontend/package.json @@ -11,7 +11,9 @@ }, "dependencies": { "@llamaindex/chat-ui": "latest", - "ai": "^4.3.16", + "@ai-sdk/react": "^2.0.4", + "@ai-sdk/rsc": "^1.0.4", + "ai": "^5.0.4", "next": "^15.3.2", "react": "^19.1.0", "react-dom": "^19.1.0" @@ -29,4 +31,4 @@ "tailwindcss": "^4.0.7", "typescript": "^5.3.3" } -} +} \ No newline at end of file diff --git a/examples/llamadeploy/chat/custom-ui/ui/app/page.tsx b/examples/llamadeploy/chat/custom-ui/ui/app/page.tsx index aa635f2f..09e9a9be 100644 --- a/examples/llamadeploy/chat/custom-ui/ui/app/page.tsx +++ b/examples/llamadeploy/chat/custom-ui/ui/app/page.tsx @@ -10,7 +10,7 @@ import { useChatUI, useChatWorkflow, } from '@llamaindex/chat-ui' -import { WeatherAnnotation } from '@/components/custom/custom-weather' +import { WeatherPart } from '@/components/custom/custom-weather' import { CLIHumanInput } from '@/components/custom/human-input' import { Select, @@ -105,9 +105,9 @@ function CustomChatMessages({ - - - + + + diff --git a/examples/llamadeploy/chat/custom-ui/ui/components/custom/custom-weather.tsx b/examples/llamadeploy/chat/custom-ui/ui/components/custom/custom-weather.tsx index aa727624..b2b0ef24 100644 --- a/examples/llamadeploy/chat/custom-ui/ui/components/custom/custom-weather.tsx +++ b/examples/llamadeploy/chat/custom-ui/ui/components/custom/custom-weather.tsx @@ -1,8 +1,8 @@ 'use client' -import { useChatMessage, getAnnotationData } from '@llamaindex/chat-ui' +import { usePart } from '@llamaindex/chat-ui' -interface WeatherData { +type WeatherData = { location: string temperature: number condition: string @@ -10,14 +10,18 @@ interface WeatherData { windSpeed: number } -// A custom annotation component that is used to display weather information in a chat message -// The weather data is extracted from 
annotations in the message that has type 'weather' -export function WeatherAnnotation() { - const { message } = useChatMessage() - const weatherData = getAnnotationData(message, 'weather') +const WeatherPartType = 'data-weather' - if (weatherData.length === 0) return null - return +type WeatherPart = { + type: typeof WeatherPartType + data: WeatherData +} + +// A custom part component that is used to display weather information in a chat message +export function WeatherPart() { + const weatherData = usePart(WeatherPartType)?.data + if (!weatherData) return null + return } function WeatherCard({ data }: { data: WeatherData }) { @@ -29,13 +33,13 @@ function WeatherCard({ data }: { data: WeatherData }) { stormy: '⛈️', } - if (!data.location) return null - return (
- {iconMap[data.condition] || '🌤️'} + + {iconMap[data.condition.toLowerCase()] || '🌤️'} +

{data.location}

diff --git a/examples/llamadeploy/chat/custom-ui/ui/package.json b/examples/llamadeploy/chat/custom-ui/ui/package.json index 9b6a1cfd..ae19cdf5 100644 --- a/examples/llamadeploy/chat/custom-ui/ui/package.json +++ b/examples/llamadeploy/chat/custom-ui/ui/package.json @@ -11,7 +11,8 @@ "dependencies": { "@llamaindex/chat-ui": "latest", "@radix-ui/react-select": "^2.1.1", - "ai": "^4.3.16", + "@ai-sdk/react": "^2.0.4", + "ai": "^5.0.4", "class-variance-authority": "^0.7.0", "clsx": "^2.1.1", "lucide-react": "^0.453.0", diff --git a/examples/llamadeploy/workflow/ui/package.json b/examples/llamadeploy/workflow/ui/package.json index a1bae08c..297bddd1 100644 --- a/examples/llamadeploy/workflow/ui/package.json +++ b/examples/llamadeploy/workflow/ui/package.json @@ -10,7 +10,8 @@ }, "dependencies": { "@llamaindex/chat-ui": "latest", - "ai": "^4.3.16", + "@ai-sdk/react": "^2.0.4", + "ai": "^5.0.4", "next": "^15.3.2", "react": "^19.1.0", "react-dom": "^19.1.0" diff --git a/examples/nextjs/app/api/chat/advanced/route.ts b/examples/nextjs/app/api/chat/advanced/route.ts index 4a302051..ca7753da 100644 --- a/examples/nextjs/app/api/chat/advanced/route.ts +++ b/examples/nextjs/app/api/chat/advanced/route.ts @@ -3,47 +3,27 @@ * * This example demonstrates advanced streaming features: * - Text streaming with token-by-token delivery - * - Both standard annotations (sent after text) and inline annotations (embedded in text) - * - Inline annotations are embedded as special code blocks within the markdown stream + * - Both standard annotations (sent after text) and artifacts inlined in the markdown stream * - Multiple annotation types: sources, artifacts, and custom components (wiki) * - * Use this example to understand how to mix regular content with interactive - * components that appear at specific positions in the chat stream. 
*/ -import { NextResponse, type NextRequest } from 'next/server' +import { NextRequest } from 'next/server' +import { chatHandler, MessagePart } from '../handler' -const TOKEN_DELAY = 30 // 30ms delay between tokens -const TEXT_PREFIX = '0:' // vercel ai text prefix -const ANNOTATION_PREFIX = '8:' // vercel ai annotation prefix -const INLINE_ANNOTATION_KEY = 'annotation' // the language key to detect inline annotation code in markdown -const ANNOTATION_DELAY = 1000 // 1 second delay between annotations +const SAMPLE_PARTS: (string | MessagePart)[] = [ + 'Welcome to the demo of @llamaindex/chat-ui. Let me show you the different types of components that can be triggered from the server.', -export async function POST(request: NextRequest) { - try { - const { messages } = await request.json() - const lastMessage = messages[messages.length - 1] - - const stream = fakeChatStream(`User query: "${lastMessage.content}".\n`) - - return new Response(stream, { - headers: { - 'Content-Type': 'text/plain; charset=utf-8', - 'X-Vercel-AI-Data-Stream': 'v1', - Connection: 'keep-alive', - }, - }) - } catch (error) { - const detail = (error as Error).message - return NextResponse.json({ detail }, { status: 500 }) - } -} - -const SAMPLE_TEXT = [ ` -Welcome to the demo of @llamaindex/chat-ui. Let me show you the different types of components that can be triggered from the server. +### Text Part +Text part is used to display text in the chat. It is in markdown format. +You can use markdown syntax to format the text. Some examples: -### Markdown with code block +- **bold** -> this is bold text +- *italic* -> this is italic text +- [link](https://www.google.com) -> this is a link + +You can also display a code block inside markdown. \`\`\`js const a = 1 @@ -51,63 +31,80 @@ const b = 2 const c = a + b console.log(c) \`\`\` - `, - '\n ### Demo inline annotations \n', - 'Here are some steps to create a simple wiki app: \n', - '1. 
Create package.json file:', + + ` +### Parts + +Beside text, you can also display parts in the chat. Parts can be displayed before or after the text. + +**Built-in parts** + +@llamaindex/chat-ui provides some built-in parts for you to use + +- **file** -> display a file with name and url +- **event** -> display a event with title, status, and data +- **artifact** -> display a code artifact +- **sources** -> display a list of sources +- **suggested_questions** -> display a list of suggested questions + +**Custom parts** + +You can also create your own custom parts. + +- **weather** -> display a weather card +- **wiki** -> display a wiki card + `, + + '**file**: Here is the demo of a file part', { - type: 'artifact', + type: 'file', data: { - type: 'code', - created_at: 1717334400000, - data: { - file_name: 'package.json', - language: 'json', - code: `{ - "name": "wiki-app", - "version": "1.0.0", - "description": "Wiki application", - "main": "wiki.js", - "dependencies": { - "axios": "^1.0.0", - "wiki-api": "^2.1.0" - } -}`, - }, + filename: 'upload.pdf', + mediaType: 'application/pdf', + url: 'https://pdfobject.com/pdf/sample.pdf', }, }, - '2. Check the wiki fetching script:', + + '**event**: Here is the demo of event parts. The second event part will override the first one because they have the same id', { - type: 'artifact', + id: 'demo_sample_event_id', + type: 'event', data: { - created_at: 1717334500000, - type: 'code', - data: { - file_name: 'wiki.js', - language: 'javascript', - code: `async function getWiki(search) { - const response = await fetch("/api/wiki?search=" + search); - const data = await response.json(); - return data; -}`, - }, + title: 'Calling tool `get_weather` with input `San Francisco, CA`', + status: 'pending', }, }, - '3. 
Run getWiki with the search term:', { - type: 'artifact', + id: 'demo_sample_event_id', // use the same id to override the previous part + type: 'event', data: { - created_at: 1717334600000, - type: 'code', + title: + 'Got response from tool `get_weather` with input `San Francisco, CA`', + status: 'success', data: { - file_name: 'wiki.js', - language: 'javascript', - code: `getWiki(\`What is \${search}?\`);`, + location: 'San Francisco, CA', + temperature: 22, + condition: 'sunny', + humidity: 65, + windSpeed: 12, }, }, }, - '4. Check the current wiki:', + + '**weather**: Here is the demo of a weather part. It is a custom part', + { + type: 'weather', + data: { + location: 'San Francisco, CA', + temperature: 22, + condition: 'sunny', + humidity: 65, + windSpeed: 12, + }, + }, + + '**wiki**: Here is the demo of a wiki part', { type: 'wiki', data: { @@ -118,64 +115,21 @@ console.log(c) lastUpdated: '2025-06-02', }, }, - '#### 🎯 Demo generating a document artifact', + + '**artifact**: Here is the demo of a artifact part', { type: 'artifact', data: { - type: 'document', + type: 'code', data: { - title: 'Sample document', - content: `# Getting Started Guide - - ## Introduction - This comprehensive guide will walk you through everything you need to know to get started with our platform. Whether you're a beginner or an experienced user, you'll find valuable information here. - - ## Key Features - - **Easy Setup**: Get running in minutes - - **Powerful Tools**: Access advanced capabilities - - **Great Documentation**: Find answers quickly - - **Active Community**: Get help when needed - - ## Setup Process - 1. Install Dependencies - First, ensure you have all required dependencies installed on your system. - - 2. Configuration - Update your configuration files with the necessary settings: - - API keys - - Environment variables - - User preferences - - 3. First Steps - Begin with basic operations to familiarize yourself with the platform. 
- - ## Best Practices - - Always backup your data - - Follow security guidelines - - Keep your dependencies updated - - Document your changes - - ## Troubleshooting - If you encounter issues, try these steps: - 1. Check logs for errors - 2. Verify configurations - 3. Update to latest version - 4. Contact support if needed - - ## Additional Resources - - [Documentation](https://docs.example.com) - - [API Reference](https://api.example.com) - - [Community Forums](https://community.example.com) - - Feel free to explore and reach out if you need assistance!`, - type: 'markdown', + file_name: 'code.py', + code: 'print("Hello, world!")', + language: 'python', }, }, }, - '\n\n Please feel free to open the document in the canvas and edit it. The document will be saved as a new version', -] -const SAMPLE_SOURCES = [ + '**sources**: Here is the demo of a sources part', { type: 'sources', data: { @@ -185,59 +139,18 @@ const SAMPLE_SOURCES = [ ], }, }, -] -const fakeChatStream = (query: string): ReadableStream => { - return new ReadableStream({ - async start(controller) { - const encoder = new TextEncoder() - controller.enqueue( - encoder.encode(`${TEXT_PREFIX}${JSON.stringify(query)}\n`) - ) - - // insert inline annotations - for (const item of SAMPLE_TEXT) { - if (typeof item === 'string') { - for (const token of item.split(' ')) { - await new Promise(resolve => setTimeout(resolve, TOKEN_DELAY)) - controller.enqueue( - encoder.encode(`${TEXT_PREFIX}${JSON.stringify(`${token} `)}\n`) - ) - } - } else { - await new Promise(resolve => setTimeout(resolve, ANNOTATION_DELAY)) - // append inline annotation with 0: prefix - const annotationCode = toInlineAnnotationCode(item) - controller.enqueue( - encoder.encode(`${TEXT_PREFIX}${JSON.stringify(annotationCode)}\n`) - ) - } - } - - // insert sources in fixed positions - for (const item of SAMPLE_SOURCES) { - controller.enqueue( - encoder.encode(`${ANNOTATION_PREFIX}${JSON.stringify([item])}\n`) - ) - } - - controller.close() - }, - 
}) -} + '**suggested_questions**: Here is the demo of a suggested_questions part', + { + type: 'suggested_questions', + data: [ + 'I think you should go to the beach', + 'I think you should go to the mountains', + 'I think you should go to the city', + ], + }, +] -/** - * To append inline annotations to the stream, we need to wrap the annotation in a code block with the language key. - * The language key is `annotation` and the code block is wrapped in backticks. - * The prefix `0:` ensures it will be treated as inline markdown. Example: - * - * 0:\`\`\`annotation - * \{ - * "type": "artifact", - * "data": \{...\} - * \} - * \`\`\` - */ -function toInlineAnnotationCode(item: any) { - return `\n\`\`\`${INLINE_ANNOTATION_KEY}\n${JSON.stringify(item)}\n\`\`\`\n` +export async function POST(request: NextRequest) { + return chatHandler(request, SAMPLE_PARTS) } diff --git a/examples/nextjs/app/api/chat/edge/route.ts b/examples/nextjs/app/api/chat/edge/route.ts index 382e6e89..874f0486 100644 --- a/examples/nextjs/app/api/chat/edge/route.ts +++ b/examples/nextjs/app/api/chat/edge/route.ts @@ -1,40 +1,19 @@ /** * This is an example to demo chat-ui with edge runtime, same functionality as chat/route.ts + * + * This is a simple example demonstrating: * - Text streaming with token-by-token delivery * - Basic markdown content with code blocks - * - Custom annotations (weather) sent after text completion - * - Standard annotations (sources) sent after text completion + * - Standard parts (sources) sent after text completion + * - Custom parts (weather) sent after text completion * */ -import { NextResponse, type NextRequest } from 'next/server' - -const TOKEN_DELAY = 30 // 30ms delay between tokens -const TEXT_PREFIX = '0:' // vercel ai text prefix -const ANNOTATION_PREFIX = '8:' // vercel ai annotation prefix - -export const runtime = 'edge' // This is the key difference from chat/route.ts -export async function POST(request: NextRequest) { - try { - const { messages } = await 
request.json() - const lastMessage = messages[messages.length - 1] - - const stream = fakeChatStream(`User query: "${lastMessage.content}".\n`) - - return new Response(stream, { - headers: { - 'Content-Type': 'text/plain; charset=utf-8', - 'X-Vercel-AI-Data-Stream': 'v1', - Connection: 'keep-alive', - }, - }) - } catch (error) { - const detail = (error as Error).message - return NextResponse.json({ detail }, { status: 500 }) - } -} +import { NextRequest } from 'next/server' +import { chatHandler, MessagePart } from '../handler' -const SAMPLE_TEXT = ` +const SAMPLE_PARTS: (string | MessagePart)[] = [ + ` Welcome to the demo of @llamaindex/chat-ui. Let me show you the different types of components that can be triggered from the server. ### Markdown with code block @@ -46,10 +25,21 @@ const c = a + b console.log(c) \`\`\` -### Annotations +### Parts: + `, + + 'Let me show the sources (type=sources):', + { + type: 'sources', + data: { + nodes: [ + { id: '1', url: '/sample.pdf' }, + { id: '2', url: '/sample.pdf' }, + ], + }, + }, -` -const SAMPLE_ANNOTATIONS = [ + 'Let me show a weather card (type=weather):', { type: 'weather', data: { @@ -60,39 +50,10 @@ const SAMPLE_ANNOTATIONS = [ windSpeed: 12, }, }, - { - type: 'sources', - data: { - nodes: [ - { id: '1', url: '/sample.pdf' }, - { id: '2', url: '/sample.pdf' }, - ], - }, - }, ] -const fakeChatStream = (query: string): ReadableStream => { - return new ReadableStream({ - async start(controller) { - const encoder = new TextEncoder() - controller.enqueue( - encoder.encode(`${TEXT_PREFIX}${JSON.stringify(query)}\n`) - ) - - for (const token of SAMPLE_TEXT.split(' ')) { - await new Promise(resolve => setTimeout(resolve, TOKEN_DELAY)) - controller.enqueue( - encoder.encode(`${TEXT_PREFIX}${JSON.stringify(`${token} `)}\n`) - ) - } - - for (const item of SAMPLE_ANNOTATIONS) { - controller.enqueue( - encoder.encode(`${ANNOTATION_PREFIX}${JSON.stringify([item])}\n`) - ) - } +export const runtime = 'edge' // This is the key 
difference from chat/route.ts - controller.close() - }, - }) +export async function POST(request: NextRequest) { + return chatHandler(request, SAMPLE_PARTS) } diff --git a/examples/nextjs/app/api/chat/handler.ts b/examples/nextjs/app/api/chat/handler.ts new file mode 100644 index 00000000..623401cf --- /dev/null +++ b/examples/nextjs/app/api/chat/handler.ts @@ -0,0 +1,130 @@ +import { NextResponse, type NextRequest } from 'next/server' + +const TOKEN_DELAY = 30 // 30ms delay between tokens +const PART_DELAY = 1000 // 1s delay between parts +const DATA_PREFIX = 'data: ' // use data: prefix for SSE format + +interface TextChunk { + type: 'text-delta' | 'text-start' | 'text-end' + id: string + delta?: string +} + +interface DataChunk { + id?: string // optional id for data parts. Only the last data part with that id will be shown + type: `data-${string}` // requires `data-` prefix when sending data parts + data: Record +} + +interface TextPart { + type: 'text' + text: string +} + +export interface MessagePart { + id?: string + type: string + data?: any +} + +export async function chatHandler( + request: NextRequest, + parts: (string | MessagePart)[] +) { + try { + // extract query from last message + const { messages } = await request.json() + const query = getText(messages[messages.length - 1]?.parts ?? 
[]) + + // create a stream + const stream = fakeChatStream(`User query: "${query}".\n`, parts) + + // return the stream + return new Response(stream, { + // Set headers for Server-Sent Events (SSE) + headers: { + 'Content-Type': 'text/event-stream', + Connection: 'keep-alive', + }, + }) + } catch (error) { + const detail = (error as Error).message + return NextResponse.json({ detail }, { status: 500 }) + } +} + +function getText(message: { parts: MessagePart[] }): string { + return message.parts + .filter((part): part is TextPart => part.type === 'text') + .map(part => part.text) + .join('\n\n') +} + +const fakeChatStream = ( + query: string, + parts: (string | MessagePart)[] +): ReadableStream => { + return new ReadableStream({ + async start(controller) { + const encoder = new TextEncoder() + + function writeStream(chunk: TextChunk | DataChunk) { + controller.enqueue( + encoder.encode(`${DATA_PREFIX}${JSON.stringify(chunk)}\n\n`) + ) + } + + async function writeText(content: string) { + // init a unique message id + const messageId = crypto.randomUUID() + + // important: we need to write the start chunk first + const startChunk: TextChunk = { id: messageId, type: 'text-start' } + writeStream(startChunk) + + // simulate token-by-token streaming + for (const token of content.split(' ')) { + const deltaChunk: TextChunk = { + id: messageId, + type: 'text-delta', + delta: token + ' ', + } + writeStream(deltaChunk) + await new Promise(resolve => setTimeout(resolve, TOKEN_DELAY)) + } + + // important: we need to write the end chunk last + const endChunk: TextChunk = { id: messageId, type: 'text-end' } + + writeStream(endChunk) + } + + async function writeData(data: { + type: string + data?: any + id?: string + }) { + const chunk: DataChunk = { + id: data.id, + type: `data-${data.type}`, + data: data.data, + } + writeStream(chunk) + await new Promise(resolve => setTimeout(resolve, PART_DELAY)) + } + + // show the query message + await writeText(query) + + for (const item 
of parts) { + if (typeof item === 'string') { + await writeText(item) + } else { + await writeData(item) + } + } + + controller.close() + }, + }) +} diff --git a/examples/nextjs/app/api/chat/route.ts b/examples/nextjs/app/api/chat/route.ts index c6db16bd..7cde6b07 100644 --- a/examples/nextjs/app/api/chat/route.ts +++ b/examples/nextjs/app/api/chat/route.ts @@ -4,39 +4,16 @@ * This is a simple example demonstrating: * - Text streaming with token-by-token delivery * - Basic markdown content with code blocks - * - Custom annotations (weather) sent after text completion - * - Standard annotations (sources) sent after text completion + * - Standard parts (sources) sent after text completion + * - Custom parts (weather) sent after text completion * - * Use this example as a starting point for implementing basic chat functionality - * with \@llamaindex/chat-ui components. */ -import { NextResponse, type NextRequest } from 'next/server' -const TOKEN_DELAY = 30 // 30ms delay between tokens -const TEXT_PREFIX = '0:' // vercel ai text prefix -const ANNOTATION_PREFIX = '8:' // vercel ai annotation prefix +import { NextRequest } from 'next/server' +import { chatHandler, MessagePart } from './handler' -export async function POST(request: NextRequest) { - try { - const { messages } = await request.json() - const lastMessage = messages[messages.length - 1] - - const stream = fakeChatStream(`User query: "${lastMessage.content}".\n`) - - return new Response(stream, { - headers: { - 'Content-Type': 'text/plain; charset=utf-8', - 'X-Vercel-AI-Data-Stream': 'v1', - Connection: 'keep-alive', - }, - }) - } catch (error) { - const detail = (error as Error).message - return NextResponse.json({ detail }, { status: 500 }) - } -} - -const SAMPLE_TEXT = ` +const SAMPLE_PARTS: (string | MessagePart)[] = [ + ` Welcome to the demo of @llamaindex/chat-ui. Let me show you the different types of components that can be triggered from the server. 
### Markdown with code block @@ -48,10 +25,21 @@ const c = a + b console.log(c) \`\`\` -### Annotations +### Parts: + `, + + 'Let me show the sources (type=sources):', + { + type: 'sources', + data: { + nodes: [ + { id: '1', url: '/sample.pdf' }, + { id: '2', url: '/sample.pdf' }, + ], + }, + }, -` -const SAMPLE_ANNOTATIONS = [ + 'Let me show a weather card (type=weather):', { type: 'weather', data: { @@ -62,39 +50,8 @@ const SAMPLE_ANNOTATIONS = [ windSpeed: 12, }, }, - { - type: 'sources', - data: { - nodes: [ - { id: '1', url: '/sample.pdf' }, - { id: '2', url: '/sample.pdf' }, - ], - }, - }, ] -const fakeChatStream = (query: string): ReadableStream => { - return new ReadableStream({ - async start(controller) { - const encoder = new TextEncoder() - controller.enqueue( - encoder.encode(`${TEXT_PREFIX}${JSON.stringify(query)}\n`) - ) - - for (const token of SAMPLE_TEXT.split(' ')) { - await new Promise(resolve => setTimeout(resolve, TOKEN_DELAY)) - controller.enqueue( - encoder.encode(`${TEXT_PREFIX}${JSON.stringify(`${token} `)}\n`) - ) - } - - for (const item of SAMPLE_ANNOTATIONS) { - controller.enqueue( - encoder.encode(`${ANNOTATION_PREFIX}${JSON.stringify([item])}\n`) - ) - } - - controller.close() - }, - }) +export async function POST(request: NextRequest) { + return chatHandler(request, SAMPLE_PARTS) } diff --git a/examples/nextjs/app/page.tsx b/examples/nextjs/app/page.tsx index 7c6c0997..bddd7770 100644 --- a/examples/nextjs/app/page.tsx +++ b/examples/nextjs/app/page.tsx @@ -8,15 +8,21 @@ import { ChatSection, useChatUI, } from '@llamaindex/chat-ui' -import { Message, useChat } from 'ai/react' -import { WeatherAnnotation } from '../components/custom-weather' -import { WikiCard } from '@/components/custom-wiki' +import { UIMessage, useChat } from '@ai-sdk/react' +import { WeatherPart } from '../components/custom-weather' +import { DefaultChatTransport } from 'ai' +import { WikiPart } from '../components/custom-wiki' -const initialMessages: Message[] = [ 
+const initialMessages: UIMessage[] = [ { id: '1', - content: 'Hello! How can I help you today?', role: 'assistant', + parts: [ + { + type: 'text', + text: 'Hello! How can I help you today?', + }, + ], }, ] @@ -40,15 +46,17 @@ export default function Page(): JSX.Element { function ChatExample() { const handler = useChat({ - api: '/api/chat', + transport: new DefaultChatTransport({ + // uncomment this to try advanced example in app/api/chat/advanced/route.ts + api: '/api/chat/advanced', - // uncomment this to try edge runtime example in app/api/chat/edge/route.ts - // api: '/api/chat/edge', + // uncomment this to try basic example in app/api/chat/route.ts + // api: '/api/chat', - // uncomment this to try advanced example in app/api/chat/advanced/route.ts - // api: '/api/chat/advanced', - - initialMessages, + // uncomment this to try edge runtime example in app/api/chat/edge/route.ts + // api: '/api/chat/edge', + }), + messages: initialMessages, }) return ( @@ -77,7 +85,7 @@ function ChatExample() { } function CustomChatMessages() { - const { messages, isLoading, append } = useChatUI() + const { messages } = useChatUI() return ( <> @@ -93,18 +101,15 @@ function CustomChatMessages() { {message.role === 'user' ? 'U' : 'AI'}
- - - - {/* annotation components under the Markdown text */} - - + + + + + + + + + diff --git a/examples/nextjs/app/rsc/action.tsx b/examples/nextjs/app/rsc/action.tsx index 29ef44d9..29d4af46 100644 --- a/examples/nextjs/app/rsc/action.tsx +++ b/examples/nextjs/app/rsc/action.tsx @@ -1,18 +1,16 @@ 'use server' -import { defaultAnnotationRenderers } from '@llamaindex/chat-ui' -import { Markdown } from '@llamaindex/chat-ui/widgets' -import { createStreamableUI } from 'ai/rsc' +import { createStreamableUI } from '@ai-sdk/rsc' import { ReactNode } from 'react' +import { MessagePart } from '@llamaindex/chat-ui' +import { MessageDisplay } from './display' -const TOKEN_DELAY = 30 -const ANNOTATION_DELAY = 300 -const INLINE_ANNOTATION_KEY = 'annotation' +const DELAY = 300 export async function chatAction(question: string) { const uiStream = createStreamableUI() - let assistantMsg = '' + let parts: MessagePart[] = [] const responseStream = fakeChatStream(question) responseStream @@ -20,17 +18,11 @@ export async function chatAction(question: string) { new WritableStream({ write: (data: any) => { if (typeof data === 'string') { - assistantMsg += data + parts = parts.concat({ type: 'text', text: data }) } else { - assistantMsg += toInlineAnnotationCode(data) + parts = parts.concat(data) } - - uiStream.update( - - ) + uiStream.update() }, close: () => { uiStream.done() @@ -42,7 +34,7 @@ export async function chatAction(question: string) { return uiStream.value as Promise } -const SAMPLE_TEXT = [ +const SAMPLE_PARTS = [ ` Welcome to the demo of @llamaindex/chat-ui. Let me show you the different types of components that can be triggered from the server. @@ -56,11 +48,11 @@ console.log(c) \`\`\` `, - '\n ### Demo inline annotations \n', + '\n ### Demo parts \n', 'Here are some steps to create a simple wiki app: \n', '1. 
Create package.json file:', { - type: 'artifact', + type: 'data-artifact', data: { type: 'code', created_at: 1717334400000, @@ -82,7 +74,7 @@ console.log(c) }, '2. Check the wiki fetching script:', { - type: 'artifact', + type: 'data-artifact', data: { created_at: 1717334500000, type: 'code', @@ -99,7 +91,7 @@ console.log(c) }, '3. Run getWiki with the search term:', { - type: 'artifact', + type: 'data-artifact', data: { created_at: 1717334600000, type: 'code', @@ -112,7 +104,7 @@ console.log(c) }, '#### 🎯 Demo generating a document artifact', { - type: 'artifact', + type: 'data-artifact', data: { type: 'document', data: { @@ -172,23 +164,12 @@ function fakeChatStream(question: string): ReadableStream { async start(controller) { controller.enqueue(`User question: ${question}. \n `) - for (const item of SAMPLE_TEXT) { - if (typeof item === 'string') { - for (const token of item.split(' ')) { - await new Promise(resolve => setTimeout(resolve, TOKEN_DELAY)) - controller.enqueue(`${token} `) - } - } else { - await new Promise(resolve => setTimeout(resolve, ANNOTATION_DELAY)) - controller.enqueue(item) - } + for (const item of SAMPLE_PARTS) { + await new Promise(resolve => setTimeout(resolve, DELAY)) + controller.enqueue(item) } controller.close() }, }) } - -function toInlineAnnotationCode(item: any) { - return `\n\`\`\`${INLINE_ANNOTATION_KEY}\n${JSON.stringify(item)}\n\`\`\`\n` -} diff --git a/examples/nextjs/app/rsc/ai.ts b/examples/nextjs/app/rsc/ai.ts index c43de824..6eec77bc 100644 --- a/examples/nextjs/app/rsc/ai.ts +++ b/examples/nextjs/app/rsc/ai.ts @@ -1,8 +1,8 @@ 'use server' -import { createAI } from 'ai/rsc' +import { createAI } from '@ai-sdk/rsc' import { chatAction } from './action' -import { Message } from 'ai' +import { UIMessage as Message } from 'ai' import { ReactNode } from 'react' // define AI state and AI provider for RSC app diff --git a/examples/nextjs/app/rsc/display.tsx b/examples/nextjs/app/rsc/display.tsx new file mode 100644 index 
00000000..c1246daf --- /dev/null +++ b/examples/nextjs/app/rsc/display.tsx @@ -0,0 +1,21 @@ +'use client' + +import { + ArtifactPartUI, + ChatPartProvider, + MarkdownPartUI, + MessagePart, +} from '@llamaindex/chat-ui' + +export function MessageDisplay({ parts }: { parts: MessagePart[] }) { + return ( +
+ {parts.map((part, index) => ( + + + + + ))} +
+ ) +} diff --git a/examples/nextjs/app/rsc/page.tsx b/examples/nextjs/app/rsc/page.tsx index c8f5c0d8..b84cbd9d 100644 --- a/examples/nextjs/app/rsc/page.tsx +++ b/examples/nextjs/app/rsc/page.tsx @@ -53,11 +53,7 @@ function CustomChatMessages() { const frontendMessages = messages.map(message => ({ ...message, - display: ( - - {(message as Message & { display: ReactNode }).display} - - ), + display: (message as Message & { display: ReactNode }).display, })) return ( diff --git a/examples/nextjs/app/rsc/use-chat-rsc.tsx b/examples/nextjs/app/rsc/use-chat-rsc.tsx index 3efb5c67..5f5d729b 100644 --- a/examples/nextjs/app/rsc/use-chat-rsc.tsx +++ b/examples/nextjs/app/rsc/use-chat-rsc.tsx @@ -1,7 +1,7 @@ 'use client' -import { generateId, Message } from 'ai' -import { useActions, useUIState } from 'ai/rsc' +import { generateId, TextPart, UIMessage } from 'ai' +import { useActions, useUIState } from '@ai-sdk/rsc' import { useState } from 'react' import { AIProvider } from './ai' import { ChatHandler } from '@llamaindex/chat-ui' @@ -9,52 +9,71 @@ import { ChatHandler } from '@llamaindex/chat-ui' // simple hook to create chat handler from RSC actions // then we can easily use it with @llamaindex/chat-ui export function useChatRSC(): ChatHandler { - const [input, setInput] = useState('') - const [isLoading, setIsLoading] = useState(false) + const [status, setStatus] = useState< + 'submitted' | 'streaming' | 'ready' | 'error' + >('ready') const [messages, setMessages] = useUIState() const { chatAction } = useActions() // similar append function as useChat hook - const append = async (message: Omit) => { - const newMsg: Message = { ...message, id: generateId() } + const append = async (message: Omit) => { + const newMsg: UIMessage = { ...message, id: generateId() } - setIsLoading(true) + setStatus('streaming') try { setMessages(prev => [ ...prev, { ...newMsg, display: ( -
- {message.content} -
+ <> + {message.parts.map((part, index) => { + if (part.type === 'text') { + return ( +
+ {part.text} +
+ ) + } + return null + })} + ), }, ]) - const assistantMsg = await chatAction(newMsg.content) + + const messageContent = newMsg.parts + .filter((part): part is TextPart => part.type === 'text') + .map(part => part.text) + .join('\n\n') + + const assistantMsg = await chatAction(messageContent) setMessages(prev => [ ...prev, { id: generateId(), role: 'assistant', - content: '', + parts: [], display: assistantMsg, }, ]) } catch (error) { console.error(error) + setStatus('error') } - setIsLoading(false) - setInput('') + setStatus('ready') - return message.content + return message } return { - input, - setInput, - isLoading, - append, + sendMessage: async message => { + append(message) + }, + status, messages, setMessages: setMessages as ChatHandler['setMessages'], } diff --git a/examples/nextjs/components/custom-weather.tsx b/examples/nextjs/components/custom-weather.tsx index 8d67a2cc..b2b0ef24 100644 --- a/examples/nextjs/components/custom-weather.tsx +++ b/examples/nextjs/components/custom-weather.tsx @@ -1,8 +1,8 @@ 'use client' -import { useChatMessage, getAnnotationData } from '@llamaindex/chat-ui' +import { usePart } from '@llamaindex/chat-ui' -interface WeatherData { +type WeatherData = { location: string temperature: number condition: string @@ -10,14 +10,18 @@ interface WeatherData { windSpeed: number } -// A custom annotation component that is used to display weather information in a chat message -// The weather data is extracted from annotations in the message that has type 'weather' -export function WeatherAnnotation() { - const { message } = useChatMessage() - const weatherData = getAnnotationData(message, 'weather') +const WeatherPartType = 'data-weather' - if (weatherData.length === 0) return null - return +type WeatherPart = { + type: typeof WeatherPartType + data: WeatherData +} + +// A custom part component that is used to display weather information in a chat message +export function WeatherPart() { + const weatherData = 
usePart(WeatherPartType)?.data + if (!weatherData) return null + return } function WeatherCard({ data }: { data: WeatherData }) { diff --git a/examples/nextjs/components/custom-wiki.tsx b/examples/nextjs/components/custom-wiki.tsx index e25e797e..da8400c0 100644 --- a/examples/nextjs/components/custom-wiki.tsx +++ b/examples/nextjs/components/custom-wiki.tsx @@ -1,6 +1,8 @@ 'use client' -interface WikiData { +import { usePart } from '@llamaindex/chat-ui' + +type WikiData = { title: string summary: string url: string @@ -8,8 +10,21 @@ interface WikiData { lastUpdated: string } +const WikiPartType = 'data-wiki' + +type WikiPart = { + type: typeof WikiPartType + data: WikiData +} + +export function WikiPart() { + const wikiData = usePart(WikiPartType)?.data + if (!wikiData) return null + return +} + // A UI widget that displays wiki information, it can be used inline with markdown text -export function WikiCard({ data }: { data: WikiData }) { +function WikiCard({ data }: { data: WikiData }) { const iconMap: Record = { science: '🧪', history: '📜', diff --git a/examples/nextjs/package.json b/examples/nextjs/package.json index 22794a84..43658eef 100644 --- a/examples/nextjs/package.json +++ b/examples/nextjs/package.json @@ -11,7 +11,9 @@ }, "dependencies": { "@llamaindex/chat-ui": "latest", - "ai": "^4.3.16", + "@ai-sdk/react": "^2.0.4", + "@ai-sdk/rsc": "^1.0.4", + "ai": "^5.0.4", "next": "^15.3.2", "react": "^19.1.0", "react-dom": "^19.1.0" diff --git a/packages/chat-ui/CLAUDE.md b/packages/chat-ui/CLAUDE.md index 9a324628..c4f07f92 100644 --- a/packages/chat-ui/CLAUDE.md +++ b/packages/chat-ui/CLAUDE.md @@ -142,10 +142,14 @@ type ChatHandler = { ```typescript import { ChatSection } from '@llamaindex/chat-ui' -import { useChat } from 'ai/react' +import { useChat } from '@ai-sdk/react' function MyChat() { - const chatHandler = useChat({ api: '/api/chat' }) + const handler = useChat({ + transport: new DefaultChatTransport({ + api: '/api/chat', + }), + }) return } ``` 
diff --git a/packages/chat-ui/package.json b/packages/chat-ui/package.json index 092150f9..b8a7968b 100644 --- a/packages/chat-ui/package.json +++ b/packages/chat-ui/package.json @@ -91,6 +91,7 @@ "remark-gfm": "^3.0.1", "remark-math": "^5.1.1", "remark-parse": "^11.0.0", + "uuid": "^11.1.0", "unist-util-visit": "^5.0.0", "tailwind-merge": "^2.1.0", "vaul": "^0.9.1" diff --git a/packages/chat-ui/src/chat/annotations/annotations.ts b/packages/chat-ui/src/chat/annotations/annotations.ts deleted file mode 100644 index bc809ce0..00000000 --- a/packages/chat-ui/src/chat/annotations/annotations.ts +++ /dev/null @@ -1,28 +0,0 @@ -import { Message } from '../chat.interface' -import { getInlineAnnotations } from './inline' -import { isMessageAnnotation, MessageAnnotation } from './types' -import { getVercelAnnotations } from './vercel' - -/** - * Type for annotation parser functions - */ -type AnnotationParser = (message: Message) => unknown[] - -/** - * Gets all annotation data from a message by type, combining results from multiple parsers - * @param message - The message to extract annotations from - * @param type - The annotation type to filter by (can be standard or custom) - * @param parsers - Array of parser functions to use (defaults to Vercel and inline parsers) - * @returns Array of data from annotations of the specified type from all parsers - */ -export function getAnnotationData( - message: Message, - type: string, - parsers: AnnotationParser[] = [getVercelAnnotations, getInlineAnnotations] -): T[] { - const allAnnotations = parsers - .flatMap(parser => parser(message)) - .filter(a => isMessageAnnotation(a)) as MessageAnnotation[] - - return allAnnotations.filter(a => a.type === type).map(a => a.data) as T[] -} diff --git a/packages/chat-ui/src/chat/annotations/index.ts b/packages/chat-ui/src/chat/annotations/index.ts deleted file mode 100644 index 32e1ce9c..00000000 --- a/packages/chat-ui/src/chat/annotations/index.ts +++ /dev/null @@ -1,3 +0,0 @@ -export * 
from './annotations' -export * from './types' -export * from './inline' diff --git a/packages/chat-ui/src/chat/annotations/inline.ts b/packages/chat-ui/src/chat/annotations/inline.ts deleted file mode 100644 index 6643f856..00000000 --- a/packages/chat-ui/src/chat/annotations/inline.ts +++ /dev/null @@ -1,84 +0,0 @@ -import { remark } from 'remark' -import remarkParse from 'remark-parse' -import { visit } from 'unist-util-visit' -import { Message } from '../chat.interface' -import { isMessageAnnotation, MessageAnnotation } from './types' - -const INLINE_ANNOTATION_KEY = 'annotation' - -// parse Markdown and extract code blocks -function parseMarkdownCodeBlocks(markdown: string) { - const markdownCodeBlocks: { - language: string | null - code: string - }[] = [] - - // Parse Markdown to AST using remark - const processor = remark().use(remarkParse) - const ast = processor.parse(markdown) - - // Visit all code nodes in the AST - visit(ast, 'code', (node: any) => { - markdownCodeBlocks.push({ - language: node.lang || null, // Language is stored in node.lang - code: node.value, // Code content is stored in node.value - }) - }) - - return markdownCodeBlocks -} - -// extract all inline annotations from markdown -export function getInlineAnnotations(message: Message): unknown[] { - const codeBlocks = parseMarkdownCodeBlocks(message.content) - return codeBlocks - .filter(block => block.language === INLINE_ANNOTATION_KEY) - .map(block => tryParse(block.code)) - .filter(Boolean) // filter out null values -} - -// convert annotation to inline markdown -export function toInlineAnnotation(annotation: MessageAnnotation) { - return `\n\`\`\`${INLINE_ANNOTATION_KEY}\n${JSON.stringify(annotation)}\n\`\`\`\n` -} - -/** - * Parses and validates an inline annotation from a code block - * @param language - The language identifier from the markdown code block - * @param codeValue - The raw code content from a markdown code block - * @returns The parsed annotation if valid, null if not an 
annotation or invalid - */ -export function parseInlineAnnotation( - language: string, - codeValue: string -): MessageAnnotation | null { - // Check if this is an inline annotation code block - if (language !== INLINE_ANNOTATION_KEY) { - return null - } - - try { - const annotation = tryParse(codeValue) - - if (annotation === null || !isMessageAnnotation(annotation)) { - console.warn( - `Invalid inline annotation: ${codeValue}, expected an object` - ) - return null - } - - return annotation - } catch (error) { - console.warn(`Failed to parse inline annotation: ${codeValue}`, error) - return null - } -} - -// try to parse the code value as a JSON object and return null if it fails -function tryParse(codeValue: string) { - try { - return JSON.parse(codeValue) - } catch (error) { - return null - } -} diff --git a/packages/chat-ui/src/chat/annotations/types.ts b/packages/chat-ui/src/chat/annotations/types.ts deleted file mode 100644 index 42b3bbbf..00000000 --- a/packages/chat-ui/src/chat/annotations/types.ts +++ /dev/null @@ -1,26 +0,0 @@ -export enum MessageAnnotationType { - IMAGE = 'image', - DOCUMENT_FILE = 'document_file', - SOURCES = 'sources', - EVENTS = 'events', - SUGGESTED_QUESTIONS = 'suggested_questions', - AGENT_EVENTS = 'agent', - ARTIFACT = 'artifact', -} - -export type MessageAnnotation = { - type: string - data: T -} - -export function isMessageAnnotation( - annotation: unknown -): annotation is MessageAnnotation { - return ( - annotation !== null && - typeof annotation === 'object' && - 'type' in annotation && - 'data' in annotation && - typeof (annotation as any).type === 'string' - ) -} diff --git a/packages/chat-ui/src/chat/annotations/vercel.ts b/packages/chat-ui/src/chat/annotations/vercel.ts deleted file mode 100644 index f152a19b..00000000 --- a/packages/chat-ui/src/chat/annotations/vercel.ts +++ /dev/null @@ -1,11 +0,0 @@ -import { Message } from '../chat.interface' - -/** - * Gets annotation data directly from a message by type - * @param 
message - The message to extract annotations from - * @param type - The annotation type to filter by (can be standard or custom) - * @returns Array of data from annotations of the specified type, or null if none found - */ -export function getVercelAnnotations(message: Message): unknown[] { - return message.annotations ?? [] -} diff --git a/packages/chat-ui/src/chat/canvas/artifact-card.tsx b/packages/chat-ui/src/chat/canvas/artifact-card.tsx index 4417db60..ea04ae13 100644 --- a/packages/chat-ui/src/chat/canvas/artifact-card.tsx +++ b/packages/chat-ui/src/chat/canvas/artifact-card.tsx @@ -19,10 +19,12 @@ export function ArtifactCard({ data, getTitle = getCardTitle, iconMap = IconMap, + className, }: { data: Artifact getTitle?: (data: Artifact) => string iconMap?: Record + className?: string }) { const { openArtifactInCanvas, @@ -41,7 +43,8 @@ export function ArtifactCard({
openArtifactInCanvas(data)} > diff --git a/packages/chat-ui/src/chat/canvas/artifacts.ts b/packages/chat-ui/src/chat/canvas/artifacts.ts index c38e1806..432a536f 100644 --- a/packages/chat-ui/src/chat/canvas/artifacts.ts +++ b/packages/chat-ui/src/chat/canvas/artifacts.ts @@ -1,6 +1,6 @@ import { Message } from '../chat.interface' -import { MessageAnnotationType, getAnnotationData } from '../annotations' -import { getInlineAnnotations } from '../annotations/inline' +import { ArtifactPartType } from '../message-parts/types' +import { getParts } from '../message-parts/utils' // check if two artifacts are equal by comparing their type and created time export function isEqualArtifact(a: Artifact, b: Artifact) { @@ -15,9 +15,7 @@ export function extractArtifactsFromAllMessages(messages: Message[]) { } export function extractArtifactsFromMessage(message: Message): Artifact[] { - return getAnnotationData(message, MessageAnnotationType.ARTIFACT, [ - getInlineAnnotations, // only extract artifacts from inline annotations - ]) + return getParts(message, ArtifactPartType).map(part => part.data) } export type CodeArtifactError = { diff --git a/packages/chat-ui/src/chat/canvas/context.tsx b/packages/chat-ui/src/chat/canvas/context.tsx index 7fffca80..308751d0 100644 --- a/packages/chat-ui/src/chat/canvas/context.tsx +++ b/packages/chat-ui/src/chat/canvas/context.tsx @@ -19,7 +19,7 @@ import { } from './artifacts' import { Message } from '../chat.interface' import { useChatUI } from '../chat.context' -import { toInlineAnnotation } from '../annotations' +import { v4 as uuid } from 'uuid' interface ChatCanvasContextType { allArtifacts: Artifact[] @@ -51,7 +51,8 @@ export function ChatCanvasProvider({ children: ReactNode autoOpenCanvas?: boolean }) { - const { messages, isLoading, append, requestData, setMessages } = useChatUI() + const { messages, isLoading, sendMessage, requestData, setMessages } = + useChatUI() const [isCanvasOpen, setIsCanvasOpen] = useState(false) // whether 
the canvas is open const [displayedArtifact, setDisplayedArtifact] = useState() // the artifact currently displayed in the canvas @@ -107,19 +108,33 @@ export function ChatCanvasProvider({ created_at: Date.now(), } - const newMessages = [ + const newMessages: Message[] = [ ...messages, { id: `restore-msg-${Date.now()}`, role: 'user', - content: `Restore to ${artifact.type} version ${getArtifactVersion(artifact).versionNumber}`, + parts: [ + { + type: 'text', + text: `Restore to ${artifact.type} version ${getArtifactVersion(artifact).versionNumber}`, + }, + ], }, { id: `restore-success-${Date.now()}`, role: 'assistant', - content: `Successfully restored to ${artifact.type} version ${getArtifactVersion(artifact).versionNumber}${toInlineAnnotation({ type: 'artifact', data: newArtifact })}`, + parts: [ + { + type: 'text', + text: `Successfully restored to ${artifact.type} version ${getArtifactVersion(artifact).versionNumber}`, + }, + { + type: 'data-artifact', + data: newArtifact, + }, + ], }, - ] as (Message & { id: string })[] + ] setMessages(newMessages) @@ -158,17 +173,33 @@ export function ChatCanvasProvider({ if (!newArtifact) return - const newMessages = [ + const newMessages: Message[] = [ ...messages, { + id: uuid(), role: 'user', - content: `Update content for ${artifact.type} artifact version ${getArtifactVersion(artifact).versionNumber}`, + parts: [ + { + type: 'text', + text: `Update content for ${artifact.type} artifact version ${getArtifactVersion(artifact).versionNumber}`, + }, + ], }, { + id: uuid(), role: 'assistant', - content: `Updated content for ${artifact.type} artifact version ${getArtifactVersion(artifact).versionNumber}${toInlineAnnotation({ type: 'artifact', data: newArtifact })}`, + parts: [ + { + type: 'text', + text: `Updated content for ${artifact.type} artifact version ${getArtifactVersion(artifact).versionNumber}`, + }, + { + type: 'data-artifact', + data: newArtifact, + }, + ], }, - ] as (Message & { id: string })[] + ] 
setMessages(newMessages) openArtifactInCanvas(newArtifact) @@ -201,12 +232,18 @@ export function ChatCanvasProvider({ const fixCodeErrors = (artifact: CodeArtifact) => { const errors = getCodeErrors(artifact) if (errors.length === 0) return - append( + sendMessage( { + id: uuid(), role: 'user', - content: `Please fix the following errors: ${errors.join('\n')} happened when running the code.`, + parts: [ + { + type: 'text', + text: `Please fix the following errors: ${errors.join('\n')} happened when running the code.`, + }, + ], }, - { data: requestData } + { body: requestData } ) } diff --git a/packages/chat-ui/src/chat/chat-annotations.tsx b/packages/chat-ui/src/chat/chat-annotations.tsx deleted file mode 100644 index eb2a328e..00000000 --- a/packages/chat-ui/src/chat/chat-annotations.tsx +++ /dev/null @@ -1,122 +0,0 @@ -import { - ChatAgentEvents, - ChatEvents, - ChatFiles, - ChatImage, - ChatSources, - EventData, - ImageData, - DocumentFileData, - AgentEventData, - SuggestedQuestionsData, - SuggestedQuestions, - SourceData, - SourceNode, -} from '../widgets/index.js' // this import needs the file extension as it's importing the widget bundle -import { MessageAnnotationType } from './annotations/types.js' -import { getAnnotationData } from './annotations/annotations.js' -import { useChatMessage } from './chat-message.context.js' -import { useChatUI } from './chat.context.js' -import { Message } from './chat.interface.js' - -export function EventAnnotations() { - const { message, isLast, isLoading } = useChatMessage() - const showLoading = (isLast && isLoading) ?? 
false - - const eventData = getAnnotationData( - message, - MessageAnnotationType.EVENTS - ) - if (eventData.length === 0) return null - return -} - -export function AgentEventAnnotations() { - const { message, isLast } = useChatMessage() - - const agentEventData = getAnnotationData( - message, - MessageAnnotationType.AGENT_EVENTS - ) - if (agentEventData.length === 0) return null - return ( - - ) -} - -export function ImageAnnotations() { - const { message } = useChatMessage() - - const imageData = getAnnotationData(message, 'image') - if (imageData.length === 0) return null - return -} - -export function DocumentFileAnnotations() { - const { message } = useChatMessage() - - const contentFileData = getAnnotationData( - message, - MessageAnnotationType.DOCUMENT_FILE - ) - if (contentFileData.length === 0) return null - - const alignmentClass = message.role === 'user' ? 'ml-auto' : 'mr-auto' - return -} - -function preprocessSourceNodes(nodes: SourceNode[]): SourceNode[] { - // Filter source nodes has lower score - const processedNodes = nodes.map(node => { - // remove trailing slash for node url if exists - if (node.url) { - node.url = node.url.replace(/\/$/, '') - } - return node - }) - return processedNodes -} - -export function getSourceNodes(message: Message): SourceNode[] { - const data = getAnnotationData( - message, - MessageAnnotationType.SOURCES - ) - return data - .map(item => ({ - ...item, - nodes: item.nodes ? 
preprocessSourceNodes(item.nodes) : [], - })) - .flatMap(item => item.nodes) -} - -export function SourceAnnotations() { - const { message } = useChatMessage() - - const nodes = getSourceNodes(message) - if (nodes.length === 0) return null - return -} - -export function SuggestedQuestionsAnnotations() { - const { append, requestData } = useChatUI() - const { message, isLast } = useChatMessage() - if (!isLast || !append) return null - - const suggestedQuestionsData = getAnnotationData( - message, - MessageAnnotationType.SUGGESTED_QUESTIONS - ) - if (suggestedQuestionsData.length === 0) return null - return ( - - ) -} diff --git a/packages/chat-ui/src/chat/chat-input.tsx b/packages/chat-ui/src/chat/chat-input.tsx index 9b39303b..352fb049 100644 --- a/packages/chat-ui/src/chat/chat-input.tsx +++ b/packages/chat-ui/src/chat/chat-input.tsx @@ -6,11 +6,13 @@ import { Textarea } from '../ui/textarea' import { FileUploader } from '../widgets/index.js' // this import needs the file extension as it's importing the widget bundle import { useChatUI } from './chat.context' import { Message } from './chat.interface' +import { v4 as uuidv4 } from 'uuid' +import { MessagePart } from './message-parts' interface ChatInputProps extends React.PropsWithChildren { className?: string resetUploadedFiles?: () => void - annotations?: any + attachments?: MessagePart[] } interface ChatInputFormProps extends React.PropsWithChildren { @@ -55,21 +57,21 @@ export const useChatInput = () => { } function ChatInput(props: ChatInputProps) { - const { input, setInput, append, isLoading, requestData } = useChatUI() + const { input, setInput, sendMessage, isLoading, requestData } = useChatUI() const isDisabled = isLoading || !input.trim() const [isComposing, setIsComposing] = useState(false) const submit = async () => { - const newMessage: Omit = { + const newMessage: Message = { + id: uuidv4(), role: 'user', - content: input, - annotations: props.annotations, + parts: [{ type: 'text', text: input }, 
...(props.attachments ?? [])], } setInput('') // Clear the input props.resetUploadedFiles?.() // Reset the uploaded files - await append(newMessage, { data: requestData }) + await sendMessage(newMessage, { body: requestData }) } const handleSubmit = async (e: React.FormEvent) => { diff --git a/packages/chat-ui/src/chat/chat-message.context.ts b/packages/chat-ui/src/chat/chat-message.context.ts index f78e645f..887d0f22 100644 --- a/packages/chat-ui/src/chat/chat-message.context.ts +++ b/packages/chat-ui/src/chat/chat-message.context.ts @@ -1,11 +1,9 @@ import { createContext, useContext } from 'react' -import { ChatHandler, Message } from './chat.interface' +import { Message } from './chat.interface' export interface ChatMessageContext { message: Message isLast: boolean - isLoading?: boolean - append?: ChatHandler['append'] } export const chatMessageContext = createContext(null) diff --git a/packages/chat-ui/src/chat/chat-message.tsx b/packages/chat-ui/src/chat/chat-message.tsx index 37fa1ce4..1417e438 100644 --- a/packages/chat-ui/src/chat/chat-message.tsx +++ b/packages/chat-ui/src/chat/chat-message.tsx @@ -1,82 +1,46 @@ import { Bot, Check, Copy, RefreshCw } from 'lucide-react' -import { ComponentType, memo, useMemo } from 'react' +import { memo } from 'react' import { useCopyToClipboard } from '../hook/use-copy-to-clipboard' import { cn } from '../lib/utils' import { Button } from '../ui/button' -import { - CitationComponentProps, - Markdown, - LanguageRendererProps, -} from '../widgets/index.js' -import { - AgentEventAnnotations, - DocumentFileAnnotations, - EventAnnotations, - ImageAnnotations, - SourceAnnotations, - SuggestedQuestionsAnnotations, - getSourceNodes, -} from './chat-annotations' import { ChatMessageProvider, useChatMessage } from './chat-message.context.js' import { useChatUI } from './chat.context.js' -import { ChatHandler, Message } from './chat.interface' -import { defaultAnnotationRenderers } from './chat-renderers.js' +import { Message } 
from './chat.interface' +import { + ArtifactPartUI, + EventPartUI, + FilePartUI, + MarkdownPartUI, + SourcesPartUI, + SuggestionPartUI, + TextPart, + TextPartType, +} from './message-parts' +import { ChatPartProvider } from './message-parts/context.js' interface ChatMessageProps extends React.PropsWithChildren { message: Message isLast: boolean className?: string - isLoading?: boolean - append?: ChatHandler['append'] } interface ChatMessageAvatarProps extends React.PropsWithChildren { className?: string } -export enum ContentPosition { - TOP = -9999, - CHAT_EVENTS = 0, - AFTER_EVENTS = 1, - CHAT_AGENT_EVENTS = 2, - AFTER_AGENT_EVENTS = 3, - CHAT_IMAGE = 4, - AFTER_IMAGE = 5, - BEFORE_MARKDOWN = 6, - MARKDOWN = 7, - AFTER_MARKDOWN = 8, - CHAT_DOCUMENT_FILES = 9, - AFTER_DOCUMENT_FILES = 10, - CHAT_SOURCES = 11, - AFTER_SOURCES = 12, - SUGGESTED_QUESTIONS = 13, - AFTER_SUGGESTED_QUESTIONS = 14, - BOTTOM = 9999, -} - interface ChatMessageContentProps extends React.PropsWithChildren { className?: string - isLoading?: boolean - append?: ChatHandler['append'] - message?: Message // in case you want to customize the message } interface ChatMessageActionsProps extends React.PropsWithChildren { className?: string } -interface ChatMarkdownProps extends React.PropsWithChildren { - citationComponent?: ComponentType - className?: string - languageRenderers?: Record> - annotationRenderers?: Record> -} - function ChatMessage(props: ChatMessageProps) { const children = props.children ?? ( <> - + ) @@ -86,8 +50,6 @@ function ChatMessage(props: ChatMessageProps) { value={{ message: props.message, isLast: props.isLast, - isLoading: props.isLoading, - append: props.append, }} >
@@ -117,65 +79,50 @@ function ChatMessageAvatar(props: ChatMessageAvatarProps) { } function ChatMessageContent(props: ChatMessageContentProps) { + const { message } = useChatMessage() const children = props.children ?? ( <> - - - - - - - + + + + + + ) return (
- {children} + {message.parts.map((part, index) => ( + + {children} + + ))}
) } -function ChatMarkdown(props: ChatMarkdownProps) { - const { message } = useChatMessage() - - const nodes = useMemo(() => getSourceNodes(message), [message]) - - return ( - - ) -} - function ChatMessageActions(props: ChatMessageActionsProps) { - const { reload, requestData, isLoading } = useChatUI() + const { regenerate, requestData, isLoading } = useChatUI() const { isCopied, copyToClipboard } = useCopyToClipboard({ timeout: 2000 }) const { message, isLast } = useChatMessage() if (message.role !== 'assistant') return null const isLastMessageFromAssistant = message.role === 'assistant' && isLast - const showReload = reload && !isLoading && isLastMessageFromAssistant + const showReload = regenerate && !isLoading && isLastMessageFromAssistant + + // content to copy is all text parts joined by newlines + const messageTextContent = message.parts + .filter((part): part is TextPart => part.type === TextPartType) + .map(part => part.text) + .join('\n\n') const children = props.children ?? ( <> - - -
- -
- - - ) -} - -function mergeAdjacentEvents(events: AgentEventData[]): MergedEvent[] { - const mergedEvents: MergedEvent[] = [] - - for (const event of events) { - const lastMergedEvent = mergedEvents[mergedEvents.length - 1] - - const eventStep: StepText | StepProgress = event.data - ? ({ - text: event.text, - progress: event.data, - } as StepProgress) - : ({ - text: event.text, - } as StepText) - - if (lastMergedEvent && lastMergedEvent.agent === event.agent) { - lastMergedEvent.steps.push(eventStep) - } else { - mergedEvents.push({ - agent: event.agent, - steps: [eventStep], - icon: AgentIcons[event.agent.toLowerCase()] ?? icons.Bot, - }) - } - } - - return mergedEvents -} diff --git a/packages/chat-ui/src/widgets/chat-event.tsx b/packages/chat-ui/src/widgets/chat-event.tsx new file mode 100644 index 00000000..d67fe179 --- /dev/null +++ b/packages/chat-ui/src/widgets/chat-event.tsx @@ -0,0 +1,103 @@ +import { + ChevronDown, + ChevronRight, + CheckCircle, + XCircle, + Loader2, +} from 'lucide-react' +import { useState } from 'react' +import { Button } from '../ui/button' +import { + Collapsible, + CollapsibleContent, + CollapsibleTrigger, +} from '../ui/collapsible' +import { cn } from '../lib/utils' + +export type ChatEvent = { + title: string + description?: string + status: 'pending' | 'success' | 'error' + data?: any +} + +export function ChatEvent({ + event, + className, + renderData, +}: { + event: ChatEvent + className?: string + renderData?: (data: ChatEvent['data']) => React.ReactNode +}) { + const [isDataOpen, setIsDataOpen] = useState(false) + + const getStatusIcon = () => { + switch (event.status) { + case 'pending': + return + case 'success': + return + case 'error': + return + } + } + + const getStatusColor = () => { + switch (event.status) { + case 'pending': + return 'border-yellow-400' + case 'success': + return 'border-green-400' + case 'error': + return 'border-red-400' + } + } + + return ( +
+ {/* Header with title and status */} +
+
+

{event.title}

+ {event.description && ( +

+ {event.description} +

+ )} +
+
+ {getStatusIcon()} + {event.status} +
+
+ + {/* Data section if data exists */} + {event.data && ( +
+ + + + + + {renderData ? ( + renderData(event.data) + ) : ( +
+                  {JSON.stringify(event.data, null, 2)}
+                
+ )} +
+
+
+ )} +
+ ) +} diff --git a/packages/chat-ui/src/widgets/chat-events.tsx b/packages/chat-ui/src/widgets/chat-events.tsx deleted file mode 100644 index a4b05db5..00000000 --- a/packages/chat-ui/src/widgets/chat-events.tsx +++ /dev/null @@ -1,53 +0,0 @@ -import { ChevronDown, ChevronRight, Loader2 } from 'lucide-react' -import { useState } from 'react' -import { Button } from '../ui/button' -import { - Collapsible, - CollapsibleContent, - CollapsibleTrigger, -} from '../ui/collapsible' - -export type EventData = { - title: string -} - -export function ChatEvents({ - data, - showLoading, -}: { - data: EventData[] - showLoading: boolean -}) { - const [isOpen, setIsOpen] = useState(false) - - const buttonLabel = isOpen ? 'Hide events' : 'Show events' - - const EventIcon = isOpen ? ( - - ) : ( - - ) - - return ( -
- - - - - -
- {data.map((eventItem, index) => ( -
- {eventItem.title} -
- ))} -
-
-
-
- ) -} diff --git a/packages/chat-ui/src/widgets/chat-file.tsx b/packages/chat-ui/src/widgets/chat-file.tsx new file mode 100644 index 00000000..0dd976af --- /dev/null +++ b/packages/chat-ui/src/widgets/chat-file.tsx @@ -0,0 +1,72 @@ +import { FileIcon } from 'lucide-react' +import { cn } from '../lib/utils' +import { DocxIcon } from '../ui/icons/docx' +import { PDFIcon } from '../ui/icons/pdf' +import { SheetIcon } from '../ui/icons/sheet' +import { TxtIcon } from '../ui/icons/txt' + +export type FileData = { + filename: string + mediaType: string // https://www.iana.org/assignments/media-types/media-types.xhtml + url: string // can be a URL to a hosted file or a [Data URL](https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URLs). +} + +const FileIconMap: Record = { + csv: , + pdf: , + docx: , + txt: , +} + +export function ChatFile({ + file, + className, +}: { + file: FileData + className?: string +}) { + const isImage = isImageFile(file) + const fileExtension = getFileExtension(file.filename) + + const handleClick = () => { + if (file.url) { + window.open(file.url, '_blank', 'noopener,noreferrer') + } + } + + return ( +
+
+ {file.url && isImage ? ( + uploaded-image + ) : ( +
+ {FileIconMap[fileExtension] ?? } +
+ )} +
+
{file.filename}
+
+ ) +} + +// Helper function to check if file is an image +function isImageFile(file: FileData): boolean { + return file.mediaType.startsWith('image/') +} + +// Helper function to get file extension +function getFileExtension(fileName: string): string { + return fileName.split('.').pop()?.toLowerCase() ?? '' +} diff --git a/packages/chat-ui/src/widgets/chat-files.tsx b/packages/chat-ui/src/widgets/chat-files.tsx deleted file mode 100644 index c523cf6f..00000000 --- a/packages/chat-ui/src/widgets/chat-files.tsx +++ /dev/null @@ -1,27 +0,0 @@ -import { DocumentInfo, DocumentFile } from './document-info' -import { cn } from '../lib/utils' - -export type DocumentFileData = { - files: DocumentFile[] -} - -export function ChatFiles({ - data, - className, -}: { - data: DocumentFileData - className?: string -}) { - if (!data.files.length) return null - return ( -
- {data.files.map(file => ( - - ))} -
- ) -} diff --git a/packages/chat-ui/src/widgets/chat-image.tsx b/packages/chat-ui/src/widgets/chat-image.tsx deleted file mode 100644 index c5ab393a..00000000 --- a/packages/chat-ui/src/widgets/chat-image.tsx +++ /dev/null @@ -1,11 +0,0 @@ -export type ImageData = { - url: string -} - -export function ChatImage({ data }: { data: ImageData }) { - return ( -
- chat_image -
- ) -} diff --git a/packages/chat-ui/src/widgets/chat-sources.tsx b/packages/chat-ui/src/widgets/chat-sources.tsx index a48b259d..86cf32ac 100644 --- a/packages/chat-ui/src/widgets/chat-sources.tsx +++ b/packages/chat-ui/src/widgets/chat-sources.tsx @@ -1,11 +1,30 @@ import { useMemo } from 'react' import { Document, DocumentInfo, SourceNode } from './document-info' +import { cn } from '../lib/utils' export type SourceData = { nodes: SourceNode[] } -export function ChatSources({ data }: { data: SourceData }) { +export function preprocessSourceNodes(nodes: SourceNode[]): SourceNode[] { + // Filter source nodes has lower score + const processedNodes = nodes.map(node => { + // remove trailing slash for node url if exists + if (node.url) { + node.url = node.url.replace(/\/$/, '') + } + return node + }) + return processedNodes +} + +export function ChatSources({ + data, + className, +}: { + data: SourceData + className?: string +}) { const documents: Document[] = useMemo(() => { // group nodes by document (a document must have a URL) const nodesByUrl: Record = {} @@ -27,7 +46,7 @@ export function ChatSources({ data }: { data: SourceData }) { if (documents.length === 0) return null return ( -
+
Sources:
{documents.map((document, index) => { diff --git a/packages/chat-ui/src/widgets/index.tsx b/packages/chat-ui/src/widgets/index.tsx index b1460277..95e4233f 100644 --- a/packages/chat-ui/src/widgets/index.tsx +++ b/packages/chat-ui/src/widgets/index.tsx @@ -1,10 +1,8 @@ 'use client' // Other useful components -export * from './chat-agent-events' -export * from './chat-events' -export * from './chat-files' -export * from './chat-image' +export * from './chat-event' +export * from './chat-file' export * from './chat-sources' export * from './markdown' export * from './codeblock' diff --git a/packages/chat-ui/src/widgets/markdown.tsx b/packages/chat-ui/src/widgets/markdown.tsx index b4b8a7ad..1cb75073 100644 --- a/packages/chat-ui/src/widgets/markdown.tsx +++ b/packages/chat-ui/src/widgets/markdown.tsx @@ -8,7 +8,6 @@ import { DocumentInfo } from './document-info' import { SourceData } from './chat-sources' import { Citation, CitationComponentProps } from './citation' import { cn } from '../lib/utils' -import { parseInlineAnnotation } from '../chat/annotations/inline' const MemoizedReactMarkdown: FC = memo( ReactMarkdown, @@ -118,7 +117,6 @@ export function Markdown({ citationComponent: CitationComponent, className: customClassName, languageRenderers, - annotationRenderers, }: { content: string sources?: SourceData @@ -126,7 +124,6 @@ export function Markdown({ citationComponent?: ComponentType className?: string languageRenderers?: Record> - annotationRenderers?: Record> }) { const processedContent = preprocessContent(content) @@ -158,30 +155,6 @@ export function Markdown({ const language = (match && match[1]) || '' const codeValue = String(children).replace(/\n$/, '') - const annotation = parseInlineAnnotation(language, codeValue) - - if (annotation) { - // Check if we have a specific renderer for it - if (annotationRenderers?.[annotation.type]) { - const CustomRenderer = annotationRenderers[annotation.type] - return ( -
- -
- ) - } - - // If no custom renderer found, render an error message - return ( -
-
- Annotation Render Error: No renderer found - for annotation type “{annotation.type}”. -
-
- ) - } - if (inline) { return ( diff --git a/packages/chat-ui/src/widgets/starter-questions.tsx b/packages/chat-ui/src/widgets/starter-questions.tsx index 5d20f21f..c1a07a0b 100644 --- a/packages/chat-ui/src/widgets/starter-questions.tsx +++ b/packages/chat-ui/src/widgets/starter-questions.tsx @@ -1,10 +1,11 @@ -import { ChatHandler } from '../chat/chat.interface' +import { ChatContext } from '../chat/chat.interface' import { cn } from '../lib/utils' import { Button } from '../ui/button' +import { v4 as uuidv4 } from 'uuid' interface StarterQuestionsProps { questions: string[] - append: ChatHandler['append'] + sendMessage: ChatContext['sendMessage'] className?: string } @@ -16,7 +17,13 @@ export function StarterQuestions(props: StarterQuestionsProps) {