diff --git a/examples/tauri-postgres/src-tauri/src/main.rs b/examples/tauri-postgres/src-tauri/src/main.rs index cc3f2ae731..0b496cc09c 100644 --- a/examples/tauri-postgres/src-tauri/src/main.rs +++ b/examples/tauri-postgres/src-tauri/src/main.rs @@ -300,13 +300,13 @@ async fn tauri_init_command( *connection.db.lock().await = Some(pg); *connection.conn.lock().await = Some(conn); - app_handle.emit_all("downloading_ollama_model", "llama2").unwrap(); + app_handle.emit_all("loading_ollama", "llama2").unwrap(); *connection.llama.lock().await = Some(Ollama::new("http://127.0.0.1".to_string(), ollama_port)); - app_handle.emit_all("downloaded_ollama_model", "llama2").unwrap(); + app_handle.emit_all("loaded_ollama", "llama2").unwrap(); - app_handle.emit_all("downloading_fastembed_model", "bge-fast-en").unwrap(); + app_handle.emit_all("loading_fastembed", "bge-fast-en").unwrap(); *connection.flag_embedding.lock().await = Some(create_embedding_model(resource_path_pgdir)); - app_handle.emit_all("downloaded_fastembed_model", "bge-fast-en").unwrap(); + app_handle.emit_all("loaded_fastembed", "bge-fast-en").unwrap(); Ok(()) diff --git a/examples/tauri-postgres/src/App.tsx b/examples/tauri-postgres/src/App.tsx index 9408a8dd5d..ac7e9ba400 100644 --- a/examples/tauri-postgres/src/App.tsx +++ b/examples/tauri-postgres/src/App.tsx @@ -3,6 +3,7 @@ import { attachConsole } from "tauri-plugin-log-api"; attachConsole(); import { listen } from "@tauri-apps/api/event"; +import { Command } from "@tauri-apps/api/shell"; import "animate.css/animate.min.css"; import Board from "./pages/Board"; import { useEffect, useState, createContext } from "react"; @@ -34,8 +35,9 @@ const App = () => { const [electric, setElectric] = useState(); const [showMenu, setShowMenu] = useState(false); const [synced, setSynced] = useState(false); - const [ollamaDownloaded, setOllamaDownloaded] = useState(false); - const [fastembedDownloaded, setFastembedDownloaded] = useState(false); + const [ollamaLoaded, 
setOllamaLoaded] = useState(false); + const [llama2Downloaded, setLlama2Downloaded] = useState(false); + const [fastembedLoaded, setFastembedLoaded] = useState(false); useEffect(() => { const init = async () => { @@ -56,30 +58,30 @@ const App = () => { }, []); useEffect(() => { - let unListenOllamaDownloaded: null | (() => void) = null; - let unListenFastembedDownloaded: null | (() => void) = null; + let unListenOllamaLoaded: null | (() => void) = null; + let unListenFastembedLoaded: null | (() => void) = null; let ignore = false; const init = async () => { - unListenOllamaDownloaded = await listen( - "downloaded_ollama_model", + unListenOllamaLoaded = await listen( + "loaded_ollama", (event) => { if (ignore) return; - setOllamaDownloaded(true); + setOllamaLoaded(true); } ); - unListenFastembedDownloaded = await listen( - "downloading_fastembed_model", + unListenFastembedLoaded = await listen( + "loaded_fastembed", (event) => { if (ignore) return; - setFastembedDownloaded(true); + setFastembedLoaded(true); } ); if (ignore) { - unListenOllamaDownloaded?.(); - unListenOllamaDownloaded = null; - unListenFastembedDownloaded?.(); - unListenFastembedDownloaded = null; + unListenOllamaLoaded?.(); + unListenOllamaLoaded = null; + unListenFastembedLoaded?.(); + unListenFastembedLoaded = null; } }; @@ -87,18 +89,36 @@ const App = () => { return () => { ignore = true; - unListenOllamaDownloaded?.(); - unListenOllamaDownloaded = null; - unListenFastembedDownloaded?.(); - unListenFastembedDownloaded = null; + unListenOllamaLoaded?.(); + unListenOllamaLoaded = null; + unListenFastembedLoaded?.(); + unListenFastembedLoaded = null; }; }, []); + useEffect(() => { + if (!ollamaLoaded) return; + let ignore = false; + const init = async () => { + console.log("pulling llama2") + const command = Command.sidecar('ollama', ["pull", "llama2"]) + await command.execute() + console.log("pulled llama2") + if (ignore) return; + setLlama2Downloaded(true) + } + init() + return () => { + ignore = 
true; + } + }, [ollamaLoaded]) + if ( electric === undefined || !synced || - !ollamaDownloaded || - !fastembedDownloaded + !ollamaLoaded || + !fastembedLoaded || + !llama2Downloaded ) { return (
@@ -110,14 +130,19 @@ const App = () => { {!synced && (
Syncing Issues...
)} - {!ollamaDownloaded && ( + {!ollamaLoaded && ( +
+ Loading Ollama... +
+ )} + {(ollamaLoaded && !llama2Downloaded) && (
- Downloading Ollama Model... + Downloading Llama2...
)} - {!fastembedDownloaded && ( + {!fastembedLoaded && (
- Downloading FastEmbed Model... + Loading FastEmbed...
)}
diff --git a/examples/tauri-postgres/src/pages/Chat/index.tsx b/examples/tauri-postgres/src/pages/Chat/index.tsx index 5bf14189ec..f312eda5be 100644 --- a/examples/tauri-postgres/src/pages/Chat/index.tsx +++ b/examples/tauri-postgres/src/pages/Chat/index.tsx @@ -26,12 +26,16 @@ function Chat() { SELECT title, description FROM issue INNER JOIN document ON document.issue_id = issue.id ORDER BY document.embeddings <=> '${embedding}' - LIMIT 5; + LIMIT 50; `, }); const context = issues - .map((issue: any) => `${issue.title}\n${issue.description}`) - .join("\n\n\n"); + .map( + (issue: any) => + `# [${issue.title}](/issue/${issue.id})\n${issue.description}` + ) + .join("\n---\n\n") + .slice(0, 4 * 4096 - (100 + question.length)); // 4096 token limit, tokens are ~4 characters console.log("startChat", { question: question, context: context ?? "" }); invoke("start_chat", { question: question, context: context ?? "" }); }; @@ -117,7 +121,9 @@ function Chat() { >
{working && answer.length === 0 ? ( -
+
+ +
) : ( {answerText} )} diff --git a/examples/tauri-postgres/src/utils/filterState.ts b/examples/tauri-postgres/src/utils/filterState.ts index d16e37e4c8..6e9ec457d8 100644 --- a/examples/tauri-postgres/src/utils/filterState.ts +++ b/examples/tauri-postgres/src/utils/filterState.ts @@ -27,7 +27,7 @@ export function useFilterState(): [ .flat(); const query = searchParams.get("query"); const searchType = - (searchParams.get("searchType") as "basic" | "vector") ?? "basic"; + (searchParams.get("searchType") as "basic" | "vector") ?? "vector"; const state = { orderBy,