Skip to content

Commit

Permalink
Merge pull request #28 from tyllenb/main
Browse files Browse the repository at this point in the history
Add OpenRouter for access to more AI Models
  • Loading branch information
adamcohenhillel authored Feb 13, 2024
2 parents da320fb + 7247994 commit 7fef4fa
Show file tree
Hide file tree
Showing 4 changed files with 89 additions and 59 deletions.
80 changes: 39 additions & 41 deletions app/src/components/Chat.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -7,10 +7,7 @@ import PromptForm from "./PromptForm";
import { toast } from "sonner";
import NewConversationButton from "./NewConversationButton";
import { NavMenu } from "./NavMenu";
import {
useQuery,
useMutation,
} from '@tanstack/react-query'
import { useQuery, useMutation } from "@tanstack/react-query";
import SideMenu from "./SideMenu";

export default function Chat({
Expand All @@ -25,44 +22,48 @@ export default function Chat({
const [messages, setMessages] = useState<Message[]>([]);
const [conversationId, setConversationId] = useState<number | null>(null);
const [waitingForResponse, setWaitingForResponse] = useState(false);

const sendMessageAndReceiveResponse = useMutation({
mutationFn: async (userMessage: Message) => {
const { data: sendMessageData, error: sendMessageError } = await supabaseClient
.from('conversations')
.update({ context: [...messages, userMessage] })
.eq('id', conversationId);

const { data: sendMessageData, error: sendMessageError } =
await supabaseClient
.from("conversations")
.update({ context: [...messages, userMessage] })
.eq("id", conversationId);

if (sendMessageError) throw sendMessageError;

setMessages([...messages, userMessage]);
setWaitingForResponse(true);

const { data: aiResponseData, error: aiResponseError } = await supabaseClient.functions.invoke("chat", {
body: { messageHistory: [...messages, userMessage] },
});
const { data: aiResponseData, error: aiResponseError } =
await supabaseClient.functions.invoke("chat", {
body: { messageHistory: [...messages, userMessage] },
});

if (aiResponseError) throw aiResponseError;

const {data: updateConversationData, error: updateConversationError} = await supabaseClient
.from('conversations')
.update({ context: [...messages, userMessage, aiResponseData.msg] })
.eq('id', conversationId);


const { data: updateConversationData, error: updateConversationError } =
await supabaseClient
.from("conversations")
.update({ context: [...messages, userMessage, aiResponseData.msg] })
.eq("id", conversationId);

if (updateConversationError) throw updateConversationError;

return aiResponseData;
},
onError: (error) => {
toast.error(error.message || "Unknown error");
setWaitingForResponse(false);
},
onSuccess: (aiResponse) => {
setMessages(currentMessages => {
setMessages((currentMessages) => {
return [...currentMessages, aiResponse.msg as Message];
});

setWaitingForResponse(false);
}
},
});

const newConversation = useMutation({
Expand Down Expand Up @@ -94,16 +95,16 @@ export default function Chat({
setConversationId(data[0].id);
setWaitingForResponse(false);
},
})
});

const getConversation = useQuery({
queryKey: ['conversation', conversationId],
queryKey: ["conversation", conversationId],
queryFn: async () => {
if (conversationId === null) {
const { data, error } = await supabaseClient
.from('conversations')
.select('*')
.order('created_at', { ascending: false })
.from("conversations")
.select("*")
.order("created_at", { ascending: false })
.limit(1);
if (error) {
throw error;
Expand All @@ -118,17 +119,17 @@ export default function Chat({
} else {
setMessages([]);
const { data, error } = await supabaseClient
.from('conversations')
.select('*')
.eq('id', conversationId)
.from("conversations")
.select("*")
.eq("id", conversationId)
.single();
if (error) {
throw error;
}
return data;
}
}
})
},
});

useEffect(() => {
if (getConversation.data) {
Expand All @@ -147,10 +148,10 @@ export default function Chat({
<>
<div className="h-24 bg-gradient-to-b from-background flex justify-between items-center fixed top-0 w-full"></div>
<div className="fixed flex space-x-4 top-4 left-4">
<SideMenu
supabaseClient={supabaseClient}
setConversationId={setConversationId}
/>
<SideMenu
supabaseClient={supabaseClient}
setConversationId={setConversationId}
/>
</div>
<div className="fixed flex space-x-4 top-4 right-4">
<NavMenu>
Expand All @@ -165,10 +166,7 @@ export default function Chat({
</div>

<div className="p-8 mt-12 mb-32">
<ChatLog
messages={messages}
waitingForResponse={waitingForResponse}
/>
<ChatLog messages={messages} waitingForResponse={waitingForResponse} />
</div>

<div ref={bottomRef} />
Expand Down
3 changes: 2 additions & 1 deletion docs/getting_started.md
Original file line number Diff line number Diff line change
Expand Up @@ -60,7 +60,8 @@ We will use Supabase as our database (with vector search, pgvector), authenticat
10. Now that we have the CLI, we need to log in with our Supabase account by running `supabase login` - this should pop up a browser window, which should prompt you through the auth
11. And link our Supabase CLI to a specific project, our newly created one, by running `supabase link --project-ref <your-project-id>` (you can check what the project id is from the Supabase web UI, or by running `supabase projects list`, and it will be under "reference id") - you can skip (enter) the database password, it's not needed.
12. Now let's deploy our functions! ([see guide for more details](https://supabase.com/docs/guides/functions/deploy)) `supabase functions deploy --no-verify-jwt` (see [issue re:security](https://github.com/adamcohenhillel/AdDeus/issues/3))
13. Lastly - if you're planning to first use OpenAI as your Foundation model provider, then you'd need to also run the following command, to make sure the functions have everything they need to run properly: `supabase secrets set OPENAI_API_KEY=<your-openai-api-key>` (Ollama setup guide is coming out soon)
13. If you're planning to first use OpenAI as your Foundation model provider, then you'd need to also run the following command, to make sure the functions have everything they need to run properly: `supabase secrets set OPENAI_API_KEY=<your-openai-api-key>` (Ollama setup guide is coming out soon)
14. If you want access to tons of AI Models, both Open & Closed Source, set up your OpenRouter API Key. Go to [OpenRouter](https://openrouter.ai/) to get your API Key, then run `supabase secrets set OPENROUTER_API_KEY=<your-openrouter-api-key>`.

If everything worked, we should now be able to start chatting with our personal AI via the app - so let's set that up!

Expand Down
Binary file added docs/images/openrouter.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
65 changes: 48 additions & 17 deletions supabase/functions/chat/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,25 @@ import { corsHeaders } from "../common/cors.ts";
import { supabaseClient } from "../common/supabaseClient.ts";
import { ApplicationError, UserError } from "../common/errors.ts";

/** Minimal message shape exchanged with the completion API. */
type ChatMessage = { role: string; content: string };

/**
 * Structural subset of the OpenAI SDK client that this module actually uses.
 * Typing against this (rather than the SDK class) keeps the function testable
 * and avoids an implicit-any signature.
 */
type ChatCompletionClient = {
  chat: {
    completions: {
      create(params: {
        model: string;
        messages: ChatMessage[];
      }): Promise<{ choices: { message: ChatMessage }[] }>;
    };
  };
};

/**
 * Request a single chat completion, routing through OpenRouter when enabled.
 *
 * @param useOpenRouter - When true, send the request via OpenRouter with its
 *   model; otherwise use the OpenAI client with `gpt-4-1106-preview`.
 * @param openaiClient - Always-available OpenAI client (also the fallback).
 * @param openRouterClient - Only constructed by the caller when
 *   OPENROUTER_API_KEY is set; may be undefined otherwise.
 * @param messages - Full message history to send as the prompt.
 * @returns The assistant message from the first completion choice.
 * @throws Error if the API returns no choices.
 */
async function generateResponse(
  useOpenRouter: boolean,
  openaiClient: ChatCompletionClient,
  openRouterClient: ChatCompletionClient | undefined,
  messages: ChatMessage[]
): Promise<ChatMessage> {
  // Guard against useOpenRouter=true with no client rather than crashing.
  const client =
    useOpenRouter && openRouterClient ? openRouterClient : openaiClient;
  const modelName = useOpenRouter
    ? "nousresearch/nous-capybara-34b"
    : "gpt-4-1106-preview";

  const { choices } = await client.chat.completions.create({
    model: modelName,
    messages,
  });

  const choice = choices[0];
  if (!choice) {
    // An empty choices array would otherwise surface as an opaque TypeError.
    throw new Error("Chat completion returned no choices");
  }
  console.log("Completion: ", choice);
  return choice.message;
}

const chat = async (req) => {
if (req.method === "OPTIONS") {
return new Response("ok", { headers: corsHeaders });
Expand All @@ -21,29 +40,43 @@ const chat = async (req) => {
throw new ApplicationError(
"Unable to get auth user details in request data"
);
const { messageHistory } = await req.json();
const requestBody = await req.json();
const { messageHistory } = requestBody;

if (!messageHistory) throw new UserError("Missing query in request data");

const openaiClient = new OpenAI({
apiKey: Deno.env.get("OPENAI_API_KEY"),
});

const openRouterApiKey = Deno.env.get("OPENROUTER_API_KEY");
const useOpenRouter = Boolean(openRouterApiKey); // Use OpenRouter if API key is available

let openRouterClient;
if (useOpenRouter) {
openRouterClient = new OpenAI({
baseURL: "https://openrouter.ai/api/v1",
apiKey: openRouterApiKey,
});
}

console.log("messageHistory: ", messageHistory);

// embed the last messageHistory message
// Embed the last messageHistory message using OpenAI's embeddings API
const embeddingsResponse = await openaiClient.embeddings.create({
model: "text-embedding-ada-002",
input: messageHistory[messageHistory.length - 1].content,
});
const embeddings = embeddingsResponse.data[0].embedding;
console.log("Embeddings:", embeddings);

// Retrieve records from Supabase based on embeddings similarity
const { data: relevantRecords, error: recordsError } = await supabase.rpc(
"match_records_embeddings_similarity",
{
query_embedding: JSON.stringify(embeddings), // Pass the embedding you want to compare
match_threshold: 0.8, // Choose an appropriate threshold for your data
match_count: 10, // Choose the number of matches
query_embedding: JSON.stringify(embeddings),
match_threshold: 0.8,
match_count: 10,
}
);

Expand All @@ -67,27 +100,25 @@ const chat = async (req) => {
console.log("messages: ", messages);

try {
let completion = await openaiClient.chat.completions.create({
model: "gpt-4-1106-preview",
messages: messages,
});
console.log("completion: ", completion);
console.log(
"completion.choices[0].content: ",
completion.choices[0].content
const responseMessage = await generateResponse(
useOpenRouter,
openaiClient,
openRouterClient,
messages
);

return new Response(
JSON.stringify({
msg: completion.choices[0].message,
msg: responseMessage,
}),
{
headers: { ...corsHeaders, "Content-Type": "application/json" },
status: 200,
}
);
} catch (openAiError) {
console.log("!!! Error in OpenAI fallback: ", openAiError);
throw openAiError;
} catch (error) {
console.log("Error: ", error);
throw new ApplicationError("Error processing chat completion");
}

return new Response(
Expand Down

0 comments on commit 7fef4fa

Please sign in to comment.