diff --git a/src/components/Chat/Chat.tsx b/src/components/Chat/Chat.tsx
index ded2b6250..35286ff54 100644
--- a/src/components/Chat/Chat.tsx
+++ b/src/components/Chat/Chat.tsx
@@ -29,15 +29,13 @@ import {
useRef,
useState,
} from 'react'
-import toast from 'react-hot-toast'
-import { Button, Container, Text, Title } from '@mantine/core'
+import { Button, Text } from '@mantine/core'
import { useTranslation } from 'next-i18next'
import { getEndpoint } from '@/utils/app/api'
import {
saveConversation,
saveConversations,
- updateConversation,
} from '@/utils/app/conversation'
import { throttle } from '@/utils/data/throttle'
@@ -46,6 +44,7 @@ import {
type ChatBody,
type Conversation,
type Message,
+ Content,
} from '@/types/chat'
import { type Plugin } from '@/types/plugin'
@@ -55,7 +54,7 @@ import { ChatInput } from './ChatInput'
import { ChatLoader } from './ChatLoader'
import { ErrorMessageDiv } from './ErrorMessageDiv'
import { MemoizedChatMessage } from './MemoizedChatMessage'
-import { fetchPresignedUrl } from '~/components/UIUC-Components/ContextCards'
+import { fetchPresignedUrl } from '~/utils/apiUtils'
import { type CourseMetadata } from '~/types/courseMetadata'
@@ -75,7 +74,6 @@ import ChatNavbar from '../UIUC-Components/navbars/ChatNavbar'
import { notifications } from '@mantine/notifications'
import { Montserrat } from 'next/font/google'
import { montserrat_heading, montserrat_paragraph } from 'fonts'
-import { NextResponse } from 'next/server'
const montserrat_med = Montserrat({
weight: '500',
@@ -114,6 +112,7 @@ export const Chat = memo(({ stopConversationRef, courseMetadata }: Props) => {
loading,
prompts,
showModelSettings,
+ isImg2TextLoading
},
handleUpdateConversation,
dispatch: homeDispatch,
@@ -173,14 +172,90 @@ export const Chat = memo(({ stopConversationRef, courseMetadata }: Props) => {
}
}
+ const handleImageContent = async (message: Message, endpoint: string, updatedConversation: Conversation, searchQuery: string, controller: AbortController) => {
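+ // If the outgoing message carries images, ask the vision model for a description (non-streaming), then fold it into both the retrieval query and the message itself.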
+ const imageContent = (message.content as Content[]).filter(content => content.type === 'image_url');
+ if (imageContent.length > 0) {
+ homeDispatch({ field: 'isImg2TextLoading', value: true })
+ const chatBody: ChatBody = {
+ model: updatedConversation.model,
+ messages: [
+ {
+ ...message,
+ content: [
+ ...imageContent,
+ { type: 'text', text: 'Provide detailed description of the image(s) focusing on any text (OCR information), distinct objects, colors, and actions depicted. Include contextual information, subtle details, and specific terminologies relevant for semantic document retrieval.' }
+ ]
+ }
+ ],
+ key: courseMetadata?.openai_api_key && courseMetadata?.openai_api_key != '' ? courseMetadata.openai_api_key : apiKey,
+ prompt: updatedConversation.prompt,
+ temperature: updatedConversation.temperature,
+ course_name: getCurrentPageName(),
+ stream: false,
+ };
+
+ try {
+ const response = await fetch(endpoint, {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ },
+ body: JSON.stringify(chatBody),
+ signal: controller.signal,
+ });
+
+ if (!response.ok) {
+ const final_response = await response.json();
+ homeDispatch({ field: 'loading', value: false });
+ homeDispatch({ field: 'messageIsStreaming', value: false });
+ throw new Error(final_response.message);
+ }
+
+ const data = await response.json();
+ const imgDesc = data.choices[0].message.content || '';
+
+ searchQuery += ` Image description: ${imgDesc}`;
+
+ const imgDescIndex = (message.content as Content[]).findIndex(content => content.type === 'text' && (content.text as string).startsWith('Image description: '));
+
+ if (imgDescIndex !== -1) {
+ (message.content as Content[])[imgDescIndex] = { type: 'text', text: `Image description: ${imgDesc}` };
+ } else {
+ (message.content as Content[]).push({ type: 'text', text: `Image description: ${imgDesc}` });
+ }
+ } catch (error) {
+ console.error('Error in chat.tsx running handleImageContent():', error);
+ controller.abort();
+ } finally {
+ homeDispatch({ field: 'isImg2TextLoading', value: false })
+ };
+ }
+ return searchQuery;
+ }
+
+ const handleContextSearch = async (message: Message, selectedConversation: Conversation, searchQuery: string) => {
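+ // Vector-search the course materials for the query and attach the results to message.contexts; api/chat.ts later stuffs them into the prompt.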
+ if (getCurrentPageName() != 'gpt4') {
+ const token_limit = OpenAIModels[selectedConversation?.model.id as OpenAIModelID].tokenLimit
+ await fetchContexts(getCurrentPageName(), searchQuery, token_limit).then((curr_contexts) => {
+ message.contexts = curr_contexts as ContextWithMetadata[]
+ })
+ }
+ }
+
// THIS IS WHERE MESSAGES ARE SENT.
const handleSend = useCallback(
async (message: Message, deleteCount = 0, plugin: Plugin | null = null) => {
+
+ setCurrentMessage(message)
// New way with React Context API
// TODO: MOVE THIS INTO ChatMessage
// console.log('IN handleSend: ', message)
// setSearchQuery(message.content)
- const searchQuery = message.content
+ let searchQuery = Array.isArray(message.content)
+ ? message.content.map((content) => content.text).join(' ')
+ : message.content;
+
+ // console.log("QUERY: ", searchQuery)
if (selectedConversation) {
let updatedConversation: Conversation
@@ -206,21 +281,18 @@ export const Chat = memo(({ stopConversationRef, courseMetadata }: Props) => {
homeDispatch({ field: 'loading', value: true })
homeDispatch({ field: 'messageIsStreaming', value: true })
- // Run context search, attach to Message object.
- if (getCurrentPageName() != 'gpt4') {
- // THE ONLY place we fetch contexts (except ExtremePromptStuffing is still in api/chat.ts)
- const token_limit =
- OpenAIModels[selectedConversation?.model.id as OpenAIModelID]
- .tokenLimit
- await fetchContexts(
- getCurrentPageName(),
- searchQuery,
- token_limit,
- ).then((curr_contexts) => {
- message.contexts = curr_contexts as ContextWithMetadata[]
- })
+ const endpoint = getEndpoint(plugin);
+
+ const controller = new AbortController()
+
+ // Run image to text conversion, attach to Message object.
+ if (Array.isArray(message.content)) {
+ searchQuery = await handleImageContent(message, endpoint, updatedConversation, searchQuery, controller);
}
+ // Run context search, attach to Message object.
+ await handleContextSearch(message, selectedConversation, searchQuery);
+
const chatBody: ChatBody = {
model: updatedConversation.model,
messages: updatedConversation.messages,
@@ -232,8 +304,9 @@ export const Chat = memo(({ stopConversationRef, courseMetadata }: Props) => {
prompt: updatedConversation.prompt,
temperature: updatedConversation.temperature,
course_name: getCurrentPageName(),
+ stream: true
}
- const endpoint = getEndpoint(plugin) // THIS is where we could support EXTREME prompt stuffing.
+
let body
if (!plugin) {
body = JSON.stringify(chatBody)
@@ -248,7 +321,8 @@ export const Chat = memo(({ stopConversationRef, courseMetadata }: Props) => {
?.requiredKeys.find((key) => key.key === 'GOOGLE_CSE_ID')?.value,
})
}
- const controller = new AbortController()
+
+ // This is where we call the OpenAI API
const response = await fetch(endpoint, {
method: 'POST',
headers: {
@@ -301,13 +375,17 @@ export const Chat = memo(({ stopConversationRef, courseMetadata }: Props) => {
}
if (!plugin) {
if (updatedConversation.messages.length === 1) {
- const { content } = message
+ const { content } = message;
+ // Use only texts instead of content itself
+ const contentText = Array.isArray(content)
+ ? content.map((content) => content.text).join(' ')
+ : content;
const customName =
- content.length > 30 ? content.substring(0, 30) + '...' : content
+ contentText.length > 30 ? contentText.substring(0, 30) + '...' : contentText;
updatedConversation = {
...updatedConversation,
name: customName,
- }
+ };
}
homeDispatch({ field: 'loading', value: false })
const reader = data.getReader()
@@ -390,6 +468,7 @@ export const Chat = memo(({ stopConversationRef, courseMetadata }: Props) => {
updatedConversations.push(updatedConversation)
}
homeDispatch({ field: 'conversations', value: updatedConversations })
+ console.log('updatedConversations: ', updatedConversations)
saveConversations(updatedConversations)
homeDispatch({ field: 'messageIsStreaming', value: false })
} else {
@@ -434,6 +513,20 @@ export const Chat = memo(({ stopConversationRef, courseMetadata }: Props) => {
],
)
+ const handleRegenerate = useCallback(() => {
+ if (currentMessage && Array.isArray(currentMessage.content)) {
+ // Find the index of the existing image description
+ const imgDescIndex = (currentMessage.content as Content[]).findIndex(content => content.type === 'text' && (content.text as string).startsWith('Image description: '));
+
+ if (imgDescIndex !== -1) {
+ // Remove the existing image description
+ (currentMessage.content as Content[]).splice(imgDescIndex, 1);
+ }
+
+ handleSend(currentMessage, 2, null);
+ }
+ }, [currentMessage, handleSend]);
+
const scrollToBottom = useCallback(() => {
if (autoScrollEnabled) {
messagesEndRef.current?.scrollIntoView({ behavior: 'smooth' })
@@ -575,6 +668,64 @@ export const Chat = memo(({ stopConversationRef, courseMetadata }: Props) => {
)
}
+ // Inside Chat function before the return statement
+ const renderMessageContent = (message: Message) => {
+ if (Array.isArray(message.content)) {
+ return (
+ <>
+ {message.content.map((content, index) => {
+ if (content.type === 'image_url' && content.image_url) {
+ return <img key={index} src={content.image_url.url} alt="" />;
+ }
+ return <span key={index}>{content.text}</span>;
+ })}
+ </>
+ );
+ }
+ return <span>{message.content}</span>;
+ };
+
+ const updateMessages = (updatedMessage: Message, messageIndex: number) => {
+ return selectedConversation?.messages.map((message, index) => {
+ return index === messageIndex ? updatedMessage : message;
+ });
+ };
+
+ const updateConversations = (updatedConversation: Conversation) => {
+ return conversations.map((conversation) =>
+ conversation.id === selectedConversation?.id ? updatedConversation : conversation
+ );
+ };
+
+ const onImageUrlsUpdate = useCallback((updatedMessage: Message, messageIndex: number) => {
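+ // Invoked by ChatMessage after it refreshes expired presigned image URLs, so the fresh URLs are persisted back into the conversation.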
+ if (!selectedConversation) {
+ throw new Error("No selected conversation found");
+ }
+
+ const updatedMessages = updateMessages(updatedMessage, messageIndex);
+ if (!updatedMessages) {
+ throw new Error("Failed to update messages");
+ }
+
+ const updatedConversation = {
+ ...selectedConversation,
+ messages: updatedMessages,
+ };
+
+ homeDispatch({
+ field: 'selectedConversation',
+ value: updatedConversation,
+ });
+
+ const updatedConversations = updateConversations(updatedConversation);
+ if (!updatedConversations) {
+ throw new Error("Failed to update conversations");
+ }
+
+ homeDispatch({ field: 'conversations', value: updatedConversations });
+ saveConversations(updatedConversations);
+ }, [selectedConversation, conversations]);
+
return (
@@ -671,14 +822,16 @@ export const Chat = memo(({ stopConversationRef, courseMetadata }: Props) => {
onEdit={(editedMessage) => {
- setCurrentMessage(editedMessage)
+ // setCurrentMessage(editedMessage)
handleSend(
editedMessage,
selectedConversation?.messages.length - index,
)
}}
+ onImageUrlsUpdate={onImageUrlsUpdate}
/>
))}
{loading && <ChatLoader />}
@@ -694,18 +847,15 @@ export const Chat = memo(({ stopConversationRef, courseMetadata }: Props) => {
stopConversationRef={stopConversationRef}
textareaRef={textareaRef}
onSend={(message, plugin) => {
- setCurrentMessage(message)
+ // setCurrentMessage(message)
handleSend(message, 0, plugin)
}}
onScrollDownClick={handleScrollDown}
- onRegenerate={() => {
- if (currentMessage) {
- handleSend(currentMessage, 2, null)
- }
- }}
+ onRegenerate={handleRegenerate}
showScrollDownButton={showScrollDownButton}
inputContent={inputContent}
setInputContent={setInputContent}
+ courseName={getCurrentPageName()}
/>
{/*
*/}
>
diff --git a/src/components/Chat/ChatInput.tsx b/src/components/Chat/ChatInput.tsx
index cab4d5443..318e2b5f2 100644
--- a/src/components/Chat/ChatInput.tsx
+++ b/src/components/Chat/ChatInput.tsx
@@ -1,3 +1,4 @@
+// ChatInput.tsx
import {
IconArrowDown,
IconBolt,
@@ -5,6 +6,9 @@ import {
IconPlayerStop,
IconRepeat,
IconSend,
+ IconPhoto,
+ IconAlertCircle,
+ IconX
} from '@tabler/icons-react'
import {
KeyboardEvent,
@@ -17,8 +21,7 @@ import {
} from 'react'
import { useTranslation } from 'next-i18next'
-
-import { Message } from '@/types/chat'
+import { Content, Message } from '@/types/chat'
import { Plugin } from '@/types/plugin'
import { Prompt } from '@/types/prompt'
@@ -28,6 +31,26 @@ import { PluginSelect } from './PluginSelect'
import { PromptList } from './PromptList'
import { VariableModal } from './VariableModal'
+import { notifications } from '@mantine/notifications';
+import { useMantineTheme, Modal, Tooltip } from '@mantine/core';
+import { Montserrat } from 'next/font/google'
+
+import { v4 as uuidv4 } from 'uuid';
+
+import React from 'react'
+
+import { CSSProperties } from 'react';
+
+import { fetchPresignedUrl, uploadToS3 } from 'src/utils/apiUtils';
+import { ImagePreview } from './ImagePreview'
+import { OpenAIModelID } from '~/types/openai'
+
+const montserrat_med = Montserrat({
+ weight: '500',
+ subsets: ['latin'],
+})
+
+
interface Props {
onSend: (message: Message, plugin: Plugin | null) => void
onRegenerate: () => void
@@ -37,6 +60,12 @@ interface Props {
showScrollDownButton: boolean
inputContent: string
setInputContent: (content: string) => void
+ courseName: string
+}
+
+interface ProcessedImage {
+ resizedFile: File;
+ dataUrl: string;
}
export const ChatInput = ({
@@ -48,6 +77,7 @@ export const ChatInput = ({
showScrollDownButton,
inputContent,
setInputContent,
+ courseName,
}: Props) => {
const { t } = useTranslation('chat')
@@ -57,7 +87,7 @@ export const ChatInput = ({
dispatch: homeDispatch,
} = useContext(HomeContext)
- const [content, setContent] = useState<string>()
+ const [content, setContent] = useState<string>(() => inputContent);
const [isTyping, setIsTyping] = useState(false)
const [showPromptList, setShowPromptList] = useState(false)
const [activePromptIndex, setActivePromptIndex] = useState(0)
@@ -66,8 +96,50 @@ export const ChatInput = ({
const [isModalVisible, setIsModalVisible] = useState(false)
const [showPluginSelect, setShowPluginSelect] = useState(false)
const [plugin, setPlugin] = useState<Plugin | null>(null)
-
+ const [uploadingImage, setUploadingImage] = useState(false);
+ const [imageError, setImageError] = useState<string | null>(null);
+ const [isDragging, setIsDragging] = useState(false);
+ const imageUploadRef = useRef<HTMLInputElement | null>(null);
const promptListRef = useRef<HTMLUListElement | null>(null)
+ const [imageFiles, setImageFiles] = useState<File[]>([]);
+ const [imagePreviewUrls, setImagePreviewUrls] = useState<string[]>([]);
+ const chatInputContainerRef = useRef<HTMLDivElement | null>(null);
+ const [isFocused, setIsFocused] = useState(false);
+ const [imagePreviews, setImagePreviews] = useState<string[]>([]);
+ const [selectedImage, setSelectedImage] = useState<string | null>(null);
+ const [isModalOpen, setIsModalOpen] = useState(false);
+ const [imageUrls, setImageUrls] = useState<string[]>([]);
+
+
+ const removeButtonStyle: CSSProperties = {
+ position: 'absolute',
+ top: '-8px',
+ right: '-8px',
+ display: 'flex',
+ alignItems: 'center',
+ justifyContent: 'center',
+ width: '24px',
+ height: '24px',
+ borderRadius: '50%',
+ backgroundColor: '#A9A9A9', // Changed to a darker gray
+ color: 'white', // White icon color
+ border: '2px solid white', // White border
+ cursor: 'pointer',
+ zIndex: 2,
+ };
+
+ const removeButtonHoverStyle: CSSProperties = {
+ backgroundColor: '#505050', // Even darker gray for hover state
+ };
+
+ // Dynamically set the padding based on image previews presence
+ const chatInputContainerStyle: CSSProperties = {
+ paddingTop: imagePreviewUrls.length > 0 ? '10px' : '0',
+ paddingRight: imagePreviewUrls.length > 0 ? '10px' : '0',
+ paddingBottom: '0',
+ paddingLeft: '10px',
+ borderRadius: '4px', // This will round the edges slightly
+ };
const filteredPrompts = prompts.filter((prompt) =>
prompt.name.toLowerCase().includes(promptInputValue.toLowerCase()),
@@ -90,25 +162,72 @@ export const ChatInput = ({
setContent(value)
updatePromptListVisibility(value)
}
+ // Assuming Message, Role, and Plugin types are already defined in your codebase
- const handleSend = () => {
+ type Role = 'user' | 'system'; // Add other roles as needed
+
+
+ const handleSend = async () => {
if (messageIsStreaming) {
- return
+ return;
}
- if (!content) {
- alert(t('Please enter a message'))
- return
+ const textContent = content;
+ let imageContent: Content[] = []; // Explicitly declare the type for imageContent
+
+ if (imageFiles.length > 0 && !uploadingImage) {
+ setUploadingImage(true);
+ try {
+ // If imageUrls is empty, upload all images and get their URLs
+ const imageUrlsToUse = imageUrls.length > 0 ? imageUrls :
+ await Promise.all(imageFiles.map(file => uploadImageAndGetUrl(file, courseName)));
+
+ // Construct image content for the message
+ imageContent = imageUrlsToUse
+ .filter((url): url is string => url !== '') // Type-guard to filter out empty strings
+ .map(url => ({
+ type: "image_url",
+ image_url: { url }
+ }));
+
+ // console.log("Final imageUrls: ", imageContent)
+
+ // Clear the files after uploading
+ setImageFiles([]);
+ setImagePreviewUrls([]);
+ } catch (error) {
+ console.error('Error uploading files:', error);
+ setImageError('Error uploading files');
+ } finally {
+ setUploadingImage(false);
+ }
}
- onSend({ role: 'user', content }, plugin)
- setContent('')
- setPlugin(null)
-
- if (window.innerWidth < 640 && textareaRef && textareaRef.current) {
- textareaRef.current.blur()
+ if (!textContent && imageContent.length === 0) {
+ alert(t('Please enter a message or upload an image'));
+ return;
}
- }
+
+ // Construct the content array
+ const contentArray: Content[] = [
+ ...(textContent ? [{ type: "text", text: textContent }] : []),
+ ...imageContent
+ ];
+
+ // Create a structured message for GPT-4 Vision
+ const messageForGPT4Vision: Message = {
+ role: 'user',
+ content: contentArray
+ };
+
+ // Use the onSend prop to send the structured message
+ onSend(messageForGPT4Vision, plugin);
+
+ // Reset states
+ setContent('');
+ setPlugin(null);
+ setImagePreviews([]);
+ };
const handleStopConversation = () => {
stopConversationRef.current = true
@@ -199,7 +318,7 @@ export const ChatInput = ({
}
}, [])
- const handlePromptSelect = (prompt: Prompt) => {
+ const handlePromptSelect = useCallback((prompt: Prompt) => {
const parsedVariables = parseVariables(prompt.content)
const filteredVariables = parsedVariables.filter(
(variable) => variable !== undefined,
@@ -215,24 +334,9 @@ export const ChatInput = ({
})
updatePromptListVisibility(prompt.content)
}
- }
-
- // const handlePromptSelect = (prompt: Prompt) => {
- // const parsedVariables = parseVariables(prompt.content);
- // setVariables(parsedVariables);
-
- // if (parsedVariables.length > 0) {
- // setIsModalVisible(true);
- // } else {
- // setContent((prevContent) => {
- // const updatedContent = prevContent?.replace(/\/\w*$/, prompt.content);
- // return updatedContent;
- // });
- // updatePromptListVisibility(prompt.content);
- // }
- // };
+ }, [parseVariables, setContent, updatePromptListVisibility]);
- const handleSubmit = (updatedVariables: string[]) => {
+ const handleSubmit = useCallback((updatedVariables: string[]) => {
const newContent = content?.replace(/{{(.*?)}}/g, (match, variable) => {
const index = variables.indexOf(variable)
return updatedVariables[index] || ''
@@ -243,14 +347,283 @@ export const ChatInput = ({
if (textareaRef && textareaRef.current) {
textareaRef.current.focus()
}
+ }, [content, variables, setContent, textareaRef]);
+
+ // https://platform.openai.com/docs/guides/vision/what-type-of-files-can-i-upload
+ const validImageTypes = ['.jpg', '.jpeg', '.png', '.webp', '.gif'];
+
+ const isImageValid = (fileName: string): boolean => {
+ const ext = fileName.slice(fileName.lastIndexOf(".") + 1).toLowerCase();
+ return validImageTypes.includes(`.${ext}`);
}
+ // const uploadToS3 = async (file: File) => {
+ // if (!file) {
+ // console.error('No file provided for upload');
+ // return;
+ // }
+
+ // // Generate a unique file name using uuidv4
+ // const uniqueFileName = `${uuidv4()}.${file.name.split('.').pop()}`;
+ // const s3_filepath = `courses/${courseName}/${uniqueFileName}`; // Define s3_filepath here
+
+ // console.log('uploadToS3 called with uniqueFileName:', uniqueFileName);
+ // console.log('uploadToS3 called with s3_filepath:', s3_filepath);
+
+ // // Prepare the request body for the API call
+ // const requestObject = {
+ // method: 'POST',
+ // headers: {
+ // 'Content-Type': 'application/json',
+ // },
+ // body: JSON.stringify({
+ // uniqueFileName: uniqueFileName,
+ // fileType: file.type,
+ // courseName: courseName,
+ // }),
+ // };
+
+ // try {
+ // // Call your API to get the presigned POST data
+ // const response = await fetch('/api/UIUC-api/uploadToS3', requestObject);
+ // if (!response.ok) {
+ // throw new Error(`HTTP error! Status: ${response.status}`);
+ // }
+ // const { post } = await response.json();
+
+ // // Use the presigned POST data to upload the file to S3
+ // const formData = new FormData();
+ // Object.entries(post.fields).forEach(([key, value]) => {
+ // formData.append(key, value as string);
+ // });
+ // formData.append('file', file);
+
+ // // Post the file to the S3 bucket using the presigned URL and form data
+ // const uploadResponse = await fetch(post.url, {
+ // method: 'POST',
+ // body: formData,
+ // });
+
+ // if (!uploadResponse.ok) {
+ // throw new Error('Failed to upload the file to S3');
+ // }
+
+ // // Construct the URL to the uploaded file using the response from the presigned POST
+ // const uploadedImageUrl = `https://${aws_config.bucketName}.s3.${aws_config.region}.amazonaws.com/${encodeURIComponent(s3_filepath)}`;
+
+ // return uploadedImageUrl;
+ // } catch (error) {
+ // console.error('Error uploading file:', error);
+ // }
+ // };
+
+
+
+ const ingestFile = async (file: File | null) => {
+ if (!file) return;
+
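+ // (lastIndexOf('.') - 1 >>> 0) + 2 points at the extension start; for names with no dot it overflows past the end, so slice() returns ''.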
+ const fileExtension = file.name.slice(((file.name.lastIndexOf(".") - 1) >>> 0) + 2);
+ const uniqueFileName = `${uuidv4()}.${fileExtension}`;
+
+ const queryParams = new URLSearchParams({
+ courseName: courseName,
+ fileName: uniqueFileName,
+ }).toString();
+
+ const requestObject = {
+ method: 'GET',
+ headers: {
+ 'Content-Type': 'application/json',
+ },
+ }
+
+ // Actually we CAN await here, just don't await this function.
+ console.log('right before call /ingest...')
+ const response = await fetch(
+ `/api/UIUC-api/ingest?${queryParams}`,
+ requestObject,
+ )
+
+ // check if the response was ok
+ if (response.ok) {
+ const data = await response.json()
+ // console.log(file.name as string + ' ingested successfully!!')
+ console.log('Success or Failure:', data)
+ return data
+ } else {
+ console.log('Error during ingest:', response.statusText)
+ console.log('Full Response message:', response)
+ return response
+ }
+ }
+
+ const showToastOnInvalidImage = useCallback(() => {
+ notifications.show({
+ id: 'error-notification',
+ withCloseButton: true,
+ onClose: () => console.log('error unmounted'),
+ onOpen: () => console.log('error mounted'),
+ autoClose: 8000,
+ title: 'Invalid Image Type',
+ message: 'Unsupported file type. Please upload .jpg, .jpeg, .png, .webp, or .gif images.',
+ color: 'red',
+ radius: 'lg',
+ icon: <IconAlertCircle />,
+ className: 'my-notification-class',
+ style: { backgroundColor: '#15162c' },
+ withBorder: true,
+ loading: false,
+ });
+ }, []);
+
+ const handleImageUpload = useCallback(async (files: File[]) => {
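+ // Pipeline: validate extension → downscale via canvas → re-encode as JPEG → upload to S3, caching the presigned URLs for handleSend.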
+ const validFiles = files.filter(file => isImageValid(file.name));
+ const invalidFilesCount = files.length - validFiles.length;
+
+ if (invalidFilesCount > 0) {
+ setImageError(`${invalidFilesCount} invalid file type(s). Please upload .jpg, .jpeg, .png, .webp, or .gif images.`);
+ showToastOnInvalidImage();
+ }
+
+ const imageProcessingPromises = validFiles.map(file => {
+ return new Promise<ProcessedImage>((resolve, reject) => {
+ const reader = new FileReader();
+
+ reader.onloadend = () => {
+ const result = reader.result;
+ if (typeof result === 'string') {
+ const img = new Image();
+ img.src = result;
+
+ img.onload = () => {
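+ // Resize toward OpenAI's vision guidance: shortest side about 768px, longest side capped at 2048px, preserving aspect ratio.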
+ let newWidth, newHeight;
+ const MAX_WIDTH = 2048;
+ const MAX_HEIGHT = 2048;
+ const MIN_SIDE = 768;
+
+ if (img.width > img.height) {
+ newHeight = MIN_SIDE;
+ newWidth = (img.width / img.height) * newHeight;
+ if (newWidth > MAX_WIDTH) {
+ newWidth = MAX_WIDTH;
+ newHeight = (img.height / img.width) * newWidth;
+ }
+ } else {
+ newWidth = MIN_SIDE;
+ newHeight = (img.height / img.width) * newWidth;
+ if (newHeight > MAX_HEIGHT) {
+ newHeight = MAX_HEIGHT;
+ newWidth = (img.width / img.height) * newHeight;
+ }
+ }
+
+ const canvas = document.createElement('canvas');
+ const ctx = canvas.getContext('2d');
+ if (ctx) {
+ canvas.width = newWidth;
+ canvas.height = newHeight;
+ ctx.drawImage(img, 0, 0, newWidth, newHeight);
+
+ canvas.toBlob((blob) => {
+ if (blob) {
+ const resizedFile = new File([blob], file.name, {
+ type: 'image/jpeg',
+ lastModified: Date.now(),
+ });
+
+ resolve({ resizedFile, dataUrl: canvas.toDataURL('image/jpeg') });
+ } else {
+ reject(new Error('Canvas toBlob failed'));
+ }
+ }, 'image/jpeg', 0.9);
+ } else {
+ reject(new Error('Canvas Context is null'));
+ }
+ };
+ } else {
+ reject(new Error('FileReader did not return a string result'));
+ }
+ };
+
+ reader.onerror = reject;
+ reader.readAsDataURL(file);
+ });
+ });
+
+ try {
+ const processedImages = await Promise.all(imageProcessingPromises);
+ setImageFiles(prev => [...prev, ...processedImages.map(img => img.resizedFile)]);
+ setImagePreviewUrls(prev => [...prev, ...processedImages.map(img => img.dataUrl)]);
+
+ // Store the URLs of the uploaded images
+ const uploadedImageUrls = (await Promise.all(processedImages.map(img => uploadImageAndGetUrl(img.resizedFile, courseName)))).filter(Boolean);
+ setImageUrls(uploadedImageUrls as string[]);
+ } catch (error) {
+ console.error('Error processing files:', error);
+ }
+ }, [setImageError, setImageFiles, setImagePreviewUrls, showToastOnInvalidImage, courseName]);
+
+ // Function to open the modal with the selected image
+ const openModal = (imageSrc: string) => {
+ setSelectedImage(imageSrc);
+ setIsModalOpen(true);
+ };
+
+ const theme = useMantineTheme();
+
useEffect(() => {
- setContent(inputContent)
- if (textareaRef.current) {
- textareaRef.current.focus()
+ if (selectedConversation?.model.id !== OpenAIModelID.GPT_4_VISION) {
+ return; // Exit early if the model is not GPT-4 Vision
}
- }, [inputContent, textareaRef])
+
+ const handleDocumentDragOver = (e: DragEvent) => {
+ e.preventDefault();
+ setIsDragging(true);
+ };
+
+ const handleDocumentDrop = (e: DragEvent) => {
+ e.preventDefault();
+ setIsDragging(false);
+ if (e.dataTransfer && e.dataTransfer.items && e.dataTransfer.items.length > 0) {
+ const files = Array.from(e.dataTransfer.items)
+ .filter(item => item.kind === 'file')
+ .map(item => item.getAsFile())
+ .filter(file => file !== null) as File[];
+ if (files.length > 0) {
+ handleImageUpload(files);
+ }
+ }
+ };
+
+
+ const handleDocumentDragLeave = (e: DragEvent) => {
+ setIsDragging(false);
+ };
+
+ document.addEventListener('dragover', handleDocumentDragOver);
+ document.addEventListener('drop', handleDocumentDrop);
+ document.addEventListener('dragleave', handleDocumentDragLeave);
+
+ return () => {
+ // Clean up the event listeners when the component is unmounted
+ document.removeEventListener('dragover', handleDocumentDragOver);
+ document.removeEventListener('drop', handleDocumentDrop);
+ document.removeEventListener('dragleave', handleDocumentDragLeave);
+ };
+ }, [handleImageUpload, selectedConversation?.model.id]);
+
+ useEffect(() => {
+ if (imageError) {
+ showToastOnInvalidImage();
+ setImageError(null);
+ }
+ }, [imageError, showToastOnInvalidImage]);
+
useEffect(() => {
if (promptListRef.current) {
@@ -262,9 +635,8 @@ export const ChatInput = ({
if (textareaRef && textareaRef.current) {
textareaRef.current.style.height = 'inherit'
textareaRef.current.style.height = `${textareaRef.current?.scrollHeight}px`
- textareaRef.current.style.overflow = `${
- textareaRef?.current?.scrollHeight > 400 ? 'auto' : 'hidden'
- }`
+ textareaRef.current.style.overflow = `${textareaRef?.current?.scrollHeight > 400 ? 'auto' : 'hidden'}`
}
}, [content])
@@ -285,8 +657,28 @@ export const ChatInput = ({
}
}, [])
+ useEffect(() => {
+ // This will focus the div as soon as the component mounts
+ if (chatInputContainerRef.current) {
+ chatInputContainerRef.current.focus();
+ }
+ }, []);
+
+ // This is where we upload images and generate their presigned url
+ async function uploadImageAndGetUrl(file: File, courseName: string): Promise<string> {
+ try {
+ const uploadedImageUrl = await uploadToS3(file, courseName);
+ const presignedUrl = await fetchPresignedUrl(uploadedImageUrl as string);
+ return presignedUrl;
+ } catch (error) {
+ console.error('Upload failed for file', file.name, error);
+ setImageError(`Upload failed for file: ${file.name}`);
+ return '';
+ }
+ }
+
return (
-
+
- {/* Small title below the main chat input bar */}
- {/*
- {t('Advanced version of ChatGPT, built for UIUC. Forked from ')}
-
- ChatBot UI
-
- .{' '}
-
*/}
)
}
diff --git a/src/components/Chat/ChatMessage.tsx b/src/components/Chat/ChatMessage.tsx
index a216dde40..487fe520d 100644
--- a/src/components/Chat/ChatMessage.tsx
+++ b/src/components/Chat/ChatMessage.tsx
@@ -1,5 +1,5 @@
// ChatMessage.tsx
-import { Text, Group } from '@mantine/core'
+import { Text, Group, createStyles, Tooltip, Paper, Collapse, Accordion } from '@mantine/core'
import {
IconCheck,
IconCopy,
@@ -12,7 +12,7 @@ import { FC, memo, useContext, useEffect, useRef, useState } from 'react'
import { useTranslation } from 'next-i18next'
import { updateConversation } from '@/utils/app/conversation'
-import { ContextWithMetadata, Message } from '@/types/chat'
+import { Content, ContextWithMetadata, Message } from '@/types/chat'
import HomeContext from '~/pages/api/home/home.context'
import { CodeBlock } from '../Markdown/CodeBlock'
import { MemoizedReactMarkdown } from '../Markdown/MemoizedReactMarkdown'
@@ -22,6 +22,30 @@ import remarkGfm from 'remark-gfm'
import remarkMath from 'remark-math'
import { ContextCards } from '~/components/UIUC-Components/ContextCards'
+import { ImagePreview } from './ImagePreview'
+import { LoadingSpinner } from '../UIUC-Components/LoadingSpinner'
+import { fetchPresignedUrl } from '~/utils/apiUtils'
+import dayjs from 'dayjs';
+import utc from 'dayjs/plugin/utc';
+import { montserrat_paragraph } from 'fonts'
+
+const useStyles = createStyles((theme) => ({
+ imageContainerStyle: {
+ maxWidth: '25%',
+ flex: '1 0 21%',
+ padding: '0.5rem',
+ borderRadius: '0.5rem',
+ },
+ imageStyle: {
+ width: '100%',
+ height: '100px',
+ objectFit: 'cover',
+ borderRadius: '0.5rem',
+ borderColor: theme.colors.gray[6],
+ borderWidth: '1px',
+ borderStyle: 'solid',
+ }
+}))
// Component that's the Timer for GPT's response duration.
const Timer: React.FC<{ timerVisible: boolean }> = ({ timerVisible }) => {
@@ -56,10 +80,12 @@ export interface Props {
messageIndex: number
onEdit?: (editedMessage: Message) => void
context?: ContextWithMetadata[]
+ contentRenderer?: (message: Message) => JSX.Element;
+ onImageUrlsUpdate?: (message: Message, messageIndex: number) => void;
}
export const ChatMessage: FC<Props> = memo(
- ({ message, messageIndex, onEdit }) => {
+ ({ message, messageIndex, onEdit, onImageUrlsUpdate }) => {
const { t } = useTranslation('chat')
const {
@@ -68,19 +94,25 @@ export const ChatMessage: FC<Props> = memo(
conversations,
currentMessage,
messageIsStreaming,
+ isImg2TextLoading
},
dispatch: homeDispatch,
} = useContext(HomeContext)
const [isEditing, setIsEditing] = useState(false)
const [isTyping, setIsTyping] = useState(false)
- const [messageContent, setMessageContent] = useState(message.content)
+ const [messageContent, setMessageContent] = useState('')
+
+ const [imageUrls, setImageUrls] = useState<string[]>([]);
+
const [messagedCopied, setMessageCopied] = useState(false)
const textareaRef = useRef<HTMLTextAreaElement>(null)
// SET TIMER for message writing (from gpt-4)
const [timerVisible, setTimerVisible] = useState(false)
+
+ const { classes } = useStyles() // for Accordion
useEffect(() => {
if (message.role === 'assistant') {
if (
@@ -114,6 +146,62 @@ export const ChatMessage: FC<Props> = memo(
}
}, [message.role, messageIsStreaming, messageIndex, selectedConversation])
+ function deepEqual(a: any, b: any) {
+ if (a === b) {
+ return true;
+ }
+
+ if (typeof a !== 'object' || a === null || typeof b !== 'object' || b === null) {
+ return false;
+ }
+
+ const keysA = Object.keys(a), keysB = Object.keys(b);
+
+ if (keysA.length !== keysB.length) {
+ return false;
+ }
+
+ for (const key of keysA) {
+ if (!keysB.includes(key) || !deepEqual(a[key], b[key])) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ useEffect(() => {
+ const fetchUrl = async () => {
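+ // Presigned S3 URLs expire; validate each image URL on render and swap in a fresh presigned URL, persisting the change via onImageUrlsUpdate.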
+ let isValid = false;
+ if (Array.isArray(message.content)) {
+ const updatedContent = await Promise.all(message.content.map(async (content) => {
+ if (content.type === 'image_url' && content.image_url) {
+ console.log("Checking if image url is valid: ", content.image_url.url)
+ isValid = await checkIfUrlIsValid(content.image_url.url);
+ if (isValid) {
+ setImageUrls(prevUrls => [...prevUrls, content.image_url?.url as string]);
+ return content;
+ } else {
+ const path = extractPathFromUrl(content.image_url.url);
+ console.log("Fetching presigned url for: ", path)
+ const presignedUrl = await getPresignedUrl(path);
+ setImageUrls(prevUrls => [...prevUrls, presignedUrl]);
+ return { ...content, image_url: { url: presignedUrl } };
+ }
+ }
+ return content;
+ }));
+ if (!isValid && onImageUrlsUpdate && !deepEqual(updatedContent, message.content)) {
+ console.log("Updated content: ", updatedContent, "Previous content: ", message.content)
+ onImageUrlsUpdate({ ...message, content: updatedContent }, messageIndex);
+ }
+ }
+ };
+ if (message.role === 'user') {
+ fetchUrl();
+ }
+ }, [message.content, messageIndex]);
+
const toggleEditing = () => {
setIsEditing(!isEditing)
}
@@ -176,7 +264,7 @@ export const ChatMessage: FC<Props> = memo(
const copyOnClick = () => {
if (!navigator.clipboard) return
- navigator.clipboard.writeText(message.content).then(() => {
+ navigator.clipboard.writeText(message.content as string).then(() => {
setMessageCopied(true)
setTimeout(() => {
setMessageCopied(false)
@@ -186,8 +274,18 @@ export const ChatMessage: FC = memo(
useEffect(() => {
// setMessageContent(message.content)
- console.log('IN ChatMessage 189 adding hi to messages: ', message)
- setMessageContent(`${message.content} hi`)
+ if (Array.isArray(message.content)) {
+ const textContent = message.content
+ .filter((content) => content.type === 'text')
+ .map((content) => content.text)
+ .join(' ')
+ setMessageContent(textContent)
+ console.log('IN ChatMessage 188 not adding hi to messages: ', message)
+ } else {
+ console.log('IN ChatMessage 189 adding hi to messages: ', message)
+ setMessageContent(`${message.content} hi`)
+ }
+ // console.log('IN ChatMessage 189 adding hi to messages: ', message)
// RIGHT HERE, run context search.
@@ -198,6 +296,10 @@ export const ChatMessage: FC = memo(
// }
}, [message.content])
+ useEffect(() => {
+ setImageUrls([]);
+ }, [message]);
+
useEffect(() => {
if (textareaRef.current) {
textareaRef.current.style.height = 'inherit'
@@ -205,6 +307,57 @@ export const ChatMessage: FC = memo(
}
}, [isEditing])
+ async function getPresignedUrl(uploadedImageUrl: string): Promise<string> {
+ try {
+ const presignedUrl = await fetchPresignedUrl(uploadedImageUrl);
+ return presignedUrl;
+ } catch (error) {
+ console.error('Failed to fetch presigned URL for', uploadedImageUrl, error);
+ return '';
+ }
+ }
+
+ async function checkIfUrlIsValid(url: string): Promise<boolean> {
+ try {
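+ // SigV4 presigned URLs carry X-Amz-Date (creation time, UTC) and X-Amz-Expires (lifetime in seconds); the link is stale once creation + lifetime has passed.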
+ dayjs.extend(utc);
+ const urlObject = new URL(url);
+ let creationDateString = urlObject.searchParams.get('X-Amz-Date') as string
+
+ // Manually reformat the creationDateString to standard ISO 8601 format
+ creationDateString = creationDateString.replace(
+ /^(\d{4})(\d{2})(\d{2})T(\d{2})(\d{2})(\d{2})Z$/,
+ '$1-$2-$3T$4:$5:$6Z'
+ );
+
+ // Parse the normalized ISO 8601 string as UTC
+ const creationDate = dayjs.utc(creationDateString);
+
+ const expiresInSecs = Number(urlObject.searchParams.get('X-Amz-Expires') as string);
+
+ const expiryDate = creationDate.add(expiresInSecs, 'second');
+ const isExpired = expiryDate.toDate() < new Date();
+
+ if (isExpired) {
+ console.log('URL is expired'); // Keep this log if necessary for debugging
+ return false;
+ } else {
+ return true;
+ }
+ } catch (error) {
+ console.error('Failed to validate URL', url, error);
+ return false;
+ }
+ }
+
+ function extractPathFromUrl(url: string): string {
+ const urlObject = new URL(url);
+ let path = urlObject.pathname
+ if (path.startsWith('/')) {
+ path = path.substring(1);
+ }
+ return path;
+ }
+
return (
= memo(
) : (
- {message.content}
+ {Array.isArray(message.content) ? (
+
+ {message.content.map((content, index) => {
+ if (content.type === 'text') {
+ if ((content.text as string).trim().startsWith('Image description:')) {
+ console.log("Image description found: ", content.text)
+ return (
+
+
+
+ This image description is used to find relevant documents and provide intelligent context for GPT-4 Vision.
+
+
+ {content.text}
+
+
+
+ );
+ } else {
+ return (
+
{content.text}
+ );
+ }
+ }
+ })}
+ {isImg2TextLoading && messageIndex == (selectedConversation?.messages.length ?? 0) - 1 && (
+
+
Generating Image Description:
+
+
+ )}
+
+ {message.content.filter(item => item.type === 'image_url').map((content, index) => (
+
+ ))}
+
+
+ ) : (
+ message.content
+ )}
+
)}
{!isEditing && (
{
diff --git a/src/components/Chatbar/Chatbar.tsx b/src/components/Chatbar/Chatbar.tsx
index 5f2dde2cf..961b74c2c 100644
--- a/src/components/Chatbar/Chatbar.tsx
+++ b/src/components/Chatbar/Chatbar.tsx
@@ -211,9 +211,14 @@ export const Chatbar = () => {
(context) => context['course_name '] === currentCourseName,
);
const searchTermMatch = conversation.messages[0]?.contexts?.[0]?.['course_name '].toLocaleLowerCase().includes(searchTerm.toLowerCase()) ||
- conversation.messages.some((message) =>
- message.content.toLowerCase().includes(searchTerm.toLowerCase())
- );
+ conversation.messages.some((message) => {
+ if (typeof message.content === 'string') {
+ return message.content.toLowerCase().includes(searchTerm.toLowerCase());
+ } else if (Array.isArray(message.content)) {
+ return message.content.some(content => content.text?.toLowerCase().includes(searchTerm.toLowerCase()));
+ }
+ return false;
+ });
const isMatch = (showCurrentCourseOnly ? courseMatch : true) && searchTermMatch;
return isMatch;
};
diff --git a/src/components/UIUC-Components/ContextCards.tsx b/src/components/UIUC-Components/ContextCards.tsx
index de8b2f602..c122481c5 100644
--- a/src/components/UIUC-Components/ContextCards.tsx
+++ b/src/components/UIUC-Components/ContextCards.tsx
@@ -5,18 +5,7 @@ import Link from 'next/link'
import axios from 'axios'
import { ContextWithMetadata } from '~/types/chat'
import { montserrat_paragraph } from 'fonts'
-
-export async function fetchPresignedUrl(filePath: string) {
- try {
- const response = await axios.post('/api/download', {
- filePath,
- })
- return response.data.url
- } catch (error) {
- console.error('Error fetching presigned URL:', error)
- return null
- }
-}
+import { fetchPresignedUrl } from '~/utils/apiUtils'
export const ContextCards = ({
contexts,
diff --git a/src/components/UIUC-Components/EditCourseCard.tsx b/src/components/UIUC-Components/EditCourseCard.tsx
index 47a643fc7..7910f9258 100644
--- a/src/components/UIUC-Components/EditCourseCard.tsx
+++ b/src/components/UIUC-Components/EditCourseCard.tsx
@@ -38,7 +38,7 @@ import { useRouter } from 'next/router'
import { LoadingSpinner } from '~/components/UIUC-Components/LoadingSpinner'
// import axios from 'axios'
import { WebScrape } from '~/components/UIUC-Components/WebScrape'
-import { callSetCourseMetadata } from '~/utils/apiUtils'
+import { callSetCourseMetadata, uploadToS3 } from '~/utils/apiUtils'
import { montserrat_heading, montserrat_paragraph } from 'fonts'
import { notifications } from '@mantine/notifications'
import SetExampleQuestions from './SetExampleQuestions'
@@ -174,57 +174,6 @@ const EditCourseCard = ({
return response
}
- const uploadToS3 = async (file: File | null) => {
- if (!file) return
-
- const requestObject = {
- method: 'POST',
- headers: {
- 'Content-Type': 'application/json',
- },
- body: JSON.stringify({
- fileName: file.name,
- fileType: file.type,
- courseName: course_name,
- }),
- }
-
- try {
- interface PresignedPostResponse {
- post: {
- url: string
- fields: { [key: string]: string }
- }
- }
-
- // Then, update the lines where you fetch the response and parse the JSON
- const response = await fetch('/api/UIUC-api/uploadToS3', requestObject)
- const data = (await response.json()) as PresignedPostResponse
-
- const { url, fields } = data.post as {
- url: string
- fields: { [key: string]: string }
- }
- const formData = new FormData()
-
- Object.entries(fields).forEach(([key, value]) => {
- formData.append(key, value)
- })
-
- formData.append('file', file)
-
- await fetch(url, {
- method: 'POST',
- body: formData,
- })
-
- console.log(file.name + 'uploaded to S3 successfully!!')
- return data.post.fields.key
- } catch (error) {
- console.error('Error uploading file:', error)
- }
- }
-
const handleKeyUpdate = async (inputValue: string, isAzure: boolean) => {
if (inputValue === '' && courseMetadata?.openai_api_key === '') {
console.log('Key already empty')
@@ -721,7 +670,7 @@ const EditCourseCard = ({
if (e.target.files?.length) {
console.log('Uploading to s3')
const banner_s3_image = await uploadToS3(
- e.target.files?.[0] ?? null,
+ e.target.files?.[0] ?? null, course_name
)
if (banner_s3_image && courseMetadata) {
courseMetadata.banner_image_s3 = banner_s3_image
diff --git a/src/components/UIUC-Components/MakeQueryAnalysisPage.tsx b/src/components/UIUC-Components/MakeQueryAnalysisPage.tsx
index 4d7d62f75..00aa1069d 100644
--- a/src/components/UIUC-Components/MakeQueryAnalysisPage.tsx
+++ b/src/components/UIUC-Components/MakeQueryAnalysisPage.tsx
@@ -1,8 +1,7 @@
import Head from 'next/head'
import { montserrat_heading, montserrat_paragraph } from 'fonts'
// import { DropzoneS3Upload } from '~/components/UIUC-Components/Upload_S3'
-import { fetchPresignedUrl } from '~/components/UIUC-Components/ContextCards'
-
+import { fetchPresignedUrl } from '~/utils/apiUtils'
import {
// Badge,
// MantineProvider,
diff --git a/src/pages/api/UIUC-api/uploadToS3.ts b/src/pages/api/UIUC-api/uploadToS3.ts
index 356785993..a1214a139 100644
--- a/src/pages/api/UIUC-api/uploadToS3.ts
+++ b/src/pages/api/UIUC-api/uploadToS3.ts
@@ -3,12 +3,12 @@ import { S3Client } from '@aws-sdk/client-s3'
import { NextApiRequest, NextApiResponse } from 'next'
import { createPresignedPost } from '@aws-sdk/s3-presigned-post'
-const aws_config = {
+export const aws_config = {
bucketName: 'uiuc-chatbot',
region: 'us-east-1',
accessKeyId: process.env.AWS_KEY,
secretAccessKey: process.env.AWS_SECRET,
-}
+};
console.log('bucket name ---------------', process.env.S3_BUCKET_NAME)
console.log('aws ---------------', process.env.AWS_KEY)
diff --git a/src/pages/api/chat.ts b/src/pages/api/chat.ts
index 8afc87449..94b2d8f6c 100644
--- a/src/pages/api/chat.ts
+++ b/src/pages/api/chat.ts
@@ -1,7 +1,7 @@
// src/pages/api/chat.ts
import { DEFAULT_SYSTEM_PROMPT, DEFAULT_TEMPERATURE } from '@/utils/app/const'
import { OpenAIError, OpenAIStream } from '@/utils/server'
-import { ChatBody, ContextWithMetadata, OpenAIChatMessage } from '@/types/chat'
+import { ChatBody, Content, ContextWithMetadata, OpenAIChatMessage } from '@/types/chat'
// @ts-expect-error - no types
import wasm from '../../../node_modules/@dqbd/tiktoken/lite/tiktoken_bg.wasm?module'
import tiktokenModel from '@dqbd/tiktoken/encoders/cl100k_base.json'
@@ -17,7 +17,7 @@ export const config = {
const handler = async (req: Request): Promise<NextResponse> => {
try {
- const { model, messages, key, prompt, temperature, course_name } =
+ const { model, messages, key, prompt, temperature, course_name, stream } =
(await req.json()) as ChatBody
await init((imports) => WebAssembly.instantiate(wasm, imports))
@@ -40,7 +40,13 @@ const handler = async (req: Request): Promise => {
}
// ! PROMPT STUFFING
- const search_query = messages[messages.length - 1]?.content as string // most recent message
+ let search_query: string;
+ if (typeof messages[messages.length - 1]?.content === 'string') {
+ search_query = messages[messages.length - 1]?.content as string;
+ } else {
+ search_query = (messages[messages.length - 1]?.content as Content[]).map(c => c.text || '').join(' ');
+ }
+ // most recent message
const contexts_arr = messages[messages.length - 1]
?.contexts as ContextWithMetadata[]
@@ -62,7 +68,7 @@ const handler = async (req: Request): Promise => {
// else if (course_name == 'global' || course_name == 'search-all') {
// todo
// }
- else {
+ else if (stream) {
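+ // Non-streaming requests (e.g., the image-description call) skip context stuffing; their prompt arrives fully formed.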
// regular context stuffing
const stuffedPrompt = (await getStuffedPrompt(
course_name,
@@ -70,7 +76,22 @@ const handler = async (req: Request): Promise => {
contexts_arr,
token_limit,
)) as string
- messages[messages.length - 1]!.content = stuffedPrompt
+ if (typeof messages[messages.length - 1]?.content === 'string') {
+ messages[messages.length - 1]!.content = stuffedPrompt;
+ } else if (Array.isArray(messages[messages.length - 1]?.content) &&
+ (messages[messages.length - 1]!.content as Content[]).every(item => 'type' in item)) {
+
+ const contentArray = messages[messages.length - 1]!.content as Content[];
+ const textContentIndex = contentArray.findIndex(item => item.type === 'text');
+
+ if (textContentIndex !== -1 && contentArray[textContentIndex]) {
+ // Replace existing text content with the new stuffed prompt
+ contentArray[textContentIndex] = { ...contentArray[textContentIndex], text: stuffedPrompt, type: 'text' };
+ } else {
+ // Add new stuffed prompt if no text content exists
+ contentArray.push({ type: 'text', text: stuffedPrompt });
+ }
+ }
}
// Take most recent N messages that will fit in the context window
@@ -82,36 +103,47 @@ const handler = async (req: Request): Promise => {
for (let i = messages.length - 1; i >= 0; i--) {
const message = messages[i]
if (message) {
- const tokens = encoding.encode(message.content)
+ let content: string;
+ if (typeof message.content === 'string') {
+ content = message.content;
+ } else {
+ content = message.content.map(c => c.text || '').join(' ');
+ }
+ const tokens = encoding.encode(content)
if (tokenCount + tokens.length + 1000 > token_limit) {
break
}
tokenCount += tokens.length
messagesToSend = [
- { role: message.role, content: message.content },
+ { role: message.role, content: message.content as Content[] },
...messagesToSend,
]
}
}
encoding.free() // keep this
- // console.log('Prompt being sent to OpenAI: ', promptToSend)
- // console.log('Message history being sent to OpenAI: ', messagesToSend)
+ console.log('Prompt being sent to OpenAI: ', promptToSend)
+ console.log('Message history being sent to OpenAI: ', messagesToSend)
// Add custom instructions to system prompt
const systemPrompt =
promptToSend + "Only answer if it's related to the course materials."
- const stream = await OpenAIStream(
+ const apiStream = await OpenAIStream(
model,
systemPrompt,
temperatureToUse,
key,
messagesToSend,
+ stream
)
+ if (stream) {
+ return new NextResponse(apiStream)
+ } else {
+ return new NextResponse(JSON.stringify(apiStream))
+ }
- return new NextResponse(stream)
} catch (error) {
if (error instanceof OpenAIError) {
const { name, message } = error
diff --git a/src/pages/api/contextStuffingHelper.ts b/src/pages/api/contextStuffingHelper.ts
index dcc622772..29ca184a8 100644
--- a/src/pages/api/contextStuffingHelper.ts
+++ b/src/pages/api/contextStuffingHelper.ts
@@ -67,10 +67,10 @@ export async function getStuffedPrompt(
'\n\nNow please respond to my query: ' +
searchQuery
const totalNumTokens = encoding.encode(stuffedPrompt).length
- console.log('Stuffed prompt', stuffedPrompt.substring(0, 3700))
- console.log(
- `Total number of tokens: ${totalNumTokens}. Number of docs: ${contexts.length}, number of valid docs: ${validDocs.length}`,
- )
+ // console.log('Stuffed prompt', stuffedPrompt.substring(0, 3700))
+ // console.log(
+ // `Total number of tokens: ${totalNumTokens}. Number of docs: ${contexts.length}, number of valid docs: ${validDocs.length}`,
+ // )
return stuffedPrompt
} catch (e) {
diff --git a/src/pages/api/getContexts.ts b/src/pages/api/getContexts.ts
index aa7bee4b9..ea5d884b2 100644
--- a/src/pages/api/getContexts.ts
+++ b/src/pages/api/getContexts.ts
@@ -28,6 +28,7 @@ export const fetchContexts = async (
}
}
+export default fetchContexts;
// Axios doesn't work in Next.js Edge runtime, so using standard fetch instead.
// export async function fetchContextsNOAXIOS(
// course_name: string,
diff --git a/src/pages/api/getExtremePrompt.ts b/src/pages/api/getExtremePrompt.ts
index e8d2d6871..65e10950d 100644
--- a/src/pages/api/getExtremePrompt.ts
+++ b/src/pages/api/getExtremePrompt.ts
@@ -24,3 +24,5 @@ export async function getExtremePrompt(
// console.log('finalPromptStr:\n', finalPromptStr)
return finalPromptStr
}
+
+export default getExtremePrompt;
\ No newline at end of file
diff --git a/src/pages/api/google.ts b/src/pages/api/google.ts
index 153d0cea6..8719e846f 100644
--- a/src/pages/api/google.ts
+++ b/src/pages/api/google.ts
@@ -17,7 +17,13 @@ const handler = async (req: NextApiRequest, res: NextApiResponse) => {
req.body as GoogleBody
const userMessage = messages?.[messages.length - 1] ?? { content: '' }
- const query = encodeURIComponent(userMessage.content.trim())
+ let queryContent = '';
+ if (typeof userMessage.content === 'string') {
+ queryContent = userMessage.content.trim();
+ } else if (Array.isArray(userMessage.content)) {
+ queryContent = userMessage.content.map(content => content.text).join(' ').trim();
+ }
+ const query = encodeURIComponent(queryContent);
// const userMessage = messages[messages.length - 1]
// const query = encodeURIComponent(userMessage.content.trim())
@@ -107,7 +113,7 @@ const handler = async (req: NextApiRequest, res: NextApiResponse) => {
It's 70 degrees and sunny in San Francisco today. [[1]](https://www.google.com/search?q=weather+san+francisco)
Input:
- ${userMessage.content.trim()}
+ ${queryContent}
Sources:
${filteredSources.map((source) => {
diff --git a/src/pages/api/home/home.context.tsx b/src/pages/api/home/home.context.tsx
index b4866f445..521c8ff73 100644
--- a/src/pages/api/home/home.context.tsx
+++ b/src/pages/api/home/home.context.tsx
@@ -20,6 +20,7 @@ export interface HomeContextProps {
conversation: Conversation,
data: KeyValuePair,
) => void
+ setIsImg2TextLoading: (isImg2TextLoading: boolean) => void;
}
const HomeContext = createContext<HomeContextProps>(undefined!)
diff --git a/src/pages/api/home/home.state.tsx b/src/pages/api/home/home.state.tsx
index 608886672..640319887 100644
--- a/src/pages/api/home/home.state.tsx
+++ b/src/pages/api/home/home.state.tsx
@@ -29,6 +29,7 @@ export interface HomeInitialState {
serverSidePluginKeysSet: boolean
cooldown: number
showModelSettings: boolean
+ isImg2TextLoading: boolean
}
export const initialState: HomeInitialState = {
@@ -55,4 +56,5 @@ export const initialState: HomeInitialState = {
serverSidePluginKeysSet: false,
cooldown: 0,
showModelSettings: false,
+ isImg2TextLoading: false,
}
diff --git a/src/pages/api/home/home.tsx b/src/pages/api/home/home.tsx
index 9833a97f5..6e4629beb 100644
--- a/src/pages/api/home/home.tsx
+++ b/src/pages/api/home/home.tsx
@@ -26,7 +26,7 @@ import { getSettings } from '@/utils/app/settings'
import { type Conversation } from '@/types/chat'
import { type KeyValuePair } from '@/types/data'
import { type FolderInterface, type FolderType } from '@/types/folder'
-import { OpenAIModelID, OpenAIModels, fallbackModelID } from '@/types/openai'
+import { OpenAIModel, OpenAIModelID, OpenAIModels, fallbackModelID } from '@/types/openai'
import { type Prompt } from '@/types/prompt'
import { Chat } from '@/components/Chat/Chat'
@@ -49,7 +49,7 @@ const Home = () => {
const { getModelsError } = useErrorService()
const [isLoading, setIsLoading] = useState(true) // Add a new state for loading
- const defaultModelId = 'gpt-4'
+ const defaultModelId = OpenAIModelID.GPT_4
const serverSidePluginKeysSet = true
const contextValue = useCreateReducer<HomeInitialState>({
@@ -84,23 +84,10 @@ const Home = () => {
useEffect(() => {
// Set model (to only available models)
- // First try to use selectedconversation model, if not available, use default model
const modelId = selectedConversation?.model.id
- // 1. Default to GPT-4 (either OpenAI or Azure) if available
- // Fallback
-
- // , otherwise fallback to the first model in the list (random)
- const defaultModel = models.find(model => (model.id === 'gpt-4-from-canada-east' || model.id === 'gpt-4')) || models[0]
- let model = models.find((model) => model.id === modelId) || defaultModel
-
- console.debug('Home.tsx -- setting DefaultModelId -- avail models: ', models)
- console.debug('Home.tsx -- setting DefaultModelId -- from selectedConto:', modelId)
-
- if (!model) {
- console.log('NO MODELS FOUND -- Falling back to GPT-4 standard: ', OpenAIModels['gpt-4'])
- model = OpenAIModels['gpt-4']
- }
- console.log('Home.tsx -- setting DefaultModelId SETTING IT TO: ', model)
+ console.log("In effect of home, selectedConversation model id: ", modelId)
+ const lastConversation = conversations[conversations.length - 1]
+ const model = selectBestModel(lastConversation, models, defaultModelId);
dispatch({
field: 'defaultModelId',
@@ -117,7 +104,7 @@ const Home = () => {
value: convo_with_valid_model,
})
}
- console.debug("In effect of home Using model: ", defaultModel)
+ // console.debug("In effect of home Using model: ", defaultModel)
}, [models])
@@ -313,6 +300,29 @@ const Home = () => {
saveFolders(updatedFolders)
}
+
+ const selectBestModel = (lastConversation: Conversation | undefined, models: OpenAIModel[], defaultModelId: OpenAIModelID): OpenAIModel => {
+ // If models array is empty, return defaultModelId
+ if (!models.length) {
+ return OpenAIModels[defaultModelId]
+ }
+
+ // If the last conversation's model is available, use it
+ if (lastConversation && lastConversation.model && models.some(model => model.id === lastConversation.model.id)) {
+ return lastConversation.model as OpenAIModel;
+ } else {
+ // If 'gpt-4-from-canada-east' or 'gpt-4' are available, use whichever is available
+ const preferredModel = models.find(model => ['gpt-4-from-canada-east', 'gpt-4'].includes(model.id));
+
+ if (preferredModel) {
+ return preferredModel;
+ } else {
+ // Fallback to the default model
+ return models.find((model) => model.id === defaultModelId) || models[0] || OpenAIModels[defaultModelId];
+ }
+ }
+ }
+
// CONVERSATION OPERATIONS --------------------------------------------
const handleNewConversation = () => {
@@ -320,11 +330,15 @@ const Home = () => {
console.debug("Models available: ", models)
console.debug("IN NEW CONVERSATION Using model: ", defaultModelId)
+ // Determine the model to use for the new conversation
+ const model = selectBestModel(lastConversation, models, defaultModelId);
+ console.debug('NEW CONVO : handleNewConversation SETTING IT TO: ', model);
+
const newConversation: Conversation = {
id: uuidv4(),
name: t('New Conversation'),
messages: [],
- model: OpenAIModels[defaultModelId],
+ model: model,
prompt: DEFAULT_SYSTEM_PROMPT,
temperature: lastConversation?.temperature ?? DEFAULT_TEMPERATURE,
folderId: null,
@@ -359,7 +373,85 @@ const Home = () => {
dispatch({ field: 'conversations', value: all })
}
+ // Image to Text
+ const setIsImg2TextLoading = (isImg2TextLoading: boolean) => {
+ dispatch({ field: 'isImg2TextLoading', value: isImg2TextLoading });
+ };
+
+ const [isDragging, setIsDragging] = useState(false);
+ const [dragEnterCounter, setDragEnterCounter] = useState(0);
+
+ const GradientIconPhoto = () => (
+
+ );
+
// EFFECTS --------------------------------------------
+ useEffect(() => {
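+ // dragenter/dragleave fire for every nested element, so a counter (not a boolean) tracks whether a drag is still anywhere over the window.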
+ const handleDocumentDragOver = (e: DragEvent) => {
+ e.preventDefault();
+ };
+
+ const handleDocumentDragEnter = (e: DragEvent) => {
+ setDragEnterCounter((prev) => prev + 1);
+ setIsDragging(true);
+ };
+
+ const handleDocumentDragLeave = (e: DragEvent) => {
+ e.preventDefault();
+ setDragEnterCounter((prev) => prev - 1);
+ if (dragEnterCounter === 1 || e.relatedTarget === null) {
+ setIsDragging(false);
+ }
+ };
+
+ const handleDocumentDrop = (e: DragEvent) => {
+ e.preventDefault();
+ setIsDragging(false);
+ setDragEnterCounter(0);
+ };
+
+ const handleDocumentKeyDown = (e: KeyboardEvent) => {
+ if (e.key === 'Escape') {
+ setIsDragging(false);
+ setDragEnterCounter(0);
+ }
+ };
+
+ const handleMouseOut = (e: MouseEvent) => {
+ if (!e.relatedTarget) {
+ setIsDragging(false);
+ setDragEnterCounter(0);
+ }
+ };
+
+ document.addEventListener('dragover', handleDocumentDragOver);
+ document.addEventListener('dragenter', handleDocumentDragEnter);
+ document.addEventListener('dragleave', handleDocumentDragLeave);
+ document.addEventListener('drop', handleDocumentDrop);
+ document.addEventListener('keydown', handleDocumentKeyDown);
+ window.addEventListener('mouseout', handleMouseOut);
+
+ return () => {
+ document.removeEventListener('dragover', handleDocumentDragOver);
+ document.removeEventListener('dragenter', handleDocumentDragEnter);
+ document.removeEventListener('dragleave', handleDocumentDragLeave);
+ document.removeEventListener('drop', handleDocumentDrop);
+ document.removeEventListener('keydown', handleDocumentKeyDown);
+ window.removeEventListener('mouseout', handleMouseOut);
+ };
+ }, []);
useEffect(() => {
if (window.innerWidth < 640) {
@@ -468,54 +560,65 @@ const Home = () => {
return <></>
}
return (
- <>
-   <Head>
-     <title>UIUC.chat</title>
-   </Head>
-   {selectedConversation && (
-     <>
-       {/* …chat layout, markup elided… */}
-       {course_metadata && (
-         <>{/* …navbar, markup elided… */}</>
-       )}
-     </>
-   )}
- </>
+ <>
+   <Head>
+     <title>UIUC.chat</title>
+   </Head>
+   {selectedConversation && (
+     <>
+       {isDragging && selectedConversation?.model.id === OpenAIModelID.GPT_4_VISION && (
+         <div className="pulsate">
+           <GradientIconPhoto />
+           Drop your image here!
+         </div>
+       )}
+       {/* …chat layout, markup elided… */}
+       {course_metadata && (
+         <>{/* …navbar, markup elided… */}</>
+       )}
+     </>
+   )}
+ </>
)
}
export default Home
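Reviewer note: the precedence selectBestModel encodes is easy to misread from the nested conditionals, so here is a small behavioral sketch. It is not part of the patch, and it assumes selectBestModel were extracted to module scope; the import path and example IDs follow src/types/openai.ts below.

// Behavioral sketch only, not part of the patch.
import { OpenAIModels, OpenAIModelID, type OpenAIModel } from '~/types/openai'

const available: OpenAIModel[] = [
  OpenAIModels[OpenAIModelID.GPT_3_5],
  OpenAIModels[OpenAIModelID.GPT_4],
]

// No prior conversation and 'gpt-4' is deployed: the preferred-model branch wins.
selectBestModel(undefined, available, OpenAIModelID.GPT_3_5) // -> OpenAIModels['gpt-4']

// The last conversation's model is still deployed: it is kept.
// selectBestModel(lastConvo, available, OpenAIModelID.GPT_4) // -> lastConvo.model

// No models fetched at all: fall back to the static table entry.
selectBestModel(undefined, [], OpenAIModelID.GPT_4) // -> OpenAIModels['gpt-4']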
diff --git a/src/styles/globals.css b/src/styles/globals.css
index 69dc032a4..d7731018d 100644
--- a/src/styles/globals.css
+++ b/src/styles/globals.css
@@ -298,3 +298,17 @@ li {
background-color: #807f7f;
border-radius: 4px;
}
+
+@keyframes pulsate {
+ 0% {
+ color: #ffffff;
+ }
+
+ 100% {
+ color: #9D4EDD;
+ }
+}
+
+.pulsate {
+ animation: pulsate 1s infinite alternate;
+}
\ No newline at end of file
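The pulsate animation is consumed from JSX by toggling the class on and off. A minimal sketch; DropHint is a hypothetical component, and only the class name comes from this stylesheet:

// Hypothetical component; only the 'pulsate' class is from the patch.
export const DropHint = ({ active }: { active: boolean }) => (
  <span className={active ? 'pulsate' : undefined}>Drop your image here!</span>
)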
diff --git a/src/types/chat.ts b/src/types/chat.ts
index 551e424ab..91999f20a 100644
--- a/src/types/chat.ts
+++ b/src/types/chat.ts
@@ -3,14 +3,22 @@ import { OpenAIModel } from './openai'
export interface Message {
// id: string;
role: Role
- content: string
+ content: string | Content[]
contexts?: ContextWithMetadata[] // TODO: make sure this works.
responseTimeSec?: number
}
+export interface Content {
+ type: string;
+ text?: string;
+ image_url?: {
+ url: string;
+ };
+};
+
export interface OpenAIChatMessage {
role: Role
- content: string
+ content: Content[]
}
export interface ContextWithMetadata {
@@ -34,6 +42,7 @@ export interface ChatBody {
prompt: string
temperature: number
course_name: string
+ stream: boolean
// NO FOLDER ID
}
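Because Message.content widens from string to string | Content[], every call site now has to narrow before treating it as text. A hedged sketch of one way to normalize it; asContentArray and textOf are hypothetical helpers, not additions from this patch:

// Hypothetical helpers for narrowing the new union; import path assumed.
import { type Content, type Message } from '~/types/chat'

function asContentArray(message: Message): Content[] {
  if (typeof message.content === 'string') {
    // Legacy shape: wrap the plain string as a single text part
    return [{ type: 'text', text: message.content }]
  }
  return message.content
}

// Extract just the text parts, mirroring the filter pattern used in Chat.tsx
const textOf = (m: Message): string =>
  asContentArray(m)
    .filter((c) => c.type === 'text')
    .map((c) => c.text ?? '')
    .join(' ')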
diff --git a/src/types/openai.ts b/src/types/openai.ts
index e1edffa80..d36d8a802 100644
--- a/src/types/openai.ts
+++ b/src/types/openai.ts
@@ -12,6 +12,7 @@ export enum OpenAIModelID {
GPT_3_5_16k = 'gpt-3.5-turbo-16k',
GPT_4 = 'gpt-4',
GPT_4_1106_PREVIEW = 'gpt-4-1106-preview',
+ GPT_4_VISION = 'gpt-4-vision-preview',
// GPT_4_32K = 'gpt-4-32k',
// Azure -- ONLY GPT-4 supported for now... due to deployment param being env var...
GPT_4_AZURE = 'gpt-4-from-canada-east',
@@ -25,13 +26,13 @@ export const fallbackModelID = OpenAIModelID.GPT_4
export const OpenAIModels: Record<OpenAIModelID, OpenAIModel> = {
[OpenAIModelID.GPT_3_5]: {
id: OpenAIModelID.GPT_3_5,
- name: 'GPT-3.5-4k',
+ name: 'GPT-3.5 Turbo',
maxLength: 12000,
tokenLimit: 4096,
},
[OpenAIModelID.GPT_3_5_16k]: {
id: OpenAIModelID.GPT_3_5_16k,
- name: 'GPT-3.5-16k (large context)',
+ name: 'GPT-3.5 (16k large context)',
maxLength: 49000,
tokenLimit: 16385,
},
@@ -43,7 +44,7 @@ export const OpenAIModels: Record<OpenAIModelID, OpenAIModel> = {
},
[OpenAIModelID.GPT_4_1106_PREVIEW]: {
id: OpenAIModelID.GPT_4_1106_PREVIEW,
- name: 'GPT-4-1106-preview (128k)',
+ name: 'GPT-4 Turbo (128k)',
maxLength: 24000,
tokenLimit: 128000,
},
@@ -73,4 +74,10 @@ export const OpenAIModels: Record<OpenAIModelID, OpenAIModel> = {
maxLength: 24000,
tokenLimit: 8192,
},
+ [OpenAIModelID.GPT_4_VISION]: {
+ id: OpenAIModelID.GPT_4_VISION,
+ name: 'GPT-4 Vision',
+ maxLength: 8000,
+ tokenLimit: 15000, // TPM of 40,000 -- so have to reduce this, despite it supporting up to 128k
+ },
}
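The GPT-4 Vision entry caps tokenLimit well below the model's true 128k context because of the 40k TPM rate limit. A sketch of how a caller might respect that cap before sending a request; the 1,000-token reply reserve is an assumption, and promptTokens would come from whatever tokenizer the app already uses:

// Sketch only; the reply reserve is an assumption, not patch behavior.
import { OpenAIModels, OpenAIModelID } from '~/types/openai'

const vision = OpenAIModels[OpenAIModelID.GPT_4_VISION]

function fitsInVisionContext(promptTokens: number): boolean {
  // tokenLimit is 15,000 here because of the 40k TPM cap, not the model's
  // actual 128k context window.
  return promptTokens <= vision.tokenLimit - 1000
}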
diff --git a/src/utils/apiUtils.ts b/src/utils/apiUtils.ts
index 86c3e7ad9..7f5193a5f 100644
--- a/src/utils/apiUtils.ts
+++ b/src/utils/apiUtils.ts
@@ -4,6 +4,8 @@ import {
type CourseMetadata,
} from '~/types/courseMetadata'
import { log } from 'next-axiom'
+import { v4 as uuidv4 } from 'uuid';
+import axios from 'axios'
export const config = {
runtime: 'edge',
@@ -51,3 +53,69 @@ export const callSetCourseMetadata = async (
return false
}
}
+
+export const uploadToS3 = async (file: File | null, course_name: string) => {
+ if (!file) return
+
+ const uniqueFileName = `${uuidv4()}.${file.name.split('.').pop()}`;
+
+ const requestObject = {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ },
+ body: JSON.stringify({
+ fileName: file.name,
+ fileType: file.type,
+ courseName: course_name,
+ uniqueFileName: uniqueFileName,
+ }),
+ }
+
+ try {
+ interface PresignedPostResponse {
+ post: {
+ url: string
+ fields: { [key: string]: string }
+ }
+ }
+
+ // Request the presigned POST from our API route, then parse the JSON response
+ const response = await fetch('/api/UIUC-api/uploadToS3', requestObject)
+ const data = (await response.json()) as PresignedPostResponse
+
+ const { url, fields } = data.post as {
+ url: string
+ fields: { [key: string]: string }
+ }
+ const formData = new FormData()
+
+ Object.entries(fields).forEach(([key, value]) => {
+ formData.append(key, value)
+ })
+
+ formData.append('file', file)
+
+ await fetch(url, {
+ method: 'POST',
+ body: formData,
+ })
+
+ console.log(file.name + ' uploaded to S3 successfully!')
+ return data.post.fields.key
+ } catch (error) {
+ console.error('Error uploading file:', error)
+ }
+}
+
+export async function fetchPresignedUrl(filePath: string) {
+ try {
+ const response = await axios.post('/api/download', {
+ filePath,
+ })
+ return response.data.url
+ } catch (error) {
+ console.error('Error fetching presigned URL:', error)
+ return null
+ }
+}
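Taken together, the two new helpers form a round trip: upload a file, get back its S3 object key, then exchange the key for a presigned GET URL. A hedged usage sketch; 'example-course' is a placeholder, and passing the key straight to fetchPresignedUrl assumes /api/download accepts it as the filePath:

// Hedged round-trip sketch using the helpers added above.
import { uploadToS3, fetchPresignedUrl } from '~/utils/apiUtils'

export async function uploadAndPreview(file: File): Promise<string | null> {
  const s3Key = await uploadToS3(file, 'example-course') // S3 object key, or undefined on failure
  if (!s3Key) return null
  return fetchPresignedUrl(s3Key) // presigned GET URL, or null on failure
}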
diff --git a/src/utils/server/index.ts b/src/utils/server/index.ts
index c868594bb..16536e4f2 100644
--- a/src/utils/server/index.ts
+++ b/src/utils/server/index.ts
@@ -39,6 +39,7 @@ export const OpenAIStream = async (
temperature: number,
key: string,
messages: OpenAIChatMessage[],
+ stream: boolean,
) => {
console.debug("In OpenAIStream, model: ", model)
@@ -88,6 +89,21 @@ export const OpenAIStream = async (
// })
// console.debug("Final request sent to OpenAI ", JSON.stringify(JSON.parse(final_request_to_openai), null, 2))
+ const body = JSON.stringify({
+ ...(OPENAI_API_TYPE === 'openai' && { model: model.id }),
+ messages: [
+ {
+ role: 'system',
+ content: systemPrompt,
+ },
+ ...messages,
+ ],
+ max_tokens: 1000,
+ temperature: temperature,
+ stream: stream,
+ })
+ // The outgoing request body could be logged here for debugging:
+ // console.log("openai api body: ", body)
const res = await fetch(url, {
headers: {
@@ -104,19 +120,7 @@ export const OpenAIStream = async (
}),
},
method: 'POST',
- body: JSON.stringify({
- ...(OPENAI_API_TYPE === 'openai' && { model: model.id }),
- messages: [
- {
- role: 'system',
- content: systemPrompt,
- },
- ...messages,
- ],
- max_tokens: 1000,
- temperature: temperature,
- stream: true,
- }),
+ body: body,
})
const encoder = new TextEncoder()
@@ -139,34 +143,43 @@ export const OpenAIStream = async (
}
}
- const stream = new ReadableStream({
- async start(controller) {
- const onParse = (event: ParsedEvent | ReconnectInterval) => {
- if (event.type === 'event') {
- const data = event.data
-
- try {
- const json = JSON.parse(data)
- if (json.choices[0].finish_reason != null) {
- controller.close()
- return
+ if (stream) {
+ console.log("Streaming response ")
+ const apiStream = new ReadableStream({
+ async start(controller) {
+ const onParse = (event: ParsedEvent | ReconnectInterval) => {
+ if (event.type === 'event') {
+ const data = event.data
+
+ try {
+ const json = JSON.parse(data)
+ if (json.choices[0].finish_reason != null) {
+ controller.close()
+ return
+ }
+ const text = json.choices[0].delta.content
+ const queue = encoder.encode(text)
+ controller.enqueue(queue)
+ } catch (e) {
+ controller.error(e)
}
- const text = json.choices[0].delta.content
- const queue = encoder.encode(text)
- controller.enqueue(queue)
- } catch (e) {
- controller.error(e)
}
}
- }
- const parser = createParser(onParse)
+ const parser = createParser(onParse)
- for await (const chunk of res.body as any) {
- parser.feed(decoder.decode(chunk))
- }
- },
- })
+ for await (const chunk of res.body as any) {
+ parser.feed(decoder.decode(chunk))
+ }
+ },
+ })
+
+ return apiStream
+ } else {
+ console.log("Non Streaming response ")
+ const json = await res.json()
+ console.log('Final OpenAI response: ', json)
+ return json
+ }
- return stream
}
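With the stream flag threaded through, OpenAIStream now returns either a ReadableStream (stream === true) or the parsed JSON completion (stream === false), so callers must branch on which they got. A sketch under that assumption; the arguments before the flag follow the existing signature and are assumed to be in scope:

// Hedged consumer sketch; model, systemPrompt, temperature, key, and
// messages are assumed bindings, and the import path is an assumption.
import { OpenAIStream } from '~/utils/server'

async function complete(streamed: boolean): Promise<string> {
  const result = await OpenAIStream(model, systemPrompt, temperature, key, messages, streamed)

  if (result instanceof ReadableStream) {
    // Streaming path: concatenate the decoded text chunks
    const reader = result.getReader()
    const decoder = new TextDecoder()
    let out = ''
    for (;;) {
      const { done, value } = await reader.read()
      if (done) break
      out += decoder.decode(value)
    }
    return out
  }

  // Non-streaming path: `result` is the parsed JSON completion
  return result.choices[0].message.content
}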