diff --git a/src/components/Chat/Chat.tsx b/src/components/Chat/Chat.tsx
index ded2b6250..35286ff54 100644
--- a/src/components/Chat/Chat.tsx
+++ b/src/components/Chat/Chat.tsx
@@ -29,15 +29,13 @@ import {
   useRef,
   useState,
 } from 'react'
-import toast from 'react-hot-toast'
-import { Button, Container, Text, Title } from '@mantine/core'
+import { Button, Text } from '@mantine/core'
 import { useTranslation } from 'next-i18next'

 import { getEndpoint } from '@/utils/app/api'
 import {
   saveConversation,
   saveConversations,
-  updateConversation,
 } from '@/utils/app/conversation'
 import { throttle } from '@/utils/data/throttle'

@@ -46,6 +44,7 @@ import {
   type ChatBody,
   type Conversation,
   type Message,
+  Content,
 } from '@/types/chat'
 import { type Plugin } from '@/types/plugin'

@@ -55,7 +54,7 @@ import { ChatInput } from './ChatInput'
 import { ChatLoader } from './ChatLoader'
 import { ErrorMessageDiv } from './ErrorMessageDiv'
 import { MemoizedChatMessage } from './MemoizedChatMessage'
-import { fetchPresignedUrl } from '~/components/UIUC-Components/ContextCards'
+import { fetchPresignedUrl } from '~/utils/apiUtils'

 import { type CourseMetadata } from '~/types/courseMetadata'

@@ -75,7 +74,6 @@ import ChatNavbar from '../UIUC-Components/navbars/ChatNavbar'
 import { notifications } from '@mantine/notifications'
 import { Montserrat } from 'next/font/google'
 import { montserrat_heading, montserrat_paragraph } from 'fonts'
-import { NextResponse } from 'next/server'

 const montserrat_med = Montserrat({
   weight: '500',
@@ -114,6 +112,7 @@ export const Chat = memo(({ stopConversationRef, courseMetadata }: Props) => {
       loading,
       prompts,
       showModelSettings,
+      isImg2TextLoading
     },
     handleUpdateConversation,
     dispatch: homeDispatch,
@@ -173,14 +172,90 @@ export const Chat = memo(({ stopConversationRef, courseMetadata }: Props) => {
     }
   }

+  // Describe any uploaded images with the vision model so retrieval can work over them.
+  const handleImageContent = async (message: Message, endpoint: string, updatedConversation: Conversation, searchQuery: string, controller: AbortController) => {
+    const imageContent = (message.content as Content[]).filter(content => content.type === 'image_url');
+    if (imageContent.length > 0) {
+      homeDispatch({ field: 'isImg2TextLoading', value: true })
+      const chatBody: ChatBody = {
+        model: updatedConversation.model,
+        messages: [
+          {
+            ...message,
+            content: [
+              ...imageContent,
+              { type: 'text', text: 'Provide detailed description of the image(s) focusing on any text (OCR information), distinct objects, colors, and actions depicted. Include contextual information, subtle details, and specific terminologies relevant for semantic document retrieval.' }
+            ]
+          }
+        ],
+        key: courseMetadata?.openai_api_key && courseMetadata?.openai_api_key != '' ? courseMetadata.openai_api_key : apiKey,
+        prompt: updatedConversation.prompt,
+        temperature: updatedConversation.temperature,
+        course_name: getCurrentPageName(),
+        stream: false,
+      };
+
+      try {
+        const response = await fetch(endpoint, {
+          method: 'POST',
+          headers: {
+            'Content-Type': 'application/json',
+          },
+          body: JSON.stringify(chatBody),
+          signal: controller.signal,
+        });
+
+        if (!response.ok) {
+          const final_response = await response.json();
+          homeDispatch({ field: 'loading', value: false });
+          homeDispatch({ field: 'messageIsStreaming', value: false });
+          throw new Error(final_response.message);
+        }
+
+        const data = await response.json();
+        const imgDesc = data.choices[0].message.content || '';
+
+        searchQuery += ` Image description: ${imgDesc}`;
+
+        const imgDescIndex = (message.content as Content[]).findIndex(content => content.type === 'text' && (content.text as string).startsWith('Image description: '));
+
+        if (imgDescIndex !== -1) {
+          (message.content as Content[])[imgDescIndex] = { type: 'text', text: `Image description: ${imgDesc}` };
+        } else {
+          (message.content as Content[]).push({ type: 'text', text: `Image description: ${imgDesc}` });
+        }
+      } catch (error) {
+        console.error('Error in chat.tsx running handleImageContent():', error);
+        controller.abort();
+      } finally {
+        homeDispatch({ field: 'isImg2TextLoading', value: false })
+      };
+    }
+    return searchQuery;
+  }
+
+  const handleContextSearch = async (message: Message, selectedConversation: Conversation, searchQuery: string) => {
+    if (getCurrentPageName() != 'gpt4') {
+      const token_limit = OpenAIModels[selectedConversation?.model.id as OpenAIModelID].tokenLimit
+      await fetchContexts(getCurrentPageName(), searchQuery, token_limit).then((curr_contexts) => {
+        message.contexts = curr_contexts as ContextWithMetadata[]
+      })
+    }
+  }
+
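+  // NOTE: handleImageContent mutates message.content in place (inserting or
+  // replacing an 'Image description: ...' text part) and returns searchQuery
+  // with the description appended, so context retrieval can match on what is
+  // actually in the images.
+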
   // THIS IS WHERE MESSAGES ARE SENT.
   const handleSend = useCallback(
     async (message: Message, deleteCount = 0, plugin: Plugin | null = null) => {
+
+      setCurrentMessage(message)
       // New way with React Context API
       // TODO: MOVE THIS INTO ChatMessage
       // console.log('IN handleSend: ', message)
       // setSearchQuery(message.content)
-      const searchQuery = message.content
+      let searchQuery = Array.isArray(message.content)
+        ? message.content.map((content) => content.text).join(' ')
+        : message.content;
+
+      // console.log("QUERY: ", searchQuery)

       if (selectedConversation) {
         let updatedConversation: Conversation
@@ -206,21 +281,18 @@ export const Chat = memo(({ stopConversationRef, courseMetadata }: Props) => {
         homeDispatch({ field: 'loading', value: true })
         homeDispatch({ field: 'messageIsStreaming', value: true })

-        // Run context search, attach to Message object.
-        if (getCurrentPageName() != 'gpt4') {
-          // THE ONLY place we fetch contexts (except ExtremePromptStuffing is still in api/chat.ts)
-          const token_limit =
-            OpenAIModels[selectedConversation?.model.id as OpenAIModelID]
-              .tokenLimit
-          await fetchContexts(
-            getCurrentPageName(),
-            searchQuery,
-            token_limit,
-          ).then((curr_contexts) => {
-            message.contexts = curr_contexts as ContextWithMetadata[]
-          })
+        const endpoint = getEndpoint(plugin);
+
+        const controller = new AbortController()
+
+        // Run image-to-text conversion, attach to Message object.
+        if (Array.isArray(message.content)) {
+          searchQuery = await handleImageContent(message, endpoint, updatedConversation, searchQuery, controller);
         }

+        // Run context search, attach to Message object.
+        await handleContextSearch(message, selectedConversation, searchQuery);
+
         const chatBody: ChatBody = {
           model: updatedConversation.model,
           messages: updatedConversation.messages,
@@ -232,8 +304,9 @@
           prompt: updatedConversation.prompt,
           temperature: updatedConversation.temperature,
           course_name: getCurrentPageName(),
+          stream: true
         }
-        const endpoint = getEndpoint(plugin) // THIS is where we could support EXTREME prompt stuffing.
+
         let body
         if (!plugin) {
           body = JSON.stringify(chatBody)
@@ -248,7 +321,8 @@
               ?.requiredKeys.find((key) => key.key === 'GOOGLE_CSE_ID')?.value,
           })
         }
-        const controller = new AbortController()
+
+        // This is where we call the OpenAI API
         const response = await fetch(endpoint, {
           method: 'POST',
           headers: {
@@ -301,13 +375,17 @@
         }
         if (!plugin) {
           if (updatedConversation.messages.length === 1) {
-            const { content } = message
+            const { content } = message;
+            // Use only the text parts of the content for the conversation name
+            const contentText = Array.isArray(content)
+              ? content.map((content) => content.text).join(' ')
+              : content;
             const customName =
-              content.length > 30 ? content.substring(0, 30) + '...' : content
+              contentText.length > 30 ? contentText.substring(0, 30) + '...' : contentText;
             updatedConversation = {
               ...updatedConversation,
               name: customName,
-            }
+            };
           }
           homeDispatch({ field: 'loading', value: false })
           const reader = data.getReader()
@@ -390,6 +468,7 @@
             updatedConversations.push(updatedConversation)
           }
           homeDispatch({ field: 'conversations', value: updatedConversations })
+          console.log('updatedConversations: ', updatedConversations)
           saveConversations(updatedConversations)
           homeDispatch({ field: 'messageIsStreaming', value: false })
         } else {
@@ -434,6 +513,20 @@
     ],
   )

+  const handleRegenerate = useCallback(() => {
+    if (currentMessage && Array.isArray(currentMessage.content)) {
+      // Find the index of the existing image description
+      const imgDescIndex = (currentMessage.content as Content[]).findIndex(content => content.type === 'text' && (content.text as string).startsWith('Image description: '));
+
+      if (imgDescIndex !== -1) {
+        // Remove the existing image description so it is regenerated fresh
+        (currentMessage.content as Content[]).splice(imgDescIndex, 1);
+      }
+
+      handleSend(currentMessage, 2, null);
+    }
+  }, [currentMessage, handleSend]);
+
   const scrollToBottom = useCallback(() => {
     if (autoScrollEnabled) {
       messagesEndRef.current?.scrollIntoView({ behavior: 'smooth' })
@@ -575,6 +668,64 @@
     )
   }

+  // Inside the Chat function, before the return statement
+  const renderMessageContent = (message: Message) => {
+    if (Array.isArray(message.content)) {
+      return (
+        <>
+          {message.content.map((content, index) => {
+            if (content.type === 'image_url' && content.image_url) {
+              return <img key={index} src={content.image_url.url} alt="Uploaded content" />;
+            }
+            return <span key={index}>{content.text}</span>;
+          })}
+        </>
+      );
+    }
+    return <span>{message.content}</span>;
+  };

+  const updateMessages = (updatedMessage: Message, messageIndex: number) => {
+    return selectedConversation?.messages.map((message, index) => {
+      return index === messageIndex ? updatedMessage : message;
+    });
+  };
+
+  const updateConversations = (updatedConversation: Conversation) => {
+    return conversations.map((conversation) =>
+      conversation.id === selectedConversation?.id ? updatedConversation : conversation
+    );
+  };
+
+  const onImageUrlsUpdate = useCallback((updatedMessage: Message, messageIndex: number) => {
+    if (!selectedConversation) {
+      throw new Error("No selected conversation found");
+    }
+
+    const updatedMessages = updateMessages(updatedMessage, messageIndex);
+    if (!updatedMessages) {
+      throw new Error("Failed to update messages");
+    }
+
+    const updatedConversation = {
+      ...selectedConversation,
+      messages: updatedMessages,
+    };
+
+    homeDispatch({
+      field: 'selectedConversation',
+      value: updatedConversation,
+    });
+
+    const updatedConversations = updateConversations(updatedConversation);
+    if (!updatedConversations) {
+      throw new Error("Failed to update conversations");
+    }
+
+    homeDispatch({ field: 'conversations', value: updatedConversations });
+    saveConversations(updatedConversations);
+  }, [selectedConversation, conversations]);
+
   return (
@@ -671,14 +822,16 @@
                     onEdit={(editedMessage) => {
-                      setCurrentMessage(editedMessage)
+                      // setCurrentMessage(editedMessage)
                       handleSend(
                         editedMessage,
                         selectedConversation?.messages.length - index,
                       )
                     }}
+                    onImageUrlsUpdate={onImageUrlsUpdate}
                   />
                 ))}

                 {loading && <ChatLoader />}
@@ -694,18 +847,15 @@
                 stopConversationRef={stopConversationRef}
                 textareaRef={textareaRef}
                 onSend={(message, plugin) => {
-                  setCurrentMessage(message)
+                  // setCurrentMessage(message)
                   handleSend(message, 0, plugin)
                 }}
                 onScrollDownClick={handleScrollDown}
-                onRegenerate={() => {
-                  if (currentMessage) {
-                    handleSend(currentMessage, 2, null)
-                  }
-                }}
+                onRegenerate={handleRegenerate}
                 showScrollDownButton={showScrollDownButton}
                 inputContent={inputContent}
                 setInputContent={setInputContent}
+                courseName={getCurrentPageName()}
               />
               {/* … */}
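Both files lean on the multimodal `Content` part type from `@/types/chat`, whose definition is outside this diff. As a reading aid only, here is a minimal sketch consistent with how the diff uses it (`type`, `text`, `image_url.url`, and `message.contexts`); the real types may differ:

```ts
// Sketch only — the real definitions live in @/types/chat.
// ContextWithMetadata is the retrieval-result type imported in Chat.tsx.
export interface Content {
  type: 'text' | 'image_url'
  text?: string               // set when type === 'text'
  image_url?: { url: string } // set when type === 'image_url'
}

export interface Message {
  role: 'user' | 'assistant' | 'system'
  content: string | Content[]      // plain text, or multimodal parts
  contexts?: ContextWithMetadata[] // attached by handleContextSearch
}
```
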
diff --git a/src/components/Chat/ChatInput.tsx b/src/components/Chat/ChatInput.tsx
index cab4d5443..318e2b5f2 100644
--- a/src/components/Chat/ChatInput.tsx
+++ b/src/components/Chat/ChatInput.tsx
@@ -1,3 +1,4 @@
+// ChatInput.tsx
 import {
   IconArrowDown,
   IconBolt,
@@ -5,6 +6,9 @@ import {
   IconPlayerStop,
   IconRepeat,
   IconSend,
+  IconPhoto,
+  IconAlertCircle,
+  IconX
 } from '@tabler/icons-react'
 import {
   KeyboardEvent,
@@ -17,8 +21,7 @@ import {
 } from 'react'

 import { useTranslation } from 'next-i18next'
-
-import { Message } from '@/types/chat'
+import { Content, Message } from '@/types/chat'
 import { Plugin } from '@/types/plugin'
 import { Prompt } from '@/types/prompt'

@@ -28,6 +31,26 @@ import { PluginSelect } from './PluginSelect'
 import { PromptList } from './PromptList'
 import { VariableModal } from './VariableModal'

+import { notifications } from '@mantine/notifications';
+import { useMantineTheme, Modal, Tooltip } from '@mantine/core';
+import { Montserrat } from 'next/font/google'
+
+import { v4 as uuidv4 } from 'uuid';
+
+import React from 'react'
+
+import { CSSProperties } from 'react';
+
+import { fetchPresignedUrl, uploadToS3 } from 'src/utils/apiUtils';
+import { ImagePreview } from './ImagePreview'
+import { OpenAIModelID } from '~/types/openai'
+
+const montserrat_med = Montserrat({
+  weight: '500',
+  subsets: ['latin'],
+})
+
 interface Props {
   onSend: (message: Message, plugin: Plugin | null) => void
   onRegenerate: () => void
@@ -37,6 +60,12 @@ interface Props {
   showScrollDownButton: boolean
   inputContent: string
   setInputContent: (content: string) => void
+  courseName: string
+}
+
+interface ProcessedImage {
+  resizedFile: File;
+  dataUrl: string;
 }

 export const ChatInput = ({
@@ -48,6 +77,7 @@ export const ChatInput = ({
   showScrollDownButton,
   inputContent,
   setInputContent,
+  courseName,
 }: Props) => {
   const { t } = useTranslation('chat')

@@ -57,7 +87,7 @@ export const ChatInput = ({
     dispatch: homeDispatch,
   } = useContext(HomeContext)

-  const [content, setContent] = useState<string>()
+  const [content, setContent] = useState<string>(() => inputContent);
   const [isTyping, setIsTyping] = useState(false)
   const [showPromptList, setShowPromptList] = useState(false)
   const [activePromptIndex, setActivePromptIndex] = useState(0)
@@ -66,8 +96,50 @@ export const ChatInput = ({
   const [isModalVisible, setIsModalVisible] = useState(false)
   const [showPluginSelect, setShowPluginSelect] = useState(false)
   const [plugin, setPlugin] = useState<Plugin | null>(null)
-
+  const [uploadingImage, setUploadingImage] = useState(false);
+  const [imageError, setImageError] = useState<string | null>(null);
+  const [isDragging, setIsDragging] = useState(false);
+  const imageUploadRef = useRef<HTMLInputElement | null>(null);
   const promptListRef = useRef(null)
+  const [imageFiles, setImageFiles] = useState<File[]>([]);
+  const [imagePreviewUrls, setImagePreviewUrls] = useState<string[]>([]);
+  const chatInputContainerRef = useRef<HTMLDivElement | null>(null);
+  const [isFocused, setIsFocused] = useState(false);
+  const [imagePreviews, setImagePreviews] = useState<string[]>([]);
+  const [selectedImage, setSelectedImage] = useState<string | null>(null);
+  const [isModalOpen, setIsModalOpen] = useState(false);
+  const [imageUrls, setImageUrls] = useState<string[]>([]);
+
+  const removeButtonStyle: CSSProperties = {
+    position: 'absolute',
+    top: '-8px',
+    right: '-8px',
+    display: 'flex',
+    alignItems: 'center',
+    justifyContent: 'center',
+    width: '24px',
+    height: '24px',
+    borderRadius: '50%',
+    backgroundColor: '#A9A9A9', // dark gray
+    color: 'white', // white icon color
+    border: '2px solid white', // white border
+    cursor: 'pointer',
+    zIndex: 2,
+  };
+
+  const removeButtonHoverStyle: CSSProperties = {
+    backgroundColor: '#505050', // even darker gray for the hover state
+  };
+
+  // Dynamically set the padding based on the presence of image previews
+  const chatInputContainerStyle: CSSProperties = {
+    paddingTop: imagePreviewUrls.length > 0 ? '10px' : '0',
+    paddingRight: imagePreviewUrls.length > 0 ? '10px' : '0',
+    paddingBottom: '0',
+    paddingLeft: '10px',
+    borderRadius: '4px', // round the edges slightly
+  };

   const filteredPrompts = prompts.filter((prompt) =>
     prompt.name.toLowerCase().includes(promptInputValue.toLowerCase()),
@@ -90,25 +162,72 @@ export const ChatInput = ({
     setContent(value)
     updatePromptListVisibility(value)
   }

-  const handleSend = () => {
+  type Role = 'user' | 'system';
+
+  const handleSend = async () => {
     if (messageIsStreaming) {
-      return
+      return;
     }

-    if (!content) {
-      alert(t('Please enter a message'))
-      return
+    const textContent = content;
+    let imageContent: Content[] = []; // explicitly typed
+
+    if (imageFiles.length > 0 && !uploadingImage) {
+      setUploadingImage(true);
+      try {
+        // If imageUrls is empty, upload all images and get their URLs
+        const imageUrlsToUse = imageUrls.length > 0 ? imageUrls :
+          await Promise.all(imageFiles.map(file => uploadImageAndGetUrl(file, courseName)));
+
+        // Construct image content for the message
+        imageContent = imageUrlsToUse
+          .filter((url): url is string => url !== '') // type-guard to filter out empty strings
+          .map(url => ({
+            type: "image_url",
+            image_url: { url }
+          }));
+
+        // console.log("Final imageUrls: ", imageContent)
+
+        // Clear the files after uploading
+        setImageFiles([]);
+        setImagePreviewUrls([]);
+      } catch (error) {
+        console.error('Error uploading files:', error);
+        setImageError('Error uploading files');
+      } finally {
+        setUploadingImage(false);
+      }
     }

-    onSend({ role: 'user', content }, plugin)
-    setContent('')
-    setPlugin(null)
-
-    if (window.innerWidth < 640 && textareaRef && textareaRef.current) {
-      textareaRef.current.blur()
+    if (!textContent && imageContent.length === 0) {
+      alert(t('Please enter a message or upload an image'));
+      return;
     }
-  }
+
+    // Construct the content array
+    const contentArray: Content[] = [
+      ...(textContent ? [{ type: "text", text: textContent }] : []),
+      ...imageContent
+    ];
+
+    // Create a structured message for GPT-4 Vision
+    const messageForGPT4Vision: Message = {
+      role: 'user',
+      content: contentArray
+    };
+
+    // Use the onSend prop to send the structured message
+    onSend(messageForGPT4Vision, plugin);
+
+    // Reset states
+    setContent('');
+    setPlugin(null);
+    setImagePreviews([]);
+    setImageUrls([]); // clear uploaded URLs so they are not reused for the next message
+  };

   const handleStopConversation = () => {
     stopConversationRef.current = true
@@ -199,7 +318,7 @@
     }
   }, [])

-  const handlePromptSelect = (prompt: Prompt) => {
+  const handlePromptSelect = useCallback((prompt: Prompt) => {
     const parsedVariables = parseVariables(prompt.content)
     const filteredVariables = parsedVariables.filter(
       (variable) => variable !== undefined,
@@ -215,24 +334,9 @@
       })
       updatePromptListVisibility(prompt.content)
     }
-  }
-
-  // const handlePromptSelect = (prompt: Prompt) => {
-  //   const parsedVariables = parseVariables(prompt.content);
-  //   setVariables(parsedVariables);
-
-  //   if (parsedVariables.length > 0) {
-  //     setIsModalVisible(true);
-  //   } else {
-  //     setContent((prevContent) => {
-  //       const updatedContent = prevContent?.replace(/\/\w*$/, prompt.content);
-  //       return updatedContent;
-  //     });
-  //     updatePromptListVisibility(prompt.content);
-  //   }
-  // };
+  }, [parseVariables, setContent, updatePromptListVisibility]);

-  const handleSubmit = (updatedVariables: string[]) => {
+  const handleSubmit = useCallback((updatedVariables: string[]) => {
     const newContent = content?.replace(/{{(.*?)}}/g, (match, variable) => {
       const index = variables.indexOf(variable)
       return updatedVariables[index] || ''
@@ -243,14 +347,283 @@
     if (textareaRef && textareaRef.current) {
       textareaRef.current.focus()
     }
+  }, [variables, setContent, textareaRef]);
+
+  // https://platform.openai.com/docs/guides/vision/what-type-of-files-can-i-upload
+  const validImageTypes = ['.jpg', '.jpeg', '.png', '.webp', '.gif'];
+
+  const isImageValid = (fileName: string): boolean => {
+    const ext = fileName.slice(fileName.lastIndexOf(".") + 1).toLowerCase();
+    return validImageTypes.includes(`.${ext}`);
   }

+  // const uploadToS3 = async (file: File) => {
+  //   if (!file) {
+  //     console.error('No file provided for upload');
+  //     return;
+  //   }
+
+  //   // Generate a unique file name using uuidv4
+  //   const uniqueFileName = `${uuidv4()}.${file.name.split('.').pop()}`;
+  //   const s3_filepath = `courses/${courseName}/${uniqueFileName}`;
+
+  //   console.log('uploadToS3 called with uniqueFileName:', uniqueFileName);
+  //   console.log('uploadToS3 called with s3_filepath:', s3_filepath);
+
+  //   // Prepare the request body for the API call
+  //   const requestObject = {
+  //     method: 'POST',
+  //     headers: {
+  //       'Content-Type': 'application/json',
+  //     },
+  //     body: JSON.stringify({
+  //       uniqueFileName: uniqueFileName,
+  //       fileType: file.type,
+  //       courseName: courseName,
+  //     }),
+  //   };
+
+  //   try {
+  //     // Call your API to get the presigned POST data
+  //     const response = await fetch('/api/UIUC-api/uploadToS3', requestObject);
+  //     if (!response.ok) {
+  //       throw new Error(`HTTP error! Status: ${response.status}`);
+  //     }
+  //     const { post } = await response.json();
+
+  //     // Use the presigned POST data to upload the file to S3
+  //     const formData = new FormData();
+  //     Object.entries(post.fields).forEach(([key, value]) => {
+  //       formData.append(key, value as string);
+  //     });
+  //     formData.append('file', file);
+
+  //     // Post the file to the S3 bucket using the presigned URL and form data
+  //     const uploadResponse = await fetch(post.url, {
+  //       method: 'POST',
+  //       body: formData,
+  //     });
+
+  //     if (!uploadResponse.ok) {
+  //       throw new Error('Failed to upload the file to S3');
+  //     }
+
+  //     // Construct the URL to the uploaded file using the response from the presigned POST
+  //     const uploadedImageUrl = `https://${aws_config.bucketName}.s3.${aws_config.region}.amazonaws.com/${encodeURIComponent(s3_filepath)}`;
+
+  //     return uploadedImageUrl;
+  //   } catch (error) {
+  //     console.error('Error uploading file:', error);
+  //   }
+  // };
+
+  const ingestFile = async (file: File | null) => {
+    if (!file) return;
+
+    const fileExtension = file.name.slice(((file.name.lastIndexOf(".") - 1) >>> 0) + 2);
+    const uniqueFileName = `${uuidv4()}.${fileExtension}`;
+
+    const queryParams = new URLSearchParams({
+      courseName: courseName,
+      fileName: uniqueFileName,
+    }).toString();
+
+    // The file name and course are passed via queryParams above.
+    const requestObject = {
+      method: 'GET',
+      headers: {
+        'Content-Type': 'application/json',
+      },
+    }
+
+    // Actually we CAN await here, just don't await this function.
+    console.log('right before call /ingest...')
+    const response = await fetch(
+      `/api/UIUC-api/ingest?${queryParams}`,
+      requestObject,
+    )
+
+    // Check if the response was ok
+    if (response.ok) {
+      const data = await response.json()
+      // console.log(file.name as string + ' ingested successfully!!')
+      console.log('Success or Failure:', data)
+      return data
+    } else {
+      console.log('Error during ingest:', response.statusText)
+      console.log('Full Response message:', response)
+      return response
+    }
+  }
+
+  const showToastOnInvalidImage = useCallback(() => {
+    notifications.show({
+      id: 'error-notification',
+      withCloseButton: true,
+      onClose: () => console.log('error unmounted'),
+      onOpen: () => console.log('error mounted'),
+      autoClose: 8000,
+      title: 'Invalid Image Type',
+      message: 'Unsupported file type. Please upload .jpg, .jpeg, .png, .webp, or .gif images.',
+      color: 'red',
+      radius: 'lg',
+      icon: <IconAlertCircle />,
+      className: 'my-notification-class',
+      style: { backgroundColor: '#15162c' },
+      withBorder: true,
+      loading: false,
+    });
+  }, []);
+
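+  // Resize images client-side before uploading: scale the short side to 768px and
+  // cap the long side at 2048px (matching OpenAI's guidance for high-detail vision
+  // input), then re-encode as JPEG at 0.9 quality to keep request payloads small.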
+  const handleImageUpload = useCallback(async (files: File[]) => {
+    const validFiles = files.filter(file => isImageValid(file.name));
+    const invalidFilesCount = files.length - validFiles.length;
+
+    if (invalidFilesCount > 0) {
+      setImageError(`${invalidFilesCount} invalid file type(s). Please upload .jpg, .jpeg, .png, .webp, or .gif images.`);
+      showToastOnInvalidImage();
+    }
+
+    const imageProcessingPromises = validFiles.map(file => {
+      return new Promise<ProcessedImage>((resolve, reject) => {
+        const reader = new FileReader();
+
+        reader.onloadend = () => {
+          const result = reader.result;
+          if (typeof result === 'string') {
+            const img = new Image();
+            img.src = result;
+
+            img.onload = () => {
+              let newWidth, newHeight;
+              const MAX_WIDTH = 2048;
+              const MAX_HEIGHT = 2048;
+              const MIN_SIDE = 768;
+
+              if (img.width > img.height) {
+                newHeight = MIN_SIDE;
+                newWidth = (img.width / img.height) * newHeight;
+                if (newWidth > MAX_WIDTH) {
+                  newWidth = MAX_WIDTH;
+                  newHeight = (img.height / img.width) * newWidth;
+                }
+              } else {
+                newWidth = MIN_SIDE;
+                newHeight = (img.height / img.width) * newWidth;
+                if (newHeight > MAX_HEIGHT) {
+                  newHeight = MAX_HEIGHT;
+                  newWidth = (img.width / img.height) * newHeight;
+                }
+              }
+
+              const canvas = document.createElement('canvas');
+              const ctx = canvas.getContext('2d');
+              if (ctx) {
+                canvas.width = newWidth;
+                canvas.height = newHeight;
+                ctx.drawImage(img, 0, 0, newWidth, newHeight);
+
+                canvas.toBlob((blob) => {
+                  if (blob) {
+                    const resizedFile = new File([blob], file.name, {
+                      type: 'image/jpeg',
+                      lastModified: Date.now(),
+                    });
+
+                    resolve({ resizedFile, dataUrl: canvas.toDataURL('image/jpeg') });
+                  } else {
+                    reject(new Error('Canvas toBlob failed'));
+                  }
+                }, 'image/jpeg', 0.9);
+              } else {
+                reject(new Error('Canvas Context is null'));
+              }
+            };
+          } else {
+            reject(new Error('FileReader did not return a string result'));
+          }
+        };
+
+        reader.onerror = reject;
+        reader.readAsDataURL(file);
+      });
+    });
+
+    try {
+      const processedImages = await Promise.all(imageProcessingPromises);
+      setImageFiles(prev => [...prev, ...processedImages.map(img => img.resizedFile)]);
+      setImagePreviewUrls(prev => [...prev, ...processedImages.map(img => img.dataUrl)]);
+
+      // Store the URLs of the uploaded images
+      const uploadedImageUrls = (await Promise.all(processedImages.map(img => uploadImageAndGetUrl(img.resizedFile, courseName)))).filter(Boolean);
+      setImageUrls(uploadedImageUrls as string[]);
+    } catch (error) {
+      console.error('Error processing files:', error);
+    }
+  }, [setImageError, setImageFiles, setImagePreviewUrls, showToastOnInvalidImage, courseName]);
+
+  // Open the modal with the selected image
+  const openModal = (imageSrc: string) => {
+    setSelectedImage(imageSrc);
+    setIsModalOpen(true);
+  };
+
+  const theme = useMantineTheme();

   useEffect(() => {
-    setContent(inputContent)
-    if (textareaRef.current) {
-      textareaRef.current.focus()
+    if (selectedConversation?.model.id !== OpenAIModelID.GPT_4_VISION) {
+      return; // Exit early if the model is not GPT-4 Vision
     }
-  }, [inputContent, textareaRef])
+
+    const handleDocumentDragOver = (e: DragEvent) => {
+      e.preventDefault();
+      setIsDragging(true);
+    };
+
+    const handleDocumentDrop = (e: DragEvent) => {
+      e.preventDefault();
+      setIsDragging(false);
+      if (e.dataTransfer && e.dataTransfer.items && e.dataTransfer.items.length > 0) {
+        const files = Array.from(e.dataTransfer.items)
+          .filter(item => item.kind === 'file')
+          .map(item => item.getAsFile())
+          .filter(file => file !== null) as File[];
+        if (files.length > 0) {
+          handleImageUpload(files);
+        }
+      }
+    };
+
+    const handleDocumentDragLeave = (e: DragEvent) => {
+      setIsDragging(false);
+    };
+
+    document.addEventListener('dragover', handleDocumentDragOver);
+    document.addEventListener('drop', handleDocumentDrop);
+    document.addEventListener('dragleave', handleDocumentDragLeave);
+
+    return () => {
+      // Clean up the event listeners when the component unmounts
+      document.removeEventListener('dragover', handleDocumentDragOver);
+      document.removeEventListener('drop', handleDocumentDrop);
+      document.removeEventListener('dragleave', handleDocumentDragLeave);
+    };
+  }, [handleImageUpload, selectedConversation?.model.id]);
+
+  useEffect(() => {
+    if (imageError) {
+      showToastOnInvalidImage();
+      setImageError(null);
+    }
+  }, [imageError, showToastOnInvalidImage]);

   useEffect(() => {
     if (promptListRef.current) {
@@ -262,9 +635,8 @@
     if (textareaRef && textareaRef.current) {
       textareaRef.current.style.height = 'inherit'
       textareaRef.current.style.height = `${textareaRef.current?.scrollHeight}px`
-      textareaRef.current.style.overflow = `${
-        textareaRef?.current?.scrollHeight > 400 ? 'auto' : 'hidden'
-      }`
+      textareaRef.current.style.overflow = `${textareaRef?.current?.scrollHeight > 400 ? 'auto' : 'hidden'}`
     }
   }, [content])

@@ -285,8 +657,28 @@
     }
   }, [])

+  useEffect(() => {
+    // Focus the chat input container as soon as the component mounts
+    if (chatInputContainerRef.current) {
+      chatInputContainerRef.current.focus();
+    }
+  }, []);
+
+  // Upload an image to S3, then exchange its location for a presigned URL
+  async function uploadImageAndGetUrl(file: File, courseName: string): Promise<string> {
+    try {
+      const uploadedImageUrl = await uploadToS3(file, courseName);
+      const presignedUrl = await fetchPresignedUrl(uploadedImageUrl as string);
+      return presignedUrl;
+    } catch (error) {
+      console.error('Upload failed for file', file.name, error);
+      setImageError(`Upload failed for file: ${file.name}`);
+      return '';
+    }
+  }
+
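+  // NOTE (assumption): presigned URLs expire after a fixed TTL, so images on
+  // older messages may need re-signing; Chat.tsx's onImageUrlsUpdate exists to
+  // write refreshed URLs back onto a message.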
   return (
-    <div …>
+    <div …>
       {messageIsStreaming && (
         {/* … stop-generating button … */}
       )}
+          {/* BUTTON 2: Image Icon and Input */}
+          {selectedConversation?.model.id === OpenAIModelID.GPT_4_VISION && (
+            {/* … image-upload button … */}
+          )}
+          <input … onChange={(e) => {
+            const files = e.target.files;
+            if (files) {
+              handleImageUpload(Array.from(files));
+            }
+          }} />
           {showPluginSelect && (
             {/* … PluginSelect dropdown … */}
           )}
-          …
-          …
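The new `uploadToS3` / `fetchPresignedUrl` helpers referenced above live in `src/utils/apiUtils`, outside this diff. Judging from the commented-out block preserved in ChatInput.tsx, the upload half presumably still uses the `/api/UIUC-api/uploadToS3` presigned-POST route; a sketch under that assumption (the real helper may differ):

```ts
// Sketch only — the real implementation lives in src/utils/apiUtils.
// Assumes /api/UIUC-api/uploadToS3 returns S3 presigned-POST data as { post }.
import { v4 as uuidv4 } from 'uuid'

export async function uploadToS3(file: File, courseName: string): Promise<string> {
  const uniqueFileName = `${uuidv4()}.${file.name.split('.').pop()}`
  const s3_filepath = `courses/${courseName}/${uniqueFileName}`

  // Ask the backend to sign an upload for this key.
  const res = await fetch('/api/UIUC-api/uploadToS3', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ uniqueFileName, fileType: file.type, courseName }),
  })
  if (!res.ok) throw new Error(`HTTP error! Status: ${res.status}`)
  const { post } = await res.json()

  // Send the file bytes directly to S3 using the signed form fields.
  const formData = new FormData()
  Object.entries(post.fields).forEach(([key, value]) => {
    formData.append(key, value as string)
  })
  formData.append('file', file)

  const upload = await fetch(post.url, { method: 'POST', body: formData })
  if (!upload.ok) throw new Error('Failed to upload the file to S3')

  // Return the object's location (or full URL — whichever fetchPresignedUrl expects).
  return s3_filepath
}
```

A presigned POST keeps AWS credentials server-side while letting the browser upload directly to S3, which is why ChatInput only ever calls the two helpers rather than talking to AWS itself.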