- {currentMessageImageUrls.map((imageUrl, index) => (
-
- {/* Message Actions for images */}
+
+ {currentMessageImageUrls.length === 1 ? (
+
+
+ {/* Message Actions for single image */}
+ {isHovered && (
+
+ )}
+
+
+
+ ) : (
+
+ {/* Message Actions for multiple images */}
{isHovered && (
)}
-
+
+ {currentMessageImageUrls.map((imageUrl, index) => (
+
+ ))}
+
- ))}
+ )}
)}
diff --git a/src/app/routes/chat/-components/chat/ImagePreferences.tsx b/src/app/routes/chat/-components/chat/ImagePreferences.tsx
new file mode 100644
index 0000000..9ab30bf
--- /dev/null
+++ b/src/app/routes/chat/-components/chat/ImagePreferences.tsx
@@ -0,0 +1,157 @@
+import { Slider } from "@/app/components/ui/slider";
+import { Tooltip, TooltipContent, TooltipProvider, TooltipTrigger } from "@/app/components/ui/tooltip";
+import { cn } from "@/app/lib/utils";
+import { getModelById } from "@/server/ai/provider";
+import type { AspectRatio } from "@/server/ai/types/api";
+import { RectangleHorizontal, RectangleVertical, Square } from "lucide-react";
+import { useEffect, useRef } from "react";
+import { useTranslation } from "react-i18next";
+
+interface ImagePreferencesProps {
+ imageCount: number;
+ aspectRatio?: AspectRatio;
+ onImageCountChange: (count: number) => void;
+ onAspectRatioChange: (ratio: AspectRatio | undefined) => void;
+ currentProvider?: string;
+ currentModel?: string;
+ onClose: () => void;
+}
+
+// Aspect ratio icon mapping
+const AspectRatioIcons = {
+ "1:1": Square,
+ "16:9": RectangleHorizontal,
+ "9:16": RectangleVertical,
+ "4:3": RectangleHorizontal,
+ "3:4": RectangleVertical,
+};
+
+export function ImagePreferences({
+ imageCount,
+ aspectRatio,
+ onImageCountChange,
+ onAspectRatioChange,
+ currentProvider,
+ currentModel,
+ onClose,
+}: ImagePreferencesProps) {
+ const { t } = useTranslation();
+ const panelRef = useRef
(null);
+
+ // Get supported aspect ratios from current model
+ const supportedAspectRatios = (() => {
+ if (!currentProvider || !currentModel) return [];
+
+ try {
+ const model = getModelById(currentProvider, currentModel);
+ return model.supportedAspectRatios || [];
+ } catch {
+ return [];
+ }
+ })();
+
+ // Handle click outside to close panel
+ useEffect(() => {
+ const handleClickOutside = (event: MouseEvent) => {
+ if (panelRef.current && !panelRef.current.contains(event.target as Node)) {
+ onClose();
+ }
+ };
+
+ document.addEventListener("mousedown", handleClickOutside);
+ return () => {
+ document.removeEventListener("mousedown", handleClickOutside);
+ };
+ }, [onClose]);
+
+ return (
+
+
+ {/* Image Count Slider */}
+
+
+ {t("chat.imageCount")}
+ {imageCount}
+
+
onImageCountChange(value[0] || 1)}
+ max={4}
+ min={1}
+ step={1}
+ className="w-full"
+ />
+
+ 1
+ 4
+
+
+
+ {/* Aspect Ratio Selection */}
+ {supportedAspectRatios.length > 0 && (
+
+
{t("chat.aspectRatio")}
+
+ {/* Auto/None option */}
+
+
+
+
+
+
+ {t("chat.autoAspectRatio")}
+
+
+
+
+ {/* Supported aspect ratios */}
+ {supportedAspectRatios.map((ratio) => {
+ const IconComponent = AspectRatioIcons[ratio];
+ const isSelected = aspectRatio === ratio;
+ return (
+
+
+
+
+
+
+ {ratio}
+
+
+
+ );
+ })}
+
+
+ )}
+
+
+ );
+}
diff --git a/src/app/routes/chat/-components/chat/MessageActions.tsx b/src/app/routes/chat/-components/chat/MessageActions.tsx
index dcaa3ee..6d27e59 100644
--- a/src/app/routes/chat/-components/chat/MessageActions.tsx
+++ b/src/app/routes/chat/-components/chat/MessageActions.tsx
@@ -1,5 +1,6 @@
import { Button } from "@/app/components/ui/button";
import { cn } from "@/app/lib/utils";
+import JSZip from "jszip";
import { Check, Copy, Download, Loader2, Trash2, X } from "lucide-react";
import { useState } from "react";
import { useTranslation } from "react-i18next";
@@ -64,11 +65,19 @@ export function MessageActions({
const handleDownload = async () => {
if (!imageUrls || imageUrls.length === 0) return;
+ // For single image, download directly
+ if (imageUrls.length === 1) {
+ await downloadSingleImage(imageUrls[0]!, 0);
+ } else {
+ // For multiple images, download as ZIP
+ await downloadAllAsZip();
+ }
+ };
+
+ const downloadSingleImage = async (imageUrl: string, index: number) => {
setDownloadState("loading");
try {
- const imageUrl = imageUrls[0]!; // Only download the first image
-
// Create a temporary link element
const link = document.createElement("a");
link.href = imageUrl;
@@ -76,7 +85,7 @@ export function MessageActions({
link.target = "_blank";
link.rel = "noopener noreferrer";
- // Generate filename with timestamp
+ // Generate filename with timestamp and index
const timestamp = new Date().toISOString().slice(0, 19).replace(/[:-]/g, "");
let extension = "jpg"; // default extension
@@ -122,7 +131,13 @@ export function MessageActions({
}
}
- link.download = `typix-image-${timestamp}.${extension}`;
+ // Set filename: single image doesn't need index, multiple images include index
+ const filename =
+ imageUrls?.length === 1
+ ? `typix-image-${timestamp}.${extension}`
+ : `typix-image-${timestamp}-${index + 1}.${extension}`;
+
+ link.download = filename;
// Add to DOM, click, and remove
document.body.appendChild(link);
@@ -136,7 +151,6 @@ export function MessageActions({
// Fallback: open image in new tab
try {
- const imageUrl = imageUrls[0]!;
window.open(imageUrl, "_blank", "noopener,noreferrer");
setDownloadState("success");
resetStateAfterDelay(setDownloadState);
@@ -148,6 +162,93 @@ export function MessageActions({
}
};
+ const downloadAllAsZip = async () => {
+ if (!imageUrls || imageUrls.length === 0) return;
+
+ setDownloadState("loading");
+
+ try {
+ const zip = new JSZip();
+ const timestamp = new Date().toISOString().slice(0, 19).replace(/[:-]/g, "");
+
+ // Download all images and add to zip
+ const downloadPromises = imageUrls.map(async (imageUrl, index) => {
+ try {
+ const response = await fetch(imageUrl);
+ if (!response.ok) {
+ throw new Error(`Failed to fetch image ${index + 1}`);
+ }
+ const blob = await response.blob();
+
+ // Determine file extension
+ let extension = "jpg";
+ if (imageUrl.startsWith("data:")) {
+ const mimeType = imageUrl.split(":")[1]?.split(";")[0]?.split("/")[1];
+ if (mimeType) {
+ switch (mimeType.toLowerCase()) {
+ case "jpeg":
+ extension = "jpg";
+ break;
+ case "png":
+ extension = "png";
+ break;
+ case "gif":
+ extension = "gif";
+ break;
+ case "webp":
+ extension = "webp";
+ break;
+ default:
+ extension = mimeType;
+ }
+ }
+ } else {
+ try {
+ const urlPath = new URL(imageUrl).pathname;
+ const urlExtension = urlPath.split(".").pop()?.toLowerCase();
+ if (urlExtension && /^(jpg|jpeg|png|gif|webp|bmp|svg)$/i.test(urlExtension)) {
+ extension = urlExtension;
+ }
+ } catch {
+ // Keep default extension
+ }
+ }
+
+ const filename = `image-${index + 1}.${extension}`;
+ zip.file(filename, blob);
+ } catch (error) {
+ console.error(`Failed to download image ${index + 1}:`, error);
+ // Continue with other images even if one fails
+ }
+ });
+
+ await Promise.all(downloadPromises);
+
+ // Generate and download the ZIP file
+ const zipBlob = await zip.generateAsync({ type: "blob" });
+ const zipUrl = URL.createObjectURL(zipBlob);
+
+ const link = document.createElement("a");
+ link.href = zipUrl;
+ link.download = `typix-images-${timestamp}.zip`;
+ link.style.display = "none";
+
+ document.body.appendChild(link);
+ link.click();
+ document.body.removeChild(link);
+
+ // Clean up blob URL
+ setTimeout(() => URL.revokeObjectURL(zipUrl), 100);
+
+ setDownloadState("success");
+ resetStateAfterDelay(setDownloadState);
+ } catch (error) {
+ console.error("Failed to download images as ZIP:", error);
+ setDownloadState("error");
+ resetStateAfterDelay(setDownloadState);
+ }
+ };
+
const handleDelete = async () => {
if (!onDelete) return;
diff --git a/src/app/routes/chat/-hooks/useChat.ts b/src/app/routes/chat/-hooks/useChat.ts
index 8c27d10..38969e1 100644
--- a/src/app/routes/chat/-hooks/useChat.ts
+++ b/src/app/routes/chat/-hooks/useChat.ts
@@ -1,6 +1,7 @@
import { useAuth } from "@/app/hooks/useAuth";
import { useAiService } from "@/app/hooks/useService";
import { useChatService } from "@/app/hooks/useService";
+import type { AspectRatio } from "@/server/ai/types/api";
import type { chatService } from "@/server/service/chat";
import { localUserId } from "@/server/service/context";
import { useCallback, useEffect, useMemo, useState } from "react";
@@ -174,6 +175,7 @@ export const useChat = (initialChatId?: string, selectedProvider?: string, selec
title: t("chat.newChatTitle"),
provider,
model,
+ imageCount: 1, // Default image count for new chat
});
if (result?.id) {
@@ -229,7 +231,13 @@ export const useChat = (initialChatId?: string, selectedProvider?: string, selec
);
const sendMessage = useCallback(
- async (content: string, imageFiles?: File[], targetChatId?: string): Promise => {
+ async (
+ content: string,
+ imageFiles?: File[],
+ targetChatId?: string,
+ imageCount?: number,
+ aspectRatio?: AspectRatio,
+ ): Promise => {
const chatId = targetChatId || currentChatId;
// Convert image files to attachments with base64 data
@@ -286,6 +294,8 @@ export const useChat = (initialChatId?: string, selectedProvider?: string, selec
content,
attachments,
images, // Keep for backward compatibility
+ imageCount: imageCount || 1, // Pass the image count
+ aspectRatio, // Pass the aspect ratio
});
if (!result?.id) {
@@ -402,6 +412,8 @@ export const useChat = (initialChatId?: string, selectedProvider?: string, selec
type: "text",
attachments,
images, // Keep for backward compatibility
+ imageCount: imageCount || 1, // Pass the image count
+ aspectRatio, // Pass the aspect ratio
});
// Use returned messages to update the chat data instead of revalidating
diff --git a/src/app/routes/chat/index.tsx b/src/app/routes/chat/index.tsx
index 32c8131..c1db477 100644
--- a/src/app/routes/chat/index.tsx
+++ b/src/app/routes/chat/index.tsx
@@ -7,6 +7,7 @@ import { ChatInput } from "@/app/routes/chat/-components/chat/ChatInput";
import { ChatSidebar } from "@/app/routes/chat/-components/sidebar/ChatSidebar";
import { useChat } from "@/app/routes/chat/-hooks/useChat";
import { ChatSidebarProvider, useSidebar } from "@/app/routes/chat/-hooks/useChatSidebar";
+import type { AspectRatio } from "@/server/ai/types/api";
import { createFileRoute } from "@tanstack/react-router";
import { useEffect, useRef, useState } from "react";
import { useTranslation } from "react-i18next";
@@ -145,10 +146,15 @@ function ChatPageContent() {
});
}
};
- const handleSendMessage = async (content: string, imageFiles?: File[]) => {
+ const handleSendMessage = async (
+ content: string,
+ imageFiles?: File[],
+ imageCount?: number,
+ aspectRatio?: AspectRatio,
+ ) => {
try {
// Execute sendMessage and wait for completion
- await sendMessage(content, imageFiles);
+ await sendMessage(content, imageFiles, undefined, imageCount, aspectRatio);
} catch (error) {
console.error("Error sending message:", error);
diff --git a/src/server/ai/provider/cloudflare.ts b/src/server/ai/provider/cloudflare.ts
index 15b3ffb..10038bf 100644
--- a/src/server/ai/provider/cloudflare.ts
+++ b/src/server/ai/provider/cloudflare.ts
@@ -1,8 +1,72 @@
import { inCfWorker } from "@/server/lib/env";
-import { base64ToDataURI, readableStreamToDataURI } from "@/server/lib/util";
+import { base64ToDataURI, dataURItoBase64, readableStreamToDataURI } from "@/server/lib/util";
import { getContext } from "@/server/service/context";
+import { type TypixGenerateRequest, commonAspectRatioSizes } from "../types/api";
import type { AiProvider, ApiProviderSettings, ApiProviderSettingsItem } from "../types/provider";
-import { type ProviderSettingsType, doParseSettings, getProviderSettingsSchema } from "../types/provider";
+import {
+ type ProviderSettingsType,
+ chooseAblility,
+ doParseSettings,
+ findModel,
+ getProviderSettingsSchema,
+} from "../types/provider";
+
+// Single image generation helper function
+const generateSingle = async (request: TypixGenerateRequest, settings: ApiProviderSettings): Promise => {
+ const AI = getContext().AI;
+ const { builtin, apiKey, accountId } = Cloudflare.parseSettings(settings);
+
+ const model = findModel(Cloudflare, request.modelId);
+ const genType = chooseAblility(request, model.ability);
+
+ const params = {
+ prompt: request.prompt,
+ } as any;
+ if (request.aspectRatio) {
+ const size = commonAspectRatioSizes[request.aspectRatio];
+ params.width = size?.width;
+ params.height = size?.height;
+ }
+ if (genType === "i2i") {
+ params.image_b64 = dataURItoBase64(request.images![0]!);
+ }
+
+ if (inCfWorker && AI && builtin === true) {
+ const resp = await AI.run(request.modelId as unknown as any, params);
+
+ if (resp instanceof ReadableStream) {
+ return [await readableStreamToDataURI(resp)];
+ }
+
+ return [base64ToDataURI(resp.image)];
+ }
+
+ const resp = await fetch(`https://api.cloudflare.com/client/v4/accounts/${accountId}/ai/run/${request.modelId}`, {
+ method: "POST",
+ headers: {
+ Authorization: `Bearer ${apiKey}`,
+ },
+ body: JSON.stringify(params),
+ });
+
+ if (!resp.ok) {
+ if (resp.status === 401 || resp.status === 404) {
+ throw new Error("CONFIG_ERROR");
+ }
+
+ const errorText = await resp.text();
+ throw new Error(`Cloudflare API error: ${resp.status} ${resp.statusText} - ${errorText}`);
+ }
+
+ const contentType = resp.headers.get("Content-Type");
+ if (contentType?.includes("image/png") === true) {
+ const imageBuffer = await resp.arrayBuffer();
+ return [base64ToDataURI(Buffer.from(imageBuffer).toString("base64"))];
+ }
+
+ const result = (await resp.json()) as unknown as any;
+ return [base64ToDataURI(result.result.image)];
+};
const cloudflareSettingsNotBuiltInSchema = [
{
@@ -48,6 +112,13 @@ const Cloudflare: AiProvider = {
},
enabledByDefault: true,
models: [
+ {
+ id: "@cf/leonardo/lucid-origin",
+ name: "Lucid Origin",
+ ability: "t2i",
+ enabledByDefault: true,
+ supportedAspectRatios: ["1:1", "16:9", "9:16", "4:3", "3:4"],
+ },
{
id: "@cf/black-forest-labs/flux-1-schnell",
name: "FLUX.1-schnell",
@@ -59,12 +130,14 @@ const Cloudflare: AiProvider = {
name: "DreamShaper 8 LCM",
ability: "t2i",
enabledByDefault: true,
+ supportedAspectRatios: ["1:1", "16:9", "9:16", "4:3", "3:4"],
},
{
id: "@cf/bytedance/stable-diffusion-xl-lightning",
name: "Stable Diffusion XL Lightning",
ability: "t2i",
enabledByDefault: true,
+ supportedAspectRatios: ["1:1", "16:9", "9:16", "4:3", "3:4"],
},
// {
// id: "@cf/runwayml/stable-diffusion-v1-5-img2img",
@@ -77,6 +150,7 @@ const Cloudflare: AiProvider = {
name: "Stable Diffusion XL Base 1.0",
ability: "t2i",
enabledByDefault: true,
+ supportedAspectRatios: ["1:1", "16:9", "9:16", "4:3", "3:4"],
},
],
parseSettings: (settings: ApiProviderSettings) => {
@@ -84,59 +158,27 @@ const Cloudflare: AiProvider = {
return doParseSettings(settings, settingsSchema!) as CloudflareSettings;
},
generate: async (request, settings) => {
- const AI = getContext().AI;
- const { builtin, apiKey, accountId } = Cloudflare.parseSettings(settings);
+ try {
+ const imageCount = request.n || 1;
- if (inCfWorker && AI && builtin === true) {
- const resp = await AI.run(request.modelId as unknown as any, {
- prompt: request.prompt,
- });
+ // Generate images in parallel using Promise.all
+ const generatePromises = Array.from({ length: imageCount }, () => generateSingle(request, settings));
- if (resp instanceof ReadableStream) {
- return {
- images: [await readableStreamToDataURI(resp)],
- };
- }
+ const results = await Promise.all(generatePromises);
+ const allImages = results.flat();
return {
- images: [base64ToDataURI(resp.image)],
+ images: allImages,
};
- }
-
- const resp = await fetch(`https://api.cloudflare.com/client/v4/accounts/${accountId}/ai/run/${request.modelId}`, {
- method: "POST",
- headers: {
- Authorization: `Bearer ${apiKey}`,
- },
- body: JSON.stringify({
- prompt: request.prompt,
- }),
- });
-
- if (!resp.ok) {
- if (resp.status === 401 || resp.status === 404) {
+ } catch (error: any) {
+ if (error.message === "CONFIG_ERROR") {
return {
errorReason: "CONFIG_ERROR",
images: [],
};
}
-
- const errorText = await resp.text();
- throw new Error(`Cloudflare API error: ${resp.status} ${resp.statusText} - ${errorText}`);
+ throw error;
}
-
- const contentType = resp.headers.get("Content-Type");
- if (contentType?.includes("image/png") === true) {
- const imageBuffer = await resp.arrayBuffer();
- return {
- images: [base64ToDataURI(Buffer.from(imageBuffer).toString("base64"))],
- };
- }
-
- const result = (await resp.json()) as unknown as any;
- return {
- images: [base64ToDataURI(result.result.image)],
- };
},
};
diff --git a/src/server/ai/provider/fal.ts b/src/server/ai/provider/fal.ts
index 949030f..1b243dc 100644
--- a/src/server/ai/provider/fal.ts
+++ b/src/server/ai/provider/fal.ts
@@ -14,6 +14,15 @@ const falSettingsSchema = [
// Automatically generate type from schema
export type FalSettings = ProviderSettingsType;
+// square_hd, square, portrait_4_3, portrait_16_9, landscape_4_3, landscape_16_9
+const qwenAspectRatioSizes = {
+ "1:1": "square_hd",
+ "16:9": "landscape_16_9",
+ "9:16": "portrait_16_9",
+ "4:3": "landscape_4_3",
+ "3:4": "portrait_4_3",
+};
+
const Fal: AiProvider = {
id: "fal",
name: "Fal",
@@ -33,65 +42,79 @@ const Fal: AiProvider = {
name: "FLUX.1 Kontext [max]",
ability: "i2i",
enabledByDefault: true,
+ supportedAspectRatios: ["1:1", "16:9", "9:16", "4:3", "3:4"],
},
{
id: "fal-ai/flux-pro/kontext",
name: "FLUX.1 Kontext [pro]",
ability: "i2i",
enabledByDefault: true,
+ supportedAspectRatios: ["1:1", "16:9", "9:16", "4:3", "3:4"],
},
{
id: "fal-ai/qwen-image",
name: "Qwen Image",
ability: "i2i",
enabledByDefault: true,
+ supportedAspectRatios: ["1:1", "16:9", "9:16", "4:3", "3:4"],
},
],
parseSettings: (settings: ApiProviderSettings) => {
return doParseSettings(settings, falSettingsSchema) as FalSettings;
},
generate: async (request, settings) => {
- const { apiKey } = Fal.parseSettings(settings);
- const model = findModel(Fal, request.modelId);
+ try {
+ const { apiKey } = Fal.parseSettings(settings);
+ const model = findModel(Fal, request.modelId);
- const genType = chooseAblility(request, model.ability);
- let endpoint = "";
- switch (request.modelId) {
- case "fal-ai/gemini-25-flash-image":
- if (genType === "i2i") {
- endpoint = "/edit";
- }
- break;
- case "fal-ai/qwen-image":
- if (genType === "i2i") {
- endpoint = "-edit";
- }
- break;
- default:
- switch (genType) {
- case "t2i":
- endpoint = "/text-to-image";
- break;
- case "i2i": {
- // Check if this model supports multiple images
- const model = Fal.models.find((m) => m.id === request.modelId);
- const maxImages = model?.maxInputImages || 1;
+ const genType = chooseAblility(request, model.ability);
+ let endpoint = "";
+ switch (request.modelId) {
+ case "fal-ai/gemini-25-flash-image":
+ if (genType === "i2i") {
+ endpoint = "/edit";
+ }
+ break;
+ case "fal-ai/qwen-image":
+ if (genType === "i2i") {
+ endpoint = "-edit";
+ }
+ break;
+ default:
+ switch (genType) {
+ case "t2i":
+ endpoint = "/text-to-image";
+ break;
+ case "i2i": {
+ // Check if this model supports multiple images
+ const model = Fal.models.find((m) => m.id === request.modelId);
+ const maxImages = model?.maxInputImages || 1;
- if ((request.images?.length || 0) > 1 && maxImages > 1) {
- endpoint = "/multi";
+ if ((request.images?.length || 0) > 1 && maxImages > 1) {
+ endpoint = "/multi";
+ }
+ break;
}
- break;
}
- }
- }
+ }
- fal.config({ credentials: apiKey });
+ fal.config({ credentials: apiKey });
- let resp: Awaited>;
- try {
- const imageCount = request.images?.length || 0;
const input: any = { prompt: request.prompt };
+ // Add num_images parameter for multiple image generation
+ if (request.n && request.n > 1) {
+ input.num_images = request.n;
+ }
+
+ if (request.aspectRatio) {
+ if (request.modelId === "fal-ai/qwen-image") {
+ input.image_size = qwenAspectRatioSizes[request.aspectRatio];
+ } else {
+ input.aspect_ratio = request.aspectRatio;
+ }
+ }
+
if (genType === "i2i") {
if ((model.maxInputImages || 1) === 1) {
input.image_url = request.images?.[0];
@@ -100,7 +123,23 @@ const Fal: AiProvider = {
}
}
- resp = await fal.run(request.modelId + endpoint, { input });
+ const resp = await fal.run(request.modelId + endpoint, { input });
+
+ return {
+ images: await Promise.all(
+ (resp.data.images || []).map(async (image: { url: string }) => {
+ if (image.url) {
+ try {
+ return await fetchUrlToDataURI(image.url);
+ } catch (error) {
+ console.error("Fal image fetch error:", error);
+ return null;
+ }
+ }
+ return null;
+ }),
+ ).then((results) => results.filter(Boolean) as string[]),
+ };
} catch (error) {
if (error instanceof ApiError) {
if (error.status === 401 || error.status === 404) {
@@ -112,22 +151,6 @@ const Fal: AiProvider = {
}
throw error;
}
-
- return {
- images: await Promise.all(
- (resp.data.images || []).map(async (image: { url: string }) => {
- if (image.url) {
- try {
- return await fetchUrlToDataURI(image.url);
- } catch (error) {
- console.error("Fal image fetch error:", error);
- return null;
- }
- }
- return null;
- }),
- ).then((results) => results.filter(Boolean) as string[]),
- };
},
};
diff --git a/src/server/ai/provider/flux.ts b/src/server/ai/provider/flux.ts
index 4eb34ea..3dc97cb 100644
--- a/src/server/ai/provider/flux.ts
+++ b/src/server/ai/provider/flux.ts
@@ -1,7 +1,82 @@
import { fetchUrlToDataURI } from "@/server/lib/util";
+import type { TypixGenerateRequest } from "../types/api";
import type { AiProvider, ApiProviderSettings, ApiProviderSettingsItem } from "../types/provider";
import { type ProviderSettingsType, chooseAblility, doParseSettings, findModel } from "../types/provider";
+// Single image generation helper function
+const generateSingle = async (request: TypixGenerateRequest, settings: ApiProviderSettings): Promise => {
+ const { apiKey } = Flux.parseSettings(settings);
+
+ const model = findModel(Flux, request.modelId);
+ const genType = chooseAblility(request, model.ability);
+
+ const requestBody: any = {
+ prompt: request.prompt,
+ };
+ if (genType === "i2i" && request.images?.[0]) {
+ requestBody.image_url = request.images[0];
+ }
+
+ const submitResponse = await fetch(`https://api.bfl.ai/v1/${request.modelId}`, {
+ method: "POST",
+ headers: {
+ accept: "application/json",
+ "x-key": apiKey,
+ "Content-Type": "application/json",
+ },
+ body: JSON.stringify(requestBody),
+ });
+
+ if (!submitResponse.ok) {
+ if (submitResponse.status === 403) {
+ throw new Error("CONFIG_ERROR");
+ }
+ throw new Error(`Flux API error: ${submitResponse.status} ${submitResponse.statusText}`);
+ }
+
+ const submitData: FluxSubmitResponse = await submitResponse.json();
+ const { id: requestId, polling_url: pollingUrl } = submitData;
+
+ let attempts = 0;
+ const maxAttempts = 120;
+
+ while (attempts < maxAttempts) {
+ await new Promise((resolve) => setTimeout(resolve, 500));
+ attempts++;
+
+ const pollUrl = new URL(pollingUrl);
+ pollUrl.searchParams.set("id", requestId);
+
+ const pollResponse = await fetch(pollUrl.toString(), {
+ method: "GET",
+ headers: {
+ accept: "application/json",
+ "x-key": apiKey,
+ },
+ });
+
+ if (!pollResponse.ok) {
+ throw new Error(`Flux polling error: ${pollResponse.status} ${pollResponse.statusText}`);
+ }
+
+ const pollData: FluxPollResponse = await pollResponse.json();
+
+ if (pollData.status === "Ready" && pollData.result?.sample) {
+ try {
+ const imageDataUri = await fetchUrlToDataURI(pollData.result.sample);
+ return [imageDataUri];
+ } catch (error) {
+ console.error("Flux image fetch error:", error);
+ return [];
+ }
+ } else if (pollData.status === "Error" || pollData.status === "Failed") {
+ throw new Error(`Flux generation failed: ${pollData.error || "Unknown error"}`);
+ }
+ }
+
+ throw new Error("Flux generation timeout - exceeded maximum polling attempts");
+};
+
const fluxSettingsSchema = [
{
key: "apiKey",
@@ -39,119 +114,69 @@ const Flux: AiProvider = {
name: "FLUX.1 Kontext [max]",
ability: "i2i",
enabledByDefault: true,
+ supportedAspectRatios: ["1:1", "16:9", "9:16", "4:3", "3:4"],
},
{
id: "flux-kontext-pro",
name: "FLUX.1 Kontext [pro]",
ability: "i2i",
enabledByDefault: true,
+ supportedAspectRatios: ["1:1", "16:9", "9:16", "4:3", "3:4"],
},
{
id: "flux-pro-1.1-ultra",
name: "FLUX1.1 [pro] Ultra",
ability: "t2i",
enabledByDefault: true,
+ supportedAspectRatios: ["1:1", "16:9", "9:16", "4:3", "3:4"],
},
{
id: "flux-pro-1.1",
name: "FLUX1.1 [pro]",
ability: "t2i",
enabledByDefault: true,
+ supportedAspectRatios: ["1:1", "16:9", "9:16", "4:3", "3:4"],
},
{
id: "flux-pro",
name: "FLUX.1 [pro]",
ability: "t2i",
enabledByDefault: true,
+ supportedAspectRatios: ["1:1", "16:9", "9:16", "4:3", "3:4"],
},
{
id: "flux-dev",
name: "FLUX.1 [dev]",
ability: "t2i",
enabledByDefault: true,
+ supportedAspectRatios: ["1:1", "16:9", "9:16", "4:3", "3:4"],
},
],
parseSettings: (settings: ApiProviderSettings) => {
return doParseSettings(settings, fluxSettingsSchema) as FluxSettings;
},
generate: async (request, settings) => {
- const { apiKey } = Flux.parseSettings(settings);
+ try {
+ const imageCount = request.n || 1;
- const model = findModel(Flux, request.modelId);
- const genType = chooseAblility(request, model.ability);
+ // Generate images in parallel using Promise.all
+ const generatePromises = Array.from({ length: imageCount }, () => generateSingle(request, settings));
- const requestBody: any = {
- prompt: request.prompt,
- };
- if (genType === "i2i" && request.images?.[0]) {
- requestBody.image_url = request.images[0];
- }
+ const results = await Promise.all(generatePromises);
+ const allImages = results.flat();
- const submitResponse = await fetch(`https://api.bfl.ai/v1/${request.modelId}`, {
- method: "POST",
- headers: {
- accept: "application/json",
- "x-key": apiKey,
- "Content-Type": "application/json",
- },
- body: JSON.stringify(requestBody),
- });
-
- if (!submitResponse.ok) {
- if (submitResponse.status === 403) {
+ return {
+ images: allImages,
+ };
+ } catch (error: any) {
+ if (error.message === "CONFIG_ERROR") {
return {
errorReason: "CONFIG_ERROR",
images: [],
};
}
- throw new Error(`Flux API error: ${submitResponse.status} ${submitResponse.statusText}`);
+ throw error;
}
-
- const submitData: FluxSubmitResponse = await submitResponse.json();
- const { id: requestId, polling_url: pollingUrl } = submitData;
-
- let attempts = 0;
- const maxAttempts = 120;
-
- while (attempts < maxAttempts) {
- await new Promise((resolve) => setTimeout(resolve, 500));
- attempts++;
-
- const pollUrl = new URL(pollingUrl);
- pollUrl.searchParams.set("id", requestId);
-
- const pollResponse = await fetch(pollUrl.toString(), {
- method: "GET",
- headers: {
- accept: "application/json",
- "x-key": apiKey,
- },
- });
-
- if (!pollResponse.ok) {
- throw new Error(`Flux polling error: ${pollResponse.status} ${pollResponse.statusText}`);
- }
-
- const pollData: FluxPollResponse = await pollResponse.json();
-
- if (pollData.status === "Ready" && pollData.result?.sample) {
- try {
- const imageDataUri = await fetchUrlToDataURI(pollData.result.sample);
- return {
- images: [imageDataUri],
- };
- } catch (error) {
- console.error("Flux image fetch error:", error);
- return {
- images: [],
- };
- }
- } else if (pollData.status === "Error" || pollData.status === "Failed") {
- throw new Error(`Flux generation failed: ${pollData.error || "Unknown error"}`);
- }
- }
-
- throw new Error("Flux generation timeout - exceeded maximum polling attempts");
},
};
diff --git a/src/server/ai/provider/google.ts b/src/server/ai/provider/google.ts
index 9c20816..064e1ba 100644
--- a/src/server/ai/provider/google.ts
+++ b/src/server/ai/provider/google.ts
@@ -1,4 +1,5 @@
import { GoogleGenAI } from "@google/genai";
+import type { TypixGenerateRequest } from "../types/api";
import type { AiProvider, ApiProviderSettings, ApiProviderSettingsItem } from "../types/provider";
import { type ProviderSettingsType, chooseAblility, doParseSettings, findModel } from "../types/provider";
@@ -13,6 +14,74 @@ const googleSettingsSchema = [
// Automatically generate type from schema
export type GoogleSettings = ProviderSettingsType;
+// Single image generation helper function
+const generateSingle = async (request: TypixGenerateRequest, settings: ApiProviderSettings): Promise => {
+ const { apiKey } = Google.parseSettings(settings);
+
+ const ai = new GoogleGenAI({ apiKey });
+
+ const ability = chooseAblility(request, findModel(Google, request.modelId).ability);
+
+ let contents: any;
+
+ if (ability === "t2i") {
+ // Text-to-image generation
+ contents = request.prompt;
+ } else {
+ // Image-to-image generation
+ const promptParts: any[] = [{ text: request.prompt }];
+
+ // Add images to the prompt
+ if (request.images && request.images.length > 0) {
+ for (const imageDataUri of request.images) {
+ // Extract MIME type and base64 data from DataURI
+ const [mimeTypePart, base64Data] = imageDataUri.split(",");
+ if (!base64Data || !mimeTypePart) {
+ throw new Error("Invalid DataURI format");
+ }
+
+ // Extract MIME type (e.g., "data:image/png;base64" -> "image/png")
+ const mimeTypeMatch = mimeTypePart.match(/data:([^;]+)/);
+ const mimeType = mimeTypeMatch ? mimeTypeMatch[1] : "image/png";
+
+ promptParts.push({
+ inlineData: {
+ mimeType,
+ data: base64Data,
+ },
+ });
+ }
+ }
+
+ contents = promptParts;
+ }
+
+ const response = await ai.models.generateContent({
+ model: request.modelId,
+ contents,
+ });
+
+ const images: string[] = [];
+
+ // Process response parts
+ if (response.candidates && response.candidates.length > 0) {
+ const candidate = response.candidates[0];
+ if (candidate?.content?.parts) {
+ for (const part of candidate.content.parts) {
+ if (part.inlineData) {
+ // Convert base64 to DataURI format
+ const mimeType = part.inlineData.mimeType || "image/png";
+ const base64Data = part.inlineData.data;
+ const dataUri = `data:${mimeType};base64,${base64Data}`;
+ images.push(dataUri);
+ }
+ }
+ }
+ }
+
+ return images;
+};
+
const Google: AiProvider = {
id: "google",
name: "Google",
@@ -24,14 +93,14 @@ const Google: AiProvider = {
id: "gemini-2.5-flash-image-preview",
name: "Nano Banana",
ability: "i2i",
- maxInputImages: 5,
+ maxInputImages: 4,
enabledByDefault: true,
},
{
id: "gemini-2.0-flash-preview-image-generation",
name: "Gemini 2.0 Flash Image Generation",
ability: "i2i",
- maxInputImages: 5,
+ maxInputImages: 4,
enabledByDefault: true,
},
{
@@ -63,72 +132,17 @@ const Google: AiProvider = {
return doParseSettings(settings, googleSettingsSchema) as GoogleSettings;
},
generate: async (request, settings) => {
- const { apiKey } = Google.parseSettings(settings);
-
- const ai = new GoogleGenAI({ apiKey });
-
try {
- const ability = chooseAblility(request, findModel(Google, request.modelId).ability);
-
- let contents: any;
-
- if (ability === "t2i") {
- // Text-to-image generation
- contents = request.prompt;
- } else {
- // Image-to-image generation
- const promptParts: any[] = [{ text: request.prompt }];
-
- // Add images to the prompt
- if (request.images && request.images.length > 0) {
- for (const imageDataUri of request.images) {
- // Extract MIME type and base64 data from DataURI
- const [mimeTypePart, base64Data] = imageDataUri.split(",");
- if (!base64Data || !mimeTypePart) {
- throw new Error("Invalid DataURI format");
- }
-
- // Extract MIME type (e.g., "data:image/png;base64" -> "image/png")
- const mimeTypeMatch = mimeTypePart.match(/data:([^;]+)/);
- const mimeType = mimeTypeMatch ? mimeTypeMatch[1] : "image/png";
-
- promptParts.push({
- inlineData: {
- mimeType,
- data: base64Data,
- },
- });
- }
- }
+ const imageCount = request.n || 1;
- contents = promptParts;
- }
+ // Generate images in parallel using Promise.all
+ const generatePromises = Array.from({ length: imageCount }, () => generateSingle(request, settings));
- const response = await ai.models.generateContent({
- model: request.modelId,
- contents,
- });
-
- const images: string[] = [];
-
- // Process response parts
- if (response.candidates && response.candidates.length > 0) {
- const candidate = response.candidates[0];
- if (candidate?.content?.parts) {
- for (const part of candidate.content.parts) {
- if (part.inlineData) {
- // Convert base64 to DataURI format
- const mimeType = part.inlineData.mimeType || "image/png";
- const base64Data = part.inlineData.data;
- const dataUri = `data:${mimeType};base64,${base64Data}`;
- images.push(dataUri);
- }
- }
- }
- }
+ const results = await Promise.all(generatePromises);
+ const allImages = results.flat();
return {
- images,
+ images: allImages,
};
} catch (error: any) {
// Handle common Google AI errors
diff --git a/src/server/ai/provider/openai.ts b/src/server/ai/provider/openai.ts
index d26b11b..10f68e8 100644
--- a/src/server/ai/provider/openai.ts
+++ b/src/server/ai/provider/openai.ts
@@ -54,6 +54,14 @@ const openAISettingsSchema = [
// Automatically generate type from schema
export type OpenAISettings = ProviderSettingsType;
+const aspectRatioSizes = {
+ "1:1": "1024x1024",
+ "16:9": "1792x1024",
+ "9:16": "1024x1792",
+ "4:3": "1536x1024",
+ "3:4": "1024x1536",
+};
+
const OpenAI: AiProvider = {
id: "openai",
name: "OpenAI",
@@ -67,6 +75,7 @@ const OpenAI: AiProvider = {
ability: "i2i",
maxInputImages: 3,
enabledByDefault: true,
+ supportedAspectRatios: ["1:1", "16:9", "9:16", "4:3", "3:4"],
},
],
parseSettings: (settings: ApiProviderSettings) => {
@@ -78,6 +87,10 @@ const OpenAI: AiProvider = {
const client = new openai.OpenAI({ baseURL, apiKey, dangerouslyAllowBrowser: true });
let generateResult: openai.ImagesResponse;
+ let size: any = null;
+ if (request.aspectRatio) {
+ size = aspectRatioSizes[request.aspectRatio];
+ }
try {
switch (chooseAblility(request, findModel(OpenAI, request.modelId).ability)) {
case "t2i":
@@ -86,15 +99,17 @@ const OpenAI: AiProvider = {
model,
prompt: request.prompt,
n: request.n || 1,
+ size,
});
break;
default:
// Image editing
generateResult = await client.images.edit({
model,
- image: request.images!.map(createImageStreamFromDataUri),
+ image: createImageStreamFromDataUri(request.images![0]!),
prompt: request.prompt,
n: request.n || 1,
+ size,
});
break;
}
diff --git a/src/server/ai/types/api.ts b/src/server/ai/types/api.ts
index db6d62b..0f2606e 100644
--- a/src/server/ai/types/api.ts
+++ b/src/server/ai/types/api.ts
@@ -1,12 +1,25 @@
import type { ErrorReason } from "@/server/db/schemas";
import z from "zod/v4";
+// Define supported aspect ratios array
+const aspectRatios = ["1:1", "16:9", "9:16", "4:3", "3:4"] as const;
+export type AspectRatio = (typeof aspectRatios)[number];
+
+export const commonAspectRatioSizes: Record = {
+ "1:1": { width: 1024, height: 1024 },
+ "16:9": { width: 1920, height: 1080 },
+ "9:16": { width: 1080, height: 1920 },
+ "4:3": { width: 1600, height: 1200 },
+ "3:4": { width: 1200, height: 1600 },
+};
+
export const TypixGenerateRequestSchema = z.object({
providerId: z.string(),
modelId: z.string(),
- n: z.number().int().min(1).default(1).optional(),
- images: z.array(z.string()).optional(), // Optional images for image generation, Data URI (base64)
prompt: z.string(),
+ images: z.array(z.string()).optional(), // Optional images for image generation, Data URI (base64)
+ n: z.number().int().min(1).default(1).optional(),
+ aspectRatio: z.enum(aspectRatios).optional(), // Optional aspect ratio
});
export type TypixGenerateRequest = z.infer;
diff --git a/src/server/ai/types/model.ts b/src/server/ai/types/model.ts
index 18ad6d2..e0dd49a 100644
--- a/src/server/ai/types/model.ts
+++ b/src/server/ai/types/model.ts
@@ -1,3 +1,5 @@
+import type { AspectRatio } from "./api";
+
export type Ability = "t2i" | "i2i";
export interface AiModel {
@@ -6,4 +8,5 @@ export interface AiModel {
ability: Ability; // Model image generation ability
maxInputImages?: number; // Maximum number of input images for i2i models, default is 1
enabledByDefault?: boolean; // Whether this model is enabled by default
+ supportedAspectRatios?: AspectRatio[]; // Supported aspect ratios for the model
}
diff --git a/src/server/db/util.ts b/src/server/db/util.ts
index e67c033..3ef1fa8 100644
--- a/src/server/db/util.ts
+++ b/src/server/db/util.ts
@@ -1,4 +1,3 @@
-import { sql } from "drizzle-orm";
import { text } from "drizzle-orm/sqlite-core";
import { customAlphabet } from "nanoid/non-secure";
diff --git a/src/server/lib/util.ts b/src/server/lib/util.ts
index 13f4a08..1ffa77b 100644
--- a/src/server/lib/util.ts
+++ b/src/server/lib/util.ts
@@ -2,6 +2,10 @@ export function base64ToDataURI(base64: string, fmt = "png") {
return `data:image/${fmt};base64,${base64}`;
}
+export function dataURItoBase64(dataURI: string) {
+ return dataURI.split(",")[1];
+}
+
export async function readableStreamToDataURI(stream: ReadableStream, fmt = "png") {
const reader = stream.getReader();
const chunks: Uint8Array[] = [];
diff --git a/src/server/service/chat/index.ts b/src/server/service/chat/index.ts
index 1a05e6c..9e39506 100644
--- a/src/server/service/chat/index.ts
+++ b/src/server/service/chat/index.ts
@@ -19,6 +19,14 @@ export const CreateChatSchema = createInsertSchema(chats)
})
.extend({
content: z.string().optional(),
+ /**
+ * Number of images to generate
+ */
+ imageCount: z.number().int().min(1).max(10).default(1),
+ /**
+ * Aspect ratio for image generation
+ */
+ aspectRatio: z.enum(["1:1", "16:9", "9:16", "4:3", "3:4"]).optional(),
/**
* Attachments for the first message
*/
@@ -59,6 +67,8 @@ const createChat = async (req: CreateChat, ctx: RequestContext) => {
type: "text",
provider: req.provider,
model: req.model,
+ imageCount: req.imageCount, // Pass the image count
+ aspectRatio: req.aspectRatio, // Pass the aspect ratio
attachments: req.attachments,
images: req.images,
},
@@ -263,6 +273,14 @@ export const CreateMessageSchema = createInsertSchema(messages)
.extend({
provider: z.string(),
model: z.string(),
+ /**
+ * Number of images to generate
+ */
+ imageCount: z.number().int().min(1).max(10).default(1),
+ /**
+ * Aspect ratio for image generation
+ */
+ aspectRatio: z.enum(["1:1", "16:9", "9:16", "4:3", "3:4"]).optional(),
/**
* base64-encoded image strings for attachments
*/
@@ -292,12 +310,25 @@ interface GenerationParams {
chatId: string;
userId: string;
userImages?: string[];
+ imageCount?: number; // Number of images to generate
+ aspectRatio?: string; // Aspect ratio for image generation
messageId?: string; // For regeneration, exclude this message from reference search
}
const executeImageGeneration = async (params: GenerationParams, ctx: RequestContext) => {
const { db } = getContext();
- const { generationId, prompt, provider: providerId, model: modelId, chatId, userId, userImages, messageId } = params;
+ const {
+ generationId,
+ prompt,
+ provider: providerId,
+ model: modelId,
+ chatId,
+ userId,
+ userImages,
+ imageCount,
+ aspectRatio,
+ messageId,
+ } = params;
try {
const providerInstance = getProviderById(providerId);
@@ -369,6 +400,8 @@ const executeImageGeneration = async (params: GenerationParams, ctx: RequestCont
modelId,
prompt,
images: referImages,
+ n: imageCount || 1, // Pass the image count to provider
+ aspectRatio: aspectRatio as any, // Pass the aspect ratio to provider
},
settings,
);
@@ -514,6 +547,8 @@ const createMessage = async (req: CreateMessage, ctx: RequestContext) => {
chatId: req.chatId,
userId,
userImages,
+ imageCount: req.imageCount, // Pass the image count
+ aspectRatio: req.aspectRatio, // Pass the aspect ratio
messageId: assistantMessage.id,
},
ctx,
@@ -632,6 +667,9 @@ const regenerateMessage = async (req: RegenerateMessage, ctx: RequestContext) =>
model: originalGeneration.model,
chatId: chat.id,
userId,
+ // For regeneration, we can infer imageCount from existing fileIds count
+ // or fallback to 1 if no previous results
+ imageCount: Array.isArray(originalGeneration.fileIds) ? originalGeneration.fileIds.length : 1,
messageId: req.messageId, // Exclude this message from reference search
},
ctx,
diff --git a/tailwind.config.js b/tailwind.config.js
index bc2d185..6e4d36d 100644
--- a/tailwind.config.js
+++ b/tailwind.config.js
@@ -2,6 +2,14 @@
export default {
content: ["./index.html", "./src/**/*.{js,ts,jsx,tsx}"],
theme: {
+ screens: {
+ xs: '475px',
+ sm: '640px',
+ md: '768px',
+ lg: '1024px',
+ xl: '1280px',
+ '2xl': '1536px',
+ },
extend: {
height: {
// Custom height for mobile content area excluding bottom navigation