Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions models/qwen2-4b-instruct-q4/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
# Qwen2-4B-Instruct (int4) - Placeholder

Place model manifest and quantized weights here for WebGPU/WebLLM runtime.
91 changes: 91 additions & 0 deletions packages/client/src/components/AIWorkAssistant/ChatPanel.tsx
Original file line number Diff line number Diff line change
@@ -0,0 +1,91 @@
import { useMemo, useState } from "react";
import { normalizeToJSON } from "@/lib/ai/normalize";
import { SYSTEM_PROMPT_EN } from "@/lib/ai/prompts/system.en";
import { SYSTEM_PROMPT_ES } from "@/lib/ai/prompts/system.es";
import { SYSTEM_PROMPT_PT } from "@/lib/ai/prompts/system.pt";
import { detectLang } from "@/lib/ai/languages";
import { pinFileToIPFS, pinJSONToIPFS } from "@/lib/ipfs/pin";
import type { WorkSession } from "@/schemas/workSession";
import { MicButton } from "./MicButton";
import { ReviewCard } from "./ReviewCard";

/**
 * Chat-style panel that turns a free-text work description (typed or dictated)
 * into a structured WorkSession via a local LLM, pins media + metadata to IPFS,
 * and hands the results to the parent through `onReady`.
 *
 * @param props.imageFiles  Photos of the work; first is treated as "before", last as "after".
 * @param props.startTime   Optional ISO start timestamp forwarded to normalization.
 * @param props.endTime     Optional ISO end timestamp forwarded to normalization.
 * @param props.onReady     Called with the validated session, its metadata CID, and media CIDs.
 * @param props.llm         Local LLM proxy; may be null while the model is loading.
 */
export function ChatPanel(props: {
  imageFiles: File[];
  startTime?: string;
  endTime?: string;
  onReady: (work: WorkSession, metadataCID: string, mediaCIDs: string[]) => void;
  llm: { complete: (args: { system: string; input: string }) => Promise<string> } | null;
}) {
  const [text, setText] = useState("");
  const [pending, setPending] = useState(false);
  const [error, setError] = useState<string | null>(null);
  const [preview, setPreview] = useState<WorkSession | null>(null);
  // Cached so repeated normalization attempts don't re-pin the same files.
  const [mediaCIDs, setMediaCIDs] = useState<string[] | null>(null);

  // Pick the system prompt matching the (heuristically) detected input language.
  const systemPrompt = useMemo(() => {
    const lang = detectLang(text);
    if (lang === "es") return SYSTEM_PROMPT_ES;
    if (lang === "pt") return SYSTEM_PROMPT_PT;
    return SYSTEM_PROMPT_EN;
  }, [text]);

  // Pin all files to IPFS (once), returning their CIDs in input order.
  async function ensureMediaCIDs(files: File[]) {
    if (mediaCIDs) return mediaCIDs;
    // Pin in parallel; Promise.all preserves input order, so the
    // before/after mapping below stays correct.
    const cids = await Promise.all(files.map((f) => pinFileToIPFS(f)));
    setMediaCIDs(cids);
    return cids;
  }

  async function handleNormalize() {
    setPending(true);
    setError(null);
    try {
      const cids = await ensureMediaCIDs(props.imageFiles);
      // The button is disabled without images, but guard anyway so
      // cids[0] below can never be undefined.
      if (cids.length === 0) {
        throw new Error("At least one photo is required");
      }
      const photos = [
        { type: "before" as const, cid: cids[0] },
        { type: "after" as const, cid: cids[cids.length - 1] },
      ];
      const work = await normalizeToJSON(
        // Fallback stub keeps the flow testable while the model loads.
        props.llm ?? { complete: async () => "{}" },
        { userText: text, photos, startTime: props.startTime, endTime: props.endTime },
        systemPrompt
      );
      setPreview(work);
      const metadataCID = await pinJSONToIPFS(work);
      props.onReady(work, metadataCID, cids);
    } catch (e: unknown) {
      setError(e instanceof Error ? e.message : String(e));
    } finally {
      setPending(false);
    }
  }

  return (
    <div className="space-y-3">
      <textarea
        className="w-full rounded border p-3"
        rows={4}
        placeholder="Tell me what you did… (puedes hablar en español / você pode falar em português)"
        value={text}
        onChange={(e) => setText(e.target.value)}
      />
      <div className="flex gap-2 items-center">
        <MicButton onText={setText} />
        <button
          disabled={pending || !text.trim() || props.imageFiles.length === 0}
          onClick={handleNormalize}
          className="px-4 py-2 rounded bg-emerald-600 text-white"
          type="button"
        >
          Let the AI structure it
        </button>
      </div>
      {error && <p className="text-red-600 text-sm">{error}</p>}
      {preview && <ReviewCard data={preview} />}
    </div>
  );
}
25 changes: 25 additions & 0 deletions packages/client/src/components/AIWorkAssistant/MicButton.tsx
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
import { RiMicFill, RiStopFill } from "@remixicon/react";
import React from "react";
import { useSpeechToText } from "@/hooks/useSpeechToText";

/**
 * Toggle button for speech-to-text capture. Forwards each (interim or final)
 * transcript to `onText` while listening; shows a stop icon while active.
 * Accepts standard button attributes, which are spread onto the element.
 */
export const MicButton: React.FC<
  // ButtonHTMLAttributes (not plain HTMLAttributes) so button-specific
  // props like `disabled` type-check for callers.
  { onText: (t: string) => void } & React.ButtonHTMLAttributes<HTMLButtonElement>
> = ({ onText, ...rest }) => {
  const { listening, transcript, start, stop } = useSpeechToText();

  React.useEffect(() => {
    if (transcript) onText(transcript);
    // onText included so a changed callback isn't called with a stale closure.
  }, [transcript, onText]);

  return (
    <button
      {...rest}
      type="button"
      onClick={() => (listening ? stop() : start())}
      className={`px-3 py-2 rounded ${listening ? "bg-red-600 text-white" : "bg-gray-100"}`}
      aria-pressed={listening}
    >
      {listening ? <RiStopFill className="w-5 h-5" /> : <RiMicFill className="w-5 h-5" />}
    </button>
  );
};
26 changes: 26 additions & 0 deletions packages/client/src/components/AIWorkAssistant/ReviewCard.tsx
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
import React from "react";
import type { WorkSession } from "@/schemas/workSession";

/**
 * Read-only summary card for an AI-normalized work session. Location and
 * materials are rendered only when present on the session.
 */
export const ReviewCard: React.FC<{ data: WorkSession }> = ({ data }) => {
  return (
    <div className="rounded border p-3 text-sm space-y-2">
      <div className="font-semibold">AI Summary</div>
      <div>
        <span className="font-medium">Action:</span> {data.actionType}
      </div>
      <div>
        <span className="font-medium">Description:</span> {data.description}
      </div>
      {data.location && (
        <div>
          <span className="font-medium">Location:</span> {data.location}
        </div>
      )}
      {/* `?? 0` keeps this a boolean comparison under strictNullChecks
          (materialsUsed may be undefined). */}
      {(data.materialsUsed?.length ?? 0) > 0 && (
        <div>
          <span className="font-medium">Materials:</span> {data.materialsUsed.join(", ")}
        </div>
      )}
    </div>
  );
};
12 changes: 11 additions & 1 deletion packages/client/src/components/Garden/Work.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -84,7 +84,17 @@ const WorkList = ({ works, actions, workFetchStatus }: WorkListProps) => {
);
return (
<li style={style} className="p-2">
<MinimalWorkCard onClick={onOpen} work={work as unknown as Work} actionTitle={title} />
<MinimalWorkCard
onClick={onOpen}
work={{
id: work.id,
title,
description: work.feedback,
status: work.status as any,
createdAt: work.createdAt,
gardenId: work.gardenAddress,
}}
/>
</li>
);
});
Expand Down
2 changes: 2 additions & 0 deletions packages/client/src/hooks/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,8 @@ export { useNavigateToTop } from "./useNavigateToTop";
export { useOffline } from "./useOffline";
export type { UseStorageManagerReturn } from "./useStorageManager";
export { useStorageManager } from "./useStorageManager";
export { useLocalLLM } from "./useLocalLLM";
export { useSpeechToText } from "./useSpeechToText";

// Export job queue and works hooks
export {
Expand Down
47 changes: 47 additions & 0 deletions packages/client/src/hooks/useLocalLLM.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,47 @@
import { useEffect, useMemo, useRef, useState } from "react";

/** Request/response proxy for the LLM running inside the web worker. */
export type LLMProxy = {
  complete: (args: { system: string; input: string }) => Promise<string>;
};

/**
 * Spins up the AI web worker on mount and exposes a promise-based `complete`
 * API over its message channel. `llm` is null until the worker reports ready;
 * the worker is terminated on unmount.
 */
export function useLocalLLM() {
  const workerRef = useRef<Worker | null>(null);
  const [ready, setReady] = useState(false);

  useEffect(() => {
    const worker = new Worker(new URL("@/workers/ai-worker.ts", import.meta.url), {
      type: "module",
    });
    workerRef.current = worker;
    const onMessage = (e: MessageEvent) => {
      if (e.data?.type === "ready") setReady(true);
    };
    worker.addEventListener("message", onMessage);
    worker.postMessage({ type: "init" });
    return () => {
      worker.removeEventListener("message", onMessage);
      worker.terminate();
      workerRef.current = null;
    };
  }, []);

  const api: LLMProxy | null = useMemo(() => {
    // Gate on `ready` (render state), not workerRef.current: refs don't
    // trigger re-renders, so they can't drive memo invalidation. Before
    // the ready message the proxy is null either way.
    if (!ready) return null;
    return {
      complete: (args) =>
        new Promise((resolve, reject) => {
          const worker = workerRef.current;
          if (!worker) {
            // Reject instead of hanging forever if the worker was torn down.
            reject(new Error("LLM worker is not available"));
            return;
          }
          // Random id correlates this request with its reply on the shared channel.
          const id = Math.random().toString(36).slice(2);
          const onMessage = (e: MessageEvent) => {
            if (e.data?.type === "complete" && e.data?.id === id) {
              worker.removeEventListener("message", onMessage);
              resolve(e.data.text as string);
            }
          };
          worker.addEventListener("message", onMessage);
          worker.postMessage({ type: "complete", id, ...args });
        }),
    };
  }, [ready]);

  return { llm: api, ready } as const;
}
2 changes: 1 addition & 1 deletion packages/client/src/hooks/useOffline.ts
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ export function useOffline() {
const { data: stats } = useQueueStatistics();

// Get pending work items using the job queue directly (no polling)
const pendingWork = stats ? stats.pending + stats.failed : 0;
void stats; // kept for potential future use

useEffect(() => {
const handleOnline = () => {
Expand Down
53 changes: 53 additions & 0 deletions packages/client/src/hooks/useSpeechToText.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,53 @@
import { useEffect, useRef, useState } from "react";

/** Observable state of the speech-to-text session. */
export type STTState = {
  listening: boolean;
  transcript: string;
  error?: string;
};

/**
 * Thin wrapper around the browser SpeechRecognition API (no-op where the
 * API is unavailable, e.g. Firefox). Interim results are surfaced as they
 * arrive via `transcript`.
 *
 * @param lang BCP-47 recognition language (default "en-US"); the app also
 *             accepts Spanish/Portuguese input, so callers may pass e.g. "es-ES".
 */
export function useSpeechToText(lang = "en-US") {
  const [state, setState] = useState<STTState>({ listening: false, transcript: "" });
  const recognitionRef = useRef<any>(null);

  useEffect(() => {
    // @ts-ignore — webkitSpeechRecognition is not in the standard lib types.
    const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
    if (SpeechRecognition) {
      const rec = new SpeechRecognition();
      recognitionRef.current = rec;
      rec.lang = lang;
      rec.continuous = false;
      rec.interimResults = true;
      rec.onresult = (event: any) => {
        // Concatenate everything from resultIndex onward so interim
        // updates replace (not append to) the running transcript.
        let finalText = "";
        for (let i = event.resultIndex; i < event.results.length; i++) {
          const res = event.results[i];
          finalText += res[0].transcript;
        }
        setState((s) => ({ ...s, transcript: finalText }));
      };
      rec.onend = () => setState((s) => ({ ...s, listening: false }));
      rec.onerror = (e: any) => setState((s) => ({ ...s, error: String(e?.error || e) }));
    }
    return () => {
      try {
        recognitionRef.current?.stop?.();
      } catch {}
    };
    // Recreate the recognizer when the requested language changes.
  }, [lang]);

  const start = () => {
    setState((s) => ({ ...s, listening: true, transcript: "" }));
    try {
      recognitionRef.current?.start?.();
    } catch {
      // start() throws if already started or unsupported — don't stay
      // stuck in the listening state (onend won't fire in that case).
      setState((s) => ({ ...s, listening: false }));
    }
  };
  const stop = () => {
    try {
      recognitionRef.current?.stop?.();
    } catch {}
  };

  return { ...state, start, stop } as const;
}
2 changes: 1 addition & 1 deletion packages/client/src/hooks/useWorkApprovals.ts
Original file line number Diff line number Diff line change
Expand Up @@ -187,7 +187,7 @@ export function useWorkApprovals(attesterAddress?: string) {
];

// Event-driven invalidation for approval jobs
useJobQueueEvents(["job:added", "job:completed", "job:failed"], (eventType, data) => {
useJobQueueEvents(["job:added", "job:completed", "job:failed"], (_eventType, data) => {
if ("job" in data && data.job.kind === "approval") {
queryClient.invalidateQueries({
queryKey: ["workApprovals", "byAttester", attesterAddress, chainId],
Expand Down
40 changes: 40 additions & 0 deletions packages/client/src/lib/ai/actionMap.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
/**
 * Canonical action types mapped to trigger phrases in English, Spanish,
 * and Portuguese. Entry order matters: `normalizeAction` returns the
 * first category whose phrase appears in the utterance.
 */
export const ACTION_SYNONYMS = {
  planting: [
    "planting",
    "plant",
    "sowing",
    "siembra",
    "sembrar",
    "planté",
    "plantei",
    "plantio",
    "plantar",
  ],
  invasive_removal: [
    "weeding",
    "invasive removal",
    "remove invasives",
    "desyerbar",
    "quitar malezas",
    "remoção de invasoras",
    "capina",
  ],
  watering: ["watering", "regar", "riego", "regando", "regar plantas"],
  litter_cleanup: [
    "cleanup",
    "trash pickup",
    "litter",
    "limpieza de basura",
    "limpeza de lixo",
    "coleta de lixo",
  ],
  harvesting: ["harvest", "cosecha", "cosechar", "colheita", "colher"],
} as const;

/** Canonical action identifier, derived from the synonym table. */
export type ActionKey = keyof typeof ACTION_SYNONYMS;

/**
 * Map a free-text utterance to a canonical action type, or null when no
 * synonym matches.
 *
 * NOTE(review): matching is by substring, so e.g. "plants" matches the
 * "plant" synonym — acceptable here, but word-boundary matching would be
 * stricter if false positives become a problem.
 */
export const normalizeAction = (utterance: string): ActionKey | null => {
  const u = utterance.toLowerCase();
  // Typed key iteration instead of Object.entries + `as any`.
  for (const key of Object.keys(ACTION_SYNONYMS) as ActionKey[]) {
    if (ACTION_SYNONYMS[key].some((v) => u.includes(v))) return key;
  }
  return null;
};
7 changes: 7 additions & 0 deletions packages/client/src/lib/ai/languages.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
export type Lang = "en" | "es" | "pt";

/**
 * Heuristic language detection for short utterances.
 *
 * Fixes over the previous version:
 * - NFC (not NFKD) normalization: NFKD decomposes "á" into "a" + combining
 *   accent, so the precomposed character classes below could never match.
 * - Dropped single-letter stopwords ("a", "o", "e") and shared words ("de",
 *   "que") that misclassified plain English (e.g. "I planted a tree") and
 *   routed Portuguese text containing "de" to Spanish.
 * - Language-unique characters (ã/õ/ç vs ñ/¿/¡) are checked first since
 *   they are the strongest signal.
 */
export const detectLang = (s: string): Lang => {
  const t = s.normalize("NFC").toLowerCase();
  // Characters unique to one language win outright.
  if (/[ãõç]/.test(t)) return "pt";
  if (/[ñ¿¡]/.test(t)) return "es";
  // Multi-letter stopwords that are rare in the other two languages.
  if (/\b(el|la|los|las|y|con|está|qué)\b/.test(t)) return "es";
  if (/\b(não|você|uma|isso|muito|em)\b/.test(t)) return "pt";
  // Shared accented vowels: ambiguous es/pt; default to Spanish as before.
  if (/[áéíóú]/.test(t)) return "es";
  return "en";
};
40 changes: 40 additions & 0 deletions packages/client/src/lib/ai/normalize.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
import { WorkSessionSchema } from "@/schemas/workSession";
import { detectLang } from "./languages";
import { normalizeAction } from "./actionMap";

/** Raw material handed to the LLM for structuring. */
export type NormalizeInput = {
  userText: string;
  photos: { type: "before" | "after"; cid: string }[];
  startTime?: string;
  endTime?: string;
};

/** Minimal contract the LLM proxy must satisfy (matches useLocalLLM's LLMProxy). */
type LLMLike = {
  complete: (args: { system: string; input: string }) => Promise<string>;
};

/**
 * Ask the LLM to structure a free-text work description into a WorkSession.
 * Photos, detected language, and the aiAssisted flag are set locally (not
 * trusted from the model), then the whole object is validated with zod.
 *
 * @throws Error when the model output is not valid JSON or fails schema validation.
 */
export async function normalizeToJSON(llm: LLMLike, input: NormalizeInput, systemPrompt: string) {
  const lang = detectLang(input.userText);
  // Keyword-derived action hint steers the model toward a canonical type.
  const hintedAction = normalizeAction(input.userText);
  const hint = hintedAction ? `\nHINT actionType: ${hintedAction}\n` : "\n";

  const userMsg = `
${hint}
USER_TEXT:
${input.userText}

PHOTOS:
${JSON.stringify(input.photos)}
START: ${input.startTime ?? ""}
END: ${input.endTime ?? ""}
LANG: ${lang}
`;

  const raw = await llm.complete({ system: systemPrompt, input: userMsg });

  // Models frequently wrap JSON in markdown fences — strip them before parsing.
  const jsonText = raw
    .replace(/^\s*```(?:json)?\s*/i, "")
    .replace(/\s*```\s*$/, "");

  let parsed: Record<string, unknown>;
  try {
    parsed = JSON.parse(jsonText);
  } catch {
    throw new Error("Normalization failed: model did not return valid JSON");
  }

  // Locally-known facts always override whatever the model produced.
  parsed.photos = input.photos;
  parsed.lang = lang;
  parsed.aiAssisted = true;

  const result = WorkSessionSchema.safeParse(parsed);
  if (!result.success) {
    throw new Error("Normalization failed: " + JSON.stringify(result.error.issues));
  }
  return result.data;
}
Loading