diff --git a/example/convex/_generated/api.d.ts b/example/convex/_generated/api.d.ts
index d562b3df..9bcb1812 100644
--- a/example/convex/_generated/api.d.ts
+++ b/example/convex/_generated/api.d.ts
@@ -18,6 +18,7 @@ import type * as chat_human from "../chat/human.js";
import type * as chat_streamAbort from "../chat/streamAbort.js";
import type * as chat_streaming from "../chat/streaming.js";
import type * as chat_streamingReasoning from "../chat/streamingReasoning.js";
+import type * as coreMemories_utils from "../coreMemories/utils.js";
import type * as crons from "../crons.js";
import type * as debugging_rawRequestResponseHandler from "../debugging/rawRequestResponseHandler.js";
import type * as etc_objects from "../etc/objects.js";
@@ -71,6 +72,7 @@ declare const fullApi: ApiFromModules<{
"chat/streamAbort": typeof chat_streamAbort;
"chat/streaming": typeof chat_streaming;
"chat/streamingReasoning": typeof chat_streamingReasoning;
+ "coreMemories/utils": typeof coreMemories_utils;
crons: typeof crons;
"debugging/rawRequestResponseHandler": typeof debugging_rawRequestResponseHandler;
"etc/objects": typeof etc_objects;
@@ -135,6 +137,61 @@ export declare const components: {
boolean
>;
};
+ coreMemories: {
+ append: FunctionReference<
+ "mutation",
+ "internal",
+ { field: "persona" | "human"; text: string; userId?: string },
+ null
+ >;
+ get: FunctionReference<
+ "query",
+ "internal",
+ { userId?: string },
+ null | {
+ _creationTime: number;
+ _id: string;
+ human: string;
+ persona: string;
+ userId?: string;
+ }
+ >;
+ getOrCreate: FunctionReference<
+ "mutation",
+ "internal",
+ { human: string; persona: string; userId?: string },
+ null | {
+ _creationTime: number;
+ _id: string;
+ human: string;
+ persona: string;
+ userId?: string;
+ }
+ >;
+ remove: FunctionReference<
+ "mutation",
+ "internal",
+ { userId?: string },
+ null
+ >;
+ replace: FunctionReference<
+ "mutation",
+ "internal",
+ {
+ field: "persona" | "human";
+ newContent: string;
+ oldContent: string;
+ userId?: string;
+ },
+ number
+ >;
+ update: FunctionReference<
+ "mutation",
+ "internal",
+ { human?: string; persona?: string; userId?: string },
+ null
+ >;
+ };
files: {
addFile: FunctionReference<
"mutation",
@@ -838,7 +895,7 @@ export declare const components: {
"mutation",
"internal",
{ messageIds: Array },
- Array
+ any
>;
deleteByOrder: FunctionReference<
"mutation",
diff --git a/example/convex/agents/config.ts b/example/convex/agents/config.ts
index 11d0c19e..31198043 100644
--- a/example/convex/agents/config.ts
+++ b/example/convex/agents/config.ts
@@ -12,4 +12,6 @@ export const defaultConfig = {
},
// If you want to use vector search, you need to set this.
textEmbeddingModel,
+ // Enable built-in memory tools (append/replace core memory, message search)
+ memoryTools: true,
} satisfies Config;
diff --git a/example/convex/coreMemories/utils.ts b/example/convex/coreMemories/utils.ts
new file mode 100644
index 00000000..d698bea8
--- /dev/null
+++ b/example/convex/coreMemories/utils.ts
@@ -0,0 +1,78 @@
+import { components } from "../_generated/api";
+import { mutation, query } from "../_generated/server";
+import { v } from "convex/values";
+import { getAuthUserId } from "../utils";
+
+export const get = query({
+ args: {},
+ handler: async (ctx) => {
+ const userId = await getAuthUserId(ctx);
+ return await ctx.runQuery(components.agent.coreMemories.get, { userId });
+ },
+});
+
+export const getOrCreate = mutation({
+ args: {},
+ handler: async (ctx) => {
+ const userId = await getAuthUserId(ctx);
+ return await ctx.runMutation(components.agent.coreMemories.getOrCreate, {
+ userId,
+ persona: "",
+ human: "",
+ });
+ },
+});
+
+export const update = mutation({
+ args: {
+ persona: v.optional(v.string()),
+ human: v.optional(v.string()),
+ },
+ handler: async (ctx, args) => {
+ const userId = await getAuthUserId(ctx);
+ await ctx.runMutation(components.agent.coreMemories.update, {
+ userId,
+ ...args,
+ });
+ },
+});
+
+export const append = mutation({
+ args: {
+ field: v.union(v.literal("persona"), v.literal("human")),
+ text: v.string(),
+ },
+ handler: async (ctx, args) => {
+ const userId = await getAuthUserId(ctx);
+ await ctx.runMutation(components.agent.coreMemories.append, {
+ userId,
+ field: args.field,
+ text: args.text,
+ });
+ },
+});
+
+export const replace = mutation({
+ args: {
+ field: v.union(v.literal("persona"), v.literal("human")),
+ oldContent: v.string(),
+ newContent: v.string(),
+ },
+ handler: async (ctx, args) => {
+ const userId = await getAuthUserId(ctx);
+ return await ctx.runMutation(components.agent.coreMemories.replace, {
+ userId,
+ field: args.field,
+ oldContent: args.oldContent,
+ newContent: args.newContent,
+ });
+ },
+});
+
+export const remove = mutation({
+ args: {},
+ handler: async (ctx) => {
+ const userId = await getAuthUserId(ctx);
+ await ctx.runMutation(components.agent.coreMemories.remove, { userId });
+ },
+});
diff --git a/example/convex/modelsForDemo.ts b/example/convex/modelsForDemo.ts
index 982a7f03..4a8f6031 100644
--- a/example/convex/modelsForDemo.ts
+++ b/example/convex/modelsForDemo.ts
@@ -4,13 +4,13 @@ import type { LanguageModelV2 } from "@ai-sdk/provider";
import { openai } from "@ai-sdk/openai";
import { groq } from "@ai-sdk/groq";
import { mockModel } from "@convex-dev/agent";
+import { google } from "@ai-sdk/google";
let languageModel: LanguageModelV2;
let textEmbeddingModel: EmbeddingModel;
if (process.env.OPENAI_API_KEY) {
languageModel = openai.chat("gpt-4o-mini");
- textEmbeddingModel = openai.textEmbeddingModel("text-embedding-3-small");
} else if (process.env.GROQ_API_KEY) {
languageModel = groq.languageModel(
"meta-llama/llama-4-scout-17b-16e-instruct",
@@ -24,5 +24,11 @@ if (process.env.OPENAI_API_KEY) {
);
}
+if (process.env.OPENAI_API_KEY) {
+ textEmbeddingModel = openai.textEmbeddingModel("text-embedding-3-small");
+} else if (process.env.GOOGLE_GENERATIVE_AI_API_KEY) {
+ textEmbeddingModel = google.textEmbedding("gemini-embedding-001");
+}
+
// If you want to use different models for examples, you can change them here.
export { languageModel, textEmbeddingModel };
diff --git a/example/package-lock.json b/example/package-lock.json
index d5fbd30a..66c6aa26 100644
--- a/example/package-lock.json
+++ b/example/package-lock.json
@@ -8,6 +8,7 @@
"name": "agent-example",
"version": "0.0.0",
"dependencies": {
+ "@ai-sdk/google": "^2.0.14",
"@ai-sdk/groq": "^2.0.0",
"@ai-sdk/openai": "^2.0.0",
"@ai-sdk/provider": "^2.0.0",
@@ -141,6 +142,39 @@
"zod": "^3.25.76 || ^4"
}
},
+ "node_modules/@ai-sdk/google": {
+ "version": "2.0.14",
+ "resolved": "https://registry.npmjs.org/@ai-sdk/google/-/google-2.0.14.tgz",
+ "integrity": "sha512-OCBBkEUq1RNLkbJuD+ejqGsWDD0M5nRyuFWDchwylxy0J4HSsAiGNhutNYVTdnqmNw+r9LyZlkyZ1P4YfAfLdg==",
+ "license": "Apache-2.0",
+ "dependencies": {
+ "@ai-sdk/provider": "2.0.0",
+ "@ai-sdk/provider-utils": "3.0.9"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "peerDependencies": {
+ "zod": "^3.25.76 || ^4"
+ }
+ },
+ "node_modules/@ai-sdk/google/node_modules/@ai-sdk/provider-utils": {
+ "version": "3.0.9",
+ "resolved": "https://registry.npmjs.org/@ai-sdk/provider-utils/-/provider-utils-3.0.9.tgz",
+ "integrity": "sha512-Pm571x5efqaI4hf9yW4KsVlDBDme8++UepZRnq+kqVBWWjgvGhQlzU8glaFq0YJEB9kkxZHbRRyVeHoV2sRYaQ==",
+ "license": "Apache-2.0",
+ "dependencies": {
+ "@ai-sdk/provider": "2.0.0",
+ "@standard-schema/spec": "^1.0.0",
+ "eventsource-parser": "^3.0.5"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "peerDependencies": {
+ "zod": "^3.25.76 || ^4"
+ }
+ },
"node_modules/@ai-sdk/groq": {
"version": "2.0.11",
"resolved": "https://registry.npmjs.org/@ai-sdk/groq/-/groq-2.0.11.tgz",
diff --git a/example/package.json b/example/package.json
index 4f8c5691..f934332d 100644
--- a/example/package.json
+++ b/example/package.json
@@ -14,6 +14,7 @@
"lint": "tsc -p convex && eslint convex"
},
"dependencies": {
+ "@ai-sdk/google": "^2.0.14",
"@ai-sdk/groq": "^2.0.0",
"@ai-sdk/openai": "^2.0.0",
"@ai-sdk/provider": "^2.0.0",
diff --git a/example/ui/coreMemories/MemoryUI.tsx b/example/ui/coreMemories/MemoryUI.tsx
new file mode 100644
index 00000000..54367a54
--- /dev/null
+++ b/example/ui/coreMemories/MemoryUI.tsx
@@ -0,0 +1,95 @@
+import { useMutation, useQuery } from "convex/react";
+import { api } from "../../convex/_generated/api";
+import { useEffect, useState } from "react";
+
+export default function MemoryUI() {
+ const ensureCore = useMutation(api.coreMemories.utils.getOrCreate);
+
+ useEffect(() => {
+ void ensureCore();
+ }, [ensureCore]);
+
+ return (
+
+ );
+}
+
+function MemoryEditor() {
+ const mem = useQuery(api.coreMemories.utils.get, {});
+ const update = useMutation(api.coreMemories.utils.update);
+ const append = useMutation(api.coreMemories.utils.append);
+ const replace = useMutation(api.coreMemories.utils.replace);
+ const remove = useMutation(api.coreMemories.utils.remove);
+
+ const [persona, setPersona] = useState("");
+ const [human, setHuman] = useState("");
+ const [appendField, setAppendField] = useState<"persona" | "human">("persona");
+ const [appendText, setAppendText] = useState("");
+ const [replaceField, setReplaceField] = useState<"persona" | "human">("persona");
+ const [oldText, setOldText] = useState("");
+ const [newText, setNewText] = useState("");
+
+ useEffect(() => {
+ if (mem) {
+ setPersona(mem.persona ?? "");
+ setHuman(mem.human ?? "");
+ }
+ }, [mem]);
+
+ return (
+
+
Edit Memory
+
+
+
+
+
+
+
+
+
+ );
+}
diff --git a/example/ui/main.tsx b/example/ui/main.tsx
index 995a77dc..080d0fd8 100644
--- a/example/ui/main.tsx
+++ b/example/ui/main.tsx
@@ -9,6 +9,7 @@ import FilesImages from "./files/FilesImages";
import RateLimiting from "./rate_limiting/RateLimiting";
import { WeatherFashion } from "./workflows/WeatherFashion";
import RagBasic from "./rag/RagBasic";
+import MemoryUI from "./coreMemories/MemoryUI";
const convex = new ConvexReactClient(import.meta.env.VITE_CONVEX_URL as string);
@@ -40,6 +41,7 @@ export function App() {
} />
} />
} />
+ } />
@@ -69,6 +71,17 @@ function Index() {
enough to see it in action.
+
+
+ Core Memories
+
+
+ Manage persona/human core memories with CRUD operations, append, and replace functions.
+
+
(char ? char.toUpperCase() : ""))
+ .replace(/^(.)/, (char) => char.toUpperCase());
+}
+
+function toCamelCase(str) {
+ const pascal = toPascalCase(str);
+ if (pascal === pascal.toUpperCase()) {
+ return pascal.toLowerCase();
+ }
+ return pascal.charAt(0).toLowerCase() + pascal.slice(1);
+}
+
+function toKebabCase(str) {
+ return str
+ .replace(/([a-z])([A-Z])/g, "$1-$2")
+ .replace(/[-_\s]+/g, "-")
+ .toLowerCase();
+}
+
+function toSnakeCase(str) {
+ return str
+ .replace(/([a-z])([A-Z])/g, "$1_$2")
+ .replace(/[-_\s]+/g, "_")
+ .toLowerCase();
+}
+
+function toSpaceCase(str) {
+ return str
+ .replace(/([a-z])([A-Z])/g, "$1 $2")
+ .replace(/[-_]+/g, " ")
+ .toLowerCase();
+}
+
+function toTitleCase(str) {
+ if (str === str.toUpperCase()) {
+ return str;
+ }
+ return toSpaceCase(str)
+ .split(" ")
+ .map((word) => word.charAt(0).toUpperCase() + word.slice(1))
+ .join(" ");
+}
+
+// Function to get all files recursively, excluding specified directories
+function getAllFiles(dir, excludeDirs = [".git", "node_modules", ".cursor"]) {
+ const files = [];
+
+ function traverse(currentPath) {
+ const items = readdirSync(currentPath);
+ for (const item of items) {
+ const fullPath = join(currentPath, item);
+ const stats = statSync(fullPath);
+
+ if (stats.isDirectory()) {
+ if (!excludeDirs.includes(item)) {
+ traverse(fullPath);
+ }
+ } else {
+ // Only process text files (skip binary files)
+ const ext = extname(item).toLowerCase();
+ const textExtensions = [
+ ".ts",
+ ".tsx",
+ ".js",
+ ".jsx",
+ ".cjs",
+ ".mjs",
+ ".json",
+ ".md",
+ ".txt",
+ ".yaml",
+ ".yml",
+ ".html",
+ ".css",
+ ".scss",
+ ".less",
+ ".xml",
+ ".config",
+ ];
+
+ if (textExtensions.includes(ext) || !ext) {
+ files.push(fullPath);
+ }
+ }
+ }
+ }
+
+ traverse(dir);
+ return files;
+}
+
+// Function to replace all occurrences in a file
+function replaceInFile(filePath, replacements) {
+ try {
+ let content = readFileSync(filePath, "utf8");
+ let hasChanges = false;
+
+ for (const [oldText, newText] of replacements) {
+ if (content.includes(oldText)) {
+ content = content.replaceAll(oldText, newText);
+ hasChanges = true;
+ }
+ }
+
+ if (hasChanges) {
+ writeFileSync(filePath, content, "utf8");
+ console.log(`Updated: ${filePath}`);
+ }
+ } catch (error) {
+ // Skip files that can't be read as text
+ if (error.code !== "EISDIR") {
+ console.warn(`Warning: Could not process ${filePath}: ${error.message}`);
+ }
+ }
+}
+
+// Main setup function
+async function setup() {
+ console.log("🚀 Convex Component Setup\n");
+
+ const rl = readline.createInterface({
+ input: process.stdin,
+ output: process.stdout,
+ });
+
+ // Current directory name
+ const currentDirName = basename(process.cwd());
+
+ // Prompt for component name
+ const componentName = await new Promise((resolve) => {
+ rl.question(
+ `Enter your component name (e.g., "document search" or "RAG") [${currentDirName}]: `,
+ (answer) => {
+ resolve(answer.trim() || currentDirName);
+ }
+ );
+ });
+
+ if (!componentName.trim()) {
+ console.error("❌ Component name is required!");
+ process.exit(1);
+ }
+
+ // Prompt for npm package name
+ const npmPackageName = await new Promise((resolve) => {
+ rl.question(
+ `Enter your npm package name [${toKebabCase(componentName)}]: `,
+ (answer) => {
+ resolve(answer.trim() || toKebabCase(componentName));
+ }
+ );
+ });
+
+ // Prompt for repository name
+ const repoName = await new Promise((resolve) => {
+ rl.question(
+ `Enter your repository name [${toKebabCase(componentName)}]: `,
+ (answer) => {
+ resolve(answer.trim() || toKebabCase(componentName));
+ }
+ );
+ });
+
+ rl.close();
+
+ // Generate all case variations
+ const cases = {
+ pascal: toPascalCase(componentName),
+ camel: toCamelCase(componentName),
+ kebab: toKebabCase(componentName),
+ snake: toSnakeCase(componentName),
+ space: toSpaceCase(componentName),
+ title: toTitleCase(componentName),
+ };
+
+ console.log("\nš Component name variations:");
+ console.log(` PascalCase: ${cases.pascal}`);
+ console.log(` camelCase: ${cases.camel}`);
+ console.log(` kebab-case: ${cases.kebab}`);
+ console.log(` snake_case: ${cases.snake}`);
+ console.log(` space case: ${cases.space}`);
+ console.log(` Title Case: ${cases.title}`);
+ console.log(` NPM package: ${npmPackageName}`);
+ console.log(` Repository: ${repoName}\n`);
+
+ // Define all replacements
+ const replacements = [
+ // NPM package name
+ ["@convex-dev/agent", npmPackageName],
+
+ // Repository name
+ ["get-convex/agent", repoName],
+
+ // Component name variations
+ ["Agent", cases.pascal],
+ ["agent", cases.camel],
+ ["agent", cases.kebab],
+ ["agent", cases.snake],
+ ["agent", cases.space],
+ ["Agent", cases.title],
+ ];
+
+ console.log("š Finding files to update...");
+ const files = getAllFiles(".");
+ console.log(`Found ${files.length} files to process.\n`);
+
+ console.log("š Processing files...");
+ let processedCount = 0;
+
+ for (const file of files) {
+ replaceInFile(file, replacements);
+ processedCount++;
+ }
+
+ console.log(`\n✅ Setup complete! Processed ${processedCount} files.`);
+ console.log("\nš Next steps: check out README.md");
+
+ // Prompt to delete rename.mjs
+ const rl2 = readline.createInterface({
+ input: process.stdin,
+ output: process.stdout,
+ });
+
+ const shouldDelete = await new Promise((resolve) => {
+ rl2.question(
+ "\n🗑️ Would you like to delete the rename.mjs file now? (y/N): ",
+ (answer) => {
+ resolve(
+ answer.toLowerCase().trim() === "y" ||
+ answer.toLowerCase().trim() === "yes"
+ );
+ }
+ );
+ });
+
+ rl2.close();
+
+ if (shouldDelete) {
+ try {
+ const { unlinkSync } = await import("fs");
+ unlinkSync("./rename.mjs");
+ console.log("✅ rename.mjs has been deleted.");
+ } catch (error) {
+ console.error("ā Failed to delete rename.mjs:", error.message);
+ }
+ } else {
+ console.log("š rename.mjs kept. You can delete it manually when ready.");
+ }
+}
+
+// Run the setup
+setup().catch(console.error);
\ No newline at end of file
diff --git a/src/client/coreMemory.ts b/src/client/coreMemory.ts
new file mode 100644
index 00000000..56642847
--- /dev/null
+++ b/src/client/coreMemory.ts
@@ -0,0 +1,29 @@
+import type { ModelMessage } from "ai";
+import type { AgentComponent, RunActionCtx, RunQueryCtx } from "./types.js";
+
+/**
+ * Load core memory for a user and convert it into system messages.
+ * Returns an empty array if no userId provided or no core memory found.
+ */
+export async function fetchCoreMemoryMessages(
+ ctx: RunQueryCtx | RunActionCtx,
+ component: AgentComponent,
+ userId: string | undefined,
+): Promise<ModelMessage[]> {
+ if (!userId) return [];
+ const coreMemory = await ctx.runQuery(component.coreMemories.get, { userId });
+ const messages: ModelMessage[] = [];
+ if (coreMemory?.persona) {
+ messages.push({
+ role: "system",
+ content: `Core Memory - Agent Persona: ${coreMemory.persona}`,
+ });
+ }
+ if (coreMemory?.human) {
+ messages.push({
+ role: "system",
+ content: `Core Memory - Human Context: ${coreMemory.human}`,
+ });
+ }
+ return messages;
+}
diff --git a/src/client/index.ts b/src/client/index.ts
index e20b7887..64b50006 100644
--- a/src/client/index.ts
+++ b/src/client/index.ts
@@ -244,6 +244,14 @@ export class Agent<
* determines when to stop. Defaults to the AI SDK default.
*/
stopWhen?: StopCondition | Array>;
+ /**
+ * Whether to automatically include memory tools (memory_rethink, memory_append,
+ * memory_replace, message_search) when an embedding model is available.
+ * Memory tools allow the agent to manage core memory and search through
+ * message history using semantic search.
+ * Defaults to false.
+ */
+ memoryTools?: boolean;
/**
* @deprecated Use `languageEmbeddingModel` instead.
*/
@@ -1670,4 +1678,4 @@ function errorToString(error: unknown): string {
return error.message;
}
return String(error);
-}
+}
\ No newline at end of file
diff --git a/src/client/memory.ts b/src/client/memory.ts
new file mode 100644
index 00000000..3e83460b
--- /dev/null
+++ b/src/client/memory.ts
@@ -0,0 +1,179 @@
+import z from "zod/v3";
+import { createTool, type ToolCtx } from "./createTool.js";
+import type { AgentComponent } from "./types.js";
+import { embed } from "ai";
+import { getModelName } from "../shared.js";
+
+type Label = "human" | "persona";
+
+function labelSchema() {
+ return z
+ .enum(["human", "persona"])
+ .describe("The memory block to edit: either 'human' or 'persona'");
+}
+
+async function ensureCoreMemory(ctx: ToolCtx, component: AgentComponent) {
+ return await ctx.runMutation(component.coreMemories.getOrCreate, {
+ userId: ctx.userId,
+ persona: "",
+ human: "",
+ });
+}
+
+export function memoryTools(component: AgentComponent) {
+ const memory_rethink = createTool({
+ description:
+ "Completely rewrite a core memory block. Use for large reorganizations, not small edits.",
+ args: z
+ .object({
+ label: labelSchema(),
+ new_memory: z
+ .string()
+ .describe(
+ "The new memory contents with information integrated from existing memory blocks and context.",
+ ),
+ })
+ .required(),
+ handler: async (ctx, { label, new_memory }): Promise<string> => {
+ await ensureCoreMemory(ctx, component);
+ if (label === "persona") {
+ await ctx.runMutation(component.coreMemories.update, {
+ userId: ctx.userId,
+ persona: new_memory,
+ });
+ } else {
+ await ctx.runMutation(component.coreMemories.update, {
+ userId: ctx.userId,
+ human: new_memory,
+ });
+ }
+ return `Rewrote core memory '${label}'.`;
+ },
+ });
+
+ const memory_append = createTool({
+ description: "Append to the contents of core memory.",
+ args: z
+ .object({
+ label: labelSchema().describe("Section of the memory to edit."),
+ content: z
+ .string()
+ .describe(
+ "Content to append to the memory. All unicode (including emojis) are supported.",
+ ),
+ })
+ .required(),
+ handler: async (ctx, { label, content }): Promise<string> => {
+ await ensureCoreMemory(ctx, component);
+ await ctx.runMutation(component.coreMemories.append, {
+ userId: ctx.userId,
+ field: label as Label,
+ text: content,
+ });
+ return `Appended ${content.length} character(s) to '${label}'.`;
+ },
+ });
+
+ const memory_replace = createTool({
+ description:
+ "Replace the contents of core memory. To delete memories, use an empty string for new_content.",
+ args: z
+ .object({
+ label: labelSchema().describe("Section of the memory to edit."),
+ old_content: z
+ .string()
+ .describe("String to replace. Must be an exact match."),
+ new_content: z
+ .string()
+ .describe(
+ "Content to write to the memory. All unicode (including emojis) are supported.",
+ ),
+ })
+ .required(),
+ handler: async (ctx, { label, old_content, new_content }): Promise<string> => {
+ await ensureCoreMemory(ctx, component);
+ try {
+ const occurrences = await ctx.runMutation(component.coreMemories.replace, {
+ userId: ctx.userId,
+ field: label as Label,
+ oldContent: old_content,
+ newContent: new_content,
+ });
+ return occurrences > 0
+ ? `Replaced ${occurrences} occurrence(s) in '${label}'.`
+ : `No exact matches found in '${label}'.`;
+ } catch (error) {
+ if (error instanceof Error && error.message.includes("oldContent must be non-empty")) {
+ return "old_content must be non-empty for replacement.";
+ }
+ throw error;
+ }
+ },
+ });
+
+ const message_search = createTool({
+ description:
+ "Search archival memory (messages) using semantic (embedding-based) and text search with optional temporal filtering.",
+ args: z
+ .object({
+ query: z
+ .string()
+ .describe("String to search for using semantic similarity."),
+ top_k: z
+ .number()
+ .int()
+ .positive()
+ .optional()
+ .describe(
+ "Maximum number of results to return. Uses system default if not specified.",
+ )
+ })
+ .required(),
+ handler: async (
+ ctx,
+ { query, top_k },
+ ): Promise<string> => {
+ const limit = Math.min(Math.max(top_k ?? 10, 1), 100);
+ // Vector search if model available; otherwise fallback to text-only
+ let embedding: number[] | undefined;
+ let embeddingModel: string | undefined;
+ const textEmbeddingModel = ctx.agent?.options.textEmbeddingModel;
+ if (textEmbeddingModel) {
+ try {
+ const e = await embed({ model: textEmbeddingModel, value: query });
+ embedding = e.embedding;
+ embeddingModel = getModelName(textEmbeddingModel);
+ } catch {
+ embedding = undefined;
+ embeddingModel = undefined;
+ }
+ }
+
+ const messages = await ctx.runAction(component.messages.searchMessages, {
+ searchAllMessagesForUserId: ctx.userId ?? undefined,
+ threadId: ctx.userId ? undefined : ctx.threadId,
+ text: query,
+ textSearch: true,
+ vectorSearch: !!embedding,
+ embedding,
+ embeddingModel,
+ limit,
+ messageRange: { before: 0, after: 0 },
+ });
+
+ if (messages.length === 0) {
+ return "No matching messages found.";
+ }
+ return `Top ${messages.length} result(s):\n` + messages.map((m, i) => `${i + 1}. ${m.text}`).join("\n");
+ },
+ });
+
+ return {
+ memory_rethink,
+ memory_append,
+ memory_replace,
+ message_search,
+ } as const;
+}
+
+export type MemoryTools = ReturnType<typeof memoryTools>;
diff --git a/src/client/search.test.ts b/src/client/search.test.ts
index cfb2e198..2aab8158 100644
--- a/src/client/search.test.ts
+++ b/src/client/search.test.ts
@@ -551,6 +551,40 @@ describe("search.ts", () => {
expect(result.order).toBeUndefined();
expect(result.stepOrder).toBeUndefined();
});
+
+ it("should include core memory system messages when present", async () => {
+ // Reset mocks so we can control specific call order/values
+ vi.mocked(mockCtx.runQuery).mockReset();
+ vi.mocked(mockCtx.runAction).mockReset();
+ // No recent messages or search
+ const coreMemoryMessages: ModelMessage[] = [
+ { role: "system", content: "Core Memory - Agent Persona: Helpful" },
+ {
+ role: "system",
+ content: "Core Memory - Human Context: Prefers concise answers",
+ },
+ ];
+
+ const result = await fetchContextWithPrompt(mockCtx, components.agent, {
+ ...baseArgs,
+ userId: "userCore",
+ threadId: "threadCore",
+ prompt: undefined,
+ messages: undefined,
+ promptMessageId: undefined,
+ contextOptions: { recentMessages: 0 },
+ coreMemoryMessages,
+ });
+
+ expect(result.messages).toHaveLength(2);
+ expect(result.messages[0].role).toBe("system");
+ expect(String(result.messages[0].content)).toContain(
+ "Core Memory - Agent Persona: Helpful",
+ );
+ expect(String(result.messages[1].content)).toContain(
+ "Core Memory - Human Context: Prefers concise answers",
+ );
+ });
});
describe("fetchContextWithPrompt - Integration Tests", () => {
@@ -750,6 +784,7 @@ describe("search.ts", () => {
...args.inputPrompt,
...args.recent,
...args.search,
+ ...args.coreMemory,
...args.existingResponses,
];
});
@@ -780,6 +815,7 @@ describe("search.ts", () => {
inputPrompt: expect.arrayContaining([
expect.objectContaining({ content: "Custom prompt" }),
]),
+ coreMemory: expect.any(Array),
existingResponses: [], // No existing responses in this test
userId: "userContext",
threadId,
@@ -807,6 +843,7 @@ describe("search.ts", () => {
const contextHandler = vi.fn(async (ctx, args) => {
const allMessages = [
...args.search,
+ ...args.coreMemory,
...args.recent,
...args.inputMessages,
...args.inputPrompt,
@@ -854,7 +891,7 @@ describe("search.ts", () => {
content: "This is a custom system message added by contextHandler",
};
- return [customSystemMessage, ...args.recent, ...args.inputPrompt];
+ return [customSystemMessage, ...args.coreMemory, ...args.recent, ...args.inputPrompt];
});
const result = await fetchContextWithPrompt(ctx, components.agent, {
@@ -889,7 +926,7 @@ describe("search.ts", () => {
const contextHandler = vi.fn(async (ctx, args) => {
// Put search messages first, then recent, then prompt
- return [...args.search, ...args.recent, ...args.inputPrompt];
+ return [...args.search, ...args.coreMemory, ...args.recent, ...args.inputPrompt];
});
const result = await fetchContextWithPrompt(ctx, components.agent, {
@@ -914,6 +951,7 @@ describe("search.ts", () => {
expect.objectContaining({
search: expect.any(Array),
recent: expect.any(Array),
+ coreMemory: expect.any(Array),
inputPrompt: expect.arrayContaining([
expect.objectContaining({ content: "Tell me about cats" }),
]),
@@ -950,6 +988,7 @@ describe("search.ts", () => {
// Put existing responses first to test they're properly identified
return [
...args.recent,
+ ...args.coreMemory,
...args.existingResponses,
...args.inputPrompt,
];
@@ -972,6 +1011,7 @@ describe("search.ts", () => {
recent: expect.arrayContaining([
expect.objectContaining({ content: "Before prompt" }),
]),
+ coreMemory: expect.any(Array),
existingResponses: expect.arrayContaining([
expect.objectContaining({ content: "Existing response 1" }),
expect.objectContaining({ content: "Existing response 2" }),
diff --git a/src/client/search.ts b/src/client/search.ts
index 8a9be7ac..aa84b74a 100644
--- a/src/client/search.ts
+++ b/src/client/search.ts
@@ -110,6 +110,8 @@ export async function fetchRecentAndSearchMessages(
upToAndIncludingMessageId?: string;
contextOptions: ContextOptions;
getEmbedding?: GetEmbedding;
+ embedding?: number[];
+ embeddingModel?: string;
},
): Promise<{ recentMessages: MessageDoc[]; searchMessages: MessageDoc[] }> {
assert(args.userId || args.threadId, "Specify userId or threadId");
@@ -146,8 +148,8 @@ export async function fetchRecentAndSearchMessages(
throw new Error("searchUserMessages only works in an action");
}
let text = args.searchText;
- let embedding: number[] | undefined;
- let embeddingModel: string | undefined;
+ let embedding: number[] | undefined = args.embedding;
+ let embeddingModel: string | undefined = args.embeddingModel;
if (!text) {
if (targetMessageId) {
const targetMessage = recentMessages.find(
@@ -292,7 +294,7 @@ export async function embedMessages(
// Find the indexes of the messages that have text.
const textIndexes = messageTexts
.map((t, i) => (t ? i : undefined))
- .filter((i) => i !== undefined);
+ .filter((i): i is number => i !== undefined);
if (textIndexes.length === 0) {
return undefined;
}
@@ -424,7 +426,7 @@ export async function generateAndSaveEmbeddings(
/**
* Similar to fetchContextMessages, but also combines the input messages,
- * with search context, recent messages, input messages, then prompt messages.
+ * with search context, core memory, recent messages, input messages, then prompt messages.
* If there is a promptMessageId and prompt message(s) provided, it will splice
* the prompt messages into the history to replace the promptMessageId message,
* but still be followed by any existing messages that were in response to the
@@ -440,6 +442,7 @@ export async function fetchContextWithPrompt(
userId: string | undefined;
threadId: string | undefined;
agentName?: string;
+ coreMemoryMessages?: ModelMessage[];
} & Options &
Config,
): Promise<{
@@ -451,6 +454,7 @@ export async function fetchContextWithPrompt(
const promptArray = getPromptArray(args.prompt);
+ // Compute search text and embedding at the root and pass down to dependents
const searchText = promptArray.length
? extractText(promptArray.at(-1)!)
: args.promptMessageId
@@ -458,6 +462,40 @@ export async function fetchContextWithPrompt(
: args.messages?.at(-1)
? extractText(args.messages.at(-1)!)
: undefined;
+ let embedding: number[] | undefined;
+ let embeddingModel: string | undefined;
+ if (args.promptMessageId) {
+ const targetSearchFields = await ctx.runQuery(
+ component.messages.getMessageSearchFields,
+ { messageId: args.promptMessageId },
+ );
+ embedding = targetSearchFields.embedding;
+ embeddingModel = targetSearchFields.embeddingModel;
+ // If no embedding saved but we have text + model, embed once here
+ if (!embedding && targetSearchFields.text && textEmbeddingModel) {
+ const embedded = await embedMany(ctx, {
+ ...args,
+ userId,
+ threadId,
+ values: [targetSearchFields.text],
+ textEmbeddingModel,
+ });
+ embedding = embedded.embeddings[0];
+ embeddingModel = getModelName(textEmbeddingModel);
+
+ // TODO: save the embedding to the database
+ }
+ } else if (searchText && textEmbeddingModel) {
+ const embedded = await embedMany(ctx, {
+ ...args,
+ userId,
+ threadId,
+ values: [searchText],
+ textEmbeddingModel,
+ });
+ embedding = embedded.embeddings[0];
+ embeddingModel = getModelName(textEmbeddingModel);
+ }
// If only a messageId is provided, this will add that message to the end.
const { recentMessages, searchMessages } = await fetchRecentAndSearchMessages(
ctx,
@@ -468,23 +506,8 @@ export async function fetchContextWithPrompt(
targetMessageId: args.promptMessageId,
searchText,
contextOptions: args.contextOptions ?? {},
- getEmbedding: async (text) => {
- assert(
- textEmbeddingModel,
- "A textEmbeddingModel is required to be set on the Agent that you're doing vector search with",
- );
- return {
- embedding: (
- await embedMany(ctx, {
- ...args,
- userId,
- values: [text],
- textEmbeddingModel,
- })
- ).embeddings[0],
- textEmbeddingModel,
- };
- },
+ embedding,
+ embeddingModel,
},
);
@@ -524,19 +547,22 @@ export async function fetchContextWithPrompt(
const search = searchMessages
.map((m) => m.message)
- .filter((m) => !!m)
+ .filter((m): m is NonNullable<typeof m> => !!m)
.map(deserializeMessage);
const recent = prePromptDocs
.map((m) => m.message)
- .filter((m) => !!m)
+ .filter((m): m is NonNullable<typeof m> => !!m)
.map(deserializeMessage);
const inputMessages = messages.map(deserializeMessage);
const inputPrompt = promptArray.map(deserializeMessage);
const existingResponses = existingResponseDocs
.map((m) => m.message)
- .filter((m) => !!m)
+ .filter((m): m is NonNullable<typeof m> => !!m)
.map(deserializeMessage);
+ // Core memory provided by caller to separate concerns from search.
+ const coreMemoryMessages: ModelMessage[] = args.coreMemoryMessages ?? [];
+
let processedMessages = args.contextHandler
? await args.contextHandler(ctx, {
search,
@@ -544,11 +570,13 @@ export async function fetchContextWithPrompt(
inputMessages,
inputPrompt,
existingResponses,
+ coreMemory: coreMemoryMessages,
userId,
threadId,
})
: [
...search,
+ ...coreMemoryMessages,
...recent,
...inputMessages,
...inputPrompt,
diff --git a/src/client/start.ts b/src/client/start.ts
index cd452b07..d9d434f3 100644
--- a/src/client/start.ts
+++ b/src/client/start.ts
@@ -31,6 +31,8 @@ import { wrapTools, type ToolCtx } from "./createTool.js";
import type { Agent } from "./index.js";
import { omit } from "convex-helpers";
import { saveInputMessages } from "./saveInputMessages.js";
+import { memoryTools } from "./memory.js";
+import { fetchCoreMemoryMessages } from "./coreMemory.js";
export async function start<
T,
@@ -73,9 +75,10 @@ export async function start<
* If provided alongside prompt, the ordering will be:
* 1. system prompt
* 2. search context
- * 3. recent messages
- * 4. these messages
- * 5. prompt messages, including those already on the same `order` as
+ * 3. core memory
+ * 4. recent messages
+ * 5. these messages
+ * 6. prompt messages, including those already on the same `order` as
* the promptMessageId message, if provided.
*/
messages?: (ModelMessage | Message)[];
@@ -128,6 +131,12 @@ export async function start<
?.userId) ??
undefined;
+ const coreMemoryMessages = await fetchCoreMemoryMessages(
+ ctx,
+ component,
+ userId,
+ );
+
const context = await fetchContextWithPrompt(ctx, component, {
...opts,
userId,
@@ -135,6 +144,7 @@ export async function start<
messages: args.messages,
prompt: args.prompt,
promptMessageId: args.promptMessageId,
+ coreMemoryMessages,
});
const saveMessages = opts.storageOptions?.saveMessages ?? "promptAndOutput";
@@ -187,7 +197,14 @@ export async function start<
promptMessageId,
agent: opts.agentForToolCtx,
} satisfies ToolCtx;
- const tools = wrapTools(toolCtx, args.tools) as Tools;
+
+ // Conditionally add memory tools if enabled and embedding model is available
+ const toolsToWrap: (ToolSet | undefined)[] = [args.tools];
+ if (opts.memoryTools && opts.textEmbeddingModel) {
+ toolsToWrap.push(memoryTools(component));
+ }
+
+ const tools = wrapTools(toolCtx, ...toolsToWrap) as Tools;
const aiArgs = {
...opts.callSettings,
providerOptions: opts.providerOptions,
diff --git a/src/client/types.ts b/src/client/types.ts
index cf25eaea..a92146ee 100644
--- a/src/client/types.ts
+++ b/src/client/types.ts
@@ -90,6 +90,14 @@ export type Config = {
* but you can override this by providing a context handler. Here you can
* filter, modify, or enrich the context messages. If provided, the default
* ordering will not apply. This excludes the system message / instructions.
+ *
+ * Default ordering (when no contextHandler provided):
+ * 1) search results
+ * 2) core memory (system messages)
+ * 3) recent thread messages
+ * 4) input messages (args.messages)
+ * 5) input prompt (args.prompt)
+ * 6) existing responses (same order as promptMessageId)
*/
contextHandler?: ContextHandler;
/**
@@ -126,6 +134,14 @@ export type Config = {
* Defaults to 1.
*/
maxSteps?: number;
+ /**
+ * Whether to automatically include memory tools (memory_rethink, memory_append,
+ * memory_replace, message_search) when an embedding model is available.
+ * Memory tools allow the agent to manage core memory and search through
+ * message history using semantic search.
+ * Defaults to false.
+ */
+ memoryTools?: boolean;
};
/**
@@ -268,6 +284,10 @@ export type ContextHandler = (
* message.
*/
existingResponses: ModelMessage[];
+ /**
+ * Core memory messages for the user, injected as system messages.
+ */
+ coreMemory: ModelMessage[];
/**
* The user associated with the generation, if any.
*/
@@ -706,6 +726,14 @@ export type Options = {
* but you can override this by providing a context handler. Here you can
* filter, modify, or enrich the context messages. If provided, the default
* ordering will not apply. This excludes the system message / instructions.
+ *
+ * Default ordering (when no contextHandler provided):
+ * 1) search results
+ * 2) core memory (system messages)
+ * 3) recent thread messages
+ * 4) input messages (args.messages)
+ * 5) input prompt (args.prompt)
+ * 6) existing responses (same order as promptMessageId)
*/
contextHandler?: ContextHandler;
};
diff --git a/src/component/_generated/api.d.ts b/src/component/_generated/api.d.ts
index 7419e6dc..20496108 100644
--- a/src/component/_generated/api.d.ts
+++ b/src/component/_generated/api.d.ts
@@ -9,6 +9,7 @@
*/
import type * as apiKeys from "../apiKeys.js";
+import type * as coreMemories from "../coreMemories.js";
import type * as files from "../files.js";
import type * as messages from "../messages.js";
import type * as streams from "../streams.js";
@@ -33,6 +34,7 @@ import type {
*/
declare const fullApi: ApiFromModules<{
apiKeys: typeof apiKeys;
+ coreMemories: typeof coreMemories;
files: typeof files;
messages: typeof messages;
streams: typeof streams;
@@ -55,6 +57,56 @@ export type Mounts = {
issue: FunctionReference<"mutation", "public", { name?: string }, string>;
validate: FunctionReference<"query", "public", { apiKey: string }, boolean>;
};
+ coreMemories: {
+ append: FunctionReference<
+ "mutation",
+ "public",
+ { field: "persona" | "human"; text: string; userId?: string },
+ null
+ >;
+ get: FunctionReference<
+ "query",
+ "public",
+ { userId?: string },
+ null | {
+ _creationTime: number;
+ _id: string;
+ human: string;
+ persona: string;
+ userId?: string;
+ }
+ >;
+ getOrCreate: FunctionReference<
+ "mutation",
+ "public",
+ { human: string; persona: string; userId?: string },
+ null | {
+ _creationTime: number;
+ _id: string;
+ human: string;
+ persona: string;
+ userId?: string;
+ }
+ >;
+ remove: FunctionReference<"mutation", "public", { userId?: string }, null>;
+ replace: FunctionReference<
+ "mutation",
+ "public",
+ {
+ field: "persona" | "human";
+ newContent: string;
+ oldContent: string;
+ userId?: string;
+ },
+ number
+ >;
+ update: FunctionReference<
+ "mutation",
+ "public",
+ { human?: string; persona?: string; userId?: string },
+ null
+ >;
+ };
files: {
addFile: FunctionReference<
"mutation",
@@ -666,7 +718,7 @@ export type Mounts = {
"mutation",
"public",
     { messageIds: Array<Id<"messages">> },
-    Array<Id<"messages">>
+ any
>;
deleteByOrder: FunctionReference<
"mutation",
diff --git a/src/component/coreMemories.test.ts b/src/component/coreMemories.test.ts
new file mode 100644
index 00000000..e51aad05
--- /dev/null
+++ b/src/component/coreMemories.test.ts
@@ -0,0 +1,123 @@
+/// <reference types="vite/client" />
+
+import { convexTest } from "convex-test";
+import { describe, expect, test } from "vitest";
+import { api } from "./_generated/api.js";
+import schema from "./schema.js";
+import { modules } from "./setup.test.js";
+
+describe("coreMemories", () => {
+ test("create, get and update fields", async () => {
+ const t = convexTest(schema, modules);
+
+ const doc = await t.mutation(api.coreMemories.getOrCreate, {
+ userId: "u1",
+ persona: "p1",
+ human: "h1",
+ });
+
+ const got = await t.query(api.coreMemories.get, { userId: "u1" });
+ expect(got?._id).toBe(doc?._id);
+ expect(got?.persona).toBe("p1");
+ expect(got?.human).toBe("h1");
+
+ await t.mutation(api.coreMemories.update, {
+ userId: "u1",
+ persona: "p2",
+ });
+
+ const after = await t.query(api.coreMemories.get, { userId: "u1" });
+ expect(after?.persona).toBe("p2");
+ expect(after?.human).toBe("h1");
+ });
+
+  test("update supports empty strings to clear field contents", async () => {
+ const t = convexTest(schema, modules);
+
+ await t.mutation(api.coreMemories.getOrCreate, {
+ userId: "u2",
+ persona: "p1",
+ human: "h1",
+ });
+
+ await t.mutation(api.coreMemories.update, {
+ userId: "u2",
+ persona: "",
+ });
+
+ const got = await t.query(api.coreMemories.get, { userId: "u2" });
+ expect(got?.persona).toBe("");
+ expect(got?.human).toBe("h1");
+ });
+
+ test("append modifies specified field", async () => {
+ const t = convexTest(schema, modules);
+ const doc = await t.mutation(api.coreMemories.getOrCreate, {
+ userId: "u3",
+ persona: "",
+ human: "",
+ });
+
+ await t.mutation(api.coreMemories.append, {
+ userId: "u3",
+ field: "persona",
+ text: "A",
+ });
+ await t.mutation(api.coreMemories.append, {
+ userId: "u3",
+ field: "persona",
+ text: "B",
+ });
+ const got = await t.query(api.coreMemories.get, { userId: "u3" });
+ expect(got?._id).toBe(doc?._id);
+ expect(got?.persona).toBe("\nA\nB");
+ });
+
+ test("replace and remove operate correctly", async () => {
+ const t = convexTest(schema, modules);
+
+ await t.mutation(api.coreMemories.getOrCreate, {
+ userId: "u4",
+ persona: "",
+ human: "hello",
+ });
+
+ // replace "ll" with "XX"
+ const occurrences = await t.mutation(api.coreMemories.replace, {
+ userId: "u4",
+ field: "human",
+ oldContent: "ll",
+ newContent: "XX",
+ });
+ expect(occurrences).toBe(1);
+ let got = await t.query(api.coreMemories.get, { userId: "u4" });
+ expect(got?.human).toBe("heXXo");
+
+ // remove the entire document
+ await t.mutation(api.coreMemories.remove, {
+ userId: "u4",
+ });
+ got = await t.query(api.coreMemories.get, { userId: "u4" });
+ expect(got).toBe(null);
+ });
+
+ test("getOrCreate prevents duplicates per userId", async () => {
+ const t = convexTest(schema, modules);
+ const doc1 = await t.mutation(api.coreMemories.getOrCreate, {
+ userId: "u5",
+ persona: "p1",
+ human: "h1",
+ });
+ const doc2 = await t.mutation(api.coreMemories.getOrCreate, {
+ userId: "u5",
+ persona: "p2",
+ human: "h2",
+ });
+ // Should keep the same document (getOrCreate returns the existing one)
+ const got = await t.query(api.coreMemories.get, { userId: "u5" });
+ expect(got?._id).toBe(doc1?._id);
+ expect(doc2?._id).toBe(doc1?._id);
+ expect(got?.persona).toBe("p1"); // Should keep original values
+ expect(got?.human).toBe("h1");
+ });
+});
diff --git a/src/component/coreMemories.ts b/src/component/coreMemories.ts
new file mode 100644
index 00000000..4f482346
--- /dev/null
+++ b/src/component/coreMemories.ts
@@ -0,0 +1,117 @@
+import { assert } from "convex-helpers";
+import { mutation, query } from "./_generated/server.js";
+import { v } from "convex/values";
+import { vCoreMemory, vCoreMemoryDoc } from "../validators.js";
+
+export const get = query({
+ args: {
+ userId: v.optional(v.string()),
+ },
+ returns: v.union(v.null(), vCoreMemoryDoc),
+ handler: async (ctx, args) => {
+ return await ctx.db.query("coreMemories").withIndex("userId", (q) => q.eq("userId", args.userId)).first();
+ },
+});
+
+export const getOrCreate = mutation({
+ args: vCoreMemory,
+ returns: v.union(v.null(), vCoreMemoryDoc),
+ handler: async (ctx, args) => {
+ const doc = await ctx.db
+ .query("coreMemories")
+ .withIndex("userId", (q) => q.eq("userId", args.userId))
+ .first();
+ if (doc) {
+ assert(doc.userId === args.userId, `Core memory for user ${args.userId} already exists`);
+ return doc;
+ }
+ const id = await ctx.db.insert("coreMemories", args);
+ return await ctx.db.get(id);
+ },
+});
+
+export const update = mutation({
+ args: {
+ userId: v.optional(v.string()),
+ persona: v.optional(v.string()),
+ human: v.optional(v.string()),
+ },
+ returns: v.null(),
+ handler: async (ctx, args) => {
+ const doc = await ctx.db
+ .query("coreMemories")
+ .withIndex("userId", (q) => q.eq("userId", args.userId))
+ .first();
+ assert(doc, `Core memory for user ${args.userId} not found`);
+ await ctx.db.patch(doc._id, {
+ ...(args.persona !== undefined ? { persona: args.persona ?? undefined } : {}),
+ ...(args.human !== undefined ? { human: args.human ?? undefined } : {}),
+ });
+ return null;
+ },
+});
+
+export const append = mutation({
+ args: {
+ userId: v.optional(v.string()),
+ field: v.union(v.literal("persona"), v.literal("human")),
+ text: v.string(),
+ },
+ returns: v.null(),
+ handler: async (ctx, args) => {
+ const doc = await ctx.db
+ .query("coreMemories")
+ .withIndex("userId", (q) => q.eq("userId", args.userId))
+ .first();
+ assert(doc, `Core memory for user ${args.userId} not found`);
+ const base = (doc[args.field] as string | undefined) ?? "";
+ await ctx.db.patch(doc._id, { [args.field]: base + "\n" + args.text });
+ return null;
+ },
+});
+
+export const replace = mutation({
+ args: {
+ userId: v.optional(v.string()),
+ field: v.union(v.literal("persona"), v.literal("human")),
+ oldContent: v.string(),
+ newContent: v.string(),
+ },
+ returns: v.number(),
+ handler: async (ctx, args) => {
+ const doc = await ctx.db
+ .query("coreMemories")
+ .withIndex("userId", (q) => q.eq("userId", args.userId))
+ .first();
+ assert(doc, `Core memory for user ${args.userId} not found`);
+ const base = (doc[args.field] as string | undefined) ?? "";
+
+ if (args.oldContent === "") {
+ throw new Error("oldContent must be non-empty for replacement");
+ }
+
+ const occurrences = base.split(args.oldContent).length - 1;
+ const updated = occurrences > 0
+ ? base.split(args.oldContent).join(args.newContent)
+ : base;
+
+ await ctx.db.patch(doc._id, { [args.field]: updated });
+ return occurrences;
+ },
+});
+
+export const remove = mutation({
+ args: {
+ userId: v.optional(v.string()),
+ },
+ returns: v.null(),
+ handler: async (ctx, args) => {
+ const doc = await ctx.db
+ .query("coreMemories")
+ .withIndex("userId", (q) => q.eq("userId", args.userId))
+ .first();
+ assert(doc, `Core memory for user ${args.userId} not found`);
+ await ctx.db.delete(doc._id);
+ return null;
+ },
+});
diff --git a/src/component/messages.ts b/src/component/messages.ts
index 380e1022..6c1d7d9f 100644
--- a/src/component/messages.ts
+++ b/src/component/messages.ts
@@ -60,7 +60,6 @@ export async function deleteMessage(
export const deleteByIds = mutation({
args: { messageIds: v.array(v.id("messages")) },
- returns: v.array(v.id("messages")),
handler: async (ctx, args) => {
const deletedMessageIds = await Promise.all(
args.messageIds.map(async (id) => {
diff --git a/src/component/schema.ts b/src/component/schema.ts
index da319828..fd4737b6 100644
--- a/src/component/schema.ts
+++ b/src/component/schema.ts
@@ -137,15 +137,14 @@ export const schema = defineSchema({
parts: v.array(v.any()),
}).index("streamId_start_end", ["streamId", "start", "end"]),
- memories: defineTable({
- threadId: v.optional(v.id("threads")),
+ coreMemories: defineTable({
userId: v.optional(v.string()),
- memory: v.string(),
- embeddingId: v.optional(vVectorId),
+ persona: v.string(),
+ human: v.string(),
})
- .index("threadId", ["threadId"])
.index("userId", ["userId"])
- .index("embeddingId", ["embeddingId"]),
+ .index("persona", ["persona"])
+ .index("human", ["human"]),
files: defineTable({
storageId: v.string(),
diff --git a/src/component/vector/index.ts b/src/component/vector/index.ts
index d298d0d9..b3ec5c52 100644
--- a/src/component/vector/index.ts
+++ b/src/component/vector/index.ts
@@ -132,7 +132,7 @@ export async function insertVector(
});
}
-export function searchVectors(
+export async function searchVectors(
ctx: ActionCtx,
vector: number[],
args: {
@@ -146,23 +146,42 @@ export function searchVectors(
},
) {
const tableName = getVectorTableName(args.dimension);
- return ctx.vectorSearch(tableName, "vector", {
- vector,
- // TODO: to support more tables, add more "OR" clauses for each.
- filter: (q) =>
- args.searchAllMessagesForUserId
- ? q.eq("model_table_userId", [
- args.model,
- args.table,
- args.searchAllMessagesForUserId,
- ])
- : q.eq("model_table_threadId", [
- args.model,
- args.table,
- args.threadId!,
- ]),
- limit: args.limit,
- });
+
+ let results;
+ if (args.searchAllMessagesForUserId) {
+ results = await ctx.vectorSearch(tableName, "vector", {
+ vector,
+ filter: (q) =>
+ q.eq("model_table_userId", [
+ args.model,
+ args.table,
+ args.searchAllMessagesForUserId!,
+ ]),
+ limit: args.limit,
+ });
+ } else if (args.userId && args.table === "memories") {
+ // Memories vector search should be keyed by userId, not threadId
+ results = await ctx.vectorSearch(tableName, "vector", {
+ vector,
+ filter: (q) => q.eq("model_table_userId", [args.model, args.table, args.userId!]),
+ limit: args.limit,
+ });
+ } else if (args.threadId) {
+ results = await ctx.vectorSearch(tableName, "vector", {
+ vector,
+ filter: (q) =>
+ q.eq("model_table_threadId", [args.model, args.table, args.threadId!]),
+ limit: args.limit,
+ });
+ } else {
+ // No valid threadId/userId to filter on: search broadly (caller narrows results later).
+ results = await ctx.vectorSearch(tableName, "vector", {
+ vector,
+ limit: args.limit,
+ });
+ }
+
+ return results;
}
export const updateBatch = mutation({
diff --git a/src/react/useUIMessages.ts b/src/react/useUIMessages.ts
index 7872763d..eeebbe32 100644
--- a/src/react/useUIMessages.ts
+++ b/src/react/useUIMessages.ts
@@ -17,7 +17,7 @@ import type {
} from "convex/server";
import { useMemo } from "react";
import type { SyncStreamsReturnValue } from "../client/types.js";
-import type { MessageStatus, StreamArgs } from "../validators.js";
+import type { StreamArgs } from "../validators.js";
import type { StreamQuery } from "./types.js";
import { type UIMessage, type UIStatus } from "../UIMessages.js";
import { sorted } from "../shared.js";
diff --git a/src/validators.ts b/src/validators.ts
index 2cc95cae..34385f91 100644
--- a/src/validators.ts
+++ b/src/validators.ts
@@ -511,3 +511,19 @@ export const vThreadDoc = v.object({
status: vThreadStatus,
});
 export type ThreadDoc = Infer<typeof vThreadDoc>;
+
+export const vCoreMemory = v.object({
+ userId: v.optional(v.string()),
+ persona: v.string(),
+ human: v.string(),
+});
+export type CoreMemory = Infer<typeof vCoreMemory>;
+
+export const vCoreMemoryDoc = v.object({
+ _id: v.string(),
+ _creationTime: v.number(),
+ userId: v.optional(v.string()),
+ persona: v.string(),
+ human: v.string(),
+});
+export type CoreMemoryDoc = Infer<typeof vCoreMemoryDoc>;
diff --git a/test-assets/mornin_cat.jpeg b/test-assets/mornin_cat.jpeg
new file mode 100644
index 00000000..af7b7994
Binary files /dev/null and b/test-assets/mornin_cat.jpeg differ