diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 52f00e6..5842d8a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -6,7 +6,7 @@ on: pull_request: jobs: - # ─── Unit + SeekDB tests across Node versions ──────────────────────── + # ─── Unit + Integration + Regression tests ─────────────────────────── test: name: Test (Node ${{ matrix.node-version }}) runs-on: ubuntu-latest @@ -33,9 +33,120 @@ jobs: - name: Unit tests run: npm test - - name: SeekDB tests (auto-skip if bindings unavailable) - run: npx vitest run tests/seekdb-store.test.ts tests/seekdb-integration.test.ts - continue-on-error: true + # ─── SeekDB tests (macOS ARM64 — bindings bundled as darwin-arm64) ─── + test-seekdb: + name: SeekDB (macOS ARM64) + runs-on: macos-14 + env: + # Propagate library path to ALL steps including vitest subprocesses + DYLD_LIBRARY_PATH: ${{ github.workspace }}/node_modules/@seekdb/js-bindings:${{ github.workspace }}/node_modules/@seekdb/js-bindings/libs + + steps: + - uses: actions/checkout@v4 + + - uses: actions/setup-node@v4 + with: + node-version: 20 + + - name: Install dependencies + run: npm install + + - name: Verify SeekDB native binding loads + run: | + echo "DYLD_LIBRARY_PATH=$DYLD_LIBRARY_PATH" + node -e " + const seekdb = require('@seekdb/js-bindings'); + seekdb.getNativeBindingAsync().then(b => { + console.log('SeekDB binding loaded OK'); + }).catch(e => { + console.error('FAILED:', e.message); + process.exit(1); + }); + " + + - name: Run SeekDB tests + run: npm run test:seekdb + timeout-minutes: 5 + + # ─── SeekDB tests (Linux x64 — download bindings + install libaio) ── + test-seekdb-linux: + name: SeekDB (Linux x64) + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - uses: actions/setup-node@v4 + with: + node-version: 20 + + - name: Install system dependencies + run: | + sudo apt-get update && sudo apt-get install -y libaio1t64 || sudo apt-get install -y libaio1 || true + # Ubuntu 24.04 ships 
libaio.so.1t64 but SeekDB expects libaio.so.1 + if [ ! -f /usr/lib/x86_64-linux-gnu/libaio.so.1 ] && [ -f /usr/lib/x86_64-linux-gnu/libaio.so.1t64 ]; then + sudo ln -s libaio.so.1t64 /usr/lib/x86_64-linux-gnu/libaio.so.1 + echo "Created libaio.so.1 symlink" + fi + + - name: Install dependencies + run: npm install + + - name: Download SeekDB native bindings + run: | + node -e " + // Use filesystem path to bypass package.json exports restriction + const downloadPath = require.resolve('@seekdb/js-bindings').replace('seekdb.js', 'download.js'); + const { ensureBindingsDownloaded } = require(downloadPath); + ensureBindingsDownloaded().then(dir => { + console.log('Downloaded to:', dir); + const fs = require('fs'); + const path = require('path'); + // Verify libseekdb.so was extracted (download.js extractAllTo should handle this) + const soPath = path.join(dir, 'libseekdb.so'); + if (!fs.existsSync(soPath)) { + console.log('libseekdb.so not found, extracting from zip...'); + const zipPath = path.join(dir, 'seekdb-js-bindings-linux-x64.zip'); + if (fs.existsSync(zipPath)) { + const AdmZip = require('adm-zip'); + const zip = new AdmZip(zipPath); + zip.extractEntryTo('libseekdb.so', dir, false, true); + console.log('Extracted libseekdb.so'); + } + } + const files = fs.readdirSync(dir).filter(f => f.endsWith('.so') || f.endsWith('.node')); + console.log('Native files:', files); + if (!files.includes('seekdb.node')) { console.error('seekdb.node missing!'); process.exit(1); } + if (!files.includes('libseekdb.so')) { console.error('libseekdb.so missing!'); process.exit(1); } + }).catch(e => { + console.error('Download failed:', e.message); + process.exit(1); + }); + " + + - name: Verify SeekDB binding loads + run: | + # Find the cache dir dynamically (contains seekdb.node + libseekdb.so) + CACHE_DIR=$(find ~/.seekdb -name 'seekdb.node' -exec dirname {} \; 2>/dev/null | head -1) + echo "Cache dir: $CACHE_DIR" + ls -la "$CACHE_DIR"/*.so "$CACHE_DIR"/*.node 2>/dev/null || true + 
export LD_LIBRARY_PATH="$CACHE_DIR:$LD_LIBRARY_PATH" + node -e " + const seekdb = require('@seekdb/js-bindings'); + seekdb.getNativeBindingAsync().then(b => { + console.log('SeekDB binding loaded OK'); + }).catch(e => { + console.error('FAILED:', e.message); + process.exit(1); + }); + " + + - name: Run SeekDB tests + run: | + CACHE_DIR=$(find ~/.seekdb -name 'seekdb.node' -exec dirname {} \; 2>/dev/null | head -1) + export LD_LIBRARY_PATH="$CACHE_DIR:$LD_LIBRARY_PATH" + npm run test:seekdb + timeout-minutes: 5 # ─── Build verification ────────────────────────────────────────────── build: diff --git a/CHANGELOG.md b/CHANGELOG.md index c559f94..94a87d3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,48 @@ # Changelog +## v0.3.0 — Full Python Parity (2026-04-03) + +Complete TypeScript replication of Python `oceanbase/powermem`. 89 source files, 10 modules, 504 tests. + +### Directory Restructure +- Reorganized from flat `provider/native/` to Python-matching module layout +- Tests restructured into `unit/`, `integration/`, `regression/`, `e2e/`, `bdd/` +- Deleted legacy `src/server/` (Python subprocess bridge) + +### New Modules +- **Config system**: `configs.ts` (Zod schemas), `config-loader.ts` (env auto-detection), `settings.ts`, `version.ts` +- **Storage module**: `VectorStoreFactory` (provider registry), `StorageAdapter`, typed configs +- **Integrations**: `embeddings/`, `llm/`, `rerank/` base interfaces + factory pattern +- **Intelligence**: `MemoryOptimizer` (exact + semantic dedup, LLM compression), `ImportanceEvaluator`, `IntelligenceManager` +- **Prompts**: importance evaluation, optimization, query rewrite, user profile, graph extraction/update/deletion +- **Utils**: `filter-parser`, `stats` (byType, growth trend, age distribution), `io` (JSON/CSV) +- **CLI**: `pmem config show|validate|test`, `pmem memory add|search|list|get|delete|delete-all`, `pmem stats`, `pmem manage backup|restore|cleanup`, `pmem shell` (interactive REPL) +- **Dashboard**: 
Express server + HTML SPA (overview stats/charts, memories table, settings, dark/light theme) +- **Agent module**: `AgentMemory`, 7 enums, scope/permission/collaboration strategy interfaces, `ScopeController`, `PermissionController`, `AgentFactory` +- **User memory**: `UserMemory` (profile-aware search), `QueryRewriter`, `SQLiteUserProfileStore` +- **Graph store**: `GraphStoreBase` interface, graph prompts + +### SeekDB Improvements +- `embeddingFunction: null` to disable auto-vectorization (pass pre-computed embeddings) +- Base64-encoded metadata to bypass C engine JSON parser limitations +- Sequential test execution to avoid concurrent embedded engine init +- CI: macOS ARM64 (bundled bindings) + Linux x64 (S3 download + libaio symlink) + +### Tests (504 total) +- 370 unit/integration/regression (Node 18/20/22) +- 63 SeekDB (macOS ARM64 + Linux x64) +- 21 e2e with real Ollama models +- 50 BDD (19 CLI + 16 dashboard UI + 15 data correctness) + +### CI (7 jobs, all green) +- Test Node 18/20/22 (Ubuntu) +- SeekDB macOS ARM64 (macos-14) +- SeekDB Linux x64 (Ubuntu, S3 download + libaio1t64 symlink) +- Build (CJS + ESM + DTS + CLI) +- E2E Ollama (Ubuntu) + +--- + ## v0.2.0 — Feature Enhancement (2026-04-01) ### P0 — API Layer diff --git a/README.md b/README.md index 907d135..cbdb2ae 100644 --- a/README.md +++ b/README.md @@ -1,161 +1,173 @@ # PowerMem TypeScript SDK -**TypeScript SDK for [PowerMem](https://github.com/oceanbase/powermem) — persistent memory for AI agents and applications.** +**Pure TypeScript memory system for AI agents — a full port of [PowerMem](https://github.com/oceanbase/powermem).** -[![npm version](https://img.shields.io/npm/v/powermem-ts)](https://www.npmjs.com/package/powermem-ts) [![Node.js 18+](https://img.shields.io/badge/node-18+-green.svg)](https://nodejs.org/) [![License Apache 2.0](https://img.shields.io/badge/License-Apache_2.0-blue.svg)](LICENSE) -PowerMem combines vector, full-text, and graph retrieval with LLM-driven memory 
extraction and Ebbinghaus-style time decay. This package provides a TypeScript/Node.js SDK that manages the Python-based PowerMem server automatically — zero manual setup required. +PowerMem combines vector-based semantic search with LLM-driven intelligent memory extraction, Ebbinghaus time-decay, and multi-tenant isolation. This package is a complete TypeScript reimplementation — **zero Python dependency**. ## Features -- **Zero-config server management** — automatically installs Python environment, creates virtualenv, and starts `powermem-server` as a subprocess -- **Direct connect mode** — connect to an existing PowerMem server without spawning a subprocess -- **Full API coverage** — add, search, get, update, delete, batch add, and more -- **Type-safe** — complete TypeScript type definitions with strict mode -- **Dual format** — ships both ESM and CommonJS builds - -## Quick start - -### Install +- **Pure TypeScript** — no Python, no subprocess, no server needed +- **Dual storage backend** — SQLite (default) or SeekDB (HNSW-indexed, OceanBase compatible) +- **LLM-driven intelligent add** — extracts facts, deduplicates, merges with existing memories +- **Semantic search** — cosine similarity over real embedding vectors +- **Pluggable providers** — any LangChain.js embedding/LLM provider (OpenAI, Qwen, Ollama, Anthropic, etc.) 
+- **Multi-tenant** — userId/agentId/runId isolation on all operations +- **CLI** — `pmem` command for memory CRUD, stats, backup/restore, interactive shell +- **Dashboard** — Express-based web dashboard with stats, charts, memory management +- **Agent memory** — scope/permission/collaboration management for multi-agent systems +- **User profiles** — profile extraction, profile-aware search, query rewriting +- **Ebbinghaus decay** — time-based memory score adjustment with access reinforcement +- **504 tests** — unit, integration, regression, e2e (Ollama), SeekDB (macOS + Linux), BDD + +## Quick Start ```bash npm install powermem-ts ``` -### Configure - -Copy `.env.example` to `.env` and fill in your LLM and Embedding API keys: - -```bash -cp .env.example .env -``` - -Required fields in `.env`: +### Simplest usage (env vars) ```env -# Database (default: embedded OceanBase via SeekDB, no extra setup needed) -DATABASE_PROVIDER=oceanbase -OCEANBASE_HOST= -OCEANBASE_PATH=./seekdb_data - -# LLM (required) -LLM_PROVIDER=qwen -LLM_API_KEY=your_api_key_here -LLM_MODEL=qwen-plus - -# Embedding (required) -EMBEDDING_PROVIDER=qwen -EMBEDDING_API_KEY=your_api_key_here -EMBEDDING_MODEL=text-embedding-v4 -EMBEDDING_DIMS=1536 +# .env +EMBEDDING_PROVIDER=openai +EMBEDDING_API_KEY=sk-... +EMBEDDING_MODEL=text-embedding-3-small +LLM_PROVIDER=openai +LLM_API_KEY=sk-... +LLM_MODEL=gpt-4o-mini ``` -See [.env.example](.env.example) for all available options. Supported LLM providers: `qwen`, `openai`, `siliconflow`, `ollama`, `vllm`, `anthropic`, `deepseek`. Supported embedding providers: `qwen`, `openai`, `siliconflow`, `huggingface`, `ollama`. 
- -### Prerequisites - -- **Node.js** >= 18.0.0 -- **Python** >= 3.11 (used internally to run the PowerMem server) - -### Usage - ```typescript import { Memory } from 'powermem-ts'; -// One-time init: installs Python env + powermem package (idempotent) -await Memory.init(); - -// Or specify a version: -// await Memory.init({ powermemVersion: 'powermem==1.0.0' }); +const memory = await Memory.create(); // reads from .env -// Create instance (auto-starts the server) -const memory = await Memory.create(); +await memory.add('User likes coffee', { userId: 'user1' }); +const results = await memory.search('preferences', { userId: 'user1' }); +console.log(results.results); -// Add a memory -const result = await memory.add('User likes coffee', { userId: 'user123' }); -console.log('Added:', result.memories); +await memory.close(); +``` -// Semantic search -const hits = await memory.search('user preferences', { userId: 'user123', limit: 5 }); -console.log('Results:', hits.results); +### Explicit LangChain instances -// List all memories -const all = await memory.getAll({ userId: 'user123' }); -console.log('Total:', all.total); +```typescript +import { Memory } from 'powermem-ts'; +import { OpenAIEmbeddings, ChatOpenAI } from '@langchain/openai'; -// Clean up (stops the server subprocess) -await memory.close(); +const memory = await Memory.create({ + embeddings: new OpenAIEmbeddings({ model: 'text-embedding-3-small' }), + llm: new ChatOpenAI({ model: 'gpt-4o-mini' }), +}); ``` -### Connect to an existing server - -If you already have a PowerMem server running (e.g. 
via `powermem-server` or Docker), skip the auto-start and connect directly: +### SeekDB backend (HNSW-indexed) ```typescript -import { Memory } from 'powermem-ts'; +const memory = await Memory.create({ + embeddings: myEmbeddings, + seekdb: { path: './seekdb_data', dimension: 1536 }, +}); +``` + +### Connect to existing PowerMem server +```typescript const memory = await Memory.create({ serverUrl: 'http://127.0.0.1:19527', - apiKey: process.env.POWERMEM_API_KEY, // optional }); +``` -await memory.add('Direct connect mode test'); -await memory.close(); +## CLI + +```bash +npx pmem memory add "User likes coffee" --user-id user1 +npx pmem memory search "preferences" --user-id user1 +npx pmem memory list --user-id user1 --sort created_at --order desc +npx pmem stats --json +npx pmem config show +npx pmem manage backup --output backup.json +npx pmem shell # Interactive REPL ``` ## API -### `Memory.init(options?)` +### Memory facade -One-time setup. Creates a Python virtualenv at `~/.powermem/venv/` and installs the `powermem` package. Idempotent — skips if already installed. 
+| Method | Description | +|--------|-------------| +| `Memory.create(options?)` | Create instance (NativeProvider default, HttpProvider with serverUrl) | +| `add(content, options?)` | Add memory (optional LLM fact extraction with `infer: true`) | +| `search(query, options?)` | Semantic search with scores, threshold, limit | +| `get(id)` | Get by ID | +| `update(id, content)` | Update content (auto re-embeds) | +| `delete(id)` | Delete by ID | +| `getAll(options?)` | List with pagination, sorting, filtering | +| `count(options?)` | Count with optional filters | +| `addBatch(items, options?)` | Batch add | +| `deleteAll(options?)` | Bulk delete with filters | +| `reset()` | Clear all | +| `close()` | Release resources | + +### Configuration + +| Option | Description | +|--------|-------------| +| `embeddings` | LangChain Embeddings instance | +| `llm` | LangChain BaseChatModel instance | +| `dbPath` | SQLite file path (default: `~/.powermem/memories.db`) | +| `seekdb` | SeekDB config: `{ path, database?, dimension?, distance? 
}` | +| `serverUrl` | Connect to existing server (HttpProvider mode) | +| `customFactExtractionPrompt` | Override LLM fact extraction prompt | +| `customUpdateMemoryPrompt` | Override LLM action decision prompt | +| `fallbackToSimpleAdd` | Fall back to simple add when LLM extracts nothing | +| `reranker` | Async function to re-score search results | +| `enableDecay` | Enable Ebbinghaus time-based score decay | +| `decayWeight` | Decay influence weight (0-1, default 0.3) | + +## Architecture -| Option | Type | Default | Description | -|--------|------|---------|-------------| -| `homeDir` | `string` | `~/.powermem/` | PowerMem home directory | -| `pythonPath` | `string` | `python3` / `python` | Path to Python 3.11+ | -| `powermemVersion` | `string` | `powermem` | pip package specifier | -| `pipArgs` | `string[]` | `[]` | Extra arguments for `pip install` | -| `verbose` | `boolean` | `true` | Print progress logs | +``` +src/ +├── core/ Memory facade, NativeProvider, HttpProvider, Inferrer +├── storage/ VectorStore interface, SQLiteStore, SeekDBStore, factory, adapter +├── integrations/ Embeddings, LLM, Rerank — base interfaces + factories +├── intelligence/ Ebbinghaus decay, MemoryOptimizer, ImportanceEvaluator +├── prompts/ All LLM prompt templates (fact extraction, update, importance, graph) +├── utils/ Cosine search, Snowflake IDs, filter parser, stats, IO +├── cli/ Commander.js CLI (config, memory, stats, manage, shell) +├── dashboard/ Express server + HTML dashboard +├── agent/ AgentMemory, scope/permission/collaboration management +└── user-memory/ UserMemory, query rewrite, SQLite profile storage +``` -### `Memory.create(options?)` +89 source files, 40 test files. See [docs/architecture.md](docs/architecture.md) for details. -Creates a `Memory` instance. Automatically starts the server if no `serverUrl` is provided. 
+## Testing -| Option | Type | Default | Description | -|--------|------|---------|-------------| -| `serverUrl` | `string` | — | Connect to existing server (skips auto-start) | -| `apiKey` | `string` | — | API key for authentication | -| `envFile` | `string` | `.env` | Path to `.env` file | -| `port` | `number` | `19527` | Server port | -| `startupTimeout` | `number` | `30000` | Max wait time (ms) for server startup | -| `init` | `InitOptions` | — | Options passed to `Memory.init()` | +```bash +npm test # 370 unit/integration/regression tests +npm run test:e2e # 21 e2e tests (requires Ollama + nomic-embed-text + qwen2.5:0.5b) +npm run test:seekdb # 63 SeekDB tests (requires @seekdb/js-bindings) +npx vitest run tests/bdd/ # 50 BDD tests (CLI + dashboard UI) +``` -### Instance methods +CI runs 7 jobs: Node 18/20/22 unit tests, SeekDB on macOS ARM64, SeekDB on Linux x64, build verification, e2e with Ollama. -| Method | Description | -|--------|-------------| -| `add(content, options?)` | Add a memory. Options: `userId`, `agentId`, `runId`, `metadata`, `filters`, `infer` | -| `search(query, options?)` | Semantic search. Options: `userId`, `agentId`, `runId`, `filters`, `limit` | -| `get(memoryId)` | Get a single memory by ID. Returns `null` if not found | -| `update(memoryId, content, options?)` | Update memory content and/or metadata | -| `delete(memoryId)` | Delete a single memory | -| `getAll(options?)` | List memories. 
Options: `userId`, `agentId`, `limit`, `offset` | -| `addBatch(memories, options?)` | Batch add multiple memories | -| `deleteAll(options?)` | Delete all memories matching filter | -| `reset()` | Delete all memories | -| `close()` | Close the connection and stop the server subprocess | +## Dependencies -## Docs +**Runtime**: `better-sqlite3`, `@langchain/core`, `commander`, `express`, `zod`, `dotenv` -- [Architecture](docs/architecture.md) — design, project structure, flows, and error handling +**Peer (install what you need)**: `@langchain/openai`, `@langchain/anthropic`, `@langchain/ollama`, `seekdb` ## Related -- [PowerMem](https://github.com/oceanbase/powermem) — the core Python project (SDK, CLI, HTTP API, MCP Server) -- [PowerMem Documentation](https://github.com/oceanbase/powermem/tree/main/docs) — architecture, configuration, and guides +- [PowerMem](https://github.com/oceanbase/powermem) — Original Python implementation +- [SeekDB](https://github.com/oceanbase/seekdb-js) — OceanBase embedded vector database ## License -Apache License 2.0 — see [LICENSE](LICENSE). +Apache License 2.0 diff --git a/docs/architecture.md b/docs/architecture.md index ce4449f..f4a9272 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -2,388 +2,142 @@ ## 1. Overview -PowerMem TS SDK is a pure TypeScript memory system for AI agents. It stores, retrieves, and semantically searches memories using vector embeddings, with optional LLM-driven intelligent memory extraction. +Pure TypeScript port of [oceanbase/powermem](https://github.com/oceanbase/powermem). 89 source files across 10 modules, matching the Python directory layout. Zero Python dependency. 
-The SDK operates in two modes: +Two storage backends: +- **SQLite** (default) — `better-sqlite3`, brute-force cosine similarity +- **SeekDB** (optional) — OceanBase embedded engine, HNSW-indexed vector search -- **Native mode** (default): Pure TypeScript — SQLite storage, LangChain.js for embeddings/LLM, cosine similarity search. Zero Python dependency. -- **HTTP mode** (`serverUrl`): Connects to an existing powermem-server via HTTP. Retained for backward compatibility. - -## 2. Core design concept - -### Provider abstraction - -The SDK is built around a single architectural idea: **the `MemoryProvider` interface decouples the public API from the implementation**. - -``` -┌──────────────────────────────────────────┐ -│ Memory (Facade) │ ← User-facing, never changes -│ - create() / close() │ -│ - add / search / get / update / ... │ -├──────────────────────────────────────────┤ -│ MemoryProvider (interface) │ ← The contract -├──────────────────┬───────────────────────┤ -│ NativeProvider │ HttpProvider │ -│ Default │ Backward compat │ -│ Pure TS │ Remote server │ -└──────────────────┴───────────────────────┘ -``` - -`Memory.create()` inspects options and picks the right provider. User code never references a provider directly. This made it possible to replace the entire Python backend with native TypeScript without changing a single line of user-facing API. - -### Pluggable LLM/Embedding via LangChain.js - -Rather than hardcoding API clients for each provider (OpenAI, Qwen, Anthropic, etc.), the SDK accepts LangChain.js base types: - -- `Embeddings` from `@langchain/core/embeddings` -- `BaseChatModel` from `@langchain/core/language_models/chat_models` - -Users plug in any LangChain-compatible provider. The SDK also auto-creates instances from `.env` configuration for zero-config usage. - -### Faithful port of Python powermem - -The NativeProvider is a direct port of the [oceanbase/powermem](https://github.com/oceanbase/powermem) Python implementation. 
Key behaviors preserved exactly: - -- **Two-step intelligent add** (`infer=true`): extract facts via LLM → search for similar existing memories → ask LLM to decide ADD/UPDATE/DELETE/NONE → execute actions -- **Same LLM prompts**: `FACT_RETRIEVAL_PROMPT` and `DEFAULT_UPDATE_MEMORY_PROMPT` copied verbatim -- **Snowflake IDs**: 64-bit IDs matching Python's SnowflakeIDGenerator, serialized as strings -- **Cosine similarity**: Same algorithm, brute-force over filtered records -- **SQLite storage**: Same schema pattern (id, vector as JSON, payload as JSON) -- **MD5 content hashing** for deduplication -- **Access control**: userId/agentId check on get operations - -## 3. Architecture layers — NativeProvider - -``` -NativeProvider - │ - ├── Embedder Wraps LangChain Embeddings - │ └── embedQuery / embedDocuments - │ - ├── Inferrer Two-step LLM memory extraction - │ ├── extractFacts() → FACT_RETRIEVAL_PROMPT → ["fact1", "fact2"] - │ └── decideActions() → UPDATE_MEMORY_PROMPT → ADD/UPDATE/DELETE/NONE - │ - ├── SQLiteStore SQLite via better-sqlite3 - │ ├── insert / getById / update / remove - │ ├── list (filtered, paginated) - │ └── search (load vectors → cosine similarity → rank) - │ - ├── SnowflakeIDGenerator 64-bit monotonic IDs (BigInt → string) - │ - └── cosineSimilarity() Pure math, no dependencies -``` - -## 4. Project structure - -``` -powermem-ts/ -├── package.json -├── tsconfig.json -├── tsup.config.ts -├── vitest.config.ts -├── .env.example -├── src/ -│ ├── index.ts # Public exports -│ ├── memory.ts # Memory facade -│ ├── types/ -│ │ ├── index.ts # Re-exports -│ │ ├── memory.ts # MemoryRecord, AddParams, SearchParams, etc. -│ │ ├── options.ts # MemoryOptions (embeddings, llm, dbPath, serverUrl) -│ │ └── responses.ts # AddResult, SearchResult, etc. 
-│ ├── errors/ -│ │ └── index.ts # PowerMemError hierarchy -│ ├── provider/ -│ │ ├── index.ts # MemoryProvider interface -│ │ ├── http-provider.ts # HTTP implementation (backward compat) -│ │ └── native/ -│ │ ├── index.ts # NativeProvider (main class) -│ │ ├── vector-store.ts # VectorStore interface (abstract) -│ │ ├── store.ts # SQLiteStore (VectorStore implementation) -│ │ ├── embedder.ts # LangChain Embeddings wrapper -│ │ ├── inferrer.ts # LLM fact extraction + action decision -│ │ ├── prompts.ts # LLM prompt templates (from Python) -│ │ ├── search.ts # Cosine similarity -│ │ ├── decay.ts # Ebbinghaus memory decay -│ │ ├── snowflake.ts # Snowflake ID generator -│ │ └── provider-factory.ts # Env-based auto-creation -│ ├── server/ # (Legacy) Python server management -│ │ ├── python-env.ts -│ │ └── server-manager.ts -│ └── utils/ -│ ├── platform.ts # Cross-platform path helpers -│ ├── case-convert.ts # camelCase ↔ snake_case -│ └── env.ts # .env file loader -├── tests/ -│ ├── mocks.ts # MockEmbeddings, MockLLM -│ ├── snowflake.test.ts # Unit: ID generation -│ ├── search.test.ts # Unit: cosine similarity -│ ├── store.test.ts # Unit: SQLiteStore CRUD + sort + count -│ ├── decay.test.ts # Unit: Ebbinghaus decay math -│ ├── embedder.test.ts # Unit: embedding wrapper -│ ├── inferrer.test.ts # Unit: LLM extraction + custom prompts -│ ├── native-provider.test.ts # Integration: full provider -│ ├── memory-facade.test.ts # Integration: public API -│ ├── provider-factory.test.ts # Unit: env-based factory -│ ├── coverage-gaps.test.ts # Integration: edge cases -│ ├── sorting-combos.test.ts # Combinatorial: sortBy × order × pagination -│ ├── edge-cases.test.ts # Boundary: invalid IDs, empty stores, limits -│ ├── multi-agent.test.ts # Concurrency + isolation -│ ├── custom-integration.test.ts # Custom prompts, reranker, fallback -│ ├── ebbinghaus.test.ts # Decay: curve, reinforcement, ordering -│ ├── multi-language.test.ts # I18n: CJK, Arabic, emoji, unicode -│ ├── 
e2e-ollama.test.ts # E2E: all features with real Ollama -│ └── e2e-agent-scenario.test.ts # E2E: real-world agent scenarios -└── examples/ - └── basic-usage.ts -``` - -Runtime data directory (auto-created): - -``` -~/.powermem/ -└── memories.db # SQLite database (NativeProvider) -``` - -## 5. Key flows - -### 5.1 Instance creation (`Memory.create()`) +## 2. Module Structure ``` -Memory.create(options?) - │ - ├─ Load .env file - │ - ├─ Has serverUrl? - │ ├─ Yes → HttpProvider (backward compat) - │ └─ No → NativeProvider (default) - │ - └─ NativeProvider.create(): - ├─ Resolve dbPath (default ~/.powermem/memories.db) - ├─ Create SQLite database (SQLiteStore) - ├─ Set up Embedder: - │ ├─ options.embeddings provided? → Use it - │ └─ Not provided → createEmbeddingsFromEnv() - ├─ Set up Inferrer (optional): - │ ├─ options.llm provided? → Use it - │ ├─ Not provided → try createLLMFromEnv() - │ └─ No LLM config → inferrer = undefined (infer disabled) - └─ Return NativeProvider instance +src/ +├── core/ Memory facade, NativeProvider, HttpProvider, Inferrer +├── storage/ VectorStore interface, SQLiteStore, SeekDBStore, factory, adapter +├── integrations/ Embeddings, LLM, Rerank — base interfaces + factories +├── intelligence/ Ebbinghaus decay, MemoryOptimizer, ImportanceEvaluator +├── prompts/ All LLM prompt templates (fact extraction, update, importance, graph) +├── utils/ Cosine search, Snowflake IDs, filter parser, stats, IO +├── cli/ Commander.js CLI (config, memory, stats, manage, shell) +├── dashboard/ Express server + HTML dashboard +├── agent/ AgentMemory, scope/permission/collaboration management +├── user-memory/ UserMemory, query rewrite, SQLite profile storage +├── configs.ts Zod schemas for MemoryConfig +├── config-loader.ts autoConfig(), loadConfigFromEnv(), createConfig() +├── settings.ts Default .env file resolution +├── version.ts Version constant +├── errors/ Error hierarchy +├── types/ TypeScript type definitions +└── index.ts Public exports ``` -### 5.2 
Simple add (`infer=false`) +## 3. Key Flows +### Memory.create() ``` -add({ content, userId, ... , infer: false }) - │ - ├─ Generate Snowflake ID - ├─ Embed content → vector - ├─ MD5 hash content - ├─ Store in SQLite: { id, vector, payload } - └─ Return AddResult with 1 MemoryRecord +Memory.create(options) + ├─ serverUrl provided → HttpProvider + ├─ seekdb config provided → SeekDBStore → NativeProvider + └─ default → SQLiteStore → NativeProvider ``` -### 5.3 Intelligent add (`infer=true`, default) - +### Intelligent add (infer=true) ``` -add({ content, userId, ... }) - │ - ├─ Step 1: Extract facts - │ └─ LLM(FACT_RETRIEVAL_PROMPT, content) → ["fact1", "fact2", ...] - │ - ├─ Step 2: Find similar existing memories - │ └─ For each fact: - │ ├─ Embed fact → vector - │ └─ Search SQLite for top-5 similar (filtered by userId/agentId/runId) - │ └─ Deduplicate, keep best scores, max 10 candidates - │ - ├─ Step 3: Map IDs - │ └─ Real Snowflake IDs → temp sequential IDs ("0","1","2"...) - │ (prevents LLM from hallucinating IDs) - │ - ├─ Step 4: Decide actions - │ └─ LLM(UPDATE_MEMORY_PROMPT, existing_memories, new_facts) - │ → [{ id, text, event: ADD|UPDATE|DELETE|NONE }] - │ - └─ Step 5: Execute actions - ├─ ADD → new Snowflake ID, embed, store - ├─ UPDATE → map temp→real ID, embed new text, update store - ├─ DELETE → map temp→real ID, remove from store - └─ NONE → skip (duplicate) +add(content) + → LLM extracts facts (FACT_RETRIEVAL_PROMPT) + → Embed each fact → search for similar existing memories + → No existing? → ADD all facts directly + → Has existing? → LLM decides ADD/UPDATE/DELETE/NONE + → Execute actions ``` -### 5.4 Search - -``` -search({ query, userId, limit }) - │ - ├─ Embed query → vector - ├─ Load all matching records from SQLite (filtered by userId/agentId/runId) - ├─ Compute cosine similarity for each - ├─ Sort descending by score - ├─ Return top-k as SearchResult - └─ Each result: { memoryId, content, score, metadata } -``` - -## 6. 
Storage — SQLite schema - -```sql -CREATE TABLE memories ( - id TEXT PRIMARY KEY, -- Snowflake ID as string - vector TEXT, -- JSON array of floats - payload TEXT, -- JSON blob (see below) - created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP -); +### Search ``` - -Payload JSON structure: -```json -{ - "data": "the actual content text", - "user_id": "user123", - "agent_id": "agent1", - "run_id": "run1", - "hash": "md5-hex-of-content", - "created_at": "2024-01-01T00:00:00.000Z", - "updated_at": "2024-01-01T00:00:00.000Z", - "category": null, - "metadata": { "custom": "user metadata" } -} +search(query) + → Embed query → cosine similarity (SQLite) or HNSW (SeekDB) + → Apply Ebbinghaus decay (if enabled) + → Apply threshold filter + → Increment access counts + → Apply reranker (if configured) ``` -Filtering uses `json_extract()` on the payload column. Vector search is brute-force cosine similarity in JavaScript — efficient for datasets up to ~100K records. - -## 7. Dependencies - -**Runtime:** -- `better-sqlite3` — Synchronous SQLite bindings (native addon) -- `@langchain/core` — Base types for Embeddings and LLM -- `dotenv` — .env file loading - -**Peer (user installs what they need):** -- `@langchain/openai` — OpenAI, Qwen, SiliconFlow, DeepSeek (OpenAI-compatible) -- `@langchain/anthropic` — Anthropic Claude -- `@langchain/ollama` — Local Ollama models +## 4. Storage Backends -**Dev:** -- `typescript`, `tsup`, `vitest`, `@vitest/coverage-v8`, `@types/better-sqlite3` +### SQLite (default) +- Schema: `memories(id TEXT PK, vector TEXT, payload TEXT, created_at TIMESTAMP)` +- Vector search: brute-force cosine similarity in JavaScript +- Sorting: `ORDER BY json_extract(payload, '$.field')` -## 8. 
Configuration +### SeekDB (HNSW-indexed) +- Embedded mode via `seekdb` npm package (local file, no server) +- HNSW vector index with configurable dimension/distance +- Metadata stored as base64-encoded JSON (bypasses C engine JSON parser) +- Score: `1 - cosine_distance` +- Requires `@seekdb/js-bindings` (macOS ARM64, Linux x64) -Two ways to configure embeddings/LLM: +### VectorStore Interface (async) +All 11 methods return `Promise`. SQLiteStore and SeekDBStore both implement it. -**Explicit (recommended for libraries):** -```ts -import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai'; +## 5. CLI -const memory = await Memory.create({ - embeddings: new OpenAIEmbeddings({ model: 'text-embedding-3-small' }), - llm: new ChatOpenAI({ model: 'gpt-4o-mini' }), -}); -``` - -**Env-based (zero-config):** -```bash -# .env -EMBEDDING_PROVIDER=openai -EMBEDDING_API_KEY=sk-... -EMBEDDING_MODEL=text-embedding-3-small -LLM_PROVIDER=openai -LLM_API_KEY=sk-... -LLM_MODEL=gpt-4o-mini ``` -```ts -const memory = await Memory.create(); // reads from .env +pmem config show|validate|test +pmem memory add|search|list|get|delete|delete-all +pmem stats +pmem manage backup|restore|cleanup +pmem shell ``` -Supported providers for env-based auto-creation: +## 6. Dashboard -| Provider | Embedding | LLM | Package | -|----------|-----------|-----|---------| -| `openai` | Yes | Yes | `@langchain/openai` | -| `qwen` | Yes | Yes | `@langchain/openai` | -| `siliconflow` | Yes | Yes | `@langchain/openai` | -| `deepseek` | Yes | Yes | `@langchain/openai` | -| `anthropic` | No | Yes | `@langchain/anthropic` | -| `ollama` | Yes | Yes | `@langchain/ollama` | +Express server at `/dashboard/` with overview (stats cards, charts), memories page (table, search, pagination), settings page, dark/light theme. -## 9. Error hierarchy +REST API at `/api/v1/`: health, status, stats, memories CRUD, search. 
-| Error class | Code | Trigger | -|-------------|------|---------| -| `PowerMemError` | (base) | Base class for all SDK errors | -| `PowerMemInitError` | `INIT_ERROR` | Missing env config, LangChain package not installed | -| `PowerMemStartupError` | `STARTUP_ERROR` | Server timeout (HTTP mode only) | -| `PowerMemConnectionError` | `CONNECTION_ERROR` | Cannot reach server (HTTP mode only) | -| `PowerMemAPIError` | `API_ERROR` | Server error response (HTTP mode only) | +## 7. Test Architecture -## 10. Build output +504 tests across 40 files, 7 CI jobs. -Dual-format via `tsup`: +| Layer | Tests | Description | +|-------|-------|-------------| +| Unit | 284 | Per-module with mocks | +| Integration | 46 | Full stack with real SQLite | +| Regression | 58 | Scenarios, edge cases, language | +| E2E (Ollama) | 21 | Real qwen2.5:0.5b + nomic-embed-text | +| SeekDB | 63 | Unit + integration + e2e (macOS ARM64 + Linux x64) | +| BDD | 50 | CLI subprocess + dashboard UI via dev-browser | -| File | Format | Purpose | -|------|--------|---------| -| `dist/index.js` | ESM | `import from 'powermem-ts'` | -| `dist/index.cjs` | CommonJS | `require('powermem-ts')` | -| `dist/index.d.ts` | TypeScript declarations | Type support | +### CI Jobs -`better-sqlite3` and `@langchain/*` are externalized (not bundled). - -## 11. Test architecture - -208 tests total: **187 unit tests** (mocked, fast) + **21 e2e tests** (real Ollama models). 
- -Tests are organized by 6 testing perspectives (learned from the Python powermem test suite): - -### Unit tests (187 tests, 16 files) - -Mock infrastructure: -- `MockEmbeddings` — Deterministic vectors from character frequency (no API calls) -- `MockLLM` — Pre-configured response queue with call tracking - -| Test file | Tests | Perspective | -|-----------|-------|-------------| -| `snowflake.test.ts` | 4 | Unit — ID generation | -| `search.test.ts` | 6 | Unit — Cosine similarity | -| `store.test.ts` | 25 | Unit — SQLiteStore CRUD, count, sort, access count | -| `embedder.test.ts` | 4 | Unit — Embedding wrapper | -| `inferrer.test.ts` | 11 | Unit — LLM fact extraction, actions, custom prompts | -| `decay.test.ts` | 8 | Unit — Ebbinghaus decay math | -| `native-provider.test.ts` | 41 | Integration — Full provider, all features | -| `memory-facade.test.ts` | 8 | Integration — Public API through facade | -| `provider-factory.test.ts` | 9 | Unit — Env-based factory | -| `coverage-gaps.test.ts` | 14 | Integration — Edge cases, filter branches | -| `sorting-combos.test.ts` | 11 | **Combinatorial** — sortBy × order × pagination 3D combos | -| `edge-cases.test.ts` | 22 | **Boundary** — Invalid IDs, empty stores, idempotent ops, long content | -| `multi-agent.test.ts` | 6 | **Concurrency + Isolation** — Parallel writes, agent data isolation | -| `custom-integration.test.ts` | 8 | **Custom integration** — All customization points together | -| `ebbinghaus.test.ts` | 9 | **Decay math** — Exponential curve, reinforcement, search ordering | -| `multi-language.test.ts` | 8 | **Multi-language** — CJK, Japanese, Arabic, emoji, unicode metadata | - -### E2E tests with real models (21 tests, 2 files) - -Models: `qwen2.5:0.5b` (LLM) + `nomic-embed-text` (embedding). Auto-skipped when Ollama unavailable. 
- -| Test file | Tests | Scenario | -|-----------|-------|----------| -| `e2e-ollama.test.ts` | 18 | Full feature verification with real embeddings and LLM | -| `e2e-agent-scenario.test.ts` | 3 | **Scenario-based** — Personal assistant, 10-round conversation, multi-agent isolation | - -### Testing perspectives - -These 6 perspectives go beyond "does the feature work" — each catches a different class of bug: - -1. **Combinatorial** — Parameter interaction bugs (sort + filter + pagination) -2. **Boundary/edge** — Implicit assumption bugs (empty, zero, huge, special chars) -3. **Concurrency** — Thread-safety bugs (parallel writes, interleaved read+write) -4. **Multi-tenant isolation** — Filter leaks (same user different agent, scoped deletes) -5. **Multi-language** — Encoding bugs (CJK in JSON payload, unicode metadata keys) -6. **Scenario-based** — Integration bugs (real-world multi-step workflows) +| Job | Platform | Tests | +|-----|----------|-------| +| Test (Node 18/20/22) | Ubuntu | 370 unit/integration/regression | +| SeekDB (macOS ARM64) | macOS 14 | 63 SeekDB tests | +| SeekDB (Linux x64) | Ubuntu | 63 SeekDB tests | +| Build | Ubuntu | CJS + ESM + DTS + CLI | +| E2E (Ollama) | Ubuntu | 21 e2e tests | ### Running tests ```bash -npm test # Unit tests only (fast, no external deps) -npm run test:e2e # E2E tests (requires Ollama + models) -npm run test:all # Both -``` +npm test # 370 unit/integration/regression +npm run test:e2e # 21 e2e (requires Ollama) +npm run test:seekdb # 63 SeekDB (requires native bindings) +npx vitest run tests/bdd/ # 50 BDD (CLI + dashboard) +``` + +## 8. 
Python Parity + +| Python module | TS equivalent | Status | +|---|---|---| +| `core/` | `src/core/` | Done | +| `storage/` | `src/storage/` (SQLite + SeekDB) | Done | +| `integrations/` | `src/integrations/` (via LangChain.js) | Done | +| `intelligence/` | `src/intelligence/` | Done | +| `prompts/` | `src/prompts/` | Done | +| `utils/` | `src/utils/` | Done | +| `cli/` | `src/cli/` | Done | +| `agent/` | `src/agent/` | Done | +| `user_memory/` | `src/user-memory/` | Done | +| configs + settings | `src/configs.ts` etc | Done | +| dashboard (React) | `src/dashboard/` (vanilla HTML) | Done | diff --git a/package.json b/package.json index 86e71e1..0493f26 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "powermem-ts", - "version": "0.2.0", + "version": "0.3.0", "description": "TypeScript SDK for PowerMem", "type": "module", "main": "./dist/index.cjs", @@ -13,6 +13,9 @@ "require": "./dist/index.cjs" } }, + "bin": { + "pmem": "./dist/cli.js" + }, "files": [ "dist" ], @@ -24,21 +27,26 @@ "dev": "tsup --watch", "type-check": "tsc --noEmit", "lint": "eslint src", - "test": "vitest run --exclude 'tests/e2e-*' --exclude 'tests/seekdb-*'", - "test:watch": "vitest --exclude 'tests/e2e-*' --exclude 'tests/seekdb-*'", - "test:e2e": "vitest run tests/e2e-*.test.ts", - "test:seekdb": "vitest run tests/seekdb-*.test.ts", + "test": "vitest run --exclude 'tests/e2e/**' --exclude 'tests/**/seekdb*'", + "test:watch": "vitest --exclude 'tests/e2e/**' --exclude 'tests/**/seekdb*'", + "test:e2e": "vitest run tests/e2e/", + "test:seekdb": "vitest run tests/unit/storage/seekdb.test.ts && vitest run tests/integration/seekdb.test.ts && vitest run tests/integration/seekdb-e2e.test.ts", "test:all": "vitest run" }, "dependencies": { "@langchain/core": "^1.1.38", "better-sqlite3": "^12.8.0", - "dotenv": "^16.0.0" + "commander": "^14.0.3", + "dotenv": "^16.0.0", + "express": "^5.2.1", + "zod": "^4.3.6" }, "devDependencies": { "@langchain/ollama": "^1.2.6", "@langchain/openai": 
"^1.4.1",
+    "@seekdb/default-embed": "^1.2.0",
     "@types/better-sqlite3": "^7.6.13",
+    "@types/express": "^5.0.6",
     "@types/node": "^20.0.0",
     "@vitest/coverage-v8": "^4.1.2",
     "eslint": "^9.0.0",
diff --git a/src/agent/abstract/collaboration.ts b/src/agent/abstract/collaboration.ts
new file mode 100644
index 0000000..924f0d0
--- /dev/null
+++ b/src/agent/abstract/collaboration.ts
@@ -0,0 +1,9 @@
+/**
+ * Collaboration strategy abstract interface.
+ */
+export interface CollaborationStrategy {
+  initialize(): Promise<void>;
+  shareMemory(memoryId: string, fromAgent: string, toAgents: string[], permissions?: Record<string, unknown>): Promise<Record<string, unknown>>;
+  getSharedMemories(agentId: string): Promise<Array<Record<string, unknown>>>;
+  createGroup(groupName: string, agentIds: string[], permissions?: Record<string, unknown>): Promise<Record<string, unknown>>;
+}
diff --git a/src/agent/abstract/context.ts b/src/agent/abstract/context.ts
new file mode 100644
index 0000000..aab8e41
--- /dev/null
+++ b/src/agent/abstract/context.ts
@@ -0,0 +1,8 @@
+/**
+ * Context strategy abstract interface.
+ */
+export interface ContextStrategy {
+  initialize(): Promise<void>;
+  getContext(agentId: string): Promise<Record<string, unknown>>;
+  updateContext(agentId: string, context: Record<string, unknown>): Promise<void>;
+}
diff --git a/src/agent/abstract/manager.ts b/src/agent/abstract/manager.ts
new file mode 100644
index 0000000..f9ce2ba
--- /dev/null
+++ b/src/agent/abstract/manager.ts
@@ -0,0 +1,13 @@
+/**
+ * Agent memory manager abstract interface. 
+ */ +export interface AgentMemoryManager { + add(content: string, userId?: string, agentId?: string, metadata?: Record): Promise>; + search(query: string, userId?: string, agentId?: string, limit?: number): Promise>>; + getAll(userId?: string, agentId?: string, limit?: number): Promise>>; + update(memoryId: string, content: string, userId?: string, agentId?: string): Promise>; + delete(memoryId: string, userId?: string, agentId?: string): Promise; + deleteAll(userId?: string, agentId?: string): Promise; + reset(): Promise; + getStatistics(): Promise>; +} diff --git a/src/agent/abstract/permission.ts b/src/agent/abstract/permission.ts new file mode 100644 index 0000000..dc2d81b --- /dev/null +++ b/src/agent/abstract/permission.ts @@ -0,0 +1,14 @@ +/** + * Permission strategy abstract interface. + * Port of Python powermem/agent/abstract/permission.py. + */ +import type { AccessPermission } from '../types.js'; + +export interface PermissionStrategy { + initialize(): Promise; + checkPermission(agentId: string, memoryId: string, permission: AccessPermission): Promise; + grantPermission(memoryId: string, agentId: string, permission: AccessPermission, grantedBy: string): Promise>; + revokePermission(memoryId: string, agentId: string, permission: AccessPermission, revokedBy: string): Promise>; + getPermissions(memoryId: string, agentId: string): Promise>; + getPermissionHistory(memoryId: string, agentId?: string, limit?: number): Promise>>; +} diff --git a/src/agent/abstract/privacy.ts b/src/agent/abstract/privacy.ts new file mode 100644 index 0000000..6b79970 --- /dev/null +++ b/src/agent/abstract/privacy.ts @@ -0,0 +1,11 @@ +/** + * Privacy strategy abstract interface. 
+ */ +import type { PrivacyLevel } from '../types.js'; + +export interface PrivacyStrategy { + initialize(): Promise; + getPrivacyLevel(memoryId: string): Promise; + setPrivacyLevel(memoryId: string, level: PrivacyLevel, setBy: string): Promise>; + checkPrivacyAccess(agentId: string, memoryId: string): Promise; +} diff --git a/src/agent/abstract/scope.ts b/src/agent/abstract/scope.ts new file mode 100644 index 0000000..4f77394 --- /dev/null +++ b/src/agent/abstract/scope.ts @@ -0,0 +1,14 @@ +/** + * Scope strategy abstract interface. + * Port of Python powermem/agent/abstract/scope.py. + */ +import type { MemoryScope } from '../types.js'; + +export interface ScopeStrategy { + initialize(): Promise; + determineScope(agentId: string, context?: Record, metadata?: Record): Promise; + getAccessibleMemories(agentId: string, scope: MemoryScope): Promise; + checkScopeAccess(agentId: string, memoryId: string): Promise; + updateMemoryScope(memoryId: string, newScope: MemoryScope, agentId: string): Promise>; + getScopeStatistics(): Promise>; +} diff --git a/src/agent/agent.ts b/src/agent/agent.ts new file mode 100644 index 0000000..3cc4bac --- /dev/null +++ b/src/agent/agent.ts @@ -0,0 +1,130 @@ +/** + * AgentMemory — unified agent memory interface. + * Port of Python powermem/agent/agent.py. 
+ */ +import type { Memory } from '../core/memory.js'; +import type { ScopeStrategy } from './abstract/scope.js'; +import type { PermissionStrategy } from './abstract/permission.js'; +import { AgentFactory } from './factories/agent-factory.js'; +import { MemoryScope, AccessPermission } from './types.js'; + +export interface AgentMemoryConfig { + mode?: 'multi_agent' | 'multi_user' | 'hybrid' | 'auto'; + defaultScope?: MemoryScope; + enableCollaboration?: boolean; + [key: string]: unknown; +} + +export class AgentMemory { + private readonly memory: Memory; + private readonly mode: string; + private readonly scopeManager: ScopeStrategy; + private readonly permissionManager: PermissionStrategy; + + constructor(memory: Memory, config: AgentMemoryConfig = {}) { + this.memory = memory; + this.mode = config.mode ?? 'multi_agent'; + this.scopeManager = AgentFactory.createScopeManager(config); + this.permissionManager = AgentFactory.createPermissionManager(config); + } + + getMode(): string { + return this.mode; + } + + async add( + content: string, + options: { userId?: string; agentId?: string; metadata?: Record; scope?: MemoryScope } = {} + ): Promise> { + const scope = options.scope ?? await this.scopeManager.determineScope( + options.agentId ?? 
'', undefined, options.metadata + ); + + const result = await this.memory.add(content, { + userId: options.userId, + agentId: options.agentId, + metadata: { ...options.metadata, scope }, + }); + + return { ...result, scope }; + } + + async search( + query: string, + options: { userId?: string; agentId?: string; limit?: number } = {} + ): Promise>> { + const result = await this.memory.search(query, { + userId: options.userId, + agentId: options.agentId, + limit: options.limit, + }); + return result.results as unknown as Array>; + } + + async getAll( + options: { userId?: string; agentId?: string; limit?: number } = {} + ): Promise>> { + const result = await this.memory.getAll({ + userId: options.userId, + agentId: options.agentId, + limit: options.limit, + }); + return result.memories as unknown as Array>; + } + + async update( + memoryId: string, + content: string, + options: { userId?: string; agentId?: string } = {} + ): Promise> { + const hasPermission = await this.permissionManager.checkPermission( + options.agentId ?? 
'', memoryId, AccessPermission.WRITE + ); + if (!hasPermission) { + throw new Error(`Agent ${options.agentId} does not have write permission for memory ${memoryId}`); + } + return this.memory.update(memoryId, content) as unknown as Record; + } + + async delete( + memoryId: string, + _options: { userId?: string; agentId?: string } = {} + ): Promise { + return this.memory.delete(memoryId); + } + + async deleteAll(options: { userId?: string; agentId?: string } = {}): Promise { + return this.memory.deleteAll({ userId: options.userId, agentId: options.agentId }); + } + + async reset(): Promise { + await this.memory.reset(); + } + + async getStatistics(): Promise> { + const scopeStats = await this.scopeManager.getScopeStatistics(); + const count = await this.memory.count(); + return { mode: this.mode, totalMemories: count, ...scopeStats }; + } + + async grantPermission( + memoryId: string, + agentId: string, + permission: AccessPermission, + grantedBy: string + ): Promise> { + return this.permissionManager.grantPermission(memoryId, agentId, permission, grantedBy); + } + + async checkPermission( + agentId: string, + memoryId: string, + permission: AccessPermission + ): Promise { + return this.permissionManager.checkPermission(agentId, memoryId, permission); + } + + async close(): Promise { + await this.memory.close(); + } +} diff --git a/src/agent/components/permission-controller.ts b/src/agent/components/permission-controller.ts new file mode 100644 index 0000000..d67e28d --- /dev/null +++ b/src/agent/components/permission-controller.ts @@ -0,0 +1,99 @@ +/** + * Permission controller — manages memory access permissions. + * Port of Python powermem/agent/components/permission_controller.py. 
+ */ +import type { PermissionStrategy } from '../abstract/permission.js'; +import { AccessPermission } from '../types.js'; + +interface AccessLogEntry { + agentId: string; + memoryId: string; + permission: AccessPermission; + result: boolean; + action?: string; + performedBy?: string; + timestamp: string; +} + +export class PermissionController implements PermissionStrategy { + private memoryPermissions = new Map>>(); + private accessLog: AccessLogEntry[] = []; + private defaultPermissions: AccessPermission[]; + + constructor(config: Record = {}) { + this.defaultPermissions = (config.defaultPermissions as AccessPermission[]) ?? [AccessPermission.READ]; + } + + async initialize(): Promise {} + + async checkPermission(agentId: string, memoryId: string, permission: AccessPermission): Promise { + const memPerms = this.memoryPermissions.get(memoryId); + if (memPerms) { + const agentPerms = memPerms.get(agentId); + if (agentPerms?.has(permission)) { + this.logAccess(agentId, memoryId, permission, true); + return true; + } + } + + // Check defaults + const result = this.defaultPermissions.includes(permission); + this.logAccess(agentId, memoryId, permission, result); + return result; + } + + async grantPermission( + memoryId: string, agentId: string, permission: AccessPermission, grantedBy: string + ): Promise> { + if (!this.memoryPermissions.has(memoryId)) { + this.memoryPermissions.set(memoryId, new Map()); + } + const memPerms = this.memoryPermissions.get(memoryId)!; + if (!memPerms.has(agentId)) { + memPerms.set(agentId, new Set()); + } + memPerms.get(agentId)!.add(permission); + + this.logPermissionChange(memoryId, agentId, permission, 'grant', grantedBy); + return { success: true, memoryId, agentId, permission, grantedBy, grantedAt: new Date().toISOString() }; + } + + async revokePermission( + memoryId: string, agentId: string, permission: AccessPermission, revokedBy: string + ): Promise> { + const memPerms = this.memoryPermissions.get(memoryId); + if (memPerms) { + 
const agentPerms = memPerms.get(agentId); + agentPerms?.delete(permission); + } + + this.logPermissionChange(memoryId, agentId, permission, 'revoke', revokedBy); + return { success: true, memoryId, agentId, permission, revokedBy, revokedAt: new Date().toISOString() }; + } + + async getPermissions(memoryId: string, agentId: string): Promise> { + const memPerms = this.memoryPermissions.get(memoryId); + const agentPerms = memPerms?.get(agentId); + const permissions = agentPerms ? Array.from(agentPerms) : [...this.defaultPermissions]; + return { memoryId, agentId, permissions, permissionCount: permissions.length }; + } + + async getPermissionHistory(memoryId: string, agentId?: string, limit = 50): Promise>> { + let filtered = this.accessLog.filter((e) => e.memoryId === memoryId); + if (agentId) filtered = filtered.filter((e) => e.agentId === agentId); + filtered.sort((a, b) => b.timestamp.localeCompare(a.timestamp)); + return filtered.slice(0, limit) as unknown as Array>; + } + + private logAccess(agentId: string, memoryId: string, permission: AccessPermission, result: boolean): void { + this.accessLog.push({ agentId, memoryId, permission, result, timestamp: new Date().toISOString() }); + } + + private logPermissionChange( + memoryId: string, agentId: string, permission: AccessPermission, action: string, performedBy: string + ): void { + this.accessLog.push({ + agentId, memoryId, permission, result: true, action, performedBy, timestamp: new Date().toISOString(), + }); + } +} diff --git a/src/agent/components/scope-controller.ts b/src/agent/components/scope-controller.ts new file mode 100644 index 0000000..b07180e --- /dev/null +++ b/src/agent/components/scope-controller.ts @@ -0,0 +1,90 @@ +/** + * Scope controller — determines and manages memory scopes. + * Port of Python powermem/agent/components/scope_controller.py. 
+ */ +import type { ScopeStrategy } from '../abstract/scope.js'; +import { MemoryScope } from '../types.js'; + +export class ScopeController implements ScopeStrategy { + private scopeStorage = new Map>>(); + + constructor(private readonly config: Record = {}) { + for (const scope of Object.values(MemoryScope)) { + this.scopeStorage.set(scope, new Map()); + } + } + + async initialize(): Promise {} + + async determineScope( + _agentId: string, + _context?: Record, + metadata?: Record + ): Promise { + const hint = metadata?.scope as string | undefined; + if (hint && Object.values(MemoryScope).includes(hint as MemoryScope)) { + return hint as MemoryScope; + } + return (this.config.defaultScope as MemoryScope) ?? MemoryScope.PRIVATE; + } + + async getAccessibleMemories(agentId: string, scope: MemoryScope): Promise { + const scopeMap = this.scopeStorage.get(scope); + if (!scopeMap) return []; + const ids: string[] = []; + for (const id of scopeMap.keys()) { + if (await this.checkScopeAccess(agentId, id)) { + ids.push(id); + } + } + return ids; + } + + async checkScopeAccess(agentId: string, memoryId: string): Promise { + for (const [scope, scopeMap] of this.scopeStorage) { + const data = scopeMap.get(memoryId); + if (data) { + if (scope === MemoryScope.PUBLIC) return true; + if (scope === MemoryScope.PRIVATE && data.ownerId === agentId) return true; + if (scope === MemoryScope.AGENT_GROUP) { + const members = (data.groupMembers as string[]) ?? 
[]; + if (members.includes(agentId)) return true; + } + } + } + return false; + } + + async updateMemoryScope( + memoryId: string, + newScope: MemoryScope, + agentId: string + ): Promise> { + let oldScope: MemoryScope | undefined; + let data: Record | undefined; + + for (const [scope, scopeMap] of this.scopeStorage) { + if (scopeMap.has(memoryId)) { + oldScope = scope; + data = scopeMap.get(memoryId); + scopeMap.delete(memoryId); + break; + } + } + + if (!data) data = { ownerId: agentId }; + this.scopeStorage.get(newScope)!.set(memoryId, data); + + return { success: true, memoryId, oldScope, newScope, updatedBy: agentId }; + } + + async getScopeStatistics(): Promise> { + const breakdown: Record = {}; + let total = 0; + for (const [scope, scopeMap] of this.scopeStorage) { + breakdown[scope] = scopeMap.size; + total += scopeMap.size; + } + return { totalMemories: total, scopeBreakdown: breakdown }; + } +} diff --git a/src/agent/factories/agent-factory.ts b/src/agent/factories/agent-factory.ts new file mode 100644 index 0000000..df25fe0 --- /dev/null +++ b/src/agent/factories/agent-factory.ts @@ -0,0 +1,18 @@ +/** + * Agent component factory. + * Port of Python powermem/agent/factories/agent_factory.py. 
+ */ +import type { ScopeStrategy } from '../abstract/scope.js'; +import type { PermissionStrategy } from '../abstract/permission.js'; +import { ScopeController } from '../components/scope-controller.js'; +import { PermissionController } from '../components/permission-controller.js'; + +export class AgentFactory { + static createScopeManager(config: Record = {}): ScopeStrategy { + return new ScopeController(config); + } + + static createPermissionManager(config: Record = {}): PermissionStrategy { + return new PermissionController(config); + } +} diff --git a/src/agent/index.ts b/src/agent/index.ts new file mode 100644 index 0000000..2f6a70d --- /dev/null +++ b/src/agent/index.ts @@ -0,0 +1,15 @@ +export { AgentMemory } from './agent.js'; +export type { AgentMemoryConfig } from './agent.js'; +export { + MemoryType, MemoryScope, AccessPermission, + PrivacyLevel, CollaborationType, CollaborationStatus, CollaborationLevel, +} from './types.js'; +export type { ScopeStrategy } from './abstract/scope.js'; +export type { PermissionStrategy } from './abstract/permission.js'; +export type { CollaborationStrategy } from './abstract/collaboration.js'; +export type { PrivacyStrategy } from './abstract/privacy.js'; +export type { ContextStrategy } from './abstract/context.js'; +export type { AgentMemoryManager } from './abstract/manager.js'; +export { ScopeController } from './components/scope-controller.js'; +export { PermissionController } from './components/permission-controller.js'; +export { AgentFactory } from './factories/agent-factory.js'; diff --git a/src/agent/types.ts b/src/agent/types.ts new file mode 100644 index 0000000..65b4d19 --- /dev/null +++ b/src/agent/types.ts @@ -0,0 +1,54 @@ +/** + * Agent memory type definitions. + * Port of Python powermem/agent/types.py. 
+ */ + +export enum MemoryType { + SEMANTIC = 'semantic', + EPISODIC = 'episodic', + PROCEDURAL = 'procedural', + WORKING = 'working', + SHORT_TERM = 'short_term', + LONG_TERM = 'long_term', + PUBLIC_SHARED = 'public_shared', + PRIVATE_AGENT = 'private_agent', + COLLABORATIVE = 'collaborative', + GROUP_CONSENSUS = 'group_consensus', +} + +export enum MemoryScope { + PRIVATE = 'private', + AGENT_GROUP = 'agent_group', + USER_GROUP = 'user_group', + PUBLIC = 'public', + RESTRICTED = 'restricted', +} + +export enum AccessPermission { + READ = 'read', + WRITE = 'write', + DELETE = 'delete', + ADMIN = 'admin', +} + +export enum PrivacyLevel { + STANDARD = 'standard', + SENSITIVE = 'sensitive', + CONFIDENTIAL = 'confidential', +} + +export enum CollaborationType { + SYNCHRONOUS = 'synchronous', + ASYNCHRONOUS = 'asynchronous', +} + +export enum CollaborationStatus { + ACTIVE = 'active', + INACTIVE = 'inactive', + PENDING = 'pending', +} + +export enum CollaborationLevel { + ISOLATED = 'isolated', + COLLABORATIVE = 'collaborative', +} diff --git a/src/cli/commands/config.ts b/src/cli/commands/config.ts new file mode 100644 index 0000000..3f8a96c --- /dev/null +++ b/src/cli/commands/config.ts @@ -0,0 +1,99 @@ +/** + * Config CLI commands: pmem config init|show|test + */ +import type { Command } from 'commander'; + +export function registerConfigCommands(program: Command): void { + const config = program + .command('config') + .description('Configuration management'); + + config + .command('show') + .description('Display current configuration') + .option('-s, --section
', 'Show specific section (llm, embedder, vector_store, all)') + .action(async (opts) => { + const { autoConfig } = await import('../../config-loader.js'); + const parent = program.opts(); + if (parent.envFile) process.env.POWERMEM_ENV_FILE = parent.envFile; + + const cfg = autoConfig(); + const json = parent.json; + + if (json) { + if (opts.section && opts.section !== 'all') { + const section = cfg[opts.section as keyof typeof cfg]; + console.log(JSON.stringify(section ?? {}, null, 2)); + } else { + console.log(JSON.stringify(cfg, null, 2)); + } + return; + } + + if (opts.section && opts.section !== 'all') { + const section = cfg[opts.section as keyof typeof cfg]; + console.log(`[${opts.section}]`); + console.log(JSON.stringify(section ?? {}, null, 2)); + } else { + for (const [key, value] of Object.entries(cfg)) { + if (value != null) { + console.log(`[${key}]`); + console.log(JSON.stringify(value, null, 2)); + console.log(); + } + } + } + }); + + config + .command('validate') + .description('Validate current configuration') + .option('-f, --file ', 'Validate specific env file') + .action(async (opts) => { + const { autoConfig } = await import('../../config-loader.js'); + const { validateConfig } = await import('../../configs.js'); + + if (opts.file) process.env.POWERMEM_ENV_FILE = opts.file; + else if (program.opts().envFile) process.env.POWERMEM_ENV_FILE = program.opts().envFile; + + const cfg = autoConfig(); + const valid = validateConfig(cfg as Record); + + if (valid) { + console.log('Configuration is valid.'); + } else { + console.error('Configuration is invalid. 
Missing required sections (vectorStore, llm, embedder).'); + process.exit(1); + } + }); + + config + .command('test') + .description('Test component connections') + .option('-c, --component ', 'Test specific component (database, llm, embedder, all)') + .action(async (opts) => { + const { autoConfig } = await import('../../config-loader.js'); + const parent = program.opts(); + if (parent.envFile) process.env.POWERMEM_ENV_FILE = parent.envFile; + + const cfg = autoConfig(); + const component = opts.component ?? 'all'; + + console.log(`Testing ${component}...`); + + if (component === 'database' || component === 'all') { + const provider = cfg.vectorStore?.provider ?? 'unknown'; + console.log(` Database: ${provider} — OK (config loaded)`); + } + if (component === 'llm' || component === 'all') { + const provider = cfg.llm?.provider ?? 'unknown'; + const hasKey = !!(cfg.llm?.config as Record)?.apiKey; + console.log(` LLM: ${provider} — ${hasKey ? 'API key set' : 'No API key'}`); + } + if (component === 'embedder' || component === 'all') { + const provider = cfg.embedder?.provider ?? 'unknown'; + const hasKey = !!(cfg.embedder?.config as Record)?.apiKey; + console.log(` Embedder: ${provider} — ${hasKey ? 'API key set' : 'No API key'}`); + } + }); +} diff --git a/src/cli/commands/interactive.ts b/src/cli/commands/interactive.ts new file mode 100644 index 0000000..a56a71f --- /dev/null +++ b/src/cli/commands/interactive.ts @@ -0,0 +1,150 @@ +/** + * Interactive REPL: pmem shell + * Port of Python powermem/cli/commands/interactive.py. 
+ */ +import type { Command } from 'commander'; +import readline from 'node:readline'; + +export function registerShellCommand(program: Command): void { + program + .command('shell') + .description('Interactive PowerMem shell (REPL)') + .action(async () => { + const parent = program.opts(); + if (parent.envFile) process.env.POWERMEM_ENV_FILE = parent.envFile; + + const { Memory } = await import('../../core/memory.js'); + let mem: Awaited>; + + try { + mem = await Memory.create(); + } catch (err) { + console.error(`Failed to initialize: ${err}`); + process.exit(1); + } + + let defaultUserId: string | undefined; + let defaultAgentId: string | undefined; + let jsonOutput = parent.json ?? false; + + const rl = readline.createInterface({ + input: process.stdin, + output: process.stdout, + prompt: 'powermem> ', + completer: (line: string) => { + const cmds = ['add', 'search', 'get', 'list', 'delete', 'stats', 'set', 'show', 'help', 'exit']; + const hits = cmds.filter((c) => c.startsWith(line)); + return [hits.length ? hits : cmds, line]; + }, + }); + + console.log('PowerMem Interactive Shell. 
Type "help" for commands, "exit" to quit.'); + rl.prompt(); + + rl.on('line', async (line) => { + const trimmed = line.trim(); + if (!trimmed) { rl.prompt(); return; } + + const [cmd, ...args] = trimmed.split(/\s+/); + const rest = args.join(' '); + + try { + switch (cmd) { + case 'add': { + if (!rest) { console.log('Usage: add '); break; } + const result = await mem.add(rest, { userId: defaultUserId, agentId: defaultAgentId, infer: true }); + if (jsonOutput) console.log(JSON.stringify(result, null, 2)); + else { + console.log(result.message); + for (const m of result.memories) console.log(` ${m.memoryId}: ${m.content}`); + } + break; + } + case 'search': { + if (!rest) { console.log('Usage: search '); break; } + const result = await mem.search(rest, { userId: defaultUserId, agentId: defaultAgentId, limit: 5 }); + if (jsonOutput) console.log(JSON.stringify(result, null, 2)); + else { + console.log(`Found ${result.total} results:`); + for (const r of result.results) { + console.log(` [${r.score?.toFixed(3) ?? '?'}] ${r.memoryId}: ${r.content}`); + } + } + break; + } + case 'get': { + if (!rest) { console.log('Usage: get '); break; } + const m = await mem.get(rest); + if (jsonOutput) console.log(JSON.stringify(m, null, 2)); + else if (m) console.log(`${m.memoryId}: ${m.content} (created: ${m.createdAt})`); + else console.log('Not found.'); + break; + } + case 'list': { + const limit = rest ? parseInt(rest, 10) || 10 : 10; + const result = await mem.getAll({ userId: defaultUserId, agentId: defaultAgentId, limit }); + if (jsonOutput) console.log(JSON.stringify(result, null, 2)); + else { + console.log(`Total: ${result.total} (showing ${result.memories.length})`); + for (const m of result.memories) console.log(` ${m.memoryId}: ${m.content}`); + } + break; + } + case 'delete': { + if (!rest) { console.log('Usage: delete '); break; } + const ok = await mem.delete(rest); + console.log(ok ? 'Deleted.' 
: 'Not found.'); + break; + } + case 'stats': { + const { calculateStatsFromMemories } = await import('../../utils/stats.js'); + const all = await mem.getAll({ userId: defaultUserId, limit: 10000 }); + const stats = calculateStatsFromMemories(all.memories as unknown as Array>); + if (jsonOutput) console.log(JSON.stringify(stats, null, 2)); + else { + console.log(`Total: ${stats.totalMemories}`); + for (const [t, c] of Object.entries(stats.byType)) console.log(` ${t}: ${c}`); + } + break; + } + case 'set': { + const [key, val] = rest.split(/\s+/, 2); + if (key === 'user') { defaultUserId = val || undefined; console.log(`User ID: ${defaultUserId ?? '(none)'}`); } + else if (key === 'agent') { defaultAgentId = val || undefined; console.log(`Agent ID: ${defaultAgentId ?? '(none)'}`); } + else if (key === 'json') { jsonOutput = val !== 'false' && val !== '0'; console.log(`JSON output: ${jsonOutput}`); } + else console.log('Usage: set user|agent|json '); + break; + } + case 'show': { + console.log(`User ID: ${defaultUserId ?? '(none)'}`); + console.log(`Agent ID: ${defaultAgentId ?? '(none)'}`); + console.log(`JSON output: ${jsonOutput}`); + break; + } + case 'help': { + console.log('Commands: add, search, get, list, delete, stats, set, show, help, exit'); + console.log(' set user — Set default user ID'); + console.log(' set agent — Set default agent ID'); + console.log(' set json true — Enable JSON output'); + break; + } + case 'exit': case 'quit': case 'q': { + rl.close(); + return; + } + default: + console.log(`Unknown command: ${cmd}. 
Type "help" for available commands.`); + } + } catch (err) { + console.error(`Error: ${err}`); + } + + rl.prompt(); + }); + + rl.on('close', async () => { + await mem.close(); + console.log('Bye!'); + }); + }); +} diff --git a/src/cli/commands/manage.ts b/src/cli/commands/manage.ts new file mode 100644 index 0000000..ee9ca23 --- /dev/null +++ b/src/cli/commands/manage.ts @@ -0,0 +1,159 @@ +/** + * Manage CLI commands: pmem manage backup|restore|cleanup + */ +import type { Command } from 'commander'; +import fs from 'node:fs'; +import { formatJson } from '../utils/output.js'; + +export function registerManageCommands(program: Command): void { + const manage = program + .command('manage') + .description('Backup, restore, and cleanup operations'); + + manage + .command('backup') + .description('Backup memories to a JSON file') + .option('-o, --output ', 'Output file path') + .option('-u, --user-id ', 'Filter by user ID') + .option('-a, --agent-id ', 'Filter by agent ID') + .option('-l, --limit ', 'Max memories to backup', '10000') + .action(async (opts) => { + const parent = program.opts(); + if (parent.envFile) process.env.POWERMEM_ENV_FILE = parent.envFile; + + const { Memory } = await import('../../core/memory.js'); + const mem = await Memory.create(); + + try { + const result = await mem.getAll({ + userId: opts.userId, + agentId: opts.agentId, + limit: parseInt(opts.limit, 10), + }); + + const backup = { + version: '1.0', + createdAt: new Date().toISOString(), + filters: { userId: opts.userId, agentId: opts.agentId }, + count: result.memories.length, + memories: result.memories, + }; + + const outputPath = opts.output ?? 
+ `powermem_backup_${new Date().toISOString().replace(/[:.]/g, '-').slice(0, 19)}.json`; + + fs.writeFileSync(outputPath, JSON.stringify(backup, null, 2)); + + if (parent.json) { + console.log(formatJson({ path: outputPath, count: backup.count })); + } else { + console.log(`Backed up ${backup.count} memories to ${outputPath}`); + } + } finally { + await mem.close(); + } + }); + + manage + .command('restore') + .description('Restore memories from a JSON backup') + .argument('', 'Backup file path') + .option('-u, --user-id ', 'Override user ID') + .option('--dry-run', 'Preview without restoring') + .action(async (file: string, opts) => { + const parent = program.opts(); + if (parent.envFile) process.env.POWERMEM_ENV_FILE = parent.envFile; + + if (!fs.existsSync(file)) { + console.error(`File not found: ${file}`); + process.exit(1); + } + + const backup = JSON.parse(fs.readFileSync(file, 'utf-8')); + const memories = backup.memories ?? []; + console.log(`Found ${memories.length} memories in backup (v${backup.version ?? '?'})`); + + if (opts.dryRun) { + console.log('Dry run — no changes made.'); + return; + } + + const { Memory } = await import('../../core/memory.js'); + const mem = await Memory.create(); + + let success = 0; + let errors = 0; + + try { + for (const m of memories) { + try { + await mem.add(m.content, { + userId: opts.userId ?? 
m.userId, + agentId: m.agentId, + metadata: m.metadata, + infer: false, + }); + success++; + } catch { + errors++; + } + } + + if (parent.json) { + console.log(formatJson({ restored: success, errors })); + } else { + console.log(`Restored ${success} memories (${errors} errors)`); + } + } finally { + await mem.close(); + } + }); + + manage + .command('cleanup') + .description('Remove low-quality or duplicate memories') + .option('-u, --user-id ', 'Filter by user ID') + .option('-s, --strategy ', 'Dedup strategy: exact or semantic', 'exact') + .option('-t, --threshold ', 'Similarity threshold for semantic dedup', '0.95') + .option('--dry-run', 'Preview without deleting') + .action(async (opts) => { + const parent = program.opts(); + if (parent.envFile) process.env.POWERMEM_ENV_FILE = parent.envFile; + + const { Memory } = await import('../../core/memory.js'); + const { MemoryOptimizer } = await import('../../intelligence/memory-optimizer.js'); + const { SQLiteStore } = await import('../../storage/sqlite/sqlite.js'); + + // Create store directly for optimizer + const mem = await Memory.create(); + const store = new SQLiteStore(process.env.SQLITE_PATH ?? 
':memory:'); + + try { + const optimizer = new MemoryOptimizer(store); + + if (opts.dryRun) { + const count = await mem.count({ userId: opts.userId }); + console.log(`Would check ${count} memories with strategy: ${opts.strategy}`); + return; + } + + const result = await optimizer.deduplicate( + opts.strategy as 'exact' | 'semantic', + opts.userId, + parseFloat(opts.threshold) + ); + + if (parent.json) { + console.log(formatJson(result)); + } else { + console.log(`Checked: ${result.totalChecked}`); + console.log(`Duplicates found: ${result.duplicatesFound}`); + console.log(`Deleted: ${result.deletedCount}`); + if (result.errors > 0) console.log(`Errors: ${result.errors}`); + } + } finally { + await mem.close(); + await store.close(); + } + }); +} diff --git a/src/cli/commands/memory.ts b/src/cli/commands/memory.ts new file mode 100644 index 0000000..a1d6d4c --- /dev/null +++ b/src/cli/commands/memory.ts @@ -0,0 +1,171 @@ +/** + * Memory CLI commands: pmem memory add|search|list|get|delete|delete-all + */ +import type { Command } from 'commander'; + +async function getMemory(program: Command) { + const parent = program.opts(); + if (parent.envFile) process.env.POWERMEM_ENV_FILE = parent.envFile; + + const { Memory } = await import('../../core/memory.js'); + return Memory.create(); +} + +export function registerMemoryCommands(program: Command): void { + const memory = program + .command('memory') + .description('Memory CRUD operations'); + + memory + .command('add ') + .description('Add a memory') + .option('-u, --user-id ', 'User ID') + .option('-a, --agent-id ', 'Agent ID') + .option('--no-infer', 'Skip LLM fact extraction') + .option('-s, --scope ', 'Memory scope') + .option('-c, --category ', 'Memory category') + .action(async (content: string, opts) => { + const mem = await getMemory(program); + try { + const result = await mem.add(content, { + userId: opts.userId, + agentId: opts.agentId, + infer: opts.infer !== false, + scope: opts.scope, + category: 
opts.category, + }); + if (program.opts().json) { + console.log(JSON.stringify(result, null, 2)); + } else { + console.log(result.message); + for (const m of result.memories) { + console.log(` ${m.memoryId}: ${m.content}`); + } + } + } finally { + await mem.close(); + } + }); + + memory + .command('search ') + .description('Search memories') + .option('-u, --user-id ', 'User ID') + .option('-a, --agent-id ', 'Agent ID') + .option('-l, --limit ', 'Max results', '10') + .option('-t, --threshold ', 'Min similarity score') + .action(async (query: string, opts) => { + const mem = await getMemory(program); + try { + const result = await mem.search(query, { + userId: opts.userId, + agentId: opts.agentId, + limit: parseInt(opts.limit, 10), + threshold: opts.threshold ? parseFloat(opts.threshold) : undefined, + }); + if (program.opts().json) { + console.log(JSON.stringify(result, null, 2)); + } else { + console.log(`Found ${result.total} results for "${query}":`); + for (const r of result.results) { + const score = r.score?.toFixed(3) ?? 
'?'; + console.log(` [${score}] ${r.memoryId}: ${r.content}`); + } + } + } finally { + await mem.close(); + } + }); + + memory + .command('list') + .description('List all memories') + .option('-u, --user-id ', 'User ID') + .option('-a, --agent-id ', 'Agent ID') + .option('-l, --limit ', 'Max results', '20') + .option('-o, --offset ', 'Offset', '0') + .option('--sort ', 'Sort by field (created_at, updated_at)') + .option('--order ', 'Sort order (asc, desc)', 'desc') + .action(async (opts) => { + const mem = await getMemory(program); + try { + const result = await mem.getAll({ + userId: opts.userId, + agentId: opts.agentId, + limit: parseInt(opts.limit, 10), + offset: parseInt(opts.offset, 10), + sortBy: opts.sort, + order: opts.order as 'asc' | 'desc', + }); + if (program.opts().json) { + console.log(JSON.stringify(result, null, 2)); + } else { + console.log(`Total: ${result.total} (showing ${result.memories.length})`); + for (const m of result.memories) { + console.log(` ${m.memoryId}: ${m.content}`); + } + } + } finally { + await mem.close(); + } + }); + + memory + .command('get ') + .description('Get a memory by ID') + .action(async (id: string) => { + const mem = await getMemory(program); + try { + const result = await mem.get(id); + if (program.opts().json) { + console.log(JSON.stringify(result, null, 2)); + } else if (result) { + console.log(`ID: ${result.memoryId}`); + console.log(`Content: ${result.content}`); + console.log(`Created: ${result.createdAt}`); + console.log(`Updated: ${result.updatedAt}`); + if (result.userId) console.log(`User: ${result.userId}`); + if (result.agentId) console.log(`Agent: ${result.agentId}`); + } else { + console.log('Memory not found.'); + } + } finally { + await mem.close(); + } + }); + + memory + .command('delete ') + .description('Delete a memory by ID') + .action(async (id: string) => { + const mem = await getMemory(program); + try { + const ok = await mem.delete(id); + console.log(ok ? 'Deleted.' 
: 'Not found.'); + } finally { + await mem.close(); + } + }); + + memory + .command('delete-all') + .description('Delete all memories') + .option('-u, --user-id ', 'User ID') + .option('-a, --agent-id ', 'Agent ID') + .option('--confirm', 'Skip confirmation') + .action(async (opts) => { + if (!opts.confirm) { + const target = opts.userId ? `user ${opts.userId}` : opts.agentId ? `agent ${opts.agentId}` : 'ALL'; + console.log(`This will delete memories for: ${target}`); + console.log('Pass --confirm to proceed.'); + return; + } + const mem = await getMemory(program); + try { + await mem.deleteAll({ userId: opts.userId, agentId: opts.agentId }); + console.log('Deleted.'); + } finally { + await mem.close(); + } + }); +} diff --git a/src/cli/commands/stats.ts b/src/cli/commands/stats.ts new file mode 100644 index 0000000..628bf0c --- /dev/null +++ b/src/cli/commands/stats.ts @@ -0,0 +1,41 @@ +/** + * Stats CLI command: pmem stats + */ +import type { Command } from 'commander'; +import { calculateStatsFromMemories } from '../../utils/stats.js'; +import { formatJson, formatStats } from '../utils/output.js'; + +export function registerStatsCommand(program: Command): void { + program + .command('stats') + .description('Display memory statistics') + .option('-u, --user-id ', 'Filter by user ID') + .option('-a, --agent-id ', 'Filter by agent ID') + .action(async (opts) => { + const parent = program.opts(); + if (parent.envFile) process.env.POWERMEM_ENV_FILE = parent.envFile; + + const { Memory } = await import('../../core/memory.js'); + const mem = await Memory.create(); + + try { + const all = await mem.getAll({ + userId: opts.userId, + agentId: opts.agentId, + limit: 10000, + }); + + const stats = calculateStatsFromMemories( + all.memories as unknown as Array> + ); + + if (parent.json) { + console.log(formatJson(stats)); + } else { + console.log(formatStats(stats as unknown as Record)); + } + } finally { + await mem.close(); + } + }); +} diff --git a/src/cli/index.ts 
b/src/cli/index.ts new file mode 100644 index 0000000..3b2fa9a --- /dev/null +++ b/src/cli/index.ts @@ -0,0 +1,5 @@ +export { registerConfigCommands } from './commands/config.js'; +export { registerMemoryCommands } from './commands/memory.js'; +export { registerStatsCommand } from './commands/stats.js'; +export { registerManageCommands } from './commands/manage.js'; +export { registerShellCommand } from './commands/interactive.js'; diff --git a/src/cli/main.ts b/src/cli/main.ts new file mode 100644 index 0000000..31ac167 --- /dev/null +++ b/src/cli/main.ts @@ -0,0 +1,30 @@ +#!/usr/bin/env node +/** + * PowerMem CLI — main entry point. + * Port of Python powermem/cli/main.py using Commander.js. + */ +import { Command } from 'commander'; +import { VERSION } from '../version.js'; +import { registerConfigCommands } from './commands/config.js'; +import { registerMemoryCommands } from './commands/memory.js'; +import { registerStatsCommand } from './commands/stats.js'; +import { registerManageCommands } from './commands/manage.js'; +import { registerShellCommand } from './commands/interactive.js'; + +const program = new Command(); + +program + .name('pmem') + .description('PowerMem CLI — Command Line Interface for PowerMem') + .version(VERSION) + .option('-f, --env-file ', 'Load settings from this .env file') + .option('-j, --json', 'Output results in JSON format') + .option('-v, --verbose', 'Enable verbose output'); + +registerConfigCommands(program); +registerMemoryCommands(program); +registerStatsCommand(program); +registerManageCommands(program); +registerShellCommand(program); + +program.parse(); diff --git a/src/cli/utils/envfile.ts b/src/cli/utils/envfile.ts new file mode 100644 index 0000000..97d1488 --- /dev/null +++ b/src/cli/utils/envfile.ts @@ -0,0 +1,95 @@ +/** + * .env file management utilities. + * Port of Python powermem/cli/utils/envfile.py. 
+ */ +import fs from 'node:fs'; +import path from 'node:path'; + +const ENV_LINE_RE = /^\s*(?:export\s+)?([A-Za-z_][A-Za-z0-9_]*)\s*=\s*(.*)\s*$/; + +export function parseEnvLines(lines: string[]): Record { + const result: Record = {}; + for (const line of lines) { + const trimmed = line.trim(); + if (!trimmed || trimmed.startsWith('#')) continue; + const match = trimmed.match(ENV_LINE_RE); + if (match && !(match[1] in result)) { + result[match[1]] = match[2].replace(/^["']|["']$/g, ''); + } + } + return result; +} + +export function formatEnvValue(value: string): string { + if (!value) return ''; + if (/[\s#"']/.test(value)) { + return `"${value.replace(/\\/g, '\\\\').replace(/"/g, '\\"')}"`; + } + return value; +} + +export interface EnvUpdateResult { + path: string; + backupPath?: string; + updatedKeys: string[]; + appendedKeys: string[]; +} + +export function updateEnvFile( + filePath: string, + updates: Record, + sectionTitle = '# PowerMem Configuration' +): EnvUpdateResult { + const dir = path.dirname(filePath); + if (!fs.existsSync(dir)) fs.mkdirSync(dir, { recursive: true }); + + const result: EnvUpdateResult = { + path: filePath, + updatedKeys: [], + appendedKeys: [], + }; + + let lines: string[] = []; + if (fs.existsSync(filePath)) { + // Backup + const ts = new Date().toISOString().replace(/[:.]/g, '-').slice(0, 19); + result.backupPath = `${filePath}.bak.${ts}`; + fs.copyFileSync(filePath, result.backupPath); + lines = fs.readFileSync(filePath, 'utf-8').split('\n'); + } + + const remaining = { ...updates }; + + // Update existing keys + for (let i = 0; i < lines.length; i++) { + const match = lines[i].match(ENV_LINE_RE); + if (match && match[1] in remaining) { + lines[i] = `${match[1]}=${formatEnvValue(remaining[match[1]])}`; + result.updatedKeys.push(match[1]); + delete remaining[match[1]]; + } + } + + // Append new keys + const newKeys = Object.keys(remaining); + if (newKeys.length > 0) { + if (lines.length > 0 && lines[lines.length - 1].trim() !== 
'') { + lines.push(''); + } + lines.push(sectionTitle); + for (const key of newKeys) { + lines.push(`${key}=${formatEnvValue(remaining[key])}`); + result.appendedKeys.push(key); + } + } + + fs.writeFileSync(filePath, lines.join('\n')); + return result; +} + +export function readEnvFile(filePath: string): { lines: string[]; parsed: Record } { + if (!fs.existsSync(filePath)) return { lines: [], parsed: {} }; + const content = fs.readFileSync(filePath, 'utf-8'); + const lines = content.split('\n'); + return { lines, parsed: parseEnvLines(lines) }; +} diff --git a/src/cli/utils/index.ts b/src/cli/utils/index.ts new file mode 100644 index 0000000..4e89aa7 --- /dev/null +++ b/src/cli/utils/index.ts @@ -0,0 +1,2 @@ +export * from './output.js'; +export * from './envfile.js'; diff --git a/src/cli/utils/output.ts b/src/cli/utils/output.ts new file mode 100644 index 0000000..2bece00 --- /dev/null +++ b/src/cli/utils/output.ts @@ -0,0 +1,82 @@ +/** + * Output formatting utilities for CLI. + * Port of Python powermem/cli/utils/output.py. + */ + +export function formatJson(data: unknown): string { + return JSON.stringify(data, null, 2); +} + +export function truncate(text: string, maxLen = 50): string { + if (text.length <= maxLen) return text; + return text.slice(0, maxLen - 3) + '...'; +} + +export function formatMemoryTable(memories: Array>): string { + if (memories.length === 0) return '(no memories)'; + const header = `${'ID'.padEnd(22)} ${'User'.padEnd(14)} ${'Agent'.padEnd(14)} Content`; + const sep = '-'.repeat(80); + const rows = memories.map((m) => { + const id = truncate(String(m.memoryId ?? m.id ?? ''), 20).padEnd(22); + const user = truncate(String(m.userId ?? ''), 12).padEnd(14); + const agent = truncate(String(m.agentId ?? ''), 12).padEnd(14); + const content = truncate(String(m.content ?? 
''), 30); + return `${id} ${user} ${agent} ${content}`; + }); + return [header, sep, ...rows].join('\n'); +} + +export function formatSearchTable(results: Array>): string { + if (results.length === 0) return '(no results)'; + const header = `${'ID'.padEnd(22)} ${'Score'.padEnd(8)} Content`; + const sep = '-'.repeat(70); + const rows = results.map((r) => { + const id = truncate(String(r.memoryId ?? ''), 20).padEnd(22); + const score = (typeof r.score === 'number' ? r.score.toFixed(3) : '?').padEnd(8); + const content = truncate(String(r.content ?? ''), 40); + return `${id} ${score} ${content}`; + }); + return [header, sep, ...rows].join('\n'); +} + +export function formatStats(stats: Record): string { + const lines: string[] = []; + lines.push(`Total memories: ${stats.totalMemories ?? stats.total_memories ?? 0}`); + + const byType = stats.byType ?? stats.by_type; + if (byType && typeof byType === 'object') { + lines.push('By type:'); + for (const [type, count] of Object.entries(byType as Record)) { + lines.push(` ${type}: ${count}`); + } + } + + const ageDist = stats.ageDistribution ?? stats.age_distribution; + if (ageDist && typeof ageDist === 'object') { + lines.push('Age distribution:'); + for (const [range, count] of Object.entries(ageDist as Record)) { + lines.push(` ${range}: ${count}`); + } + } + + const avgImp = stats.avgImportance ?? 
stats.avg_importance; + if (avgImp != null) lines.push(`Avg importance: ${avgImp}`); + + return lines.join('\n'); +} + +export function printSuccess(msg: string): void { + console.log(`\x1b[32m${msg}\x1b[0m`); +} + +export function printError(msg: string): void { + console.error(`\x1b[31m${msg}\x1b[0m`); +} + +export function printWarning(msg: string): void { + console.log(`\x1b[33m${msg}\x1b[0m`); +} + +export function printInfo(msg: string): void { + console.log(`\x1b[34m${msg}\x1b[0m`); +} diff --git a/src/config-loader.ts b/src/config-loader.ts new file mode 100644 index 0000000..d214b60 --- /dev/null +++ b/src/config-loader.ts @@ -0,0 +1,152 @@ +/** + * Configuration loader — load config from environment variables. + * Port of Python powermem/config_loader.py. + */ +import { loadEnvFile } from './utils/env.js'; +import { getDefaultEnvFile } from './settings.js'; +import type { MemoryConfigInput } from './configs.js'; + +/** Load .env files (POWERMEM_ENV_FILE takes precedence). */ +function loadDotenvIfAvailable(): void { + const cliEnv = process.env.POWERMEM_ENV_FILE; + if (cliEnv) loadEnvFile(cliEnv); + + const defaultEnv = getDefaultEnvFile(); + if (defaultEnv) loadEnvFile(defaultEnv); +} + +/** Read a provider+config pair from env with a given prefix. */ +function readProviderFromEnv(prefix: string): { provider: string; config: Record } { + const provider = (process.env[`${prefix}_PROVIDER`] ?? '').toLowerCase(); + const apiKey = process.env[`${prefix}_API_KEY`]; + const model = process.env[`${prefix}_MODEL`]; + const config: Record = {}; + if (apiKey) config.apiKey = apiKey; + if (model) config.model = model; + + // Numeric fields + const dims = process.env[`${prefix}_DIMS`] ?? 
process.env.EMBEDDING_DIMS; + if (dims && prefix === 'EMBEDDING') config.embeddingDims = parseInt(dims, 10); + + // LLM-specific + if (prefix === 'LLM') { + const temp = process.env.LLM_TEMPERATURE; + if (temp) config.temperature = parseFloat(temp); + const maxTokens = process.env.LLM_MAX_TOKENS; + if (maxTokens) config.maxTokens = parseInt(maxTokens, 10); + const topP = process.env.LLM_TOP_P; + if (topP) config.topP = parseFloat(topP); + } + + return { provider, config }; +} + +/** Load database/vector-store config from env. */ +function readDatabaseFromEnv(): { provider: string; config: Record } { + const provider = (process.env.DATABASE_PROVIDER ?? 'sqlite').toLowerCase(); + const config: Record = {}; + + if (provider === 'sqlite') { + const dbPath = process.env.SQLITE_PATH ?? process.env.OCEANBASE_PATH; + if (dbPath) config.path = dbPath; + } else if (provider === 'seekdb') { + const seekdbPath = process.env.SEEKDB_PATH ?? process.env.OCEANBASE_PATH; + if (seekdbPath) config.path = seekdbPath; + const seekdbDb = process.env.SEEKDB_DATABASE; + if (seekdbDb) config.database = seekdbDb; + } + + return { provider, config }; +} + +/** Read intelligent memory settings from env. */ +function readIntelligentMemoryFromEnv(): Record { + const result: Record = {}; + const enabled = process.env.INTELLIGENT_MEMORY_ENABLED; + if (enabled !== undefined) result.enabled = enabled !== 'false' && enabled !== '0'; + const fallback = process.env.INTELLIGENT_MEMORY_FALLBACK_TO_SIMPLE_ADD; + if (fallback !== undefined) result.fallbackToSimpleAdd = fallback === 'true' || fallback === '1'; + const decay = process.env.INTELLIGENT_MEMORY_DECAY_RATE; + if (decay) result.decayRate = parseFloat(decay); + const reinforcement = process.env.INTELLIGENT_MEMORY_REINFORCEMENT_FACTOR; + if (reinforcement) result.reinforcementFactor = parseFloat(reinforcement); + return result; +} + +/** + * Load full configuration from environment variables. 
+ * Reads .env files, then builds a MemoryConfig-compatible dict. + */ +export function loadConfigFromEnv(): MemoryConfigInput { + loadDotenvIfAvailable(); + + const db = readDatabaseFromEnv(); + const llm = readProviderFromEnv('LLM'); + const embedder = readProviderFromEnv('EMBEDDING'); + const intelligentMemory = readIntelligentMemoryFromEnv(); + + // Reranker (optional) + const rerankerProvider = process.env.RERANKER_PROVIDER; + const reranker = rerankerProvider + ? { provider: rerankerProvider.toLowerCase(), config: {} as Record } + : undefined; + + // Custom prompts + const customFactExtractionPrompt = process.env.CUSTOM_FACT_EXTRACTION_PROMPT; + const customUpdateMemoryPrompt = process.env.CUSTOM_UPDATE_MEMORY_PROMPT; + + return { + vectorStore: db, + llm, + embedder, + reranker, + intelligentMemory: Object.keys(intelligentMemory).length > 0 ? intelligentMemory : undefined, + customFactExtractionPrompt, + customUpdateMemoryPrompt, + }; +} + +/** + * Auto-detect and load configuration from environment. + * Simplest entry point — loads .env and returns config. + */ +export function autoConfig(): MemoryConfigInput { + return loadConfigFromEnv(); +} + +/** + * Create a config dict programmatically. + */ +export function createConfig(options: { + databaseProvider?: string; + llmProvider?: string; + embeddingProvider?: string; + databaseConfig?: Record; + llmApiKey?: string; + llmModel?: string; + embeddingApiKey?: string; + embeddingModel?: string; + embeddingDims?: number; +} = {}): MemoryConfigInput { + return { + vectorStore: { + provider: options.databaseProvider ?? 'sqlite', + config: options.databaseConfig ?? {}, + }, + llm: { + provider: options.llmProvider ?? 'qwen', + config: { + apiKey: options.llmApiKey, + model: options.llmModel ?? 'qwen-plus', + }, + }, + embedder: { + provider: options.embeddingProvider ?? 'qwen', + config: { + apiKey: options.embeddingApiKey, + model: options.embeddingModel ?? 
'text-embedding-v4', + embeddingDims: options.embeddingDims ?? 1536, + }, + }, + }; +} diff --git a/src/configs.ts b/src/configs.ts new file mode 100644 index 0000000..76bfe14 --- /dev/null +++ b/src/configs.ts @@ -0,0 +1,133 @@ +/** + * Configuration classes for the memory system. + * Port of Python powermem/configs.py — Pydantic models → Zod schemas. + */ +import { z } from 'zod/v4'; + +// ─── Sub-configs ────────────────────────────────────────────────────────── + +export const IntelligentMemoryConfigSchema = z.object({ + enabled: z.boolean().default(true), + initialRetention: z.number().default(1.0), + decayRate: z.number().default(0.1), + reinforcementFactor: z.number().default(0.3), + workingThreshold: z.number().default(0.3), + shortTermThreshold: z.number().default(0.6), + longTermThreshold: z.number().default(0.8), + fallbackToSimpleAdd: z.boolean().default(false), +}); +export type IntelligentMemoryConfig = z.infer; + +export const TelemetryConfigSchema = z.object({ + enableTelemetry: z.boolean().default(false), + telemetryEndpoint: z.string().default('https://telemetry.powermem.ai'), + telemetryApiKey: z.string().nullish(), + batchSize: z.number().int().default(100), + flushInterval: z.number().int().default(30), +}); +export type TelemetryConfig = z.infer; + +export const AuditConfigSchema = z.object({ + enabled: z.boolean().default(true), + logFile: z.string().default('./logs/audit.log'), + logLevel: z.string().default('INFO'), + retentionDays: z.number().int().default(90), +}); +export type AuditConfig = z.infer; + +export const LoggingConfigSchema = z.object({ + level: z.string().default('DEBUG'), + format: z.string().default('%(asctime)s - %(name)s - %(levelname)s - %(message)s'), + file: z.string().default('./logs/powermem.log'), +}); +export type LoggingConfig = z.infer; + +export const AgentMemoryConfigSchema = z.object({ + enabled: z.boolean().default(true), + mode: z.enum(['multi_agent', 'multi_user', 'hybrid', 'auto']).default('multi_agent'), 
+ defaultScope: z.string().default('private'), + defaultPrivacyLevel: z.string().default('standard'), + defaultCollaborationLevel: z.string().default('isolated'), + defaultAccessPermission: z.string().default('read'), + enableCollaboration: z.boolean().default(true), +}); +export type AgentMemoryConfig = z.infer; + +export const QueryRewriteConfigSchema = z.object({ + enabled: z.boolean().default(false), + prompt: z.string().nullish(), + modelOverride: z.string().nullish(), +}); +export type QueryRewriteConfig = z.infer; + +// ─── Provider configs ───────────────────────────────────────────────────── + +export const VectorStoreProviderConfigSchema = z.object({ + provider: z.string().default('sqlite'), + config: z.record(z.string(), z.unknown()).default({}), +}); +export type VectorStoreProviderConfig = z.infer; + +export const LLMProviderConfigSchema = z.object({ + provider: z.string().default('qwen'), + config: z.record(z.string(), z.unknown()).default({}), +}); +export type LLMProviderConfig = z.infer; + +export const EmbedderProviderConfigSchema = z.object({ + provider: z.string().default('qwen'), + config: z.record(z.string(), z.unknown()).default({}), +}); +export type EmbedderProviderConfig = z.infer; + +export const RerankProviderConfigSchema = z.object({ + provider: z.string().default('qwen'), + config: z.record(z.string(), z.unknown()).default({}), +}); +export type RerankProviderConfig = z.infer; + +// ─── Main config ────────────────────────────────────────────────────────── + +export const MemoryConfigSchema = z.object({ + vectorStore: VectorStoreProviderConfigSchema.default(() => ({ provider: 'sqlite', config: {} })), + llm: LLMProviderConfigSchema.default(() => ({ provider: 'qwen', config: {} })), + embedder: EmbedderProviderConfigSchema.default(() => ({ provider: 'qwen', config: {} })), + graphStore: z.record(z.string(), z.unknown()).nullish(), + reranker: RerankProviderConfigSchema.nullish(), + sparseEmbedder: z.record(z.string(), 
z.unknown()).nullish(), + version: z.string().default('v1.1'), + customFactExtractionPrompt: z.string().nullish(), + customUpdateMemoryPrompt: z.string().nullish(), + customImportanceEvaluationPrompt: z.string().nullish(), + agentMemory: AgentMemoryConfigSchema.nullish(), + intelligentMemory: IntelligentMemoryConfigSchema.nullish(), + telemetry: TelemetryConfigSchema.nullish(), + audit: AuditConfigSchema.nullish(), + logging: LoggingConfigSchema.nullish(), + queryRewrite: QueryRewriteConfigSchema.nullish(), +}); +export type MemoryConfigInput = z.input; +export type MemoryConfig = z.infer; + +/** Parse and validate a MemoryConfig, applying defaults. */ +export function parseMemoryConfig(input: MemoryConfigInput): MemoryConfig { + const config = MemoryConfigSchema.parse(input); + // Apply defaults for optional sub-configs (matching Python __init__) + if (!config.agentMemory) config.agentMemory = AgentMemoryConfigSchema.parse({}); + if (!config.intelligentMemory) config.intelligentMemory = IntelligentMemoryConfigSchema.parse({}); + if (!config.telemetry) config.telemetry = TelemetryConfigSchema.parse({}); + if (!config.audit) config.audit = AuditConfigSchema.parse({}); + if (!config.logging) config.logging = LoggingConfigSchema.parse({}); + if (!config.queryRewrite) config.queryRewrite = QueryRewriteConfigSchema.parse({}); + return config; +} + +/** Validate a config dict has required sections. 
*/ +export function validateConfig(config: Record): boolean { + const required = ['vectorStore', 'llm', 'embedder']; + for (const section of required) { + const s = config[section] as Record | undefined; + if (!s || typeof s.provider !== 'string') return false; + } + return true; +} diff --git a/src/provider/http-provider.ts b/src/core/http-provider.ts similarity index 98% rename from src/provider/http-provider.ts rename to src/core/http-provider.ts index e00a8f4..42f3876 100644 --- a/src/provider/http-provider.ts +++ b/src/core/http-provider.ts @@ -1,6 +1,6 @@ import { PowerMemAPIError, PowerMemConnectionError } from '../errors/index.js'; import { toSnakeCase, toCamelCase } from '../utils/case-convert.js'; -import type { MemoryProvider } from './index.js'; +import type { MemoryProvider } from './provider.js'; import type { AddParams, SearchParams, diff --git a/src/provider/native/inferrer.ts b/src/core/inferrer.ts similarity index 98% rename from src/provider/native/inferrer.ts rename to src/core/inferrer.ts index 9734292..eb44d09 100644 --- a/src/provider/native/inferrer.ts +++ b/src/core/inferrer.ts @@ -1,6 +1,6 @@ import type { BaseChatModel } from '@langchain/core/language_models/chat_models'; import { HumanMessage, SystemMessage } from '@langchain/core/messages'; -import { getFactRetrievalPrompt, buildUpdateMemoryPrompt } from './prompts.js'; +import { getFactRetrievalPrompt, buildUpdateMemoryPrompt } from '../prompts/intelligent-memory.js'; export interface MemoryAction { id: string; diff --git a/src/memory.ts b/src/core/memory.ts similarity index 84% rename from src/memory.ts rename to src/core/memory.ts index 43267d1..788c27f 100644 --- a/src/memory.ts +++ b/src/core/memory.ts @@ -1,9 +1,9 @@ -import { HttpProvider } from './provider/http-provider.js'; -import { NativeProvider } from './provider/native/index.js'; -import { loadEnvFile } from './utils/env.js'; -import type { MemoryProvider } from './provider/index.js'; -import type { VectorStore } from 
'./provider/native/vector-store.js'; -import type { InitOptions, MemoryOptions } from './types/options.js'; +import { HttpProvider } from './http-provider.js'; +import { NativeProvider } from './native-provider.js'; +import { loadEnvFile } from '../utils/env.js'; +import type { MemoryProvider } from './provider.js'; +import type { VectorStore } from '../storage/base.js'; +import type { InitOptions, MemoryOptions } from '../types/options.js'; import type { AddParams, SearchParams, @@ -13,8 +13,8 @@ import type { BatchItem, BatchOptions, MemoryRecord, -} from './types/memory.js'; -import type { AddResult, SearchResult, MemoryListResult } from './types/responses.js'; +} from '../types/memory.js'; +import type { AddResult, SearchResult, MemoryListResult } from '../types/responses.js'; export class Memory { private constructor( @@ -35,7 +35,7 @@ export class Memory { // Create SeekDB store if configured let store: VectorStore | undefined; if (options.seekdb) { - const { SeekDBStore } = await import('./provider/native/seekdb-store.js'); + const { SeekDBStore } = await import('../storage/seekdb/seekdb.js'); store = await SeekDBStore.create(options.seekdb); } diff --git a/src/provider/native/index.ts b/src/core/native-provider.ts similarity index 95% rename from src/provider/native/index.ts rename to src/core/native-provider.ts index bbf63c9..f1b4923 100644 --- a/src/provider/native/index.ts +++ b/src/core/native-provider.ts @@ -3,7 +3,7 @@ import fs from 'node:fs'; import path from 'node:path'; import type { Embeddings } from '@langchain/core/embeddings'; import type { BaseChatModel } from '@langchain/core/language_models/chat_models'; -import type { MemoryProvider } from '../index.js'; +import type { MemoryProvider } from './provider.js'; import type { AddParams, SearchParams, @@ -13,17 +13,18 @@ import type { BatchItem, BatchOptions, MemoryRecord, -} from '../../types/memory.js'; -import type { AddResult, SearchResult, MemoryListResult } from '../../types/responses.js'; 
-import type { RerankerFn } from '../../types/options.js'; -import { SQLiteStore } from './store.js'; -import type { VectorStore, VectorStoreFilter, VectorStoreRecord } from './vector-store.js'; -import { Embedder } from './embedder.js'; +} from '../types/memory.js'; +import type { AddResult, SearchResult, MemoryListResult } from '../types/responses.js'; +import type { RerankerFn } from '../types/options.js'; +import { SQLiteStore } from '../storage/sqlite/sqlite.js'; +import type { VectorStore, VectorStoreFilter, VectorStoreRecord } from '../storage/base.js'; +import { Embedder } from '../integrations/embeddings/embedder.js'; import { Inferrer } from './inferrer.js'; -import { SnowflakeIDGenerator } from './snowflake.js'; -import { computeDecayFactor, applyDecay } from './decay.js'; -import { createEmbeddingsFromEnv, createLLMFromEnv } from './provider-factory.js'; -import { getDefaultHomeDir } from '../../utils/platform.js'; +import { SnowflakeIDGenerator } from '../utils/snowflake.js'; +import { computeDecayFactor, applyDecay } from '../intelligence/ebbinghaus.js'; +import { createEmbeddingsFromEnv } from '../integrations/embeddings/factory.js'; +import { createLLMFromEnv } from '../integrations/llm/factory.js'; +import { getDefaultHomeDir } from '../utils/platform.js'; export interface NativeProviderOptions { embeddings?: Embeddings; @@ -319,7 +320,7 @@ export class NativeProvider implements MemoryProvider { await this.store.incrementAccessCountBatch(matchIds); } - let results: import('../../types/responses.js').SearchHit[] = matches.map((m) => ({ + let results: import('../types/responses.js').SearchHit[] = matches.map((m) => ({ memoryId: m.id, content: m.content, score: m.score, diff --git a/src/provider/index.ts b/src/core/provider.ts similarity index 100% rename from src/provider/index.ts rename to src/core/provider.ts diff --git a/src/dashboard/public/index.html b/src/dashboard/public/index.html new file mode 100644 index 0000000..645e546 --- /dev/null +++ 
b/src/dashboard/public/index.html @@ -0,0 +1,302 @@ + + + + + + PowerMem Dashboard + + + +
+
+
+

PowerMem Dashboard

+
Memory intelligence overview
+
+
+ + + +
+
+ + + +
+ + +
+
+
Loading stats...
+
+ +
+

System Health

+
Loading...
+
+ +
+
+

Growth Trend

+
Loading...
+
+
+

Age Distribution

+
Loading...
+
+
+ +
+

Hot Memories (Most Accessed)

+
Loading...
+
+
+ + +
+
+ + + +
+
+
Loading...
+
+
+ + + +
+
+ + +
+
+

Configuration

+
Loading...
+
+
+
+ + + + diff --git a/src/dashboard/server.ts b/src/dashboard/server.ts new file mode 100644 index 0000000..436bb75 --- /dev/null +++ b/src/dashboard/server.ts @@ -0,0 +1,161 @@ +#!/usr/bin/env node +/** + * PowerMem Dashboard — minimal Express server. + * Serves REST API + static HTML dashboard (matching Python edition's React dashboard). + */ +import express from 'express'; +import path from 'node:path'; +import { fileURLToPath } from 'node:url'; +import { Memory } from '../core/memory.js'; +import type { Embeddings } from '@langchain/core/embeddings'; +import { calculateStatsFromMemories } from '../utils/stats.js'; +import { VERSION } from '../version.js'; + +const __dirname = path.dirname(fileURLToPath(import.meta.url)); + +export async function createDashboardServer(options: { + port?: number; + dbPath?: string; + embeddings?: Embeddings; + memory?: Memory; +} = {}) { + const app = express(); + app.use(express.json()); + + const memory = options.memory ?? await Memory.create({ + dbPath: options.dbPath ?? 
':memory:', + embeddings: options.embeddings, + }); + const startTime = Date.now(); + + // ─── REST API ────────────────────────────────────────────────────── + + app.get('/api/v1/system/health', (_req, res) => { + res.json({ success: true, data: { status: 'ok' } }); + }); + + app.get('/api/v1/system/status', (_req, res) => { + const uptime = Math.floor((Date.now() - startTime) / 1000); + res.json({ + success: true, + data: { + version: VERSION, + storageType: 'sqlite', + uptime, + status: 'running', + }, + }); + }); + + app.get('/api/v1/memories/stats', async (req, res) => { + try { + const userId = req.query.user_id as string | undefined; + const agentId = req.query.agent_id as string | undefined; + const all = await memory.getAll({ userId, agentId, limit: 10000 }); + const stats = calculateStatsFromMemories( + all.memories as unknown as Array> + ); + res.json({ success: true, data: stats }); + } catch (err) { + res.status(500).json({ success: false, message: String(err) }); + } + }); + + app.get('/api/v1/memories', async (req, res) => { + try { + const userId = req.query.user_id as string | undefined; + const agentId = req.query.agent_id as string | undefined; + const limit = parseInt(req.query.limit as string) || 20; + const offset = parseInt(req.query.offset as string) || 0; + const sortBy = req.query.sort_by as string | undefined; + const order = req.query.order as 'asc' | 'desc' | undefined; + const result = await memory.getAll({ userId, agentId, limit, offset, sortBy, order }); + res.json({ success: true, data: result }); + } catch (err) { + res.status(500).json({ success: false, message: String(err) }); + } + }); + + app.post('/api/v1/memories', async (req, res) => { + try { + const { content, user_id, agent_id, infer, metadata } = req.body; + const result = await memory.add(content, { + userId: user_id, agentId: agent_id, + infer: infer ?? 
false, metadata, + }); + res.json({ success: true, data: result }); + } catch (err) { + res.status(500).json({ success: false, message: String(err) }); + } + }); + + app.delete('/api/v1/memories/:id', async (req, res) => { + try { + const ok = await memory.delete(req.params.id); + res.json({ success: true, data: { deleted: ok } }); + } catch (err) { + res.status(500).json({ success: false, message: String(err) }); + } + }); + + app.post('/api/v1/memories/search', async (req, res) => { + try { + const { query, user_id, agent_id, limit } = req.body; + const result = await memory.search(query, { + userId: user_id, agentId: agent_id, limit, + }); + res.json({ success: true, data: result }); + } catch (err) { + res.status(500).json({ success: false, message: String(err) }); + } + }); + + // ─── Dashboard HTML ──────────────────────────────────────────────── + + const publicDir = path.join(__dirname, 'public'); + app.get('/dashboard', (_req, res) => { + res.sendFile(path.join(publicDir, 'index.html')); + }); + app.use('/dashboard', express.static(publicDir)); + + // ─── Root ────────────────────────────────────────────────────────── + + app.get('/', (_req, res) => { + res.json({ + name: 'PowerMem TS', + version: VERSION, + dashboard: '/dashboard/', + api: '/api/v1/', + docs: '/api/v1/system/status', + }); + }); + + return { app, memory }; +} + +// CLI entry: npx tsx src/dashboard/server.ts +if (process.argv[1]?.endsWith('server.ts') || process.argv[1]?.endsWith('server.js')) { + const port = parseInt(process.env.PORT ?? 
'8000'); + + // Try Ollama embeddings, fall back to a simple mock for demo + let embeddings: Embeddings | undefined; + try { + const { OllamaEmbeddings } = await import('@langchain/ollama'); + embeddings = new OllamaEmbeddings({ model: 'nomic-embed-text', baseUrl: 'http://localhost:11434' }); + } catch { + // Use a minimal mock embeddings for demo + const { Embeddings: EmbBase } = await import('@langchain/core/embeddings'); + class DemoEmbeddings extends EmbBase { + async embedQuery(text: string) { return Array.from({ length: 8 }, (_, i) => text.charCodeAt(i % text.length) / 256); } + async embedDocuments(docs: string[]) { return docs.map(d => this.embedQuery(d) as any); } + } + embeddings = new DemoEmbeddings({}); + } + + createDashboardServer({ dbPath: process.env.DB_PATH, embeddings }).then(({ app }) => { + app.listen(port, () => { + console.log(`PowerMem Dashboard running at http://localhost:${port}/dashboard/`); + console.log(`API at http://localhost:${port}/api/v1/`); + }); + }); +} diff --git a/src/index.ts b/src/index.ts index e94a1d0..bd1d7ed 100644 --- a/src/index.ts +++ b/src/index.ts @@ -1,16 +1,16 @@ -export { Memory } from './memory.js'; -export { NativeProvider } from './provider/native/index.js'; -export { SeekDBStore } from './provider/native/seekdb-store.js'; -export type { SeekDBStoreOptions } from './provider/native/seekdb-store.js'; +export { Memory } from './core/memory.js'; +export { NativeProvider } from './core/native-provider.js'; +export { SeekDBStore } from './storage/seekdb/seekdb.js'; +export type { SeekDBStoreOptions } from './storage/seekdb/seekdb.js'; -export type { MemoryProvider } from './provider/index.js'; +export type { MemoryProvider } from './core/provider.js'; export type { VectorStore, VectorStoreRecord, VectorStoreFilter, VectorStoreSearchMatch, VectorStoreListOptions, -} from './provider/native/vector-store.js'; +} from './storage/base.js'; export type { MemoryRecord, @@ -39,3 +39,39 @@ export { PowerMemConnectionError, 
PowerMemAPIError, } from './errors/index.js'; + +// ─── Config ─────────────────────────────────────────────────────────────── +export { parseMemoryConfig, validateConfig } from './configs.js'; +export type { MemoryConfig, MemoryConfigInput, IntelligentMemoryConfig } from './configs.js'; +export { autoConfig, loadConfigFromEnv, createConfig } from './config-loader.js'; +export { getVersion, VERSION } from './version.js'; + +// ─── Storage ────────────────────────────────────────────────────────────── +export { SQLiteStore } from './storage/sqlite/sqlite.js'; +export { VectorStoreFactory } from './storage/factory.js'; +export { StorageAdapter } from './storage/adapter.js'; +export type { GraphStoreBase } from './storage/base.js'; + +// ─── Integrations ───────────────────────────────────────────────────────── +export { Embedder, createEmbeddings, createEmbeddingsFromEnv } from './integrations/index.js'; +export { createLLM, createLLMFromEnv } from './integrations/index.js'; + +// ─── Intelligence ───────────────────────────────────────────────────────── +export { MemoryOptimizer, ImportanceEvaluator, IntelligenceManager } from './intelligence/index.js'; +export { computeDecayFactor, applyDecay } from './intelligence/index.js'; + +// ─── Agent ──────────────────────────────────────────────────────────────── +export { AgentMemory } from './agent/index.js'; +export type { AgentMemoryConfig } from './agent/index.js'; +export { MemoryScope, AccessPermission, PrivacyLevel, MemoryType } from './agent/index.js'; + +// ─── User Memory ────────────────────────────────────────────────────────── +export { UserMemory } from './user-memory/index.js'; +export { SQLiteUserProfileStore } from './user-memory/index.js'; +export { QueryRewriter } from './user-memory/index.js'; +export type { UserProfile, UserProfileStore } from './user-memory/index.js'; + +// ─── Utils ──────────────────────────────────────────────────────────────── +export { calculateStatsFromMemories } from 
'./utils/stats.js'; +export { parseAdvancedFilters } from './utils/filter-parser.js'; +export { cosineSimilarity } from './utils/search.js'; diff --git a/src/integrations/embeddings/base.ts b/src/integrations/embeddings/base.ts new file mode 100644 index 0000000..a01fc0e --- /dev/null +++ b/src/integrations/embeddings/base.ts @@ -0,0 +1,8 @@ +/** + * Base embedding class — port of Python integrations/embeddings/base.py. + * In TS we use LangChain's Embeddings interface, so this is a thin contract. + */ +export interface EmbeddingProvider { + embed(text: string, memoryAction?: 'add' | 'search' | 'update'): Promise; + embedBatch?(texts: string[]): Promise; +} diff --git a/src/integrations/embeddings/config/base.ts b/src/integrations/embeddings/config/base.ts new file mode 100644 index 0000000..d2a0b33 --- /dev/null +++ b/src/integrations/embeddings/config/base.ts @@ -0,0 +1,6 @@ +export interface BaseEmbedderConfig { + provider?: string; + apiKey?: string; + model?: string; + embeddingDims?: number; +} diff --git a/src/provider/native/embedder.ts b/src/integrations/embeddings/embedder.ts similarity index 100% rename from src/provider/native/embedder.ts rename to src/integrations/embeddings/embedder.ts diff --git a/src/integrations/embeddings/factory.ts b/src/integrations/embeddings/factory.ts new file mode 100644 index 0000000..a46ac07 --- /dev/null +++ b/src/integrations/embeddings/factory.ts @@ -0,0 +1,64 @@ +/** + * Embedding factory — create Embeddings from config or env. + * Split from provider-factory.ts, delegates to LangChain providers. + */ +import type { Embeddings } from '@langchain/core/embeddings'; +import { PowerMemInitError } from '../../errors/index.js'; + +const OPENAI_COMPAT_BASE_URLS: Record = { + openai: undefined, + qwen: process.env.QWEN_LLM_BASE_URL ?? 'https://dashscope.aliyuncs.com/compatible-mode/v1', + siliconflow: process.env.SILICONFLOW_LLM_BASE_URL ?? 'https://api.siliconflow.cn/v1', + deepseek: process.env.DEEPSEEK_LLM_BASE_URL ?? 
'https://api.deepseek.com', +}; + +export async function createEmbeddings(config: { + provider: string; + apiKey?: string; + model?: string; + embeddingDims?: number; +}): Promise { + const provider = config.provider.toLowerCase(); + const apiKey = config.apiKey; + const model = config.model; + const dims = config.embeddingDims; + + if (!apiKey) { + throw new PowerMemInitError('Embedding API key is required.'); + } + + if (['openai', 'qwen', 'siliconflow', 'deepseek'].includes(provider)) { + const { OpenAIEmbeddings } = await import('@langchain/openai'); + const baseURL = OPENAI_COMPAT_BASE_URLS[provider]; + return new OpenAIEmbeddings({ + openAIApiKey: apiKey, + modelName: model, + dimensions: dims, + ...(baseURL ? { configuration: { baseURL } } : {}), + }); + } + + if (provider === 'anthropic') { + throw new PowerMemInitError('Anthropic does not provide an embeddings API.'); + } + + if (provider === 'ollama') { + const { OllamaEmbeddings } = await import('@langchain/ollama'); + return new OllamaEmbeddings({ + model: model ?? 'nomic-embed-text', + baseUrl: process.env.OLLAMA_LLM_BASE_URL ?? 'http://localhost:11434', + }); + } + + throw new PowerMemInitError(`Unsupported embedding provider: "${provider}".`); +} + +/** Create Embeddings from environment variables (backward compat). */ +export async function createEmbeddingsFromEnv(): Promise { + return createEmbeddings({ + provider: process.env.EMBEDDING_PROVIDER ?? 'openai', + apiKey: process.env.EMBEDDING_API_KEY, + model: process.env.EMBEDDING_MODEL, + embeddingDims: process.env.EMBEDDING_DIMS ? 
parseInt(process.env.EMBEDDING_DIMS, 10) : undefined, + }); +} diff --git a/src/integrations/embeddings/index.ts b/src/integrations/embeddings/index.ts new file mode 100644 index 0000000..8a9de9d --- /dev/null +++ b/src/integrations/embeddings/index.ts @@ -0,0 +1,4 @@ +export { Embedder } from './embedder.js'; +export { createEmbeddings, createEmbeddingsFromEnv } from './factory.js'; +export type { EmbeddingProvider } from './base.js'; +export type { BaseEmbedderConfig } from './config/base.js'; diff --git a/src/provider/native/provider-factory.ts b/src/integrations/factory.ts similarity index 99% rename from src/provider/native/provider-factory.ts rename to src/integrations/factory.ts index 025a64e..2e37fbe 100644 --- a/src/provider/native/provider-factory.ts +++ b/src/integrations/factory.ts @@ -1,6 +1,6 @@ import type { Embeddings } from '@langchain/core/embeddings'; import type { BaseChatModel } from '@langchain/core/language_models/chat_models'; -import { PowerMemInitError } from '../../errors/index.js'; +import { PowerMemInitError } from '../errors/index.js'; /** Base URL mapping for OpenAI-compatible providers. 
*/ const OPENAI_COMPAT_BASE_URLS: Record = { diff --git a/src/integrations/index.ts b/src/integrations/index.ts new file mode 100644 index 0000000..88b0e17 --- /dev/null +++ b/src/integrations/index.ts @@ -0,0 +1,9 @@ +export { Embedder, createEmbeddings, createEmbeddingsFromEnv } from './embeddings/index.js'; +export type { EmbeddingProvider, BaseEmbedderConfig } from './embeddings/index.js'; +export { createLLM, createLLMFromEnv } from './llm/index.js'; +export type { LLMProvider, BaseLLMConfig } from './llm/index.js'; +export type { RerankProvider, BaseRerankConfig } from './rerank/index.js'; + +// Backward compat: re-export old factory.ts functions +export { createEmbeddingsFromEnv as _legacyCreateEmbeddingsFromEnv } from './embeddings/factory.js'; +export { createLLMFromEnv as _legacyCreateLLMFromEnv } from './llm/factory.js'; diff --git a/src/integrations/llm/base.ts b/src/integrations/llm/base.ts new file mode 100644 index 0000000..54ae75c --- /dev/null +++ b/src/integrations/llm/base.ts @@ -0,0 +1,10 @@ +/** + * Base LLM class — port of Python integrations/llm/base.py. + * In TS we use LangChain's BaseChatModel, so this is a thin contract. 
+ */ +export interface LLMProvider { + generateResponse( + messages: Array<{ role: string; content: string }>, + options?: { responseFormat?: { type: string }; temperature?: number; maxTokens?: number } + ): Promise; +} diff --git a/src/integrations/llm/config/base.ts b/src/integrations/llm/config/base.ts new file mode 100644 index 0000000..6030ead --- /dev/null +++ b/src/integrations/llm/config/base.ts @@ -0,0 +1,9 @@ +export interface BaseLLMConfig { + provider?: string; + apiKey?: string; + model?: string; + temperature?: number; + maxTokens?: number; + topP?: number; + topK?: number; +} diff --git a/src/integrations/llm/factory.ts b/src/integrations/llm/factory.ts new file mode 100644 index 0000000..fcfe288 --- /dev/null +++ b/src/integrations/llm/factory.ts @@ -0,0 +1,75 @@ +/** + * LLM factory — create BaseChatModel from config or env. + * Split from provider-factory.ts, delegates to LangChain providers. + */ +import type { BaseChatModel } from '@langchain/core/language_models/chat_models'; +import { PowerMemInitError } from '../../errors/index.js'; + +const OPENAI_COMPAT_BASE_URLS: Record = { + openai: undefined, + qwen: process.env.QWEN_LLM_BASE_URL ?? 'https://dashscope.aliyuncs.com/compatible-mode/v1', + siliconflow: process.env.SILICONFLOW_LLM_BASE_URL ?? 'https://api.siliconflow.cn/v1', + deepseek: process.env.DEEPSEEK_LLM_BASE_URL ?? 'https://api.deepseek.com', +}; + +export async function createLLM(config: { + provider: string; + apiKey?: string; + model?: string; + temperature?: number; + maxTokens?: number; + topP?: number; +}): Promise { + const provider = config.provider.toLowerCase(); + const apiKey = config.apiKey; + const model = config.model; + const temperature = config.temperature ?? 0.1; + const maxTokens = config.maxTokens ?? 2000; + const topP = config.topP ?? 
0.1; + + if (!apiKey) { + throw new PowerMemInitError('LLM API key is required.'); + } + + if (['openai', 'qwen', 'siliconflow', 'deepseek'].includes(provider)) { + const { ChatOpenAI } = await import('@langchain/openai'); + const baseURL = OPENAI_COMPAT_BASE_URLS[provider]; + return new ChatOpenAI({ + openAIApiKey: apiKey, + modelName: model ?? 'gpt-4o-mini', + temperature, maxTokens, topP, + ...(baseURL ? { configuration: { baseURL } } : {}), + }); + } + + if (provider === 'anthropic') { + // @ts-expect-error — optional peer dependency + const { ChatAnthropic } = await import('@langchain/anthropic'); + return new ChatAnthropic({ + anthropicApiKey: apiKey, + modelName: model ?? 'claude-sonnet-4-20250514', + temperature, maxTokens, topP, + }); + } + + if (provider === 'ollama') { + const { ChatOllama } = await import('@langchain/ollama'); + return new ChatOllama({ + model: model ?? 'llama3', + baseUrl: process.env.OLLAMA_LLM_BASE_URL ?? 'http://localhost:11434', + temperature, + format: 'json', + }); + } + + throw new PowerMemInitError(`Unsupported LLM provider: "${provider}".`); +} + +/** Create LLM from environment variables (backward compat). */ +export async function createLLMFromEnv(): Promise { + return createLLM({ + provider: process.env.LLM_PROVIDER ?? 'openai', + apiKey: process.env.LLM_API_KEY, + model: process.env.LLM_MODEL, + }); +} diff --git a/src/integrations/llm/index.ts b/src/integrations/llm/index.ts new file mode 100644 index 0000000..db1760f --- /dev/null +++ b/src/integrations/llm/index.ts @@ -0,0 +1,3 @@ +export { createLLM, createLLMFromEnv } from './factory.js'; +export type { LLMProvider } from './base.js'; +export type { BaseLLMConfig } from './config/base.js'; diff --git a/src/integrations/rerank/base.ts b/src/integrations/rerank/base.ts new file mode 100644 index 0000000..a93a978 --- /dev/null +++ b/src/integrations/rerank/base.ts @@ -0,0 +1,6 @@ +/** + * Base rerank class — port of Python integrations/rerank/base.py. 
+ */ +export interface RerankProvider { + rerank(query: string, documents: string[], topN?: number): Promise>; +} diff --git a/src/integrations/rerank/config/base.ts b/src/integrations/rerank/config/base.ts new file mode 100644 index 0000000..a450c99 --- /dev/null +++ b/src/integrations/rerank/config/base.ts @@ -0,0 +1,8 @@ +export interface BaseRerankConfig { + provider?: string; + enabled?: boolean; + model?: string; + apiKey?: string; + apiBaseUrl?: string; + topN?: number; +} diff --git a/src/integrations/rerank/index.ts b/src/integrations/rerank/index.ts new file mode 100644 index 0000000..861ee85 --- /dev/null +++ b/src/integrations/rerank/index.ts @@ -0,0 +1,2 @@ +export type { RerankProvider } from './base.js'; +export type { BaseRerankConfig } from './config/base.js'; diff --git a/src/provider/native/decay.ts b/src/intelligence/ebbinghaus.ts similarity index 100% rename from src/provider/native/decay.ts rename to src/intelligence/ebbinghaus.ts diff --git a/src/intelligence/importance-evaluator.ts b/src/intelligence/importance-evaluator.ts new file mode 100644 index 0000000..9fb2a8b --- /dev/null +++ b/src/intelligence/importance-evaluator.ts @@ -0,0 +1,59 @@ +/** + * Importance evaluator — rule-based + LLM-based importance scoring. + * Port of Python powermem/intelligence/importance_evaluator.py. + */ + +const IMPORTANT_KEYWORDS = [ + 'important', 'critical', 'urgent', 'remember', 'note', + 'preference', 'like', 'dislike', 'hate', 'love', + 'password', 'secret', 'private', 'confidential', +]; + +const EMOTIONAL_WORDS = [ + 'happy', 'sad', 'angry', 'excited', 'worried', 'scared', + 'love', 'hate', 'fear', 'joy', 'sorrow', 'anger', +]; + +export class ImportanceEvaluator { + /** Evaluate importance of content (0-1). Rule-based fallback when no LLM. */ + evaluateImportance( + content: string, + metadata?: Record, + _context?: Record + ): number { + return this.ruleBased(content, metadata); + } + + /** Rule-based importance scoring (0-1). 
*/ + private ruleBased(content: string, metadata?: Record): number { + let score = 0; + const lower = content.toLowerCase(); + + // Length factor + if (content.length > 100) score += 0.1; + else if (content.length > 50) score += 0.05; + + // Keyword importance + for (const kw of IMPORTANT_KEYWORDS) { + if (lower.includes(kw)) score += 0.1; + } + + // Emotional words + for (const w of EMOTIONAL_WORDS) { + if (lower.includes(w)) score += 0.05; + } + + // Punctuation signals + if (content.includes('?')) score += 0.05; + if (content.includes('!')) score += 0.05; + + // Metadata factors + if (metadata) { + if (metadata.priority === 'high') score += 0.2; + else if (metadata.priority === 'medium') score += 0.1; + if (metadata.tags) score += 0.05; + } + + return Math.min(score, 1.0); + } +} diff --git a/src/intelligence/index.ts b/src/intelligence/index.ts new file mode 100644 index 0000000..5fb6d04 --- /dev/null +++ b/src/intelligence/index.ts @@ -0,0 +1,7 @@ +export { computeDecayFactor, applyDecay } from './ebbinghaus.js'; +export { MemoryOptimizer } from './memory-optimizer.js'; +export type { DeduplicateResult, CompressResult } from './memory-optimizer.js'; +export { ImportanceEvaluator } from './importance-evaluator.js'; +export { IntelligenceManager } from './manager.js'; +export type { IntelligenceConfig } from './manager.js'; +export type { IntelligencePlugin } from './plugin.js'; diff --git a/src/intelligence/manager.ts b/src/intelligence/manager.ts new file mode 100644 index 0000000..068a13f --- /dev/null +++ b/src/intelligence/manager.ts @@ -0,0 +1,54 @@ +/** + * Intelligence manager — orchestrator for memory intelligence features. + * Port of Python powermem/intelligence/manager.py. 
+ */ +import type { VectorStoreSearchMatch } from '../storage/base.js'; +import { computeDecayFactor, applyDecay } from './ebbinghaus.js'; +import { ImportanceEvaluator } from './importance-evaluator.js'; + +export interface IntelligenceConfig { + enabled?: boolean; + enableDecay?: boolean; + decayWeight?: number; +} + +export class IntelligenceManager { + private readonly enabled: boolean; + private readonly enableDecay: boolean; + private readonly decayWeight: number; + readonly importanceEvaluator: ImportanceEvaluator; + + constructor(config: IntelligenceConfig = {}) { + this.enabled = config.enabled ?? false; + this.enableDecay = config.enableDecay ?? false; + this.decayWeight = config.decayWeight ?? 0.3; + this.importanceEvaluator = new ImportanceEvaluator(); + } + + /** Enhance metadata with importance score. */ + processMetadata( + content: string, + metadata?: Record + ): Record { + if (!this.enabled) return metadata ?? {}; + const importance = this.importanceEvaluator.evaluateImportance(content, metadata); + return { ...(metadata ?? {}), importance }; + } + + /** Apply Ebbinghaus decay and re-rank search results. */ + processSearchResults(results: VectorStoreSearchMatch[]): VectorStoreSearchMatch[] { + if (!this.enabled || !this.enableDecay) return results; + + for (const match of results) { + const decay = computeDecayFactor({ + createdAt: match.createdAt ?? new Date().toISOString(), + updatedAt: match.updatedAt ?? match.createdAt ?? new Date().toISOString(), + accessCount: match.accessCount ?? 0, + }); + match.score = applyDecay(match.score, decay, this.decayWeight); + } + + results.sort((a, b) => b.score - a.score); + return results; + } +} diff --git a/src/intelligence/memory-optimizer.ts b/src/intelligence/memory-optimizer.ts new file mode 100644 index 0000000..439f9ef --- /dev/null +++ b/src/intelligence/memory-optimizer.ts @@ -0,0 +1,192 @@ +/** + * Memory optimizer — deduplication and compression. 
+ * Port of Python powermem/intelligence/memory_optimizer.py. + */ +import crypto from 'node:crypto'; +import type { VectorStore, VectorStoreRecord } from '../storage/base.js'; +import { cosineSimilarity } from '../utils/search.js'; + +export interface DeduplicateResult { + totalChecked: number; + duplicatesFound: number; + deletedCount: number; + errors: number; +} + +export interface CompressResult { + totalProcessed: number; + clustersFound: number; + compressedCount: number; + newMemoriesCreated: number; + errors: number; +} + +type LLMGenerateFn = (messages: Array<{ role: string; content: string }>) => Promise; + +export class MemoryOptimizer { + constructor( + private readonly store: VectorStore, + private readonly llmGenerate?: LLMGenerateFn + ) {} + + /** + * Deduplicate memories. + * @param strategy - "exact" (MD5 hash) or "semantic" (cosine similarity) + * @param userId - Optional user filter + * @param threshold - Similarity threshold for semantic dedup (0-1) + */ + async deduplicate( + strategy: 'exact' | 'semantic' = 'exact', + userId?: string, + threshold = 0.95 + ): Promise { + if (strategy === 'exact') return this.deduplicateExact(userId); + return this.deduplicateSemantic(userId, threshold); + } + + /** Exact dedup: group by MD5 hash, keep oldest, delete rest. */ + private async deduplicateExact(userId?: string): Promise { + const stats: DeduplicateResult = { totalChecked: 0, duplicatesFound: 0, deletedCount: 0, errors: 0 }; + + const { records } = await this.store.list(userId ? { userId } : {}, 10000); + stats.totalChecked = records.length; + + // Group by hash + const groups = new Map(); + for (const rec of records) { + let hash = rec.hash; + if (!hash && rec.content) { + hash = crypto.createHash('md5').update(rec.content, 'utf-8').digest('hex'); + } + if (hash) { + const group = groups.get(hash) ?? 
[]; + group.push(rec); + groups.set(hash, group); + } + } + + // Delete duplicates (keep oldest per group) + for (const group of groups.values()) { + if (group.length <= 1) continue; + group.sort((a, b) => (a.createdAt ?? a.id).localeCompare(b.createdAt ?? b.id)); + const duplicates = group.slice(1); + stats.duplicatesFound += duplicates.length; + + for (const dup of duplicates) { + const ok = await this.store.remove(dup.id); + if (ok) stats.deletedCount++; + else stats.errors++; + } + } + + return stats; + } + + /** Semantic dedup: compare embeddings, delete similar (above threshold). */ + private async deduplicateSemantic(userId?: string, threshold = 0.95): Promise { + const stats: DeduplicateResult = { totalChecked: 0, duplicatesFound: 0, deletedCount: 0, errors: 0 }; + + const { records } = await this.store.list(userId ? { userId } : {}, 10000); + stats.totalChecked = records.length; + + const withEmbeddings = records.filter((r) => r.embedding && r.embedding.length > 0); + withEmbeddings.sort((a, b) => (a.createdAt ?? a.id).localeCompare(b.createdAt ?? b.id)); + + const unique: VectorStoreRecord[] = []; + const duplicates: VectorStoreRecord[] = []; + + for (const mem of withEmbeddings) { + let isDuplicate = false; + for (const u of unique) { + const sim = cosineSimilarity(mem.embedding!, u.embedding!); + if (sim >= threshold) { + isDuplicate = true; + break; + } + } + if (isDuplicate) duplicates.push(mem); + else unique.push(mem); + } + + stats.duplicatesFound = duplicates.length; + for (const dup of duplicates) { + const ok = await this.store.remove(dup.id); + if (ok) stats.deletedCount++; + else stats.errors++; + } + + return stats; + } + + /** + * Compress memories by clustering similar ones and summarizing via LLM. + * Requires an LLM function to be provided. 
+ */ + async compress(userId?: string, threshold = 0.85): Promise { + const stats: CompressResult = { + totalProcessed: 0, clustersFound: 0, + compressedCount: 0, newMemoriesCreated: 0, errors: 0, + }; + + if (!this.llmGenerate) { + return stats; + } + + const { records } = await this.store.list(userId ? { userId } : {}, 1000); + const valid = records.filter((r) => r.embedding && r.embedding.length > 0); + stats.totalProcessed = valid.length; + if (valid.length === 0) return stats; + + valid.sort((a, b) => a.id.localeCompare(b.id)); + + // Greedy clustering + const processed = new Set(); + const clusters: VectorStoreRecord[][] = []; + + for (let i = 0; i < valid.length; i++) { + const mem = valid[i]; + if (processed.has(mem.id)) continue; + + const cluster = [mem]; + processed.add(mem.id); + + for (let j = i + 1; j < valid.length; j++) { + const candidate = valid[j]; + if (processed.has(candidate.id)) continue; + + const sim = cosineSimilarity(mem.embedding!, candidate.embedding!); + if (sim >= threshold) { + cluster.push(candidate); + processed.add(candidate.id); + } + } + + if (cluster.length > 1) clusters.push(cluster); + } + + stats.clustersFound = clusters.length; + + // LLM summarize each cluster + for (const cluster of clusters) { + try { + const memoriesText = cluster.map((m) => `- ${m.content}`).join('\n'); + const prompt = `Summarize these related memories into one concise memory:\n${memoriesText}\n\nReturn only the summarized memory text.`; + const summary = await this.llmGenerate([{ role: 'user', content: prompt }]); + + if (summary) { + // Delete old memories + for (const old of cluster) { + await this.store.remove(old.id); + stats.compressedCount++; + } + stats.newMemoriesCreated++; + // Note: caller is responsible for adding the summary back via Memory.add() + } + } catch { + stats.errors++; + } + } + + return stats; + } +} diff --git a/src/intelligence/plugin.ts b/src/intelligence/plugin.ts new file mode 100644 index 0000000..fc8aa55 --- /dev/null 
+++ b/src/intelligence/plugin.ts @@ -0,0 +1,11 @@ +/** + * Intelligence plugin interfaces. + * Port of Python powermem/intelligence/plugin.py. + */ +import type { VectorStoreSearchMatch } from '../storage/base.js'; + +export interface IntelligencePlugin { + name: string; + processMetadata?(content: string, metadata: Record): Record; + processSearchResults?(results: VectorStoreSearchMatch[], query: string): VectorStoreSearchMatch[]; +} diff --git a/src/prompts/graph/graph-prompts.ts b/src/prompts/graph/graph-prompts.ts new file mode 100644 index 0000000..6d62983 --- /dev/null +++ b/src/prompts/graph/graph-prompts.ts @@ -0,0 +1,23 @@ +/** + * Graph extraction prompts. + * Port of Python powermem/prompts/graph/graph_prompts.py. + */ + +export const GRAPH_EXTRACTION_PROMPT = `You are a knowledge graph extraction system. Extract entities and relationships from the given text. + +For each entity, identify: +- name: The entity name +- type: The entity type (person, place, organization, concept, etc.) + +For each relationship, identify: +- source: Source entity name +- target: Target entity name +- relation: The relationship type + +Text: {text} + +Return JSON: {"entities": [{"name": "...", "type": "..."}], "relationships": [{"source": "...", "target": "...", "relation": "..."}]}`; + +export function buildGraphExtractionPrompt(text: string): string { + return GRAPH_EXTRACTION_PROMPT.replace('{text}', text); +} diff --git a/src/prompts/graph/graph-tools-prompts.ts b/src/prompts/graph/graph-tools-prompts.ts new file mode 100644 index 0000000..f102f9e --- /dev/null +++ b/src/prompts/graph/graph-tools-prompts.ts @@ -0,0 +1,19 @@ +/** + * Graph tools prompts — for graph update and deletion operations. + */ + +export const GRAPH_UPDATE_PROMPT = `You are a knowledge graph manager. Given existing graph data and new information, decide how to update the graph. 
+ +Existing entities: {existing_entities} +Existing relationships: {existing_relationships} +New information: {new_info} + +Return JSON: {"add_entities": [...], "add_relationships": [...], "remove_entities": [...], "remove_relationships": [...]}`; + +export const GRAPH_DELETE_PROMPT = `You are a knowledge graph manager. Given the following entities and relationships, identify which ones should be removed based on the deletion request. + +Entities: {entities} +Relationships: {relationships} +Deletion request: {request} + +Return JSON: {"remove_entities": [...], "remove_relationships": [...]}`; diff --git a/src/prompts/importance-evaluation.ts b/src/prompts/importance-evaluation.ts new file mode 100644 index 0000000..fcf4a18 --- /dev/null +++ b/src/prompts/importance-evaluation.ts @@ -0,0 +1,29 @@ +/** + * Importance evaluation prompts. + * Port of Python powermem/prompts/importance_evaluation.py. + */ + +export const IMPORTANCE_SYSTEM_PROMPT = `You are an AI assistant that evaluates the importance of memory content on a scale from 0.0 to 1.0. + +Criteria: +- Relevance: How relevant is this to the user's needs? +- Novelty: How new or unique is this information? +- Emotional Impact: How emotionally significant? +- Actionability: How actionable or useful? +- Factual Value: How factual and reliable? +- Personal Significance: How personally important to the user? 
+ +Return JSON: {"importance_score": 0.0-1.0, "reasoning": "..."}`; + +export function getImportanceEvaluationPrompt( + content: string, + metadata?: Record<string, unknown>, + _context?: Record<string, unknown> +): string { + let prompt = `Evaluate the importance of this memory content:\n\n"${content}"`; + if (metadata && Object.keys(metadata).length > 0) { + prompt += `\n\nMetadata: ${JSON.stringify(metadata)}`; + } + prompt += '\n\nReturn JSON: {"importance_score": 0.0-1.0}'; + return prompt; +} diff --git a/src/prompts/index.ts b/src/prompts/index.ts new file mode 100644 index 0000000..bf9808c --- /dev/null +++ b/src/prompts/index.ts @@ -0,0 +1,13 @@ +export { + getFactRetrievalPrompt, + DEFAULT_UPDATE_MEMORY_PROMPT, + buildUpdateMemoryPrompt, +} from './intelligent-memory.js'; +export { IMPORTANCE_SYSTEM_PROMPT, getImportanceEvaluationPrompt } from './importance-evaluation.js'; +export { MEMORY_COMPRESSION_PROMPT, buildCompressionPrompt } from './optimization.js'; +export { QUERY_REWRITE_PROMPT } from './query-rewrite.js'; +export { USER_PROFILE_EXTRACTION_PROMPT } from './user-profile.js'; +export { formatTemplate } from './templates.js'; +export type { PromptTemplate } from './templates.js'; +export { GRAPH_EXTRACTION_PROMPT, buildGraphExtractionPrompt } from './graph/graph-prompts.js'; +export { GRAPH_UPDATE_PROMPT, GRAPH_DELETE_PROMPT } from './graph/graph-tools-prompts.js'; diff --git a/src/provider/native/prompts.ts b/src/prompts/intelligent-memory.ts similarity index 100% rename from src/provider/native/prompts.ts rename to src/prompts/intelligent-memory.ts diff --git a/src/prompts/optimization.ts b/src/prompts/optimization.ts new file mode 100644 index 0000000..f1e20d7 --- /dev/null +++ b/src/prompts/optimization.ts @@ -0,0 +1,16 @@ +/** + * Memory optimization prompts. + * Port of Python powermem/prompts/optimization_prompts.py. + */ + +export const MEMORY_COMPRESSION_PROMPT = `You are an expert memory organizer.
Your task is to compress multiple related memories into a single, concise summary that preserves all key information. + +Here are the memories to compress: +{memories} + +Please provide a single compressed memory that merges these details.`; + +export function buildCompressionPrompt(memories: string[]): string { + const memoriesText = memories.map((m) => `- ${m}`).join('\n'); + return MEMORY_COMPRESSION_PROMPT.replace('{memories}', memoriesText); +} diff --git a/src/prompts/query-rewrite.ts b/src/prompts/query-rewrite.ts new file mode 100644 index 0000000..38b7764 --- /dev/null +++ b/src/prompts/query-rewrite.ts @@ -0,0 +1,9 @@ +/** + * Query rewrite prompts (stub — full implementation in Phase C). + */ + +export const QUERY_REWRITE_PROMPT = `You are a query expansion assistant. Given a user query, expand it with synonyms and related terms to improve search recall. + +Original query: {query} + +Return JSON: {"rewritten_query": "expanded query text"}`; diff --git a/src/prompts/templates.ts b/src/prompts/templates.ts new file mode 100644 index 0000000..6aa37d2 --- /dev/null +++ b/src/prompts/templates.ts @@ -0,0 +1,17 @@ +/** + * Base prompt template system. + * Port of Python powermem/prompts/templates.py. + */ + +export interface PromptTemplate { + system: string; + user: string; +} + +export function formatTemplate(template: string, vars: Record<string, string>): string { + let result = template; + for (const [key, value] of Object.entries(vars)) { + result = result.replaceAll(`{${key}}`, value); + } + return result; +} diff --git a/src/prompts/user-profile.ts b/src/prompts/user-profile.ts new file mode 100644 index 0000000..8c1b8b4 --- /dev/null +++ b/src/prompts/user-profile.ts @@ -0,0 +1,10 @@ +/** + * User profile prompts (stub — full implementation in Phase C). + */ + +export const USER_PROFILE_EXTRACTION_PROMPT = `You are a user profile extractor. Analyze the conversation and extract user profile information.
+ +Conversation: +{conversation} + +Return JSON with profile fields: name, preferences, occupation, location, etc.`; diff --git a/src/server/python-env.ts b/src/server/python-env.ts deleted file mode 100644 index 0ff96ce..0000000 --- a/src/server/python-env.ts +++ /dev/null @@ -1,113 +0,0 @@ -import fs from 'node:fs'; -import path from 'node:path'; -import { execFile } from 'node:child_process'; -import { promisify } from 'node:util'; -import { getDefaultHomeDir, getVenvExecutable } from '../utils/platform.js'; -import { PowerMemInitError } from '../errors/index.js'; -import type { InitOptions } from '../types/options.js'; - -const execFileAsync = promisify(execFile); - -export class PythonEnvManager { - readonly homeDir: string; - readonly venvDir: string; - private readonly lockFile: string; - - constructor(homeDir?: string) { - this.homeDir = homeDir ?? getDefaultHomeDir(); - this.venvDir = path.join(this.homeDir, 'venv'); - this.lockFile = path.join(this.homeDir, 'init.lock'); - } - - /** 获取 venv 内可执行文件路径 */ - getExecutable(name: string): string { - return getVenvExecutable(this.venvDir, name); - } - - /** 检查环境是否已就绪(venv 存在 + powermem 已安装) */ - async isReady(): Promise { - const pip = this.getExecutable('pip'); - if (!fs.existsSync(pip)) return false; - try { - const { stdout } = await execFileAsync(pip, ['show', 'powermem']); - return stdout.includes('Name: powermem'); - } catch { - return false; - } - } - - /** 执行完整初始化(幂等) */ - async setup(options: InitOptions = {}): Promise { - const { verbose = true, powermemVersion, pipArgs = [] } = options; - const log = (msg: string): void => { - if (verbose) console.log(`[powermem-ts] ${msg}`); - }; - - // 已就绪则跳过 - if (await this.isReady()) { - log('Environment already ready, skipping init.'); - return; - } - - // 并发保护:写 lock 文件 - fs.mkdirSync(this.homeDir, { recursive: true }); - fs.writeFileSync(this.lockFile, String(process.pid)); - - try { - const pythonPath = await this.findPython(options.pythonPath, log); - - // 若 
venv 损坏则删除重建 - if (fs.existsSync(this.venvDir)) { - log('Removing corrupted venv...'); - fs.rmSync(this.venvDir, { recursive: true, force: true }); - } - - log(`Creating venv at ${this.venvDir}...`); - await execFileAsync(pythonPath, ['-m', 'venv', this.venvDir]); - - const pkg = powermemVersion ?? 'powermem'; - log(`Installing ${pkg}...`); - const pip = this.getExecutable('pip'); - await execFileAsync(pip, ['install', pkg, ...pipArgs]); - - log('Verifying installation...'); - const serverBin = this.getExecutable('powermem-server'); - if (!fs.existsSync(serverBin)) { - throw new PowerMemInitError('powermem-server not found after installation.'); - } - - log('Init complete.'); - } finally { - if (fs.existsSync(this.lockFile)) fs.rmSync(this.lockFile); - } - } - - /** 依次查找可用 Python(>=3.11) */ - private async findPython( - pythonPath: string | undefined, - log: (msg: string) => void - ): Promise { - const candidates = pythonPath ? [pythonPath] : ['python3', 'python']; - - for (const candidate of candidates) { - try { - // python --version prints to stderr on older versions, stdout on 3.x - const { stdout, stderr } = await execFileAsync(candidate, ['--version']); - const raw = (stdout || stderr).trim(); - const version = raw.replace('Python ', ''); - const [major, minor] = version.split('.').map(Number); - if (major > 3 || (major === 3 && minor >= 11)) { - log(`Using Python: ${candidate} (${version})`); - return candidate; - } - log(`Skipping ${candidate} (${version}): need 3.11+`); - } catch { - // candidate not found, try next - } - } - - throw new PowerMemInitError( - 'Python 3.11+ not found. Please install Python 3.11 or higher.' 
- ); - } -} diff --git a/src/server/server-manager.ts b/src/server/server-manager.ts deleted file mode 100644 index f118e3f..0000000 --- a/src/server/server-manager.ts +++ /dev/null @@ -1,97 +0,0 @@ -import { spawn, type ChildProcess } from 'node:child_process'; -import { PowerMemStartupError } from '../errors/index.js'; -import type { PythonEnvManager } from './python-env.js'; -import type { MemoryOptions } from '../types/options.js'; - -const DEFAULT_PORT = 19527; -const POLL_INTERVAL_MS = 500; - -export class ServerManager { - private process?: ChildProcess; - private isOwner = false; - readonly port: number; - readonly baseUrl: string; - - constructor(port: number = DEFAULT_PORT) { - this.port = port; - this.baseUrl = `http://127.0.0.1:${port}`; - } - - /** 确保 server 在运行,返回 baseUrl */ - async ensureRunning( - envManager: PythonEnvManager, - options: MemoryOptions = {} - ): Promise { - // server 已在运行则复用(不设 isOwner,close 时不 kill) - if (await this.healthCheck()) return this.baseUrl; - - const serverBin = envManager.getExecutable('powermem-server'); - const envFile = options.envFile ?? '.env'; - - this.process = spawn( - serverBin, - ['--host', '127.0.0.1', '--port', String(this.port)], - { - env: { - ...process.env, - POWERMEM_SERVER_AUTH_ENABLED: 'false', - POWERMEM_ENV_FILE: envFile, - }, - stdio: 'pipe', - detached: false, - } - ); - - this.isOwner = true; - - // 父进程退出时清理子进程 - const cleanup = (): void => this.killSync(); - process.on('exit', cleanup); - process.on('SIGINT', () => { cleanup(); process.exit(); }); - process.on('SIGTERM', () => { cleanup(); process.exit(); }); - - const timeout = options.startupTimeout ?? 
30_000; - await this.waitForReady(timeout); - - return this.baseUrl; - } - - async shutdown(): Promise { - if (this.isOwner && this.process) { - this.process.kill('SIGTERM'); - this.process = undefined; - this.isOwner = false; - } - } - - async healthCheck(): Promise { - try { - const res = await fetch(`${this.baseUrl}/api/v1/system/health`, { - signal: AbortSignal.timeout(2000), - }); - return res.ok; - } catch { - return false; - } - } - - private async waitForReady(timeout: number): Promise { - const deadline = Date.now() + timeout; - while (Date.now() < deadline) { - if (await this.healthCheck()) return; - await new Promise((resolve) => setTimeout(resolve, POLL_INTERVAL_MS)); - } - this.killSync(); - throw new PowerMemStartupError( - `powermem-server did not become ready within ${timeout}ms.` - ); - } - - private killSync(): void { - try { - this.process?.kill('SIGKILL'); - } catch { - // ignore - } - } -} diff --git a/src/settings.ts b/src/settings.ts new file mode 100644 index 0000000..bd46aad --- /dev/null +++ b/src/settings.ts @@ -0,0 +1,25 @@ +/** + * Settings utilities — env file resolution. + * Port of Python powermem/settings.py. + */ +import fs from 'node:fs'; +import path from 'node:path'; +import { fileURLToPath } from 'node:url'; + +/** Resolve the default .env file path by checking common locations. 
*/ +export function getDefaultEnvFile(): string | undefined { + const candidates = [ + path.resolve(process.cwd(), '.env'), + ]; + // In ESM, use import.meta.url to resolve relative paths + try { + const thisDir = path.dirname(fileURLToPath(import.meta.url)); + candidates.push(path.resolve(thisDir, '..', '..', '.env')); + } catch { + // Fallback if import.meta.url not available (CJS) + } + for (const candidate of candidates) { + if (fs.existsSync(candidate)) return candidate; + } + return undefined; +} diff --git a/src/storage/adapter.ts b/src/storage/adapter.ts new file mode 100644 index 0000000..4b2584d --- /dev/null +++ b/src/storage/adapter.ts @@ -0,0 +1,105 @@ +/** + * Storage adapter — bridges VectorStore with the Memory core layer. + * Port of Python powermem/storage/adapter.py. + * + * Wraps a VectorStore and provides higher-level operations: + * - Filtered memory operations (userId/agentId/runId) + * - Statistics aggregation + * - Unique user listing + */ +import type { + VectorStore, + VectorStoreRecord, + VectorStoreFilter, + VectorStoreSearchMatch, + VectorStoreListOptions, +} from './base.js'; + +export class StorageAdapter { + constructor(private readonly store: VectorStore) {} + + async addMemory(id: string, vector: number[], payload: Record): Promise { + await this.store.insert(id, vector, payload); + } + + async searchMemories( + queryVector: number[], + filters: VectorStoreFilter = {}, + limit = 30 + ): Promise { + return this.store.search(queryVector, filters, limit); + } + + async getMemory(id: string, userId?: string, agentId?: string): Promise { + return this.store.getById(id, userId, agentId); + } + + async updateMemory(id: string, vector: number[], payload: Record): Promise { + await this.store.update(id, vector, payload); + } + + async deleteMemory(id: string): Promise { + return this.store.remove(id); + } + + async listMemories( + filters: VectorStoreFilter = {}, + limit = 100, + offset = 0, + options: VectorStoreListOptions = {} + ): 
Promise<{ records: VectorStoreRecord[]; total: number }> { + return this.store.list(filters, limit, offset, options); + } + + async countMemories(filters: VectorStoreFilter = {}): Promise<number> { + return this.store.count(filters); + } + + async deleteAllMemories(filters: VectorStoreFilter = {}): Promise<void> { + await this.store.removeAll(filters); + } + + async incrementAccessCount(id: string): Promise<void> { + await this.store.incrementAccessCount(id); + } + + async incrementAccessCountBatch(ids: string[]): Promise<void> { + await this.store.incrementAccessCountBatch(ids); + } + + async getStatistics(filters: VectorStoreFilter = {}): Promise<Record<string, unknown>> { + const total = await this.store.count(filters); + const { records } = await this.store.list(filters, 1, 0, { sortBy: 'created_at', order: 'asc' }); + const oldest = records[0]?.createdAt; + const { records: newest } = await this.store.list(filters, 1, 0, { sortBy: 'created_at', order: 'desc' }); + const newestAt = newest[0]?.createdAt; + + return { + totalMemories: total, + oldestMemory: oldest, + newestMemory: newestAt, + }; + } + + async getUniqueUsers(limit = 1000): Promise<string[]> { + const { records } = await this.store.list({}, limit); + const users = new Set<string>(); + for (const r of records) { + if (r.userId) users.add(r.userId); + } + return Array.from(users); + } + + async reset(): Promise<void> { + await this.store.removeAll(); + } + + async close(): Promise<void> { + await this.store.close(); + } + + /** Direct access to the underlying VectorStore. */ + get raw(): VectorStore { + return this.store; + } +} diff --git a/src/provider/native/vector-store.ts b/src/storage/base.ts similarity index 72% rename from src/provider/native/vector-store.ts rename to src/storage/base.ts index 36e6998..8379525 100644 --- a/src/provider/native/vector-store.ts +++ b/src/storage/base.ts @@ -64,3 +64,17 @@ export interface VectorStore { removeAll(filters?: VectorStoreFilter): Promise<void>; close(): Promise<void>; } + +/** + * GraphStoreBase — abstract interface for graph storage.
+ * Port of Python powermem/storage/base.py GraphStoreBase. + */ +export interface GraphStoreBase { + add(data: string, filters: Record): Promise>; + search(query: string, filters: Record, limit?: number): Promise>>; + deleteAll(filters: Record): Promise; + getAll(filters: Record, limit?: number): Promise>>; + reset(): Promise; + getStatistics(filters?: Record): Promise>; + getUniqueUsers(): Promise; +} diff --git a/src/storage/config/base.ts b/src/storage/config/base.ts new file mode 100644 index 0000000..bad96c9 --- /dev/null +++ b/src/storage/config/base.ts @@ -0,0 +1,8 @@ +/** + * Base vector store configuration. + * Port of Python powermem/storage/config/base.py. + */ +export interface BaseVectorStoreConfig { + collectionName?: string; + embeddingModelDims?: number; +} diff --git a/src/storage/config/seekdb.ts b/src/storage/config/seekdb.ts new file mode 100644 index 0000000..fc0d880 --- /dev/null +++ b/src/storage/config/seekdb.ts @@ -0,0 +1,8 @@ +import type { BaseVectorStoreConfig } from './base.js'; + +export interface SeekDBConfig extends BaseVectorStoreConfig { + path?: string; + database?: string; + distance?: 'cosine' | 'l2' | 'inner_product'; + dimension?: number; +} diff --git a/src/storage/config/sqlite.ts b/src/storage/config/sqlite.ts new file mode 100644 index 0000000..f51cd8b --- /dev/null +++ b/src/storage/config/sqlite.ts @@ -0,0 +1,5 @@ +import type { BaseVectorStoreConfig } from './base.js'; + +export interface SQLiteConfig extends BaseVectorStoreConfig { + path?: string; +} diff --git a/src/storage/factory.ts b/src/storage/factory.ts new file mode 100644 index 0000000..dbac382 --- /dev/null +++ b/src/storage/factory.ts @@ -0,0 +1,58 @@ +/** + * Storage factory — create VectorStore instances by provider name. + * Port of Python powermem/storage/factory.py. 
+ */ +import type { VectorStore } from './base.js'; + +type StoreCreator = (config: Record) => Promise; + +const registry = new Map(); + +export class VectorStoreFactory { + /** Register a new vector store provider. */ + static register(name: string, creator: StoreCreator): void { + registry.set(name.toLowerCase(), creator); + } + + /** Create a VectorStore by provider name + config dict. */ + static async create(provider: string, config: Record = {}): Promise { + const name = provider.toLowerCase(); + const creator = registry.get(name); + if (!creator) { + throw new Error( + `Unsupported VectorStore provider: "${provider}". ` + + `Supported: ${VectorStoreFactory.getSupportedProviders().join(', ')}` + ); + } + return creator(config); + } + + /** Get list of registered provider names. */ + static getSupportedProviders(): string[] { + return Array.from(registry.keys()); + } + + /** Check if a provider is registered. */ + static hasProvider(provider: string): boolean { + return registry.has(provider.toLowerCase()); + } +} + +// ─── Register built-in providers ────────────────────────────────────────── + +VectorStoreFactory.register('sqlite', async (config) => { + const { SQLiteStore } = await import('./sqlite/sqlite.js'); + const dbPath = (config.path as string) ?? ':memory:'; + return new SQLiteStore(dbPath); +}); + +VectorStoreFactory.register('seekdb', async (config) => { + const { SeekDBStore } = await import('./seekdb/seekdb.js'); + return SeekDBStore.create({ + path: (config.path as string) ?? 
'./seekdb_data', + database: config.database as string | undefined, + collectionName: config.collectionName as string | undefined, + distance: config.distance as 'cosine' | 'l2' | 'inner_product' | undefined, + dimension: config.dimension as number | undefined, + }); +}); diff --git a/src/storage/index.ts b/src/storage/index.ts new file mode 100644 index 0000000..51159df --- /dev/null +++ b/src/storage/index.ts @@ -0,0 +1,15 @@ +export type { + VectorStore, + VectorStoreRecord, + VectorStoreFilter, + VectorStoreSearchMatch, + VectorStoreListOptions, +} from './base.js'; +export { SQLiteStore } from './sqlite/sqlite.js'; +export { SeekDBStore } from './seekdb/seekdb.js'; +export type { SeekDBStoreOptions } from './seekdb/seekdb.js'; +export { VectorStoreFactory } from './factory.js'; +export { StorageAdapter } from './adapter.js'; +export type { BaseVectorStoreConfig } from './config/base.js'; +export type { SQLiteConfig } from './config/sqlite.js'; +export type { SeekDBConfig } from './config/seekdb.js'; diff --git a/src/provider/native/seekdb-store.ts b/src/storage/seekdb/seekdb.ts similarity index 93% rename from src/provider/native/seekdb-store.ts rename to src/storage/seekdb/seekdb.ts index ff27623..0235c0b 100644 --- a/src/provider/native/seekdb-store.ts +++ b/src/storage/seekdb/seekdb.ts @@ -4,7 +4,7 @@ import type { VectorStoreFilter, VectorStoreSearchMatch, VectorStoreListOptions, -} from './vector-store.js'; +} from '../base.js'; export interface SeekDBStoreOptions { path: string; @@ -40,6 +40,7 @@ export class SeekDBStore implements VectorStore { const schema = new Schema({ vectorIndex: new VectorIndexConfig({ hnsw: { dimension, distance }, + embeddingFunction: null, // We pass pre-computed embeddings, no auto-vectorization }), }); @@ -65,7 +66,8 @@ export class SeekDBStore implements VectorStore { scope: (payload.scope as string) ?? '', category: (payload.category as string) ?? '', access_count: (payload.access_count as number) ?? 
0, - metadata_json: JSON.stringify(payload.metadata ?? {}), + // Base64-encode metadata to avoid SeekDB C engine JSON parsing issues + metadata_b64: Buffer.from(JSON.stringify(payload.metadata ?? {})).toString('base64'), }; } @@ -84,7 +86,7 @@ export class SeekDBStore implements VectorStore { agentId: m.agent_id || undefined, runId: m.run_id || undefined, hash: m.hash || undefined, - metadata: m.metadata_json ? JSON.parse(m.metadata_json) : undefined, + metadata: m.metadata_b64 ? JSON.parse(Buffer.from(m.metadata_b64, 'base64').toString()) : (m.metadata_json ? JSON.parse(m.metadata_json) : undefined), embedding: embedding ?? undefined, createdAt: m.created_at || new Date().toISOString(), updatedAt: m.updated_at || new Date().toISOString(), @@ -229,7 +231,7 @@ export class SeekDBStore implements VectorStore { id: result.ids[0][i], content: result.documents?.[0]?.[i] ?? '', score: Math.max(0, 1 - distance), - metadata: metadata.metadata_json ? JSON.parse(metadata.metadata_json) : undefined, + metadata: metadata.metadata_b64 ? JSON.parse(Buffer.from(metadata.metadata_b64, 'base64').toString()) : (metadata.metadata_json ? JSON.parse(metadata.metadata_json) : undefined), createdAt: metadata.created_at || undefined, updatedAt: metadata.updated_at || undefined, accessCount: metadata.access_count ?? 
0, diff --git a/src/provider/native/store.ts b/src/storage/sqlite/sqlite.ts similarity index 98% rename from src/provider/native/store.ts rename to src/storage/sqlite/sqlite.ts index e8bb88c..975357b 100644 --- a/src/provider/native/store.ts +++ b/src/storage/sqlite/sqlite.ts @@ -1,12 +1,12 @@ import Database from 'better-sqlite3'; -import { cosineSimilarity } from './search.js'; +import { cosineSimilarity } from '../../utils/search.js'; import type { VectorStore, VectorStoreRecord, VectorStoreFilter, VectorStoreSearchMatch, VectorStoreListOptions, -} from './vector-store.js'; +} from '../base.js'; // Re-export for backward compatibility export type StoreRecord = VectorStoreRecord; diff --git a/src/user-memory/index.ts b/src/user-memory/index.ts new file mode 100644 index 0000000..325e562 --- /dev/null +++ b/src/user-memory/index.ts @@ -0,0 +1,6 @@ +export { UserMemory } from './user-memory.js'; +export type { UserMemoryConfig } from './user-memory.js'; +export { QueryRewriter } from './query-rewrite/rewriter.js'; +export type { QueryRewriteResult } from './query-rewrite/rewriter.js'; +export type { UserProfile, UserProfileStore } from './storage/user-profile.js'; +export { SQLiteUserProfileStore } from './storage/user-profile-sqlite.js'; diff --git a/src/user-memory/query-rewrite/rewriter.ts b/src/user-memory/query-rewrite/rewriter.ts new file mode 100644 index 0000000..1c24430 --- /dev/null +++ b/src/user-memory/query-rewrite/rewriter.ts @@ -0,0 +1,63 @@ +/** + * Query rewriter — LLM-based query expansion with user profile context. + * Port of Python powermem/user_memory/query_rewrite/rewriter.py. 
+ */ +import type { BaseChatModel } from '@langchain/core/language_models/chat_models'; +import { HumanMessage, SystemMessage } from '@langchain/core/messages'; + +export interface QueryRewriteResult { + originalQuery: string; + rewrittenQuery: string; + isRewritten: boolean; + profileUsed?: string; + error?: string; +} + +export class QueryRewriter { + private readonly enabled: boolean; + private readonly customInstructions?: string; + + constructor( + private readonly llm: BaseChatModel, + config: Record = {} + ) { + this.enabled = (config.enabled as boolean) ?? false; + this.customInstructions = config.prompt as string | undefined; + } + + async rewrite(query: string, profileContent?: string): Promise { + if (!this.enabled || !profileContent || query.length < 3) { + return { originalQuery: query, rewrittenQuery: query, isRewritten: false }; + } + + try { + const systemPrompt = this.customInstructions ?? + 'You are a query expansion assistant. Given a user query and their profile, rewrite the query to improve search recall. Return only the rewritten query text.'; + + const userPrompt = `User profile:\n${profileContent}\n\nOriginal query: "${query}"\n\nRewrite the query to include relevant context from the profile. Return only the rewritten query.`; + + const response = await this.llm.invoke([ + new SystemMessage(systemPrompt), + new HumanMessage(userPrompt), + ]); + + const rewritten = typeof response.content === 'string' + ? 
response.content.trim() + : query; + + return { + originalQuery: query, + rewrittenQuery: rewritten || query, + isRewritten: rewritten !== query && rewritten.length > 0, + profileUsed: profileContent.slice(0, 100), + }; + } catch (err) { + return { + originalQuery: query, + rewrittenQuery: query, + isRewritten: false, + error: String(err), + }; + } + } +} diff --git a/src/user-memory/storage/user-profile-sqlite.ts b/src/user-memory/storage/user-profile-sqlite.ts new file mode 100644 index 0000000..f737031 --- /dev/null +++ b/src/user-memory/storage/user-profile-sqlite.ts @@ -0,0 +1,105 @@ +/** + * SQLite-backed user profile storage. + * Port of Python powermem/user_memory/storage/user_profile_sqlite.py. + */ +import Database from 'better-sqlite3'; +import type { UserProfile, UserProfileStore } from './user-profile.js'; +import { SnowflakeIDGenerator } from '../../utils/snowflake.js'; + +export class SQLiteUserProfileStore implements UserProfileStore { + private db: Database.Database; + private idGen = new SnowflakeIDGenerator(); + + constructor(dbPath = ':memory:') { + this.db = new Database(dbPath); + this.db.pragma('journal_mode = WAL'); + this.db.exec(` + CREATE TABLE IF NOT EXISTS user_profiles ( + id TEXT PRIMARY KEY, + user_id TEXT NOT NULL, + profile_content TEXT, + topics TEXT, + created_at TEXT NOT NULL DEFAULT (datetime('now')), + updated_at TEXT NOT NULL DEFAULT (datetime('now')) + ) + `); + this.db.exec('CREATE INDEX IF NOT EXISTS idx_profiles_user_id ON user_profiles(user_id)'); + } + + async saveProfile(userId: string, profileContent?: string, topics?: Record): Promise { + const existing = this.db.prepare('SELECT id FROM user_profiles WHERE user_id = ? ORDER BY id DESC LIMIT 1') + .get(userId) as { id: string } | undefined; + + const now = new Date().toISOString(); + const topicsJson = topics ? 
JSON.stringify(topics) : null; + + if (existing) { + const sets: string[] = ['updated_at = ?']; + const params: unknown[] = [now]; + if (profileContent !== undefined) { sets.push('profile_content = ?'); params.push(profileContent); } + if (topics !== undefined) { sets.push('topics = ?'); params.push(topicsJson); } + params.push(existing.id); + this.db.prepare(`UPDATE user_profiles SET ${sets.join(', ')} WHERE id = ?`).run(...params); + return existing.id; + } + + const id = this.idGen.nextId(); + this.db.prepare( + 'INSERT INTO user_profiles (id, user_id, profile_content, topics, created_at, updated_at) VALUES (?, ?, ?, ?, ?, ?)' + ).run(id, userId, profileContent ?? null, topicsJson, now, now); + return id; + } + + async getProfileByUserId(userId: string): Promise { + const row = this.db.prepare('SELECT * FROM user_profiles WHERE user_id = ? ORDER BY id DESC LIMIT 1') + .get(userId) as any; + if (!row) return null; + return this.toProfile(row); + } + + async getProfiles(options: { userId?: string; mainTopic?: string; limit?: number; offset?: number } = {}): Promise { + let sql = 'SELECT * FROM user_profiles'; + const params: unknown[] = []; + if (options.userId) { sql += ' WHERE user_id = ?'; params.push(options.userId); } + sql += ' ORDER BY id DESC'; + if (options.limit) { sql += ' LIMIT ?'; params.push(options.limit); } + if (options.offset) { sql += ' OFFSET ?'; params.push(options.offset); } + + const rows = this.db.prepare(sql).all(...params) as any[]; + let profiles = rows.map((r) => this.toProfile(r)); + + if (options.mainTopic) { + profiles = profiles.filter((p) => p.topics && options.mainTopic! 
in (p.topics as Record)); + } + return profiles; + } + + async deleteProfile(profileId: string): Promise { + const result = this.db.prepare('DELETE FROM user_profiles WHERE id = ?').run(profileId); + return result.changes > 0; + } + + async countProfiles(userId?: string): Promise { + if (userId) { + const row = this.db.prepare('SELECT COUNT(*) as cnt FROM user_profiles WHERE user_id = ?').get(userId) as { cnt: number }; + return row.cnt; + } + const row = this.db.prepare('SELECT COUNT(*) as cnt FROM user_profiles').get() as { cnt: number }; + return row.cnt; + } + + async close(): Promise { + this.db.close(); + } + + private toProfile(row: any): UserProfile { + return { + id: String(row.id), + userId: row.user_id, + profileContent: row.profile_content ?? undefined, + topics: row.topics ? JSON.parse(row.topics) : undefined, + createdAt: row.created_at, + updatedAt: row.updated_at, + }; + } +} diff --git a/src/user-memory/storage/user-profile.ts b/src/user-memory/storage/user-profile.ts new file mode 100644 index 0000000..5310689 --- /dev/null +++ b/src/user-memory/storage/user-profile.ts @@ -0,0 +1,22 @@ +/** + * User profile types and storage interface. + * Port of Python powermem/user_memory/storage/base.py + user_profile.py. 
+ */ + +export interface UserProfile { + id: string; + userId: string; + profileContent?: string; + topics?: Record; + createdAt: string; + updatedAt: string; +} + +export interface UserProfileStore { + saveProfile(userId: string, profileContent?: string, topics?: Record): Promise; + getProfileByUserId(userId: string): Promise; + getProfiles(options?: { userId?: string; mainTopic?: string; subTopic?: string; limit?: number; offset?: number }): Promise; + deleteProfile(profileId: string): Promise; + countProfiles(userId?: string): Promise; + close(): Promise; +} diff --git a/src/user-memory/user-memory.ts b/src/user-memory/user-memory.ts new file mode 100644 index 0000000..48f448e --- /dev/null +++ b/src/user-memory/user-memory.ts @@ -0,0 +1,126 @@ +/** + * UserMemory — user profile extraction and profile-aware search. + * Port of Python powermem/user_memory/user_memory.py. + */ +import type { Memory } from '../core/memory.js'; +import type { UserProfileStore, UserProfile } from './storage/user-profile.js'; +import type { QueryRewriter } from './query-rewrite/rewriter.js'; + +export interface UserMemoryConfig { + memory: Memory; + profileStore: UserProfileStore; + queryRewriter?: QueryRewriter; +} + +export class UserMemory { + private readonly memory: Memory; + private readonly profileStore: UserProfileStore; + private readonly queryRewriter?: QueryRewriter; + + constructor(config: UserMemoryConfig) { + this.memory = config.memory; + this.profileStore = config.profileStore; + this.queryRewriter = config.queryRewriter; + } + + /** Add memory + optionally extract user profile from content. 
*/ + async add( + content: string, + options: { + userId: string; + agentId?: string; + metadata?: Record; + infer?: boolean; + extractProfile?: boolean; + profileContent?: string; + } + ): Promise> { + const memResult = await this.memory.add(content, { + userId: options.userId, + agentId: options.agentId, + metadata: options.metadata, + infer: options.infer, + }); + + const result: Record = { ...memResult, profileExtracted: false }; + + if (options.extractProfile && options.profileContent) { + await this.profileStore.saveProfile(options.userId, options.profileContent); + result.profileExtracted = true; + result.profileContent = options.profileContent; + } + + return result; + } + + /** Search with optional profile-aware query rewriting. */ + async search( + query: string, + options: { + userId?: string; + agentId?: string; + limit?: number; + threshold?: number; + addProfile?: boolean; + } = {} + ): Promise> { + let effectiveQuery = query; + + // Query rewrite with user profile context + if (this.queryRewriter && options.userId) { + const profile = await this.profileStore.getProfileByUserId(options.userId); + if (profile?.profileContent) { + const rewriteResult = await this.queryRewriter.rewrite(query, profile.profileContent); + if (rewriteResult.isRewritten) { + effectiveQuery = rewriteResult.rewrittenQuery; + } + } + } + + const searchResult = await this.memory.search(effectiveQuery, { + userId: options.userId, + agentId: options.agentId, + limit: options.limit, + threshold: options.threshold, + }); + + const result: Record = { ...searchResult }; + + if (options.addProfile && options.userId) { + const profile = await this.profileStore.getProfileByUserId(options.userId); + if (profile) { + result.profileContent = profile.profileContent; + result.topics = profile.topics; + } + } + + return result; + } + + /** Get user profile. */ + async profile(userId: string): Promise { + return this.profileStore.getProfileByUserId(userId); + } + + /** Delete user profile. 
*/ + async deleteProfile(userId: string): Promise<boolean> { + const profile = await this.profileStore.getProfileByUserId(userId); + if (!profile) return false; + return this.profileStore.deleteProfile(profile.id); + } + + /** Delete all memories + profile for a user. */ + async deleteAll(userId: string, options: { deleteProfile?: boolean } = {}): Promise<boolean> { + await this.memory.deleteAll({ userId }); + if (options.deleteProfile) { + const profile = await this.profileStore.getProfileByUserId(userId); + if (profile) await this.profileStore.deleteProfile(profile.id); + } + return true; + } + + async close(): Promise<void> { + await this.memory.close(); + await this.profileStore.close(); + } +} diff --git a/src/utils/filter-parser.ts b/src/utils/filter-parser.ts new file mode 100644 index 0000000..9dcd84e --- /dev/null +++ b/src/utils/filter-parser.ts @@ -0,0 +1,64 @@ +/** + * Advanced filter parser. + * Port of Python powermem/utils/filter_parser.py. + * + * Transforms user-friendly filter keys into storage-compatible format: + * - start_time/end_time → created_at range + * - tags (array) → $in operator + * - type → category mapping + * - importance (number) → $gte operator + */ + +export function parseAdvancedFilters( + filters?: Record<string, unknown> +): Record<string, unknown> | undefined { + if (!filters || Object.keys(filters).length === 0) return undefined; + + const parsed = { ...filters }; + + // 1. Time range → created_at + if ('start_time' in parsed || 'end_time' in parsed) { + const createdAt: Record<string, unknown> = + typeof parsed.created_at === 'object' && parsed.created_at !== null + ? { ...(parsed.created_at as Record<string, unknown>) } + : {}; + + if ('start_time' in parsed) { + createdAt.$gte = parsed.start_time; + delete parsed.start_time; + } + if ('end_time' in parsed) { + createdAt.$lte = parsed.end_time; + delete parsed.end_time; + } + parsed.created_at = createdAt; + } + + // 2. 
Tags → $in + if ('tags' in parsed) { + const tags = parsed.tags; + delete parsed.tags; + if (Array.isArray(tags) && tags.length > 0) { + parsed.tags = { $in: tags }; + } else if (tags) { + parsed.tags = tags; + } + } + + // 3. type → category + if ('type' in parsed) { + parsed.category = parsed.type; + delete parsed.type; + } + + // 4. importance → $gte + if ('importance' in parsed) { + const importance = parsed.importance; + delete parsed.importance; + if (typeof importance === 'number') { + parsed.importance = { $gte: importance }; + } + } + + return parsed; +} diff --git a/src/utils/io.ts b/src/utils/io.ts new file mode 100644 index 0000000..abbcfac --- /dev/null +++ b/src/utils/io.ts @@ -0,0 +1,34 @@ +/** + * Import/export utilities. + * Port of Python powermem/utils/io.py. + */ + +export function exportToJson(records: Record<string, unknown>[]): string { + return JSON.stringify(records, null, 2); +} + +export function importFromJson(json: string): Record<string, unknown>[] { + const parsed = JSON.parse(json); + if (!Array.isArray(parsed)) throw new Error('Expected a JSON array'); + return parsed; +} + +export function exportToCsv( + records: Record<string, unknown>[], + columns?: string[] +): string { + if (records.length === 0) return ''; + const cols = columns ?? Object.keys(records[0]); + const header = cols.join(','); + const rows = records.map((r) => + cols.map((c) => { + const val = r[c]; + if (val == null) return ''; + const str = String(val); + return str.includes(',') || str.includes('"') || str.includes('\n') + ? 
`"${str.replace(/"/g, '""')}"` + : str; + }).join(',') + ); + return [header, ...rows].join('\n'); +} diff --git a/src/provider/native/search.ts b/src/utils/search.ts similarity index 100% rename from src/provider/native/search.ts rename to src/utils/search.ts diff --git a/src/provider/native/snowflake.ts b/src/utils/snowflake.ts similarity index 100% rename from src/provider/native/snowflake.ts rename to src/utils/snowflake.ts diff --git a/src/utils/stats.ts b/src/utils/stats.ts new file mode 100644 index 0000000..42b6ec2 --- /dev/null +++ b/src/utils/stats.ts @@ -0,0 +1,100 @@ +/** + * Memory statistics calculation. + * Port of Python powermem/utils/stats.py. + */ + +export interface MemoryStats { + totalMemories: number; + byType: Record<string, number>; + avgImportance: number; + topAccessed: Array<{ id: string; content: string; accessCount: number }>; + growthTrend: Record<string, number>; + ageDistribution: Record<string, number>; +} + +interface MemoryDict { + id?: string; + memoryId?: string; + content?: string; + memory?: string; + category?: string; + createdAt?: string; + created_at?: string; + accessCount?: number; + access_count?: number; + importance?: number; + metadata?: Record<string, unknown>; +} + +export function calculateStatsFromMemories(memories: MemoryDict[]): MemoryStats { + const total = memories.length; + if (total === 0) { + return { + totalMemories: 0, + byType: {}, + avgImportance: 0, + topAccessed: [], + growthTrend: {}, + ageDistribution: { '< 1 day': 0, '1-7 days': 0, '7-30 days': 0, '> 30 days': 0 }, + }; + } + + const byType: Record<string, number> = {}; + let totalImportance = 0; + let importanceCount = 0; + const accessList: Array<{ id: string; content: string; accessCount: number }> = []; + const growthByDate: Record<string, number> = {}; + const ageDistribution: Record<string, number> = { + '< 1 day': 0, '1-7 days': 0, '7-30 days': 0, '> 30 days': 0, + }; + const now = Date.now(); + + for (const m of memories) { + // Category + const meta = (typeof m.metadata === 'object' && m.metadata) ? m.metadata : {}; + const memType = m.category ?? 
(meta.category as string) ?? 'unknown'; + byType[memType] = (byType[memType] ?? 0) + 1; + + // Importance + const importance = m.importance ?? (meta.importance as number | undefined); + if (importance != null && importance > 0) { + totalImportance += importance; + importanceCount++; + } + + // Access count + const ac = m.accessCount ?? m.access_count ?? 0; + accessList.push({ + id: m.id ?? m.memoryId ?? '', + content: ((m.content ?? m.memory) ?? '').slice(0, 100), + accessCount: typeof ac === 'number' ? ac : 0, + }); + + // Growth trend + age distribution + const createdAt = m.createdAt ?? m.created_at; + if (createdAt) { + const d = new Date(createdAt); + if (!isNaN(d.getTime())) { + const dateKey = d.toISOString().split('T')[0]; + growthByDate[dateKey] = (growthByDate[dateKey] ?? 0) + 1; + + const ageDays = (now - d.getTime()) / (1000 * 60 * 60 * 24); + if (ageDays < 1) ageDistribution['< 1 day']++; + else if (ageDays < 7) ageDistribution['1-7 days']++; + else if (ageDays < 30) ageDistribution['7-30 days']++; + else ageDistribution['> 30 days']++; + } + } + } + + accessList.sort((a, b) => b.accessCount - a.accessCount); + + return { + totalMemories: total, + byType, + avgImportance: importanceCount > 0 ? 
Math.round((totalImportance / importanceCount) * 100) / 100 : 0, + topAccessed: accessList.slice(0, 10), + growthTrend: growthByDate, + ageDistribution, + }; +} diff --git a/src/version.ts b/src/version.ts new file mode 100644 index 0000000..9845ee9 --- /dev/null +++ b/src/version.ts @@ -0,0 +1,6 @@ +export const VERSION = '0.3.0'; +export const VERSION_INFO = VERSION.split('.').map(Number) as [number, number, number]; + +export function getVersion(): string { + return VERSION; +} diff --git a/tests/bdd/README.md b/tests/bdd/README.md new file mode 100644 index 0000000..7858967 --- /dev/null +++ b/tests/bdd/README.md @@ -0,0 +1,449 @@ +# BDD Test Cases — PowerMem CLI & Dashboard + +## CLI Test Scenarios + +### Feature: CLI Version and Help +```gherkin +Scenario: Show version number + When I run "pmem --version" + Then the output matches pattern "\\d+\\.\\d+\\.\\d+" + +Scenario: Show main help + When I run "pmem --help" + Then the output contains "config" + And the output contains "memory" + And the output contains "stats" + And the output contains "manage" + And the output contains "shell" +``` + +### Feature: Config Management +```gherkin +Scenario: Show full configuration + When I run "pmem config show" + Then the output contains "vectorStore" + And the output contains "llm" + And the output contains "embedder" + +Scenario: Show configuration as JSON + When I run "pmem config show --json" + Then the output is valid JSON + And the JSON has key "vectorStore" + And the JSON has key "llm" + +Scenario: Show specific config section + When I run "pmem config show --section llm" + Then the output contains "llm" + And the output does not contain "vectorStore" + +Scenario: Validate configuration + When I run "pmem config validate" + Then the output contains "valid" + And the exit code is 0 + +Scenario: Test component connections + When I run "pmem config test" + Then the output contains "Database" + And the output contains "LLM" + And the output contains "Embedder" + 
+Scenario: Test specific component + When I run "pmem config test --component database" + Then the output contains "Database" +``` + +### Feature: Memory CRUD Operations +```gherkin +Scenario: Add a memory + Given the database is empty + When I run "pmem memory add 'User likes coffee' --user-id user1 --no-infer" + Then the output contains "Memory created" + And the exit code is 0 + +Scenario: Add a memory with JSON output + Given the database is empty + When I run "pmem --json memory add 'Test memory' --user-id user1 --no-infer" + Then the output is valid JSON + And the JSON field "memories" is an array with length >= 1 + +Scenario: Search memories + Given memory "I love coffee" exists for user "user1" + When I run "pmem memory search 'coffee' --user-id user1" + Then the output contains "coffee" + And the output contains "results" + +Scenario: Search with limit + Given 10 memories exist for user "user1" + When I run "pmem memory search 'memory' --user-id user1 --limit 3" + Then at most 3 results are shown + +Scenario: List memories + Given memories exist for user "user1" + When I run "pmem memory list --user-id user1" + Then the output contains "Total:" + +Scenario: List with pagination + Given 10 memories exist for user "user1" + When I run "pmem memory list --user-id user1 --limit 3 --offset 0" + Then the output shows 3 memories + +Scenario: List with sorting + Given multiple memories exist with different timestamps + When I run "pmem memory list --sort created_at --order asc" + Then memories are sorted by creation time ascending + +Scenario: Get memory by ID + Given memory with ID "12345" exists + When I run "pmem memory get 12345" + Then the output contains "ID: 12345" + And the output contains "Content:" + +Scenario: Get non-existent memory + When I run "pmem memory get 99999" + Then the output contains "not found" + +Scenario: Delete memory + Given memory with ID "12345" exists + When I run "pmem memory delete 12345" + Then the output contains "Deleted" + 
+Scenario: Delete non-existent memory + When I run "pmem memory delete 99999" + Then the output contains "Not found" + +Scenario: Delete all memories requires confirmation + When I run "pmem memory delete-all --user-id user1" + Then the output contains "Pass --confirm" + +Scenario: Delete all memories with confirmation + Given memories exist for user "user1" + When I run "pmem memory delete-all --user-id user1 --confirm" + Then the output contains "Deleted" +``` + +### Feature: Memory with Scope and Category +```gherkin +Scenario: Add memory with scope and category + When I run "pmem memory add 'Buy milk' --user-id u1 --scope personal --category todo --no-infer" + Then the memory is stored with scope "personal" and category "todo" + +Scenario: Add memory with agent ID + When I run "pmem memory add 'Agent memory' --user-id u1 --agent-id agent1 --no-infer" + Then the memory is stored with agentId "agent1" +``` + +### Feature: Statistics +```gherkin +Scenario: Display statistics + Given memories exist in the database + When I run "pmem stats" + Then the output contains "Total memories:" + +Scenario: Display statistics as JSON + Given memories exist in the database + When I run "pmem --json stats" + Then the output is valid JSON + And the JSON has key "totalMemories" + And the JSON has key "byType" + And the JSON has key "ageDistribution" + +Scenario: Display statistics filtered by user + Given memories exist for users "alice" and "bob" + When I run "pmem stats --user-id alice" + Then statistics reflect only alice's memories +``` + +### Feature: Backup and Restore +```gherkin +Scenario: Backup memories to JSON file + Given memories exist in the database + When I run "pmem manage backup --output /tmp/backup.json" + Then file "/tmp/backup.json" exists + And the file contains valid JSON + And the JSON has key "memories" + And the JSON has key "count" + +Scenario: Backup with user filter + Given memories exist for users "alice" and "bob" + When I run "pmem manage backup 
--output /tmp/alice.json --user-id alice" + Then the backup contains only alice's memories + +Scenario: Restore from backup (dry run) + Given backup file "/tmp/backup.json" exists + When I run "pmem manage restore /tmp/backup.json --dry-run" + Then the output contains "Dry run" + And no memories are modified + +Scenario: Restore from backup + Given backup file "/tmp/backup.json" exists with 5 memories + And the database is empty + When I run "pmem manage restore /tmp/backup.json" + Then the output contains "Restored 5 memories" + +Scenario: Cleanup duplicates + Given duplicate memories exist + When I run "pmem manage cleanup --strategy exact --dry-run" + Then the output contains "Would check" +``` + +### Feature: Interactive Shell +```gherkin +Scenario: Shell shows welcome message + When I start "pmem shell" + Then the output contains "PowerMem Interactive Shell" + And the prompt shows "powermem>" + +Scenario: Shell add command + Given the shell is running + When I type "add I like coffee" + Then the output contains "Memory created" + +Scenario: Shell search command + Given the shell is running and memories exist + When I type "search coffee" + Then search results are displayed + +Scenario: Shell set and show commands + Given the shell is running + When I type "set user alice" + Then the output contains "User ID: alice" + When I type "show" + Then the output contains "User ID: alice" + +Scenario: Shell help command + Given the shell is running + When I type "help" + Then available commands are listed + +Scenario: Shell exit command + Given the shell is running + When I type "exit" + Then the shell exits with "Bye!" 
+``` + +--- + +## Dashboard UI Test Scenarios + +### Feature: Dashboard Overview Page +```gherkin +Scenario: Dashboard loads and shows stats cards + Given the server is running with memories in the database + When I navigate to "/dashboard/" + Then I see 4 stat cards: Total Memories, Avg Importance, Access Density, Unique Dates + And each card shows a numeric value + +Scenario: Dashboard shows growth trend chart + Given the server is running with memories over multiple days + When I navigate to "/dashboard/" + Then I see a line chart labeled "Growth Trend" + And the chart has date labels on X axis + +Scenario: Dashboard shows category distribution + Given memories exist with different categories + When I navigate to "/dashboard/" + Then I see a pie chart labeled "Memory Categories" + And the chart shows category segments + +Scenario: Dashboard shows age distribution + Given memories exist with different ages + When I navigate to "/dashboard/" + Then I see a bar chart labeled "Retention Age" + And bars show "< 1 day", "1-7 days", "7-30 days", "> 30 days" + +Scenario: Dashboard shows top accessed memories + Given memories with varying access counts exist + When I navigate to "/dashboard/" + Then I see a table of "Hot Memories" + And memories are sorted by access count descending + +Scenario: Dashboard shows system health panel + When I navigate to "/dashboard/" + Then I see a system health card + And it shows storage type, LLM provider, and uptime + +Scenario: Time range filter works + Given the dashboard is loaded + When I select "Last 7 days" from the time range dropdown + Then the stats and charts update to reflect 7-day data + +Scenario: Refresh button reloads data + Given the dashboard is loaded + When I click the Refresh button + Then the data refreshes + And a success toast notification appears + +Scenario: Dashboard shows loading skeletons + When I navigate to "/dashboard/" with slow network + Then I see skeleton loading placeholders + And they replace with 
actual content when data loads +``` + +### Feature: Dashboard Error Handling +```gherkin +Scenario: API key error shows input + Given the server requires an API key + And no API key is configured + When I navigate to "/dashboard/" + Then I see an error card with "API key" message + And I see an input field to enter the API key + And I see an "Update Key" button + +Scenario: Saving API key retries the request + Given the API key error is displayed + When I enter a valid API key and click "Update Key" + Then the dashboard loads successfully +``` + +### Feature: Memories Page +```gherkin +Scenario: Memories page lists memories + Given memories exist in the database + When I navigate to "/dashboard/#/memories" + Then I see a table of memories + And each row shows ID, content, user, dates + +Scenario: Delete memory from list + Given the memories page is loaded with memories + When I click the delete button on a memory row + Then a confirmation dialog appears + When I confirm the deletion + Then the memory is removed from the list + +Scenario: Bulk delete memories + Given the memories page is loaded + When I select multiple memories via checkboxes + And I click "Delete Selected" + Then selected memories are removed +``` + +### Feature: User Profile Page +```gherkin +Scenario: User profile page loads + When I navigate to "/dashboard/#/user-profile" + Then I see a user profile management interface + +Scenario: Search user profiles + Given user profiles exist + When I enter a user ID in the search field + Then matching profiles are displayed +``` + +### Feature: Settings Page +```gherkin +Scenario: Settings page shows configuration + When I navigate to "/dashboard/#/settings" + Then I see the current configuration settings +``` + +### Feature: Navigation and Theme +```gherkin +Scenario: Sidebar navigation works + Given the dashboard is loaded + When I click "Memories" in the sidebar + Then I navigate to the memories page + +Scenario: Theme toggle switches between light and 
dark + Given the dashboard is loaded + When I click the theme toggle + Then the theme switches between light and dark mode + +Scenario: Language switcher changes language + Given the dashboard is loaded + When I switch language to "中文" + Then all UI labels change to Chinese +``` + +--- + +## Data Correctness Scenarios + +### Feature: API Write → API Read Round-Trip +```gherkin +Scenario: Content, userId, metadata survive round-trip + When I POST a memory with content "User likes dark roast coffee" and userId "verify-user-1" + Then the returned memory has the exact same content and userId + And listing memories for that user returns the same memory + +Scenario: Search returns correct memory with score + Given a memory "Alice works at Google as a software engineer" exists + When I search for "software engineer Google" + Then the top result contains "Google" and "engineer" + And the score is between 0 and 1 + +Scenario: Delete removes memory permanently + Given a memory exists with known ID + When I DELETE that memory + Then listing memories no longer includes that ID + +Scenario: Stats reflect accurate counts + Given 3 memories exist for a user + When I GET stats for that user + Then totalMemories equals 3 +``` + +### Feature: API Write → Dashboard Displays Correctly +```gherkin +Scenario: Memory added via API appears in dashboard + Given I POST a memory via the REST API + When I navigate to the dashboard memories page + Then the memory content and userId are visible in the table + +Scenario: Stats cards show non-zero total + Given memories exist in the database + When I view the dashboard overview + Then the "Total Memories" card shows a number > 0 + +Scenario: Growth trend shows today + Given memories were added today + When I view the dashboard overview + Then the growth trend chart includes today's date +``` + +### Feature: User Isolation +```gherkin +Scenario: User A data not visible to user B + Given memories exist for user A and user B + When I list memories 
for user A + Then only user A's memories appear + And user B's content is not present + +Scenario: Search isolation + Given both users have memories with keyword "XYZ" + When I search for "XYZ" as user A + Then only user A's memory is returned + +Scenario: Stats isolation + Given 2 memories for user A and 1 for user B + When I GET stats for user A + Then totalMemories equals 2 +``` + +### Feature: Data Type Fidelity +```gherkin +Scenario: Chinese content survives round-trip + When I POST content "用户喜欢喝咖啡,住在上海浦东新区" + Then GET returns the exact same string + +Scenario: Emoji content survives round-trip + When I POST content "I love 🐱 cats and ☕ coffee! 🎉🚀" + Then GET returns the exact same string + +Scenario: Special characters survive round-trip + When I POST content with newlines, tabs, quotes, and HTML entities + Then GET returns the exact same string + +Scenario: Long content (500 chars) survives round-trip + When I POST 500 characters of repeated text + Then GET returns content with length 500+ +``` + +### Feature: Pagination Correctness +```gherkin +Scenario: Pages have no ID overlap + Given 5 memories exist for a user + When I GET page 1 (limit=2, offset=0) and page 2 (limit=2, offset=2) + Then page 1 has 2 items and page 2 has 2 items + And total is 5 + And no IDs appear in both pages +``` diff --git a/tests/bdd/cli.test.ts b/tests/bdd/cli.test.ts new file mode 100644 index 0000000..3023c13 --- /dev/null +++ b/tests/bdd/cli.test.ts @@ -0,0 +1,194 @@ +/** + * BDD-style CLI tests — exercising every scenario from the BDD spec. + * Uses real CLI execution via subprocess to prove end-to-end behavior. 
+ */ +import { describe, it, expect, beforeAll, afterAll } from 'vitest'; +import { execSync } from 'node:child_process'; +import fs from 'node:fs'; +import os from 'node:os'; +import path from 'node:path'; + +const CLI = 'npx tsx src/cli/main.ts'; +const tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), 'pmem-bdd-')); +const dbPath = path.join(tmpDir, 'test.db'); + +function run(args: string, env: Record = {}): string { + try { + return execSync(`${CLI} ${args}`, { + cwd: process.cwd(), + encoding: 'utf-8', + timeout: 15000, + env: { + ...process.env, + NODE_NO_WARNINGS: '1', + EMBEDDING_PROVIDER: 'openai', + EMBEDDING_API_KEY: 'fake-key', + ...env, + }, + }).trim(); + } catch (err: any) { + return ((err.stdout ?? '') + (err.stderr ?? '')).trim(); + } +} + +afterAll(() => { + fs.rmSync(tmpDir, { recursive: true, force: true }); +}); + +// ═══════════════════════════════════════════════════════════════════ +// Feature: CLI Version and Help +// ═══════════════════════════════════════════════════════════════════ + +describe('Feature: CLI Version and Help', () => { + it('Scenario: Show version number', () => { + const output = run('--version'); + expect(output).toMatch(/^\d+\.\d+\.\d+$/); + }); + + it('Scenario: Show main help', () => { + const output = run('--help'); + expect(output).toContain('config'); + expect(output).toContain('memory'); + expect(output).toContain('stats'); + expect(output).toContain('manage'); + expect(output).toContain('shell'); + }); +}); + +// ═══════════════════════════════════════════════════════════════════ +// Feature: Config Management +// ═══════════════════════════════════════════════════════════════════ + +describe('Feature: Config Management', () => { + it('Scenario: Show full configuration', () => { + const output = run('config show'); + expect(output).toContain('vectorStore'); + expect(output).toContain('llm'); + }); + + it('Scenario: Show configuration as JSON', () => { + const output = run('config show --json'); + const parsed = 
JSON.parse(output); + expect(parsed.vectorStore).toBeDefined(); + expect(parsed.llm).toBeDefined(); + expect(parsed.embedder).toBeDefined(); + }); + + it('Scenario: Show specific config section', () => { + const output = run('config show --section llm'); + expect(output).toContain('llm'); + }); + + it('Scenario: Show config section as JSON', () => { + const output = run('config show --section llm --json'); + const parsed = JSON.parse(output); + expect(parsed.provider).toBeDefined(); + }); + + it('Scenario: Validate configuration', () => { + const output = run('config validate'); + expect(output).toContain('valid'); + }); + + it('Scenario: Test component connections', () => { + const output = run('config test'); + expect(output).toContain('Database'); + expect(output).toContain('LLM'); + expect(output).toContain('Embedder'); + }); + + it('Scenario: Test specific component', () => { + const output = run('config test --component database'); + expect(output).toContain('Database'); + }); +}); + +// ═══════════════════════════════════════════════════════════════════ +// Feature: Statistics +// ═══════════════════════════════════════════════════════════════════ + +describe('Feature: Statistics', () => { + it('Scenario: stats --help shows options', () => { + const output = run('stats --help'); + expect(output).toContain('--user-id'); + expect(output).toContain('statistics'); + }); +}); + +// ═══════════════════════════════════════════════════════════════════ +// Feature: Backup and Restore +// ═══════════════════════════════════════════════════════════════════ + +describe('Feature: Backup and Restore', () => { + it('Scenario: backup --help shows options', () => { + const output = run('manage backup --help'); + expect(output).toContain('--output'); + expect(output).toContain('--user-id'); + expect(output).toContain('--limit'); + }); + + it('Scenario: restore --help shows options', () => { + const output = run('manage restore --help'); + 
expect(output).toContain('--dry-run'); + expect(output).toContain('file'); + }); + + it('Scenario: cleanup --help shows options', () => { + const output = run('manage cleanup --help'); + expect(output).toContain('--strategy'); + expect(output).toContain('--threshold'); + expect(output).toContain('--dry-run'); + }); + + it('Scenario: restore non-existent file fails', () => { + const output = run('manage restore /nonexistent/file.json'); + expect(output).toContain('not found'); + }); +}); + +// ═══════════════════════════════════════════════════════════════════ +// Feature: Memory CRUD help +// ═══════════════════════════════════════════════════════════════════ + +describe('Feature: Memory CRUD', () => { + it('Scenario: memory add --help shows options', () => { + const output = run('memory add --help'); + expect(output).toContain('--user-id'); + expect(output).toContain('--agent-id'); + expect(output).toContain('--no-infer'); + expect(output).toContain('--scope'); + expect(output).toContain('--category'); + }); + + it('Scenario: memory search --help shows options', () => { + const output = run('memory search --help'); + expect(output).toContain('--user-id'); + expect(output).toContain('--limit'); + expect(output).toContain('--threshold'); + }); + + it('Scenario: memory list --help shows options', () => { + const output = run('memory list --help'); + expect(output).toContain('--user-id'); + expect(output).toContain('--limit'); + expect(output).toContain('--offset'); + expect(output).toContain('--sort'); + expect(output).toContain('--order'); + }); + + it('Scenario: memory delete-all requires --confirm', () => { + const output = run('memory delete-all'); + expect(output).toContain('--confirm'); + }); +}); + +// ═══════════════════════════════════════════════════════════════════ +// Feature: Interactive Shell +// ═══════════════════════════════════════════════════════════════════ + +describe('Feature: Interactive Shell', () => { + it('Scenario: shell --help shows 
description', () => { + const output = run('shell --help'); + expect(output).toContain('Interactive'); + }); +}); diff --git a/tests/bdd/dashboard.test.ts b/tests/bdd/dashboard.test.ts new file mode 100644 index 0000000..5d8e497 --- /dev/null +++ b/tests/bdd/dashboard.test.ts @@ -0,0 +1,269 @@ +/** + * BDD-style Dashboard UI tests — using dev-browser for headless browser testing. + * Requires the dashboard server to be running on port 8000. + * + * Run: Start server first with `npx tsx src/dashboard/server.ts`, + * then `npx vitest run tests/bdd/dashboard.test.ts` + */ +import { describe, it, expect, beforeAll } from 'vitest'; +import { execSync } from 'node:child_process'; + +function devBrowser(script: string): string { + try { + return execSync(`dev-browser --headless --timeout 15 <<'DBSCRIPT'\n${script}\nDBSCRIPT`, { + encoding: 'utf-8', + timeout: 25000, + env: { ...process.env }, + }).trim(); + } catch (err: any) { + return ((err.stdout ?? '') + (err.stderr ?? '')).trim(); + } +} + +/** Check if dashboard server is running */ +async function serverReady(): Promise { + try { + const res = await fetch('http://localhost:8000/api/v1/system/health'); + return res.ok; + } catch { return false; } +} + +describe('BDD: Dashboard UI', async () => { + const ready = await serverReady(); + if (!ready) { + it.skip('Dashboard server not running — skipping UI tests', () => {}); + return; + } + + // ═══════════════════════════════════════════════════════════════ + // Feature: Dashboard Overview Page + // ═══════════════════════════════════════════════════════════════ + + describe('Feature: Dashboard Overview Page', () => { + it('Scenario: Dashboard loads and shows stats cards', () => { + const output = devBrowser(` + const page = await browser.getPage("bdd-overview"); + await page.goto("http://localhost:8000/dashboard"); + await page.waitForTimeout(3000); + const text = await page.evaluate(() => document.body.innerText); + console.log(text.includes("TOTAL MEMORIES") ? 
"PASS:stats_cards" : "FAIL:stats_cards"); + console.log(text.includes("AVG IMPORTANCE") ? "PASS:avg_importance" : "FAIL:avg_importance"); + console.log(text.includes("ACCESS DENSITY") ? "PASS:access_density" : "FAIL:access_density"); + console.log(text.includes("ACTIVE DAYS") ? "PASS:active_days" : "FAIL:active_days"); + `); + expect(output).toContain('PASS:stats_cards'); + expect(output).toContain('PASS:avg_importance'); + expect(output).toContain('PASS:access_density'); + expect(output).toContain('PASS:active_days'); + }); + + it('Scenario: Dashboard shows system health panel', () => { + const output = devBrowser(` + const page = await browser.getPage("bdd-overview"); + const text = await page.evaluate(() => document.body.innerText); + console.log(text.includes("System Health") ? "PASS:health" : "FAIL:health"); + console.log(text.includes("running") ? "PASS:status" : "FAIL:status"); + console.log(text.includes("sqlite") ? "PASS:storage" : "FAIL:storage"); + console.log(text.includes("0.3.0") ? "PASS:version" : "FAIL:version"); + `); + expect(output).toContain('PASS:health'); + expect(output).toContain('PASS:status'); + expect(output).toContain('PASS:storage'); + expect(output).toContain('PASS:version'); + }); + + it('Scenario: Dashboard shows growth trend chart', () => { + const output = devBrowser(` + const page = await browser.getPage("bdd-overview"); + const text = await page.evaluate(() => document.body.innerText); + console.log(text.includes("Growth Trend") ? "PASS:growth" : "FAIL:growth"); + `); + expect(output).toContain('PASS:growth'); + }); + + it('Scenario: Dashboard shows age distribution', () => { + const output = devBrowser(` + const page = await browser.getPage("bdd-overview"); + const text = await page.evaluate(() => document.body.innerText); + console.log(text.includes("Age Distribution") ? "PASS:age" : "FAIL:age"); + console.log(text.includes("< 1 day") ? 
"PASS:age_bucket" : "FAIL:age_bucket"); + `); + expect(output).toContain('PASS:age'); + expect(output).toContain('PASS:age_bucket'); + }); + + it('Scenario: Dashboard shows hot memories table', () => { + const output = devBrowser(` + const page = await browser.getPage("bdd-overview"); + const text = await page.evaluate(() => document.body.innerText); + console.log(text.includes("Hot Memories") ? "PASS:hot" : "FAIL:hot"); + console.log(text.includes("CONTENT") ? "PASS:header" : "FAIL:header"); + console.log(text.includes("HITS") ? "PASS:hits" : "FAIL:hits"); + `); + expect(output).toContain('PASS:hot'); + expect(output).toContain('PASS:header'); + expect(output).toContain('PASS:hits'); + }); + }); + + // ═══════════════════════════════════════════════════════════════ + // Feature: Navigation and Theme + // ═══════════════════════════════════════════════════════════════ + + describe('Feature: Navigation and Theme', () => { + it('Scenario: Theme toggle switches to dark mode', () => { + const output = devBrowser(` + const page = await browser.getPage("bdd-theme"); + await page.goto("http://localhost:8000/dashboard"); + await page.waitForTimeout(1500); + const before = await page.evaluate(() => document.documentElement.dataset.theme); + await page.click('button:has-text("Theme")'); + await page.waitForTimeout(300); + const after = await page.evaluate(() => document.documentElement.dataset.theme); + console.log("before:" + before); + console.log("after:" + after); + console.log(before !== after ? 
"PASS:toggle" : "FAIL:toggle"); + `); + expect(output).toContain('PASS:toggle'); + }); + + it('Scenario: Navigate to Memories page', () => { + const output = devBrowser(` + const page = await browser.getPage("bdd-nav"); + await page.goto("http://localhost:8000/dashboard"); + await page.waitForTimeout(2000); + await page.click('a[data-page="memories"]'); + await page.waitForTimeout(2000); + const visible = await page.evaluate(() => document.getElementById('memories-page')?.style.display !== 'none'); + console.log(visible ? "PASS:nav_memories" : "FAIL:nav_memories"); + const tableText = await page.evaluate(() => document.getElementById('memTable')?.innerText ?? ''); + console.log(tableText.includes("Content") ? "PASS:table_header" : "FAIL:table_header"); + `); + expect(output).toContain('PASS:nav_memories'); + expect(output).toContain('PASS:table_header'); + }); + + it('Scenario: Navigate to Settings page', () => { + const output = devBrowser(` + const page = await browser.getPage("bdd-nav-settings"); + await page.goto("http://localhost:8000/dashboard"); + await page.waitForTimeout(2000); + await page.click('a[data-page="settings"]'); + await page.waitForTimeout(2000); + const visible = await page.evaluate(() => document.getElementById('settings-page')?.style.display !== 'none'); + console.log(visible ? "PASS:nav_settings" : "FAIL:nav_settings"); + const content = await page.evaluate(() => document.getElementById('settingsContent')?.innerText ?? ''); + console.log(content.includes("version") ? 
"PASS:settings_loaded" : "FAIL:settings_loaded"); + `); + expect(output).toContain('PASS:nav_settings'); + expect(output).toContain('PASS:settings_loaded'); + }); + }); + + // ═══════════════════════════════════════════════════════════════ + // Feature: Memories Page + // ═══════════════════════════════════════════════════════════════ + + describe('Feature: Memories Page', () => { + it('Scenario: Memories page lists memories with table', () => { + const output = devBrowser(` + const page = await browser.getPage("bdd-mem-list"); + await page.goto("http://localhost:8000/dashboard"); + await page.waitForTimeout(2000); + await page.click('a[data-page="memories"]'); + await page.waitForTimeout(2000); + const text = await page.evaluate(() => document.getElementById('memTable')?.innerText ?? ''); + // Check table structure (headers + delete buttons) and that rows exist + console.log(text.includes("Content") ? "PASS:has_header" : "FAIL:has_header"); + console.log(text.includes("Del") ? "PASS:has_delete_btn" : "FAIL:has_delete_btn"); + // Check that at least one memory row is present (any content) + const rowCount = (text.match(/Del/g) || []).length; + console.log(rowCount > 0 ? "PASS:has_rows" : "FAIL:has_rows"); + console.log("rows:" + rowCount); + `); + expect(output).toContain('PASS:has_header'); + expect(output).toContain('PASS:has_delete_btn'); + expect(output).toContain('PASS:has_rows'); + }); + + it('Scenario: Page info shows pagination', () => { + const output = devBrowser(` + const page = await browser.getPage("bdd-memories"); + const info = await page.evaluate(() => document.getElementById('memPageInfo')?.innerText ?? ''); + console.log(info.includes("of") ? 
"PASS:pagination" : "FAIL:pagination"); + console.log("page_info:" + info); + `); + expect(output).toContain('PASS:pagination'); + }); + }); + + // ═══════════════════════════════════════════════════════════════ + // Feature: API Endpoints (verified through dashboard) + // ═══════════════════════════════════════════════════════════════ + + describe('Feature: REST API', () => { + it('Scenario: Health endpoint returns ok', async () => { + // Retry once on transient socket errors + let json: any; + for (let attempt = 0; attempt < 2; attempt++) { + try { + const res = await fetch('http://localhost:8000/api/v1/system/health'); + json = await res.json(); + break; + } catch { + if (attempt === 1) throw new Error('Health endpoint unreachable after retry'); + await new Promise(r => setTimeout(r, 500)); + } + } + expect(json.success).toBe(true); + expect(json.data.status).toBe('ok'); + }); + + it('Scenario: Status endpoint returns version and uptime', async () => { + const res = await fetch('http://localhost:8000/api/v1/system/status'); + const json = await res.json(); + expect(json.success).toBe(true); + expect(json.data.version).toBe('0.3.0'); + expect(json.data.status).toBe('running'); + expect(typeof json.data.uptime).toBe('number'); + }); + + it('Scenario: Stats endpoint returns memory statistics', async () => { + const res = await fetch('http://localhost:8000/api/v1/memories/stats'); + const json = await res.json(); + expect(json.success).toBe(true); + expect(json.data.totalMemories).toBeGreaterThanOrEqual(0); + expect(json.data.ageDistribution).toBeDefined(); + }); + + it('Scenario: Memories list endpoint returns array', async () => { + const res = await fetch('http://localhost:8000/api/v1/memories?limit=5'); + const json = await res.json(); + expect(json.success).toBe(true); + expect(json.data.memories).toBeDefined(); + expect(Array.isArray(json.data.memories)).toBe(true); + }); + + it('Scenario: Create memory via POST', async () => { + const res = await 
fetch('http://localhost:8000/api/v1/memories', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ content: 'BDD test memory', user_id: 'bdd-user', infer: false }), + }); + const json = await res.json(); + expect(json.success).toBe(true); + expect(json.data.memories.length).toBeGreaterThanOrEqual(1); + }); + + it('Scenario: Search memories via POST', async () => { + const res = await fetch('http://localhost:8000/api/v1/memories/search', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ query: 'BDD test', limit: 5 }), + }); + const json = await res.json(); + expect(json.success).toBe(true); + expect(json.data.results).toBeDefined(); + }); + }); +}); diff --git a/tests/bdd/data-correctness.test.ts b/tests/bdd/data-correctness.test.ts new file mode 100644 index 0000000..9c7a2c6 --- /dev/null +++ b/tests/bdd/data-correctness.test.ts @@ -0,0 +1,354 @@ +/** + * Data correctness tests — prove that data written via CLI or API + * is stored accurately and returned correctly through all output paths. + * + * Requires dashboard server running on port 8000. 
+ */ +import { describe, it, expect } from 'vitest'; +import { execSync } from 'node:child_process'; + +const API = 'http://localhost:8000/api/v1'; + +async function api(endpoint: string, opts: RequestInit = {}): Promise<any> { + for (let attempt = 0; attempt < 3; attempt++) { + try { + const res = await fetch(`${API}${endpoint}`, { + headers: { 'Content-Type': 'application/json' }, + ...opts, + }); + const json = await res.json(); + if (!json.success) throw new Error(json.message); + return json.data; + } catch (err: any) { + if (attempt === 2 || !err.message?.includes('fetch failed')) throw err; + await new Promise(r => setTimeout(r, 300)); + } + } +} + +async function serverReady(): Promise<boolean> { + try { const r = await fetch(`${API}/system/health`); return r.ok; } + catch { return false; } +} + +describe('Data Correctness: Input → Storage → Output', async () => { + const ready = await serverReady(); + if (!ready) { + it.skip('Server not running', () => {}); + return; + } + + // ═══════════════════════════════════════════════════════════════ + // Feature: API Create → API Read (round-trip fidelity) + // ═══════════════════════════════════════════════════════════════ + + describe('API write → API read round-trip', () => { + it('content, userId, metadata survive round-trip', async () => { + // Write + const created = await api('/memories', { + method: 'POST', + body: JSON.stringify({ + content: 'User likes dark roast coffee', + user_id: 'verify-user-1', + metadata: { source: 'test', priority: 'high' }, + infer: false, + }), + }); + + expect(created.memories).toHaveLength(1); + const mem = created.memories[0]; + expect(mem.content).toBe('User likes dark roast coffee'); + expect(mem.userId).toBe('verify-user-1'); + + // Read back via list + const listed = await api('/memories?user_id=verify-user-1&limit=10'); + const found = listed.memories.find((m: any) => m.memoryId === mem.memoryId || m.id === mem.id); + expect(found).toBeDefined(); + expect(found.content).toBe('User 
likes dark roast coffee'); + expect(found.userId).toBe('verify-user-1'); + }); + + it('search returns the correct memory with score', async () => { + // Add a known memory + await api('/memories', { + method: 'POST', + body: JSON.stringify({ + content: 'Alice works at Google as a software engineer', + user_id: 'verify-user-2', + infer: false, + }), + }); + + // Search for it + const searchResult = await api('/memories/search', { + method: 'POST', + body: JSON.stringify({ + query: 'software engineer Google', + user_id: 'verify-user-2', + limit: 5, + }), + }); + + expect(searchResult.results.length).toBeGreaterThan(0); + const topResult = searchResult.results[0]; + expect(topResult.content).toContain('Google'); + expect(topResult.content).toContain('engineer'); + expect(typeof topResult.score).toBe('number'); + expect(topResult.score).toBeGreaterThan(0); + expect(topResult.score).toBeLessThanOrEqual(1); + }); + + it('delete removes the memory and it is no longer retrievable', async () => { + const created = await api('/memories', { + method: 'POST', + body: JSON.stringify({ + content: 'Ephemeral memory to delete', + user_id: 'verify-user-3', + infer: false, + }), + }); + + const memId = created.memories[0].memoryId ?? created.memories[0].id; + + // Delete + const deleteResult = await api(`/memories/${memId}`, { method: 'DELETE' }); + expect(deleteResult.deleted).toBe(true); + + // Verify not in list + const listed = await api('/memories?user_id=verify-user-3&limit=100'); + const found = listed.memories.find((m: any) => (m.memoryId ?? 
m.id) === memId); + expect(found).toBeUndefined(); + }); + + it('stats reflect accurate counts after writes', async () => { + const userId = `verify-stats-${Date.now()}`; + + // Empty stats + const before = await api(`/memories/stats?user_id=${userId}`); + expect(before.totalMemories).toBe(0); + + // Add 3 memories + for (let i = 0; i < 3; i++) { + await api('/memories', { + method: 'POST', + body: JSON.stringify({ content: `Stats test ${i}`, user_id: userId, infer: false }), + }); + } + + // Stats should reflect 3 + const after = await api(`/memories/stats?user_id=${userId}`); + expect(after.totalMemories).toBe(3); + }); + }); + + // ═══════════════════════════════════════════════════════════════ + // Feature: API write → Dashboard read (cross-channel) + // ═══════════════════════════════════════════════════════════════ + + describe('API write → Dashboard displays correctly', () => { + it('memory added via API appears in dashboard memories page', () => { + const output = execSync(`dev-browser --headless --timeout 20 <<'SCRIPT' +const page = await browser.getPage("data-verify-mem"); +await page.goto("http://localhost:8000/dashboard"); +await page.waitForTimeout(2000); +await page.click('a[data-page="memories"]'); +await page.waitForTimeout(2000); +const tableText = await page.evaluate(() => document.getElementById('memTable')?.innerText ?? ''); +// Check that at least one of our test memories appears +console.log(tableText.includes("dark roast coffee") ? "PASS:content_visible" : "FAIL:content_visible"); +console.log(tableText.includes("verify-user-1") ? 
"PASS:user_visible" : "FAIL:user_visible"); +SCRIPT`, { encoding: 'utf-8', timeout: 30000 }).trim(); + + expect(output).toContain('PASS:content_visible'); + expect(output).toContain('PASS:user_visible'); + }); + + it('stats cards show non-zero total after API writes', () => { + const output = execSync(`dev-browser --headless --timeout 15 <<'SCRIPT' +const page = await browser.getPage("data-verify-stats"); +await page.goto("http://localhost:8000/dashboard"); +await page.waitForTimeout(3000); +const totalText = await page.evaluate(() => { + const cards = document.querySelectorAll('.card-value'); + return cards[0]?.textContent ?? '0'; +}); +const total = parseInt(totalText); +console.log("total:" + total); +console.log(total > 0 ? "PASS:nonzero_total" : "FAIL:zero_total"); +SCRIPT`, { encoding: 'utf-8', timeout: 25000 }).trim(); + + expect(output).toContain('PASS:nonzero_total'); + }); + + it('growth trend shows today in chart data', () => { + const output = execSync(`dev-browser --headless --timeout 15 <<'SCRIPT' +const page = await browser.getPage("data-verify-growth"); +await page.goto("http://localhost:8000/dashboard"); +await page.waitForTimeout(3000); +const chartText = await page.evaluate(() => document.getElementById('growthChart')?.innerText ?? ''); +const today = new Date().toISOString().slice(5, 10); +console.log("chart:" + chartText.substring(0, 100)); +console.log(chartText.includes(today) ? 
"PASS:today_in_chart" : "FAIL:today_not_in_chart"); +SCRIPT`, { encoding: 'utf-8', timeout: 25000 }).trim(); + + expect(output).toContain('PASS:today_in_chart'); + }); + }); + + // ═══════════════════════════════════════════════════════════════ + // Feature: User isolation — data written for user A not visible to user B + // ═══════════════════════════════════════════════════════════════ + + describe('User isolation across API and dashboard', () => { + it('user A memories not visible in user B list', async () => { + const tsA = Date.now(); + const userA = `isolated-A-${tsA}`; + const userB = `isolated-B-${tsA}`; + + await api('/memories', { + method: 'POST', + body: JSON.stringify({ content: 'Secret A data', user_id: userA, infer: false }), + }); + await api('/memories', { + method: 'POST', + body: JSON.stringify({ content: 'Secret B data', user_id: userB, infer: false }), + }); + + // List for A should not contain B's data + const listA = await api(`/memories?user_id=${userA}&limit=100`); + const contentsA = listA.memories.map((m: any) => m.content); + expect(contentsA).toContain('Secret A data'); + expect(contentsA).not.toContain('Secret B data'); + + // List for B should not contain A's data + const listB = await api(`/memories?user_id=${userB}&limit=100`); + const contentsB = listB.memories.map((m: any) => m.content); + expect(contentsB).toContain('Secret B data'); + expect(contentsB).not.toContain('Secret A data'); + }); + + it('search for user A does not return user B results', async () => { + const ts = Date.now(); + const userA = `search-iso-A-${ts}`; + const userB = `search-iso-B-${ts}`; + + await api('/memories', { + method: 'POST', + body: JSON.stringify({ content: 'Alpha unique keyword XYZ', user_id: userA, infer: false }), + }); + await api('/memories', { + method: 'POST', + body: JSON.stringify({ content: 'Beta unique keyword XYZ', user_id: userB, infer: false }), + }); + + const searchA = await api('/memories/search', { + method: 'POST', + body: 
JSON.stringify({ query: 'unique keyword XYZ', user_id: userA, limit: 10 }), + }); + + const searchB = await api('/memories/search', { + method: 'POST', + body: JSON.stringify({ query: 'unique keyword XYZ', user_id: userB, limit: 10 }), + }); + + // A's search should only contain A's memory + expect(searchA.results.every((r: any) => r.content.includes('Alpha'))).toBe(true); + // B's search should only contain B's memory + expect(searchB.results.every((r: any) => r.content.includes('Beta'))).toBe(true); + }); + + it('stats for user A reflect only A count', async () => { + const ts = Date.now(); + const userA = `stats-iso-A-${ts}`; + const userB = `stats-iso-B-${ts}`; + + await api('/memories', { method: 'POST', body: JSON.stringify({ content: 'A1', user_id: userA, infer: false }) }); + await api('/memories', { method: 'POST', body: JSON.stringify({ content: 'A2', user_id: userA, infer: false }) }); + await api('/memories', { method: 'POST', body: JSON.stringify({ content: 'B1', user_id: userB, infer: false }) }); + + const statsA = await api(`/memories/stats?user_id=${userA}`); + const statsB = await api(`/memories/stats?user_id=${userB}`); + + expect(statsA.totalMemories).toBe(2); + expect(statsB.totalMemories).toBe(1); + }); + }); + + // ═══════════════════════════════════════════════════════════════ + // Feature: Data type fidelity (unicode, special chars, long content) + // ═══════════════════════════════════════════════════════════════ + + describe('Data type fidelity', () => { + it('Chinese content survives API round-trip', async () => { + const content = '用户喜欢喝咖啡,住在上海浦东新区'; + const created = await api('/memories', { + method: 'POST', + body: JSON.stringify({ content, user_id: 'unicode-user', infer: false }), + }); + expect(created.memories[0].content).toBe(content); + + const listed = await api('/memories?user_id=unicode-user&limit=10'); + const found = listed.memories.find((m: any) => m.content === content); + expect(found).toBeDefined(); + }); + + it('emoji 
content survives API round-trip', async () => { + const content = 'I love 🐱 cats and ☕ coffee! 🎉🚀'; + const created = await api('/memories', { + method: 'POST', + body: JSON.stringify({ content, user_id: 'emoji-user', infer: false }), + }); + expect(created.memories[0].content).toBe(content); + }); + + it('special characters survive API round-trip', async () => { + const content = 'line1\nline2\ttab "quotes" \'apostrophe\' &'; + const created = await api('/memories', { + method: 'POST', + body: JSON.stringify({ content, user_id: 'special-user', infer: false }), + }); + expect(created.memories[0].content).toBe(content); + }); + + it('moderately long content (500 chars) survives API round-trip', async () => { + const content = 'The quick brown fox jumps over the lazy dog. '.repeat(11); // ~495 chars + const created = await api('/memories', { + method: 'POST', + body: JSON.stringify({ content, user_id: 'long-user', infer: false }), + }); + expect(created.memories[0].content).toBe(content); + expect(created.memories[0].content.length).toBeGreaterThan(400); + }); + }); + + // ═══════════════════════════════════════════════════════════════ + // Feature: Pagination correctness + // ═══════════════════════════════════════════════════════════════ + + describe('Pagination data correctness', () => { + it('offset/limit returns correct page with no overlap', { timeout: 30000 }, async () => { + const userId = `page-test-${Date.now()}`; + // Insert 5 items (fewer to stay within embedding timeout) + for (let i = 0; i < 5; i++) { + await api('/memories', { + method: 'POST', + body: JSON.stringify({ content: `Page ${i}`, user_id: userId, infer: false }), + }); + } + + const page1 = await api(`/memories?user_id=${userId}&limit=2&offset=0`); + const page2 = await api(`/memories?user_id=${userId}&limit=2&offset=2`); + const page3 = await api(`/memories?user_id=${userId}&limit=2&offset=4`); + + expect(page1.total).toBe(5); + expect(page1.memories).toHaveLength(2); + 
expect(page2.memories).toHaveLength(2); + expect(page3.memories).toHaveLength(1); + + // No overlap + const ids1 = new Set(page1.memories.map((m: any) => m.memoryId ?? m.id)); + const ids2 = new Set(page2.memories.map((m: any) => m.memoryId ?? m.id)); + for (const id of ids2) expect(ids1.has(id)).toBe(false); + }); + }); +}); diff --git a/tests/e2e-ollama.test.ts b/tests/e2e/basic.test.ts similarity index 99% rename from tests/e2e-ollama.test.ts rename to tests/e2e/basic.test.ts index 6d36879..1305529 100644 --- a/tests/e2e-ollama.test.ts +++ b/tests/e2e/basic.test.ts @@ -13,8 +13,8 @@ import { describe, it, expect, afterAll, beforeAll } from 'vitest'; import { OllamaEmbeddings } from '@langchain/ollama'; import { ChatOllama } from '@langchain/ollama'; -import { Memory } from '../src/memory.js'; -import { NativeProvider } from '../src/provider/native/index.js'; +import { Memory } from '../../src/core/memory.js'; +import { NativeProvider } from '../../src/core/native-provider.js'; const OLLAMA_BASE_URL = 'http://localhost:11434'; const EMBED_MODEL = 'nomic-embed-text'; diff --git a/tests/e2e-agent-scenario.test.ts b/tests/e2e/scenarios.test.ts similarity index 99% rename from tests/e2e-agent-scenario.test.ts rename to tests/e2e/scenarios.test.ts index dc2351c..3521085 100644 --- a/tests/e2e-agent-scenario.test.ts +++ b/tests/e2e/scenarios.test.ts @@ -8,7 +8,7 @@ * - Agent isolation: different agents don't leak data */ import { describe, it, expect, beforeAll, afterAll } from 'vitest'; -import { Memory } from '../src/memory.js'; +import { Memory } from '../../src/core/memory.js'; const OLLAMA_BASE_URL = 'http://localhost:11434'; diff --git a/tests/coverage-gaps.test.ts b/tests/integration/coverage-gaps.test.ts similarity index 96% rename from tests/coverage-gaps.test.ts rename to tests/integration/coverage-gaps.test.ts index ddc7022..65b6605 100644 --- a/tests/coverage-gaps.test.ts +++ b/tests/integration/coverage-gaps.test.ts @@ -2,10 +2,10 @@ * Tests targeting 
specific uncovered lines/branches to close coverage gaps. */ import { describe, it, expect, afterEach, beforeEach } from 'vitest'; -import { Memory } from '../src/memory.js'; -import { NativeProvider } from '../src/provider/native/index.js'; -import { SQLiteStore } from '../src/provider/native/store.js'; -import { MockEmbeddings, MockLLM } from './mocks.js'; +import { Memory } from '../../src/core/memory.js'; +import { NativeProvider } from '../../src/core/native-provider.js'; +import { SQLiteStore } from '../../src/storage/sqlite/sqlite.js'; +import { MockEmbeddings, MockLLM } from '../mocks.js'; // ── memory.ts:41-42 — HttpProvider (serverUrl) path ────────────────────── @@ -235,7 +235,7 @@ describe('provider-factory missing peer deps', () => { process.env.LLM_API_KEY = 'test-key'; const { createLLMFromEnv } = await import( - '../src/provider/native/provider-factory.js' + '../../src/integrations/factory.js' ); // @langchain/anthropic is not installed in dev await expect(createLLMFromEnv()).rejects.toThrow('@langchain/anthropic'); @@ -246,7 +246,7 @@ describe('provider-factory missing peer deps', () => { process.env.LLM_API_KEY = 'test-key'; const { createLLMFromEnv } = await import( - '../src/provider/native/provider-factory.js' + '../../src/integrations/factory.js' ); const llm = await createLLMFromEnv(); expect(llm).toBeDefined(); @@ -257,7 +257,7 @@ describe('provider-factory missing peer deps', () => { process.env.EMBEDDING_API_KEY = 'test-key'; const { createEmbeddingsFromEnv } = await import( - '../src/provider/native/provider-factory.js' + '../../src/integrations/factory.js' ); const embeddings = await createEmbeddingsFromEnv(); expect(embeddings).toBeDefined(); diff --git a/tests/memory-facade.test.ts b/tests/integration/memory.test.ts similarity index 97% rename from tests/memory-facade.test.ts rename to tests/integration/memory.test.ts index 88c03c2..cb779ab 100644 --- a/tests/memory-facade.test.ts +++ b/tests/integration/memory.test.ts @@ -1,6 +1,6 @@ 
import { describe, it, expect, afterEach } from 'vitest'; -import { Memory } from '../src/memory.js'; -import { MockEmbeddings, MockLLM } from './mocks.js'; +import { Memory } from '../../src/core/memory.js'; +import { MockEmbeddings, MockLLM } from '../mocks.js'; describe('Memory facade', () => { let memory: Memory; diff --git a/tests/integration/seekdb-e2e.test.ts b/tests/integration/seekdb-e2e.test.ts new file mode 100644 index 0000000..a4e548e --- /dev/null +++ b/tests/integration/seekdb-e2e.test.ts @@ -0,0 +1,205 @@ +/** + * SeekDB end-to-end integration tests — single shared Memory instance. + * SeekDB embedded engine is single-instance, so all tests share one DB. + * + * Auto-skips when SeekDB native bindings are unavailable. + */ +import { describe, it, expect, beforeAll, afterAll } from 'vitest'; +import fs from 'node:fs'; +import path from 'node:path'; +import os from 'node:os'; +import { Memory } from '../../src/core/memory.js'; +import { SeekDBStore } from '../../src/storage/seekdb/seekdb.js'; +import { NativeProvider } from '../../src/core/native-provider.js'; +import { MockEmbeddings, MockLLM } from '../mocks.js'; +import { calculateStatsFromMemories } from '../../src/utils/stats.js'; + +async function tryCreateStore(tmpDir: string, name: string, dim = 8) { + try { + return await SeekDBStore.create({ + path: tmpDir, database: 'test', collectionName: name, dimension: dim, + }); + } catch { + return null; + } +} + +let seekdbAvailable = false; +{ + const dir = fs.mkdtempSync(path.join(os.tmpdir(), 'seekdb-e2e-check-')); + try { + const s = await tryCreateStore(dir, 'check'); + seekdbAvailable = s != null; + } finally { + try { fs.rmSync(dir, { recursive: true, force: true }); } catch {} + } +} + +const describeIf = seekdbAvailable ? 
describe : describe.skip; + +describeIf('SeekDB E2E — full stack (shared instance)', () => { + let memory: Memory; + let tmpDir: string; + + beforeAll(async () => { + tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), 'seekdb-e2e-')); + memory = await Memory.create({ + embeddings: new MockEmbeddings(), + seekdb: { path: tmpDir, database: 'test', collectionName: 'memories', dimension: 8 }, + }); + }); + + afterAll(async () => { + if (memory) await memory.close(); + if (tmpDir) try { fs.rmSync(tmpDir, { recursive: true, force: true }); } catch {} + }); + + // ─── CRUD ────────────────────────────────────────────────────────── + + it('add + get round-trip preserves content', async () => { + const result = await memory.add('SeekDB round-trip test', { userId: 'u1', infer: false }); + expect(result.memories).toHaveLength(1); + const id = result.memories[0].memoryId; + const fetched = await memory.get(id); + expect(fetched).not.toBeNull(); + expect(fetched!.content).toBe('SeekDB round-trip test'); + }); + + it('search returns results with scores', async () => { + await memory.add('I love dark roast coffee', { userId: 'u1', infer: false }); + const result = await memory.search('coffee', { userId: 'u1', limit: 5 }); + expect(result.results.length).toBeGreaterThan(0); + expect(result.results[0].score).toBeGreaterThan(0); + expect(result.results[0].score).toBeLessThanOrEqual(1); + }); + + it('update changes content', async () => { + const added = await memory.add('old content', { userId: 'u1', infer: false }); + const id = added.memories[0].memoryId; + await memory.update(id, 'new content'); + const fetched = await memory.get(id); + expect(fetched!.content).toBe('new content'); + }); + + it('delete removes memory', async () => { + const added = await memory.add('ephemeral', { userId: 'u1', infer: false }); + const id = added.memories[0].memoryId; + expect(await memory.delete(id)).toBe(true); + expect(await memory.get(id)).toBeNull(); + }); + + // ─── Pagination 
──────────────────────────────────────────────────── + + it('getAll with pagination — no ID overlap', async () => { + const userId = `page-${Date.now()}`; + for (let i = 0; i < 5; i++) { + await memory.add(`paginated ${i}`, { userId, infer: false }); + } + const page1 = await memory.getAll({ userId, limit: 2, offset: 0 }); + const page2 = await memory.getAll({ userId, limit: 2, offset: 2 }); + expect(page1.total).toBe(5); + expect(page1.memories).toHaveLength(2); + expect(page2.memories).toHaveLength(2); + const ids1 = new Set(page1.memories.map(m => m.id)); + const ids2 = new Set(page2.memories.map(m => m.id)); + for (const id of ids2) expect(ids1.has(id)).toBe(false); + }); + + it('count returns accurate number', async () => { + const userId = `count-${Date.now()}`; + await memory.add('a', { userId, infer: false }); + await memory.add('b', { userId, infer: false }); + expect(await memory.count({ userId })).toBe(2); + }); + + it('addBatch stores multiple', async () => { + const userId = `batch-${Date.now()}`; + const result = await memory.addBatch( + [{ content: 'b1' }, { content: 'b2' }, { content: 'b3' }], + { userId, infer: false } + ); + expect(result.memories).toHaveLength(3); + }); + + // ─── User isolation ──────────────────────────────────────────────── + + it('user A data not visible to user B', async () => { + const ts = Date.now(); + await memory.add('Alice secret', { userId: `alice-${ts}`, infer: false }); + await memory.add('Bob secret', { userId: `bob-${ts}`, infer: false }); + + const aliceList = await memory.getAll({ userId: `alice-${ts}` }); + expect(aliceList.memories.every(m => m.content.includes('Alice'))).toBe(true); + expect(aliceList.memories.some(m => m.content.includes('Bob'))).toBe(false); + }); + + it('search isolation between users', async () => { + const ts = Date.now(); + await memory.add('Alpha keyword XYZ', { userId: `searchA-${ts}`, infer: false }); + await memory.add('Beta keyword XYZ', { userId: `searchB-${ts}`, infer: false }); 
+ + const searchA = await memory.search('keyword XYZ', { userId: `searchA-${ts}` }); + expect(searchA.results.every(r => r.content.includes('Alpha'))).toBe(true); + }); + + // ─── Data fidelity ───────────────────────────────────────────────── + + it('Chinese content round-trip', async () => { + const content = '用户喜欢喝咖啡,住在上海'; + const result = await memory.add(content, { userId: 'cn', infer: false }); + const fetched = await memory.get(result.memories[0].memoryId); + expect(fetched!.content).toBe(content); + }); + + it('emoji round-trip', async () => { + const content = 'I love 🐱 cats and ☕ coffee!'; + const result = await memory.add(content, { userId: 'emoji', infer: false }); + const fetched = await memory.get(result.memories[0].memoryId); + expect(fetched!.content).toBe(content); + }); + + it('metadata round-trip', async () => { + const result = await memory.add('with meta', { + userId: 'meta', + metadata: { tags: ['test', 'seekdb'], nested: { deep: true } }, + infer: false, + }); + const fetched = await memory.get(result.memories[0].memoryId); + expect(fetched!.metadata).toEqual({ tags: ['test', 'seekdb'], nested: { deep: true } }); + }); + + it('scope and category round-trip', async () => { + const result = await memory.add('scoped', { + userId: 'scope', scope: 'personal', category: 'preference', infer: false, + }); + const fetched = await memory.get(result.memories[0].memoryId); + expect(fetched!.scope).toBe('personal'); + expect(fetched!.category).toBe('preference'); + }); + + // ─── Stats ───────────────────────────────────────────────────────── + + it('stats reflect correct data', async () => { + const userId = `stats-${Date.now()}`; + for (let i = 0; i < 3; i++) { + await memory.add(`Stats item ${i}`, { userId, infer: false }); + } + const all = await memory.getAll({ userId, limit: 10000 }); + const stats = calculateStatsFromMemories(all.memories as any); + expect(stats.totalMemories).toBe(3); + expect(stats.ageDistribution['< 1 day']).toBe(3); + }); + + 
// ─── Intelligent add ─────────────────────────────────────────────── + + it('infer extracts and stores facts', async () => { + const mem2 = await Memory.create({ + embeddings: new MockEmbeddings(), + llm: new MockLLM([JSON.stringify({ facts: ['Likes tea', 'Lives in Tokyo'] })]), + seekdb: { path: tmpDir, database: 'test', collectionName: `infer_${Date.now()}`, dimension: 8 }, + }); + const result = await mem2.add('I like tea and live in Tokyo', { userId: 'infer-u' }); + expect(result.memories.length).toBeGreaterThanOrEqual(1); + await mem2.close(); + }); +}); diff --git a/tests/seekdb-integration.test.ts b/tests/integration/seekdb.test.ts similarity index 94% rename from tests/seekdb-integration.test.ts rename to tests/integration/seekdb.test.ts index 65a5af8..c044090 100644 --- a/tests/seekdb-integration.test.ts +++ b/tests/integration/seekdb.test.ts @@ -7,9 +7,9 @@ import { describe, it, expect, afterEach } from 'vitest'; import fs from 'node:fs'; import path from 'node:path'; import os from 'node:os'; -import { NativeProvider } from '../src/provider/native/index.js'; -import { SeekDBStore } from '../src/provider/native/seekdb-store.js'; -import { MockEmbeddings, MockLLM } from './mocks.js'; +import { NativeProvider } from '../../src/core/native-provider.js'; +import { SeekDBStore } from '../../src/storage/seekdb/seekdb.js'; +import { MockEmbeddings, MockLLM } from '../mocks.js'; async function tryCreateStore(tmpDir: string, name: string, dim = 8) { try { @@ -30,9 +30,8 @@ let seekdbAvailable = false; try { const s = await tryCreateStore(dir, 'check'); seekdbAvailable = s != null; - await s?.close(); } finally { - fs.rmSync(dir, { recursive: true, force: true }); + try { fs.rmSync(dir, { recursive: true, force: true }); } catch {} } } diff --git a/tests/regression/cli.test.ts b/tests/regression/cli.test.ts new file mode 100644 index 0000000..429ab28 --- /dev/null +++ b/tests/regression/cli.test.ts @@ -0,0 +1,111 @@ +/** + * CLI smoke tests — port of Python 
regression/test_powermem_cli.py (minimal subset). + * Tests CLI commands via Commander's parseAsync with mocked argv. + */ +import { describe, it, expect } from 'vitest'; +import { execSync } from 'node:child_process'; + +function runCli(args: string): string { + try { + return execSync(`npx tsx src/cli/main.ts ${args}`, { + cwd: process.cwd(), + encoding: 'utf-8', + timeout: 10000, + env: { ...process.env, NODE_NO_WARNINGS: '1' }, + }).trim(); + } catch (err: any) { + return (err.stdout ?? '').trim() + (err.stderr ?? '').trim(); + } +} + +describe('CLI smoke tests', () => { + it('pmem --version shows version', () => { + const output = runCli('--version'); + expect(output).toMatch(/^\d+\.\d+\.\d+$/); + }); + + it('pmem --help shows usage', () => { + const output = runCli('--help'); + expect(output).toContain('pmem'); + expect(output).toContain('config'); + expect(output).toContain('memory'); + }); + + it('pmem config --help shows config commands', () => { + const output = runCli('config --help'); + expect(output).toContain('show'); + expect(output).toContain('validate'); + expect(output).toContain('test'); + }); + + it('pmem memory --help shows memory commands', () => { + const output = runCli('memory --help'); + expect(output).toContain('add'); + expect(output).toContain('search'); + expect(output).toContain('list'); + expect(output).toContain('get'); + expect(output).toContain('delete'); + expect(output).toContain('delete-all'); + }); + + it('pmem config validate passes with defaults', () => { + const output = runCli('config validate'); + expect(output).toContain('valid'); + }); + + it('pmem config show --json outputs JSON', () => { + const output = runCli('config show --json'); + expect(() => JSON.parse(output)).not.toThrow(); + const parsed = JSON.parse(output); + expect(parsed.vectorStore).toBeDefined(); + }); + + it('pmem config show --section llm shows LLM config', () => { + const output = runCli('config show --section llm'); + 
expect(output).toContain('llm'); + }); + + it('pmem config test shows component status', () => { + const output = runCli('config test'); + expect(output).toContain('Database'); + expect(output).toContain('LLM'); + expect(output).toContain('Embedder'); + }); + + // ── Phase B: stats, manage, shell ─────────────────────────────────── + + it('pmem stats --help shows stats command', () => { + const output = runCli('stats --help'); + expect(output).toContain('statistics'); + }); + + it('pmem manage --help shows manage commands', () => { + const output = runCli('manage --help'); + expect(output).toContain('backup'); + expect(output).toContain('restore'); + expect(output).toContain('cleanup'); + }); + + it('pmem shell --help shows shell command', () => { + const output = runCli('shell --help'); + expect(output).toContain('Interactive'); + }); + + it('pmem manage backup --help shows backup options', () => { + const output = runCli('manage backup --help'); + expect(output).toContain('--output'); + expect(output).toContain('--user-id'); + expect(output).toContain('--limit'); + }); + + it('pmem manage restore --help shows restore options', () => { + const output = runCli('manage restore --help'); + expect(output).toContain('--dry-run'); + }); + + it('pmem manage cleanup --help shows cleanup options', () => { + const output = runCli('manage cleanup --help'); + expect(output).toContain('--strategy'); + expect(output).toContain('--threshold'); + }); +}); diff --git a/tests/edge-cases.test.ts b/tests/regression/edge-cases.test.ts similarity index 98% rename from tests/edge-cases.test.ts rename to tests/regression/edge-cases.test.ts index 4a92111..999f686 100644 --- a/tests/edge-cases.test.ts +++ b/tests/regression/edge-cases.test.ts @@ -6,8 +6,8 @@ * empty content, idempotent deletes, empty queries, null/edge values. 
*/ import { describe, it, expect, beforeEach, afterEach } from 'vitest'; -import { NativeProvider } from '../src/provider/native/index.js'; -import { MockEmbeddings } from './mocks.js'; +import { NativeProvider } from '../../src/core/native-provider.js'; +import { MockEmbeddings } from '../mocks.js'; describe('edge cases and boundary conditions', () => { let provider: NativeProvider; diff --git a/tests/multi-language.test.ts b/tests/regression/language.test.ts similarity index 96% rename from tests/multi-language.test.ts rename to tests/regression/language.test.ts index 9e56886..4390774 100644 --- a/tests/multi-language.test.ts +++ b/tests/regression/language.test.ts @@ -11,8 +11,8 @@ * - Special characters and punctuation */ import { describe, it, expect, beforeAll, afterAll } from 'vitest'; -import { NativeProvider } from '../src/provider/native/index.js'; -import { MockEmbeddings } from './mocks.js'; +import { NativeProvider } from '../../src/core/native-provider.js'; +import { MockEmbeddings } from '../mocks.js'; describe('multi-language support', () => { let provider: NativeProvider; diff --git a/tests/custom-integration.test.ts b/tests/regression/scenario-custom.test.ts similarity index 97% rename from tests/custom-integration.test.ts rename to tests/regression/scenario-custom.test.ts index e84bd4a..982d4c6 100644 --- a/tests/custom-integration.test.ts +++ b/tests/regression/scenario-custom.test.ts @@ -7,9 +7,9 @@ * fallback behavior, reranker, and category-based topic filtering. 
*/ import { describe, it, expect, afterEach } from 'vitest'; -import { Memory } from '../src/memory.js'; -import { NativeProvider } from '../src/provider/native/index.js'; -import { MockEmbeddings, MockLLM } from './mocks.js'; +import { Memory } from '../../src/core/memory.js'; +import { NativeProvider } from '../../src/core/native-provider.js'; +import { MockEmbeddings, MockLLM } from '../mocks.js'; describe('custom integration — scenario 5', () => { let memory: Memory; diff --git a/tests/multi-agent.test.ts b/tests/regression/scenario-multi-agent.test.ts similarity index 97% rename from tests/multi-agent.test.ts rename to tests/regression/scenario-multi-agent.test.ts index 3f745eb..e57208d 100644 --- a/tests/multi-agent.test.ts +++ b/tests/regression/scenario-multi-agent.test.ts @@ -8,8 +8,8 @@ * - Concurrent adds don't lose data or corrupt state */ import { describe, it, expect, beforeAll, afterAll } from 'vitest'; -import { NativeProvider } from '../src/provider/native/index.js'; -import { MockEmbeddings } from './mocks.js'; +import { NativeProvider } from '../../src/core/native-provider.js'; +import { MockEmbeddings } from '../mocks.js'; describe('multi-agent isolation', () => { let provider: NativeProvider; diff --git a/tests/unit/agent/agent-memory.test.ts b/tests/unit/agent/agent-memory.test.ts new file mode 100644 index 0000000..0fd4c19 --- /dev/null +++ b/tests/unit/agent/agent-memory.test.ts @@ -0,0 +1,91 @@ +/** + * AgentMemory tests — port of Python regression/test_scenario_3_multi_agent.py. 
+ */ +import { describe, it, expect, afterEach } from 'vitest'; +import { Memory } from '../../../src/core/memory.js'; +import { AgentMemory } from '../../../src/agent/agent.js'; +import { MemoryScope, AccessPermission } from '../../../src/agent/types.js'; +import { MockEmbeddings } from '../../mocks.js'; + +describe('AgentMemory', () => { + let agentMem: AgentMemory; + + afterEach(async () => { + if (agentMem) await agentMem.close(); + }); + + async function createAgentMemory(mode = 'multi_agent') { + const memory = await Memory.create({ + embeddings: new MockEmbeddings(), + dbPath: ':memory:', + }); + return new AgentMemory(memory, { mode: mode as any }); + } + + it('initializes with default mode', async () => { + agentMem = await createAgentMemory(); + expect(agentMem.getMode()).toBe('multi_agent'); + }); + + it('add stores memory with scope', async () => { + agentMem = await createAgentMemory(); + const result = await agentMem.add('test memory', { + userId: 'user1', agentId: 'agent1', + }); + expect(result.memories).toBeDefined(); + expect(result.scope).toBeDefined(); + }); + + it('search returns results', async () => { + agentMem = await createAgentMemory(); + await agentMem.add('I love coffee', { userId: 'u1', agentId: 'a1' }); + const results = await agentMem.search('coffee', { userId: 'u1', agentId: 'a1' }); + expect(results.length).toBeGreaterThan(0); + }); + + it('getAll returns memories', async () => { + agentMem = await createAgentMemory(); + await agentMem.add('mem1', { userId: 'u1', agentId: 'a1' }); + await agentMem.add('mem2', { userId: 'u1', agentId: 'a1' }); + const all = await agentMem.getAll({ userId: 'u1', agentId: 'a1' }); + expect(all.length).toBe(2); + }); + + it('delete removes memory', async () => { + agentMem = await createAgentMemory(); + const result = await agentMem.add('to delete', { userId: 'u1', agentId: 'a1' }); + const id = (result.memories as any)[0].memoryId; + expect(await agentMem.delete(id)).toBe(true); + }); + + 
it('deleteAll clears agent memories', async () => { + agentMem = await createAgentMemory(); + await agentMem.add('a', { userId: 'u1', agentId: 'a1' }); + await agentMem.add('b', { userId: 'u1', agentId: 'a1' }); + await agentMem.deleteAll({ userId: 'u1', agentId: 'a1' }); + const all = await agentMem.getAll({ userId: 'u1', agentId: 'a1' }); + expect(all.length).toBe(0); + }); + + it('getStatistics returns mode', async () => { + agentMem = await createAgentMemory(); + const stats = await agentMem.getStatistics(); + expect(stats.mode).toBe('multi_agent'); + expect(typeof stats.totalMemories).toBe('number'); + }); + + it('grantPermission and checkPermission work', async () => { + agentMem = await createAgentMemory(); + await agentMem.grantPermission('mem1', 'agent1', AccessPermission.WRITE, 'admin'); + const hasWrite = await agentMem.checkPermission('agent1', 'mem1', AccessPermission.WRITE); + expect(hasWrite).toBe(true); + }); + + it('reset clears all', async () => { + agentMem = await createAgentMemory(); + await agentMem.add('a', { userId: 'u1' }); + await agentMem.reset(); + const stats = await agentMem.getStatistics(); + expect(stats.totalMemories).toBe(0); + }); +}); diff --git a/tests/unit/agent/permission-controller.test.ts b/tests/unit/agent/permission-controller.test.ts new file mode 100644 index 0000000..8bd98b9 --- /dev/null +++ b/tests/unit/agent/permission-controller.test.ts @@ -0,0 +1,56 @@ +/** + * PermissionController tests. 
+ */ +import { describe, it, expect } from 'vitest'; +import { PermissionController } from '../../../src/agent/components/permission-controller.js'; +import { AccessPermission } from '../../../src/agent/types.js'; + +describe('PermissionController', () => { + it('default permissions allow READ', async () => { + const ctrl = new PermissionController(); + expect(await ctrl.checkPermission('agent1', 'mem1', AccessPermission.READ)).toBe(true); + }); + + it('default permissions deny WRITE', async () => { + const ctrl = new PermissionController(); + expect(await ctrl.checkPermission('agent1', 'mem1', AccessPermission.WRITE)).toBe(false); + }); + + it('grantPermission enables access', async () => { + const ctrl = new PermissionController(); + await ctrl.grantPermission('mem1', 'agent1', AccessPermission.WRITE, 'admin'); + expect(await ctrl.checkPermission('agent1', 'mem1', AccessPermission.WRITE)).toBe(true); + }); + + it('revokePermission removes access', async () => { + const ctrl = new PermissionController(); + await ctrl.grantPermission('mem1', 'agent1', AccessPermission.WRITE, 'admin'); + await ctrl.revokePermission('mem1', 'agent1', AccessPermission.WRITE, 'admin'); + expect(await ctrl.checkPermission('agent1', 'mem1', AccessPermission.WRITE)).toBe(false); + }); + + it('getPermissions returns current permissions', async () => { + const ctrl = new PermissionController(); + await ctrl.grantPermission('mem1', 'agent1', AccessPermission.WRITE, 'admin'); + await ctrl.grantPermission('mem1', 'agent1', AccessPermission.DELETE, 'admin'); + const perms = await ctrl.getPermissions('mem1', 'agent1'); + expect(perms.permissionCount).toBe(2); + expect((perms.permissions as string[]).sort()).toEqual([AccessPermission.DELETE, AccessPermission.WRITE].sort()); + }); + + it('getPermissionHistory tracks access', async () => { + const ctrl = new PermissionController(); + await ctrl.checkPermission('agent1', 'mem1', AccessPermission.READ); + await ctrl.grantPermission('mem1', 'agent1', 
AccessPermission.WRITE, 'admin'); + const history = await ctrl.getPermissionHistory('mem1'); + expect(history.length).toBeGreaterThanOrEqual(2); + }); + + it('custom default permissions', async () => { + const ctrl = new PermissionController({ + defaultPermissions: [AccessPermission.READ, AccessPermission.WRITE], + }); + expect(await ctrl.checkPermission('any', 'any', AccessPermission.WRITE)).toBe(true); + expect(await ctrl.checkPermission('any', 'any', AccessPermission.DELETE)).toBe(false); + }); +}); diff --git a/tests/unit/agent/scope-controller.test.ts b/tests/unit/agent/scope-controller.test.ts new file mode 100644 index 0000000..74c2e10 --- /dev/null +++ b/tests/unit/agent/scope-controller.test.ts @@ -0,0 +1,43 @@ +/** + * ScopeController tests. + */ +import { describe, it, expect } from 'vitest'; +import { ScopeController } from '../../../src/agent/components/scope-controller.js'; +import { MemoryScope } from '../../../src/agent/types.js'; + +describe('ScopeController', () => { + it('defaults to PRIVATE scope', async () => { + const ctrl = new ScopeController(); + const scope = await ctrl.determineScope('agent1'); + expect(scope).toBe(MemoryScope.PRIVATE); + }); + + it('respects scope hint from metadata', async () => { + const ctrl = new ScopeController(); + const scope = await ctrl.determineScope('agent1', undefined, { scope: MemoryScope.PUBLIC }); + expect(scope).toBe(MemoryScope.PUBLIC); + }); + + it('respects config default scope', async () => { + const ctrl = new ScopeController({ defaultScope: MemoryScope.AGENT_GROUP }); + const scope = await ctrl.determineScope('agent1'); + expect(scope).toBe(MemoryScope.AGENT_GROUP); + }); + + it('updateMemoryScope moves memory between scopes', async () => { + const ctrl = new ScopeController(); + const result = await ctrl.updateMemoryScope('mem1', MemoryScope.PUBLIC, 'agent1'); + expect(result.success).toBe(true); + expect(result.newScope).toBe(MemoryScope.PUBLIC); + }); + + it('getScopeStatistics returns counts', 
async () => { + const ctrl = new ScopeController(); + await ctrl.updateMemoryScope('mem1', MemoryScope.PRIVATE, 'agent1'); + await ctrl.updateMemoryScope('mem2', MemoryScope.PUBLIC, 'agent1'); + const stats = await ctrl.getScopeStatistics(); + expect(stats.totalMemories).toBe(2); + expect((stats.scopeBreakdown as any)[MemoryScope.PRIVATE]).toBe(1); + expect((stats.scopeBreakdown as any)[MemoryScope.PUBLIC]).toBe(1); + }); +}); diff --git a/tests/unit/cli-utils.test.ts b/tests/unit/cli-utils.test.ts new file mode 100644 index 0000000..fc341ac --- /dev/null +++ b/tests/unit/cli-utils.test.ts @@ -0,0 +1,114 @@ +/** + * CLI utility tests — output formatting + envfile. + */ +import { describe, it, expect } from 'vitest'; +import fs from 'node:fs'; +import os from 'node:os'; +import path from 'node:path'; +import { truncate, formatMemoryTable, formatSearchTable, formatStats } from '../../src/cli/utils/output.js'; +import { parseEnvLines, formatEnvValue, updateEnvFile, readEnvFile } from '../../src/cli/utils/envfile.js'; + +describe('output utils', () => { + it('truncate shortens long strings', () => { + expect(truncate('short', 10)).toBe('short'); + expect(truncate('a long string that exceeds limit', 15)).toBe('a long strin...'); + }); + + it('formatMemoryTable shows header and rows', () => { + const table = formatMemoryTable([ + { memoryId: '123', userId: 'alice', content: 'hello world' }, + ]); + expect(table).toContain('ID'); + expect(table).toContain('123'); + expect(table).toContain('alice'); + expect(table).toContain('hello world'); + }); + + it('formatMemoryTable handles empty', () => { + expect(formatMemoryTable([])).toContain('no memories'); + }); + + it('formatSearchTable shows scores', () => { + const table = formatSearchTable([ + { memoryId: '1', score: 0.95, content: 'result' }, + ]); + expect(table).toContain('0.950'); + expect(table).toContain('result'); + }); + + it('formatStats shows totals', () => { + const output = formatStats({ + totalMemories: 42, + 
byType: { todo: 10, preference: 32 }, + ageDistribution: { '< 1 day': 5, '1-7 days': 37 }, + avgImportance: 0.65, + }); + expect(output).toContain('42'); + expect(output).toContain('todo: 10'); + expect(output).toContain('0.65'); + }); +}); + +describe('envfile utils', () => { + it('parseEnvLines extracts key-value pairs', () => { + const result = parseEnvLines([ + '# comment', + 'KEY1=value1', + 'KEY2="quoted value"', + 'export KEY3=val3', + '', + ]); + expect(result).toEqual({ KEY1: 'value1', KEY2: 'quoted value', KEY3: 'val3' }); + }); + + it('parseEnvLines first occurrence wins', () => { + const result = parseEnvLines(['K=first', 'K=second']); + expect(result.K).toBe('first'); + }); + + it('formatEnvValue quotes when needed', () => { + expect(formatEnvValue('simple')).toBe('simple'); + expect(formatEnvValue('has space')).toBe('"has space"'); + expect(formatEnvValue('has"quote')).toBe('"has\\"quote"'); + expect(formatEnvValue('')).toBe(''); + }); + + it('updateEnvFile creates new file', () => { + const tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), 'envtest-')); + const envPath = path.join(tmpDir, '.env'); + + const result = updateEnvFile(envPath, { FOO: 'bar', BAZ: 'qux' }); + expect(result.appendedKeys).toContain('FOO'); + expect(result.appendedKeys).toContain('BAZ'); + + const { parsed } = readEnvFile(envPath); + expect(parsed.FOO).toBe('bar'); + expect(parsed.BAZ).toBe('qux'); + + fs.rmSync(tmpDir, { recursive: true }); + }); + + it('updateEnvFile updates existing keys', () => { + const tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), 'envtest-')); + const envPath = path.join(tmpDir, '.env'); + fs.writeFileSync(envPath, 'EXISTING=old\nOTHER=keep\n'); + + const result = updateEnvFile(envPath, { EXISTING: 'new', ADDED: 'fresh' }); + expect(result.updatedKeys).toContain('EXISTING'); + expect(result.appendedKeys).toContain('ADDED'); + expect(result.backupPath).toBeDefined(); + + const { parsed } = readEnvFile(envPath); + expect(parsed.EXISTING).toBe('new'); + 
expect(parsed.OTHER).toBe('keep'); + expect(parsed.ADDED).toBe('fresh'); + + fs.rmSync(tmpDir, { recursive: true }); + }); + + it('readEnvFile returns empty for nonexistent', () => { + const { lines, parsed } = readEnvFile('/nonexistent/.env'); + expect(lines).toEqual([]); + expect(parsed).toEqual({}); + }); +}); diff --git a/tests/unit/config-loader.test.ts b/tests/unit/config-loader.test.ts new file mode 100644 index 0000000..99e2665 --- /dev/null +++ b/tests/unit/config-loader.test.ts @@ -0,0 +1,196 @@ +/** + * Config loader tests — port of Python unit/test_config_loader.py + */ +import { describe, it, expect, beforeEach, afterEach } from 'vitest'; +import { parseMemoryConfig, validateConfig, MemoryConfigSchema } from '../../src/configs.js'; +import { loadConfigFromEnv, autoConfig, createConfig } from '../../src/config-loader.js'; +import { getVersion } from '../../src/version.js'; + +describe('version', () => { + it('returns a semver string', () => { + expect(getVersion()).toMatch(/^\d+\.\d+\.\d+$/); + }); +}); + +describe('MemoryConfig parsing', () => { + it('parses minimal config with defaults', () => { + const config = parseMemoryConfig({}); + expect(config.vectorStore.provider).toBe('sqlite'); + expect(config.llm.provider).toBe('qwen'); + expect(config.embedder.provider).toBe('qwen'); + expect(config.version).toBe('v1.1'); + }); + + it('applies sub-config defaults', () => { + const config = parseMemoryConfig({}); + expect(config.intelligentMemory).toBeDefined(); + expect(config.intelligentMemory!.enabled).toBe(true); + expect(config.intelligentMemory!.decayRate).toBe(0.1); + expect(config.intelligentMemory!.fallbackToSimpleAdd).toBe(false); + expect(config.agentMemory).toBeDefined(); + expect(config.agentMemory!.mode).toBe('multi_agent'); + expect(config.telemetry).toBeDefined(); + expect(config.telemetry!.enableTelemetry).toBe(false); + expect(config.audit).toBeDefined(); + expect(config.audit!.enabled).toBe(true); + expect(config.logging).toBeDefined(); + 
expect(config.queryRewrite).toBeDefined(); + expect(config.queryRewrite!.enabled).toBe(false); + }); + + it('overrides defaults with explicit values', () => { + const config = parseMemoryConfig({ + vectorStore: { provider: 'seekdb', config: { path: '/tmp/db' } }, + llm: { provider: 'openai', config: { apiKey: 'sk-test' } }, + intelligentMemory: { enabled: false, fallbackToSimpleAdd: true }, + }); + expect(config.vectorStore.provider).toBe('seekdb'); + expect(config.vectorStore.config.path).toBe('/tmp/db'); + expect(config.llm.provider).toBe('openai'); + expect(config.intelligentMemory!.enabled).toBe(false); + expect(config.intelligentMemory!.fallbackToSimpleAdd).toBe(true); + }); + + it('accepts custom prompts', () => { + const config = parseMemoryConfig({ + customFactExtractionPrompt: 'My custom prompt', + customUpdateMemoryPrompt: 'My update prompt', + }); + expect(config.customFactExtractionPrompt).toBe('My custom prompt'); + expect(config.customUpdateMemoryPrompt).toBe('My update prompt'); + }); +}); + +describe('validateConfig', () => { + it('returns true for valid config', () => { + expect(validateConfig({ + vectorStore: { provider: 'sqlite', config: {} }, + llm: { provider: 'qwen', config: {} }, + embedder: { provider: 'qwen', config: {} }, + })).toBe(true); + }); + + it('returns false when missing required sections', () => { + expect(validateConfig({})).toBe(false); + expect(validateConfig({ vectorStore: { provider: 'sqlite', config: {} } })).toBe(false); + }); + + it('returns false when provider is missing', () => { + expect(validateConfig({ + vectorStore: { config: {} }, + llm: { provider: 'qwen', config: {} }, + embedder: { provider: 'qwen', config: {} }, + })).toBe(false); + }); +}); + +describe('loadConfigFromEnv', () => { + const origEnv = { ...process.env }; + + beforeEach(() => { + // Clear relevant env vars + for (const key of Object.keys(process.env)) { + if (key.startsWith('LLM_') || key.startsWith('EMBEDDING_') || key.startsWith('DATABASE_') || 
+ key.startsWith('INTELLIGENT_MEMORY_') || key.startsWith('RERANKER_') || + key === 'POWERMEM_ENV_FILE') { + delete process.env[key]; + } + } + }); + + afterEach(() => { + process.env = { ...origEnv }; + }); + + it('loads LLM config from env', () => { + process.env.LLM_PROVIDER = 'openai'; + process.env.LLM_API_KEY = 'sk-test'; + process.env.LLM_MODEL = 'gpt-4o'; + + const config = loadConfigFromEnv(); + expect(config.llm!.provider).toBe('openai'); + expect(config.llm!.config.apiKey).toBe('sk-test'); + expect(config.llm!.config.model).toBe('gpt-4o'); + }); + + it('loads embedding config from env', () => { + process.env.EMBEDDING_PROVIDER = 'openai'; + process.env.EMBEDDING_API_KEY = 'sk-embed'; + process.env.EMBEDDING_MODEL = 'text-embedding-3-small'; + process.env.EMBEDDING_DIMS = '1536'; + + const config = loadConfigFromEnv(); + expect(config.embedder!.provider).toBe('openai'); + expect(config.embedder!.config.apiKey).toBe('sk-embed'); + expect(config.embedder!.config.embeddingDims).toBe(1536); + }); + + it('loads database config from env', () => { + process.env.DATABASE_PROVIDER = 'sqlite'; + process.env.SQLITE_PATH = '/tmp/test.db'; + + const config = loadConfigFromEnv(); + expect(config.vectorStore!.provider).toBe('sqlite'); + expect(config.vectorStore!.config.path).toBe('/tmp/test.db'); + }); + + it('defaults to sqlite when no DATABASE_PROVIDER', () => { + const config = loadConfigFromEnv(); + expect(config.vectorStore!.provider).toBe('sqlite'); + }); + + it('loads intelligent memory settings from env', () => { + process.env.INTELLIGENT_MEMORY_ENABLED = 'false'; + process.env.INTELLIGENT_MEMORY_FALLBACK_TO_SIMPLE_ADD = 'true'; + process.env.INTELLIGENT_MEMORY_DECAY_RATE = '0.2'; + + const config = loadConfigFromEnv(); + expect(config.intelligentMemory).toBeDefined(); + expect(config.intelligentMemory!.enabled).toBe(false); + expect(config.intelligentMemory!.fallbackToSimpleAdd).toBe(true); + expect(config.intelligentMemory!.decayRate).toBe(0.2); + }); + + 
it('loads reranker when env is set', () => { + process.env.RERANKER_PROVIDER = 'qwen'; + const config = loadConfigFromEnv(); + expect(config.reranker).toBeDefined(); + expect(config.reranker!.provider).toBe('qwen'); + }); + + it('no reranker when env not set', () => { + const config = loadConfigFromEnv(); + expect(config.reranker).toBeUndefined(); + }); + + it('autoConfig is alias for loadConfigFromEnv', () => { + process.env.LLM_PROVIDER = 'anthropic'; + const c1 = loadConfigFromEnv(); + const c2 = autoConfig(); + expect(c1.llm!.provider).toBe(c2.llm!.provider); + }); +}); + +describe('createConfig', () => { + it('creates config with defaults', () => { + const config = createConfig(); + expect(config.vectorStore!.provider).toBe('sqlite'); + expect(config.llm!.provider).toBe('qwen'); + expect(config.embedder!.provider).toBe('qwen'); + }); + + it('creates config with overrides', () => { + const config = createConfig({ + databaseProvider: 'seekdb', + llmProvider: 'openai', + llmApiKey: 'sk-test', + llmModel: 'gpt-4o', + embeddingProvider: 'openai', + embeddingDims: 768, + }); + expect(config.vectorStore!.provider).toBe('seekdb'); + expect(config.llm!.provider).toBe('openai'); + expect(config.llm!.config.apiKey).toBe('sk-test'); + expect(config.embedder!.config.embeddingDims).toBe(768); + }); +}); diff --git a/tests/native-provider.test.ts b/tests/unit/core/memory.test.ts similarity index 99% rename from tests/native-provider.test.ts rename to tests/unit/core/memory.test.ts index f5a45a1..db69a5e 100644 --- a/tests/native-provider.test.ts +++ b/tests/unit/core/memory.test.ts @@ -1,6 +1,6 @@ import { describe, it, expect, beforeEach, afterEach } from 'vitest'; -import { NativeProvider } from '../src/provider/native/index.js'; -import { MockEmbeddings, MockLLM } from './mocks.js'; +import { NativeProvider } from '../../../src/core/native-provider.js'; +import { MockEmbeddings, MockLLM } from '../../mocks.js'; describe('NativeProvider', () => { let provider: 
NativeProvider; diff --git a/tests/decay.test.ts b/tests/unit/decay.test.ts similarity index 95% rename from tests/decay.test.ts rename to tests/unit/decay.test.ts index 969d02c..a94ffc2 100644 --- a/tests/decay.test.ts +++ b/tests/unit/decay.test.ts @@ -1,5 +1,5 @@ import { describe, it, expect } from 'vitest'; -import { computeDecayFactor, applyDecay } from '../src/provider/native/decay.js'; +import { computeDecayFactor, applyDecay } from '../../src/intelligence/ebbinghaus.js'; describe('computeDecayFactor', () => { const now = new Date('2026-04-01T12:00:00Z'); diff --git a/tests/embedder.test.ts b/tests/unit/embedder.test.ts similarity index 90% rename from tests/embedder.test.ts rename to tests/unit/embedder.test.ts index fbfc3f7..7454e01 100644 --- a/tests/embedder.test.ts +++ b/tests/unit/embedder.test.ts @@ -1,6 +1,6 @@ import { describe, it, expect } from 'vitest'; -import { Embedder } from '../src/provider/native/embedder.js'; -import { MockEmbeddings } from './mocks.js'; +import { Embedder } from '../../src/integrations/embeddings/embedder.js'; +import { MockEmbeddings } from '../mocks.js'; describe('Embedder', () => { it('replaces newlines with spaces', async () => { diff --git a/tests/unit/filter-parser.test.ts b/tests/unit/filter-parser.test.ts new file mode 100644 index 0000000..578ed93 --- /dev/null +++ b/tests/unit/filter-parser.test.ts @@ -0,0 +1,58 @@ +import { describe, it, expect } from 'vitest'; +import { parseAdvancedFilters } from '../../src/utils/filter-parser.js'; + +describe('parseAdvancedFilters', () => { + it('returns undefined for empty/null', () => { + expect(parseAdvancedFilters(undefined)).toBeUndefined(); + expect(parseAdvancedFilters({})).toBeUndefined(); + }); + + it('maps start_time/end_time to created_at range', () => { + const result = parseAdvancedFilters({ + start_time: '2024-01-01', end_time: '2024-12-31', + }); + expect(result).toEqual({ + created_at: { $gte: '2024-01-01', $lte: '2024-12-31' }, + }); + }); + + it('maps tags 
array to $in', () => { + const result = parseAdvancedFilters({ tags: ['a', 'b'] }); + expect(result).toEqual({ tags: { $in: ['a', 'b'] } }); + }); + + it('maps single tag as-is', () => { + const result = parseAdvancedFilters({ tags: 'single' }); + expect(result).toEqual({ tags: 'single' }); + }); + + it('maps type to category', () => { + const result = parseAdvancedFilters({ type: 'preference' }); + expect(result).toEqual({ category: 'preference' }); + }); + + it('maps importance number to $gte', () => { + const result = parseAdvancedFilters({ importance: 0.7 }); + expect(result).toEqual({ importance: { $gte: 0.7 } }); + }); + + it('handles multiple filters together', () => { + const result = parseAdvancedFilters({ + start_time: '2024-01-01', + type: 'todo', + importance: 0.5, + tags: ['work'], + }); + expect(result).toEqual({ + created_at: { $gte: '2024-01-01' }, + category: 'todo', + importance: { $gte: 0.5 }, + tags: { $in: ['work'] }, + }); + }); + + it('preserves unknown fields', () => { + const result = parseAdvancedFilters({ custom_field: 'value' }); + expect(result).toEqual({ custom_field: 'value' }); + }); +}); diff --git a/tests/inferrer.test.ts b/tests/unit/inferrer.test.ts similarity index 98% rename from tests/inferrer.test.ts rename to tests/unit/inferrer.test.ts index fb856b2..93d938d 100644 --- a/tests/inferrer.test.ts +++ b/tests/unit/inferrer.test.ts @@ -1,6 +1,6 @@ import { describe, it, expect } from 'vitest'; -import { Inferrer } from '../src/provider/native/inferrer.js'; -import { MockLLM } from './mocks.js'; +import { Inferrer } from '../../src/core/inferrer.js'; +import { MockLLM } from '../mocks.js'; describe('Inferrer', () => { describe('extractFacts', () => { diff --git a/tests/ebbinghaus.test.ts b/tests/unit/intelligence/ebbinghaus.test.ts similarity index 96% rename from tests/ebbinghaus.test.ts rename to tests/unit/intelligence/ebbinghaus.test.ts index b2bf77c..c4588b0 100644 --- a/tests/ebbinghaus.test.ts +++ 
b/tests/unit/intelligence/ebbinghaus.test.ts @@ -9,9 +9,9 @@ * - Decay affects search result ordering */ import { describe, it, expect, afterEach } from 'vitest'; -import { computeDecayFactor, applyDecay } from '../src/provider/native/decay.js'; -import { NativeProvider } from '../src/provider/native/index.js'; -import { MockEmbeddings } from './mocks.js'; +import { computeDecayFactor, applyDecay } from '../../../src/intelligence/ebbinghaus.js'; +import { NativeProvider } from '../../../src/core/native-provider.js'; +import { MockEmbeddings } from '../../mocks.js'; describe('Ebbinghaus forgetting curve — detailed', () => { const baseTime = new Date('2026-04-01T12:00:00Z'); diff --git a/tests/unit/intelligence/importance-evaluator.test.ts b/tests/unit/intelligence/importance-evaluator.test.ts new file mode 100644 index 0000000..ba635fa --- /dev/null +++ b/tests/unit/intelligence/importance-evaluator.test.ts @@ -0,0 +1,51 @@ +/** + * Importance evaluator tests — port of Python importance evaluation. 
+ */ +import { describe, it, expect } from 'vitest'; +import { ImportanceEvaluator } from '../../../src/intelligence/importance-evaluator.js'; + +describe('ImportanceEvaluator', () => { + const evaluator = new ImportanceEvaluator(); + + it('short trivial content scores low', () => { + const score = evaluator.evaluateImportance('Hi.'); + expect(score).toBeLessThan(0.2); + }); + + it('content with important keywords scores higher', () => { + const score = evaluator.evaluateImportance('This is critical and urgent information to remember!'); + expect(score).toBeGreaterThan(0.3); + }); + + it('long content scores higher than short', () => { + const short = evaluator.evaluateImportance('short'); + const long = evaluator.evaluateImportance('a'.repeat(150) + ' with important details'); + expect(long).toBeGreaterThan(short); + }); + + it('emotional content adds to score', () => { + const neutral = evaluator.evaluateImportance('The meeting is at 3pm'); + const emotional = evaluator.evaluateImportance('I love this project and am excited about it!'); + expect(emotional).toBeGreaterThan(neutral); + }); + + it('high priority metadata boosts score', () => { + const noMeta = evaluator.evaluateImportance('content'); + const highPriority = evaluator.evaluateImportance('content', { priority: 'high' }); + expect(highPriority).toBeGreaterThan(noMeta); + }); + + it('score is capped at 1.0', () => { + // Load up with every signal + const score = evaluator.evaluateImportance( + 'important critical urgent remember love hate! ? 
' + 'a'.repeat(200), + { priority: 'high', tags: ['test'] } + ); + expect(score).toBeLessThanOrEqual(1.0); + }); + + it('returns 0 for empty string', () => { + const score = evaluator.evaluateImportance(''); + expect(score).toBe(0); + }); +}); diff --git a/tests/unit/intelligence/manager.test.ts b/tests/unit/intelligence/manager.test.ts new file mode 100644 index 0000000..7a0788c --- /dev/null +++ b/tests/unit/intelligence/manager.test.ts @@ -0,0 +1,54 @@ +/** + * IntelligenceManager tests. + */ +import { describe, it, expect } from 'vitest'; +import { IntelligenceManager } from '../../../src/intelligence/manager.js'; + +describe('IntelligenceManager', () => { + it('disabled manager returns metadata unchanged', () => { + const mgr = new IntelligenceManager({ enabled: false }); + const result = mgr.processMetadata('test', { key: 'value' }); + expect(result).toEqual({ key: 'value' }); + }); + + it('enabled manager adds importance to metadata', () => { + const mgr = new IntelligenceManager({ enabled: true }); + const result = mgr.processMetadata('This is important and critical!', {}); + expect(result.importance).toBeDefined(); + expect(typeof result.importance).toBe('number'); + expect(result.importance as number).toBeGreaterThan(0); + }); + + it('processSearchResults with decay disabled returns unchanged', () => { + const mgr = new IntelligenceManager({ enabled: true, enableDecay: false }); + const results = [ + { id: '1', content: 'a', score: 0.9 }, + { id: '2', content: 'b', score: 0.8 }, + ]; + const processed = mgr.processSearchResults(results); + expect(processed[0].score).toBe(0.9); + expect(processed[1].score).toBe(0.8); + }); + + it('processSearchResults with decay enabled adjusts scores', () => { + const mgr = new IntelligenceManager({ enabled: true, enableDecay: true, decayWeight: 0.5 }); + const results = [ + { + id: '1', content: 'recent', score: 0.9, + createdAt: new Date().toISOString(), + updatedAt: new Date().toISOString(), + accessCount: 10, + }, + { 
+ id: '2', content: 'old', score: 0.9, + createdAt: new Date(Date.now() - 7 * 24 * 60 * 60 * 1000).toISOString(), + updatedAt: new Date(Date.now() - 7 * 24 * 60 * 60 * 1000).toISOString(), + accessCount: 0, + }, + ]; + const processed = mgr.processSearchResults(results); + // Recent + frequently accessed should rank higher than old + never accessed + expect(processed[0].id).toBe('1'); + expect(processed[0].score).toBeGreaterThan(processed[1].score); + }); +}); diff --git a/tests/unit/intelligence/memory-optimizer.test.ts b/tests/unit/intelligence/memory-optimizer.test.ts new file mode 100644 index 0000000..b91e03a --- /dev/null +++ b/tests/unit/intelligence/memory-optimizer.test.ts @@ -0,0 +1,105 @@ +/** + * Memory optimizer tests — port of Python unit/intelligence/test_memory_optimizer.py + */ +import { describe, it, expect, beforeEach, afterEach } from 'vitest'; +import { MemoryOptimizer } from '../../../src/intelligence/memory-optimizer.js'; +import { SQLiteStore } from '../../../src/storage/sqlite/sqlite.js'; + +describe('MemoryOptimizer', () => { + let store: SQLiteStore; + let optimizer: MemoryOptimizer; + + function makePayload(data: string, hash?: string, userId?: string): Record { + return { + data, user_id: userId ?? null, agent_id: null, run_id: null, + hash: hash ?? 
require('crypto').createHash('md5').update(data, 'utf-8').digest('hex'), + created_at: new Date().toISOString(), + updated_at: new Date().toISOString(), + scope: null, category: null, access_count: 0, metadata: {}, + }; + } + + beforeEach(() => { + store = new SQLiteStore(':memory:'); + optimizer = new MemoryOptimizer(store); + }); + + afterEach(async () => { + await store.close(); + }); + + describe('exact deduplication', () => { + it('removes exact duplicates by hash', async () => { + const payload = makePayload('duplicate content'); + await store.insert('1', [1, 0], payload); + await store.insert('2', [1, 0], { ...payload, created_at: new Date(Date.now() + 1000).toISOString() }); + await store.insert('3', [1, 0], { ...payload, created_at: new Date(Date.now() + 2000).toISOString() }); + + const result = await optimizer.deduplicate('exact'); + expect(result.totalChecked).toBe(3); + expect(result.duplicatesFound).toBe(2); + expect(result.deletedCount).toBe(2); + + // Only oldest should remain + expect(await store.count()).toBe(1); + expect(await store.getById('1')).not.toBeNull(); + }); + + it('preserves unique memories', async () => { + await store.insert('1', [1, 0], makePayload('unique A')); + await store.insert('2', [0, 1], makePayload('unique B')); + await store.insert('3', [1, 1], makePayload('unique C')); + + const result = await optimizer.deduplicate('exact'); + expect(result.duplicatesFound).toBe(0); + expect(result.deletedCount).toBe(0); + expect(await store.count()).toBe(3); + }); + + it('filters by userId', async () => { + await store.insert('1', [1, 0], makePayload('dup', 'hash1', 'alice')); + await store.insert('2', [1, 0], makePayload('dup', 'hash1', 'alice')); + await store.insert('3', [0, 1], makePayload('dup', 'hash1', 'bob')); + + const result = await optimizer.deduplicate('exact', 'alice'); + expect(result.deletedCount).toBe(1); + // Bob's memory untouched + expect(await store.getById('3')).not.toBeNull(); + }); + }); + + describe('semantic 
deduplication', () => { + it('removes semantically similar memories', async () => { + // Very similar vectors (cosine > 0.95) + await store.insert('1', [1, 0, 0], makePayload('fact A')); + await store.insert('2', [0.99, 0.01, 0], makePayload('fact A similar')); + await store.insert('3', [0, 1, 0], makePayload('different fact')); + + const result = await optimizer.deduplicate('semantic', undefined, 0.95); + expect(result.duplicatesFound).toBeGreaterThanOrEqual(1); + // Different fact should survive + expect(await store.getById('3')).not.toBeNull(); + }); + + it('no-op when all memories are unique', async () => { + await store.insert('1', [1, 0, 0], makePayload('x')); + await store.insert('2', [0, 1, 0], makePayload('y')); + await store.insert('3', [0, 0, 1], makePayload('z')); + + const result = await optimizer.deduplicate('semantic', undefined, 0.95); + expect(result.duplicatesFound).toBe(0); + expect(await store.count()).toBe(3); + }); + }); + + describe('cosine similarity calculation', () => { + it('identical vectors return ~1.0', async () => { + // Tested via semantic dedup — insert identical vectors + await store.insert('1', [1, 0], makePayload('a')); + await store.insert('2', [1, 0], makePayload('b')); + + const result = await optimizer.deduplicate('semantic', undefined, 0.99); + expect(result.duplicatesFound).toBe(1); + }); + }); +}); diff --git a/tests/provider-factory.test.ts b/tests/unit/provider-factory.test.ts similarity index 96% rename from tests/provider-factory.test.ts rename to tests/unit/provider-factory.test.ts index 3710127..ec8e0e8 100644 --- a/tests/provider-factory.test.ts +++ b/tests/unit/provider-factory.test.ts @@ -1,5 +1,5 @@ import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest'; -import { createEmbeddingsFromEnv, createLLMFromEnv } from '../src/provider/native/provider-factory.js'; +import { createEmbeddingsFromEnv, createLLMFromEnv } from '../../src/integrations/factory.js'; describe('provider-factory', () => { const 
originalEnv = { ...process.env }; diff --git a/tests/search.test.ts b/tests/unit/search.test.ts similarity index 95% rename from tests/search.test.ts rename to tests/unit/search.test.ts index 18f5fef..79be4f4 100644 --- a/tests/search.test.ts +++ b/tests/unit/search.test.ts @@ -1,5 +1,5 @@ import { describe, it, expect } from 'vitest'; -import { cosineSimilarity } from '../src/provider/native/search.js'; +import { cosineSimilarity } from '../../src/utils/search.js'; describe('cosineSimilarity', () => { it('returns 1.0 for identical vectors', () => { diff --git a/tests/snowflake.test.ts b/tests/unit/snowflake.test.ts similarity index 93% rename from tests/snowflake.test.ts rename to tests/unit/snowflake.test.ts index 9028a55..8bbcab0 100644 --- a/tests/snowflake.test.ts +++ b/tests/unit/snowflake.test.ts @@ -1,5 +1,5 @@ import { describe, it, expect } from 'vitest'; -import { SnowflakeIDGenerator } from '../src/provider/native/snowflake.js'; +import { SnowflakeIDGenerator } from '../../src/utils/snowflake.js'; describe('SnowflakeIDGenerator', () => { it('generates unique IDs', () => { diff --git a/tests/sorting-combos.test.ts b/tests/unit/sorting.test.ts similarity index 97% rename from tests/sorting-combos.test.ts rename to tests/unit/sorting.test.ts index c74a192..5355138 100644 --- a/tests/sorting-combos.test.ts +++ b/tests/unit/sorting.test.ts @@ -6,8 +6,8 @@ * to ensure SQLite ORDER BY json_extract works correctly across all axes. 
*/ import { describe, it, expect, beforeAll, afterAll } from 'vitest'; -import { NativeProvider } from '../src/provider/native/index.js'; -import { MockEmbeddings } from './mocks.js'; +import { NativeProvider } from '../../src/core/native-provider.js'; +import { MockEmbeddings } from '../mocks.js'; describe('getAll sorting — combinatorial', () => { let provider: NativeProvider; diff --git a/tests/unit/stats.test.ts b/tests/unit/stats.test.ts new file mode 100644 index 0000000..1fd90e9 --- /dev/null +++ b/tests/unit/stats.test.ts @@ -0,0 +1,89 @@ +import { describe, it, expect } from 'vitest'; +import { calculateStatsFromMemories } from '../../src/utils/stats.js'; + +describe('calculateStatsFromMemories', () => { + it('returns zeros for empty array', () => { + const stats = calculateStatsFromMemories([]); + expect(stats.totalMemories).toBe(0); + expect(stats.byType).toEqual({}); + expect(stats.avgImportance).toBe(0); + expect(stats.topAccessed).toEqual([]); + expect(stats.growthTrend).toEqual({}); + expect(stats.ageDistribution['< 1 day']).toBe(0); + }); + + it('counts total memories', () => { + const memories = [ + { id: '1', content: 'a' }, + { id: '2', content: 'b' }, + { id: '3', content: 'c' }, + ]; + expect(calculateStatsFromMemories(memories).totalMemories).toBe(3); + }); + + it('groups by category', () => { + const memories = [ + { id: '1', category: 'todo' }, + { id: '2', category: 'preference' }, + { id: '3', category: 'todo' }, + ]; + const stats = calculateStatsFromMemories(memories); + expect(stats.byType).toEqual({ todo: 2, preference: 1 }); + }); + + it('defaults category to unknown', () => { + const memories = [{ id: '1' }]; + expect(calculateStatsFromMemories(memories).byType).toEqual({ unknown: 1 }); + }); + + it('calculates avg importance', () => { + const memories = [ + { id: '1', importance: 0.8 }, + { id: '2', importance: 0.6 }, + ]; + expect(calculateStatsFromMemories(memories).avgImportance).toBe(0.7); + }); + + it('ranks by access count', () 
=> { + const memories = [ + { id: '1', content: 'low', accessCount: 1 }, + { id: '2', content: 'high', accessCount: 100 }, + { id: '3', content: 'mid', accessCount: 10 }, + ]; + const stats = calculateStatsFromMemories(memories); + expect(stats.topAccessed[0].id).toBe('2'); + expect(stats.topAccessed[0].accessCount).toBe(100); + }); + + it('computes growth trend by date', () => { + const today = new Date().toISOString().split('T')[0]; + const memories = [ + { id: '1', createdAt: new Date().toISOString() }, + { id: '2', createdAt: new Date().toISOString() }, + ]; + const stats = calculateStatsFromMemories(memories); + expect(stats.growthTrend[today]).toBe(2); + }); + + it('computes age distribution', () => { + const now = Date.now(); + const memories = [ + { id: '1', createdAt: new Date(now).toISOString() }, // < 1 day + { id: '2', createdAt: new Date(now - 3 * 24 * 60 * 60 * 1000).toISOString() }, // 1-7 days + { id: '3', createdAt: new Date(now - 15 * 24 * 60 * 60 * 1000).toISOString() }, // 7-30 days + { id: '4', createdAt: new Date(now - 60 * 24 * 60 * 60 * 1000).toISOString() }, // > 30 days + ]; + const stats = calculateStatsFromMemories(memories); + expect(stats.ageDistribution['< 1 day']).toBe(1); + expect(stats.ageDistribution['1-7 days']).toBe(1); + expect(stats.ageDistribution['7-30 days']).toBe(1); + expect(stats.ageDistribution['> 30 days']).toBe(1); + }); + + it('truncates content to 100 chars in topAccessed', () => { + const longContent = 'a'.repeat(200); + const memories = [{ id: '1', content: longContent, accessCount: 5 }]; + const stats = calculateStatsFromMemories(memories); + expect(stats.topAccessed[0].content.length).toBe(100); + }); +}); diff --git a/tests/unit/storage/adapter.test.ts b/tests/unit/storage/adapter.test.ts new file mode 100644 index 0000000..6359e73 --- /dev/null +++ b/tests/unit/storage/adapter.test.ts @@ -0,0 +1,103 @@ +/** + * StorageAdapter tests — verifies the adapter layer over VectorStore. 
+ */ +import { describe, it, expect, beforeEach, afterEach } from 'vitest'; +import { StorageAdapter } from '../../../src/storage/adapter.js'; +import { SQLiteStore } from '../../../src/storage/sqlite/sqlite.js'; + +describe('StorageAdapter', () => { + let adapter: StorageAdapter; + + beforeEach(() => { + const store = new SQLiteStore(':memory:'); + adapter = new StorageAdapter(store); + }); + + afterEach(async () => { + await adapter.close(); + }); + + function makePayload(data: string, userId?: string): Record { + return { + data, user_id: userId ?? null, agent_id: null, run_id: null, + hash: 'h', created_at: new Date().toISOString(), + updated_at: new Date().toISOString(), scope: null, + category: null, access_count: 0, metadata: {}, + }; + } + + it('addMemory + getMemory', async () => { + await adapter.addMemory('1', [1, 0], makePayload('hello')); + const mem = await adapter.getMemory('1'); + expect(mem).not.toBeNull(); + expect(mem!.content).toBe('hello'); + }); + + it('searchMemories returns ranked results', async () => { + await adapter.addMemory('1', [1, 0, 0], makePayload('x-axis')); + await adapter.addMemory('2', [0, 1, 0], makePayload('y-axis')); + const results = await adapter.searchMemories([1, 0, 0], {}, 10); + expect(results[0].content).toBe('x-axis'); + }); + + it('updateMemory + getMemory', async () => { + await adapter.addMemory('1', [1, 0], makePayload('old')); + await adapter.updateMemory('1', [0, 1], makePayload('new')); + const mem = await adapter.getMemory('1'); + expect(mem!.content).toBe('new'); + }); + + it('deleteMemory', async () => { + await adapter.addMemory('1', [1, 0], makePayload('del')); + expect(await adapter.deleteMemory('1')).toBe(true); + expect(await adapter.getMemory('1')).toBeNull(); + }); + + it('listMemories with pagination', async () => { + for (let i = 0; i < 5; i++) { + await adapter.addMemory(String(i), [i, 0], makePayload(`item${i}`)); + } + const { records, total } = await adapter.listMemories({}, 2, 0); + 
expect(total).toBe(5); + expect(records).toHaveLength(2); + }); + + it('countMemories', async () => { + await adapter.addMemory('1', [1, 0], makePayload('a', 'alice')); + await adapter.addMemory('2', [0, 1], makePayload('b', 'bob')); + expect(await adapter.countMemories()).toBe(2); + expect(await adapter.countMemories({ userId: 'alice' })).toBe(1); + }); + + it('deleteAllMemories with filter', async () => { + await adapter.addMemory('1', [1, 0], makePayload('a', 'alice')); + await adapter.addMemory('2', [0, 1], makePayload('b', 'bob')); + await adapter.deleteAllMemories({ userId: 'alice' }); + expect(await adapter.countMemories()).toBe(1); + }); + + it('getStatistics', async () => { + await adapter.addMemory('1', [1, 0], makePayload('a')); + await adapter.addMemory('2', [0, 1], makePayload('b')); + const stats = await adapter.getStatistics(); + expect(stats.totalMemories).toBe(2); + }); + + it('getUniqueUsers', async () => { + await adapter.addMemory('1', [1, 0], makePayload('a', 'alice')); + await adapter.addMemory('2', [0, 1], makePayload('b', 'bob')); + await adapter.addMemory('3', [1, 1], makePayload('c', 'alice')); + const users = await adapter.getUniqueUsers(); + expect(users.sort()).toEqual(['alice', 'bob']); + }); + + it('reset clears all', async () => { + await adapter.addMemory('1', [1, 0], makePayload('a')); + await adapter.reset(); + expect(await adapter.countMemories()).toBe(0); + }); + + it('raw returns underlying store', () => { + expect(adapter.raw).toBeInstanceOf(SQLiteStore); + }); +}); diff --git a/tests/unit/storage/factory.test.ts b/tests/unit/storage/factory.test.ts new file mode 100644 index 0000000..4c76cca --- /dev/null +++ b/tests/unit/storage/factory.test.ts @@ -0,0 +1,66 @@ +/** + * VectorStoreFactory tests — port of Python storage factory tests. 
+ */ +import { describe, it, expect } from 'vitest'; +import { VectorStoreFactory } from '../../../src/storage/factory.js'; + +describe('VectorStoreFactory', () => { + it('lists built-in providers', () => { + const providers = VectorStoreFactory.getSupportedProviders(); + expect(providers).toContain('sqlite'); + expect(providers).toContain('seekdb'); + }); + + it('hasProvider returns true for registered providers', () => { + expect(VectorStoreFactory.hasProvider('sqlite')).toBe(true); + expect(VectorStoreFactory.hasProvider('SQLite')).toBe(true); + expect(VectorStoreFactory.hasProvider('seekdb')).toBe(true); + }); + + it('hasProvider returns false for unknown', () => { + expect(VectorStoreFactory.hasProvider('nonexistent')).toBe(false); + }); + + it('creates SQLiteStore via factory', async () => { + const store = await VectorStoreFactory.create('sqlite', { path: ':memory:' }); + expect(store).toBeDefined(); + // Verify it works + await store.insert('1', [1, 0], { + data: 'test', user_id: null, agent_id: null, run_id: null, + hash: 'h', created_at: new Date().toISOString(), + updated_at: new Date().toISOString(), category: null, + scope: null, access_count: 0, metadata: {}, + }); + expect(await store.count()).toBe(1); + await store.close(); + }); + + it('throws for unsupported provider', async () => { + await expect(VectorStoreFactory.create('nonexistent')) + .rejects.toThrow('Unsupported VectorStore provider'); + }); + + it('register adds a custom provider', async () => { + VectorStoreFactory.register('custom', async () => { + // Return a minimal mock store + return { + insert: async () => {}, + getById: async () => null, + update: async () => {}, + remove: async () => false, + list: async () => ({ records: [], total: 0 }), + search: async () => [], + count: async () => 0, + incrementAccessCount: async () => {}, + incrementAccessCountBatch: async () => {}, + removeAll: async () => {}, + close: async () => {}, + }; + }); + + 
expect(VectorStoreFactory.hasProvider('custom')).toBe(true); + const store = await VectorStoreFactory.create('custom'); + expect(store).toBeDefined(); + expect(await store.count()).toBe(0); + }); +}); diff --git a/tests/seekdb-store.test.ts b/tests/unit/storage/seekdb.test.ts similarity index 97% rename from tests/seekdb-store.test.ts rename to tests/unit/storage/seekdb.test.ts index c903f75..46f1038 100644 --- a/tests/seekdb-store.test.ts +++ b/tests/unit/storage/seekdb.test.ts @@ -6,7 +6,7 @@ import { describe, it, expect, beforeEach, afterEach } from 'vitest'; import fs from 'node:fs'; import path from 'node:path'; import os from 'node:os'; -import { SeekDBStore } from '../src/provider/native/seekdb-store.js'; +import { SeekDBStore } from '../../../src/storage/seekdb/seekdb.js'; /** Try to create a SeekDBStore — returns null if seekdb native bindings unavailable */ async function tryCreateStore(tmpDir: string, collectionName: string) { @@ -29,9 +29,9 @@ let seekdbAvailable = false; try { const s = await tryCreateStore(dir, 'check'); seekdbAvailable = s != null; - await s?.close(); + // Don't call close() — SeekDB embedded may SIGABRT on cleanup } finally { - fs.rmSync(dir, { recursive: true, force: true }); + try { fs.rmSync(dir, { recursive: true, force: true }); } catch {} } } @@ -180,6 +180,7 @@ describeIf('SeekDBStore', () => { }); it('metadata round-trip', async () => { + // Metadata is base64-encoded to bypass SeekDB C engine JSON limitations await store.insert('1', [1, 0, 0], makePayload({ data: 'with meta', metadata: { key: 'value', nested: { deep: true }, tags: [1, 2, 3] }, @@ -375,6 +376,7 @@ describeIf('SeekDBStore', () => { }); it('unicode metadata values round-trip', async () => { + // Metadata is base64-encoded to handle unicode safely await store.insert('1', [1, 0, 0], makePayload({ metadata: { '标签': '重要', emoji: '🏷️' }, })); diff --git a/tests/store.test.ts b/tests/unit/storage/sqlite.test.ts similarity index 99% rename from tests/store.test.ts 
rename to tests/unit/storage/sqlite.test.ts index 44f0471..fb226ee 100644 --- a/tests/store.test.ts +++ b/tests/unit/storage/sqlite.test.ts @@ -1,5 +1,5 @@ import { describe, it, expect, beforeEach, afterEach } from 'vitest'; -import { SQLiteStore } from '../src/provider/native/store.js'; +import { SQLiteStore } from '../../../src/storage/sqlite/sqlite.js'; describe('SQLiteStore', () => { let store: SQLiteStore; diff --git a/tests/unit/user-memory/user-memory.test.ts b/tests/unit/user-memory/user-memory.test.ts new file mode 100644 index 0000000..39559a7 --- /dev/null +++ b/tests/unit/user-memory/user-memory.test.ts @@ -0,0 +1,99 @@ +/** + * UserMemory tests — port of Python regression/test_user_profile.py. + */ +import { describe, it, expect, afterEach } from 'vitest'; +import { Memory } from '../../../src/core/memory.js'; +import { UserMemory } from '../../../src/user-memory/user-memory.js'; +import { SQLiteUserProfileStore } from '../../../src/user-memory/storage/user-profile-sqlite.js'; +import { MockEmbeddings } from '../../mocks.js'; + +describe('UserMemory', () => { + let userMem: UserMemory; + + afterEach(async () => { + if (userMem) await userMem.close(); + }); + + async function createUserMemory() { + const memory = await Memory.create({ + embeddings: new MockEmbeddings(), + dbPath: ':memory:', + }); + const profileStore = new SQLiteUserProfileStore(':memory:'); + return new UserMemory({ memory, profileStore }); + } + + it('add stores memory', async () => { + userMem = await createUserMemory(); + const result = await userMem.add('I like coffee', { userId: 'u1' }); + expect(result.memories).toBeDefined(); + }); + + it('add with extractProfile stores profile', async () => { + userMem = await createUserMemory(); + const result = await userMem.add('I like coffee', { + userId: 'u1', + extractProfile: true, + profileContent: 'Likes coffee', + }); + expect(result.profileExtracted).toBe(true); + + const profile = await userMem.profile('u1'); + 
expect(profile).not.toBeNull(); + expect(profile!.profileContent).toBe('Likes coffee'); + }); + + it('search returns results', async () => { + userMem = await createUserMemory(); + await userMem.add('I love hiking in mountains', { userId: 'u1', infer: false }); + const result = await userMem.search('hiking', { userId: 'u1' }); + expect(result.results).toBeDefined(); + }); + + it('search with addProfile includes profile data', async () => { + userMem = await createUserMemory(); + await userMem.add('memory content', { userId: 'u1', infer: false }); + await userMem.add('more content', { + userId: 'u1', + extractProfile: true, + profileContent: 'User profile data', + }); + + const result = await userMem.search('content', { userId: 'u1', addProfile: true }); + expect(result.profileContent).toBe('User profile data'); + }); + + it('profile returns null for nonexistent user', async () => { + userMem = await createUserMemory(); + expect(await userMem.profile('nobody')).toBeNull(); + }); + + it('deleteProfile removes profile', async () => { + userMem = await createUserMemory(); + await userMem.add('x', { + userId: 'u1', + extractProfile: true, + profileContent: 'to delete', + }); + expect(await userMem.deleteProfile('u1')).toBe(true); + expect(await userMem.profile('u1')).toBeNull(); + }); + + it('deleteProfile returns false for nonexistent', async () => { + userMem = await createUserMemory(); + expect(await userMem.deleteProfile('nobody')).toBe(false); + }); + + it('deleteAll with deleteProfile removes both', async () => { + userMem = await createUserMemory(); + await userMem.add('memory', { userId: 'u1', infer: false }); + await userMem.add('with profile', { + userId: 'u1', + extractProfile: true, + profileContent: 'profile data', + }); + + await userMem.deleteAll('u1', { deleteProfile: true }); + expect(await userMem.profile('u1')).toBeNull(); + }); +}); diff --git a/tests/unit/user-memory/user-profile-sqlite.test.ts b/tests/unit/user-memory/user-profile-sqlite.test.ts 
new file mode 100644 index 0000000..078e526 --- /dev/null +++ b/tests/unit/user-memory/user-profile-sqlite.test.ts @@ -0,0 +1,101 @@ +/** + * SQLiteUserProfileStore tests — port of Python regression/test_user_profile.py. + */ +import { describe, it, expect, beforeEach, afterEach } from 'vitest'; +import { SQLiteUserProfileStore } from '../../../src/user-memory/storage/user-profile-sqlite.js'; + +describe('SQLiteUserProfileStore', () => { + let store: SQLiteUserProfileStore; + + beforeEach(() => { + store = new SQLiteUserProfileStore(':memory:'); + }); + + afterEach(async () => { + await store.close(); + }); + + it('saveProfile creates new profile', async () => { + const id = await store.saveProfile('user1', 'Alice is a software engineer'); + expect(id).toBeTruthy(); + + const profile = await store.getProfileByUserId('user1'); + expect(profile).not.toBeNull(); + expect(profile!.userId).toBe('user1'); + expect(profile!.profileContent).toBe('Alice is a software engineer'); + }); + + it('saveProfile updates existing profile', async () => { + await store.saveProfile('user1', 'initial profile'); + await store.saveProfile('user1', 'updated profile'); + + const profile = await store.getProfileByUserId('user1'); + expect(profile!.profileContent).toBe('updated profile'); + + // Should still be 1 profile, not 2 + expect(await store.countProfiles('user1')).toBe(1); + }); + + it('saveProfile with topics', async () => { + await store.saveProfile('user1', undefined, { + preferences: { coffee: 'dark roast', music: 'jazz' }, + work: { role: 'engineer' }, + }); + + const profile = await store.getProfileByUserId('user1'); + expect(profile!.topics).toBeDefined(); + expect((profile!.topics as any).preferences.coffee).toBe('dark roast'); + }); + + it('getProfileByUserId returns null for nonexistent', async () => { + expect(await store.getProfileByUserId('nobody')).toBeNull(); + }); + + it('getProfiles lists profiles', async () => { + await store.saveProfile('alice', 'Alice profile'); + 
await store.saveProfile('bob', 'Bob profile'); + + const all = await store.getProfiles(); + expect(all.length).toBe(2); + + const aliceOnly = await store.getProfiles({ userId: 'alice' }); + expect(aliceOnly.length).toBe(1); + expect(aliceOnly[0].userId).toBe('alice'); + }); + + it('getProfiles with pagination', async () => { + await store.saveProfile('u1', 'p1'); + await store.saveProfile('u2', 'p2'); + await store.saveProfile('u3', 'p3'); + + const page = await store.getProfiles({ limit: 2 }); + expect(page.length).toBe(2); + }); + + it('getProfiles with mainTopic filter', async () => { + await store.saveProfile('u1', undefined, { food: { fav: 'pizza' } }); + await store.saveProfile('u2', undefined, { work: { role: 'dev' } }); + + const foodProfiles = await store.getProfiles({ mainTopic: 'food' }); + expect(foodProfiles.length).toBe(1); + expect(foodProfiles[0].userId).toBe('u1'); + }); + + it('deleteProfile removes profile', async () => { + const id = await store.saveProfile('user1', 'to delete'); + expect(await store.deleteProfile(id)).toBe(true); + expect(await store.getProfileByUserId('user1')).toBeNull(); + }); + + it('deleteProfile returns false for nonexistent', async () => { + expect(await store.deleteProfile('99999')).toBe(false); + }); + + it('countProfiles', async () => { + expect(await store.countProfiles()).toBe(0); + await store.saveProfile('u1', 'p1'); + await store.saveProfile('u2', 'p2'); + expect(await store.countProfiles()).toBe(2); + expect(await store.countProfiles('u1')).toBe(1); + }); +}); diff --git a/tsup.config.ts b/tsup.config.ts index 23c75e9..ec5d7fa 100644 --- a/tsup.config.ts +++ b/tsup.config.ts @@ -1,10 +1,20 @@ import { defineConfig } from 'tsup'; -export default defineConfig({ - entry: ['src/index.ts'], - format: ['cjs', 'esm'], - dts: true, - clean: true, - sourcemap: true, - external: ['better-sqlite3', 'seekdb', /^@langchain\//, /^@seekdb\//], -}); +export default defineConfig([ + { + entry: ['src/index.ts'], + format: ['cjs', 
'esm'], + dts: true, + clean: true, + sourcemap: true, + external: ['better-sqlite3', 'seekdb', 'commander', /^@langchain\//, /^@seekdb\//], + }, + { + entry: { cli: 'src/cli/main.ts' }, + format: ['esm'], + banner: { js: '#!/usr/bin/env node' }, + clean: false, + sourcemap: true, + external: ['better-sqlite3', 'seekdb', 'commander', /^@langchain\//, /^@seekdb\//], + }, +]);