diff --git a/.gitignore b/.gitignore index 55a3c15..e4fe99d 100644 --- a/.gitignore +++ b/.gitignore @@ -46,3 +46,5 @@ tasks/ # Added by goreleaser init: dist/ IMPROVEMENTS.md +CULL_CANDIDATES.md +picoclaw-latest/ diff --git a/AGENTS.md b/AGENTS.md index ef24351..5ddad79 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -30,7 +30,7 @@ firmware/overlay/ ← Files baked into Luckfox firmware image |-----------|-------|--------| | Total RAM | 64MB DDR2 | Only ~33MB usable after kernel | | Usable RAM | ~33MB | Gateway uses 10-14MB, leaves 2-12MB free | -| GOMEMLIMIT | 8MiB | Baked into binary via `applyPerformanceDefaults()` | +| GOMEMLIMIT | 24MiB | Set in init script; binary default is 8MiB but overridden | | GOGC | 20 | Aggressive GC to prevent RSS growth | | Flash | 128MB SPI NAND | Limited storage, no swap | | CPU | ARM Cortex-A7 (RV1103) | Single core, GOARM=7 | @@ -39,12 +39,12 @@ firmware/overlay/ ← Files baked into Luckfox firmware image ### What We Changed - **Onboarding**: Simplified to OpenRouter only (was 7 provider choices) -- **Performance**: Baked `GOGC=20` + `GOMEMLIMIT=8MiB` into binary +- **Performance**: Baked `GOGC=20` into binary; init script sets `GOMEMLIMIT=24MiB` - **CLI**: Added `luckyclaw stop`, `restart`, `gateway -b` (background) - **Init script**: Auto-starts gateway on boot with OOM protection - **SSH banner**: Shows ASCII art, status, memory, all commands on login - **Default model**: `google/gemini-2.0-flash-exp:free` (free tier) -- **Defaults**: `max_tokens=4096`, `max_tool_iterations=10` (was 8192/20) +- **Defaults**: `max_tokens=16384`, `max_tool_iterations=25` (tuned for web search headroom) ### What We Did NOT Change All PicoClaw channels (Telegram, Discord, QQ, LINE, Slack, WhatsApp, etc.) and tools remain in the codebase. Users can configure any provider via `config.json` directly. @@ -58,7 +58,7 @@ Go allocates ~500MB virtual memory (lazy reservations). 
The Linux OOM killer use The `loadStore()` function in `pkg/cron/service.go` panicked on empty or corrupted `jobs.json`. **Fix**: Added graceful handling — treats empty/corrupt files as fresh state. ### 3. Init Script Must Bake Environment Variables -The init script at `/etc/init.d/S99luckyclaw` MUST export `GOGC=20`, `GOMEMLIMIT=8MiB`, and `TZ` before starting the daemon. Without these, the binary runs with Go defaults and immediately OOMs. +The init script at `/etc/init.d/S99luckyclaw` MUST export `GOGC=20`, `GOMEMLIMIT=24MiB`, and `TZ` before starting the daemon. Without these, the binary runs with Go defaults and immediately OOMs. WARNING: setting GOMEMLIMIT too low (e.g. 8MiB) causes the GC to spin at 100% CPU. ### 4. Busybox Limitations Luckfox uses Busybox. `wget` doesn't support HTTPS. `sudo` doesn't exist—you're already root. `curl` isn't available. The Go binary handles all HTTPS via `net/http`. @@ -67,7 +67,7 @@ Luckfox uses Busybox. `wget` doesn't support HTTPS. `sudo` doesn't exist—you'r Telegram API DNS (`api.telegram.org`) sometimes fails to resolve. The init script adds a static entry to `/etc/hosts`. ### 6. Don't Add Unnecessary Dependencies -Every byte counts. The binary is already ~15MB stripped. Adding dependencies increases memory usage. Always test with `GOMEMLIMIT=8MiB`. +Every byte counts. The binary is already ~15MB stripped. Adding dependencies increases memory usage. Always test with `GOMEMLIMIT=24MiB`. ### 7. AI Agent Access to the Device If you are an AI agent and need to test changes, examine logs, or execute commands directly on the Luckfox Pico hardware, **do not guess the IP or password**. Simply ask the user to provide the SSH IP address and password for the device, and use the `run_command` tool via `sshpass` (e.g., `sshpass -p ssh root@`). @@ -81,6 +81,14 @@ If you are an AI agent, you **MUST NEVER** execute code changes, environment mod ### 10. 
Multiple Daemon Instances & PID Tracking If `luckyclaw gateway -b` is executed while a daemon started by `/etc/init.d/S99luckyclaw` is already running it will overwrite the `/var/run/luckyclaw.pid` file. Because the init script only tracks the latest PID, subsequent `stop` or `restart` commands will leave the original daemon alive as a zombie, causing duplicate Telegram processing and hallucinated timestamps in session memory. **Fix:** Going forward, making sure we strictly append `&& killall -9 luckyclaw` alongside the init script (which I've started doing in my deploy commands) completely eliminates the possibility of this happening again. +### 11. PicoClaw Upstream Reference +A shallow clone of the upstream PicoClaw repo is kept at `picoclaw-latest/` (gitignored). This is used for comparing upstream changes and evaluating code worth porting. To refresh it: `cd picoclaw-latest && git pull`. Do not commit this directory. + +### 12. Log File Destinations & Workspace Paths +- **Gateway log**: `/var/log/luckyclaw.log` (stdout/stderr from the init script). The init script uses an `sh -c "exec ..."` wrapper because BusyBox's `start-stop-daemon -b` redirects fds to `/dev/null` before shell redirects take effect. +- **Heartbeat log**: `/heartbeat.log` (written directly by the heartbeat service, not stdout). +- **Runtime workspace**: `/oem/.luckyclaw/workspace/` — this is where the bot actually reads/writes data at runtime. The firmware overlay installs template files to `/root/.luckyclaw/workspace/` but these are NOT used at runtime because `luckyclaw onboard` creates its config at `/oem/`. Any default template changes must also be reflected in `createDefaultHeartbeatTemplate()` in `pkg/heartbeat/service.go`. 
+ ## Build & Deploy ### Testing Before Commits @@ -98,14 +106,25 @@ GOOS=linux GOARCH=arm GOARM=7 CGO_ENABLED=0 \ ``` ### Deploy to Device + +> **⚠️ IMPORTANT:** The binary MUST be deployed to `/usr/bin/luckyclaw` — this is where the init script +> (`/etc/init.d/S99luckyclaw`) and PATH (`which luckyclaw`) expect it. Do NOT deploy to `/usr/local/bin/`. +> The running process locks the file, so you must kill it before copying. + ```bash -sshpass -p 'luckfox' scp build/luckyclaw-linux-arm root@192.168.1.175:/usr/local/bin/luckyclaw -sshpass -p 'luckfox' ssh root@192.168.1.175 "chmod +x /usr/local/bin/luckyclaw && luckyclaw version" +# 1. Kill running process (required — scp fails if binary is locked) +sshpass -p 'luckfox' ssh root@ "killall -9 luckyclaw" + +# 2. Copy new binary to /usr/bin/ (NOT /usr/local/bin/) +sshpass -p 'luckfox' scp build/luckyclaw-linux-arm root@:/usr/bin/luckyclaw + +# 3. Restart via init script and verify +sshpass -p 'luckfox' ssh root@ "chmod +x /usr/bin/luckyclaw && /etc/init.d/S99luckyclaw restart && sleep 2 && luckyclaw version" ``` ### Test on Device ```bash -sshpass -p 'luckfox' ssh root@192.168.1.175 +sshpass -p 'luckfox' ssh root@ luckyclaw status # Check everything luckyclaw gateway -b # Start in background luckyclaw stop # Stop cleanly diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..ad6c47c --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,43 @@ +# Contributing to LuckyClaw + +LuckyClaw is currently maintained by a single developer. Contributions via pull requests and issue reports are welcome on GitHub. Response times may vary. + +## Before Submitting a PR + +1. Run `make check` and ensure all tests pass locally. +2. Keep PRs focused. Avoid bundling unrelated changes together. +3. Include a clear description of what changed and why. + +## PR Structure + +Every pull request should include: + +- **Description** -- What does this change do and why? +- **Type** -- Bug fix, feature, docs, or refactor. 
+- **Testing** -- How you tested the change (hardware, model/provider, channel). +- **Evidence** -- Logs or screenshots demonstrating the change works (optional but encouraged). + +## AI-Assisted Contributions + +LuckyClaw embraces AI-assisted development. If you use AI tools to generate code, please: + +- **Disclose it** in the PR description. There is no stigma -- only transparency matters. +- **Read and understand** every line of generated code before submitting. +- **Test it** in a real environment, not just in an editor. +- **Review for security** -- AI-generated code can produce subtle bugs around path traversal, command injection, and credential handling. + +AI-generated contributions are held to the same quality bar as human-written code. + +## Code Standards + +- Idiomatic Go, consistent with the existing codebase style. +- No unnecessary abstractions, dead code, or over-engineering. +- Include or update tests where appropriate. +- All CI checks (`make check`) must pass. + +## Communication + +- **GitHub Issues** -- Bug reports, feature requests, design discussions. +- **Pull Request comments** -- Code-specific feedback. + +When in doubt, open an issue before writing code. diff --git a/IMPROVEMENTS.md b/IMPROVEMENTS.md new file mode 100644 index 0000000..8e097e3 --- /dev/null +++ b/IMPROVEMENTS.md @@ -0,0 +1,99 @@ +# Suggested Improvements (Backlog) + +Items listed here are planned enhancements that are not yet scheduled for implementation. + +## Cron Tool Enhancements + +### Add `at_time` Parameter +**Priority**: Medium +**Description**: Add a new `at_time` parameter to the cron tool that accepts an ISO-8601 timestamp (e.g., `"2026-02-22T07:00:00+03:00"`). The tool would internally convert this to `atMS` using `time.Parse(time.RFC3339, at_time)`. This eliminates the need for the LLM to manually calculate `at_seconds` from `time.Now()` when the user specifies an absolute clock time for a one-time reminder. 
+ +**Benefit**: Reduces LLM arithmetic errors when converting "at 7:10 AM" → `at_seconds`. Currently the LLM must compute `target_time - current_time` in seconds, which is error-prone. With `at_time`, it just passes the ISO string directly. + +**Blocked by**: Nothing. Can be implemented independently after Phase 12-H. + +## PicoClaw Upstream Bugs + +### Infinite Optimization Loop +**Priority**: High +**Description**: In `pkg/agent/loop.go` -> `summarizeSession()`, if the conversational history consists entirely of tool outputs (which causes `len(validMessages) == 0` during the extraction phase), the function returns early without invoking `al.sessions.TruncateHistory()`. This causes the token window boundary to be instantly breached again on the very next turn, locking the agent into an infinite "Memory threshold reached. Optimizing conversation history..." cycle that never actually optimizes. + +**Benefit**: Prevents catastrophic session corruption when LLM APIs fail or large tool exchanges dominate a short time window. + +**Blocked by**: Should be submitted as a PR to the [picoclaw](https://github.com/sipeed/picoclaw) upstream repository. + +> **Status (checked 2026-03-09):** Bug confirmed still present in picoclaw-latest at `pkg/agent/loop.go` lines 1457-1459. The `len(validMessages) == 0` early return skips `TruncateHistory()`, causing the infinite loop. We fixed this in our fork but have not yet opened a PR. + +## Installation / Deployment + +### OTA Binary Updates (No Reflash) +**Priority**: Medium +**Description**: The LuckyClaw binary at `/usr/bin/luckyclaw` can be replaced via SCP without reflashing the entire firmware, since user data lives on `/oem/.luckyclaw/` (a separate partition). An `luckyclaw update` command could check the GitHub Releases API for the latest version, download the matching ARM binary, replace itself, and restart — all without touching config, sessions, cron jobs, or memory. 
+ +**Benefit**: Users can update without Windows, without SOCToolKit, and without losing any data. Dramatically lowers the friction of staying current. + +**Blocked by**: Needs a stable releases workflow publishing individual ARM binaries (not just full `.img` files). Also needs a version comparison check (`luckyclaw version` already embeds the version tag). + +### Open-Source Cross-Platform Flashing Tool +**Priority**: Low +**Description**: Currently, flashing the eMMC requires using the proprietary Rockchip `SOCToolKit`, which is Windows-only. We should develop or adopt an open-source, cross-platform CLI tool (e.g., in Python or Go) that can communicate with the Rockchip MaskROM protocol to flash `update.img` directly from Linux and macOS without needing Windows VMs or proprietary software. + +**Benefit**: Dramatically simplifies the onboarding process for non-Windows users and allows for scripted/automated deployments. +**Blocked by**: Reverse engineering of Rockchip protocols or integrating existing open-source alternatives like `rkdeveloptool`. + +## Performance Optimizations + +### Cache System Prompt Between Messages +**Priority**: Medium +**Description**: `BuildSystemPrompt()` in `pkg/agent/context.go` re-reads `SOUL.md`, `USER.md`, `AGENTS.md`, skills summaries, and memory context from disk on every message. These files rarely change. Caching the result with a file-modification-time check would eliminate repeated disk I/O and string allocations. + +**Benefit**: Eliminates ~5 file reads and ~10KB of string allocations per message. On the Luckfox's SPI NAND flash (slower than eMMC), this could save 5-10ms per message. + +### Cache Tool Provider Definitions +**Priority**: Low +**Description**: `al.tools.ToProviderDefs()` in `runLLMIteration` rebuilds the full tool definition JSON on every LLM iteration (up to 15 per message). The tool registry doesn't change at runtime, so this can be computed once at startup and cached. 
+ +**Benefit**: Avoids rebuilding ~2KB of JSON schema per iteration. Minor memory saving but reduces GC pressure. + +### Use `json.Marshal` Instead of `json.MarshalIndent` for Session Save +**Priority**: Low +**Description**: `SessionManager.Save()` uses `json.MarshalIndent` for pretty-printing. This is ~2x slower than `json.Marshal` and produces larger files on flash storage. + +**Benefit**: Faster session saves, smaller session files on limited SPI NAND storage. + +### Pre-allocate HTTP Response Buffer +**Priority**: Low +**Description**: `HTTPProvider.Chat()` uses `io.ReadAll(resp.Body)` which starts with a small buffer and grows dynamically. Pre-allocating based on `Content-Length` header (when available) would reduce reallocations. + +**Benefit**: Fewer intermediate allocations during LLM response parsing. + +## Benchmark Tests + +### Add Performance Benchmarks to `make check` +**Priority**: Medium +**Description**: Introduce Go benchmark tests (`func BenchmarkXxx(b *testing.B)`) that measure the performance of critical hot-path functions. These should run as part of `make check` or as a separate `make bench` target. Proposed benchmarks: + +1. **`BenchmarkBuildSystemPrompt`** — Measures time to build the full system prompt from disk files. Baseline: should be <5ms. +2. **`BenchmarkBuildMessages`** — Measures context assembly with varying history sizes (10, 50, 100 messages). Guards against regression as history grows. +3. **`BenchmarkSessionSave`** — Measures JSON serialization + atomic write for sessions of varying sizes. Ensures save stays <50ms. +4. **`BenchmarkToProviderDefs`** — Measures tool definition generation. Should be <1ms. +5. **`BenchmarkForceCompression`** — Measures conversation compression performance. Critical for memory-constrained devices. +6. **`BenchmarkGetHistory`** — Measures session history copy for varying message counts. Guards against O(n²) regressions. 
+ +**Benefit**: Catches performance regressions early, provides baseline numbers for the Luckfox board, and validates that optimization PRs actually improve performance. + +**Blocked by**: Nothing. Can be implemented independently. + +## Session Management + +### Configurable Summarization Thresholds +**Priority**: Medium +**Description**: Port `SummarizeMessageThreshold` and `SummarizeTokenPercent` from picoclaw upstream into our config struct. Currently hardcoded at 20 messages / 75% of context window in `loop.go`. Making these configurable allows users to tune conversation memory behavior without rebuilding. + +**Benefit**: Power users can trade token cost for longer conversation context, or reduce it on very small models. + +### Improved Token Estimator +**Priority**: Low +**Description**: Port `utf8.RuneCountInString` with 2.5 chars/token ratio from picoclaw upstream (vs our current `len` with 3 chars/token). More accurate for mixed-language content and CJK text. + +**Benefit**: Better context budget estimation, especially for non-English conversations. diff --git a/README.md b/README.md index be99de4..74dc3e9 100644 --- a/README.md +++ b/README.md @@ -1,14 +1,15 @@
- LuckyClaw + LuckyClaw

🦞 LuckyClaw: AI Assistant for Luckfox Pico

One-stop AI firmware for Luckfox Pico boards

- Go + Go Board License + Discord

@@ -32,16 +33,23 @@ LuckyClaw is a purpose-built AI assistant for [Luckfox Pico](https://wiki.luckfo ## ⚡ Quick Start (End Users) +### Supported Boards + +| Board | Chip | Image | +|-------|------|-------| +| **Luckfox Pico Plus** | RV1103 | `luckyclaw-luckfox_pico_plus_rv1103-vX.X.X.img` | +| **Luckfox Pico Pro Max** | RV1106 | `luckyclaw-luckfox_pico_pro_max_rv1106-vX.X.X.img` | + +> [!IMPORTANT] +> LuckyClaw currently only supports these two boards. Other Luckfox variants (Pico Mini, Pico Zero, etc.) are untested and may not work. + ### 1. Flash the firmware -Download the latest firmware image from [GitHub Releases](https://github.com/jamesrossdev/luckyclaw/releases). +Download the firmware image matching your board from [GitHub Releases](https://github.com/jamesrossdev/luckyclaw/releases). -Flash to your Luckfox Pico board using the [Luckfox flashing tool](https://wiki.luckfox.com/Luckfox-Pico/Luckfox-Pico-SD-Card-burn-image/): +Follow our detailed documentation to flash the firmware: -```bash -# On Linux, use SocToolKit or dd -# On Windows, use the Luckfox burn tool -``` +👉 **[LuckyClaw Flashing Guide (eMMC)](doc/FLASHING_GUIDE.md)** ### 2. Connect via SSH @@ -62,7 +70,7 @@ You'll see the LuckyClaw banner: | |__| |_| | (__| <| |_| | |___| | (_| |\ V V / |_____\__,_|\___|_|\_\\__, |\____|_|\__,_| \_/\_/ |___/ - 🦞 luckyclaw v0.3.3 + 🦞 luckyclaw v0.2.0 Gateway: running (PID 1234, 15MB) Memory: 33MB / 55MB available @@ -87,7 +95,7 @@ The wizard walks you through: 1. **API Provider** — OpenRouter - but you can manually set up OpenAI, Anthropic, Ollama and others in config.json 2. **API Key** — Paste your key, it's validated in real-time 3. **Timezone** — Explicitly enter your IANA Zone classification via the [Wikipedia TZ Database List](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List) -4. **Messaging** — Optionally set up Telegram (Discord, WhatsApp, and others coming soon) +4. **Messaging** — Optionally set up Telegram and/or Discord 5. 
**Start gateway** — Optionally start the AI gateway in the background ### 4. Chat! @@ -99,7 +107,7 @@ luckyclaw agent -m "What time is it?" # Interactive mode luckyclaw agent -# Or use Telegram (if configured) +# Or use Telegram/Discord (if configured) # Just message your bot! ``` @@ -107,12 +115,12 @@ luckyclaw agent ## 💬 Chat Channels -| Channel | Status | Setup | -| ------------ | ---------- | -------------------------- | -| **Telegram** | ✅ Ready | Token from @BotFather | -| **Discord** | ✅ Ready | Bot token + intents | -| **WhatsApp** | 🔜 Planned | — | -| **Slack** | 🔜 Planned | — | +| Channel | Status | Setup | +| ------------ | ------------------- | -------------------------- | +| **Telegram** | ✅ Ready | Token from @BotFather | +| **Discord** | ✅ Ready | Bot token + intents | +| **WhatsApp** | 🚧 Work in Progress | — | +| **Slack** | 🧬 Inherited (untested) | — |
Telegram Setup (Recommended) @@ -251,7 +259,7 @@ To allow system-wide access (use with caution): ### Prerequisites -- Go 1.22+ +- Go 1.25+ - [Luckfox Pico SDK](https://github.com/LuckfoxTECH/luckfox-pico) (for firmware builds) - ARM cross-compilation toolchain (included in the SDK) @@ -291,7 +299,7 @@ firmware/overlay/ ├── root/.luckyclaw/ │ ├── config.json # Default config │ └── workspace/ # Default workspace files -└── usr/local/bin/luckyclaw # The binary +└── usr/bin/luckyclaw # The binary ``` To build a firmware image: @@ -299,7 +307,7 @@ To build a firmware image: 1. **Build the ARM binary**: `make build-arm` 2. **Clone the SDK**: `git clone https://github.com/LuckfoxTECH/luckfox-pico.git luckfox-pico-sdk` 3. **Copy overlay**: `cp -r firmware/overlay/* luckfox-pico-sdk/project/cfg/BoardConfig_IPC/overlay/luckyclaw-overlay/` -4. **Copy binary**: `cp build/luckyclaw-linux-arm luckfox-pico-sdk/project/cfg/BoardConfig_IPC/overlay/luckyclaw-overlay/usr/local/bin/luckyclaw` +4. **Copy binary**: `cp build/luckyclaw-linux-arm luckfox-pico-sdk/project/cfg/BoardConfig_IPC/overlay/luckyclaw-overlay/usr/bin/luckyclaw` 5. **Build image**: ```bash cd luckfox-pico-sdk @@ -329,7 +337,7 @@ luckyclaw/ ### Performance tuning -LuckyClaw automatically sets `GOGC=20` and `GOMEMLIMIT=8MiB` at startup for memory-constrained boards. These can be overridden via environment variables if your board has more RAM. +LuckyClaw automatically sets `GOGC=20` and `GOMEMLIMIT=24MiB` at startup for memory-constrained boards. These can be overridden via environment variables if your board has more RAM. --- @@ -341,7 +349,7 @@ LuckyClaw v0.2+ automatically caps memory usage. If you're on an older version o ```bash export GOGC=20 -export GOMEMLIMIT=8MiB +export GOMEMLIMIT=24MiB luckyclaw gateway ``` @@ -362,11 +370,11 @@ If reminders were created but never fire, the cron service may not have started. tail -20 /var/log/luckyclaw.log ``` -Look for `✓ Cron service started`. 
If missing, the jobs.json file may be corrupted — v0.2.1+ handles this automatically. +Look for `✓ Cron service started`. If missing, the jobs.json file may be corrupted — v0.2.0+ handles this automatically. ### Time is wrong -LuckyClaw v0.3.3+ embeds its own timezone database and sets the timezone during onboarding. If the time is still wrong: +LuckyClaw v0.2.0+ embeds its own timezone database and sets the timezone during onboarding. If the time is still wrong: 1. **System clock**: Sync via NTP: ```bash @@ -375,12 +383,18 @@ LuckyClaw v0.3.3+ embeds its own timezone database and sets the timezone during 2. **Timezone**: Re-run onboarding or set manually: ```bash - echo "export TZ='Africa/Nairobi'" > /etc/profile.d/timezone.sh + echo "export TZ='America/New_York'" > /etc/profile.d/timezone.sh ``` Then restart the gateway: `luckyclaw restart` --- +## 💬 Community + +Join our Discord for help, feedback, and discussion: + +👉 **[LuckyClaw Discord](https://discord.gg/TRdD9dBe)** + ## 🙏 Credits - [PicoClaw](https://github.com/sipeed/picoclaw) by [Sipeed](https://sipeed.com) — The upstream project that LuckyClaw is forked from diff --git a/assets/flashing/step-01-click-install-driver.png b/assets/flashing/step-01-click-install-driver.png new file mode 100644 index 0000000..593143a Binary files /dev/null and b/assets/flashing/step-01-click-install-driver.png differ diff --git a/assets/flashing/step-01-install-success.png b/assets/flashing/step-01-install-success.png new file mode 100644 index 0000000..269d46e Binary files /dev/null and b/assets/flashing/step-01-install-success.png differ diff --git a/assets/flashing/step-01-open-driver-folder.png b/assets/flashing/step-01-open-driver-folder.png new file mode 100644 index 0000000..3c13738 Binary files /dev/null and b/assets/flashing/step-01-open-driver-folder.png differ diff --git a/assets/flashing/step-01-trust-publisher.png b/assets/flashing/step-01-trust-publisher.png new file mode 100644 index 0000000..7cedf1a Binary 
files /dev/null and b/assets/flashing/step-01-trust-publisher.png differ diff --git a/assets/flashing/step-02-chip-selection.png b/assets/flashing/step-02-chip-selection.png new file mode 100644 index 0000000..9d4d894 Binary files /dev/null and b/assets/flashing/step-02-chip-selection.png differ diff --git a/assets/flashing/step-02-open-soctoolkit.png b/assets/flashing/step-02-open-soctoolkit.png new file mode 100644 index 0000000..49f2614 Binary files /dev/null and b/assets/flashing/step-02-open-soctoolkit.png differ diff --git a/assets/flashing/step-03-maskrom-detected.png b/assets/flashing/step-03-maskrom-detected.png new file mode 100644 index 0000000..1d3c7d5 Binary files /dev/null and b/assets/flashing/step-03-maskrom-detected.png differ diff --git a/assets/flashing/step-04-firmware-flash-success.png b/assets/flashing/step-04-firmware-flash-success.png new file mode 100644 index 0000000..31920c0 Binary files /dev/null and b/assets/flashing/step-04-firmware-flash-success.png differ diff --git a/assets/logo.png b/assets/logo.png new file mode 100644 index 0000000..4b6d1df Binary files /dev/null and b/assets/logo.png differ diff --git a/cmd/luckyclaw/main.go b/cmd/luckyclaw/main.go index dcb3dca..3a0b57c 100644 --- a/cmd/luckyclaw/main.go +++ b/cmd/luckyclaw/main.go @@ -420,30 +420,55 @@ func onboard() { } } - // Step 4: Telegram + // Step 4: Messaging Channels fmt.Println() - fmt.Println(" Step 4: Telegram") - fmt.Println(" ──────────────────") - fmt.Println(" See README.md for setup instructions.") + fmt.Println(" Step 4: Messaging Channels") + fmt.Println(" ──────────────────────────") + fmt.Println(" Set up one or both chat channels. You can always configure these later") + fmt.Println(" by editing config.json or re-running 'luckyclaw onboard'.") fmt.Println() - tgToken := promptLine(" Bot token (or Enter to skip): ") - if tgToken != "" { - fmt.Print(" Validating... 
") - username, err := validateTelegramToken(tgToken) - if err != nil { - fmt.Printf("⚠ %v\n", err) - fmt.Println(" (Token saved anyway — check it later)") - } else { - fmt.Printf("✓ @%s\n", username) - } + // Telegram + if promptYN(" Set up Telegram?") { + fmt.Println() + fmt.Println(" Create a bot via @BotFather on Telegram, then paste the token below.") + tgToken := promptLine(" Telegram bot token: ") + if tgToken != "" { + fmt.Print(" Validating... ") + username, err := validateTelegramToken(tgToken) + if err != nil { + fmt.Printf("⚠ %v\n", err) + fmt.Println(" (Token saved anyway — check it later)") + } else { + fmt.Printf("✓ @%s\n", username) + } + + cfg.Channels.Telegram.Enabled = true + cfg.Channels.Telegram.Token = tgToken - cfg.Channels.Telegram.Enabled = true - cfg.Channels.Telegram.Token = tgToken + tgUserID := promptLine(" Your Telegram user ID (optional, from @userinfobot): ") + if tgUserID != "" { + cfg.Channels.Telegram.AllowFrom = config.FlexibleStringSlice{tgUserID} + } + } + } - tgUserID := promptLine(" Your Telegram user ID (optional): ") - if tgUserID != "" { - cfg.Channels.Telegram.AllowFrom = config.FlexibleStringSlice{tgUserID} + // Discord + fmt.Println() + if promptYN(" Set up Discord?") { + fmt.Println() + fmt.Println(" Create a bot at https://discord.com/developers/applications") + fmt.Println(" Enable MESSAGE CONTENT INTENT in Bot settings, then paste the token.") + dcToken := promptLine(" Discord bot token: ") + if dcToken != "" { + cfg.Channels.Discord.Enabled = true + cfg.Channels.Discord.Token = dcToken + + dcUserID := promptLine(" Your Discord user ID (optional): ") + if dcUserID != "" { + cfg.Channels.Discord.AllowFrom = config.FlexibleStringSlice{dcUserID} + } + fmt.Println(" ✓ Discord configured") } } @@ -1672,7 +1697,7 @@ func cronAddCmd(storePath string, loc *time.Location) { } cs := cron.NewCronService(storePath, nil, loc) - job, err := cs.AddJob(name, schedule, message, deliver, channel, to) + job, err := cs.AddJob(name, 
schedule, message, deliver, channel, to, 0) if err != nil { fmt.Printf("Error adding job: %v\n", err) return diff --git a/config/config.example.json b/config/config.example.json index 98f1327..4382ccd 100644 --- a/config/config.example.json +++ b/config/config.example.json @@ -3,10 +3,10 @@ "defaults": { "workspace": "~/.luckyclaw/workspace", "restrict_to_workspace": true, - "model": "glm-4.7", - "max_tokens": 8192, + "model": "arcee-ai/trinity-large-preview:free", + "max_tokens": 16384, "temperature": 0.7, - "max_tool_iterations": 20 + "max_tool_iterations": 25 } }, "channels": { @@ -14,7 +14,9 @@ "enabled": false, "token": "YOUR_TELEGRAM_BOT_TOKEN", "proxy": "", - "allow_from": ["YOUR_USER_ID"] + "allow_from": [ + "YOUR_USER_ID" + ] }, "discord": { "enabled": false, @@ -133,4 +135,4 @@ "host": "0.0.0.0", "port": 18790 } -} +} \ No newline at end of file diff --git a/doc/BACKUP_RESTORE.md b/doc/BACKUP_RESTORE.md new file mode 100644 index 0000000..961d288 --- /dev/null +++ b/doc/BACKUP_RESTORE.md @@ -0,0 +1,52 @@ +# Backup and Restore + +Flashing a new firmware image replaces the entire filesystem on the board. All configuration, memories, sessions, and cron jobs will be lost unless backed up beforehand. + +This guide covers how to preserve your data before reflashing and restore it afterward. + +> A future `luckyclaw update` command is planned (see [Roadmap](ROADMAP.md)) that will update only the binary without touching your data. + +## What Gets Backed Up + +| Item | Path on Device | Contains | +|------|---------------|----------| +| Config | `/root/.luckyclaw/config.json` | API key, model, channel tokens, tool settings | +| Workspace | `/root/.luckyclaw/workspace/` | Memories, sessions, cron jobs, skills, identity files | + +## Backup (Before Flashing) + +Run this from your computer (not the board): + +```bash +sshpass -p 'luckfox' scp -r root@:/root/.luckyclaw/ ./luckyclaw-backup/ +``` + +Replace `` with your board's IP address (e.g., `192.168.1.156`). 
+
+Verify the backup contains your files:
+
+```bash
+ls ./luckyclaw-backup/
+# Should show: config.json workspace/
+```
+
+## Restore (After Flashing)
+
+After flashing the new firmware and running `luckyclaw onboard`, restore your data:
+
+```bash
+# Restore workspace (memories, sessions, cron jobs)
+sshpass -p 'luckfox' scp -r ./luckyclaw-backup/workspace/ root@<device-ip>:/root/.luckyclaw/
+
+# Restore config (API key, model, channel settings)
+sshpass -p 'luckfox' scp ./luckyclaw-backup/config.json root@<device-ip>:/root/.luckyclaw/
+
+# Restart the gateway to pick up restored config
+sshpass -p 'luckfox' ssh root@<device-ip> "/etc/init.d/S99luckyclaw restart"
+```
+
+## Notes
+
+- The backup does not include the binary itself (`/usr/bin/luckyclaw`) -- that is part of the firmware image.
+- If you are upgrading between major versions, check the release notes for any config format changes before restoring an old `config.json`.
+- Session files are JSON. If a new version changes the session format, old sessions may be ignored but will not cause crashes.
diff --git a/doc/FLASHING_GUIDE.md b/doc/FLASHING_GUIDE.md
new file mode 100644
index 0000000..1e2e890
--- /dev/null
+++ b/doc/FLASHING_GUIDE.md
@@ -0,0 +1,137 @@
+# LuckyClaw Flashing Guide (eMMC)
+
+This guide covers flashing the LuckyClaw firmware to a Luckfox Pico board's eMMC storage using the Rockchip SOCToolKit on Windows.
+
+> [!NOTE]
+> We currently provide pre-built firmware images for **two board variants**:
+> - `luckyclaw-luckfox_pico_plus_rv1103-vX.X.X.img` — for **Luckfox Pico Plus**
+> - `luckyclaw-luckfox_pico_pro_max_rv1106-vX.X.X.img` — for **Luckfox Pico Pro Max**
+>
+> Download the image matching your board from the [Releases](https://github.com/jamesrossdev/luckyclaw/releases) page.
+
+> [!IMPORTANT]
+> Only the **Luckfox Pico Plus** (RV1103) and **Luckfox Pico Pro Max** (RV1106) are supported. Other Luckfox variants (Pico Mini, Pico Zero, etc.) have not been tested and may not work.
+ +> [!WARNING] +> Flashing replaces the entire filesystem on the board. All existing configuration, memories, sessions, and cron jobs will be lost. If you are upgrading from a previous version, back up your data first — see [Backup and Restore](BACKUP_RESTORE.md). + +## Prerequisites + +### Hardware + +- **Luckfox Pico Plus** (RV1103) or **Luckfox Pico Pro Max** (RV1106) board +- USB Type-C to Type-A cable (must be **data capable**, not charge-only) +- A computer running Windows + +### Software and Files + +All files are bundled together on the [Releases](https://github.com/jamesrossdev/luckyclaw/releases) page: + +1. **LuckyClaw firmware image** — pick the `.img` that matches your board: + - `luckyclaw-luckfox_pico_plus_rv1103-vX.X.X.img` for **Luckfox Pico Plus** + - `luckyclaw-luckfox_pico_pro_max_rv1106-vX.X.X.img` for **Luckfox Pico Pro Max** + +2. **Rockchip Driver Assistant** (`DriverAssistant_vX.X.zip`) — installs the USB driver so Windows can communicate with the board in MaskROM mode. + +3. **Rockchip SOCToolKit** (`SocToolKit_vX.XX.zip`) — the flashing utility that writes the firmware image to the board. + +--- + +## Step 1: Install the USB Driver + +Before flashing, you must install the Rockchip USB driver on your Windows machine. You only need to do this **once per computer**. + +1. Download and extract the **Driver Assistant** ZIP. +2. Open the extracted folder and **run `DriverInstall.exe` as Administrator**. + +![Open DriverAssistant folder and run DriverInstall](../assets/flashing/step-01-open-driver-folder.png) + +3. Click **Install Driver** in the dialog that appears. + +![Click Install Driver](../assets/flashing/step-01-click-install-driver.png) + +4. Windows Security will ask you to trust software from "Fuzhou Rockchip Electronics". **Check the box** to always trust this publisher, then click **Install**. + +![Trust the publisher and click Install](../assets/flashing/step-01-trust-publisher.png) + +5. 
Wait for the **"Install driver ok."** confirmation dialog, then click OK. + +![Driver installed successfully](../assets/flashing/step-01-install-success.png) + +--- + +## Step 2: Open SOCToolKit + +1. Download and extract the **SOCToolKit** ZIP. +2. Open the extracted folder, **right-click `SocToolKit.exe`**, and select **Run as administrator**. + +![Open SOCToolKit folder and run as admin](../assets/flashing/step-02-open-soctoolkit.png) + +3. When SOCToolKit opens, it will ask you to select a chip. Choose **RV1103** (for Luckfox Pico Plus) or **RV1106** (for Pico Pro Max) from the dropdown, then click **OK**. Make sure **USB** is selected (not COM). + +![Select your chip — RV1103 or RV1106](../assets/flashing/step-02-chip-selection.png) + +--- + +## Step 3: Enter MaskROM Mode + +The board must be in MaskROM mode before it can be flashed. + +1. **Disconnect** the USB cable from the board if it is currently connected. +2. Locate the **BOOT button** on the board (near the USB-C port). +3. **Press and hold** the BOOT button. +4. While holding the BOOT button, plug the USB cable into the board and your computer. +5. Wait 2–3 seconds, then **release** the BOOT button. + +If successful, SOCToolKit will display a **"Maskrom"** device in the USB dropdown at the top of the window. The device number may vary from the screenshot. + +> [!TIP] +> Make sure the cable you are using is a **data cable** and not a charging-only cable. If no MaskROM device appears, try a different cable or USB port. + +![MaskROM device detected](../assets/flashing/step-03-maskrom-detected.png) + +--- + +## Step 4: Select Firmware and Flash + +1. Click the **Firmware…** button at the bottom of the window. +2. Browse to and select the `.img` file you downloaded (e.g. `luckyclaw-luckfox_pico_plus_rv1103-v0.2.0.img`). +3. Click **Upgrade** to begin flashing. +4. **Do not disconnect the cable** during this process. The log panel on the right will show progress. +5. 
When complete, the log will show **"Upgrade firmware ok."** and **"Upgrade luckyclaw-v0…OK"**.
+
+![Select firmware, click Upgrade, and wait for success](../assets/flashing/step-04-firmware-flash-success.png)
+
+The board will reboot automatically. If it does not, unplug and replug the USB cable (without holding the BOOT button).
+
+---
+
+## Step 5: First-Time Setup
+
+After the board boots, connect to it via SSH and run the onboarding wizard (replace `<device-ip>` with your board's IP address):
+
+```bash
+ssh root@<device-ip>
+luckyclaw onboard
+```
+
+The wizard will walk you through configuring your LLM provider, API key, Telegram/Discord bot tokens, and other settings. For full setup instructions, see the [README](../README.md).
+
+### Restoring Previous Data
+
+If you backed up your data before flashing, follow the [Backup and Restore](BACKUP_RESTORE.md) guide to restore your configuration and workspace.
+
+---
+
+## Troubleshooting
+
+- **Device not detected in SOCToolKit:** Ensure you are using a data-capable USB cable. Try a different USB port. Make sure you held the BOOT button before and during cable insertion. Verify that Driver Assistant was installed successfully.
+- **Flashing fails partway through:** Often caused by a loose USB connection or a faulty cable. Try a different cable.
+- **"Test Device Fail" error:** The board may have exited MaskROM mode. Repeat the BOOT button sequence from Step 3.
+- **Wrong chip selected:** If you selected RV1103 but have an RV1106 board (or vice versa), close SOCToolKit, reopen it, and select the correct chip.
+ +## Further Reading + +- [Official Luckfox burning instructions](https://wiki.luckfox.com/Luckfox-Pico/Luckfox-Pico-quick-start/image-burn) +- [Backup and Restore](BACKUP_RESTORE.md) — preserve your data before reflashing +- [LuckyClaw README](../README.md) — full project documentation diff --git a/doc/ROADMAP.md b/doc/ROADMAP.md new file mode 100644 index 0000000..315f570 --- /dev/null +++ b/doc/ROADMAP.md @@ -0,0 +1,42 @@ +# LuckyClaw Roadmap + +Items are prioritized by readiness and impact. Items may be moved between versions or dropped based on progress and real-world usage feedback. + +## v0.2.0 (Current Release) + +- Heartbeat hardening (HeartbeatMode, SilentResult, audit logging) +- Memory optimization (GOMEMLIMIT tuning, GOGC=20) +- Flashing guide with backup/restore documentation +- SSH banner and init script improvements +- Default response improvement (echoes user's question on failure) + +## v0.2.x (Patch Releases) + +- Port `registry_test.go` from upstream PicoClaw (tool registry test coverage) +- Port `shell_process_unix.go` from upstream (process group cleanup for exec tool) +- Performance benchmark tests (`make bench`) +- System prompt caching between messages +- Cron tool `at_time` parameter (ISO-8601 absolute time for reminders) + +## v0.3.x (Next Minor) + +- Auto-update command (`luckyclaw update`) -- binary-only OTA updates +- WhatsApp channel integration +- Tool definition caching +- Versioned firmware image naming in build pipeline +- Session save optimization (json.Marshal vs MarshalIndent) + +## Future + +- Cross-platform flashing tool (replace Windows-only SOCToolKit) +- Multi-model routing (small model for easy tasks, large for hard) +- Skill marketplace / remote skill install + +## Upstream Watchlist + +Items from PicoClaw upstream that may be worth integrating if they mature: + +- `pkg/routing` -- model routing (1,103 lines, added upstream post-fork) +- `pkg/media` -- media handling for attachments (801 lines) +- 
`shell_process_windows.go` -- Windows cross-platform support (28 lines) +- `pkg/identity` -- identity/personality management (336 lines) diff --git a/doc/picoclaw_community_roadmap_260216.md b/doc/picoclaw_community_roadmap_260216.md deleted file mode 100644 index cfcc30f..0000000 --- a/doc/picoclaw_community_roadmap_260216.md +++ /dev/null @@ -1,112 +0,0 @@ -## 🚀 Join the PicoClaw Journey: Call for Community Volunteers & Roadmap Reveal - -**Hello, PicoClaw Community!** - -First, a massive thank you to everyone for your enthusiasm and PR contributions. It is because of you that PicoClaw continues to iterate and evolve so rapidly. Thanks to the simplicity and accessibility of the **Go language**, we’ve seen a non-stop stream of high-quality PRs! - -PicoClaw is growing much faster than we anticipated. As we are currently in the midst of the **Chinese New Year holiday**, we are looking to recruit community volunteers to help us maintain this incredible momentum. - -This document outlines the specific volunteer roles we need right now and provides a look at our upcoming **Roadmap**. - -### 🎁 Community Perks - -To show our appreciation, developers who officially join our community operations will receive: - -* **Exclusive AI Hardware:** Our upcoming, unreleased AI device. -* **Token Discounts:** Potential discounts on LLM tokens (currently in negotiations with major providers). - -### 🎥 Calling All Content Creators! - -Not a developer? You can still help! We welcome users to post **PicoClaw reviews or tutorials**. - -* **Twitter:** Use the tag **#picoclaw** and mention **@SipeedIO**. -* **Bilibili:** Mention **@Sipeed矽速科技** or send us a DM. -We will be rewarding high-quality content creators with the same perks as our community developers! - ---- - -## 🛠️ Urgent Volunteer Roles - -We are looking for experts in the following areas: - -1. 
**Issue/PR Reviewers** -* **The Mission:** With PRs and Issues exploding in volume, we need help with initial triage, evaluation, and merging. -* **Focus:** Preliminary merging and community health. Efficiency optimization and security audits will be handled by specialized roles. - - -2. **Resource Optimization Experts** -* **The Mission:** Rapid growth has introduced dependencies that are making PicoClaw a bit "heavy." We want to keep it lean. -* **Focus:** Analyzing resource growth between releases and trimming redundancy. -* **Priority:** **RAM usage optimization** > Binary size reduction. - - -3. **Security Audit & Bug Fixes** -* **The Mission:** Due to the "vibe coding" nature of our early stages, we need a thorough review of network security and AI permission management. -* **Focus:** Auditing the codebase for vulnerabilities and implementing robust fixes. - - -4. **Documentation & DX (Developer Experience)** -* **The Mission:** Our current README is a bit outdated. We need "step-by-step" guides that even beginners can follow. -* **Focus:** Creating clear, user-friendly documentation for both setup and development. - - -5. **AI-Powered CI/CD Optimization** -* **The Mission:** PicoClaw started as a "vibe coding" experiment; now we want to use AI to manage it. -* **Focus:** Automating builds with AI and exploring AI-driven issue resolution. - -**How to Apply:** > If you are interested in any of the roles above, please send an email to support@sipeed.com with the subject line: [Apply: PicoClaw Expert Volunteer] + Your Desired Role. -Please include a brief introduction and any relevant experience or portfolio links. We will review all applications and grant project permissions to selected contributors! - ---- - -## 📍 The Roadmap - -Interested in a specific feature? 
You can "claim" these tasks and start building: - -### -* **Provider:** - * **Provider Refactor:** Currently being handled by **@Daming** (ETA: 5 days) - * You can still submit code; Daming will merge it into the new implementation. -* **Channels:** - * Support for OneBot, additional platforms - * attachments (images, audio, video, files). -* **Skills:** - * Implementing `find_skill` to discover tools via [openclaw/skills](https://github.com/openclaw/skills) and other platforms. -* **Operations:** * MCP Support. - * Android operations (e.g., botdrop). - * Browser automation via CDP or ActionBook. - - -* **Multi-Agent Ecosystem:** - * **Basic Model-Agnet** S - * **Model Routing:** Small models for easy tasks, large models for hard ones (to save tokens). - * **Swarm Mode.** - * **AIEOS Integration.** - - -* **Branding:** - * **Logo**: We need a cute logo! We’re leaning toward a **Mantis Shrimp**—small, but packs a legendary punch! - - -We have officially created these tasks as GitHub Issues, all marked with the roadmap tag. -This list will be updated continuously as we progress. -If you would like to claim a task, please feel free to start a conversation by commenting directly on the corresponding issue! - ---- - -## 🤝 How to Join - -**Everything is open to your creativity!** If you have a wild idea, just PR it. - -1. **The Fast Track:** Once you have at least **one merged PR**, you are eligible to join our **Developer Discord** to help plan the future of PicoClaw. -2. **The Application Track:** If you haven’t submitted a PR yet but want to dive in, email **support@sipeed.com** with the subject: -> `[Apply Join PicoClaw Dev Group] + Your GitHub Account` -> Include the role you're interested in and any evidence of your development experience. - - - -### Looking Ahead - -Powered by PicoClaw, we are crafting a Swarm AI Assistant to transform your environment into a seamless network of personal stewards. 
By automating the friction of daily life, we empower you to transcend the ordinary and freely explore your creative potential. - -**Finally, Happy Chinese New Year to everyone!** May PicoClaw gallop forward in this **Year of the Horse!** 🐎 diff --git a/firmware/overlay/etc/init.d/S99luckyclaw b/firmware/overlay/etc/init.d/S99luckyclaw index 5b13e52..572d8d6 100755 --- a/firmware/overlay/etc/init.d/S99luckyclaw +++ b/firmware/overlay/etc/init.d/S99luckyclaw @@ -8,8 +8,11 @@ LOGFILE=/var/log/luckyclaw.log CONFIGFILE=/oem/.luckyclaw/config.json # Go runtime memory limits (critical for 64MB DDR2) +# GOGC=20 makes GC trigger at 20% heap growth (aggressive but prevents runaway) +# GOMEMLIMIT=24MiB is a soft ceiling; set below total RAM (57MB) to leave room for OS/buffers +# WARNING: setting GOMEMLIMIT too low (e.g. 8MiB) causes GC to spin at 100% CPU export GOGC=20 -export GOMEMLIMIT=8MiB +export GOMEMLIMIT=24MiB export TZ=EAT-3 export PATH=$PATH:/usr/bin:/usr/local/bin:/sbin:/bin @@ -25,8 +28,26 @@ case "$1" in exit 0 fi echo "Starting LuckyClaw..." + + # Wait for NTP to sync the system clock before starting. + # The Luckfox has no battery-backed RTC, so the clock starts at 1970 + # on cold boot. TLS connections fail until NTP corrects it because + # certificates are "not yet valid". Discord's library does not retry + # after a TLS failure, so launching too early kills it permanently. + NTP_WAIT=0 + while [ "$(date +%Y)" -lt 2025 ] && [ "$NTP_WAIT" -lt 30 ]; do + sleep 1 + NTP_WAIT=$((NTP_WAIT + 1)) + done + if [ "$(date +%Y)" -lt 2025 ]; then + echo "LuckyClaw: WARNING - System clock not synced after 30s, starting anyway" + fi + + # NOTE: start-stop-daemon -b redirects fd 0/1/2 to /dev/null BEFORE + # shell redirection takes effect, making >> $LOGFILE 2>&1 useless. + # Use sh -c wrapper to daemonize with proper log capture instead. 
start-stop-daemon -S -b -m -p $PIDFILE \ - -x $DAEMON -- gateway >> $LOGFILE 2>&1 + -x /bin/sh -- -c "exec $DAEMON gateway >> $LOGFILE 2>&1" # Protect from OOM killer sleep 1 diff --git a/pkg/agent/context.go b/pkg/agent/context.go index 96f41aa..3309c43 100644 --- a/pkg/agent/context.go +++ b/pkg/agent/context.go @@ -123,7 +123,9 @@ Your workspace is at: %s 2. **Be helpful and accurate** - When using tools, briefly explain what you're doing. -3. **Memory** - When remembering something, write to %s/memory/MEMORY.md%s`, +3. **Memory** - When remembering something, write to %s/memory/MEMORY.md + +4. **Include source links** - When presenting research or web search results, ALWAYS include the relevant URLs/links in your response so the user can follow up directly without having to ask.%s`, timeStr, rt, workspacePath, workspacePath, workspacePath, workspacePath, toolsSection, workspacePath, iterBudget) } diff --git a/pkg/agent/loop.go b/pkg/agent/loop.go index cddc760..fbaceee 100644 --- a/pkg/agent/loop.go +++ b/pkg/agent/loop.go @@ -56,6 +56,7 @@ type processOptions struct { EnableSummary bool // Whether to trigger summarization SendResponse bool // Whether to send response via bus NoHistory bool // If true, don't load session history (for heartbeat) + HeartbeatMode bool // If true, exclude message/send_file tools from LLM } // createToolRegistry creates a tool registry with common tools. @@ -259,6 +260,7 @@ func (al *AgentLoop) ProcessHeartbeat(ctx context.Context, content, channel, cha EnableSummary: false, SendResponse: false, NoHistory: true, // Don't load session history for heartbeat + HeartbeatMode: true, }) } @@ -401,7 +403,11 @@ func (al *AgentLoop) runAgentLoop(ctx context.Context, opts processOptions) (str // 5. Handle empty response if finalContent == "" { - finalContent = opts.DefaultResponse + if opts.UserMessage != "" { + finalContent = fmt.Sprintf("I wasn't able to answer: \"%s\". 
Please try again.", utils.Truncate(opts.UserMessage, 100)) + } else { + finalContent = opts.DefaultResponse + } } // 6. Save final assistant message to session @@ -453,6 +459,9 @@ func (al *AgentLoop) runLLMIteration(ctx context.Context, messages []providers.M // Build tool definitions providerToolDefs := al.tools.ToProviderDefs() + if opts.HeartbeatMode { + providerToolDefs = filterHeartbeatTools(providerToolDefs) + } // Log LLM request details logger.DebugCF("agent", "LLM request", @@ -461,7 +470,7 @@ func (al *AgentLoop) runLLMIteration(ctx context.Context, messages []providers.M "model": al.model, "messages_count": len(messages), "tools_count": len(providerToolDefs), - "max_tokens": 8192, + "max_tokens": al.contextWindow, "temperature": 0.7, "system_prompt_len": len(messages[0].Content), }) @@ -481,7 +490,7 @@ func (al *AgentLoop) runLLMIteration(ctx context.Context, messages []providers.M maxRetries := 2 for retry := 0; retry <= maxRetries; retry++ { response, err = al.provider.Chat(ctx, messages, providerToolDefs, al.model, map[string]interface{}{ - "max_tokens": 8192, + "max_tokens": al.contextWindow, "temperature": 0.7, }) @@ -519,75 +528,13 @@ func (al *AgentLoop) runLLMIteration(ctx context.Context, messages []providers.M newHistory := al.sessions.GetHistory(opts.SessionKey) newSummary := al.sessions.GetSummary(opts.SessionKey) - // Re-create messages for the next attempt - // We keep the current user message (opts.UserMessage) effectively - messages = al.contextBuilder.BuildMessages( - newHistory, - newSummary, - opts.UserMessage, - nil, - opts.Channel, - opts.ChatID, - ) - - // Important: If we are in the middle of a tool loop (iteration > 1), - // rebuilding messages from session history might duplicate the flow or miss context - // if intermediate steps weren't saved correctly. - // However, al.sessions.AddFullMessage is called after every tool execution, - // so GetHistory should reflect the current state including partial tool execution. 
- // But we need to ensure we don't duplicate the user message which is appended in BuildMessages. - // BuildMessages(history...) takes the stored history and appends the *current* user message. - // If iteration > 1, the "current user message" was already added to history in step 3 of runAgentLoop. - // So if we pass opts.UserMessage again, we might duplicate it? - // Actually, step 3 is: al.sessions.AddMessage(opts.SessionKey, "user", opts.UserMessage) - // So GetHistory ALREADY contains the user message! - - // CORRECTION: - // BuildMessages combines: [System] + [History] + [CurrentMessage] - // But Step 3 added CurrentMessage to History. - // So if we use GetHistory now, it has the user message. - // If we pass opts.UserMessage to BuildMessages, it adds it AGAIN. - - // For retry in the middle of a loop, we should rely on what's in the session. - // BUT checking BuildMessages implementation: - // It appends history... then appends currentMessage. - - // Logic fix for retry: - // If iteration == 1, opts.UserMessage corresponds to the user input. - // If iteration > 1, we are processing tool results. The "messages" passed to Chat - // already accumulated tool outputs. - // Rebuilding from session history is safest because it persists state. - // Start fresh with rebuilt history. - - // Special case: standard BuildMessages appends "currentMessage". - // If we are strictly retrying the *LLM call*, we want the exact same state as before but compressed. - // However, the "messages" argument passed to runLLMIteration is constructed by the caller. - // If we rebuild from Session, we need to know if "currentMessage" should be appended or is already in history. - - // In runAgentLoop: - // 3. sessions.AddMessage(userMsg) - // 4. runLLMIteration(..., UserMessage) - - // So History contains the user message. - // BuildMessages typically appends the user message as a *new* pending message. 
- // Wait, standard BuildMessages usage in runAgentLoop: - // messages := BuildMessages(history (has old), UserMessage) - // THEN AddMessage(UserMessage). - // So "history" passed to BuildMessages does NOT contain the current UserMessage yet. - - // But here, inside the loop, we have already saved it. - // So GetHistory() includes the current user message. - // If we call BuildMessages(GetHistory(), UserMessage), we get duplicates. - - // Hack/Fix: - // If we are retrying, we rebuild from Session History ONLY. - // We pass empty string as "currentMessage" to BuildMessages - // because the "current message" is already saved in history (step 3). - + // Rebuild from compressed session history. + // Pass empty currentMessage because step 3 of runAgentLoop already + // saved opts.UserMessage into session history — passing it again would duplicate it. messages = al.contextBuilder.BuildMessages( newHistory, newSummary, - "", // Empty because history already contains the relevant messages + "", // Already in history from step 3 nil, opts.Channel, opts.ChatID, @@ -721,6 +668,17 @@ func (al *AgentLoop) runLLMIteration(ctx context.Context, messages []providers.M return finalContent, iteration, nil } +// filterHeartbeatTools removes tools that can send direct messages to users +func filterHeartbeatTools(tools []providers.ToolDefinition) []providers.ToolDefinition { + filtered := make([]providers.ToolDefinition, 0, len(tools)) + for _, t := range tools { + if t.Function.Name != "message" && t.Function.Name != "send_file" { + filtered = append(filtered, t) + } + } + return filtered +} + // updateToolContexts updates the context for tools that need channel/chatID info. 
func (al *AgentLoop) updateToolContexts(channel, chatID string) { // Use ContextualTool interface instead of type assertions @@ -756,14 +714,7 @@ func (al *AgentLoop) maybeSummarize(sessionKey, channel, chatID string) { if _, loading := al.summarizing.LoadOrStore(sessionKey, true); !loading { go func() { defer al.summarizing.Delete(sessionKey) - // Notify user about optimization if not an internal channel - if !constants.IsInternalChannel(channel) { - al.bus.PublishOutbound(bus.OutboundMessage{ - Channel: channel, - ChatID: chatID, - Content: "⚠️ Memory threshold reached. Optimizing conversation history...", - }) - } + logger.Info("Memory threshold reached. Optimizing conversation history...") al.summarizeSession(sessionKey) }() } @@ -790,31 +741,24 @@ func (al *AgentLoop) forceCompression(sessionKey string) { mid := len(conversation) / 2 // New history structure: - // 1. System Prompt - // 2. [Summary of dropped part] - synthesized - // 3. Second half of conversation - // 4. Last message - - // Simplified approach for emergency: Drop first half of conversation - // and rely on existing summary if present, or create a placeholder. + // 1. System Prompt (with compression note appended) + // 2. Second half of conversation + // 3. Last message droppedCount := mid keptConversation := conversation[mid:] - newHistory := make([]providers.Message, 0) - newHistory = append(newHistory, history[0]) // System prompt - - // Add a note about compression - compressionNote := fmt.Sprintf("[System: Emergency compression dropped %d oldest messages due to context limit]", droppedCount) - // If there was an existing summary, we might lose it if it was in the dropped part (which is just messages). - // The summary is stored separately in session.Summary, so it persists! - // We just need to ensure the user knows there's a gap. 
+ newHistory := make([]providers.Message, 0, 1+len(keptConversation)+1) - // We only modify the messages list here - newHistory = append(newHistory, providers.Message{ - Role: "system", - Content: compressionNote, - }) + // Append compression note to the original system prompt instead of adding a new system message + // This avoids having two consecutive system messages which some APIs reject + compressionNote := fmt.Sprintf( + "\n\n[System Note: Emergency compression dropped %d oldest messages due to context limit]", + droppedCount, + ) + enhancedSystemPrompt := history[0] + enhancedSystemPrompt.Content = enhancedSystemPrompt.Content + compressionNote + newHistory = append(newHistory, enhancedSystemPrompt) newHistory = append(newHistory, keptConversation...) newHistory = append(newHistory, history[len(history)-1]) // Last message diff --git a/pkg/agent/loop_test.go b/pkg/agent/loop_test.go index 1521dbe..c559844 100644 --- a/pkg/agent/loop_test.go +++ b/pkg/agent/loop_test.go @@ -624,3 +624,25 @@ func TestAgentLoop_ContextExhaustionRetry(t *testing.T) { t.Errorf("Expected history to be compressed (len < 8), got %d", len(finalHistory)) } } + +// TestFilterHeartbeatTools verifies that messaging tools are removed +func TestFilterHeartbeatTools(t *testing.T) { + tools := []providers.ToolDefinition{ + {Function: providers.ToolFunctionDefinition{Name: "message"}}, + {Function: providers.ToolFunctionDefinition{Name: "send_file"}}, + {Function: providers.ToolFunctionDefinition{Name: "read_file"}}, + {Function: providers.ToolFunctionDefinition{Name: "write_file"}}, + } + + filtered := filterHeartbeatTools(tools) + + if len(filtered) != 2 { + t.Fatalf("Expected 2 tools, got %d", len(filtered)) + } + + for _, pt := range filtered { + if pt.Function.Name == "message" || pt.Function.Name == "send_file" { + t.Errorf("Tool %s should have been filtered out", pt.Function.Name) + } + } +} diff --git a/pkg/config/config.go b/pkg/config/config.go index 990eddd..6295769 100644 --- 
a/pkg/config/config.go +++ b/pkg/config/config.go @@ -52,6 +52,7 @@ type Config struct { Heartbeat HeartbeatConfig `json:"heartbeat"` Devices DevicesConfig `json:"devices"` mu sync.RWMutex + configDir string // directory containing the config file, used to resolve workspace paths } type AgentsConfig struct { @@ -225,9 +226,9 @@ func DefaultConfig() *Config { RestrictToWorkspace: true, Provider: "openrouter", Model: "arcee-ai/trinity-large-preview:free", - MaxTokens: 8192, + MaxTokens: 16384, Temperature: 0.7, - MaxToolIterations: 15, + MaxToolIterations: 25, }, }, Channels: ChannelsConfig{ @@ -338,6 +339,7 @@ func DefaultConfig() *Config { func LoadConfig(path string) (*Config, error) { cfg := DefaultConfig() + cfg.configDir = filepath.Dir(path) data, err := os.ReadFile(path) if err != nil { @@ -378,7 +380,22 @@ func SaveConfig(path string, cfg *Config) error { func (c *Config) WorkspacePath() string { c.mu.RLock() defer c.mu.RUnlock() - return expandHome(c.Agents.Defaults.Workspace) + ws := c.Agents.Defaults.Workspace + // If workspace starts with ~/ and we know the config directory, + // resolve relative to the config dir so workspace lives alongside config. + // e.g. config at /oem/.luckyclaw/config.json with workspace ~/.luckyclaw/workspace + // → /oem/.luckyclaw/workspace (not /root/.luckyclaw/workspace) + // + // We strip only the leading "~" and preserve the rest of the path after it, + // then join with the parent of configDir (the /oem root) so the full structure + // is retained. Using filepath.Base would drop intermediate segments. 
+ if c.configDir != "" && len(ws) > 2 && ws[0] == '~' && ws[1] == '/' { + // ws[1:] = "/.luckyclaw/workspace" + // filepath.Dir(configDir) = "/oem" + // result = "/oem" + "/.luckyclaw/workspace" = "/oem/.luckyclaw/workspace" ✓ + return filepath.Join(filepath.Dir(c.configDir), ws[1:]) + } + return expandHome(ws) } func (c *Config) GetAPIKey() string { diff --git a/pkg/cron/service.go b/pkg/cron/service.go index 93ac41d..cb4d467 100644 --- a/pkg/cron/service.go +++ b/pkg/cron/service.go @@ -48,6 +48,8 @@ type CronJob struct { CreatedAtMS int64 `json:"createdAtMs"` UpdatedAtMS int64 `json:"updatedAtMs"` DeleteAfterRun bool `json:"deleteAfterRun"` + MaxRuns int `json:"maxRuns,omitempty"` // 0 = unlimited + RunCount int `json:"runCount"` } type CronStore struct { @@ -234,6 +236,7 @@ func (cs *CronService) executeJobByID(jobID string) { } job.State.LastRunAtMS = &startTime + job.RunCount++ job.UpdatedAtMS = time.Now().UnixMilli() if err != nil { @@ -244,6 +247,13 @@ func (cs *CronService) executeJobByID(jobID string) { job.State.LastError = "" } + // Check if max_runs reached (auto-delete bounded recurring jobs) + if job.MaxRuns > 0 && job.RunCount >= job.MaxRuns { + log.Printf("[cron] job %s reached max_runs (%d/%d), removing", job.ID, job.RunCount, job.MaxRuns) + cs.removeJobUnsafe(job.ID) + return + } + // Compute next run time if job.Schedule.Kind == "at" { if job.DeleteAfterRun { @@ -382,7 +392,7 @@ func (cs *CronService) saveStoreUnsafe() error { return os.WriteFile(cs.storePath, data, 0600) } -func (cs *CronService) AddJob(name string, schedule CronSchedule, message string, deliver bool, channel, to string) (*CronJob, error) { +func (cs *CronService) AddJob(name string, schedule CronSchedule, message string, deliver bool, channel, to string, maxRuns int) (*CronJob, error) { cs.mu.Lock() defer cs.mu.Unlock() @@ -409,6 +419,7 @@ func (cs *CronService) AddJob(name string, schedule CronSchedule, message string CreatedAtMS: now, UpdatedAtMS: now, DeleteAfterRun: 
deleteAfterRun, + MaxRuns: maxRuns, } cs.store.Jobs = append(cs.store.Jobs, job) diff --git a/pkg/cron/service_test.go b/pkg/cron/service_test.go index f860985..7c06b2a 100644 --- a/pkg/cron/service_test.go +++ b/pkg/cron/service_test.go @@ -17,7 +17,7 @@ func TestSaveStore_FilePermissions(t *testing.T) { service := NewCronService(storePath, nil, nil) - _, err := service.AddJob("test", CronSchedule{Kind: "every", EveryMS: int64Ptr(60000)}, "hello", false, "cli", "direct") + _, err := service.AddJob("test", CronSchedule{Kind: "every", EveryMS: int64Ptr(60000)}, "hello", false, "cli", "direct", 0) if err != nil { t.Fatalf("AddJob failed: %v", err) } diff --git a/pkg/heartbeat/service.go b/pkg/heartbeat/service.go index c945a6d..1a29952 100644 --- a/pkg/heartbeat/service.go +++ b/pkg/heartbeat/service.go @@ -149,23 +149,27 @@ func (hs *HeartbeatService) executeHeartbeat() { handler := hs.handler if !hs.enabled || hs.stopChan == nil { hs.mu.RUnlock() + logger.InfoC("heartbeat", "[AUDIT] executeHeartbeat: skipped (disabled or stopChan nil)") return } hs.mu.RUnlock() if !enabled { + logger.InfoC("heartbeat", "[AUDIT] executeHeartbeat: skipped (not enabled)") return } - logger.DebugC("heartbeat", "Executing heartbeat") + logger.InfoC("heartbeat", "[AUDIT] executeHeartbeat: START") prompt := hs.buildPrompt() if prompt == "" { - logger.InfoC("heartbeat", "No heartbeat prompt (HEARTBEAT.md empty or missing)") + logger.InfoC("heartbeat", "[AUDIT] executeHeartbeat: empty prompt, aborting") + hs.logInfo("No heartbeat prompt (HEARTBEAT.md empty or missing)") return } if handler == nil { + logger.InfoC("heartbeat", "[AUDIT] executeHeartbeat: handler nil, aborting") hs.logError("Heartbeat handler not configured") return } @@ -175,15 +179,28 @@ func (hs *HeartbeatService) executeHeartbeat() { channel, chatID := hs.parseLastChannel(lastChannel) // Debug log for channel resolution + logger.InfoCF("heartbeat", "[AUDIT] Pre-handler", map[string]interface{}{ + "channel": channel, 
"chatID": chatID, "lastChannel": lastChannel, + }) hs.logInfo("Resolved channel: %s, chatID: %s (from lastChannel: %s)", channel, chatID, lastChannel) result := handler(prompt, channel, chatID) if result == nil { + logger.InfoC("heartbeat", "[AUDIT] Post-handler: result is nil") hs.logInfo("Heartbeat handler returned nil result") return } + // AUDIT: log every field of the result + logger.InfoCF("heartbeat", "[AUDIT] Post-handler result", map[string]interface{}{ + "Silent": result.Silent, + "IsError": result.IsError, + "Async": result.Async, + "ForLLM": truncateForLog(result.ForLLM, 150), + "ForUser": truncateForLog(result.ForUser, 150), + }) + // Handle different result types if result.IsError { hs.logError("Heartbeat error: %s", result.ForLLM) @@ -192,15 +209,12 @@ func (hs *HeartbeatService) executeHeartbeat() { if result.Async { hs.logInfo("Async task started: %s", result.ForLLM) - logger.InfoCF("heartbeat", "Async heartbeat task started", - map[string]interface{}{ - "message": result.ForLLM, - }) return } // Check if silent if result.Silent { + logger.InfoC("heartbeat", "[AUDIT] DROPPED: result.Silent=true — NOT sending to user") hs.logInfo("Heartbeat OK - silent") return } @@ -212,10 +226,19 @@ func (hs *HeartbeatService) executeHeartbeat() { } if strings.TrimSpace(content) == "HEARTBEAT_OK" { + logger.InfoC("heartbeat", "[AUDIT] DROPPED: exact HEARTBEAT_OK match — NOT sending to user") hs.logInfo("Heartbeat OK - normal metrics, silent drop") return } + // LEAK PATH: if we reach here, the message WILL be sent to the user + logger.WarnCF("heartbeat", "[AUDIT] LEAK: Heartbeat message reaching sendResponse!", map[string]interface{}{ + "content": truncateForLog(content, 200), + "Silent": result.Silent, + "ForUser": truncateForLog(result.ForUser, 100), + "ForLLM": truncateForLog(result.ForLLM, 100), + }) + // Send result to user if content != "" { hs.sendResponse(content) @@ -238,8 +261,8 @@ func (hs *HeartbeatService) buildPrompt() string { return "" } - content 
:= string(data) - if len(content) == 0 { + content := strings.TrimSpace(string(data)) + if content == "" { return "" } @@ -260,8 +283,12 @@ System Disk Status: %s You are a proactive AI assistant. This is a scheduled heartbeat check. Review the following tasks and execute any necessary actions using available skills. -CRITICAL INSTRUCTION: If there are no tasks in HEARTBEAT.md requiring execution today (and you haven't executed any tools), and the System Status is Normal, respond ONLY with the exact string: HEARTBEAT_OK -Do NOT output anything else. Do NOT generate a full report unless explicitly instructed by the tasks. +CRITICAL INSTRUCTION: When ALL of the following are true, respond with ONLY the exact text HEARTBEAT_OK — nothing else, no extra information, no status summary: + 1. System Status is Normal (disk, memory, network all healthy) + 2. No tasks in HEARTBEAT.md require execution today + 3. All system checks returned normal/healthy results — no warnings, alerts, or anomalies + +If ANY issue, alert, anomaly, or task result needs reporting, do NOT include HEARTBEAT_OK anywhere in your response. Write a concise report instead. %s `, now, diskStatus, content) @@ -271,24 +298,28 @@ Do NOT output anything else. Do NOT generate a full report unless explicitly ins func (hs *HeartbeatService) createDefaultHeartbeatTemplate() { heartbeatPath := filepath.Join(hs.workspace, "HEARTBEAT.md") - defaultContent := `# Heartbeat Check List + defaultContent := `# Heartbeat Tasks -This file contains tasks for the heartbeat service to check periodically. +Execute ALL tasks below every heartbeat cycle. Use shell commands for local data — do NOT waste API tokens on info available locally. -## Examples +## 1. Time & Date (local — use shell) +- Run: ` + "`date '+%A, %B %d %Y — %I:%M %p %Z'`" + ` +- Note any upcoming reminders from memory files -- Check for unread messages -- Review active or scheduled jobs -- Check device status (e.g., MaixCam or LuckFox) +## 2. 
Device Health (local — use shell) +- Run: ` + "`free -m | grep Mem`" + ` — report available memory +- Run: ` + "`uptime`" + ` — report uptime and load +- If available memory < 5MB, warn the user immediately -## Instructions +## 3. Network (local — use shell) +- Run: ` + "`ping -c 1 -W 2 8.8.8.8 > /dev/null 2>&1 && echo \"Online\" || echo \"OFFLINE\"`" + ` +- If offline, alert the user -- Execute ALL tasks listed below. Do NOT skip any task. -- For simple tasks (e.g., report current time), respond directly. -- For complex tasks that may take time, use the spawn tool to create a subagent. -- The spawn tool is async - subagent results will be sent to the user automatically. -- After spawning a subagent, CONTINUE to process remaining tasks. -- Only respond with HEARTBEAT_OK when ALL tasks are done AND nothing needs attention. +## Instructions +- Use shell tool for ALL tasks above — they are local system checks +- Keep responses brief — one line per task max +- Only respond with HEARTBEAT_OK after ALL tasks are complete and nothing needs attention +- If any task shows a problem, flag it clearly --- @@ -371,15 +402,31 @@ func (hs *HeartbeatService) logError(format string, args ...any) { hs.log("ERROR", format, args...) } -// log writes a message to the heartbeat log file +// log writes a message to the heartbeat log file, with stderr fallback func (hs *HeartbeatService) log(level, format string, args ...any) { logFile := filepath.Join(hs.workspace, "heartbeat.log") - f, err := os.OpenFile(logFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + message := fmt.Sprintf(format, args...) 
+ timestamp := time.Now().Format("2006-01-02 15:04:05") + line := fmt.Sprintf("[%s] [%s] %s\n", timestamp, level, message) + + f, err := os.OpenFile(logFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0600) if err != nil { + // Fallback: log to stderr and the structured logger so we don't silently lose entries + logger.WarnCF("heartbeat", "Failed to write heartbeat.log", map[string]interface{}{ + "error": err.Error(), + "message": message, + }) return } defer f.Close() - timestamp := time.Now().Format("2006-01-02 15:04:05") - fmt.Fprintf(f, "[%s] [%s] %s\n", timestamp, level, fmt.Sprintf(format, args...)) + fmt.Fprint(f, line) +} + +// truncateForLog truncates a string for safe logging +func truncateForLog(s string, maxLen int) string { + if len(s) <= maxLen { + return s + } + return s[:maxLen] + "..." } diff --git a/pkg/tools/cron.go b/pkg/tools/cron.go index 7ac10b9..ba7de7b 100644 --- a/pkg/tools/cron.go +++ b/pkg/tools/cron.go @@ -59,7 +59,7 @@ func (t *CronTool) Parameters() map[string]interface{} { }, "message": map[string]interface{}{ "type": "string", - "description": "The reminder/task message to display when triggered. If 'command' is used, this describes what the command does.", + "description": "The reminder/task message to display when triggered. Use a clear, actionable text. Do NOT include timing info in the message (e.g., '5 min reminder') — the system handles timing automatically.", }, "command": map[string]interface{}{ "type": "string", @@ -67,16 +67,20 @@ func (t *CronTool) Parameters() map[string]interface{} { }, "at_seconds": map[string]interface{}{ "type": "integer", - "description": "One-time reminder: seconds from NOW to trigger. Use ONLY for relative time offsets (e.g., 'in 10 minutes' → 600, 'in 1 hour' → 3600). Do NOT use for specific clock times like 'at 7am' — use cron_expr instead.", + "description": "One-time reminder: seconds from NOW to trigger. Use ONLY for relative time offsets (e.g., 'in 10 minutes' → 600, 'in 1 hour' → 3600). 
Do NOT use for specific clock times like 'at 7am' — use cron_expr instead. For multiple staggered one-time reminders, create multiple jobs with different at_seconds values.", }, "every_seconds": map[string]interface{}{ "type": "integer", - "description": "Recurring interval in seconds with NO clock anchor (e.g., 3600 for every hour). Use ONLY when no specific time-of-day is mentioned (e.g., 'every 2 hours', 'every 30 minutes'). Do NOT use for 'daily at 7am' — use cron_expr='0 7 * * *' instead.", + "description": "Recurring interval in seconds (e.g., 300 for every 5 minutes). For bounded recurrence like 'every 5 min for 30 min', combine with max_runs (e.g., every_seconds=300, max_runs=6). Do NOT create multiple separate jobs for the same recurring task — use ONE job with max_runs instead. Do NOT use for 'daily at 7am' — use cron_expr='0 7 * * *' instead.", }, "cron_expr": map[string]interface{}{ "type": "string", "description": "Standard 5-field cron expression (minute hour day month weekday). Use this for ANY schedule at a specific clock time. Examples: daily at 7am → '0 7 * * *', weekdays at 9:30am → '30 9 * * 1-5', every day at 6:15pm → '15 18 * * *'. This is the PREFERRED method for daily alarms and time-anchored reminders.", }, + "max_runs": map[string]interface{}{ + "type": "integer", + "description": "Maximum number of times to run before auto-deleting. Use for bounded recurring tasks: 'every 5 min for 30 min' → every_seconds=300, max_runs=6. Default: 0 (unlimited). 
Only meaningful with every_seconds or cron_expr.", + }, "job_id": map[string]interface{}{ "type": "string", "description": "Job ID (for remove/enable/disable)", @@ -173,13 +177,15 @@ func (t *CronTool) addJob(args map[string]interface{}) *ToolResult { command, _ := args["command"].(string) if command != "" { - // Commands must be processed by agent/exec tool, so deliver must be false (or handled specifically) - // Actually, let's keep deliver=false to let the system know it's not a simple chat message - // But for our new logic in ExecuteJob, we can handle it regardless of deliver flag if Payload.Command is set. - // However, logically, it's not "delivered" to chat directly as is. deliver = false } + // Read max_runs parameter + maxRuns := 0 + if mr, ok := args["max_runs"].(float64); ok { + maxRuns = int(mr) + } + // Truncate message for job name (max 30 chars) messagePreview := utils.Truncate(message, 30) @@ -190,6 +196,7 @@ func (t *CronTool) addJob(args map[string]interface{}) *ToolResult { deliver, channel, chatID, + maxRuns, ) if err != nil { return ErrorResult(fmt.Sprintf("Error adding job: %v", err)) @@ -205,7 +212,7 @@ func (t *CronTool) addJob(args map[string]interface{}) *ToolResult { } func (t *CronTool) listJobs() *ToolResult { - jobs := t.cronService.ListJobs(false) + jobs := t.cronService.ListJobs(true) if len(jobs) == 0 { return SilentResult("No scheduled jobs") @@ -213,6 +220,10 @@ func (t *CronTool) listJobs() *ToolResult { result := "Scheduled jobs:\n" for _, j := range jobs { + status := "enabled" + if !j.Enabled { + status = "disabled" + } var scheduleInfo string if j.Schedule.Kind == "every" && j.Schedule.EveryMS != nil { scheduleInfo = fmt.Sprintf("every %ds", *j.Schedule.EveryMS/1000) @@ -223,7 +234,11 @@ func (t *CronTool) listJobs() *ToolResult { } else { scheduleInfo = "unknown" } - result += fmt.Sprintf("- %s (id: %s, %s)\n", j.Name, j.ID, scheduleInfo) + progress := "" + if j.MaxRuns > 0 { + progress = fmt.Sprintf(" [run %d/%d]", 
j.RunCount, j.MaxRuns) + } + result += fmt.Sprintf("- %s (id: %s, %s, %s%s)\n", j.Name, j.ID, status, scheduleInfo, progress) } return SilentResult(result)