Merge origin/main: catch up with upstream (OneCLI, diagnostics, credential proxy)
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
131
.claude/skills/claw/SKILL.md
Normal file
131
.claude/skills/claw/SKILL.md
Normal file
@@ -0,0 +1,131 @@
|
||||
---
|
||||
name: claw
|
||||
description: Install the claw CLI tool — run NanoClaw agent containers from the command line without opening a chat app.
|
||||
---
|
||||
|
||||
# claw — NanoClaw CLI
|
||||
|
||||
`claw` is a Python CLI that sends prompts directly to a NanoClaw agent container from the terminal. It reads registered groups from the NanoClaw database, picks up secrets from `.env`, and pipes a JSON payload into a container run — no chat app required.
|
||||
|
||||
## What it does
|
||||
|
||||
- Send a prompt to any registered group by name, folder, or JID
|
||||
- Default target is the main group (no `-g` needed for most use)
|
||||
- Resume a previous session with `-s <session-id>`
|
||||
- Read prompts from stdin (`--pipe`) for scripting and piping
|
||||
- List all registered groups with `--list-groups`
|
||||
- Auto-detects `container` or `docker` runtime (or override with `--runtime`)
|
||||
- Prints the agent's response to stdout; session ID to stderr
|
||||
- Verbose mode (`-v`) shows the command, redacted payload, and exit code
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- Python 3.8 or later
|
||||
- NanoClaw installed with a built and tagged container image (`nanoclaw-agent:latest`)
|
||||
- Either `container` (Apple Container, macOS 15+) or `docker` available in `PATH`
|
||||
|
||||
## Install
|
||||
|
||||
Run this skill from within the NanoClaw directory. The script auto-detects its location, so the symlink always points to the right place.
|
||||
|
||||
### 1. Copy the script
|
||||
|
||||
```bash
|
||||
mkdir -p scripts
|
||||
cp "${CLAUDE_SKILL_DIR}/scripts/claw" scripts/claw
|
||||
chmod +x scripts/claw
|
||||
```
|
||||
|
||||
### 2. Symlink into PATH
|
||||
|
||||
```bash
|
||||
mkdir -p ~/bin
|
||||
ln -sf "$(pwd)/scripts/claw" ~/bin/claw
|
||||
```
|
||||
|
||||
Make sure `~/bin` is in `PATH`. Add this to `~/.zshrc` or `~/.bashrc` if needed:
|
||||
|
||||
```bash
|
||||
export PATH="$HOME/bin:$PATH"
|
||||
```
|
||||
|
||||
Then reload the shell:
|
||||
|
||||
```bash
|
||||
source ~/.zshrc # or ~/.bashrc
|
||||
```
|
||||
|
||||
### 3. Verify
|
||||
|
||||
```bash
|
||||
claw --list-groups
|
||||
```
|
||||
|
||||
You should see registered groups. If NanoClaw isn't running or the database doesn't exist yet, the list will be empty — that's fine.
|
||||
|
||||
## Usage Examples
|
||||
|
||||
```bash
|
||||
# Send a prompt to the main group
|
||||
claw "What's on my calendar today?"
|
||||
|
||||
# Send to a specific group by name (fuzzy match)
|
||||
claw -g "family" "Remind everyone about dinner at 7"
|
||||
|
||||
# Send to a group by exact JID
|
||||
claw -j "120363336345536173@g.us" "Hello"
|
||||
|
||||
# Resume a previous session
|
||||
claw -s abc123 "Continue where we left off"
|
||||
|
||||
# Read prompt from stdin
|
||||
echo "Summarize this" | claw --pipe -g dev
|
||||
|
||||
# Pipe a file
|
||||
cat report.txt | claw --pipe "Summarize this report"
|
||||
|
||||
# List all registered groups
|
||||
claw --list-groups
|
||||
|
||||
# Force a specific runtime
|
||||
claw --runtime docker "Hello"
|
||||
|
||||
# Use a custom image tag (e.g. after rebuilding with a new tag)
|
||||
claw --image nanoclaw-agent:dev "Hello"
|
||||
|
||||
# Verbose mode (debug info, secrets redacted)
|
||||
claw -v "Hello"
|
||||
|
||||
# Custom timeout for long-running tasks
|
||||
claw --timeout 600 "Run the full analysis"
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### "neither 'container' nor 'docker' found"
|
||||
|
||||
Install Docker Desktop or Apple Container (macOS 15+), or pass `--runtime` explicitly.
|
||||
|
||||
### "no secrets found in .env"
|
||||
|
||||
The script auto-detects your NanoClaw directory and reads `.env` from it. Check that the file exists and contains at least one of: `CLAUDE_CODE_OAUTH_TOKEN`, `ANTHROPIC_API_KEY`, `ANTHROPIC_AUTH_TOKEN`.
|
||||
|
||||
### Container times out
|
||||
|
||||
The default timeout is 300 seconds. For longer tasks, pass `--timeout 600` (or higher). If the container consistently hangs, check that your `nanoclaw-agent:latest` image is up to date by running `./container/build.sh`.
|
||||
|
||||
### "group not found"
|
||||
|
||||
Run `claw --list-groups` to see what's registered. Group lookup does a fuzzy partial match on name and folder — if your query matches multiple groups, you'll get an error listing the ambiguous matches.
|
||||
|
||||
### Container crashes mid-stream
|
||||
|
||||
Containers run with `--rm` so they are automatically removed. If the agent crashes before emitting the output sentinel, `claw` falls back to printing raw stdout. Use `-v` to see what the container produced. Rebuild the image with `./container/build.sh` if crashes are consistent.
|
||||
|
||||
### Override the NanoClaw directory
|
||||
|
||||
If `claw` can't find your database or `.env`, set the `NANOCLAW_DIR` environment variable:
|
||||
|
||||
```bash
|
||||
export NANOCLAW_DIR=/path/to/your/nanoclaw
|
||||
```
|
||||
318
.claude/skills/claw/scripts/claw
Normal file
318
.claude/skills/claw/scripts/claw
Normal file
@@ -0,0 +1,318 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
claw — NanoClaw CLI
|
||||
Run a NanoClaw agent container from the command line.
|
||||
|
||||
Usage:
|
||||
claw "What is 2+2?"
|
||||
claw -g <channel_name> "Review this code"
|
||||
claw -g "<channel name with spaces>" "What's the latest issue?"
|
||||
claw -j "<chatJid>" "Hello"
|
||||
claw -g <channel_name> -s <session-id> "Continue"
|
||||
claw --list-groups
|
||||
echo "prompt text" | claw --pipe -g <channel_name>
|
||||
cat prompt.txt | claw --pipe
|
||||
"""
|
||||
|
||||
from __future__ import annotations

import argparse
import json
import os
import re
import shutil
import sqlite3
import subprocess
import sys
import threading
from pathlib import Path
|
||||
|
||||
# ── Globals ─────────────────────────────────────────────────────────────────
|
||||
|
||||
# Global verbosity flag; flipped on by -v/--verbose in main().
VERBOSE = False


def dbg(*args):
    """Emit a debug line on stderr, but only when verbose mode is enabled."""
    if not VERBOSE:
        return
    print("»", *args, file=sys.stderr)
|
||||
|
||||
# ── Config ──────────────────────────────────────────────────────────────────
|
||||
|
||||
def _find_nanoclaw_dir() -> Path:
|
||||
"""Locate the NanoClaw installation directory.
|
||||
|
||||
Resolution order:
|
||||
1. NANOCLAW_DIR env var
|
||||
2. The directory containing this script (if it looks like a NanoClaw install)
|
||||
3. ~/src/nanoclaw (legacy default)
|
||||
"""
|
||||
if env := os.environ.get("NANOCLAW_DIR"):
|
||||
return Path(env).expanduser()
|
||||
# If this script lives inside the NanoClaw tree (e.g. scripts/claw), walk up
|
||||
here = Path(__file__).resolve()
|
||||
for parent in [here.parent, here.parent.parent]:
|
||||
if (parent / "store" / "messages.db").exists() or (parent / ".env").exists():
|
||||
return parent
|
||||
return Path.home() / "src" / "nanoclaw"
|
||||
|
||||
# Resolved once at import time; honors the NANOCLAW_DIR env override.
NANOCLAW_DIR = _find_nanoclaw_dir()
# SQLite database holding the registered_groups table (read by get_groups()).
DB_PATH = NANOCLAW_DIR / "store" / "messages.db"
# Dotenv file that secrets are read from (see read_secrets()).
ENV_FILE = NANOCLAW_DIR / ".env"
# Default container image tag; overridable with --image.
IMAGE = "nanoclaw-agent:latest"

# Only these keys are copied from .env into the container payload;
# all other .env entries are ignored.
SECRET_KEYS = [
    "CLAUDE_CODE_OAUTH_TOKEN",
    "ANTHROPIC_API_KEY",
    "ANTHROPIC_BASE_URL",
    "ANTHROPIC_AUTH_TOKEN",
    "OLLAMA_HOST",
]
|
||||
|
||||
# ── Helpers ──────────────────────────────────────────────────────────────────
|
||||
|
||||
def detect_runtime(preference: str | None) -> str:
    """Return the container runtime command to use.

    If `preference` was given (via --runtime) it is returned unchanged.
    Otherwise auto-detect by scanning PATH for `container` first, then
    `docker`. Exits the process with an error when neither is found.
    """
    if preference:
        dbg(f"runtime: forced to {preference}")
        return preference
    for rt in ("container", "docker"):
        # shutil.which performs the PATH lookup in-process — no need to
        # shell out to the external `which` binary.
        path = shutil.which(rt)
        if path:
            dbg(f"runtime: auto-detected {rt} at {path}")
            return rt
    sys.exit("error: neither 'container' nor 'docker' found. Install one or pass --runtime.")
|
||||
|
||||
|
||||
def read_secrets(env_file: Path) -> dict:
    """Parse a dotenv file and return only the entries listed in SECRET_KEYS.

    Blank lines, comment lines, and lines without `=` are skipped; keys and
    values are whitespace-trimmed. Returns {} when the file does not exist.
    """
    found = {}
    if not env_file.exists():
        return found
    for raw in env_file.read_text().splitlines():
        entry = raw.strip()
        if not entry or entry.startswith("#") or "=" not in entry:
            continue
        name, _, value = entry.partition("=")
        name = name.strip()
        if name in SECRET_KEYS:
            found[name] = value.strip()
    return found
|
||||
|
||||
|
||||
def get_groups(db: Path) -> list[dict]:
    """Load all registered groups from the NanoClaw SQLite database.

    Returns a list of {"jid", "name", "folder", "is_main"} dicts sorted by
    group name, with is_main coerced to bool.
    """
    conn = sqlite3.connect(db)
    try:
        # try/finally guarantees the connection is released even if the
        # table is missing or the query fails (the original leaked it).
        rows = conn.execute(
            "SELECT jid, name, folder, is_main FROM registered_groups ORDER BY name"
        ).fetchall()
    finally:
        conn.close()
    return [{"jid": r[0], "name": r[1], "folder": r[2], "is_main": bool(r[3])} for r in rows]
|
||||
|
||||
|
||||
def find_group(groups: list[dict], query: str) -> dict | None:
    """Resolve `query` to one group by name or folder (case-insensitive).

    Tries an exact match first, then a partial (substring) match. Returns
    None when nothing matches; exits with an error listing the candidates
    when the partial match is ambiguous.
    """
    needle = query.lower()
    # Exact match on name or folder wins outright.
    for candidate in groups:
        exact = (
            candidate["name"].lower() == needle
            or candidate["folder"].lower() == needle
        )
        if exact:
            return candidate
    # Fall back to substring matching.
    partial = [
        g for g in groups
        if needle in g["name"].lower() or needle in g["folder"].lower()
    ]
    if len(partial) > 1:
        listed = ", ".join(f'"{g["name"]}"' for g in partial)
        sys.exit(f"error: ambiguous group '{query}'. Matches: {listed}")
    if partial:
        return partial[0]
    return None
|
||||
|
||||
|
||||
def run_container(runtime: str, image: str, payload: dict, timeout: int = 300) -> None:
    """Run one agent container: write `payload` as JSON to its stdin, stream
    its output, then parse the sentinel-delimited JSON block and print it.

    Output contract (as implemented below):
      - stdout between ---NANOCLAW_OUTPUT_START--- / ---NANOCLAW_OUTPUT_END---
        is parsed as JSON; on status "success" the result goes to stdout and
        the session id to stderr; any other status prints to stderr and
        exits(1); a non-JSON sentinel body is printed verbatim.
      - with no sentinel block at all, raw stdout is printed as a fallback.
    Exits the whole process on timeout or on a non-zero container exit code.
    """
    # -i keeps stdin open for the payload; --rm removes the container afterwards.
    cmd = [runtime, "run", "-i", "--rm", image]
    dbg(f"cmd: {' '.join(cmd)}")

    # Show payload sans secrets (secret values redacted, keys kept visible)
    if VERBOSE:
        safe = {k: v for k, v in payload.items() if k != "secrets"}
        safe["secrets"] = {k: "***" for k in payload.get("secrets", {})}
        dbg(f"payload: {json.dumps(safe, indent=2)}")

    proc = subprocess.Popen(
        cmd,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    dbg(f"container pid: {proc.pid}")

    # Write JSON payload and close stdin so the agent sees EOF
    proc.stdin.write(json.dumps(payload).encode())
    proc.stdin.close()
    dbg("stdin closed, waiting for response...")

    stdout_lines: list[str] = []
    stderr_lines: list[str] = []
    # Set by the stdout reader once the closing sentinel is seen.
    done = threading.Event()

    def stream_stderr():
        # Mirror container stderr to ours, dropping npm installer noise.
        for raw in proc.stderr:
            line = raw.decode(errors="replace").rstrip()
            if line.startswith("npm notice"):
                continue
            stderr_lines.append(line)
            print(line, file=sys.stderr)

    def stream_stdout():
        # Accumulate stdout lines for the sentinel parse below.
        for raw in proc.stdout:
            line = raw.decode(errors="replace").rstrip()
            stdout_lines.append(line)
            dbg(f"stdout: {line}")
            # Kill the container as soon as we see the closing sentinel —
            # the Node.js event loop often keeps the process alive indefinitely.
            if line.strip() == "---NANOCLAW_OUTPUT_END---":
                dbg("output sentinel found, terminating container")
                done.set()
                try:
                    proc.kill()
                except ProcessLookupError:
                    pass
                return

    # Daemon threads so a wedged pipe cannot block interpreter shutdown.
    t_err = threading.Thread(target=stream_stderr, daemon=True)
    t_out = threading.Thread(target=stream_stdout, daemon=True)
    t_err.start()
    t_out.start()

    # Wait for sentinel or timeout
    # NOTE(review): if the container exits naturally WITHOUT emitting the end
    # sentinel, `done` is never set, so this blocks for the full `timeout`
    # and then exits with a timeout error — the raw-stdout fallback further
    # down is only reachable when an end sentinel appears without a matching
    # start sentinel. Confirm whether an early `proc.poll()` check was intended.
    if not done.wait(timeout=timeout):
        # Also check if process exited naturally
        t_out.join(timeout=2)
        if not done.is_set():
            proc.kill()
            sys.exit(f"error: container timed out after {timeout}s (no output sentinel received)")

    t_err.join(timeout=5)
    t_out.join(timeout=5)
    proc.wait()
    dbg(f"container done (rc={proc.returncode}), {len(stdout_lines)} stdout lines")
    stdout = "\n".join(stdout_lines)

    # Parse output block between the sentinels (non-greedy, DOTALL spans newlines)
    match = re.search(
        r"---NANOCLAW_OUTPUT_START---\n(.*?)\n---NANOCLAW_OUTPUT_END---",
        stdout,
        re.DOTALL,
    )
    if match:
        try:
            data = json.loads(match.group(1))
            status = data.get("status", "unknown")
            if status == "success":
                print(data.get("result", ""))
                # Prefer the rotated id so the next -s resumes the right session.
                session_id = data.get("newSessionId") or data.get("sessionId")
                if session_id:
                    print(f"\n[session: {session_id}]", file=sys.stderr)
            else:
                # Non-success status: surface the message on stderr and fail.
                print(f"[{status}] {data.get('result', '')}", file=sys.stderr)
                sys.exit(1)
        except json.JSONDecodeError:
            # Sentinels present but the body is not JSON — print it verbatim.
            print(match.group(1))
    else:
        # No structured output — print raw stdout
        print(stdout)

    # NOTE(review): after the sentinel path kill()s the client, returncode is
    # negative (killed by signal), so even successful runs reach this exit
    # with a non-zero code — confirm whether success should exit 0 here.
    if proc.returncode not in (0, None):
        sys.exit(proc.returncode)
|
||||
|
||||
|
||||
# ── Main ─────────────────────────────────────────────────────────────────────
|
||||
|
||||
def main():
    """CLI entry point: parse args, resolve the target group, build the JSON
    payload (prompt + secrets + session info), and run the agent container."""
    parser = argparse.ArgumentParser(
        prog="claw",
        description="Run a NanoClaw agent from the command line.",
    )
    parser.add_argument("prompt", nargs="?", help="Prompt to send")
    parser.add_argument("-g", "--group", help="Group name or folder (fuzzy match)")
    parser.add_argument("-j", "--jid", help="Chat JID (exact)")
    parser.add_argument("-s", "--session", help="Session ID to resume")
    parser.add_argument("-p", "--pipe", action="store_true",
                        help="Read prompt from stdin (can be combined with a prompt arg as prefix)")
    parser.add_argument("--runtime", choices=["docker", "container"],
                        help="Container runtime (default: auto-detect)")
    parser.add_argument("--image", default=IMAGE, help=f"Container image (default: {IMAGE})")
    parser.add_argument("--list-groups", action="store_true", help="List registered groups and exit")
    # NOTE(review): --raw is accepted but never read anywhere in this function
    # (or in run_container) — dead flag, or unfinished feature? Confirm.
    parser.add_argument("--raw", action="store_true", help="Print raw JSON output")
    parser.add_argument("--timeout", type=int, default=300, metavar="SECS",
                        help="Max seconds to wait for a response (default: 300)")
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="Show debug info: cmd, payload (secrets redacted), stdout lines, exit code")
    args = parser.parse_args()

    global VERBOSE
    VERBOSE = args.verbose

    # Missing DB just means no groups registered yet — not an error.
    groups = get_groups(DB_PATH) if DB_PATH.exists() else []

    if args.list_groups:
        print(f"{'NAME':<35} {'FOLDER':<30} {'JID'}")
        print("-" * 100)
        for g in groups:
            main_tag = " [main]" if g["is_main"] else ""
            print(f"{g['name']:<35} {g['folder']:<30} {g['jid']}{main_tag}")
        return

    # Resolve prompt: --pipe reads stdin, optionally prepended with positional arg.
    # Piped stdin with no positional prompt also triggers stdin mode implicitly.
    if args.pipe or (not sys.stdin.isatty() and not args.prompt):
        stdin_text = sys.stdin.read().strip()
        if args.prompt:
            prompt = f"{args.prompt}\n\n{stdin_text}"
        else:
            prompt = stdin_text
    else:
        prompt = args.prompt

    if not prompt:
        parser.print_help()
        sys.exit(1)

    # Resolve group → jid (-j wins unless -g is given; otherwise the main group)
    jid = args.jid
    group_name = None
    is_main = False

    if args.group:
        g = find_group(groups, args.group)
        if g is None:
            sys.exit(f"error: group '{args.group}' not found. Run --list-groups to see options.")
        jid = g["jid"]
        group_name = g["name"]
        is_main = g["is_main"]
    elif not jid:
        # Default: main group
        mains = [g for g in groups if g["is_main"]]
        if mains:
            jid = mains[0]["jid"]
            group_name = mains[0]["name"]
            is_main = True
        else:
            sys.exit("error: no group specified and no main group found. Use -g or -j.")

    runtime = detect_runtime(args.runtime)
    secrets = read_secrets(ENV_FILE)

    # Best-effort warning only — the container may still authenticate another way.
    if not secrets:
        print("warning: no secrets found in .env — agent may not be authenticated", file=sys.stderr)

    payload: dict = {
        "prompt": prompt,
        "chatJid": jid,
        "isMain": is_main,
        "secrets": secrets,
    }
    if group_name:
        # NOTE(review): the key says "groupFolder" but this assigns the group
        # *name* (g["name"]), while g["folder"] is available — confirm which
        # one the container payload actually expects.
        payload["groupFolder"] = group_name
    if args.session:
        payload["sessionId"] = args.session
        payload["resumeAt"] = "latest"

    print(f"[{group_name or jid}] running via {runtime}...", file=sys.stderr)
    run_container(runtime, args.image, payload, timeout=args.timeout)


if __name__ == "__main__":
    main()
|
||||
@@ -50,7 +50,7 @@ Already configured. Continue.
|
||||
|
||||
**Verify:** `git remote -v` should show `origin` → user's repo, `upstream` → `qwibitai/nanoclaw.git`.
|
||||
|
||||
## 1. Bootstrap (Node.js + Dependencies)
|
||||
## 1. Bootstrap (Node.js + Dependencies + OneCLI)
|
||||
|
||||
Run `bash setup.sh` and parse the status block.
|
||||
|
||||
@@ -62,6 +62,34 @@ Run `bash setup.sh` and parse the status block.
|
||||
- If NATIVE_OK=false → better-sqlite3 failed to load. Install build tools and re-run.
|
||||
- Record PLATFORM and IS_WSL for later steps.
|
||||
|
||||
After bootstrap succeeds, install OneCLI and its CLI tool:
|
||||
|
||||
```bash
|
||||
curl -fsSL https://onecli.sh/install | sh
curl -fsSL https://onecli.sh/cli/install | sh
|
||||
```
|
||||
|
||||
Verify both installed: `onecli version`. If the command is not found, the CLI was likely installed to `~/.local/bin/`. Add it to PATH for the current session and persist it:
|
||||
|
||||
```bash
|
||||
export PATH="$HOME/.local/bin:$PATH"
|
||||
# Persist for future sessions (append to shell profile if not already present)
|
||||
grep -q '.local/bin' ~/.bashrc 2>/dev/null || echo 'export PATH="$HOME/.local/bin:$PATH"' >> ~/.bashrc
|
||||
grep -q '.local/bin' ~/.zshrc 2>/dev/null || echo 'export PATH="$HOME/.local/bin:$PATH"' >> ~/.zshrc
|
||||
```
|
||||
|
||||
Then re-verify with `onecli version`.
|
||||
|
||||
Point the CLI at the local OneCLI instance (it defaults to the cloud service otherwise):
|
||||
```bash
|
||||
onecli config set api-host http://127.0.0.1:10254
|
||||
```
|
||||
|
||||
Ensure `.env` has the OneCLI URL (create the file if it doesn't exist):
|
||||
```bash
|
||||
grep -q 'ONECLI_URL' .env 2>/dev/null || echo 'ONECLI_URL=http://127.0.0.1:10254' >> .env
|
||||
```
|
||||
|
||||
## 2. Check Environment
|
||||
|
||||
Run `npx tsx setup/index.ts --step environment` and parse the status block.
|
||||
@@ -112,15 +140,47 @@ Run `npx tsx setup/index.ts --step container -- --runtime <chosen>` and parse th
|
||||
|
||||
**If TEST_OK=false but BUILD_OK=true:** The image built but won't run. Check logs — common cause is runtime not fully started. Wait a moment and retry the test.
|
||||
|
||||
## 4. Claude Authentication (No Script)
|
||||
## 4. Anthropic Credentials via OneCLI
|
||||
|
||||
If HAS_ENV=true from step 2, read `.env` and check for `CLAUDE_CODE_OAUTH_TOKEN` or `ANTHROPIC_API_KEY`. If present, confirm with user: keep or reconfigure?
|
||||
NanoClaw uses OneCLI to manage credentials — API keys are never stored in `.env` or exposed to containers. The OneCLI gateway injects them at request time.
|
||||
|
||||
AskUserQuestion: Claude subscription (Pro/Max) vs Anthropic API key?
|
||||
Check if a secret already exists:
|
||||
```bash
|
||||
onecli secrets list
|
||||
```
|
||||
|
||||
**Subscription:** Tell user to run `claude setup-token` in another terminal, copy the token, add `CLAUDE_CODE_OAUTH_TOKEN=<token>` to `.env`. Do NOT collect the token in chat.
|
||||
If an Anthropic secret is listed, confirm with user: keep or reconfigure? If keeping, skip to step 5.
|
||||
|
||||
**API key:** Tell user to add `ANTHROPIC_API_KEY=<key>` to `.env`.
|
||||
AskUserQuestion: Do you want to use your **Claude subscription** (Pro/Max) or an **Anthropic API key**?
|
||||
|
||||
1. **Claude subscription (Pro/Max)** — description: "Uses your existing Claude Pro or Max subscription. You'll run `claude setup-token` in another terminal to get your token."
|
||||
2. **Anthropic API key** — description: "Pay-per-use API key from console.anthropic.com."
|
||||
|
||||
### Subscription path
|
||||
|
||||
Tell the user to run `claude setup-token` in another terminal and copy the token it outputs. Do NOT collect the token in chat.
|
||||
|
||||
Once they have the token, they register it with OneCLI. AskUserQuestion with two options:
|
||||
|
||||
1. **Dashboard** — description: "Best if you have a browser on this machine. Open http://127.0.0.1:10254 and add the secret in the UI. Use type 'anthropic' and paste your token as the value."
|
||||
2. **CLI** — description: "Best for remote/headless servers. Run: `onecli secrets create --name Anthropic --type anthropic --value YOUR_TOKEN --host-pattern api.anthropic.com`"
|
||||
|
||||
### API key path
|
||||
|
||||
Tell the user to get an API key from https://console.anthropic.com/settings/keys if they don't have one.
|
||||
|
||||
Then AskUserQuestion with two options:
|
||||
|
||||
1. **Dashboard** — description: "Best if you have a browser on this machine. Open http://127.0.0.1:10254 and add the secret in the UI."
|
||||
2. **CLI** — description: "Best for remote/headless servers. Run: `onecli secrets create --name Anthropic --type anthropic --value YOUR_KEY --host-pattern api.anthropic.com`"
|
||||
|
||||
### After either path
|
||||
|
||||
Ask them to let you know when done.
|
||||
|
||||
**If the user's response happens to contain a token or key** (starts with `sk-ant-`): handle it gracefully — run the `onecli secrets create` command with that value on their behalf.
|
||||
|
||||
**After user confirms:** verify with `onecli secrets list` that an Anthropic secret exists. If not, ask again.
|
||||
|
||||
## 5. Set Up Channels
|
||||
|
||||
@@ -198,7 +258,7 @@ Run `npx tsx setup/index.ts --step verify` and parse the status block.
|
||||
**If STATUS=failed, fix each:**
|
||||
- SERVICE=stopped → `npm run build`, then restart: `launchctl kickstart -k gui/$(id -u)/com.nanoclaw` (macOS) or `systemctl --user restart nanoclaw` (Linux) or `bash start-nanoclaw.sh` (WSL nohup)
|
||||
- SERVICE=not_found → re-run step 7
|
||||
- CREDENTIALS=missing → re-run step 4
|
||||
- CREDENTIALS=missing → re-run step 4 (check `onecli secrets list` for Anthropic secret)
|
||||
- CHANNEL_AUTH shows `not_found` for any channel → re-invoke that channel's skill (e.g. `/add-telegram`)
|
||||
- REGISTERED_GROUPS=0 → re-invoke the channel skills from step 5
|
||||
- MOUNT_ALLOWLIST=missing → `npx tsx setup/index.ts --step mounts -- --empty`
|
||||
@@ -207,7 +267,7 @@ Tell user to test: send a message in their registered chat. Show: `tail -f logs/
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
**Service not starting:** Check `logs/nanoclaw.error.log`. Common: wrong Node path (re-run step 7), missing `.env` (step 4), missing channel credentials (re-invoke channel skill).
|
||||
**Service not starting:** Check `logs/nanoclaw.error.log`. Common: wrong Node path (re-run step 7), OneCLI not running (check `curl http://127.0.0.1:10254/api/health`), missing channel credentials (re-invoke channel skill).
|
||||
|
||||
**Container agent fails ("Claude Code process exited with code 1"):** Ensure the container runtime is running — `open -a Docker` (macOS Docker), `container system start` (Apple Container), or `sudo systemctl start docker` (Linux). Check container logs in `groups/main/logs/container-*.log`.
|
||||
|
||||
@@ -216,3 +276,8 @@ Tell user to test: send a message in their registered chat. Show: `tail -f logs/
|
||||
**Channel not connecting:** Verify the channel's credentials are set in `.env`. Channels auto-enable when their credentials are present. For WhatsApp: check `store/auth/creds.json` exists. For token-based channels: check token values in `.env`. Restart the service after any `.env` change.
|
||||
|
||||
**Unload service:** macOS: `launchctl unload ~/Library/LaunchAgents/com.nanoclaw.plist` | Linux: `systemctl --user stop nanoclaw`
|
||||
|
||||
|
||||
## 9. Diagnostics
|
||||
|
||||
Send diagnostics data by following `.claude/skills/setup/diagnostics.md`.
|
||||
|
||||
46
.claude/skills/setup/diagnostics.md
Normal file
46
.claude/skills/setup/diagnostics.md
Normal file
@@ -0,0 +1,46 @@
|
||||
# Diagnostics
|
||||
|
||||
Gather system info:
|
||||
|
||||
```bash
|
||||
node -p "require('./package.json').version"
|
||||
uname -s
|
||||
uname -m
|
||||
node -p "process.versions.node.split('.')[0]"
|
||||
```
|
||||
|
||||
Write `/tmp/nanoclaw-diagnostics.json`. No paths, usernames, hostnames, or IP addresses.
|
||||
|
||||
```json
|
||||
{
|
||||
"api_key": "phc_fx1Hhx9ucz8GuaJC8LVZWO8u03yXZZJJ6ObS4yplnaP",
|
||||
"event": "setup_complete",
|
||||
"distinct_id": "<uuid>",
|
||||
"properties": {
|
||||
"success": true,
|
||||
"nanoclaw_version": "1.2.21",
|
||||
"os_platform": "darwin",
|
||||
"arch": "arm64",
|
||||
"node_major_version": 22,
|
||||
"channels_selected": ["telegram", "whatsapp"],
|
||||
"error_count": 0,
|
||||
"failed_step": null
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Show the entire JSON to the user and ask via AskUserQuestion: **Yes** / **No** / **Never ask again**
|
||||
|
||||
**Yes**:
|
||||
```bash
|
||||
curl -s -X POST https://us.i.posthog.com/capture/ -H 'Content-Type: application/json' -d @/tmp/nanoclaw-diagnostics.json
|
||||
rm /tmp/nanoclaw-diagnostics.json
|
||||
```
|
||||
|
||||
**No**: `rm /tmp/nanoclaw-diagnostics.json`
|
||||
|
||||
**Never ask again**:
|
||||
1. Replace contents of `.claude/skills/setup/diagnostics.md` with `# Diagnostics — opted out`
|
||||
2. Replace contents of `.claude/skills/update-nanoclaw/diagnostics.md` with `# Diagnostics — opted out`
|
||||
3. Remove the `## 9. Diagnostics` section from `.claude/skills/setup/SKILL.md` and the `## Diagnostics` section from `.claude/skills/update-nanoclaw/SKILL.md`
|
||||
4. `rm /tmp/nanoclaw-diagnostics.json`
|
||||
@@ -233,3 +233,8 @@ Tell the user:
|
||||
- Restart the service to apply changes:
|
||||
- If using launchd: `launchctl unload ~/Library/LaunchAgents/com.nanoclaw.plist && launchctl load ~/Library/LaunchAgents/com.nanoclaw.plist`
|
||||
- If running manually: restart `npm run dev`
|
||||
|
||||
|
||||
## Diagnostics
|
||||
|
||||
Send diagnostics data by following `.claude/skills/update-nanoclaw/diagnostics.md`.
|
||||
|
||||
49
.claude/skills/update-nanoclaw/diagnostics.md
Normal file
49
.claude/skills/update-nanoclaw/diagnostics.md
Normal file
@@ -0,0 +1,49 @@
|
||||
# Diagnostics
|
||||
|
||||
Gather system info:
|
||||
|
||||
```bash
|
||||
node -p "require('./package.json').version"
|
||||
uname -s
|
||||
uname -m
|
||||
node -p "process.versions.node.split('.')[0]"
|
||||
git log -1 --format=%ci HEAD@{1} 2>/dev/null || echo "unknown"
|
||||
```
|
||||
|
||||
Write `/tmp/nanoclaw-diagnostics.json`. No paths, usernames, hostnames, or IP addresses.
|
||||
|
||||
```json
|
||||
{
|
||||
"api_key": "phc_fx1Hhx9ucz8GuaJC8LVZWO8u03yXZZJJ6ObS4yplnaP",
|
||||
"event": "update_complete",
|
||||
"distinct_id": "<uuid>",
|
||||
"properties": {
|
||||
"success": true,
|
||||
"nanoclaw_version": "1.2.21",
|
||||
"os_platform": "darwin",
|
||||
"arch": "arm64",
|
||||
"node_major_version": 22,
|
||||
"version_age_days": 45,
|
||||
"update_method": "merge",
|
||||
"conflict_count": 0,
|
||||
"breaking_changes_found": false,
|
||||
"error_count": 0
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Show the entire JSON to the user and ask via AskUserQuestion: **Yes** / **No** / **Never ask again**
|
||||
|
||||
**Yes**:
|
||||
```bash
|
||||
curl -s -X POST https://us.i.posthog.com/capture/ -H 'Content-Type: application/json' -d @/tmp/nanoclaw-diagnostics.json
|
||||
rm /tmp/nanoclaw-diagnostics.json
|
||||
```
|
||||
|
||||
**No**: `rm /tmp/nanoclaw-diagnostics.json`
|
||||
|
||||
**Never ask again**:
|
||||
1. Replace contents of `.claude/skills/setup/diagnostics.md` with `# Diagnostics — opted out`
|
||||
2. Replace contents of `.claude/skills/update-nanoclaw/diagnostics.md` with `# Diagnostics — opted out`
|
||||
3. Remove the `## 9. Diagnostics` section from `.claude/skills/setup/SKILL.md` and the `## Diagnostics` section from `.claude/skills/update-nanoclaw/SKILL.md`
|
||||
4. `rm /tmp/nanoclaw-diagnostics.json`
|
||||
157
.claude/skills/use-native-credential-proxy/SKILL.md
Normal file
157
.claude/skills/use-native-credential-proxy/SKILL.md
Normal file
@@ -0,0 +1,157 @@
|
||||
---
|
||||
name: use-native-credential-proxy
|
||||
description: Replace OneCLI gateway with the built-in credential proxy. For users who want simple .env-based credential management without installing OneCLI. Reads API key or OAuth token from .env and injects into container API requests.
|
||||
---
|
||||
|
||||
# Use Native Credential Proxy
|
||||
|
||||
This skill replaces the OneCLI gateway with NanoClaw's built-in credential proxy. Containers get credentials injected via a local HTTP proxy that reads from `.env` — no external services needed.
|
||||
|
||||
## Phase 1: Pre-flight
|
||||
|
||||
### Check if already applied
|
||||
|
||||
Check if `src/credential-proxy.ts` is imported in `src/index.ts`:
|
||||
|
||||
```bash
|
||||
grep "credential-proxy" src/index.ts
|
||||
```
|
||||
|
||||
If it shows an import for `startCredentialProxy`, the native proxy is already active. Skip to Phase 3 (Setup).
|
||||
|
||||
### Check if OneCLI is active
|
||||
|
||||
```bash
|
||||
grep "@onecli-sh/sdk" package.json
|
||||
```
|
||||
|
||||
If `@onecli-sh/sdk` appears, OneCLI is the active credential provider. Proceed with Phase 2 to replace it.
|
||||
|
||||
If neither check matches, you may be on an older version. Run `/update-nanoclaw` first, then retry.
|
||||
|
||||
## Phase 2: Apply Code Changes
|
||||
|
||||
### Ensure upstream remote
|
||||
|
||||
```bash
|
||||
git remote -v
|
||||
```
|
||||
|
||||
If `upstream` is missing, add it:
|
||||
|
||||
```bash
|
||||
git remote add upstream https://github.com/qwibitai/nanoclaw.git
|
||||
```
|
||||
|
||||
### Merge the skill branch
|
||||
|
||||
```bash
|
||||
git fetch upstream skill/native-credential-proxy
|
||||
git merge upstream/skill/native-credential-proxy || {
|
||||
git checkout --theirs package-lock.json
|
||||
git add package-lock.json
|
||||
git merge --continue
|
||||
}
|
||||
```
|
||||
|
||||
This merges in:
|
||||
- `src/credential-proxy.ts` and `src/credential-proxy.test.ts` (the proxy implementation)
|
||||
- Restored credential proxy usage in `src/index.ts`, `src/container-runner.ts`, `src/container-runtime.ts`, `src/config.ts`
|
||||
- Removed `@onecli-sh/sdk` dependency
|
||||
- Restored `CREDENTIAL_PROXY_PORT` config (default 3001)
|
||||
- Restored platform-aware proxy bind address detection
|
||||
- Reverted setup skill to `.env`-based credential instructions
|
||||
|
||||
If the merge reports conflicts beyond `package-lock.json`, resolve them by reading the conflicted files and understanding the intent of both sides.
|
||||
|
||||
### Validate code changes
|
||||
|
||||
```bash
|
||||
npm install
|
||||
npm run build
|
||||
npx vitest run src/credential-proxy.test.ts src/container-runner.test.ts
|
||||
```
|
||||
|
||||
All tests must pass and build must be clean before proceeding.
|
||||
|
||||
## Phase 3: Setup Credentials
|
||||
|
||||
AskUserQuestion: Do you want to use your **Claude subscription** (Pro/Max) or an **Anthropic API key**?
|
||||
|
||||
1. **Claude subscription (Pro/Max)** — description: "Uses your existing Claude Pro or Max subscription. You'll run `claude setup-token` in another terminal to get your token."
|
||||
2. **Anthropic API key** — description: "Pay-per-use API key from console.anthropic.com."
|
||||
|
||||
### Subscription path
|
||||
|
||||
Tell the user to run `claude setup-token` in another terminal and copy the token it outputs. Do NOT collect the token in chat.
|
||||
|
||||
Once they have the token, add it to `.env`:
|
||||
|
||||
```bash
|
||||
# Add to .env (create file if needed)
|
||||
echo 'CLAUDE_CODE_OAUTH_TOKEN=<token>' >> .env
|
||||
```
|
||||
|
||||
Note: `ANTHROPIC_AUTH_TOKEN` is also supported as a fallback.
|
||||
|
||||
### API key path
|
||||
|
||||
Tell the user to get an API key from https://console.anthropic.com/settings/keys if they don't have one.
|
||||
|
||||
Add it to `.env`:
|
||||
|
||||
```bash
|
||||
echo 'ANTHROPIC_API_KEY=<key>' >> .env
|
||||
```
|
||||
|
||||
### After either path
|
||||
|
||||
**If the user's response happens to contain a token or key** (starts with `sk-ant-` or looks like a token): write it to `.env` on their behalf using the appropriate variable name.
|
||||
|
||||
**Optional:** If the user needs a custom API endpoint, they can add `ANTHROPIC_BASE_URL=<url>` to `.env` (defaults to `https://api.anthropic.com`).
|
||||
|
||||
## Phase 4: Verify
|
||||
|
||||
1. Rebuild and restart:
|
||||
|
||||
```bash
|
||||
npm run build
|
||||
```
|
||||
|
||||
Then restart the service:
|
||||
- macOS: `launchctl kickstart -k gui/$(id -u)/com.nanoclaw`
|
||||
- Linux: `systemctl --user restart nanoclaw`
|
||||
- WSL/manual: stop and re-run `bash start-nanoclaw.sh`
|
||||
|
||||
2. Check logs for successful proxy startup:
|
||||
|
||||
```bash
|
||||
tail -20 logs/nanoclaw.log | grep "Credential proxy"
|
||||
```
|
||||
|
||||
Expected: `Credential proxy started` with port and auth mode.
|
||||
|
||||
3. Send a test message in the registered chat to verify the agent responds.
|
||||
|
||||
4. Note: after applying this skill, the OneCLI credential steps in `/setup` no longer apply. `.env` is now the credential source.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
**"Credential proxy upstream error" in logs:** Check that `.env` has a valid `ANTHROPIC_API_KEY` or `CLAUDE_CODE_OAUTH_TOKEN`. Verify the API is reachable: `curl -s https://api.anthropic.com/v1/messages -H "x-api-key: test" | head`.
|
||||
|
||||
**Port 3001 already in use:** Set `CREDENTIAL_PROXY_PORT=<other port>` in `.env` or as an environment variable.
|
||||
|
||||
**Container can't reach proxy (Linux):** The proxy binds to the `docker0` bridge IP by default. If that interface doesn't exist (e.g. rootless Docker), set `CREDENTIAL_PROXY_HOST=0.0.0.0` as an environment variable.
|
||||
|
||||
**OAuth token expired (401 errors):** Re-run `claude setup-token` in a terminal and update the token in `.env`.
|
||||
|
||||
## Removal
|
||||
|
||||
To revert to OneCLI gateway:
|
||||
|
||||
1. Find the merge commit: `git log --oneline --merges -5`
|
||||
2. Revert it: `git revert <merge-commit> -m 1` (undoes the skill branch merge, keeps your other changes)
|
||||
3. `npm install` (re-adds `@onecli-sh/sdk`)
|
||||
4. `npm run build`
|
||||
5. Follow `/setup` step 4 to configure OneCLI credentials
|
||||
6. Remove `ANTHROPIC_API_KEY` / `CLAUDE_CODE_OAUTH_TOKEN` from `.env`
|
||||
10
.github/PULL_REQUEST_TEMPLATE.md
vendored
10
.github/PULL_REQUEST_TEMPLATE.md
vendored
@@ -1,14 +1,18 @@
|
||||
<!-- contributing-guide: v1 -->
|
||||
## Type of Change
|
||||
|
||||
- [ ] **Skill** - adds a new skill in `.claude/skills/`
|
||||
- [ ] **Feature skill** - adds a channel or integration (source code changes + SKILL.md)
|
||||
- [ ] **Utility skill** - adds a standalone tool (code files in `.claude/skills/<name>/`, no source changes)
|
||||
- [ ] **Operational/container skill** - adds a workflow or agent skill (SKILL.md only, no source changes)
|
||||
- [ ] **Fix** - bug fix or security fix to source code
|
||||
- [ ] **Simplification** - reduces or simplifies source code
|
||||
- [ ] **Documentation** - docs, README, or CONTRIBUTING changes only
|
||||
|
||||
## Description
|
||||
|
||||
|
||||
## For Skills
|
||||
|
||||
- [ ] I have not made any changes to source code
|
||||
- [ ] My skill contains instructions for Claude to follow (not pre-built code)
|
||||
- [ ] SKILL.md contains instructions, not inline code (code goes in separate files)
|
||||
- [ ] SKILL.md is under 500 lines
|
||||
- [ ] I tested this skill on a fresh clone
|
||||
|
||||
35
.github/workflows/label-pr.yml
vendored
Normal file
35
.github/workflows/label-pr.yml
vendored
Normal file
@@ -0,0 +1,35 @@
|
||||
name: Label PR
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
types: [opened, edited]
|
||||
|
||||
jobs:
|
||||
label:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
pull-requests: write
|
||||
steps:
|
||||
- uses: actions/github-script@v7
|
||||
with:
|
||||
script: |
|
||||
const body = context.payload.pull_request.body || '';
|
||||
const labels = [];
|
||||
|
||||
if (body.includes('[x] **Feature skill**')) { labels.push('PR: Skill'); labels.push('PR: Feature'); }
|
||||
else if (body.includes('[x] **Utility skill**')) labels.push('PR: Skill');
|
||||
else if (body.includes('[x] **Operational/container skill**')) labels.push('PR: Skill');
|
||||
else if (body.includes('[x] **Fix**')) labels.push('PR: Fix');
|
||||
else if (body.includes('[x] **Simplification**')) labels.push('PR: Refactor');
|
||||
else if (body.includes('[x] **Documentation**')) labels.push('PR: Docs');
|
||||
|
||||
if (body.includes('contributing-guide: v1')) labels.push('follows-guidelines');
|
||||
|
||||
if (labels.length > 0) {
|
||||
await github.rest.issues.addLabels({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: context.payload.pull_request.number,
|
||||
labels,
|
||||
});
|
||||
}
|
||||
19
CLAUDE.md
19
CLAUDE.md
@@ -19,10 +19,21 @@ Single Node.js process with skill-based channel system. Channels (WhatsApp, Tele
|
||||
| `src/task-scheduler.ts` | Runs scheduled tasks |
|
||||
| `src/db.ts` | SQLite operations |
|
||||
| `groups/{name}/CLAUDE.md` | Per-group memory (isolated) |
|
||||
| `container/skills/agent-browser.md` | Browser automation tool (available to all agents via Bash) |
|
||||
| `container/skills/` | Skills loaded inside agent containers (browser, status, formatting) |
|
||||
|
||||
## Secrets / Credentials / Proxy (OneCLI)
|
||||
|
||||
API keys, secret keys, OAuth tokens, and auth credentials are managed by the OneCLI gateway — which handles secret injection into containers at request time, so no keys or tokens are ever passed to containers directly. Run `onecli --help`.
|
||||
|
||||
## Skills
|
||||
|
||||
Four types of skills exist in NanoClaw. See [CONTRIBUTING.md](CONTRIBUTING.md) for the full taxonomy and guidelines.
|
||||
|
||||
- **Feature skills** — merge a `skill/*` branch to add capabilities (e.g. `/add-telegram`, `/add-slack`)
|
||||
- **Utility skills** — ship code files alongside SKILL.md (e.g. `/claw`)
|
||||
- **Operational skills** — instruction-only workflows, always on `main` (e.g. `/setup`, `/debug`)
|
||||
- **Container skills** — loaded inside agent containers at runtime (`container/skills/`)
|
||||
|
||||
| Skill | When to Use |
|
||||
|-------|-------------|
|
||||
| `/setup` | First-time installation, authentication, service configuration |
|
||||
@@ -32,6 +43,10 @@ Single Node.js process with skill-based channel system. Channels (WhatsApp, Tele
|
||||
| `/qodo-pr-resolver` | Fetch and fix Qodo PR review issues interactively or in batch |
|
||||
| `/get-qodo-rules` | Load org- and repo-level coding rules from Qodo before code tasks |
|
||||
|
||||
## Contributing
|
||||
|
||||
Before creating a PR, adding a skill, or preparing any contribution, you MUST read [CONTRIBUTING.md](CONTRIBUTING.md). It covers accepted change types, the four skill types and their guidelines, SKILL.md format rules, PR requirements, and the pre-submission checklist (searching for existing PRs/issues, testing, description format).
|
||||
|
||||
## Development
|
||||
|
||||
Run commands directly—don't tell the user to run them.
|
||||
@@ -57,7 +72,7 @@ systemctl --user restart nanoclaw
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
**WhatsApp not connecting after upgrade:** WhatsApp is now a separate channel fork, not bundled in core. Run `/add-whatsapp` (or `git remote add whatsapp https://github.com/qwibitai/nanoclaw-whatsapp.git && git fetch whatsapp main && (git merge whatsapp/main || { git checkout --theirs package-lock.json && git add package-lock.json && git merge --continue; }) && npm run build`) to install it. Existing auth credentials and groups are preserved.
|
||||
**WhatsApp not connecting after upgrade:** WhatsApp is now a separate skill, not bundled in core. Run `/add-whatsapp` (or `npx tsx scripts/apply-skill.ts .claude/skills/add-whatsapp && npm run build`) to install it. Existing auth credentials and groups are preserved.
|
||||
|
||||
## Container Build Cache
|
||||
|
||||
|
||||
140
CONTRIBUTING.md
140
CONTRIBUTING.md
@@ -1,5 +1,18 @@
|
||||
# Contributing
|
||||
|
||||
## Before You Start
|
||||
|
||||
1. **Check for existing work.** Search open PRs and issues before starting:
|
||||
```bash
|
||||
gh pr list --repo qwibitai/nanoclaw --search "<your feature>"
|
||||
gh issue list --repo qwibitai/nanoclaw --search "<your feature>"
|
||||
```
|
||||
If a related PR or issue exists, build on it rather than duplicating effort.
|
||||
|
||||
2. **Check alignment.** Read the [Philosophy section in README.md](README.md#philosophy). Source code changes should only be things 90%+ of users need. Skills can be more niche, but should still be useful beyond a single person's setup.
|
||||
|
||||
3. **One thing per PR.** Each PR should do one thing — one bug fix, one skill, one simplification. Don't mix unrelated changes in a single PR.
|
||||
|
||||
## Source Code Changes
|
||||
|
||||
**Accepted:** Bug fixes, security fixes, simplifications, reducing code.
|
||||
@@ -8,16 +21,127 @@
|
||||
|
||||
## Skills
|
||||
|
||||
A [skill](https://code.claude.com/docs/en/skills) is a markdown file in `.claude/skills/` that teaches Claude Code how to transform a NanoClaw installation.
|
||||
NanoClaw uses [Claude Code skills](https://code.claude.com/docs/en/skills) — markdown files with optional supporting files that teach Claude how to do something. There are four types of skills in NanoClaw, each serving a different purpose.
|
||||
|
||||
A PR that contributes a skill should not modify any source files.
|
||||
|
||||
Your skill should contain the **instructions** Claude follows to add the feature—not pre-built code. See `/add-telegram` for a good example.
|
||||
|
||||
### Why?
|
||||
### Why skills?
|
||||
|
||||
Every user should have clean and minimal code that does exactly what they need. Skills let users selectively add features to their fork without inheriting code for features they don't want.
|
||||
|
||||
### Testing
|
||||
### Skill types
|
||||
|
||||
Test your skill by running it on a fresh clone before submitting.
|
||||
#### 1. Feature skills (branch-based)
|
||||
|
||||
Add capabilities to NanoClaw by merging a git branch. The SKILL.md contains setup instructions; the actual code lives on a `skill/*` branch.
|
||||
|
||||
**Location:** `.claude/skills/` on `main` (instructions only), code on `skill/*` branch
|
||||
|
||||
**Examples:** `/add-telegram`, `/add-slack`, `/add-discord`, `/add-gmail`
|
||||
|
||||
**How they work:**
|
||||
1. User runs `/add-telegram`
|
||||
2. Claude follows the SKILL.md: fetches and merges the `skill/telegram` branch
|
||||
3. Claude walks through interactive setup (env vars, bot creation, etc.)
|
||||
|
||||
**Contributing a feature skill:**
|
||||
1. Fork `qwibitai/nanoclaw` and branch from `main`
|
||||
2. Make the code changes (new files, modified source, updated `package.json`, etc.)
|
||||
3. Add a SKILL.md in `.claude/skills/<name>/` with setup instructions — step 1 should be merging the branch
|
||||
4. Open a PR. We'll create the `skill/<name>` branch from your work
|
||||
|
||||
See `/add-telegram` for a good example. See [docs/skills-as-branches.md](docs/skills-as-branches.md) for the full system design.
|
||||
|
||||
#### 2. Utility skills (with code files)
|
||||
|
||||
Standalone tools that ship code files alongside the SKILL.md. The SKILL.md tells Claude how to install the tool; the code lives in the skill directory itself (e.g. in a `scripts/` subfolder).
|
||||
|
||||
**Location:** `.claude/skills/<name>/` with supporting files
|
||||
|
||||
**Examples:** `/claw` (Python CLI in `scripts/claw`)
|
||||
|
||||
**Key difference from feature skills:** No branch merge needed. The code is self-contained in the skill directory and gets copied into place during installation.
|
||||
|
||||
**Guidelines:**
|
||||
- Put code in separate files, not inline in the SKILL.md
|
||||
- Use `${CLAUDE_SKILL_DIR}` to reference files in the skill directory
|
||||
- SKILL.md contains installation instructions, usage docs, and troubleshooting
|
||||
|
||||
#### 3. Operational skills (instruction-only)
|
||||
|
||||
Workflows and guides with no code changes. The SKILL.md is the entire skill — Claude follows the instructions to perform a task.
|
||||
|
||||
**Location:** `.claude/skills/` on `main`
|
||||
|
||||
**Examples:** `/setup`, `/debug`, `/customize`, `/update-nanoclaw`, `/update-skills`
|
||||
|
||||
**Guidelines:**
|
||||
- Pure instructions — no code files, no branch merges
|
||||
- Use `AskUserQuestion` for interactive prompts
|
||||
- These stay on `main` and are always available to every user
|
||||
|
||||
#### 4. Container skills (agent runtime)
|
||||
|
||||
Skills that run inside the agent container, not on the host. These teach the container agent how to use tools, format output, or perform tasks. They are synced into each group's `.claude/skills/` directory when a container starts.
|
||||
|
||||
**Location:** `container/skills/<name>/`
|
||||
|
||||
**Examples:** `agent-browser` (web browsing), `capabilities` (/capabilities command), `status` (/status command), `slack-formatting` (Slack mrkdwn syntax)
|
||||
|
||||
**Key difference:** These are NOT invoked by the user on the host. They're loaded by Claude Code inside the container and influence how the agent behaves.
|
||||
|
||||
**Guidelines:**
|
||||
- Follow the same SKILL.md + frontmatter format
|
||||
- Use `allowed-tools` frontmatter to scope tool permissions
|
||||
- Keep them focused — the agent's context window is shared across all container skills
|
||||
|
||||
### SKILL.md format
|
||||
|
||||
All skills use the [Claude Code skills standard](https://code.claude.com/docs/en/skills):
|
||||
|
||||
```markdown
|
||||
---
|
||||
name: my-skill
|
||||
description: What this skill does and when to use it.
|
||||
---
|
||||
|
||||
Instructions here...
|
||||
```
|
||||
|
||||
**Rules:**
|
||||
- Keep SKILL.md **under 500 lines** — move detail to separate reference files
|
||||
- `name`: lowercase, alphanumeric + hyphens, max 64 chars
|
||||
- `description`: required — Claude uses this to decide when to invoke the skill
|
||||
- Put code in separate files, not inline in the markdown
|
||||
- See the [skills standard](https://code.claude.com/docs/en/skills) for all available frontmatter fields
|
||||
|
||||
## Testing
|
||||
|
||||
Test your contribution on a fresh clone before submitting. For skills, run the skill end-to-end and verify it works.
|
||||
|
||||
## Pull Requests
|
||||
|
||||
### Before opening
|
||||
|
||||
1. **Link related issues.** If your PR resolves an open issue, include `Closes #123` in the description so it's auto-closed on merge.
|
||||
2. **Test thoroughly.** Run the feature yourself. For skills, test on a fresh clone.
|
||||
3. **Check the right box** in the PR template. Labels are auto-applied based on your selection:
|
||||
|
||||
| Checkbox | Label |
|
||||
|----------|-------|
|
||||
| Feature skill | `PR: Skill` + `PR: Feature` |
|
||||
| Utility skill | `PR: Skill` |
|
||||
| Operational/container skill | `PR: Skill` |
|
||||
| Fix | `PR: Fix` |
|
||||
| Simplification | `PR: Refactor` |
|
||||
| Documentation | `PR: Docs` |
|
||||
|
||||
### PR description
|
||||
|
||||
Keep it concise. Remove any template sections that don't apply. The description should cover:
|
||||
|
||||
- **What** — what the PR adds or changes
|
||||
- **Why** — the motivation
|
||||
- **How it works** — brief explanation of the approach
|
||||
- **How it was tested** — what you did to verify it works
|
||||
- **Usage** — how the user invokes it (for skills)
|
||||
|
||||
Don't pad the description. A few clear sentences are better than lengthy paragraphs.
|
||||
|
||||
27
README.md
27
README.md
@@ -9,31 +9,13 @@
|
||||
<p align="center">
|
||||
<a href="https://nanoclaw.dev">nanoclaw.dev</a> •
|
||||
<a href="README_zh.md">中文</a> •
|
||||
<a href="README_ja.md">日本語</a> •
|
||||
<a href="https://discord.gg/VDdww8qS42"><img src="https://img.shields.io/discord/1470188214710046894?label=Discord&logo=discord&v=2" alt="Discord" valign="middle"></a> •
|
||||
<a href="repo-tokens"><img src="repo-tokens/badge.svg" alt="34.9k tokens, 17% of context window" valign="middle"></a>
|
||||
</p>
|
||||
|
||||
---
|
||||
|
||||
<h2 align="center">🐳 Now Runs in Docker Sandboxes</h2>
|
||||
<p align="center">Every agent gets its own isolated container inside a micro VM.<br>Hypervisor-level isolation. Millisecond startup. No complex setup.</p>
|
||||
|
||||
**macOS (Apple Silicon)**
|
||||
```bash
|
||||
curl -fsSL https://nanoclaw.dev/install-docker-sandboxes.sh | bash
|
||||
```
|
||||
|
||||
**Windows (WSL)**
|
||||
```bash
|
||||
curl -fsSL https://nanoclaw.dev/install-docker-sandboxes-windows.sh | bash
|
||||
```
|
||||
|
||||
> Currently supported on macOS (Apple Silicon) and Windows (x86). Linux support coming soon.
|
||||
|
||||
<p align="center"><a href="https://nanoclaw.dev/blog/nanoclaw-docker-sandboxes">Read the announcement →</a> · <a href="docs/docker-sandboxes.md">Manual setup guide →</a></p>
|
||||
|
||||
---
|
||||
|
||||
## Why I Built NanoClaw
|
||||
|
||||
[OpenClaw](https://github.com/openclaw/openclaw) is an impressive project, but I wouldn't have been able to sleep if I had given complex software I didn't understand full access to my life. OpenClaw has nearly half a million lines of code, 53 config files, and 70+ dependencies. Its security is at the application level (allowlists, pairing codes) rather than true OS-level isolation. Everything runs in one Node process with shared memory.
|
||||
@@ -88,7 +70,7 @@ Then run `/setup`. Claude Code handles everything: dependencies, authentication,
|
||||
- **Main channel** - Your private channel (self-chat) for admin control; every group is completely isolated
|
||||
- **Scheduled tasks** - Recurring jobs that run Claude and can message you back
|
||||
- **Web access** - Search and fetch content from the Web
|
||||
- **Container isolation** - Agents are sandboxed in [Docker Sandboxes](https://nanoclaw.dev/blog/nanoclaw-docker-sandboxes) (micro VM isolation), Apple Container (macOS), or Docker (macOS/Linux)
|
||||
- **Container isolation** - Agents are sandboxed in Docker (macOS/Linux), [Docker Sandboxes](docs/docker-sandboxes.md) (micro VM isolation), or Apple Container (macOS)
|
||||
- **Agent Swarms** - Spin up teams of specialized agents that collaborate on complex tasks
|
||||
- **Optional integrations** - Add Gmail (`/add-gmail`) and more via skills
|
||||
|
||||
@@ -137,9 +119,6 @@ Skills we'd like to see:
|
||||
**Communication Channels**
|
||||
- `/add-signal` - Add Signal as a channel
|
||||
|
||||
**Session Management**
|
||||
- `/clear` - Add a `/clear` command that compacts the conversation (summarizes context while preserving critical information in the same session). Requires figuring out how to trigger compaction programmatically via the Claude Agent SDK.
|
||||
|
||||
## Requirements
|
||||
|
||||
- macOS or Linux
|
||||
@@ -172,7 +151,7 @@ Key files:
|
||||
|
||||
**Why Docker?**
|
||||
|
||||
Docker provides cross-platform support (macOS, Linux and even Windows via WSL2) and a mature ecosystem. On macOS, you can optionally switch to Apple Container via `/convert-to-apple-container` for a lighter-weight native runtime.
|
||||
Docker provides cross-platform support (macOS, Linux and even Windows via WSL2) and a mature ecosystem. On macOS, you can optionally switch to Apple Container via `/convert-to-apple-container` for a lighter-weight native runtime. For additional isolation, [Docker Sandboxes](docs/docker-sandboxes.md) run each container inside a micro VM.
|
||||
|
||||
**Can I run this on Linux?**
|
||||
|
||||
|
||||
232
README_ja.md
Normal file
232
README_ja.md
Normal file
@@ -0,0 +1,232 @@
|
||||
<p align="center">
|
||||
<img src="assets/nanoclaw-logo.png" alt="NanoClaw" width="400">
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
エージェントを専用コンテナで安全に実行するAIアシスタント。軽量で、理解しやすく、あなたのニーズに完全にカスタマイズできるように設計されています。
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://nanoclaw.dev">nanoclaw.dev</a> •
|
||||
<a href="README.md">English</a> •
|
||||
<a href="README_zh.md">中文</a> •
|
||||
<a href="https://discord.gg/VDdww8qS42"><img src="https://img.shields.io/discord/1470188214710046894?label=Discord&logo=discord&v=2" alt="Discord" valign="middle"></a> •
|
||||
<a href="repo-tokens"><img src="repo-tokens/badge.svg" alt="34.9k tokens, 17% of context window" valign="middle"></a>
|
||||
</p>
|
||||
|
||||
---
|
||||
|
||||
<h2 align="center">🐳 Dockerサンドボックスで動作</h2>
|
||||
<p align="center">各エージェントはマイクロVM内の独立したコンテナで実行されます。<br>ハイパーバイザーレベルの分離。ミリ秒で起動。複雑なセットアップ不要。</p>
|
||||
|
||||
**macOS (Apple Silicon)**
|
||||
```bash
|
||||
curl -fsSL https://nanoclaw.dev/install-docker-sandboxes.sh | bash
|
||||
```
|
||||
|
||||
**Windows (WSL)**
|
||||
```bash
|
||||
curl -fsSL https://nanoclaw.dev/install-docker-sandboxes-windows.sh | bash
|
||||
```
|
||||
|
||||
> 現在、macOS(Apple Silicon)とWindows(x86)に対応しています。Linux対応は近日公開予定。
|
||||
|
||||
<p align="center"><a href="https://nanoclaw.dev/blog/nanoclaw-docker-sandboxes">発表記事を読む →</a> · <a href="docs/docker-sandboxes.md">手動セットアップガイド →</a></p>
|
||||
|
||||
---
|
||||
|
||||
## NanoClawを作った理由
|
||||
|
||||
[OpenClaw](https://github.com/openclaw/openclaw)は素晴らしいプロジェクトですが、理解しきれない複雑なソフトウェアに自分の生活へのフルアクセスを与えたまま安心して眠れるとは思えませんでした。OpenClawは約50万行のコード、53の設定ファイル、70以上の依存関係を持っています。セキュリティはアプリケーションレベル(許可リスト、ペアリングコード)であり、真のOSレベルの分離ではありません。すべてが共有メモリを持つ1つのNodeプロセスで動作します。
|
||||
|
||||
NanoClawは同じコア機能を提供しますが、理解できる規模のコードベースで実現しています:1つのプロセスと少数のファイル。Claudeエージェントは単なるパーミッションチェックの背後ではなく、ファイルシステム分離された独自のLinuxコンテナで実行されます。
|
||||
|
||||
## クイックスタート
|
||||
|
||||
```bash
|
||||
gh repo fork qwibitai/nanoclaw --clone
|
||||
cd nanoclaw
|
||||
claude
|
||||
```
|
||||
|
||||
<details>
|
||||
<summary>GitHub CLIなしの場合</summary>
|
||||
|
||||
1. GitHub上で[qwibitai/nanoclaw](https://github.com/qwibitai/nanoclaw)をフォーク(Forkボタンをクリック)
|
||||
2. `git clone https://github.com/<あなたのユーザー名>/nanoclaw.git`
|
||||
3. `cd nanoclaw`
|
||||
4. `claude`
|
||||
|
||||
</details>
|
||||
|
||||
その後、`/setup`を実行します。Claude Codeがすべてを処理します:依存関係、認証、コンテナセットアップ、サービス設定。
|
||||
|
||||
> **注意:** `/`で始まるコマンド(`/setup`、`/add-whatsapp`など)は[Claude Codeスキル](https://code.claude.com/docs/en/skills)です。通常のターミナルではなく、`claude` CLIプロンプト内で入力してください。Claude Codeをインストールしていない場合は、[claude.com/product/claude-code](https://claude.com/product/claude-code)から入手してください。
|
||||
|
||||
## 設計思想
|
||||
|
||||
**理解できる規模。** 1つのプロセス、少数のソースファイル、マイクロサービスなし。NanoClawのコードベース全体を理解したい場合は、Claude Codeに説明を求めるだけです。
|
||||
|
||||
**分離によるセキュリティ。** エージェントはLinuxコンテナ(macOSではApple Container、またはDocker)で実行され、明示的にマウントされたものだけが見えます。コマンドはホストではなくコンテナ内で実行されるため、Bashアクセスは安全です。
|
||||
|
||||
**個人ユーザー向け。** NanoClawはモノリシックなフレームワークではなく、各ユーザーのニーズに正確にフィットするソフトウェアです。肥大化するのではなく、オーダーメイドになるよう設計されています。自分のフォークを作成し、Claude Codeにニーズに合わせて変更させます。
|
||||
|
||||
**カスタマイズ=コード変更。** 設定ファイルの肥大化なし。動作を変えたい?コードを変更するだけ。コードベースは変更しても安全な規模です。
|
||||
|
||||
**AIネイティブ。**
|
||||
- インストールウィザードなし — Claude Codeがセットアップを案内。
|
||||
- モニタリングダッシュボードなし — Claudeに状況を聞くだけ。
|
||||
- デバッグツールなし — 問題を説明すればClaudeが修正。
|
||||
|
||||
**機能追加ではなくスキル。** コードベースに機能(例:Telegram対応)を追加する代わりに、コントリビューターは`/add-telegram`のような[Claude Codeスキル](https://code.claude.com/docs/en/skills)を提出し、あなたのフォークを変換します。あなたが必要なものだけを正確に実行するクリーンなコードが手に入ります。
|
||||
|
||||
**最高のハーネス、最高のモデル。** NanoClawはClaude Agent SDK上で動作します。つまり、Claude Codeを直接実行しているということです。Claude Codeは高い能力を持ち、そのコーディングと問題解決能力によってNanoClawを変更・拡張し、各ユーザーに合わせてカスタマイズできます。
|
||||
|
||||
## サポート機能
|
||||
|
||||
- **マルチチャネルメッセージング** - WhatsApp、Telegram、Discord、Slack、Gmailからアシスタントと会話。`/add-whatsapp`や`/add-telegram`などのスキルでチャネルを追加。1つでも複数でも同時に実行可能。
|
||||
- **グループごとの分離コンテキスト** - 各グループは独自の`CLAUDE.md`メモリ、分離されたファイルシステムを持ち、そのファイルシステムのみがマウントされた専用コンテナサンドボックスで実行。
|
||||
- **メインチャネル** - 管理制御用のプライベートチャネル(セルフチャット)。各グループは完全に分離。
|
||||
- **スケジュールタスク** - Claudeを実行し、メッセージを返せる定期ジョブ。
|
||||
- **Webアクセス** - Webからのコンテンツ検索・取得。
|
||||
- **コンテナ分離** - エージェントは[Dockerサンドボックス](https://nanoclaw.dev/blog/nanoclaw-docker-sandboxes)(マイクロVM分離)、Apple Container(macOS)、またはDocker(macOS/Linux)でサンドボックス化。
|
||||
- **エージェントスウォーム** - 複雑なタスクで協力する専門エージェントチームを起動。
|
||||
- **オプション連携** - Gmail(`/add-gmail`)などをスキルで追加。
|
||||
|
||||
## 使い方
|
||||
|
||||
トリガーワード(デフォルト:`@Andy`)でアシスタントに話しかけます:
|
||||
|
||||
```
|
||||
@Andy 毎朝9時に営業パイプラインの概要を送って(Obsidian vaultフォルダにアクセス可能)
|
||||
@Andy 毎週金曜に過去1週間のgit履歴をレビューして、差異があればREADMEを更新して
|
||||
@Andy 毎週月曜の朝8時に、Hacker NewsとTechCrunchからAI関連のニュースをまとめてブリーフィングを送って
|
||||
```
|
||||
|
||||
メインチャネル(セルフチャット)から、グループやタスクを管理できます:
|
||||
```
|
||||
@Andy 全グループのスケジュールタスクを一覧表示して
|
||||
@Andy 月曜のブリーフィングタスクを一時停止して
|
||||
@Andy Family Chatグループに参加して
|
||||
```
|
||||
|
||||
## カスタマイズ
|
||||
|
||||
NanoClawは設定ファイルを使いません。変更するには、Claude Codeに伝えるだけです:
|
||||
|
||||
- 「トリガーワードを@Bobに変更して」
|
||||
- 「今後はレスポンスをもっと短く直接的にして」
|
||||
- 「おはようと言ったらカスタム挨拶を追加して」
|
||||
- 「会話の要約を毎週保存して」
|
||||
|
||||
または`/customize`を実行してガイド付きの変更を行えます。
|
||||
|
||||
コードベースは十分に小さいため、Claudeが安全に変更できます。
|
||||
|
||||
## コントリビューション
|
||||
|
||||
**機能を追加するのではなく、スキルを追加してください。**
|
||||
|
||||
Telegram対応を追加したい場合、コアコードベースにTelegramを追加するPRを作成しないでください。代わりに、NanoClawをフォークし、ブランチでコード変更を行い、PRを開いてください。あなたのPRから`skill/telegram`ブランチを作成し、他のユーザーが自分のフォークにマージできるようにします。
|
||||
|
||||
ユーザーは自分のフォークで`/add-telegram`を実行するだけで、あらゆるユースケースに対応しようとする肥大化したシステムではなく、必要なものだけを正確に実行するクリーンなコードが手に入ります。
|
||||
|
||||
### RFS(スキル募集)
|
||||
|
||||
私たちが求めているスキル:
|
||||
|
||||
**コミュニケーションチャネル**
|
||||
- `/add-signal` - Signalをチャネルとして追加
|
||||
|
||||
**セッション管理**
|
||||
- `/clear` - 会話をコンパクト化する`/clear`コマンドの追加(同一セッション内で重要な情報を保持しながらコンテキストを要約)。Claude Agent SDKを通じてプログラム的にコンパクト化をトリガーする方法の解明が必要。
|
||||
|
||||
## 必要条件
|
||||
|
||||
- macOSまたはLinux
|
||||
- Node.js 20以上
|
||||
- [Claude Code](https://claude.ai/download)
|
||||
- [Apple Container](https://github.com/apple/container)(macOS)または[Docker](https://docker.com/products/docker-desktop)(macOS/Linux)
|
||||
|
||||
## アーキテクチャ
|
||||
|
||||
```
|
||||
チャネル --> SQLite --> ポーリングループ --> コンテナ(Claude Agent SDK) --> レスポンス
|
||||
```
|
||||
|
||||
単一のNode.jsプロセス。チャネルはスキルで追加され、起動時に自己登録します — オーケストレーターは認証情報が存在するチャネルを接続します。エージェントはファイルシステム分離された独立したLinuxコンテナで実行されます。マウントされたディレクトリのみアクセス可能。グループごとのメッセージキューと同時実行制御。ファイルシステム経由のIPC。
|
||||
|
||||
詳細なアーキテクチャについては、[docs/SPEC.md](docs/SPEC.md)を参照してください。
|
||||
|
||||
主要ファイル:
|
||||
- `src/index.ts` - オーケストレーター:状態、メッセージループ、エージェント呼び出し
|
||||
- `src/channels/registry.ts` - チャネルレジストリ(起動時の自己登録)
|
||||
- `src/ipc.ts` - IPCウォッチャーとタスク処理
|
||||
- `src/router.ts` - メッセージフォーマットとアウトバウンドルーティング
|
||||
- `src/group-queue.ts` - グローバル同時実行制限付きのグループごとのキュー
|
||||
- `src/container-runner.ts` - ストリーミングエージェントコンテナの起動
|
||||
- `src/task-scheduler.ts` - スケジュールタスクの実行
|
||||
- `src/db.ts` - SQLite操作(メッセージ、グループ、セッション、状態)
|
||||
- `groups/*/CLAUDE.md` - グループごとのメモリ
|
||||
|
||||
## FAQ
|
||||
|
||||
**なぜDockerなのか?**
|
||||
|
||||
Dockerはクロスプラットフォーム対応(macOS、Linux、さらにWSL2経由のWindows)と成熟したエコシステムを提供します。macOSでは、`/convert-to-apple-container`でオプションとしてApple Containerに切り替え、より軽量なネイティブランタイムを使用できます。
|
||||
|
||||
**Linuxで実行できますか?**
|
||||
|
||||
はい。DockerがデフォルトのランタイムでmacOSとLinuxの両方で動作します。`/setup`を実行するだけです。
|
||||
|
||||
**セキュリティは大丈夫ですか?**
|
||||
|
||||
エージェントはアプリケーションレベルのパーミッションチェックの背後ではなく、コンテナで実行されます。明示的にマウントされたディレクトリのみアクセスできます。実行するものをレビューすべきですが、コードベースは十分に小さいため実際にレビュー可能です。完全なセキュリティモデルについては[docs/SECURITY.md](docs/SECURITY.md)を参照してください。
|
||||
|
||||
**なぜ設定ファイルがないのか?**
|
||||
|
||||
設定の肥大化を避けたいからです。すべてのユーザーがNanoClawをカスタマイズし、汎用的なシステムを設定するのではなく、コードが必要なことを正確に実行するようにすべきです。設定ファイルが欲しい場合は、Claudeに追加するよう伝えることができます。
|
||||
|
||||
**サードパーティやオープンソースモデルを使えますか?**
|
||||
|
||||
はい。NanoClawはClaude API互換のモデルエンドポイントに対応しています。`.env`ファイルで以下の環境変数を設定してください:
|
||||
|
||||
```bash
|
||||
ANTHROPIC_BASE_URL=https://your-api-endpoint.com
|
||||
ANTHROPIC_AUTH_TOKEN=your-token-here
|
||||
```
|
||||
|
||||
以下が使用可能です:
|
||||
- [Ollama](https://ollama.ai)とAPIプロキシ経由のローカルモデル
|
||||
- [Together AI](https://together.ai)、[Fireworks](https://fireworks.ai)等でホストされたオープンソースモデル
|
||||
- Anthropic互換APIのカスタムモデルデプロイメント
|
||||
|
||||
注意:最高の互換性のため、モデルはAnthropic APIフォーマットに対応している必要があります。
|
||||
|
||||
**問題のデバッグ方法は?**
|
||||
|
||||
Claude Codeに聞いてください。「スケジューラーが動いていないのはなぜ?」「最近のログには何がある?」「このメッセージに返信がなかったのはなぜ?」これがNanoClawの基盤となるAIネイティブなアプローチです。
|
||||
|
||||
**セットアップがうまくいかない場合は?**
|
||||
|
||||
問題がある場合、セットアップ中にClaudeが動的に修正を試みます。それでもうまくいかない場合は、`claude`を実行してから`/debug`を実行してください。Claudeが他のユーザーにも影響する可能性のある問題を見つけた場合は、セットアップのSKILL.mdを修正するPRを開いてください。
|
||||
|
||||
**どのような変更がコードベースに受け入れられますか?**
|
||||
|
||||
セキュリティ修正、バグ修正、明確な改善のみが基本システムに受け入れられます。それだけです。
|
||||
|
||||
それ以外のすべて(新機能、OS互換性、ハードウェアサポート、機能拡張)はスキルとしてコントリビューションすべきです。
|
||||
|
||||
これにより、基本システムを最小限に保ち、すべてのユーザーが不要な機能を継承することなく、自分のインストールをカスタマイズできます。
|
||||
|
||||
## コミュニティ
|
||||
|
||||
質問やアイデアは?[Discordに参加](https://discord.gg/VDdww8qS42)してください。
|
||||
|
||||
## 変更履歴
|
||||
|
||||
破壊的変更と移行ノートについては[CHANGELOG.md](CHANGELOG.md)を参照してください。
|
||||
|
||||
## ライセンス
|
||||
|
||||
MIT
|
||||
@@ -9,6 +9,7 @@
|
||||
<p align="center">
|
||||
<a href="https://nanoclaw.dev">nanoclaw.dev</a> •
|
||||
<a href="README.md">English</a> •
|
||||
<a href="README_ja.md">日本語</a> •
|
||||
<a href="https://discord.gg/VDdww8qS42"><img src="https://img.shields.io/discord/1470188214710046894?label=Discord&logo=discord&v=2" alt="Discord" valign="middle"></a> •
|
||||
<a href="repo-tokens"><img src="repo-tokens/badge.svg" alt="34.9k tokens, 17% of context window" valign="middle"></a>
|
||||
</p>
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
# NanoClaw Agent Container
|
||||
# Runs Claude Agent SDK in isolated Linux VM with browser automation
|
||||
|
||||
FROM node:22-slim
|
||||
FROM node:24-slim
|
||||
|
||||
# Install system dependencies for Chromium
|
||||
RUN apt-get update && apt-get install -y \
|
||||
|
||||
100
container/skills/capabilities/SKILL.md
Normal file
100
container/skills/capabilities/SKILL.md
Normal file
@@ -0,0 +1,100 @@
|
||||
---
|
||||
name: capabilities
|
||||
description: Show what this NanoClaw instance can do — installed skills, available tools, and system info. Read-only. Use when the user asks what the bot can do, what's installed, or runs /capabilities.
|
||||
---
|
||||
|
||||
# /capabilities — System Capabilities Report
|
||||
|
||||
Generate a structured read-only report of what this NanoClaw instance can do.
|
||||
|
||||
**Main-channel check:** Only the main channel has `/workspace/project` mounted. Run:
|
||||
|
||||
```bash
|
||||
test -d /workspace/project && echo "MAIN" || echo "NOT_MAIN"
|
||||
```
|
||||
|
||||
If `NOT_MAIN`, respond with:
|
||||
> This command is available in your main chat only. Send `/capabilities` there to see what I can do.
|
||||
|
||||
Then stop — do not generate the report.
|
||||
|
||||
## How to gather the information
|
||||
|
||||
Run these commands and compile the results into the report format below.
|
||||
|
||||
### 1. Installed skills
|
||||
|
||||
List skill directories available to you:
|
||||
|
||||
```bash
|
||||
ls -1 /home/node/.claude/skills/ 2>/dev/null || echo "No skills found"
|
||||
```
|
||||
|
||||
Each directory is an installed skill. The directory name is the skill name (e.g., `agent-browser` → `/agent-browser`).
|
||||
|
||||
### 2. Available tools
|
||||
|
||||
Read the allowed tools from your SDK configuration. You always have access to:
|
||||
- **Core:** Bash, Read, Write, Edit, Glob, Grep
|
||||
- **Web:** WebSearch, WebFetch
|
||||
- **Orchestration:** Task, TaskOutput, TaskStop, TeamCreate, TeamDelete, SendMessage
|
||||
- **Other:** TodoWrite, ToolSearch, Skill, NotebookEdit
|
||||
- **MCP:** mcp__nanoclaw__* (messaging, tasks, group management)
|
||||
|
||||
### 3. MCP server tools
|
||||
|
||||
The NanoClaw MCP server exposes these tools (via `mcp__nanoclaw__*` prefix):
|
||||
- `send_message` — send a message to the user/group
|
||||
- `schedule_task` — schedule a recurring or one-time task
|
||||
- `list_tasks` — list scheduled tasks
|
||||
- `pause_task` — pause a scheduled task
|
||||
- `resume_task` — resume a paused task
|
||||
- `cancel_task` — cancel and delete a task
|
||||
- `update_task` — update an existing task
|
||||
- `register_group` — register a new chat/group (main only)
|
||||
|
||||
### 4. Container skills (Bash tools)
|
||||
|
||||
Check for executable tools in the container:
|
||||
|
||||
```bash
|
||||
which agent-browser 2>/dev/null && echo "agent-browser: available" || echo "agent-browser: not found"
|
||||
```
|
||||
|
||||
### 5. Group info
|
||||
|
||||
```bash
|
||||
ls /workspace/group/CLAUDE.md 2>/dev/null && echo "Group memory: yes" || echo "Group memory: no"
|
||||
ls /workspace/extra/ 2>/dev/null && echo "Extra mounts: $(ls /workspace/extra/ 2>/dev/null | wc -l | tr -d ' ')" || echo "Extra mounts: none"
|
||||
```
|
||||
|
||||
## Report format
|
||||
|
||||
Present the report as a clean, readable message. Example:
|
||||
|
||||
```
|
||||
📋 *NanoClaw Capabilities*
|
||||
|
||||
*Installed Skills:*
|
||||
• /agent-browser — Browse the web, fill forms, extract data
|
||||
• /capabilities — This report
|
||||
(list all found skills)
|
||||
|
||||
*Tools:*
|
||||
• Core: Bash, Read, Write, Edit, Glob, Grep
|
||||
• Web: WebSearch, WebFetch
|
||||
• Orchestration: Task, TeamCreate, SendMessage
|
||||
• MCP: send_message, schedule_task, list_tasks, pause/resume/cancel/update_task, register_group
|
||||
|
||||
*Container Tools:*
|
||||
• agent-browser: ✓
|
||||
|
||||
*System:*
|
||||
• Group memory: yes/no
|
||||
• Extra mounts: N directories
|
||||
• Main channel: yes
|
||||
```
|
||||
|
||||
Adapt the output based on what you actually find — don't list things that aren't installed.
|
||||
|
||||
**See also:** `/status` for a quick health check of session, workspace, and tasks.
|
||||
94
container/skills/slack-formatting/SKILL.md
Normal file
94
container/skills/slack-formatting/SKILL.md
Normal file
@@ -0,0 +1,94 @@
|
||||
---
|
||||
name: slack-formatting
|
||||
description: Format messages for Slack using mrkdwn syntax. Use when responding to Slack channels (folder starts with "slack_" or JID contains slack identifiers).
|
||||
---
|
||||
|
||||
# Slack Message Formatting (mrkdwn)
|
||||
|
||||
When responding to Slack channels, use Slack's mrkdwn syntax instead of standard Markdown.
|
||||
|
||||
## How to detect Slack context
|
||||
|
||||
Check your group folder name or workspace path:
|
||||
- Folder starts with `slack_` (e.g., `slack_engineering`, `slack_general`)
|
||||
- Or check `/workspace/group/` path for `slack_` prefix
|
||||
|
||||
## Formatting reference
|
||||
|
||||
### Text styles
|
||||
|
||||
| Style | Syntax | Example |
|
||||
|-------|--------|---------|
|
||||
| Bold | `*text*` | *bold text* |
|
||||
| Italic | `_text_` | _italic text_ |
|
||||
| Strikethrough | `~text~` | ~strikethrough~ |
|
||||
| Code (inline) | `` `code` `` | `inline code` |
|
||||
| Code block | ` ```code``` ` | Multi-line code |
|
||||
|
||||
### Links and mentions
|
||||
|
||||
```
|
||||
<https://example.com|Link text> # Named link
|
||||
<https://example.com> # Auto-linked URL
|
||||
<@U1234567890> # Mention user by ID
|
||||
<#C1234567890> # Mention channel by ID
|
||||
<!here> # @here
|
||||
<!channel> # @channel
|
||||
```
|
||||
|
||||
### Lists
|
||||
|
||||
Slack supports simple bullet lists but NOT numbered lists:
|
||||
|
||||
```
|
||||
• First item
|
||||
• Second item
|
||||
• Third item
|
||||
```
|
||||
|
||||
Use `•` (bullet character) or `- ` or `* ` for bullets.
|
||||
|
||||
### Block quotes
|
||||
|
||||
```
|
||||
> This is a block quote
|
||||
> It can span multiple lines
|
||||
```
|
||||
|
||||
### Emoji
|
||||
|
||||
Use standard emoji shortcodes: `:white_check_mark:`, `:x:`, `:rocket:`, `:tada:`
|
||||
|
||||
## What NOT to use
|
||||
|
||||
- **NO** `##` headings (use `*Bold text*` for headers instead)
|
||||
- **NO** `**double asterisks**` for bold (use `*single asterisks*`)
|
||||
- **NO** `[text](url)` links (use `<url|text>` instead)
|
||||
- **NO** `1.` numbered lists (use bullets with numbers: `• 1. First`)
|
||||
- **NO** tables (use code blocks or plain text alignment)
|
||||
- **NO** `---` horizontal rules
|
||||
|
||||
## Example message
|
||||
|
||||
```
|
||||
*Daily Standup Summary*
|
||||
|
||||
_March 21, 2026_
|
||||
|
||||
• *Completed:* Fixed authentication bug in login flow
|
||||
• *In Progress:* Building new dashboard widgets
|
||||
• *Blocked:* Waiting on API access from DevOps
|
||||
|
||||
> Next sync: Monday 10am
|
||||
|
||||
:white_check_mark: All tests passing | <https://ci.example.com/builds/123|View Build>
|
||||
```
|
||||
|
||||
## Quick rules
|
||||
|
||||
1. Use `*bold*` not `**bold**`
|
||||
2. Use `<url|text>` not `[text](url)`
|
||||
3. Use `•` bullets, avoid numbered lists
|
||||
4. Use `:emoji:` shortcodes
|
||||
5. Quote blocks with `>`
|
||||
6. Skip headings — use bold text instead
|
||||
104
container/skills/status/SKILL.md
Normal file
104
container/skills/status/SKILL.md
Normal file
@@ -0,0 +1,104 @@
|
||||
---
|
||||
name: status
|
||||
description: Quick read-only health check — session context, workspace mounts, tool availability, and task snapshot. Use when the user asks for system status or runs /status.
|
||||
---
|
||||
|
||||
# /status — System Status Check
|
||||
|
||||
Generate a quick read-only status report of the current agent environment.
|
||||
|
||||
**Main-channel check:** Only the main channel has `/workspace/project` mounted. Run:
|
||||
|
||||
```bash
|
||||
test -d /workspace/project && echo "MAIN" || echo "NOT_MAIN"
|
||||
```
|
||||
|
||||
If `NOT_MAIN`, respond with:
|
||||
> This command is available in your main chat only. Send `/status` there to check system status.
|
||||
|
||||
Then stop — do not generate the report.
|
||||
|
||||
## How to gather the information
|
||||
|
||||
Run the checks below and compile results into the report format.
|
||||
|
||||
### 1. Session context
|
||||
|
||||
```bash
|
||||
echo "Timestamp: $(date)"
|
||||
echo "Working dir: $(pwd)"
|
||||
echo "Channel: main"
|
||||
```
|
||||
|
||||
### 2. Workspace and mount visibility
|
||||
|
||||
```bash
|
||||
echo "=== Workspace ==="
|
||||
ls /workspace/ 2>/dev/null
|
||||
echo "=== Group folder ==="
|
||||
ls /workspace/group/ 2>/dev/null | head -20
|
||||
echo "=== Extra mounts ==="
|
||||
ls /workspace/extra/ 2>/dev/null || echo "none"
|
||||
echo "=== IPC ==="
|
||||
ls /workspace/ipc/ 2>/dev/null
|
||||
```
|
||||
|
||||
### 3. Tool availability
|
||||
|
||||
Confirm which tool families are available to you:
|
||||
|
||||
- **Core:** Bash, Read, Write, Edit, Glob, Grep
|
||||
- **Web:** WebSearch, WebFetch
|
||||
- **Orchestration:** Task, TaskOutput, TaskStop, TeamCreate, TeamDelete, SendMessage
|
||||
- **MCP:** mcp__nanoclaw__* (send_message, schedule_task, list_tasks, pause_task, resume_task, cancel_task, update_task, register_group)
|
||||
|
||||
### 4. Container utilities
|
||||
|
||||
```bash
|
||||
which agent-browser 2>/dev/null && echo "agent-browser: available" || echo "agent-browser: not installed"
|
||||
node --version 2>/dev/null
|
||||
claude --version 2>/dev/null
|
||||
```
|
||||
|
||||
### 5. Task snapshot
|
||||
|
||||
Use the MCP tool to list tasks:
|
||||
|
||||
```
|
||||
Call mcp__nanoclaw__list_tasks to get scheduled tasks.
|
||||
```
|
||||
|
||||
If no tasks exist, report "No scheduled tasks."
|
||||
|
||||
## Report format
|
||||
|
||||
Present as a clean, readable message:
|
||||
|
||||
```
|
||||
🔍 *NanoClaw Status*
|
||||
|
||||
*Session:*
|
||||
• Channel: main
|
||||
• Time: 2026-03-14 09:30 UTC
|
||||
• Working dir: /workspace/group
|
||||
|
||||
*Workspace:*
|
||||
• Group folder: ✓ (N files)
|
||||
• Extra mounts: none / N directories
|
||||
• IPC: ✓ (messages, tasks, input)
|
||||
|
||||
*Tools:*
|
||||
• Core: ✓ Web: ✓ Orchestration: ✓ MCP: ✓
|
||||
|
||||
*Container:*
|
||||
• agent-browser: ✓ / not installed
|
||||
• Node: vXX.X.X
|
||||
• Claude Code: vX.X.X
|
||||
|
||||
*Scheduled Tasks:*
|
||||
• N active tasks / No scheduled tasks
|
||||
```
|
||||
|
||||
Adapt based on what you actually find. Keep it concise — this is a quick health check, not a deep diagnostic.
|
||||
|
||||
**See also:** `/capabilities` for a full list of installed skills and tools.
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,168 +0,0 @@
|
||||
# NanoClaw Skills Architecture
|
||||
|
||||
## What Skills Are For
|
||||
|
||||
NanoClaw's core is intentionally minimal. Skills are how users extend it: adding channels, integrations, cross-platform support, or replacing internals entirely. Examples: add Telegram alongside WhatsApp, switch from Apple Container to Docker, add Gmail integration, add voice message transcription. Each skill modifies the actual codebase, adding channel handlers, updating the message router, changing container configuration, and adding dependencies, rather than working through a plugin API or runtime hooks.
|
||||
|
||||
## Why This Architecture
|
||||
|
||||
The problem: users need to combine multiple modifications to a shared codebase, keep those modifications working across core updates, and do all of this without becoming git experts or losing their custom changes. A plugin system would be simpler but constrains what skills can do. Giving skills full codebase access means they can change anything, but that creates merge conflicts, update breakage, and state tracking challenges.
|
||||
|
||||
This architecture solves that by making skill application fully programmatic using standard git mechanics, with AI as a fallback for conflicts git can't resolve, and a shared resolution cache so most users never hit those conflicts at all. The result: users compose exactly the features they want, customizations survive core updates automatically, and the system is always recoverable.
|
||||
|
||||
## Core Principle
|
||||
|
||||
Skills are self-contained, auditable packages applied via standard git merge mechanics. Claude Code orchestrates the process — running git commands, reading skill manifests, and stepping in only when git can't resolve a conflict. The system uses existing git features (`merge-file`, `rerere`, `apply`) rather than custom merge infrastructure.
|
||||
|
||||
## Three-Level Resolution Model
|
||||
|
||||
Every operation follows this escalation:
|
||||
|
||||
1. **Git** — deterministic. `git merge-file` merges, `git rerere` replays cached resolutions, structured operations apply without merging. No AI. Handles the vast majority of cases.
|
||||
2. **Claude Code** — reads `SKILL.md`, `.intent.md`, and `state.yaml` to resolve conflicts git can't handle. Caches resolutions via `git rerere` so the same conflict never needs resolving twice.
|
||||
3. **Claude Code + user input** — when Claude Code lacks sufficient context to determine intent (e.g., two features genuinely conflict at an application level), it asks the user for a decision, then uses that input to perform the resolution. Claude Code still does the work — the user provides direction, not code.
|
||||
|
||||
**Important**: A clean merge doesn't guarantee working code. Semantic conflicts can produce clean text merges that break at runtime. **Tests run after every operation.**
|
||||
|
||||
## Backup/Restore Safety
|
||||
|
||||
Before any operation, all affected files are copied to `.nanoclaw/backup/`. On success, backup is deleted. On failure, backup is restored. Works safely for users who don't use git.
|
||||
|
||||
## The Shared Base
|
||||
|
||||
`.nanoclaw/base/` holds a clean copy of the core codebase. This is the single common ancestor for all three-way merges, only updated during core updates.
|
||||
|
||||
## Two Types of Changes
|
||||
|
||||
### Code Files (Three-Way Merge)
|
||||
Source code where skills weave in logic. Merged via `git merge-file` against the shared base. Skills carry full modified files.
|
||||
|
||||
### Structured Data (Deterministic Operations)
|
||||
Files like `package.json`, `docker-compose.yml`, `.env.example`. Skills declare requirements in the manifest; the system applies them programmatically. Multiple skills' declarations are batched — dependencies merged, `package.json` written once, `npm install` run once.
|
||||
|
||||
```yaml
|
||||
structured:
|
||||
npm_dependencies:
|
||||
whatsapp-web.js: "^2.1.0"
|
||||
env_additions:
|
||||
- WHATSAPP_TOKEN
|
||||
docker_compose_services:
|
||||
whatsapp-redis:
|
||||
image: redis:alpine
|
||||
ports: ["6380:6379"]
|
||||
```
|
||||
|
||||
Structured conflicts (version incompatibilities, port collisions) follow the same three-level resolution model.
|
||||
|
||||
## Skill Package Structure
|
||||
|
||||
A skill contains only the files it adds or modifies. Modified code files carry the **full file** (clean core + skill's changes), making `git merge-file` straightforward and auditable.
|
||||
|
||||
```
|
||||
skills/add-whatsapp/
|
||||
SKILL.md # What this skill does and why
|
||||
manifest.yaml # Metadata, dependencies, structured ops
|
||||
tests/whatsapp.test.ts # Integration tests
|
||||
add/src/channels/whatsapp.ts # New files
|
||||
modify/src/server.ts # Full modified file for merge
|
||||
modify/src/server.ts.intent.md # Structured intent for conflict resolution
|
||||
```
|
||||
|
||||
### Intent Files
|
||||
Each modified file has a `.intent.md` with structured headings: **What this skill adds**, **Key sections**, **Invariants**, and **Must-keep sections**. These give Claude Code specific guidance during conflict resolution.
|
||||
|
||||
### Manifest
|
||||
Declares: skill metadata, core version compatibility, files added/modified, file operations, structured operations, skill relationships (conflicts, depends, tested_with), post-apply commands, and test command.
|
||||
|
||||
## Customization and Layering
|
||||
|
||||
**One skill, one happy path** — a skill implements the reasonable default for 80% of users.
|
||||
|
||||
**Customization is more patching.** Apply the skill, then modify via tracked patches, direct editing, or additional layered skills. Custom modifications are recorded in `state.yaml` and replayable.
|
||||
|
||||
**Skills layer via `depends`.** Extension skills build on base skills (e.g., `telegram-reactions` depends on `add-telegram`).
|
||||
|
||||
## File Operations
|
||||
|
||||
Renames, deletes, and moves are declared in the manifest and run **before** code merges. When core renames a file, a **path remap** resolves skill references at apply time — skill packages are never mutated.
|
||||
|
||||
## The Apply Flow
|
||||
|
||||
1. Pre-flight checks (compatibility, dependencies, untracked changes)
|
||||
2. Backup
|
||||
3. File operations + path remapping
|
||||
4. Copy new files
|
||||
5. Merge modified code files (`git merge-file`)
|
||||
6. Conflict resolution (shared cache → `git rerere` → Claude Code → Claude Code + user input)
|
||||
7. Apply structured operations (batched)
|
||||
8. Post-apply commands, update `state.yaml`
|
||||
9. **Run tests** (mandatory, even if all merges were clean)
|
||||
10. Clean up (delete backup on success, restore on failure)
|
||||
|
||||
## Shared Resolution Cache
|
||||
|
||||
`.nanoclaw/resolutions/` ships pre-computed, verified conflict resolutions with **hash enforcement** — a cached resolution only applies if base, current, and skill input hashes match exactly. This means most users never encounter unresolved conflicts for common skill combinations.
|
||||
|
||||
### rerere Adapter
|
||||
`git rerere` requires unmerged index entries that `git merge-file` doesn't create. An adapter sets up the required index state after `merge-file` produces a conflict, enabling rerere caching. This requires the project to be a git repository; users without `.git/` lose caching but not functionality.
|
||||
|
||||
## State Tracking
|
||||
|
||||
`.nanoclaw/state.yaml` records: core version, all applied skills (with per-file hashes for base/skill/merged), structured operation outcomes, custom patches, and path remaps. This makes drift detection instant and replay deterministic.
|
||||
|
||||
## Untracked Changes
|
||||
|
||||
Direct edits are detected via hash comparison before any operation. Users can record them as tracked patches, continue untracked, or abort. The three-level model can always recover coherent state from any starting point.
|
||||
|
||||
## Core Updates
|
||||
|
||||
Most changes propagate automatically through three-way merge. **Breaking changes** require a **migration skill** — a regular skill that preserves the old behavior, authored against the new core. Migrations are declared in `migrations.yaml` and applied automatically during updates.
|
||||
|
||||
### Update Flow
|
||||
1. Preview changes (git-only, no files modified)
|
||||
2. Backup → file operations → three-way merge → conflict resolution
|
||||
3. Re-apply custom patches (`git apply --3way`)
|
||||
4. **Update base** to new core
|
||||
5. Apply migration skills (preserves user's setup automatically)
|
||||
6. Re-apply updated skills (version-changed skills only)
|
||||
7. Re-run structured operations → run all tests → clean up
|
||||
|
||||
The user sees no prompts during updates. To accept a new default later, they remove the migration skill.
|
||||
|
||||
## Skill Removal
|
||||
|
||||
Uninstall is **replay without the skill**: read `state.yaml`, remove the target skill, replay all remaining skills from clean base using the resolution cache. Backup for safety.
|
||||
|
||||
## Rebase
|
||||
|
||||
Flatten accumulated layers into a clean starting point. Updates base, regenerates diffs, clears old patches and stale cache entries. Trades individual skill history for simpler future merges.
|
||||
|
||||
## Replay
|
||||
|
||||
Given `state.yaml`, reproduce the exact installation on a fresh machine with no AI (assuming cached resolutions). Apply skills in order, merge, apply custom patches, batch structured operations, run tests.
|
||||
|
||||
## Skill Tests
|
||||
|
||||
Each skill includes integration tests. Tests run **always** — after apply, after update, after uninstall, during replay, in CI. CI tests all official skills individually and pairwise combinations for skills sharing modified files or structured operations.
|
||||
|
||||
## Design Principles
|
||||
|
||||
1. **Use git, don't reinvent it.**
|
||||
2. **Three-level resolution: git → Claude Code → Claude Code + user input.**
|
||||
3. **Clean merges aren't enough.** Tests run after every operation.
|
||||
4. **All operations are safe.** Backup/restore, no half-applied state.
|
||||
5. **One shared base**, only updated on core updates.
|
||||
6. **Code merges vs. structured operations.** Source code is merged; configs are aggregated.
|
||||
7. **Resolutions are learned and shared** with hash enforcement.
|
||||
8. **One skill, one happy path.** Customization is more patching.
|
||||
9. **Skills layer and compose.**
|
||||
10. **Intent is first-class and structured.**
|
||||
11. **State is explicit and complete.** Replay is deterministic.
|
||||
12. **Always recoverable.**
|
||||
13. **Uninstall is replay.**
|
||||
14. **Core updates are the maintainers' responsibility.** Breaking changes require migration skills.
|
||||
15. **File operations and path remapping are first-class.**
|
||||
16. **Skills are tested.** CI tests pairwise by overlap.
|
||||
17. **Deterministic serialization.** No noisy diffs.
|
||||
18. **Rebase when needed.**
|
||||
19. **Progressive core slimming** via migration skills.
|
||||
@@ -2,7 +2,20 @@
|
||||
|
||||
## Overview
|
||||
|
||||
NanoClaw skills are distributed as git branches on the upstream repository. Applying a skill is a `git merge`. Updating core is a `git merge`. Everything is standard git.
|
||||
This document covers **feature skills** — skills that add capabilities via git branch merges. This is the most complex skill type and the primary way NanoClaw is extended.
|
||||
|
||||
NanoClaw has four types of skills overall. See [CONTRIBUTING.md](../CONTRIBUTING.md) for the full taxonomy:
|
||||
|
||||
| Type | Location | How it works |
|
||||
|------|----------|-------------|
|
||||
| **Feature** (this doc) | `.claude/skills/` + `skill/*` branch | SKILL.md has instructions; code lives on a branch, applied via `git merge` |
|
||||
| **Utility** | `.claude/skills/<name>/` with code files | Self-contained tools; code in skill directory, copied into place on install |
|
||||
| **Operational** | `.claude/skills/` on `main` | Instruction-only workflows (setup, debug, update) |
|
||||
| **Container** | `container/skills/` | Loaded inside agent containers at runtime |
|
||||
|
||||
---
|
||||
|
||||
Feature skills are distributed as git branches on the upstream repository. Applying a skill is a `git merge`. Updating core is a `git merge`. Everything is standard git.
|
||||
|
||||
This replaces the previous `skills-engine/` system (three-way file merging, `.nanoclaw/` state, manifest files, replay, backup/restore) with plain git operations and Claude for conflict resolution.
|
||||
|
||||
@@ -310,7 +323,9 @@ Standard fork contribution workflow. Their custom changes stay on their main and
|
||||
|
||||
## Contributing a Skill
|
||||
|
||||
### Contributor flow
|
||||
The flow below is for **feature skills** (branch-based). For utility skills (self-contained tools) and container skills, the contributor opens a PR that adds files directly to `.claude/skills/<name>/` or `container/skills/<name>/` — no branch extraction needed. See [CONTRIBUTING.md](../CONTRIBUTING.md) for all skill types.
|
||||
|
||||
### Contributor flow (feature skills)
|
||||
|
||||
1. Fork `qwibitai/nanoclaw`
|
||||
2. Branch from `main`
|
||||
|
||||
32
eslint.config.js
Normal file
32
eslint.config.js
Normal file
@@ -0,0 +1,32 @@
|
||||
import globals from 'globals'
|
||||
import pluginJs from '@eslint/js'
|
||||
import tseslint from 'typescript-eslint'
|
||||
import noCatchAll from 'eslint-plugin-no-catch-all'
|
||||
|
||||
export default [
|
||||
{ ignores: ['node_modules/', 'dist/', 'container/', 'groups/'] },
|
||||
{ files: ['src/**/*.{js,ts}'] },
|
||||
{ languageOptions: { globals: globals.node } },
|
||||
pluginJs.configs.recommended,
|
||||
...tseslint.configs.recommended,
|
||||
{
|
||||
plugins: { 'no-catch-all': noCatchAll },
|
||||
rules: {
|
||||
'preserve-caught-error': ['error', { requireCatchParameter: true }],
|
||||
'@typescript-eslint/no-unused-vars': [
|
||||
'error',
|
||||
{
|
||||
args: 'all',
|
||||
argsIgnorePattern: '^_',
|
||||
caughtErrors: 'all',
|
||||
caughtErrorsIgnorePattern: '^_',
|
||||
destructuredArrayIgnorePattern: '^_',
|
||||
varsIgnorePattern: '^_',
|
||||
ignoreRestSiblings: true,
|
||||
},
|
||||
],
|
||||
'no-catch-all/no-catch-all': 'warn',
|
||||
'@typescript-eslint/no-explicit-any': 'warn',
|
||||
},
|
||||
},
|
||||
]
|
||||
@@ -49,10 +49,28 @@ When you learn something important:
|
||||
|
||||
## Message Formatting
|
||||
|
||||
NEVER use markdown. Only use WhatsApp/Telegram formatting:
|
||||
- *single asterisks* for bold (NEVER **double asterisks**)
|
||||
- _underscores_ for italic
|
||||
- • bullet points
|
||||
- ```triple backticks``` for code
|
||||
Format messages based on the channel you're responding to. Check your group folder name:
|
||||
|
||||
No ## headings. No [links](url). No **double stars**.
|
||||
### Slack channels (folder starts with `slack_`)
|
||||
|
||||
Use Slack mrkdwn syntax. Run `/slack-formatting` for the full reference. Key rules:
|
||||
- `*bold*` (single asterisks)
|
||||
- `_italic_` (underscores)
|
||||
- `<https://url|link text>` for links (NOT `[text](url)`)
|
||||
- `•` bullets (no numbered lists)
|
||||
- `:emoji:` shortcodes
|
||||
- `>` for block quotes
|
||||
- No `##` headings — use `*Bold text*` instead
|
||||
|
||||
### WhatsApp/Telegram channels (folder starts with `whatsapp_` or `telegram_`)
|
||||
|
||||
- `*bold*` (single asterisks, NEVER **double**)
|
||||
- `_italic_` (underscores)
|
||||
- `•` bullet points
|
||||
- ` ``` ` code blocks
|
||||
|
||||
No `##` headings. No `[links](url)`. No `**double stars**`.
|
||||
|
||||
### Discord channels (folder starts with `discord_`)
|
||||
|
||||
Standard Markdown works: `**bold**`, `*italic*`, `[links](url)`, `# headings`.
|
||||
|
||||
@@ -43,15 +43,33 @@ When you learn something important:
|
||||
- Split files larger than 500 lines into folders
|
||||
- Keep an index in your memory for the files you create
|
||||
|
||||
## WhatsApp Formatting (and other messaging apps)
|
||||
## Message Formatting
|
||||
|
||||
Do NOT use markdown headings (##) in WhatsApp messages. Only use:
|
||||
- *Bold* (single asterisks) (NEVER **double asterisks**)
|
||||
- _Italic_ (underscores)
|
||||
- • Bullets (bullet points)
|
||||
- ```Code blocks``` (triple backticks)
|
||||
Format messages based on the channel. Check the group folder name prefix:
|
||||
|
||||
Keep messages clean and readable for WhatsApp.
|
||||
### Slack channels (folder starts with `slack_`)
|
||||
|
||||
Use Slack mrkdwn syntax. Run `/slack-formatting` for the full reference. Key rules:
|
||||
- `*bold*` (single asterisks)
|
||||
- `_italic_` (underscores)
|
||||
- `<https://url|link text>` for links (NOT `[text](url)`)
|
||||
- `•` bullets (no numbered lists)
|
||||
- `:emoji:` shortcodes like `:white_check_mark:`, `:rocket:`
|
||||
- `>` for block quotes
|
||||
- No `##` headings — use `*Bold text*` instead
|
||||
|
||||
### WhatsApp/Telegram (folder starts with `whatsapp_` or `telegram_`)
|
||||
|
||||
- `*bold*` (single asterisks, NEVER **double**)
|
||||
- `_italic_` (underscores)
|
||||
- `•` bullet points
|
||||
- ` ``` ` code blocks
|
||||
|
||||
No `##` headings. No `[links](url)`. No `**double stars**`.
|
||||
|
||||
### Discord (folder starts with `discord_`)
|
||||
|
||||
Standard Markdown: `**bold**`, `*italic*`, `[links](url)`, `# headings`.
|
||||
|
||||
---
|
||||
|
||||
|
||||
1416
package-lock.json
generated
1416
package-lock.json
generated
File diff suppressed because it is too large
Load Diff
10
package.json
10
package.json
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "nanoclaw",
|
||||
"version": "1.2.15",
|
||||
"version": "1.2.23",
|
||||
"description": "Personal Claude assistant. Lightweight, secure, customizable.",
|
||||
"type": "module",
|
||||
"main": "dist/index.js",
|
||||
@@ -15,10 +15,13 @@
|
||||
"prepare": "husky",
|
||||
"setup": "tsx setup/index.ts",
|
||||
"auth": "tsx src/whatsapp-auth.ts",
|
||||
"lint": "eslint src/",
|
||||
"lint:fix": "eslint src/ --fix",
|
||||
"test": "vitest run",
|
||||
"test:watch": "vitest"
|
||||
},
|
||||
"dependencies": {
|
||||
"@onecli-sh/sdk": "^0.2.0",
|
||||
"better-sqlite3": "^11.8.1",
|
||||
"grammy": "^1.39.3",
|
||||
"cron-parser": "^5.5.0",
|
||||
@@ -28,13 +31,18 @@
|
||||
"zod": "^4.3.6"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@eslint/js": "^9.35.0",
|
||||
"@types/better-sqlite3": "^7.6.12",
|
||||
"@types/node": "^22.10.0",
|
||||
"@vitest/coverage-v8": "^4.0.18",
|
||||
"eslint": "^9.35.0",
|
||||
"eslint-plugin-no-catch-all": "^1.1.0",
|
||||
"globals": "^15.12.0",
|
||||
"husky": "^9.1.7",
|
||||
"prettier": "^3.8.1",
|
||||
"tsx": "^4.19.0",
|
||||
"typescript": "^5.7.0",
|
||||
"typescript-eslint": "^8.35.0",
|
||||
"vitest": "^4.0.18"
|
||||
},
|
||||
"engines": {
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="97" height="20" role="img" aria-label="41.1k tokens, 21% of context window">
|
||||
<title>41.1k tokens, 21% of context window</title>
|
||||
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="97" height="20" role="img" aria-label="39.9k tokens, 20% of context window">
|
||||
<title>39.9k tokens, 20% of context window</title>
|
||||
<linearGradient id="s" x2="0" y2="100%">
|
||||
<stop offset="0" stop-color="#bbb" stop-opacity=".1"/>
|
||||
<stop offset="1" stop-opacity=".1"/>
|
||||
@@ -15,8 +15,8 @@
|
||||
<g fill="#fff" text-anchor="middle" font-family="Verdana,Geneva,DejaVu Sans,sans-serif" font-size="11">
|
||||
<text aria-hidden="true" x="26" y="15" fill="#010101" fill-opacity=".3">tokens</text>
|
||||
<text x="26" y="14">tokens</text>
|
||||
<text aria-hidden="true" x="74" y="15" fill="#010101" fill-opacity=".3">41.1k</text>
|
||||
<text x="74" y="14">41.1k</text>
|
||||
<text aria-hidden="true" x="74" y="15" fill="#010101" fill-opacity=".3">39.9k</text>
|
||||
<text x="74" y="14">39.9k</text>
|
||||
</g>
|
||||
</g>
|
||||
</a>
|
||||
|
||||
|
Before Width: | Height: | Size: 1.1 KiB After Width: | Height: | Size: 1.1 KiB |
@@ -101,7 +101,7 @@ export async function run(_args: string[]): Promise<void> {
|
||||
const envFile = path.join(projectRoot, '.env');
|
||||
if (fs.existsSync(envFile)) {
|
||||
const envContent = fs.readFileSync(envFile, 'utf-8');
|
||||
if (/^(CLAUDE_CODE_OAUTH_TOKEN|ANTHROPIC_API_KEY)=/m.test(envContent)) {
|
||||
if (/^(CLAUDE_CODE_OAUTH_TOKEN|ANTHROPIC_API_KEY|ONECLI_URL)=/m.test(envContent)) {
|
||||
credentials = 'configured';
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import { describe, it, expect, beforeEach } from 'vitest';
|
||||
import { describe, it, expect } from 'vitest';
|
||||
|
||||
import {
|
||||
registerChannel,
|
||||
|
||||
@@ -4,9 +4,11 @@ import path from 'path';
|
||||
import { readEnvFile } from './env.js';
|
||||
|
||||
// Read config values from .env (falls back to process.env).
|
||||
// Secrets (API keys, tokens) are NOT read here — they are loaded only
|
||||
// by the credential proxy (credential-proxy.ts), never exposed to containers.
|
||||
const envConfig = readEnvFile(['ASSISTANT_NAME', 'ASSISTANT_HAS_OWN_NUMBER']);
|
||||
const envConfig = readEnvFile([
|
||||
'ASSISTANT_NAME',
|
||||
'ASSISTANT_HAS_OWN_NUMBER',
|
||||
'ONECLI_URL',
|
||||
]);
|
||||
|
||||
export const ASSISTANT_NAME =
|
||||
process.env.ASSISTANT_NAME || envConfig.ASSISTANT_NAME || 'Andy';
|
||||
@@ -47,10 +49,8 @@ export const CONTAINER_MAX_OUTPUT_SIZE = parseInt(
|
||||
process.env.CONTAINER_MAX_OUTPUT_SIZE || '10485760',
|
||||
10,
|
||||
); // 10MB default
|
||||
export const CREDENTIAL_PROXY_PORT = parseInt(
|
||||
process.env.CREDENTIAL_PROXY_PORT || '3001',
|
||||
10,
|
||||
);
|
||||
export const ONECLI_URL =
|
||||
process.env.ONECLI_URL || envConfig.ONECLI_URL || 'http://localhost:10254';
|
||||
export const IPC_POLL_INTERVAL = 1000;
|
||||
export const IDLE_TIMEOUT = parseInt(process.env.IDLE_TIMEOUT || '1800000', 10); // 30min default — how long to keep container alive after last result
|
||||
export const MAX_CONCURRENT_CONTAINERS = Math.max(
|
||||
|
||||
@@ -11,10 +11,10 @@ vi.mock('./config.js', () => ({
|
||||
CONTAINER_IMAGE: 'nanoclaw-agent:latest',
|
||||
CONTAINER_MAX_OUTPUT_SIZE: 10485760,
|
||||
CONTAINER_TIMEOUT: 1800000, // 30min
|
||||
CREDENTIAL_PROXY_PORT: 3001,
|
||||
DATA_DIR: '/tmp/nanoclaw-test-data',
|
||||
GROUPS_DIR: '/tmp/nanoclaw-test-groups',
|
||||
IDLE_TIMEOUT: 1800000, // 30min
|
||||
ONECLI_URL: 'http://localhost:10254',
|
||||
TIMEZONE: 'America/Los_Angeles',
|
||||
}));
|
||||
|
||||
@@ -51,6 +51,17 @@ vi.mock('./mount-security.js', () => ({
|
||||
validateAdditionalMounts: vi.fn(() => []),
|
||||
}));
|
||||
|
||||
// Mock OneCLI SDK
|
||||
vi.mock('@onecli-sh/sdk', () => ({
|
||||
OneCLI: class {
|
||||
applyContainerConfig = vi.fn().mockResolvedValue(true);
|
||||
createAgent = vi.fn().mockResolvedValue({ id: 'test' });
|
||||
ensureAgent = vi
|
||||
.fn()
|
||||
.mockResolvedValue({ name: 'test', identifier: 'test', created: true });
|
||||
},
|
||||
}));
|
||||
|
||||
// Create a controllable fake ChildProcess
|
||||
function createFakeProcess() {
|
||||
const proc = new EventEmitter() as EventEmitter & {
|
||||
|
||||
@@ -10,25 +10,26 @@ import {
|
||||
CONTAINER_IMAGE,
|
||||
CONTAINER_MAX_OUTPUT_SIZE,
|
||||
CONTAINER_TIMEOUT,
|
||||
CREDENTIAL_PROXY_PORT,
|
||||
DATA_DIR,
|
||||
GROUPS_DIR,
|
||||
IDLE_TIMEOUT,
|
||||
ONECLI_URL,
|
||||
TIMEZONE,
|
||||
} from './config.js';
|
||||
import { resolveGroupFolderPath, resolveGroupIpcPath } from './group-folder.js';
|
||||
import { logger } from './logger.js';
|
||||
import {
|
||||
CONTAINER_HOST_GATEWAY,
|
||||
CONTAINER_RUNTIME_BIN,
|
||||
hostGatewayArgs,
|
||||
readonlyMountArgs,
|
||||
stopContainer,
|
||||
} from './container-runtime.js';
|
||||
import { detectAuthMode } from './credential-proxy.js';
|
||||
import { OneCLI } from '@onecli-sh/sdk';
|
||||
import { validateAdditionalMounts } from './mount-security.js';
|
||||
import { RegisteredGroup } from './types.js';
|
||||
|
||||
const onecli = new OneCLI({ url: ONECLI_URL });
|
||||
|
||||
// Sentinel markers for robust output parsing (must match agent-runner)
|
||||
const OUTPUT_START_MARKER = '---NANOCLAW_OUTPUT_START---';
|
||||
const OUTPUT_END_MARKER = '---NANOCLAW_OUTPUT_END---';
|
||||
@@ -77,7 +78,7 @@ function buildVolumeMounts(
|
||||
});
|
||||
|
||||
// Shadow .env so the agent cannot read secrets from the mounted project root.
|
||||
// Credentials are injected by the credential proxy, never exposed to containers.
|
||||
// Credentials are injected by the OneCLI gateway, never exposed to containers.
|
||||
const envFile = path.join(projectRoot, '.env');
|
||||
if (fs.existsSync(envFile)) {
|
||||
mounts.push({
|
||||
@@ -212,30 +213,29 @@ function buildVolumeMounts(
|
||||
return mounts;
|
||||
}
|
||||
|
||||
function buildContainerArgs(
|
||||
async function buildContainerArgs(
|
||||
mounts: VolumeMount[],
|
||||
containerName: string,
|
||||
): string[] {
|
||||
agentIdentifier?: string,
|
||||
): Promise<string[]> {
|
||||
const args: string[] = ['run', '-i', '--rm', '--name', containerName];
|
||||
|
||||
// Pass host timezone so container's local time matches the user's
|
||||
args.push('-e', `TZ=${TIMEZONE}`);
|
||||
|
||||
// Route API traffic through the credential proxy (containers never see real secrets)
|
||||
args.push(
|
||||
'-e',
|
||||
`ANTHROPIC_BASE_URL=http://${CONTAINER_HOST_GATEWAY}:${CREDENTIAL_PROXY_PORT}`,
|
||||
);
|
||||
|
||||
// Mirror the host's auth method with a placeholder value.
|
||||
// API key mode: SDK sends x-api-key, proxy replaces with real key.
|
||||
// OAuth mode: SDK exchanges placeholder token for temp API key,
|
||||
// proxy injects real OAuth token on that exchange request.
|
||||
const authMode = detectAuthMode();
|
||||
if (authMode === 'api-key') {
|
||||
args.push('-e', 'ANTHROPIC_API_KEY=placeholder');
|
||||
// OneCLI gateway handles credential injection — containers never see real secrets.
|
||||
// The gateway intercepts HTTPS traffic and injects API keys or OAuth tokens.
|
||||
const onecliApplied = await onecli.applyContainerConfig(args, {
|
||||
addHostMapping: false, // Nanoclaw already handles host gateway
|
||||
agent: agentIdentifier,
|
||||
});
|
||||
if (onecliApplied) {
|
||||
logger.info({ containerName }, 'OneCLI gateway config applied');
|
||||
} else {
|
||||
args.push('-e', 'CLAUDE_CODE_OAUTH_TOKEN=placeholder');
|
||||
logger.warn(
|
||||
{ containerName },
|
||||
'OneCLI gateway not reachable — container will have no credentials',
|
||||
);
|
||||
}
|
||||
|
||||
// Runtime-specific args for host gateway resolution
|
||||
@@ -278,7 +278,15 @@ export async function runContainerAgent(
|
||||
const mounts = buildVolumeMounts(group, input.isMain);
|
||||
const safeName = group.folder.replace(/[^a-zA-Z0-9-]/g, '-');
|
||||
const containerName = `nanoclaw-${safeName}-${Date.now()}`;
|
||||
const containerArgs = buildContainerArgs(mounts, containerName);
|
||||
// Main group uses the default OneCLI agent; others use their own agent.
|
||||
const agentIdentifier = input.isMain
|
||||
? undefined
|
||||
: group.folder.toLowerCase().replace(/_/g, '-');
|
||||
const containerArgs = await buildContainerArgs(
|
||||
mounts,
|
||||
containerName,
|
||||
agentIdentifier,
|
||||
);
|
||||
|
||||
logger.debug(
|
||||
{
|
||||
@@ -503,10 +511,20 @@ export async function runContainerAgent(
|
||||
const isError = code !== 0;
|
||||
|
||||
if (isVerbose || isError) {
|
||||
// On error, log input metadata only — not the full prompt.
|
||||
// Full input is only included at verbose level to avoid
|
||||
// persisting user conversation content on every non-zero exit.
|
||||
if (isVerbose) {
|
||||
logLines.push(`=== Input ===`, JSON.stringify(input, null, 2), ``);
|
||||
} else {
|
||||
logLines.push(
|
||||
`=== Input ===`,
|
||||
JSON.stringify(input, null, 2),
|
||||
`=== Input Summary ===`,
|
||||
`Prompt length: ${input.prompt.length} chars`,
|
||||
`Session ID: ${input.sessionId || 'new'}`,
|
||||
``,
|
||||
);
|
||||
}
|
||||
logLines.push(
|
||||
`=== Container Args ===`,
|
||||
containerArgs.join(' '),
|
||||
``,
|
||||
@@ -684,7 +702,7 @@ export function writeGroupsSnapshot(
|
||||
groupFolder: string,
|
||||
isMain: boolean,
|
||||
groups: AvailableGroup[],
|
||||
registeredJids: Set<string>,
|
||||
_registeredJids: Set<string>,
|
||||
): void {
|
||||
const groupIpcDir = resolveGroupIpcPath(groupFolder);
|
||||
fs.mkdirSync(groupIpcDir, { recursive: true });
|
||||
|
||||
@@ -41,7 +41,7 @@ describe('readonlyMountArgs', () => {
|
||||
describe('stopContainer', () => {
|
||||
it('returns stop command using CONTAINER_RUNTIME_BIN', () => {
|
||||
expect(stopContainer('nanoclaw-test-123')).toBe(
|
||||
`${CONTAINER_RUNTIME_BIN} stop nanoclaw-test-123`,
|
||||
`${CONTAINER_RUNTIME_BIN} stop -t 1 nanoclaw-test-123`,
|
||||
);
|
||||
});
|
||||
});
|
||||
@@ -93,12 +93,12 @@ describe('cleanupOrphans', () => {
|
||||
expect(mockExecSync).toHaveBeenCalledTimes(3);
|
||||
expect(mockExecSync).toHaveBeenNthCalledWith(
|
||||
2,
|
||||
`${CONTAINER_RUNTIME_BIN} stop nanoclaw-group1-111`,
|
||||
`${CONTAINER_RUNTIME_BIN} stop -t 1 nanoclaw-group1-111`,
|
||||
{ stdio: 'pipe' },
|
||||
);
|
||||
expect(mockExecSync).toHaveBeenNthCalledWith(
|
||||
3,
|
||||
`${CONTAINER_RUNTIME_BIN} stop nanoclaw-group2-222`,
|
||||
`${CONTAINER_RUNTIME_BIN} stop -t 1 nanoclaw-group2-222`,
|
||||
{ stdio: 'pipe' },
|
||||
);
|
||||
expect(logger.info).toHaveBeenCalledWith(
|
||||
|
||||
@@ -3,7 +3,6 @@
|
||||
* All runtime-specific logic lives here so swapping runtimes means changing one file.
|
||||
*/
|
||||
import { execSync } from 'child_process';
|
||||
import fs from 'fs';
|
||||
import os from 'os';
|
||||
|
||||
import { logger } from './logger.js';
|
||||
@@ -11,35 +10,6 @@ import { logger } from './logger.js';
|
||||
/** The container runtime binary name. */
|
||||
export const CONTAINER_RUNTIME_BIN = 'docker';
|
||||
|
||||
/** Hostname containers use to reach the host machine. */
|
||||
export const CONTAINER_HOST_GATEWAY = 'host.docker.internal';
|
||||
|
||||
/**
|
||||
* Address the credential proxy binds to.
|
||||
* Docker Desktop (macOS): 127.0.0.1 — the VM routes host.docker.internal to loopback.
|
||||
* Docker (Linux): bind to the docker0 bridge IP so only containers can reach it,
|
||||
* falling back to 0.0.0.0 if the interface isn't found.
|
||||
*/
|
||||
export const PROXY_BIND_HOST =
|
||||
process.env.CREDENTIAL_PROXY_HOST || detectProxyBindHost();
|
||||
|
||||
function detectProxyBindHost(): string {
|
||||
if (os.platform() === 'darwin') return '127.0.0.1';
|
||||
|
||||
// WSL uses Docker Desktop (same VM routing as macOS) — loopback is correct.
|
||||
// Check /proc filesystem, not env vars — WSL_DISTRO_NAME isn't set under systemd.
|
||||
if (fs.existsSync('/proc/sys/fs/binfmt_misc/WSLInterop')) return '127.0.0.1';
|
||||
|
||||
// Bare-metal Linux: bind to the docker0 bridge IP instead of 0.0.0.0
|
||||
const ifaces = os.networkInterfaces();
|
||||
const docker0 = ifaces['docker0'];
|
||||
if (docker0) {
|
||||
const ipv4 = docker0.find((a) => a.family === 'IPv4');
|
||||
if (ipv4) return ipv4.address;
|
||||
}
|
||||
return '0.0.0.0';
|
||||
}
|
||||
|
||||
/** CLI args needed for the container to resolve the host gateway. */
|
||||
export function hostGatewayArgs(): string[] {
|
||||
// On Linux, host.docker.internal isn't built-in — add it explicitly
|
||||
@@ -59,7 +29,7 @@ export function readonlyMountArgs(
|
||||
|
||||
/** Returns the shell command to stop a container by name. */
|
||||
export function stopContainer(name: string): string {
|
||||
return `${CONTAINER_RUNTIME_BIN} stop ${name}`;
|
||||
return `${CONTAINER_RUNTIME_BIN} stop -t 1 ${name}`;
|
||||
}
|
||||
|
||||
/** Ensure the container runtime is running, starting it if needed. */
|
||||
@@ -96,7 +66,9 @@ export function ensureContainerRuntimeRunning(): void {
|
||||
console.error(
|
||||
'╚════════════════════════════════════════════════════════════════╝\n',
|
||||
);
|
||||
throw new Error('Container runtime is required but failed to start');
|
||||
throw new Error('Container runtime is required but failed to start', {
|
||||
cause: err,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,192 +0,0 @@
|
||||
import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
|
||||
import http from 'http';
|
||||
import type { AddressInfo } from 'net';
|
||||
|
||||
const mockEnv: Record<string, string> = {};
|
||||
vi.mock('./env.js', () => ({
|
||||
readEnvFile: vi.fn(() => ({ ...mockEnv })),
|
||||
}));
|
||||
|
||||
vi.mock('./logger.js', () => ({
|
||||
logger: { info: vi.fn(), error: vi.fn(), debug: vi.fn(), warn: vi.fn() },
|
||||
}));
|
||||
|
||||
import { startCredentialProxy } from './credential-proxy.js';
|
||||
|
||||
function makeRequest(
|
||||
port: number,
|
||||
options: http.RequestOptions,
|
||||
body = '',
|
||||
): Promise<{
|
||||
statusCode: number;
|
||||
body: string;
|
||||
headers: http.IncomingHttpHeaders;
|
||||
}> {
|
||||
return new Promise((resolve, reject) => {
|
||||
const req = http.request(
|
||||
{ ...options, hostname: '127.0.0.1', port },
|
||||
(res) => {
|
||||
const chunks: Buffer[] = [];
|
||||
res.on('data', (c) => chunks.push(c));
|
||||
res.on('end', () => {
|
||||
resolve({
|
||||
statusCode: res.statusCode!,
|
||||
body: Buffer.concat(chunks).toString(),
|
||||
headers: res.headers,
|
||||
});
|
||||
});
|
||||
},
|
||||
);
|
||||
req.on('error', reject);
|
||||
req.write(body);
|
||||
req.end();
|
||||
});
|
||||
}
|
||||
|
||||
describe('credential-proxy', () => {
|
||||
let proxyServer: http.Server;
|
||||
let upstreamServer: http.Server;
|
||||
let proxyPort: number;
|
||||
let upstreamPort: number;
|
||||
let lastUpstreamHeaders: http.IncomingHttpHeaders;
|
||||
|
||||
beforeEach(async () => {
|
||||
lastUpstreamHeaders = {};
|
||||
|
||||
upstreamServer = http.createServer((req, res) => {
|
||||
lastUpstreamHeaders = { ...req.headers };
|
||||
res.writeHead(200, { 'content-type': 'application/json' });
|
||||
res.end(JSON.stringify({ ok: true }));
|
||||
});
|
||||
await new Promise<void>((resolve) =>
|
||||
upstreamServer.listen(0, '127.0.0.1', resolve),
|
||||
);
|
||||
upstreamPort = (upstreamServer.address() as AddressInfo).port;
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
await new Promise<void>((r) => proxyServer?.close(() => r()));
|
||||
await new Promise<void>((r) => upstreamServer?.close(() => r()));
|
||||
for (const key of Object.keys(mockEnv)) delete mockEnv[key];
|
||||
});
|
||||
|
||||
async function startProxy(env: Record<string, string>): Promise<number> {
|
||||
Object.assign(mockEnv, env, {
|
||||
ANTHROPIC_BASE_URL: `http://127.0.0.1:${upstreamPort}`,
|
||||
});
|
||||
proxyServer = await startCredentialProxy(0);
|
||||
return (proxyServer.address() as AddressInfo).port;
|
||||
}
|
||||
|
||||
it('API-key mode injects x-api-key and strips placeholder', async () => {
|
||||
proxyPort = await startProxy({ ANTHROPIC_API_KEY: 'sk-ant-real-key' });
|
||||
|
||||
await makeRequest(
|
||||
proxyPort,
|
||||
{
|
||||
method: 'POST',
|
||||
path: '/v1/messages',
|
||||
headers: {
|
||||
'content-type': 'application/json',
|
||||
'x-api-key': 'placeholder',
|
||||
},
|
||||
},
|
||||
'{}',
|
||||
);
|
||||
|
||||
expect(lastUpstreamHeaders['x-api-key']).toBe('sk-ant-real-key');
|
||||
});
|
||||
|
||||
it('OAuth mode replaces Authorization when container sends one', async () => {
|
||||
proxyPort = await startProxy({
|
||||
CLAUDE_CODE_OAUTH_TOKEN: 'real-oauth-token',
|
||||
});
|
||||
|
||||
await makeRequest(
|
||||
proxyPort,
|
||||
{
|
||||
method: 'POST',
|
||||
path: '/api/oauth/claude_cli/create_api_key',
|
||||
headers: {
|
||||
'content-type': 'application/json',
|
||||
authorization: 'Bearer placeholder',
|
||||
},
|
||||
},
|
||||
'{}',
|
||||
);
|
||||
|
||||
expect(lastUpstreamHeaders['authorization']).toBe(
|
||||
'Bearer real-oauth-token',
|
||||
);
|
||||
});
|
||||
|
||||
it('OAuth mode does not inject Authorization when container omits it', async () => {
|
||||
proxyPort = await startProxy({
|
||||
CLAUDE_CODE_OAUTH_TOKEN: 'real-oauth-token',
|
||||
});
|
||||
|
||||
// Post-exchange: container uses x-api-key only, no Authorization header
|
||||
await makeRequest(
|
||||
proxyPort,
|
||||
{
|
||||
method: 'POST',
|
||||
path: '/v1/messages',
|
||||
headers: {
|
||||
'content-type': 'application/json',
|
||||
'x-api-key': 'temp-key-from-exchange',
|
||||
},
|
||||
},
|
||||
'{}',
|
||||
);
|
||||
|
||||
expect(lastUpstreamHeaders['x-api-key']).toBe('temp-key-from-exchange');
|
||||
expect(lastUpstreamHeaders['authorization']).toBeUndefined();
|
||||
});
|
||||
|
||||
it('strips hop-by-hop headers', async () => {
|
||||
proxyPort = await startProxy({ ANTHROPIC_API_KEY: 'sk-ant-real-key' });
|
||||
|
||||
await makeRequest(
|
||||
proxyPort,
|
||||
{
|
||||
method: 'POST',
|
||||
path: '/v1/messages',
|
||||
headers: {
|
||||
'content-type': 'application/json',
|
||||
connection: 'keep-alive',
|
||||
'keep-alive': 'timeout=5',
|
||||
'transfer-encoding': 'chunked',
|
||||
},
|
||||
},
|
||||
'{}',
|
||||
);
|
||||
|
||||
// Proxy strips client hop-by-hop headers. Node's HTTP client may re-add
|
||||
// its own Connection header (standard HTTP/1.1 behavior), but the client's
|
||||
// custom keep-alive and transfer-encoding must not be forwarded.
|
||||
expect(lastUpstreamHeaders['keep-alive']).toBeUndefined();
|
||||
expect(lastUpstreamHeaders['transfer-encoding']).toBeUndefined();
|
||||
});
|
||||
|
||||
it('returns 502 when upstream is unreachable', async () => {
|
||||
Object.assign(mockEnv, {
|
||||
ANTHROPIC_API_KEY: 'sk-ant-real-key',
|
||||
ANTHROPIC_BASE_URL: 'http://127.0.0.1:59999',
|
||||
});
|
||||
proxyServer = await startCredentialProxy(0);
|
||||
proxyPort = (proxyServer.address() as AddressInfo).port;
|
||||
|
||||
const res = await makeRequest(
|
||||
proxyPort,
|
||||
{
|
||||
method: 'POST',
|
||||
path: '/v1/messages',
|
||||
headers: { 'content-type': 'application/json' },
|
||||
},
|
||||
'{}',
|
||||
);
|
||||
|
||||
expect(res.statusCode).toBe(502);
|
||||
expect(res.body).toBe('Bad Gateway');
|
||||
});
|
||||
});
|
||||
@@ -1,125 +0,0 @@
|
||||
/**
|
||||
* Credential proxy for container isolation.
|
||||
* Containers connect here instead of directly to the Anthropic API.
|
||||
* The proxy injects real credentials so containers never see them.
|
||||
*
|
||||
* Two auth modes:
|
||||
* API key: Proxy injects x-api-key on every request.
|
||||
* OAuth: Container CLI exchanges its placeholder token for a temp
|
||||
* API key via /api/oauth/claude_cli/create_api_key.
|
||||
* Proxy injects real OAuth token on that exchange request;
|
||||
* subsequent requests carry the temp key which is valid as-is.
|
||||
*/
|
||||
import { createServer, Server } from 'http';
|
||||
import { request as httpsRequest } from 'https';
|
||||
import { request as httpRequest, RequestOptions } from 'http';
|
||||
|
||||
import { readEnvFile } from './env.js';
|
||||
import { logger } from './logger.js';
|
||||
|
||||
export type AuthMode = 'api-key' | 'oauth';
|
||||
|
||||
export interface ProxyConfig {
|
||||
authMode: AuthMode;
|
||||
}
|
||||
|
||||
export function startCredentialProxy(
|
||||
port: number,
|
||||
host = '127.0.0.1',
|
||||
): Promise<Server> {
|
||||
const secrets = readEnvFile([
|
||||
'ANTHROPIC_API_KEY',
|
||||
'CLAUDE_CODE_OAUTH_TOKEN',
|
||||
'ANTHROPIC_AUTH_TOKEN',
|
||||
'ANTHROPIC_BASE_URL',
|
||||
]);
|
||||
|
||||
const authMode: AuthMode = secrets.ANTHROPIC_API_KEY ? 'api-key' : 'oauth';
|
||||
const oauthToken =
|
||||
secrets.CLAUDE_CODE_OAUTH_TOKEN || secrets.ANTHROPIC_AUTH_TOKEN;
|
||||
|
||||
const upstreamUrl = new URL(
|
||||
secrets.ANTHROPIC_BASE_URL || 'https://api.anthropic.com',
|
||||
);
|
||||
const isHttps = upstreamUrl.protocol === 'https:';
|
||||
const makeRequest = isHttps ? httpsRequest : httpRequest;
|
||||
|
||||
return new Promise((resolve, reject) => {
|
||||
const server = createServer((req, res) => {
|
||||
const chunks: Buffer[] = [];
|
||||
req.on('data', (c) => chunks.push(c));
|
||||
req.on('end', () => {
|
||||
const body = Buffer.concat(chunks);
|
||||
const headers: Record<string, string | number | string[] | undefined> =
|
||||
{
|
||||
...(req.headers as Record<string, string>),
|
||||
host: upstreamUrl.host,
|
||||
'content-length': body.length,
|
||||
};
|
||||
|
||||
// Strip hop-by-hop headers that must not be forwarded by proxies
|
||||
delete headers['connection'];
|
||||
delete headers['keep-alive'];
|
||||
delete headers['transfer-encoding'];
|
||||
|
||||
if (authMode === 'api-key') {
|
||||
// API key mode: inject x-api-key on every request
|
||||
delete headers['x-api-key'];
|
||||
headers['x-api-key'] = secrets.ANTHROPIC_API_KEY;
|
||||
} else {
|
||||
// OAuth mode: replace placeholder Bearer token with the real one
|
||||
// only when the container actually sends an Authorization header
|
||||
// (exchange request + auth probes). Post-exchange requests use
|
||||
// x-api-key only, so they pass through without token injection.
|
||||
if (headers['authorization']) {
|
||||
delete headers['authorization'];
|
||||
if (oauthToken) {
|
||||
headers['authorization'] = `Bearer ${oauthToken}`;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const upstream = makeRequest(
|
||||
{
|
||||
hostname: upstreamUrl.hostname,
|
||||
port: upstreamUrl.port || (isHttps ? 443 : 80),
|
||||
path: req.url,
|
||||
method: req.method,
|
||||
headers,
|
||||
} as RequestOptions,
|
||||
(upRes) => {
|
||||
res.writeHead(upRes.statusCode!, upRes.headers);
|
||||
upRes.pipe(res);
|
||||
},
|
||||
);
|
||||
|
||||
upstream.on('error', (err) => {
|
||||
logger.error(
|
||||
{ err, url: req.url },
|
||||
'Credential proxy upstream error',
|
||||
);
|
||||
if (!res.headersSent) {
|
||||
res.writeHead(502);
|
||||
res.end('Bad Gateway');
|
||||
}
|
||||
});
|
||||
|
||||
upstream.write(body);
|
||||
upstream.end();
|
||||
});
|
||||
});
|
||||
|
||||
server.listen(port, host, () => {
|
||||
logger.info({ port, host, authMode }, 'Credential proxy started');
|
||||
resolve(server);
|
||||
});
|
||||
|
||||
server.on('error', reject);
|
||||
});
|
||||
}
|
||||
|
||||
/** Detect which auth mode the host is configured for. */
|
||||
export function detectAuthMode(): AuthMode {
|
||||
const secrets = readEnvFile(['ANTHROPIC_API_KEY']);
|
||||
return secrets.ANTHROPIC_API_KEY ? 'api-key' : 'oauth';
|
||||
}
|
||||
@@ -40,7 +40,7 @@ describe('GroupQueue', () => {
|
||||
let concurrentCount = 0;
|
||||
let maxConcurrent = 0;
|
||||
|
||||
const processMessages = vi.fn(async (groupJid: string) => {
|
||||
const processMessages = vi.fn(async (_groupJid: string) => {
|
||||
concurrentCount++;
|
||||
maxConcurrent = Math.max(maxConcurrent, concurrentCount);
|
||||
// Simulate async work
|
||||
@@ -69,7 +69,7 @@ describe('GroupQueue', () => {
|
||||
let maxActive = 0;
|
||||
const completionCallbacks: Array<() => void> = [];
|
||||
|
||||
const processMessages = vi.fn(async (groupJid: string) => {
|
||||
const processMessages = vi.fn(async (_groupJid: string) => {
|
||||
activeCount++;
|
||||
maxActive = Math.max(maxActive, activeCount);
|
||||
await new Promise<void>((resolve) => completionCallbacks.push(resolve));
|
||||
@@ -104,7 +104,7 @@ describe('GroupQueue', () => {
|
||||
const executionOrder: string[] = [];
|
||||
let resolveFirst: () => void;
|
||||
|
||||
const processMessages = vi.fn(async (groupJid: string) => {
|
||||
const processMessages = vi.fn(async (_groupJid: string) => {
|
||||
if (executionOrder.length === 0) {
|
||||
// First call: block until we release it
|
||||
await new Promise<void>((resolve) => {
|
||||
|
||||
@@ -351,7 +351,7 @@ export class GroupQueue {
|
||||
// via idle timeout or container timeout. The --rm flag cleans them up on exit.
|
||||
// This prevents WhatsApp reconnection restarts from killing working agents.
|
||||
const activeContainers: string[] = [];
|
||||
for (const [jid, state] of this.groups) {
|
||||
for (const [_jid, state] of this.groups) {
|
||||
if (state.process && !state.process.killed && state.containerName) {
|
||||
activeContainers.push(state.containerName);
|
||||
}
|
||||
|
||||
62
src/index.ts
62
src/index.ts
@@ -1,15 +1,16 @@
|
||||
import fs from 'fs';
|
||||
import path from 'path';
|
||||
|
||||
import { OneCLI } from '@onecli-sh/sdk';
|
||||
|
||||
import {
|
||||
ASSISTANT_NAME,
|
||||
CREDENTIAL_PROXY_PORT,
|
||||
IDLE_TIMEOUT,
|
||||
ONECLI_URL,
|
||||
POLL_INTERVAL,
|
||||
TIMEZONE,
|
||||
TRIGGER_PATTERN,
|
||||
} from './config.js';
|
||||
import { startCredentialProxy } from './credential-proxy.js';
|
||||
import './channels/index.js';
|
||||
import {
|
||||
getChannelFactory,
|
||||
@@ -24,7 +25,6 @@ import {
|
||||
import {
|
||||
cleanupOrphans,
|
||||
ensureContainerRuntimeRunning,
|
||||
PROXY_BIND_HOST,
|
||||
} from './container-runtime.js';
|
||||
import {
|
||||
getAllChats,
|
||||
@@ -33,7 +33,6 @@ import {
|
||||
getAllTasks,
|
||||
getMessagesSince,
|
||||
getNewMessages,
|
||||
getRegisteredGroup,
|
||||
getRouterState,
|
||||
initDatabase,
|
||||
setRegisteredGroup,
|
||||
@@ -73,6 +72,27 @@ let messageLoopRunning = false;
|
||||
const channels: Channel[] = [];
|
||||
const queue = new GroupQueue();
|
||||
|
||||
const onecli = new OneCLI({ url: ONECLI_URL });
|
||||
|
||||
function ensureOneCLIAgent(jid: string, group: RegisteredGroup): void {
|
||||
if (group.isMain) return;
|
||||
const identifier = group.folder.toLowerCase().replace(/_/g, '-');
|
||||
onecli.ensureAgent({ name: group.name, identifier }).then(
|
||||
(res) => {
|
||||
logger.info(
|
||||
{ jid, identifier, created: res.created },
|
||||
'OneCLI agent ensured',
|
||||
);
|
||||
},
|
||||
(err) => {
|
||||
logger.debug(
|
||||
{ jid, identifier, err: String(err) },
|
||||
'OneCLI agent ensure skipped',
|
||||
);
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
function loadState(): void {
|
||||
lastTimestamp = getRouterState('last_timestamp') || '';
|
||||
const agentTs = getRouterState('last_agent_timestamp');
|
||||
@@ -113,6 +133,9 @@ function registerGroup(jid: string, group: RegisteredGroup): void {
|
||||
// Create group folder
|
||||
fs.mkdirSync(path.join(groupDir, 'logs'), { recursive: true });
|
||||
|
||||
// Ensure a corresponding OneCLI agent exists (best-effort, non-blocking)
|
||||
ensureOneCLIAgent(jid, group);
|
||||
|
||||
logger.info(
|
||||
{ jid, name: group.name, folder: group.folder },
|
||||
'Group registered',
|
||||
@@ -221,7 +244,7 @@ async function processGroupMessages(chatJid: string): Promise<boolean> {
|
||||
: JSON.stringify(result.result);
|
||||
// Strip <internal>...</internal> blocks — agent uses these for internal reasoning
|
||||
const text = raw.replace(/<internal>[\s\S]*?<\/internal>/g, '').trim();
|
||||
logger.info({ group: group.name }, `Agent output: ${raw.slice(0, 200)}`);
|
||||
logger.info({ group: group.name }, `Agent output: ${raw.length} chars`);
|
||||
if (text) {
|
||||
await channel.sendMessage(chatJid, text);
|
||||
outputSentToUser = true;
|
||||
@@ -475,18 +498,18 @@ async function main(): Promise<void> {
|
||||
initDatabase();
|
||||
logger.info('Database initialized');
|
||||
loadState();
|
||||
restoreRemoteControl();
|
||||
|
||||
// Start credential proxy (containers route API calls through this)
|
||||
const proxyServer = await startCredentialProxy(
|
||||
CREDENTIAL_PROXY_PORT,
|
||||
PROXY_BIND_HOST,
|
||||
);
|
||||
// Ensure OneCLI agents exist for all registered groups.
|
||||
// Recovers from missed creates (e.g. OneCLI was down at registration time).
|
||||
for (const [jid, group] of Object.entries(registeredGroups)) {
|
||||
ensureOneCLIAgent(jid, group);
|
||||
}
|
||||
|
||||
restoreRemoteControl();
|
||||
|
||||
// Graceful shutdown handlers
|
||||
const shutdown = async (signal: string) => {
|
||||
logger.info({ signal }, 'Shutdown signal received');
|
||||
proxyServer.close();
|
||||
await queue.shutdown(10000);
|
||||
for (const ch of channels) await ch.disconnect();
|
||||
process.exit(0);
|
||||
@@ -632,6 +655,21 @@ async function main(): Promise<void> {
|
||||
getAvailableGroups,
|
||||
writeGroupsSnapshot: (gf, im, ag, rj) =>
|
||||
writeGroupsSnapshot(gf, im, ag, rj),
|
||||
onTasksChanged: () => {
|
||||
const tasks = getAllTasks();
|
||||
const taskRows = tasks.map((t) => ({
|
||||
id: t.id,
|
||||
groupFolder: t.group_folder,
|
||||
prompt: t.prompt,
|
||||
schedule_type: t.schedule_type,
|
||||
schedule_value: t.schedule_value,
|
||||
status: t.status,
|
||||
next_run: t.next_run,
|
||||
}));
|
||||
for (const group of Object.values(registeredGroups)) {
|
||||
writeTasksSnapshot(group.folder, group.isMain === true, taskRows);
|
||||
}
|
||||
},
|
||||
});
|
||||
queue.setProcessMessagesFn(processGroupMessages);
|
||||
recoverPendingMessages();
|
||||
|
||||
@@ -62,6 +62,7 @@ beforeEach(() => {
|
||||
syncGroups: async () => {},
|
||||
getAvailableGroups: () => [],
|
||||
writeGroupsSnapshot: () => {},
|
||||
onTasksChanged: () => {},
|
||||
};
|
||||
});
|
||||
|
||||
|
||||
@@ -22,6 +22,7 @@ export interface IpcDeps {
|
||||
availableGroups: AvailableGroup[],
|
||||
registeredJids: Set<string>,
|
||||
) => void;
|
||||
onTasksChanged: () => void;
|
||||
}
|
||||
|
||||
let ipcWatcherRunning = false;
|
||||
@@ -270,6 +271,7 @@ export async function processTaskIpc(
|
||||
{ taskId, sourceGroup, targetFolder, contextMode },
|
||||
'Task created via IPC',
|
||||
);
|
||||
deps.onTasksChanged();
|
||||
}
|
||||
break;
|
||||
|
||||
@@ -282,6 +284,7 @@ export async function processTaskIpc(
|
||||
{ taskId: data.taskId, sourceGroup },
|
||||
'Task paused via IPC',
|
||||
);
|
||||
deps.onTasksChanged();
|
||||
} else {
|
||||
logger.warn(
|
||||
{ taskId: data.taskId, sourceGroup },
|
||||
@@ -300,6 +303,7 @@ export async function processTaskIpc(
|
||||
{ taskId: data.taskId, sourceGroup },
|
||||
'Task resumed via IPC',
|
||||
);
|
||||
deps.onTasksChanged();
|
||||
} else {
|
||||
logger.warn(
|
||||
{ taskId: data.taskId, sourceGroup },
|
||||
@@ -318,6 +322,7 @@ export async function processTaskIpc(
|
||||
{ taskId: data.taskId, sourceGroup },
|
||||
'Task cancelled via IPC',
|
||||
);
|
||||
deps.onTasksChanged();
|
||||
} else {
|
||||
logger.warn(
|
||||
{ taskId: data.taskId, sourceGroup },
|
||||
@@ -388,6 +393,7 @@ export async function processTaskIpc(
|
||||
{ taskId: data.taskId, sourceGroup, updates },
|
||||
'Task updated via IPC',
|
||||
);
|
||||
deps.onTasksChanged();
|
||||
}
|
||||
break;
|
||||
|
||||
|
||||
@@ -37,7 +37,7 @@ describe('remote-control', () => {
|
||||
let readFileSyncSpy: ReturnType<typeof vi.spyOn>;
|
||||
let writeFileSyncSpy: ReturnType<typeof vi.spyOn>;
|
||||
let unlinkSyncSpy: ReturnType<typeof vi.spyOn>;
|
||||
let mkdirSyncSpy: ReturnType<typeof vi.spyOn>;
|
||||
let _mkdirSyncSpy: ReturnType<typeof vi.spyOn>;
|
||||
let openSyncSpy: ReturnType<typeof vi.spyOn>;
|
||||
let closeSyncSpy: ReturnType<typeof vi.spyOn>;
|
||||
|
||||
@@ -50,7 +50,7 @@ describe('remote-control', () => {
|
||||
stdoutFileContent = '';
|
||||
|
||||
// Default fs mocks
|
||||
mkdirSyncSpy = vi
|
||||
_mkdirSyncSpy = vi
|
||||
.spyOn(fs, 'mkdirSync')
|
||||
.mockImplementation(() => undefined as any);
|
||||
writeFileSyncSpy = vi
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import { describe, it, expect, beforeEach } from 'vitest';
|
||||
|
||||
import { _initTestDatabase, getAllChats, storeChatMetadata } from './db.js';
|
||||
import { _initTestDatabase, storeChatMetadata } from './db.js';
|
||||
import { getAvailableGroups, _setRegisteredGroups } from './index.js';
|
||||
|
||||
beforeEach(() => {
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import fs from 'fs';
|
||||
import os from 'os';
|
||||
import path from 'path';
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
|
||||
import { afterEach, beforeEach, describe, expect, it } from 'vitest';
|
||||
|
||||
import {
|
||||
isSenderAllowed,
|
||||
|
||||
Reference in New Issue
Block a user