Refactor index (#156)
* feat: add Telegram channel with agent swarm support Add Telegram as a messaging channel that can run alongside WhatsApp or standalone (TELEGRAM_ONLY mode). Includes bot pool support for agent swarms where each subagent appears as a different bot identity in the group. - Add grammy dependency for Telegram Bot API - Route messages through tg: JID prefix convention - Add storeMessageDirect for non-Baileys channels - Add sender field to IPC send_message for swarm identity - Support TELEGRAM_BOT_TOKEN, TELEGRAM_ONLY, TELEGRAM_BOT_POOL config Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com> * docs: add index.ts refactor plan Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com> * refactor: extract channel abstraction, IPC, and router from index.ts Break the 1088-line monolith into focused modules: - src/channels/whatsapp.ts: WhatsAppChannel class implementing Channel interface - src/ipc.ts: IPC watcher and task processing with dependency injection - src/router.ts: message formatting, outbound routing, channel lookup - src/types.ts: Channel interface, OnInboundMessage, OnChatMetadata types Also adds regression test suite (98 tests), updates all documentation and skill files to reflect the new architecture. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com> * ci: add test workflow for PRs Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com> * chore: remove accidentally committed pool-bot assets Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com> * fix(ci): remove grammy from base dependencies Grammy is installed by the /add-telegram skill, not a base dependency. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
245
src/group-queue.test.ts
Normal file
245
src/group-queue.test.ts
Normal file
@@ -0,0 +1,245 @@
|
||||
import { describe, it, expect, beforeEach, vi, afterEach } from 'vitest';

import { GroupQueue } from './group-queue.js';

// Mock config to control concurrency limit.
// The suite below hard-codes expectations around this value: tests assume
// MAX_CONCURRENT_CONTAINERS = 2 and a throwaway DATA_DIR so nothing real
// is read or written.
vi.mock('./config.js', () => ({
  DATA_DIR: '/tmp/nanoclaw-test-data',
  MAX_CONCURRENT_CONTAINERS: 2,
}));

// Mock fs operations used by sendMessage/closeStdin.
// Only the write-path functions are stubbed; every other export keeps its
// real implementation via importActual. Both the named exports and the
// default export are overridden because the module under test may import
// 'fs' either way.
// NOTE(review): the exact set of stubbed functions (mkdirSync,
// writeFileSync, renameSync) mirrors what group-queue.ts calls — confirm
// against that file if the implementation changes.
vi.mock('fs', async () => {
  const actual = await vi.importActual<typeof import('fs')>('fs');
  return {
    ...actual,
    default: {
      ...actual,
      mkdirSync: vi.fn(),
      writeFileSync: vi.fn(),
      renameSync: vi.fn(),
    },
  };
});
|
||||
|
||||
// Regression tests for GroupQueue scheduling behavior: per-group serialization,
// the global container limit, task-vs-message priority, retry/backoff, and
// shutdown semantics. All timing is driven through vitest fake timers, so the
// exact sequence of advanceTimersByTimeAsync calls is load-bearing — the queue
// presumably schedules retries and drains via setTimeout (TODO confirm against
// group-queue.ts).
describe('GroupQueue', () => {
  let queue: GroupQueue;

  beforeEach(() => {
    // Fake timers make setTimeout-based scheduling deterministic; each test
    // gets a fresh queue so no per-group state leaks between cases.
    vi.useFakeTimers();
    queue = new GroupQueue();
  });

  afterEach(() => {
    vi.useRealTimers();
  });

  // --- Single group at a time ---

  it('only runs one container per group at a time', async () => {
    // Track overlap: concurrentCount is live invocations, maxConcurrent is the
    // high-water mark observed across the whole test.
    let concurrentCount = 0;
    let maxConcurrent = 0;

    const processMessages = vi.fn(async (groupJid: string) => {
      concurrentCount++;
      maxConcurrent = Math.max(maxConcurrent, concurrentCount);
      // Simulate async work
      await new Promise((resolve) => setTimeout(resolve, 100));
      concurrentCount--;
      return true;
    });

    queue.setProcessMessagesFn(processMessages);

    // Enqueue two messages for the same group
    queue.enqueueMessageCheck('group1@g.us');
    queue.enqueueMessageCheck('group1@g.us');

    // Advance timers to let the first process complete
    // (200ms covers two sequential 100ms runs if they were serialized).
    await vi.advanceTimersByTimeAsync(200);

    // Second enqueue should have been queued, not concurrent
    expect(maxConcurrent).toBe(1);
  });

  // --- Global concurrency limit ---

  it('respects global concurrency limit', async () => {
    // completionCallbacks collects the resolve functions of in-flight calls so
    // the test can finish them one at a time, in enqueue order.
    let activeCount = 0;
    let maxActive = 0;
    const completionCallbacks: Array<() => void> = [];

    const processMessages = vi.fn(async (groupJid: string) => {
      activeCount++;
      maxActive = Math.max(maxActive, activeCount);
      // Block until the test explicitly releases this invocation.
      await new Promise<void>((resolve) => completionCallbacks.push(resolve));
      activeCount--;
      return true;
    });

    queue.setProcessMessagesFn(processMessages);

    // Enqueue 3 groups (limit is 2)
    queue.enqueueMessageCheck('group1@g.us');
    queue.enqueueMessageCheck('group2@g.us');
    queue.enqueueMessageCheck('group3@g.us');

    // Let promises settle
    await vi.advanceTimersByTimeAsync(10);

    // Only 2 should be active (MAX_CONCURRENT_CONTAINERS = 2)
    expect(maxActive).toBe(2);
    expect(activeCount).toBe(2);

    // Complete one — third should start
    completionCallbacks[0]();
    await vi.advanceTimersByTimeAsync(10);

    expect(processMessages).toHaveBeenCalledTimes(3);
  });

  // --- Tasks prioritized over messages ---

  it('drains tasks before messages for same group', async () => {
    // executionOrder records the interleaving of message-processing runs and
    // the explicit task; resolveFirst releases the deliberately-blocked first
    // message run once the task and a second message check are queued behind it.
    const executionOrder: string[] = [];
    let resolveFirst: () => void;

    const processMessages = vi.fn(async (groupJid: string) => {
      if (executionOrder.length === 0) {
        // First call: block until we release it
        await new Promise<void>((resolve) => {
          resolveFirst = resolve;
        });
      }
      executionOrder.push('messages');
      return true;
    });

    queue.setProcessMessagesFn(processMessages);

    // Start processing messages (takes the active slot)
    queue.enqueueMessageCheck('group1@g.us');
    await vi.advanceTimersByTimeAsync(10);

    // While active, enqueue both a task and pending messages
    const taskFn = vi.fn(async () => {
      executionOrder.push('task');
    });
    queue.enqueueTask('group1@g.us', 'task-1', taskFn);
    queue.enqueueMessageCheck('group1@g.us');

    // Release the first processing
    // (non-null assertion is safe: the first enqueue above guaranteed the
    // blocking branch ran and assigned resolveFirst).
    resolveFirst!();
    await vi.advanceTimersByTimeAsync(10);

    // Task should have run before the second message check
    expect(executionOrder[0]).toBe('messages'); // first call
    expect(executionOrder[1]).toBe('task'); // task runs first in drain
    // Messages would run after task completes
  });

  // --- Retry with backoff on failure ---

  it('retries with exponential backoff on failure', async () => {
    let callCount = 0;

    // A processor that always reports failure (returns false) should trigger
    // the queue's retry path.
    const processMessages = vi.fn(async () => {
      callCount++;
      return false; // failure
    });

    queue.setProcessMessagesFn(processMessages);
    queue.enqueueMessageCheck('group1@g.us');

    // First call happens immediately
    await vi.advanceTimersByTimeAsync(10);
    expect(callCount).toBe(1);

    // First retry after 5000ms (BASE_RETRY_MS * 2^0)
    // NOTE(review): 5000ms base and doubling are asserted here but defined in
    // group-queue.ts — keep these tests in sync with those constants.
    await vi.advanceTimersByTimeAsync(5000);
    await vi.advanceTimersByTimeAsync(10);
    expect(callCount).toBe(2);

    // Second retry after 10000ms (BASE_RETRY_MS * 2^1)
    await vi.advanceTimersByTimeAsync(10000);
    await vi.advanceTimersByTimeAsync(10);
    expect(callCount).toBe(3);
  });

  // --- Shutdown prevents new enqueues ---

  it('prevents new enqueues after shutdown', async () => {
    const processMessages = vi.fn(async () => true);
    queue.setProcessMessagesFn(processMessages);

    // Graceful shutdown with a 1s grace period; nothing is in flight, so it
    // should complete immediately under fake timers.
    await queue.shutdown(1000);

    // Enqueues after shutdown must be ignored entirely.
    queue.enqueueMessageCheck('group1@g.us');
    await vi.advanceTimersByTimeAsync(100);

    expect(processMessages).not.toHaveBeenCalled();
  });

  // --- Max retries exceeded ---

  it('stops retrying after MAX_RETRIES and resets', async () => {
    let callCount = 0;

    const processMessages = vi.fn(async () => {
      callCount++;
      return false; // always fail
    });

    queue.setProcessMessagesFn(processMessages);
    queue.enqueueMessageCheck('group1@g.us');

    // Run through all 5 retries (MAX_RETRIES = 5)
    // Initial call
    await vi.advanceTimersByTimeAsync(10);
    expect(callCount).toBe(1);

    // Retry 1: 5000ms, Retry 2: 10000ms, Retry 3: 20000ms, Retry 4: 40000ms, Retry 5: 80000ms
    const retryDelays = [5000, 10000, 20000, 40000, 80000];
    for (let i = 0; i < retryDelays.length; i++) {
      // +10ms slack lets the retry's own microtasks settle after the timer fires.
      await vi.advanceTimersByTimeAsync(retryDelays[i] + 10);
      expect(callCount).toBe(i + 2);
    }

    // After 5 retries (6 total calls), should stop — no more retries
    const countAfterMaxRetries = callCount;
    await vi.advanceTimersByTimeAsync(200000); // Wait a long time
    expect(callCount).toBe(countAfterMaxRetries);
  });

  // --- Waiting groups get drained when slots free up ---

  it('drains waiting groups when active slots free up', async () => {
    // processed records which group JIDs reached the processor; callbacks let
    // the test release in-flight runs individually.
    const processed: string[] = [];
    const completionCallbacks: Array<() => void> = [];

    const processMessages = vi.fn(async (groupJid: string) => {
      processed.push(groupJid);
      await new Promise<void>((resolve) => completionCallbacks.push(resolve));
      return true;
    });

    queue.setProcessMessagesFn(processMessages);

    // Fill both slots
    queue.enqueueMessageCheck('group1@g.us');
    queue.enqueueMessageCheck('group2@g.us');
    await vi.advanceTimersByTimeAsync(10);

    // Queue a third
    queue.enqueueMessageCheck('group3@g.us');
    await vi.advanceTimersByTimeAsync(10);

    // Third group must wait: both slots are occupied.
    expect(processed).toEqual(['group1@g.us', 'group2@g.us']);

    // Free up a slot
    completionCallbacks[0]();
    await vi.advanceTimersByTimeAsync(10);

    expect(processed).toContain('group3@g.us');
  });
});
|
||||
Reference in New Issue
Block a user