// Source listing metadata (from repository viewer):
// openclaw/src/auto-reply/reply/followup-runner.test.ts
// 2026-02-17 13:36:48 +09:00 · 349 lines · 11 KiB · TypeScript
import fs from "node:fs/promises";
import { tmpdir } from "node:os";
import path from "node:path";
import { describe, expect, it, vi } from "vitest";
import { loadSessionStore, saveSessionStore, type SessionEntry } from "../../config/sessions.js";
import type { FollowupRun } from "./queue.js";
import { createMockTypingController } from "./test-helpers.js";
// Shared spy for the embedded agent entry point. Individual tests queue
// per-test behavior via mockImplementationOnce / mockResolvedValueOnce.
const runEmbeddedPiAgentMock = vi.fn();
// Stub the model-fallback wrapper so it invokes `run` directly with the
// requested provider/model — no fallback chain is exercised in these tests.
// NOTE: vi.mock factories are hoisted by vitest; they must not close over
// non-hoisted locals other than other vi.* declarations.
vi.mock("../../agents/model-fallback.js", () => ({
runWithModelFallback: async ({
provider,
model,
run,
}: {
provider: string;
model: string;
run: (provider: string, model: string) => Promise<unknown>;
}) => ({
result: await run(provider, model),
provider,
model,
}),
}));
// Route every embedded-agent invocation through the shared spy above.
vi.mock("../../agents/pi-embedded.js", () => ({
runEmbeddedPiAgent: (params: unknown) => runEmbeddedPiAgentMock(params),
}));
// Imported after the vi.mock calls (vitest hoists the mocks regardless, but
// keeping the import below them makes the dependency on the mocks explicit).
import { createFollowupRunner } from "./followup-runner.js";
/**
 * Builds a minimal queued follow-up run shared by the tests in this file.
 *
 * @param messageProvider - Messaging provider recorded on the inner run
 *   (defaults to "whatsapp"); the dedupe tests pass "slack" so the run's
 *   provider matches the messaging-tool targets they assert against.
 */
const baseQueuedRun = (messageProvider = "whatsapp"): FollowupRun => {
  // Inner run descriptor: verbose off, no elevation, short timeout.
  const run = {
    sessionId: "session",
    sessionKey: "main",
    messageProvider,
    agentAccountId: "primary",
    sessionFile: "/tmp/session.jsonl",
    workspaceDir: "/tmp",
    config: {},
    skillsSnapshot: {},
    provider: "anthropic",
    model: "claude",
    thinkLevel: "low",
    verboseLevel: "off",
    elevatedLevel: "off",
    bashElevated: { enabled: false, allowed: false, defaultLevel: "off" },
    timeoutMs: 1_000,
    blockReplyBreak: "message_end",
  };
  return {
    prompt: "hello",
    summaryLine: "hello",
    enqueuedAt: Date.now(),
    originatingTo: "channel:C1",
    run,
  } as FollowupRun;
};
// Compaction behavior: when the agent reports an auto-compaction, the runner
// should (a) surface a notice to the user if the run is verbose, (b) bump the
// session's compactionCount, and (c) reset totalTokens from the
// post-compaction lastCallUsage snapshot rather than the accumulated usage.
describe("createFollowupRunner compaction", () => {
it("adds verbose auto-compaction notice and tracks count", async () => {
// Fresh temp-dir session store so counter assertions are isolated.
const storePath = path.join(
await fs.mkdtemp(path.join(tmpdir(), "openclaw-compaction-")),
"sessions.json",
);
const sessionEntry: SessionEntry = {
sessionId: "session",
updatedAt: Date.now(),
};
const sessionStore: Record<string, SessionEntry> = {
main: sessionEntry,
};
const onBlockReply = vi.fn(async () => {});
// Agent emits a compaction "end" event (with a retry still pending)
// before returning its final payload.
runEmbeddedPiAgentMock.mockImplementationOnce(
async (params: {
onAgentEvent?: (evt: { stream: string; data: Record<string, unknown> }) => void;
}) => {
params.onAgentEvent?.({
stream: "compaction",
data: { phase: "end", willRetry: true },
});
return { payloads: [{ text: "final" }], meta: {} };
},
);
const runner = createFollowupRunner({
opts: { onBlockReply },
typing: createMockTypingController(),
typingMode: "instant",
sessionEntry,
sessionStore,
sessionKey: "main",
storePath,
defaultModel: "anthropic/claude-opus-4-5",
});
// Like baseQueuedRun() but with verboseLevel "on" (and no originatingTo),
// so the compaction notice is delivered to the user.
const queued = {
prompt: "hello",
summaryLine: "hello",
enqueuedAt: Date.now(),
run: {
sessionId: "session",
sessionKey: "main",
messageProvider: "whatsapp",
sessionFile: "/tmp/session.jsonl",
workspaceDir: "/tmp",
config: {},
skillsSnapshot: {},
provider: "anthropic",
model: "claude",
thinkLevel: "low",
verboseLevel: "on",
elevatedLevel: "off",
bashElevated: {
enabled: false,
allowed: false,
defaultLevel: "off",
},
timeoutMs: 1_000,
blockReplyBreak: "message_end",
},
} as FollowupRun;
await runner(queued);
expect(onBlockReply).toHaveBeenCalled();
// The first delivered reply is expected to be the verbose notice.
expect(onBlockReply.mock.calls[0][0].text).toContain("Auto-compaction complete");
// The in-memory store entry is updated alongside the notice.
expect(sessionStore.main.compactionCount).toBe(1);
});
it("updates totalTokens after auto-compaction using lastCallUsage", async () => {
const storePath = path.join(
await fs.mkdtemp(path.join(tmpdir(), "openclaw-followup-compaction-")),
"sessions.json",
);
const sessionKey = "main";
// Entry starts near the (200k) context limit to mimic a pre-compaction
// session.
const sessionEntry: SessionEntry = {
sessionId: "session",
updatedAt: Date.now(),
totalTokens: 180_000,
compactionCount: 0,
};
const sessionStore: Record<string, SessionEntry> = { [sessionKey]: sessionEntry };
await saveSessionStore(storePath, sessionStore);
const onBlockReply = vi.fn(async () => {});
// Compaction completes (willRetry: false) and the agent reports both
// accumulated usage and the usage of the final (post-compaction) call.
runEmbeddedPiAgentMock.mockImplementationOnce(
async (params: {
onAgentEvent?: (evt: { stream: string; data: Record<string, unknown> }) => void;
}) => {
params.onAgentEvent?.({
stream: "compaction",
data: { phase: "end", willRetry: false },
});
return {
payloads: [{ text: "done" }],
meta: {
agentMeta: {
// Accumulated usage across pre+post compaction calls.
usage: { input: 190_000, output: 8_000, total: 198_000 },
// Last call usage reflects post-compaction context.
lastCallUsage: { input: 11_000, output: 2_000, total: 13_000 },
model: "claude-opus-4-5",
provider: "anthropic",
},
},
};
},
);
const runner = createFollowupRunner({
opts: { onBlockReply },
typing: createMockTypingController(),
typingMode: "instant",
sessionEntry,
sessionStore,
sessionKey,
storePath,
defaultModel: "anthropic/claude-opus-4-5",
agentCfgContextTokens: 200_000,
});
await runner(baseQueuedRun());
// Reload from disk (bypassing the cache) to assert persisted state.
const store = loadSessionStore(storePath, { skipCache: true });
expect(store[sessionKey]?.compactionCount).toBe(1);
// totalTokens is taken from lastCallUsage.input, not accumulated usage.
expect(store[sessionKey]?.totalTokens).toBe(11_000);
// We only keep the total estimate after compaction.
expect(store[sessionKey]?.inputTokens).toBeUndefined();
expect(store[sessionKey]?.outputTokens).toBeUndefined();
});
});
/**
 * Messaging-tool dedupe: when the agent has already delivered content through
 * a messaging tool, the runner must not re-send the same text/media — and
 * must suppress the reply entirely when the tool targeted the same
 * provider + destination as the queued run.
 */
describe("createFollowupRunner messaging tool dedupe", () => {
  // Shared fixture: a runner wired to a fresh onBlockReply spy, with no
  // session store (these tests do not assert persistence unless noted).
  const setup = () => {
    const onBlockReply = vi.fn(async () => {});
    const runner = createFollowupRunner({
      opts: { onBlockReply },
      typing: createMockTypingController(),
      typingMode: "instant",
      defaultModel: "anthropic/claude-opus-4-5",
    });
    return { onBlockReply, runner };
  };
  it("drops payloads already sent via messaging tool", async () => {
    runEmbeddedPiAgentMock.mockResolvedValueOnce({
      payloads: [{ text: "hello world!" }],
      messagingToolSentTexts: ["hello world!"],
      meta: {},
    });
    const { onBlockReply, runner } = setup();
    await runner(baseQueuedRun());
    // Exact text already sent by the tool → nothing delivered.
    expect(onBlockReply).not.toHaveBeenCalled();
  });
  it("delivers payloads when not duplicates", async () => {
    runEmbeddedPiAgentMock.mockResolvedValueOnce({
      payloads: [{ text: "hello world!" }],
      messagingToolSentTexts: ["different message"],
      meta: {},
    });
    const { onBlockReply, runner } = setup();
    await runner(baseQueuedRun());
    expect(onBlockReply).toHaveBeenCalledTimes(1);
  });
  it("suppresses replies when a messaging tool sent via the same provider + target", async () => {
    runEmbeddedPiAgentMock.mockResolvedValueOnce({
      payloads: [{ text: "hello world!" }],
      messagingToolSentTexts: ["different message"],
      messagingToolSentTargets: [{ tool: "slack", provider: "slack", to: "channel:C1" }],
      meta: {},
    });
    const { onBlockReply, runner } = setup();
    // Run provider "slack" matches the tool target's provider + originatingTo.
    await runner(baseQueuedRun("slack"));
    expect(onBlockReply).not.toHaveBeenCalled();
  });
  it("drops media URL from payload when messaging tool already sent it", async () => {
    runEmbeddedPiAgentMock.mockResolvedValueOnce({
      payloads: [{ mediaUrl: "/tmp/img.png" }],
      messagingToolSentMediaUrls: ["/tmp/img.png"],
      meta: {},
    });
    const { onBlockReply, runner } = setup();
    await runner(baseQueuedRun());
    // Media stripped → payload becomes non-renderable → not delivered.
    expect(onBlockReply).not.toHaveBeenCalled();
  });
  it("delivers media payload when not a duplicate", async () => {
    runEmbeddedPiAgentMock.mockResolvedValueOnce({
      payloads: [{ mediaUrl: "/tmp/img.png" }],
      messagingToolSentMediaUrls: ["/tmp/other.png"],
      meta: {},
    });
    const { onBlockReply, runner } = setup();
    await runner(baseQueuedRun());
    expect(onBlockReply).toHaveBeenCalledTimes(1);
  });
  it("persists usage even when replies are suppressed", async () => {
    // This test needs a real on-disk store, so it builds its own runner.
    const storePath = path.join(
      await fs.mkdtemp(path.join(tmpdir(), "openclaw-followup-usage-")),
      "sessions.json",
    );
    const sessionKey = "main";
    const sessionEntry: SessionEntry = { sessionId: "session", updatedAt: Date.now() };
    const sessionStore: Record<string, SessionEntry> = { [sessionKey]: sessionEntry };
    await saveSessionStore(storePath, sessionStore);
    const onBlockReply = vi.fn(async () => {});
    runEmbeddedPiAgentMock.mockResolvedValueOnce({
      payloads: [{ text: "hello world!" }],
      messagingToolSentTexts: ["different message"],
      messagingToolSentTargets: [{ tool: "slack", provider: "slack", to: "channel:C1" }],
      meta: {
        agentMeta: {
          usage: { input: 1_000, output: 50 },
          lastCallUsage: { input: 400, output: 20 },
          model: "claude-opus-4-5",
          provider: "anthropic",
        },
      },
    });
    const runner = createFollowupRunner({
      opts: { onBlockReply },
      typing: createMockTypingController(),
      typingMode: "instant",
      sessionEntry,
      sessionStore,
      sessionKey,
      storePath,
      defaultModel: "anthropic/claude-opus-4-5",
    });
    await runner(baseQueuedRun("slack"));
    expect(onBlockReply).not.toHaveBeenCalled();
    const persisted = loadSessionStore(storePath, { skipCache: true });
    // totalTokens should reflect the last call usage snapshot, not the accumulated input.
    expect(persisted[sessionKey]?.totalTokens).toBe(400);
    expect(persisted[sessionKey]?.model).toBe("claude-opus-4-5");
    // Accumulated usage is still stored for usage/cost tracking.
    expect(persisted[sessionKey]?.inputTokens).toBe(1_000);
    expect(persisted[sessionKey]?.outputTokens).toBe(50);
  });
});