feat(security): warn on likely multi-user trust-model mismatch

This commit is contained in:
Peter Steinberger
2026-02-24 14:03:04 +00:00
parent 32d7756d8c
commit 4d124e4a9b
7 changed files with 236 additions and 48 deletions

View File

@@ -7,6 +7,7 @@ Docs: https://docs.openclaw.ai
### Changes
- Auto-reply/Abort shortcuts: expand standalone stop phrases (`stop openclaw`, `stop action`, `stop run`, `stop agent`, `please stop`, and related variants), accept trailing punctuation (for example `STOP OPENCLAW!!!`), and add multilingual stop keywords (including ES/FR/ZH/HI/AR/JP/DE/PT/RU forms) so emergency stop messages are caught more reliably. (#25103) Thanks @steipete and @vincentkoc.
- Security/Audit: add `security.trust_model.multi_user_heuristic` to flag likely shared-user ingress and clarify the personal-assistant trust model, with hardening guidance for intentional multi-user setups (`sandbox.mode="all"`, workspace-scoped FS, reduced tool surface, no personal/private identities on shared runtimes).
### Fixes

View File

@@ -25,6 +25,8 @@ openclaw security audit --json
The audit warns when multiple DM senders share the main session and recommends **secure DM mode**: `session.dmScope="per-channel-peer"` (or `per-account-channel-peer` for multi-account channels) for shared inboxes.
This is for cooperative/shared inbox hardening. A single Gateway shared by mutually untrusted/adversarial operators is not a recommended setup; split trust boundaries with separate gateways (or separate OS users/hosts).
It also emits `security.trust_model.multi_user_heuristic` when config suggests likely shared-user ingress (for example configured group targets or wildcard sender rules), and reminds you that OpenClaw is a personal-assistant trust model by default.
For intentional shared-user setups, the audit guidance is to sandbox all sessions, keep filesystem access workspace-scoped, reduce the exposed tool surface, and keep personal/private identities or credentials off that runtime.
It also warns when small models (`<=300B`) are used without sandboxing and with web/browser tools enabled.
For webhook ingress, it warns when `hooks.defaultSessionKey` is unset, when request `sessionKey` overrides are enabled, and when overrides are enabled without `hooks.allowedSessionKeyPrefixes`.
It also warns when sandbox Docker settings are configured while sandbox mode is off, when `gateway.nodes.denyCommands` uses ineffective pattern-like/unknown entries, when `gateway.nodes.allowCommands` explicitly enables dangerous node commands, when global `tools.profile="minimal"` is overridden by agent tool profiles, when open groups expose runtime/filesystem tools without sandbox/workspace guards, and when installed extension plugin tools may be reachable under permissive tool policy.

View File

@@ -7,6 +7,22 @@ title: "Security"
# Security 🔒
> [!WARNING]
> **Personal assistant trust model:** this guidance assumes one trusted operator boundary per gateway (single-user/personal assistant model).
> OpenClaw is **not** a hostile multi-tenant security boundary for multiple adversarial users sharing one agent/gateway.
> If you need mixed-trust or adversarial-user operation, split trust boundaries (separate gateway + credentials, ideally separate OS users/hosts).
## Scope first: personal assistant security model
OpenClaw security guidance assumes a **personal assistant** deployment: one trusted operator boundary, potentially many agents.
- Supported security posture: one user/trust boundary per gateway (prefer one OS user/host/VPS per boundary).
- Not a supported security boundary: one shared gateway/agent used by mutually untrusted or adversarial users.
- If adversarial-user isolation is required, split by trust boundary (separate gateway + credentials, and ideally separate OS users/hosts).
- If multiple untrusted users can message one tool-enabled agent, treat them as sharing the same delegated tool authority for that agent.
This page explains hardening **within that model**. It does not claim hostile multi-tenant isolation on one shared gateway.
## Quick check: `openclaw security audit`
See also: [Formal Verification (Security Models)](/security/formal-verification/)

View File

@@ -338,6 +338,137 @@ function listGroupPolicyOpen(cfg: OpenClawConfig): string[] {
return out;
}
/**
 * Returns true when any group-like key ("groups", "guilds", "channels",
 * "rooms") on the section holds a non-empty object, i.e. the config names
 * concrete group targets.
 */
function hasConfiguredGroupTargets(section: Record<string, unknown>): boolean {
  for (const key of ["groups", "guilds", "channels", "rooms"]) {
    const candidate = section[key];
    if (candidate && typeof candidate === "object" && Object.keys(candidate).length > 0) {
      return true;
    }
  }
  return false;
}
/**
 * Scans channel config sections (and their per-account subsections) for
 * signals that this gateway is likely reachable by multiple users:
 * open group/DM policies, wildcard ("*") sender allowlists, or an
 * allowlist group policy combined with concrete configured group targets.
 * Returns deduplicated, human-readable signal descriptions.
 */
function listPotentialMultiUserSignals(cfg: OpenClawConfig): string[] {
  const channels = cfg.channels as Record<string, unknown> | undefined;
  if (!channels || typeof channels !== "object") {
    return [];
  }
  const signals = new Set<string>();
  // A list "includes everyone" when any entry trims to the wildcard "*".
  const hasWildcard = (entries: unknown): boolean =>
    Array.isArray(entries) && entries.some((entry) => String(entry).trim() === "*");
  const inspect = (section: Record<string, unknown>, path: string) => {
    const groupPolicy = typeof section.groupPolicy === "string" ? section.groupPolicy : null;
    if (groupPolicy === "open") {
      signals.add(`${path}.groupPolicy="open"`);
    } else if (groupPolicy === "allowlist" && hasConfiguredGroupTargets(section)) {
      signals.add(`${path}.groupPolicy="allowlist" with configured group targets`);
    }
    if (section.dmPolicy === "open") {
      signals.add(`${path}.dmPolicy="open"`);
    }
    if (hasWildcard(section.allowFrom)) {
      signals.add(`${path}.allowFrom includes "*"`);
    }
    if (hasWildcard(section.groupAllowFrom)) {
      signals.add(`${path}.groupAllowFrom includes "*"`);
    }
    // Legacy nested DM config: section.dm.{policy,allowFrom}.
    const dm = section.dm;
    if (dm && typeof dm === "object") {
      const dmSection = dm as Record<string, unknown>;
      if (dmSection.policy === "open") {
        signals.add(`${path}.dm.policy="open"`);
      }
      if (hasWildcard(dmSection.allowFrom)) {
        signals.add(`${path}.dm.allowFrom includes "*"`);
      }
    }
  };
  for (const [channelId, rawSection] of Object.entries(channels)) {
    if (!rawSection || typeof rawSection !== "object") {
      continue;
    }
    const section = rawSection as Record<string, unknown>;
    inspect(section, `channels.${channelId}`);
    const accounts = section.accounts;
    if (!accounts || typeof accounts !== "object") {
      continue;
    }
    for (const [accountId, rawAccount] of Object.entries(accounts)) {
      if (rawAccount && typeof rawAccount === "object") {
        inspect(
          rawAccount as Record<string, unknown>,
          `channels.${channelId}.accounts.${accountId}`,
        );
      }
    }
  }
  return [...signals];
}
/**
 * Evaluates agents.defaults plus every configured agent for risky tool
 * exposure: runtime tools (exec/process) or filesystem tools
 * (read/write/edit/apply_patch) allowed while sandbox mode is not "all"
 * (and, for fs tools, while workspace-only confinement is off).
 * Returns labelled risky contexts and whether any runtime-tool risk exists.
 */
function collectRiskyToolExposureContexts(cfg: OpenClawConfig): {
  riskyContexts: string[];
  hasRuntimeRisk: boolean;
} {
  type ToolContext = { label: string; agentId?: string; tools?: AgentToolsConfig };
  // Always audit the defaults context, then each well-formed agent entry.
  const contexts: ToolContext[] = [{ label: "agents.defaults" }];
  for (const agent of cfg.agents?.list ?? []) {
    if (agent && typeof agent === "object" && typeof agent.id === "string") {
      contexts.push({ label: `agents.list.${agent.id}`, agentId: agent.id, tools: agent.tools });
    }
  }
  const riskyContexts: string[] = [];
  let hasRuntimeRisk = false;
  for (const { label, agentId, tools } of contexts) {
    const sandboxMode = resolveSandboxConfigForAgent(cfg, agentId).mode;
    const policies = resolveToolPolicies({
      cfg,
      agentTools: tools,
      sandboxMode,
      agentId: agentId ?? null,
    });
    const allowed = (names: string[]) =>
      names.filter((tool) => isToolAllowedByPolicies(tool, policies));
    const runtimeTools = allowed(["exec", "process"]);
    const fsTools = allowed(["read", "write", "edit", "apply_patch"]);
    // Per-agent setting wins; fall back to the global fs config.
    const fsWorkspaceOnly = tools?.fs?.workspaceOnly ?? cfg.tools?.fs?.workspaceOnly;
    const unsandboxed = sandboxMode !== "all";
    const runtimeUnguarded = unsandboxed && runtimeTools.length > 0;
    const fsUnguarded = unsandboxed && fsTools.length > 0 && fsWorkspaceOnly !== true;
    if (!runtimeUnguarded && !fsUnguarded) {
      continue;
    }
    hasRuntimeRisk = hasRuntimeRisk || runtimeUnguarded;
    riskyContexts.push(
      `${label} (sandbox=${sandboxMode}; runtime=[${runtimeTools.join(", ") || "off"}]; fs=[${fsTools.join(", ") || "off"}]; fs.workspaceOnly=${
        fsWorkspaceOnly === true ? "true" : "false"
      })`,
    );
  }
  return { riskyContexts, hasRuntimeRisk };
}
// --------------------------------------------------------------------------
// Exported collectors
// --------------------------------------------------------------------------
@@ -358,7 +489,9 @@ export function collectAttackSurfaceSummaryFindings(cfg: OpenClawConfig): Securi
`\n` +
`hooks.internal: ${internalHooksEnabled ? "enabled" : "disabled"}` +
`\n` +
`browser control: ${browserEnabled ? "enabled" : "disabled"}`;
`browser control: ${browserEnabled ? "enabled" : "disabled"}` +
`\n` +
"trust model: personal assistant (one trusted operator boundary), not hostile multi-tenant on one shared gateway";
return [
{
@@ -1096,53 +1229,7 @@ export function collectExposureMatrixFindings(cfg: OpenClawConfig): SecurityAudi
});
}
const contexts: Array<{
label: string;
agentId?: string;
tools?: AgentToolsConfig;
}> = [{ label: "agents.defaults" }];
for (const agent of cfg.agents?.list ?? []) {
if (!agent || typeof agent !== "object" || typeof agent.id !== "string") {
continue;
}
contexts.push({
label: `agents.list.${agent.id}`,
agentId: agent.id,
tools: agent.tools,
});
}
const riskyContexts: string[] = [];
let hasRuntimeRisk = false;
for (const context of contexts) {
const sandboxMode = resolveSandboxConfigForAgent(cfg, context.agentId).mode;
const policies = resolveToolPolicies({
cfg,
agentTools: context.tools,
sandboxMode,
agentId: context.agentId ?? null,
});
const runtimeTools = ["exec", "process"].filter((tool) =>
isToolAllowedByPolicies(tool, policies),
);
const fsTools = ["read", "write", "edit", "apply_patch"].filter((tool) =>
isToolAllowedByPolicies(tool, policies),
);
const fsWorkspaceOnly = context.tools?.fs?.workspaceOnly ?? cfg.tools?.fs?.workspaceOnly;
const runtimeUnguarded = runtimeTools.length > 0 && sandboxMode !== "all";
const fsUnguarded = fsTools.length > 0 && sandboxMode !== "all" && fsWorkspaceOnly !== true;
if (!runtimeUnguarded && !fsUnguarded) {
continue;
}
if (runtimeUnguarded) {
hasRuntimeRisk = true;
}
riskyContexts.push(
`${context.label} (sandbox=${sandboxMode}; runtime=[${runtimeTools.join(", ") || "off"}]; fs=[${fsTools.join(", ") || "off"}]; fs.workspaceOnly=${
fsWorkspaceOnly === true ? "true" : "false"
})`,
);
}
const { riskyContexts, hasRuntimeRisk } = collectRiskyToolExposureContexts(cfg);
if (riskyContexts.length > 0) {
findings.push({
@@ -1160,3 +1247,35 @@ export function collectExposureMatrixFindings(cfg: OpenClawConfig): SecurityAudi
return findings;
}
/**
 * Emits a single warn-level finding when config heuristics suggest this
 * gateway may be reachable by multiple users (shared-user ingress),
 * reminding operators that the default trust model is personal-assistant
 * rather than hostile multi-tenant. Returns no findings when no heuristic
 * signals are present.
 */
export function collectLikelyMultiUserSetupFindings(cfg: OpenClawConfig): SecurityAuditFinding[] {
  const signals = listPotentialMultiUserSignals(cfg);
  if (signals.length === 0) {
    return [];
  }
  const { riskyContexts, hasRuntimeRisk } = collectRiskyToolExposureContexts(cfg);
  const impactLine = hasRuntimeRisk
    ? "Runtime/process tools are exposed without full sandboxing in at least one context."
    : "No unguarded runtime/process tools were detected by this heuristic.";
  const riskyContextsDetail =
    riskyContexts.length === 0
      ? "No unguarded runtime/filesystem contexts detected."
      : `Potential high-impact tool exposure contexts:\n${riskyContexts.map((line) => `- ${line}`).join("\n")}`;
  const signalLines = signals.map((signal) => `- ${signal}`).join("\n");
  return [
    {
      checkId: "security.trust_model.multi_user_heuristic",
      severity: "warn",
      title: "Potential multi-user setup detected (personal-assistant model warning)",
      detail:
        "Heuristic signals indicate this gateway may be reachable by multiple users:\n" +
        signalLines +
        `\n${impactLine}\n${riskyContextsDetail}\n` +
        "OpenClaw's default security model is personal-assistant (one trusted operator boundary), not hostile multi-tenant isolation on one shared gateway.",
      remediation:
        'If users may be mutually untrusted, split trust boundaries (separate gateways + credentials, ideally separate OS users/hosts). If you intentionally run shared-user access, set agents.defaults.sandbox.mode="all", keep tools.fs.workspaceOnly=true, deny runtime/fs/web tools unless required, and keep personal/private identities + credentials off that runtime.',
    },
  ];
}

View File

@@ -14,6 +14,7 @@ export {
collectGatewayHttpNoAuthFindings,
collectGatewayHttpSessionKeyOverrideFindings,
collectHooksHardeningFindings,
collectLikelyMultiUserSetupFindings,
collectMinimalProfileOverrideFindings,
collectModelHygieneFindings,
collectNodeDangerousAllowCommandFindings,

View File

@@ -178,12 +178,14 @@ describe("security audit", () => {
};
const res = await audit(cfg);
const summary = res.findings.find((f) => f.checkId === "summary.attack_surface");
expect(res.findings).toEqual(
expect.arrayContaining([
expect.objectContaining({ checkId: "summary.attack_surface", severity: "info" }),
]),
);
expect(summary?.detail).toContain("trust model: personal assistant");
});
it("flags non-loopback bind without auth as critical", async () => {
@@ -2696,6 +2698,51 @@ description: test skill
).toBe(false);
});
it("warns when config heuristics suggest a likely multi-user setup", async () => {
  // An allowlist group policy combined with concrete guild/channel targets
  // should trip the shared-user ingress heuristic.
  const cfg: OpenClawConfig = {
    channels: {
      discord: {
        groupPolicy: "allowlist",
        guilds: {
          "1234567890": {
            channels: {
              "7777777777": { allow: true },
            },
          },
        },
      },
    },
    tools: { elevated: { enabled: false } },
  };
  const res = await audit(cfg);
  const multiUserFinding = res.findings.find(
    (candidate) => candidate.checkId === "security.trust_model.multi_user_heuristic",
  );
  expect(multiUserFinding?.severity).toBe("warn");
  // Detail names the triggering signal and restates the trust model.
  expect(multiUserFinding?.detail).toContain(
    'channels.discord.groupPolicy="allowlist" with configured group targets',
  );
  expect(multiUserFinding?.detail).toContain("personal-assistant");
  // Remediation points at full sandboxing for intentional shared setups.
  expect(multiUserFinding?.remediation).toContain('agents.defaults.sandbox.mode="all"');
});
it("does not warn for multi-user heuristic when no shared-user signals are configured", async () => {
  // Allowlist policy alone (no configured group targets) is not a signal.
  const quietCfg: OpenClawConfig = {
    channels: {
      discord: {
        groupPolicy: "allowlist",
      },
    },
    tools: { elevated: { enabled: false } },
  };
  const res = await audit(quietCfg);
  expectNoFinding(res, "security.trust_model.multi_user_heuristic");
});
describe("maybeProbeGateway auth selection", () => {
const makeProbeCapture = () => {
let capturedAuth: { token?: string; password?: string } | undefined;

View File

@@ -24,6 +24,7 @@ import {
collectHooksHardeningFindings,
collectIncludeFilePermFindings,
collectInstalledSkillsCodeSafetyFindings,
collectLikelyMultiUserSetupFindings,
collectSandboxBrowserHashLabelFindings,
collectMinimalProfileOverrideFindings,
collectModelHygieneFindings,
@@ -866,6 +867,7 @@ export async function runSecurityAudit(opts: SecurityAuditOptions): Promise<Secu
findings.push(...collectModelHygieneFindings(cfg));
findings.push(...collectSmallModelRiskFindings({ cfg, env }));
findings.push(...collectExposureMatrixFindings(cfg));
findings.push(...collectLikelyMultiUserSetupFindings(cfg));
const configSnapshot =
opts.includeFilesystem !== false