fix(model): preserve reasoning in provider fallback resolution (#29285)

* fix(model): preserve reasoning in provider fallback resolution

* test(model): cover fallback reasoning propagation
This commit is contained in:
Vincent Koc
2026-02-27 17:38:22 -08:00
committed by GitHub
parent 8090cb4c5e
commit 50aa6a43ed
2 changed files with 27 additions and 1 deletions

View File

@@ -200,6 +200,32 @@ describe("resolveModel", () => {
expect(result.model?.maxTokens).toBe(32768);
});
it("propagates reasoning from matching configured fallback model", () => {
const cfg = {
models: {
providers: {
custom: {
baseUrl: "http://localhost:9000",
models: [
{
...makeModel("model-a"),
reasoning: false,
},
{
...makeModel("model-b"),
reasoning: true,
},
],
},
},
},
} as OpenClawConfig;
const result = resolveModel("custom", "model-b", "/tmp/agent", cfg);
expect(result.model?.reasoning).toBe(true);
});
it("builds an openai-codex fallback for gpt-5.3-codex", () => {
mockOpenAICodexTemplateModel();

View File

@@ -103,7 +103,7 @@ export function resolveModel(
api: providerCfg?.api ?? "openai-responses",
provider,
baseUrl: providerCfg?.baseUrl,
-    reasoning: false,
+    reasoning: configuredModel?.reasoning ?? false,
input: ["text"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: