fix(model): preserve reasoning in provider fallback resolution (#29285)
* fix(model): preserve reasoning in provider fallback resolution
* test(model): cover fallback reasoning propagation
This commit is contained in:
@@ -200,6 +200,32 @@ describe("resolveModel", () => {
|
||||
expect(result.model?.maxTokens).toBe(32768);
|
||||
});
|
||||
|
||||
it("propagates reasoning from matching configured fallback model", () => {
|
||||
const cfg = {
|
||||
models: {
|
||||
providers: {
|
||||
custom: {
|
||||
baseUrl: "http://localhost:9000",
|
||||
models: [
|
||||
{
|
||||
...makeModel("model-a"),
|
||||
reasoning: false,
|
||||
},
|
||||
{
|
||||
...makeModel("model-b"),
|
||||
reasoning: true,
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
} as OpenClawConfig;
|
||||
|
||||
const result = resolveModel("custom", "model-b", "/tmp/agent", cfg);
|
||||
|
||||
expect(result.model?.reasoning).toBe(true);
|
||||
});
|
||||
|
||||
it("builds an openai-codex fallback for gpt-5.3-codex", () => {
|
||||
mockOpenAICodexTemplateModel();
|
||||
|
||||
|
||||
@@ -103,7 +103,7 @@ export function resolveModel(
|
||||
api: providerCfg?.api ?? "openai-responses",
|
||||
provider,
|
||||
baseUrl: providerCfg?.baseUrl,
|
||||
reasoning: false,
|
||||
reasoning: configuredModel?.reasoning ?? false,
|
||||
input: ["text"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow:
|
||||
|
||||
Reference in New Issue
Block a user