From ffdf4030c001c6adbcee12cf5861eaa01bc6ad39 Mon Sep 17 00:00:00 2001
From: "a.simakov"
Date: Fri, 2 Jan 2026 04:38:14 +0300
Subject: [PATCH 1/6] ollama plugin: add options

---
 go/plugins/ollama/ollama.go | 89 +++++++++++++++++++++++++++++++++++++
 1 file changed, 89 insertions(+)

diff --git a/go/plugins/ollama/ollama.go b/go/plugins/ollama/ollama.go
index 4eb4469673..8aacbdf65d 100644
--- a/go/plugins/ollama/ollama.go
+++ b/go/plugins/ollama/ollama.go
@@ -119,6 +119,27 @@ type ollamaMessage struct {
 	Content   string           `json:"content,omitempty"`
 	Images    []string         `json:"images,omitempty"`
 	ToolCalls []ollamaToolCall `json:"tool_calls,omitempty"`
+	Thinking  string           `json:"thinking,omitempty"`
+}
+
+type GenerateContentConfig struct {
+	// Thinking mode:
+	// ollama: true | false
+	// gpt-oss: "low" | "medium" | "high"
+	Think any
+
+	// Runtime options
+	Seed        *int
+	Temperature *float64
+	TopK        *int
+	TopP        *float64
+	MinP        *float64
+	Stop        []string
+	NumCtx      *int
+	NumPredict  *int
+
+	// Ollama-specific
+	KeepAlive string
 }
 
 // Ollama has two API endpoints, one with a chat interface and another with a generate response interface.
@@ -142,6 +163,8 @@ type ollamaChatRequest struct {
 	Stream   bool           `json:"stream"`
 	Format   string         `json:"format,omitempty"`
 	Tools    []ollamaTool   `json:"tools,omitempty"`
+	Think    any            `json:"think,omitempty"`
+	Options  map[string]any `json:"options,omitempty"`
 }
 
 type ollamaModelRequest struct {
@@ -184,6 +207,7 @@ type ollamaChatResponse struct {
 	Message struct {
 		Role      string           `json:"role"`
 		Content   string           `json:"content"`
+		Thinking  string           `json:"thinking"`
 		ToolCalls []ollamaToolCall `json:"tool_calls,omitempty"`
 	} `json:"message"`
 }
@@ -253,6 +277,7 @@ func (g *generator) generate(ctx context.Context, input *ai.ModelRequest, cb fun
 			Images: images,
 			Stream: stream,
 		}
+
 	} else {
 		var messages []*ollamaMessage
 		// Translate all messages to ollama message format.
@@ -263,12 +288,15 @@ func (g *generator) generate(ctx context.Context, input *ai.ModelRequest, cb fun
 			}
 			messages = append(messages, message)
 		}
+
 		chatReq := ollamaChatRequest{
 			Messages: messages,
 			Model:    g.model.Name,
 			Stream:   stream,
 			Images:   images,
 		}
+
+		applyGenerateConfigToOllama(&chatReq, input.Config)
 		if len(input.Tools) > 0 {
 			tools, err := convertTools(input.Tools)
 			if err != nil {
@@ -369,6 +397,55 @@ func (g *generator) generate(ctx context.Context, input *ai.ModelRequest, cb fun
 	}
 }
 
+func applyGenerateConfigToOllama(
+	req *ollamaChatRequest,
+	cfg any,
+) {
+	if cfg == nil {
+		return
+	}
+
+	switch cfg := cfg.(type) {
+	case GenerateContentConfig:
+		// Thinking
+		if cfg.Think != "" {
+			req.Think = cfg.Think
+		}
+
+		// Runtime options
+		opts := map[string]any{}
+
+		if cfg.Seed != nil {
+			opts["seed"] = *cfg.Seed
+		}
+		if cfg.Temperature != nil {
+			opts["temperature"] = *cfg.Temperature
+		}
+		if cfg.TopK != nil {
+			opts["top_k"] = *cfg.TopK
+		}
+		if cfg.TopP != nil {
+			opts["top_p"] = *cfg.TopP
+		}
+		if cfg.MinP != nil {
+			opts["min_p"] = *cfg.MinP
+		}
+		if len(cfg.Stop) > 0 {
+			opts["stop"] = cfg.Stop
+		}
+		if cfg.NumCtx != nil {
+			opts["num_ctx"] = *cfg.NumCtx
+		}
+		if cfg.NumPredict != nil {
+			opts["num_predict"] = *cfg.NumPredict
+		}
+
+		if len(opts) > 0 {
+			req.Options = opts
+		}
+	}
+}
+
 // convertTools converts Genkit tool definitions to Ollama tool format
 func convertTools(tools []*ai.ToolDefinition) ([]ollamaTool, error) {
 	ollamaTools := make([]ollamaTool, 0, len(tools))
@@ -417,6 +494,8 @@ func convertParts(role ai.Role, parts []*ai.Part) (*ollamaMessage, error) {
 				return nil, fmt.Errorf("failed to marshal tool response: %v", err)
 			}
 			contentBuilder.WriteString(string(outputJSON))
+		} else if part.IsReasoning() {
+
 		} else {
 			return nil, errors.New("unsupported content type")
 		}
@@ -439,6 +518,7 @@ func translateChatResponse(responseData []byte) (*ai.ModelResponse, error) {
 	if err := json.Unmarshal(responseData, &response); err != nil {
 		return nil, fmt.Errorf("failed to parse response JSON: %v", err)
 	}
+
 	modelResponse := &ai.ModelResponse{
 		FinishReason: ai.FinishReason("stop"),
 		Message: &ai.Message{
@@ -458,6 +538,10 @@ func translateChatResponse(responseData []byte) (*ai.ModelResponse, error) {
 		aiPart := ai.NewTextPart(response.Message.Content)
 		modelResponse.Message.Content = append(modelResponse.Message.Content, aiPart)
 	}
+	if response.Message.Thinking != "" {
+		aiPart := ai.NewReasoningPart(response.Message.Thinking, nil)
+		modelResponse.Message.Content = append(modelResponse.Message.Content, aiPart)
+	}
 
 	return modelResponse, nil
 }
@@ -504,6 +588,11 @@ func translateChatChunk(input string) (*ai.ModelResponseChunk, error) {
 		chunk.Content = append(chunk.Content, aiPart)
 	}
 
+	if response.Message.Thinking != "" {
+		aiPart := ai.NewReasoningPart(response.Message.Thinking, nil)
+		chunk.Content = append(chunk.Content, aiPart)
+	}
+
 	return chunk, nil
 }

From ff792327c323bf10e80c58cc88f69b26a6079f5f Mon Sep 17 00:00:00 2001
From: "a.simakov"
Date: Fri, 2 Jan 2026 05:04:14 +0300
Subject: [PATCH 2/6] fix: gemini-code-assist

---
 go/plugins/ollama/ollama.go | 22 +++++++++++++---------
 1 file changed, 13 insertions(+), 9 deletions(-)

diff --git a/go/plugins/ollama/ollama.go b/go/plugins/ollama/ollama.go
index 8aacbdf65d..5ed17e3ff2 100644
--- a/go/plugins/ollama/ollama.go
+++ b/go/plugins/ollama/ollama.go
@@ -157,14 +157,15 @@ raw: if true no formatting will be applied to the prompt. You may choose to use
 keep_alive: controls how long the model will stay loaded into memory following the request (default: 5m)
 */
 type ollamaChatRequest struct {
-	Messages []*ollamaMessage `json:"messages"`
-	Images   []string         `json:"images,omitempty"`
-	Model    string           `json:"model"`
-	Stream   bool             `json:"stream"`
-	Format   string           `json:"format,omitempty"`
-	Tools    []ollamaTool     `json:"tools,omitempty"`
-	Think    any              `json:"think,omitempty"`
-	Options  map[string]any   `json:"options,omitempty"`
+	Messages  []*ollamaMessage `json:"messages"`
+	Images    []string         `json:"images,omitempty"`
+	Model     string           `json:"model"`
+	Stream    bool             `json:"stream"`
+	Format    string           `json:"format,omitempty"`
+	Tools     []ollamaTool     `json:"tools,omitempty"`
+	Think     any              `json:"think,omitempty"`
+	Options   map[string]any   `json:"options,omitempty"`
+	KeepAlive string           `json:"keep_alive,omitempty"`
 }
 
 type ollamaModelRequest struct {
@@ -408,10 +409,13 @@ func applyGenerateConfigToOllama(
 	switch cfg := cfg.(type) {
 	case GenerateContentConfig:
 		// Thinking
-		if cfg.Think != "" {
+		if cfg.Think != nil {
 			req.Think = cfg.Think
 		}
 
+		if cfg.KeepAlive != "" {
+			req.KeepAlive = cfg.KeepAlive
+		}
 		// Runtime options
 		opts := map[string]any{}
 

From 2002c2e3999eac40bffb54808a4528a1e601bf52 Mon Sep 17 00:00:00 2001
From: "a.simakov"
Date: Fri, 2 Jan 2026 05:57:31 +0300
Subject: [PATCH 3/6] fix: gemini-code-assist

---
 go/plugins/ollama/ollama.go | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)

diff --git a/go/plugins/ollama/ollama.go b/go/plugins/ollama/ollama.go
index 5ed17e3ff2..aac95b4c4a 100644
--- a/go/plugins/ollama/ollama.go
+++ b/go/plugins/ollama/ollama.go
@@ -444,6 +444,21 @@ func applyGenerateConfigToOllama(
 			opts["num_predict"] = *cfg.NumPredict
 		}
 
+		if len(opts) > 0 {
+			req.Options = opts
+		}
+	case *ai.GenerationCommonConfig:
+		opts := map[string]any{}
+		b, err := json.Marshal(cfg)
+		if err != nil {
+			return
+		}
+
+		res := map[string]any{}
+		err = json.Unmarshal(b, &res)
+		if err != nil {
+			return
+		}
 		if len(opts) > 0 {
 			req.Options = opts
 		}
@@ -499,7 +514,7 @@ func convertParts(role ai.Role, parts []*ai.Part) (*ollamaMessage, error) {
 			}
 			contentBuilder.WriteString(string(outputJSON))
 		} else if part.IsReasoning() {
-
+			contentBuilder.WriteString(part.Text)
 		} else {
 			return nil, errors.New("unsupported content type")
 		}

From e56e5bba5a3aedf220c2871953f220753d995ea2 Mon Sep 17 00:00:00 2001
From: "a.simakov"
Date: Fri, 2 Jan 2026 06:07:46 +0300
Subject: [PATCH 4/6] fix: ai.GenerationCommonConfig

---
 go/plugins/ollama/ollama.go | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/go/plugins/ollama/ollama.go b/go/plugins/ollama/ollama.go
index aac95b4c4a..48b3165c4e 100644
--- a/go/plugins/ollama/ollama.go
+++ b/go/plugins/ollama/ollama.go
@@ -298,6 +298,7 @@ func (g *generator) generate(ctx context.Context, input *ai.ModelRequest, cb fun
 		}
 
 		applyGenerateConfigToOllama(&chatReq, input.Config)
+		fmt.Printf("TEST: %+v\n", chatReq)
 		if len(input.Tools) > 0 {
 			tools, err := convertTools(input.Tools)
 			if err != nil {
@@ -454,11 +455,11 @@ func applyGenerateConfigToOllama(
 			return
 		}
 
-		res := map[string]any{}
-		err = json.Unmarshal(b, &res)
+		err = json.Unmarshal(b, &opts)
 		if err != nil {
 			return
 		}
+
 		if len(opts) > 0 {
 			req.Options = opts
 		}

From 1b9330cde77eaf7ca8c30db8d2f1d0e715393ef9 Mon Sep 17 00:00:00 2001
From: "a.simakov"
Date: Sat, 3 Jan 2026 02:27:38 +0300
Subject: [PATCH 5/6] fix: Edits according to hugoaguirre's comments

---
 go/plugins/ollama/model.go      | 165 ++++++++++++++++++++++++++++++++
 go/plugins/ollama/model_test.go | 136 ++++++++++++++++++++++++++
 go/plugins/ollama/ollama.go     |  98 +------------------
 3 files changed, 304 insertions(+), 95 deletions(-)
 create mode 100644 go/plugins/ollama/model.go
 create mode 100644 go/plugins/ollama/model_test.go

diff --git a/go/plugins/ollama/model.go b/go/plugins/ollama/model.go
new file mode 100644
index 0000000000..f7ed39adc8
--- /dev/null
+++ b/go/plugins/ollama/model.go
@@ -0,0 +1,165 @@
+package ollama
+
+import (
+	"encoding/json"
+	"errors"
+
+	"github.com/firebase/genkit/go/ai"
+)
+
+var allowedOllamaOptions = map[string]string{
+	"seed":        "opts",
+	"temperature": "opts",
+	"top_k":       "opts",
+	"top_p":       "opts",
+	"min_p":       "opts",
+	"stop":        "opts",
+	"num_ctx":     "opts",
+	"num_predict": "opts",
+	"think":       "main",
+	"keep_alive":  "main",
+}
+
+// Ollama has two API endpoints, one with a chat interface and another with a generate response interface.
+// That's why we have multiple request interfaces for the Ollama API below.
+
+/*
+TODO: Support optional, advanced parameters:
+format: the format to return a response in. Currently the only accepted value is json
+options: additional model parameters listed in the documentation for the Modelfile such as temperature
+system: system message to (overrides what is defined in the Modelfile)
+template: the prompt template to use (overrides what is defined in the Modelfile)
+context: the context parameter returned from a previous request to /generate, this can be used to keep a short conversational memory
+stream: if false the response will be returned as a single response object, rather than a stream of objects
+raw: if true no formatting will be applied to the prompt. You may choose to use the raw parameter if you are specifying a full templated prompt in your request to the API
+keep_alive: controls how long the model will stay loaded into memory following the request (default: 5m)
+*/
+type ollamaChatRequest struct {
+	Messages  []*ollamaMessage `json:"messages"`
+	Images    []string         `json:"images,omitempty"`
+	Model     string           `json:"model"`
+	Stream    bool             `json:"stream"`
+	Format    string           `json:"format,omitempty"`
+	Tools     []ollamaTool     `json:"tools,omitempty"`
+	Think     any              `json:"think,omitempty"`
+	Options   map[string]any   `json:"options,omitempty"`
+	KeepAlive string           `json:"keep_alive,omitempty"`
+}
+
+func (o *ollamaChatRequest) ApplyOptions(cfg any) error {
+	if cfg == nil {
+		return nil
+	}
+
+	switch cfg := cfg.(type) {
+	case GenerateContentConfig:
+		o.applyGenerateContentConfig(&cfg)
+		return nil
+	case *GenerateContentConfig:
+		o.applyGenerateContentConfig(cfg)
+		return nil
+	case map[string]any:
+		return o.applyMapAny(cfg)
+	case *ai.GenerationCommonConfig:
+		return o.applyGenerationCommonConfig(cfg)
+	case ai.GenerationCommonConfig:
+		return o.applyGenerationCommonConfig(&cfg)
+	default:
+		return errors.New("unknown generation config")
+	}
+}
+func (o *ollamaChatRequest) applyGenerateContentConfig(cfg *GenerateContentConfig) {
+	if cfg == nil {
+		return
+	}
+
+	// thinking
+	if cfg.Think != nil {
+		o.Think = cfg.Think
+	}
+
+	// runtime options
+	opts := map[string]any{}
+
+	if cfg.Seed != nil {
+		opts["seed"] = *cfg.Seed
+	}
+	if cfg.Temperature != nil {
+		opts["temperature"] = *cfg.Temperature
+	}
+	if cfg.TopK != nil {
+		opts["top_k"] = *cfg.TopK
+	}
+	if cfg.TopP != nil {
+		opts["top_p"] = *cfg.TopP
+	}
+	if cfg.MinP != nil {
+		opts["min_p"] = *cfg.MinP
+	}
+	if len(cfg.Stop) > 0 {
+		opts["stop"] = cfg.Stop
+	}
+	if cfg.NumCtx != nil {
+		opts["num_ctx"] = *cfg.NumCtx
+	}
+	if cfg.NumPredict != nil {
+		opts["num_predict"] = *cfg.NumPredict
+	}
+
+	if len(opts) > 0 {
+		o.Options = opts
+	}
+}
+func (o *ollamaChatRequest) applyGenerationCommonConfig(cfg *ai.GenerationCommonConfig) error {
+	opts := map[string]any{}
+	b, err := json.Marshal(cfg)
+	if err != nil {
+		return err
+	}
+
+	err = json.Unmarshal(b, &opts)
+	if err != nil {
+		return err
+	}
+
+	if len(opts) > 0 {
+		o.Options = opts
+	}
+	return nil
+}
+func (o *ollamaChatRequest) applyMapAny(m map[string]any) error {
+	if len(m) == 0 {
+		return nil
+	}
+
+	opts := map[string]any{}
+
+	for k, v := range m {
+		typeVal, ok := allowedOllamaOptions[k]
+		if !ok {
+			return errors.New("unknown option: " + k)
+		}
+		switch typeVal {
+		case "main":
+			switch k {
+			case "think":
+				o.Think = v
+			case "keep_alive":
+				if s, ok := v.(string); ok {
+					o.KeepAlive = s
+				} else {
+					return errors.New("keep_alive must be string")
+				}
+			}
+
+		case "opts":
+			opts[k] = v
+		}
+
+	}
+
+	if len(opts) > 0 {
+		o.Options = opts
+	}
+	return nil
+}
diff --git a/go/plugins/ollama/model_test.go b/go/plugins/ollama/model_test.go
new file mode 100644
index 0000000000..1c1b24f433
--- /dev/null
+++ b/go/plugins/ollama/model_test.go
@@ -0,0 +1,136 @@
+package ollama
+
+import (
+	"reflect"
+	"testing"
+
+	"github.com/firebase/genkit/go/ai"
+)
+
+func TestOllamaChatRequest_ApplyOptions(t *testing.T) {
+	seed := 42
+	temp := 0.7
+
+	tests := []struct {
+		name    string
+		cfg     any
+		want    *ollamaChatRequest
+		wantErr bool
+	}{
+		{
+			name: "GenerateContentConfig pointer",
+			cfg: &GenerateContentConfig{
+				Seed:        &seed,
+				Temperature: &temp,
+				Think:       true,
+			},
+			want: &ollamaChatRequest{
+				Think: true,
+				Options: map[string]any{
+					"seed":        seed,
+					"temperature": temp,
+				},
+			},
+		},
+		{
+			name: "GenerateContentConfig value",
+			cfg: GenerateContentConfig{
+				Seed:  &seed,
+				Think: true,
+			},
+			want: &ollamaChatRequest{
+				Think: true,
+				Options: map[string]any{
+					"seed": seed,
+				},
+			},
+		},
+		{
+			name: "map[string]any with opts only",
+			cfg: map[string]any{
+				"temperature": 0.5,
+				"top_k":       40,
+			},
+			want: &ollamaChatRequest{
+				Options: map[string]any{
+					"temperature": 0.5,
+					"top_k":       40,
+				},
+			},
+		},
+		{
+			name: "map[string]any with main fields",
+			cfg: map[string]any{
+				"think":      true,
+				"keep_alive": "10m",
+			},
+			want: &ollamaChatRequest{
+				Think:     true,
+				KeepAlive: "10m",
+			},
+		},
+		{
+			name: "map[string]any mixed main and opts",
+			cfg: map[string]any{
+				"temperature": 0.9,
+				"think":       true,
+			},
+			want: &ollamaChatRequest{
+				Think: true,
+				Options: map[string]any{
+					"temperature": 0.9,
+				},
+			},
+		},
+		{
+			name: "map[string]any unknown option",
+			cfg: map[string]any{
+				"unknown": 123,
+			},
+			wantErr: true,
+		},
+		{
+			name: "GenerationCommonConfig pointer",
+			cfg: &ai.GenerationCommonConfig{
+				Temperature: temp,
+			},
+			want: &ollamaChatRequest{
+				Options: map[string]any{
+					"temperature": temp,
+				},
+			},
+		},
+		{
+			name: "nil config",
+			cfg:  nil,
+			want: &ollamaChatRequest{},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			req := &ollamaChatRequest{}
+
+			err := req.ApplyOptions(tt.cfg)
+
+			if tt.wantErr {
+				if err == nil {
+					t.Fatalf("expected error, got nil")
+				}
+				return
+			}
+
+			if err != nil {
+				t.Fatalf("unexpected error: %v", err)
+			}
+
+			if !reflect.DeepEqual(req, tt.want) {
+				t.Errorf(
+					"unexpected result:\nwant: %#v\n got: %#v",
+					tt.want,
+					req,
+				)
+			}
+		})
+	}
+}
diff --git a/go/plugins/ollama/ollama.go b/go/plugins/ollama/ollama.go
index 48b3165c4e..163ade9ed1 100644
--- a/go/plugins/ollama/ollama.go
+++ b/go/plugins/ollama/ollama.go
@@ -142,32 +142,6 @@ type GenerateContentConfig struct {
 	KeepAlive string
 }
 
-// Ollama has two API endpoints, one with a chat interface and another with a generate response interface.
-// That's why have multiple request interfaces for the Ollama API below.
-
-/*
-TODO: Support optional, advanced parameters:
-format: the format to return a response in. Currently the only accepted value is json
-options: additional model parameters listed in the documentation for the Modelfile such as temperature
-system: system message to (overrides what is defined in the Modelfile)
-template: the prompt template to use (overrides what is defined in the Modelfile)
-context: the context parameter returned from a previous request to /generate, this can be used to keep a short conversational memory
-stream: if false the response will be returned as a single response object, rather than a stream of objects
-raw: if true no formatting will be applied to the prompt. You may choose to use the raw parameter if you are specifying a full templated prompt in your request to the API
-keep_alive: controls how long the model will stay loaded into memory following the request (default: 5m)
-*/
-type ollamaChatRequest struct {
-	Messages  []*ollamaMessage `json:"messages"`
-	Images    []string         `json:"images,omitempty"`
-	Model     string           `json:"model"`
-	Stream    bool             `json:"stream"`
-	Format    string           `json:"format,omitempty"`
-	Tools     []ollamaTool     `json:"tools,omitempty"`
-	Think     any              `json:"think,omitempty"`
-	Options   map[string]any   `json:"options,omitempty"`
-	KeepAlive string           `json:"keep_alive,omitempty"`
-}
-
 type ollamaModelRequest struct {
 	System string   `json:"system,omitempty"`
 	Images []string `json:"images,omitempty"`
@@ -296,9 +270,10 @@ func (g *generator) generate(ctx context.Context, input *ai.ModelRequest, cb fun
 			Stream:   stream,
 			Images:   images,
 		}
+		if err := chatReq.ApplyOptions(input.Config); err != nil {
+			return nil, fmt.Errorf("failed to apply options: %v", err)
+		}
 
-		applyGenerateConfigToOllama(&chatReq, input.Config)
-		fmt.Printf("TEST: %+v\n", chatReq)
 		if len(input.Tools) > 0 {
 			tools, err := convertTools(input.Tools)
 			if err != nil {
@@ -399,73 +374,6 @@ func (g *generator) generate(ctx context.Context, input *ai.ModelRequest, cb fun
 	}
 }
 
-func applyGenerateConfigToOllama(
-	req *ollamaChatRequest,
-	cfg any,
-) {
-	if cfg == nil {
-		return
-	}
-
-	switch cfg := cfg.(type) {
-	case GenerateContentConfig:
-		// Thinking
-		if cfg.Think != nil {
-			req.Think = cfg.Think
-		}
-
-		if cfg.KeepAlive != "" {
-			req.KeepAlive = cfg.KeepAlive
-		}
-		// Runtime options
-		opts := map[string]any{}
-
-		if cfg.Seed != nil {
-			opts["seed"] = *cfg.Seed
-		}
-		if cfg.Temperature != nil {
-			opts["temperature"] = *cfg.Temperature
-		}
-		if cfg.TopK != nil {
-			opts["top_k"] = *cfg.TopK
-		}
-		if cfg.TopP != nil {
-			opts["top_p"] = *cfg.TopP
-		}
-		if cfg.MinP != nil {
-			opts["min_p"] = *cfg.MinP
-		}
-		if len(cfg.Stop) > 0 {
-			opts["stop"] = cfg.Stop
-		}
-		if cfg.NumCtx != nil {
-			opts["num_ctx"] = *cfg.NumCtx
-		}
-		if cfg.NumPredict != nil {
-			opts["num_predict"] = *cfg.NumPredict
-		}
-
-		if len(opts) > 0 {
-			req.Options = opts
-		}
-	case *ai.GenerationCommonConfig:
-		opts := map[string]any{}
-		b, err := json.Marshal(cfg)
-		if err != nil {
-			return
-		}
-
-		err = json.Unmarshal(b, &opts)
-		if err != nil {
-			return
-		}
-
-		if len(opts) > 0 {
-			req.Options = opts
-		}
-	}
-}
-
 // convertTools converts Genkit tool definitions to Ollama tool format
 func convertTools(tools []*ai.ToolDefinition) ([]ollamaTool, error) {
 	ollamaTools := make([]ollamaTool, 0, len(tools))

From ec3fa732c1b944f1b1f6dd6cf228feb274807481 Mon Sep 17 00:00:00 2001
From: "a.simakov"
Date: Sat, 3 Jan 2026 10:52:51 +0300
Subject: [PATCH 6/6] fix: Edits according to hugoaguirre's comments

---
 go/plugins/ollama/model.go      | 59 +++++++++++++++-------------
 go/plugins/ollama/model_test.go |  9 +----
 2 files changed, 28 insertions(+), 40 deletions(-)

diff --git a/go/plugins/ollama/model.go b/go/plugins/ollama/model.go
index f7ed39adc8..ee88a747e0 100644
--- a/go/plugins/ollama/model.go
+++ b/go/plugins/ollama/model.go
@@ -1,23 +1,14 @@
 package ollama
 
 import (
-	"encoding/json"
 	"errors"
 
 	"github.com/firebase/genkit/go/ai"
 )
 
-var allowedOllamaOptions = map[string]string{
-	"seed":        "opts",
-	"temperature": "opts",
-	"top_k":       "opts",
-	"top_p":       "opts",
-	"min_p":       "opts",
-	"stop":        "opts",
-	"num_ctx":     "opts",
-	"num_predict": "opts",
-	"think":       "main",
-	"keep_alive":  "main",
+var topLevelOpts = map[string]struct{}{
+	"think":      {},
+	"keep_alive": {},
 }
 
 // Ollama has two API endpoints, one with a chat interface and another with a generate response interface.
@@ -32,7 +23,6 @@ template: the prompt template to use (overrides what is defined in the Modelfile
 context: the context parameter returned from a previous request to /generate, this can be used to keep a short conversational memory
 stream: if false the response will be returned as a single response object, rather than a stream of objects
 raw: if true no formatting will be applied to the prompt. You may choose to use the raw parameter if you are specifying a full templated prompt in your request to the API
-keep_alive: controls how long the model will stay loaded into memory following the request (default: 5m)
 */
 type ollamaChatRequest struct {
 	Messages  []*ollamaMessage `json:"messages"`
@@ -111,36 +101,42 @@ func (o *ollamaChatRequest) applyGenerateContentConfig(cfg *GenerateContentConfi
 	}
 }
 func (o *ollamaChatRequest) applyGenerationCommonConfig(cfg *ai.GenerationCommonConfig) error {
-	opts := map[string]any{}
-	b, err := json.Marshal(cfg)
-	if err != nil {
-		return err
+	if cfg == nil {
+		return nil
 	}
 
-	err = json.Unmarshal(b, &opts)
-	if err != nil {
-		return err
+	opts := map[string]any{}
+
+	if cfg.MaxOutputTokens > 0 {
+		opts["num_predict"] = cfg.MaxOutputTokens
+	}
+	if len(cfg.StopSequences) > 0 {
+		opts["stop"] = cfg.StopSequences
+	}
+	if cfg.Temperature != 0 {
+		opts["temperature"] = cfg.Temperature
+	}
+	if cfg.TopK > 0 {
+		opts["top_k"] = cfg.TopK
+	}
+	if cfg.TopP > 0 {
+		opts["top_p"] = cfg.TopP
 	}
 
 	if len(opts) > 0 {
 		o.Options = opts
 	}
+
 	return nil
 }
+
 func (o *ollamaChatRequest) applyMapAny(m map[string]any) error {
 	if len(m) == 0 {
 		return nil
 	}
-
 	opts := map[string]any{}
-
 	for k, v := range m {
-		typeVal, ok := allowedOllamaOptions[k]
-		if !ok {
-			return errors.New("unknown option: " + k)
-		}
-		switch typeVal {
-		case "main":
+		if _, isTopLevel := topLevelOpts[k]; isTopLevel {
 			switch k {
 			case "think":
 				o.Think = v
 			case "keep_alive":
 				if s, ok := v.(string); ok {
 					o.KeepAlive = s
 				} else {
 					return errors.New("keep_alive must be string")
 				}
 			}
-
-		case "opts":
-			opts[k] = v
+			continue
 		}
-
+		opts[k] = v
 	}
 
 	if len(opts) > 0 {
 		o.Options = opts
 	}
+
 	return nil
 }
diff --git a/go/plugins/ollama/model_test.go b/go/plugins/ollama/model_test.go
index 1c1b24f433..a18df09770 100644
--- a/go/plugins/ollama/model_test.go
+++ b/go/plugins/ollama/model_test.go
@@ -59,7 +59,7 @@ func TestOllamaChatRequest_ApplyOptions(t *testing.T) {
 			},
 		},
 		{
-			name: "map[string]any with main fields",
+ name: "map[string]any with top level fields", cfg: map[string]any{ "think": true, "keep_alive": "10m", @@ -82,13 +82,6 @@ func TestOllamaChatRequest_ApplyOptions(t *testing.T) { }, }, }, - { - name: "map[string]any unknown option", - cfg: map[string]any{ - "unknown": 123, - }, - wantErr: true, - }, { name: "GenerationCommonConfig pointer", cfg: &ai.GenerationCommonConfig{