From 979ae95fbae05957c1f9147f5272c0aa9f2b98af Mon Sep 17 00:00:00 2001
From: vinyas-bharadwaj
Date: Sun, 5 Oct 2025 18:48:32 +0000
Subject: [PATCH 1/2] added ollama integration with the CLI

---
 cmd/cli/createMsg.go | 12 ++++++++++++
 cmd/cli/llmSetup.go  | 26 ++++++++++++++++----------
 2 files changed, 28 insertions(+), 10 deletions(-)

diff --git a/cmd/cli/createMsg.go b/cmd/cli/createMsg.go
index 63c8c26..f68c258 100644
--- a/cmd/cli/createMsg.go
+++ b/cmd/cli/createMsg.go
@@ -12,6 +12,7 @@ import (
 	"github.com/dfanso/commit-msg/internal/gemini"
 	"github.com/dfanso/commit-msg/internal/git"
 	"github.com/dfanso/commit-msg/internal/grok"
+	"github.com/dfanso/commit-msg/internal/ollama"
 	"github.com/dfanso/commit-msg/internal/stats"
 	"github.com/dfanso/commit-msg/pkg/types"
 	"github.com/pterm/pterm"
@@ -106,6 +107,17 @@ func CreateCommitMsg () {
 	case "Claude":
 		commitMsg, err = claude.GenerateCommitMessage(config, changes, apiKey)
 
+	case "Ollama":
+		url := os.Getenv("OLLAMA_URL")
+		if url == "" {
+			url = "http://localhost:11434/api/generate"
+		}
+		model := os.Getenv("OLLAMA_MODEL")
+		if model == "" {
+			model = "llama3:latest"
+		}
+		commitMsg, err = ollama.GenerateCommitMessage(config, changes, url, model)
+
 	default:
 		commitMsg, err = grok.GenerateCommitMessage(config, changes, apiKey)
 
diff --git a/cmd/cli/llmSetup.go b/cmd/cli/llmSetup.go
index e4f40e9..f170c65 100644
--- a/cmd/cli/llmSetup.go
+++ b/cmd/cli/llmSetup.go
@@ -11,7 +11,7 @@
 
 func SetupLLM() error {
 
-	providers := []string{"OpenAI", "Claude", "Gemini", "Grok"}
+	providers := []string{"OpenAI", "Claude", "Gemini", "Grok", "Ollama"}
 	prompt := promptui.Select{
 		Label: "Select LLM",
 		Items: providers,
@@ -22,15 +22,21 @@ func SetupLLM() error {
 		return fmt.Errorf("prompt failed")
 	}
 
-	apiKeyPrompt := promptui.Prompt{
-		Label: "Enter API Key",
-		Mask: '*',
-
-	}
-
-	apiKey, err := apiKeyPrompt.Run()
-	if err != nil {
-		return fmt.Errorf("failed to read API Key: %w", err)
+	var apiKey string
+
+	// Skip API key prompt for Ollama (local LLM)
+	if model != "Ollama" {
+		apiKeyPrompt := promptui.Prompt{
+			Label: "Enter API Key",
+			Mask: '*',
+		}
+
+		apiKey, err = apiKeyPrompt.Run()
+		if err != nil {
+			return fmt.Errorf("failed to read API Key: %w", err)
+		}
+	} else {
+		apiKey = "" // No API key needed for Ollama
 	}
 
 	LLMConfig := store.LLMProvider{

From daf551379f3342fdca45b7fe670e1c4838b8a705 Mon Sep 17 00:00:00 2001
From: vinyas-bharadwaj
Date: Sun, 5 Oct 2025 19:00:03 +0000
Subject: [PATCH 2/2] updated readme to have the correct default model

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 8be43f9..6eb5aaa 100644
--- a/README.md
+++ b/README.md
@@ -49,7 +49,7 @@ You can use **Google Gemini**, **Grok**, **Claude**, **ChatGPT**, or **Ollama**
 | `CLAUDE_API_KEY` | Your API key | Required if using Claude |
 | `OPENAI_API_KEY` | Your API key | Required if using ChatGPT |
 | `OLLAMA_URL` | URL (optional) | Ollama server URL (default: http://localhost:11434/api/generate) |
-| `OLLAMA_MODEL` | Model name (optional) | Ollama model to use (default: qwen2:0.5b) |
+| `OLLAMA_MODEL` | Model name (optional) | Ollama model to use (default: llama3) |
 ---
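
The new `internal/ollama` package the first commit dispatches to is not part of this patch; only the call site is visible. As a rough sketch of what `GenerateCommitMessage` presumably does, assuming Ollama's standard non-streaming `/api/generate` JSON API and a `*types.Config` parameter matching the other providers (the prompt wording and type names here are assumptions, not the repository's actual code):

```go
// Hypothetical sketch of the internal/ollama package the patch calls into.
// Only the call site is visible above, so the *types.Config parameter, the
// prompt wording, and the response handling are assumptions.
package ollama

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"

	"github.com/dfanso/commit-msg/pkg/types"
)

// generateRequest mirrors Ollama's non-streaming /api/generate payload.
type generateRequest struct {
	Model  string `json:"model"`
	Prompt string `json:"prompt"`
	Stream bool   `json:"stream"`
}

// generateResponse captures the only field needed from Ollama's reply.
type generateResponse struct {
	Response string `json:"response"`
}

// GenerateCommitMessage sends the collected changes to a local Ollama server
// and returns the generated commit message.
func GenerateCommitMessage(config *types.Config, changes string, url string, model string) (string, error) {
	payload, err := json.Marshal(generateRequest{
		Model:  model,
		Prompt: "Write a concise git commit message for these changes:\n\n" + changes,
		Stream: false, // request a single JSON object instead of a token stream
	})
	if err != nil {
		return "", err
	}

	resp, err := http.Post(url, "application/json", bytes.NewReader(payload))
	if err != nil {
		return "", fmt.Errorf("ollama request failed: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return "", fmt.Errorf("ollama returned status %d", resp.StatusCode)
	}

	var out generateResponse
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		return "", err
	}
	return out.Response, nil
}
```

If that matches the real package, the defaults wired into the patch mean a running `ollama serve` plus `ollama pull llama3` is the only local setup required; `OLLAMA_URL` and `OLLAMA_MODEL` override the endpoint and model.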