Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
22 changes: 22 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@ Looking to contribute? Check out:

✨ **AI-Powered Commit Messages** - Automatically generate meaningful commit messages
🔄 **Multiple LLM Support** - Choose between Google Gemini, Grok, Claude, ChatGPT, or Ollama (local)
🧪 **Dry Run Mode** - Preview prompts without making API calls
📝 **Context-Aware** - Analyzes staged and unstaged changes
📋 **Auto-Copy to Clipboard** - Generated messages are automatically copied for instant use
🎛️ **Interactive Review Flow** - Accept, regenerate with new styles, or open the message in your editor before committing
Expand Down Expand Up @@ -112,6 +113,27 @@ Or if running from source:
go run cmd/commit-msg/main.go .
```

### Preview Mode (Dry Run)

Preview what would be sent to the LLM without making an API call:

```bash
commit . --dry-run
```

This displays:
- The LLM provider that would be used
- The exact prompt that would be sent
- File statistics and change summary
- Estimated token count

All of this happens without consuming API credits or sending any data to external services.

Perfect for:
- 🐛 **Debugging** - See exactly what prompt is being sent
- 💰 **Cost Control** - Review before consuming API credits
- 🔒 **Privacy** - Verify what data would be shared with external APIs
- 🧪 **Development** - Test prompt changes without API calls

### Setup LLM and API Key

```bash
Expand Down
125 changes: 113 additions & 12 deletions cmd/cli/createMsg.go
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,8 @@ import (

// CreateCommitMsg launches the interactive flow for reviewing, regenerating,
// editing, and accepting AI-generated commit messages in the current repo.
func CreateCommitMsg() {
// If dryRun is true, it displays the prompt without making an API call.
func CreateCommitMsg(dryRun bool) {
// Validate COMMIT_LLM and required API keys
useLLM, err := store.DefaultLLMKey()
if err != nil {
Expand Down Expand Up @@ -94,6 +95,13 @@ func CreateCommitMsg() {
return
}

// Handle dry-run mode: display what would be sent to LLM without making API call
if dryRun {
pterm.Println()
displayDryRunInfo(commitLLM, config, changes, apiKey)
return
}

pterm.Println()
spinnerGenerating, err := pterm.DefaultSpinner.
WithSequence("⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏").
Expand Down Expand Up @@ -226,6 +234,22 @@ var (
}
errSelectionCancelled = errors.New("selection cancelled")
)
// resolveOllamaConfig returns the endpoint URL and model name to use for
// Ollama. The stored "API key" slot doubles as the endpoint URL; when it is
// blank, the OLLAMA_URL environment variable is consulted, and finally a
// localhost default is used. The model comes from OLLAMA_MODEL, defaulting
// to "llama3.1". All candidate values are whitespace-trimmed so that a
// value consisting only of spaces cannot bypass the fallbacks (the original
// only trimmed the stored URL, not the environment values).
func resolveOllamaConfig(apiKey string) (url, model string) {
	url = strings.TrimSpace(apiKey)
	if url == "" {
		url = strings.TrimSpace(os.Getenv("OLLAMA_URL"))
	}
	if url == "" {
		url = "http://localhost:11434/api/generate"
	}
	model = strings.TrimSpace(os.Getenv("OLLAMA_MODEL"))
	if model == "" {
		model = "llama3.1"
	}
	return url, model
}


func generateMessage(provider types.LLMProvider, config *types.Config, changes string, apiKey string, opts *types.GenerationOptions) (string, error) {
switch provider {
Expand All @@ -238,17 +262,7 @@ func generateMessage(provider types.LLMProvider, config *types.Config, changes s
case types.ProviderGroq:
return groq.GenerateCommitMessage(config, changes, apiKey, opts)
case types.ProviderOllama:
url := apiKey
if strings.TrimSpace(url) == "" {
url = os.Getenv("OLLAMA_URL")
if url == "" {
url = "http://localhost:11434/api/generate"
}
}
model := os.Getenv("OLLAMA_MODEL")
if model == "" {
model = "llama3.1"
}
url, model := resolveOllamaConfig(apiKey)
return ollama.GenerateCommitMessage(config, changes, url, model, opts)
default:
return grok.GenerateCommitMessage(config, changes, apiKey, opts)
Expand Down Expand Up @@ -425,3 +439,90 @@ func displayProviderError(provider types.LLMProvider, err error) {
pterm.Error.Printf("LLM API error: %v\n", err)
}
}

// displayDryRunInfo shows what would be sent to the LLM without making an API call.
// It renders, in order: a header banner, the provider configuration table,
// the exact prompt a real run would send, and size statistics for the changes.
func displayDryRunInfo(provider types.LLMProvider, config *types.Config, changes string, apiKey string) {
	banner := pterm.DefaultHeader.WithFullWidth().
		WithBackgroundStyle(pterm.NewStyle(pterm.BgBlue)).
		WithTextStyle(pterm.NewStyle(pterm.FgWhite, pterm.Bold))
	banner.Println("DRY RUN MODE - Preview Only")

	pterm.Println()
	pterm.Info.Println("This is a dry-run. No API call will be made to the LLM provider.")
	pterm.Println()

	// Provider table: the provider name first, then provider-specific rows.
	pterm.DefaultSection.Println("LLM Provider Configuration")
	rows := [][]string{{"Provider", provider.String()}}
	switch provider {
	case types.ProviderOllama:
		// Ollama keeps its endpoint in the key slot; resolve URL and model.
		endpoint, model := resolveOllamaConfig(apiKey)
		rows = append(rows,
			[]string{"Ollama URL", endpoint},
			[]string{"Model", model},
		)
	case types.ProviderGrok:
		rows = append(rows,
			[]string{"API Endpoint", config.GrokAPI},
			[]string{"API Key", maskAPIKey(apiKey)},
		)
	default:
		rows = append(rows, []string{"API Key", maskAPIKey(apiKey)})
	}
	pterm.DefaultTable.WithHasHeader(false).WithData(rows).Render()

	pterm.Println()

	// Reconstruct the exact prompt a real run would send on its first attempt.
	genOpts := &types.GenerationOptions{Attempt: 1}
	prompt := types.BuildCommitPrompt(changes, genOpts)

	pterm.DefaultSection.Println("Prompt That Would Be Sent")
	pterm.Println()
	pterm.DefaultBox.
		WithTitle("Full LLM Prompt").
		WithTitleTopCenter().
		WithBoxStyle(pterm.NewStyle(pterm.FgCyan)).
		Println(prompt)

	pterm.Println()

	// Size statistics for the staged/unstaged change text and the prompt.
	pterm.DefaultSection.Println("Changes Summary")
	stats := [][]string{
		{"Total Lines", fmt.Sprintf("%d", len(strings.Split(changes, "\n")))},
		{"Total Characters", fmt.Sprintf("%d", len(changes))},
		{"Prompt Size (approx)", fmt.Sprintf("%d tokens", estimateTokens(prompt))},
	}
	pterm.DefaultTable.WithHasHeader(false).WithData(stats).Render()

	pterm.Println()
	pterm.Success.Println("Dry-run complete. To generate actual commit message, run without --dry-run flag.")
}

// maskAPIKey masks the API key for display purposes.
// Rules: an empty key renders as a placeholder, URLs (Ollama endpoints)
// are shown verbatim, short keys are fully starred, and longer keys keep
// their first and last four characters visible.
func maskAPIKey(apiKey string) string {
	switch {
	case apiKey == "":
		return "[NOT SET]"
	case strings.HasPrefix(apiKey, "http://"), strings.HasPrefix(apiKey, "https://"):
		// Ollama stores its endpoint URL in the key slot; nothing secret here.
		return apiKey
	case len(apiKey) <= 8:
		// Too short to safely reveal any part of it.
		return strings.Repeat("*", len(apiKey))
	}
	hiddenLen := len(apiKey) - 8
	return apiKey[:4] + strings.Repeat("*", hiddenLen) + apiKey[len(apiKey)-4:]
}

// estimateTokens provides a rough estimate of token count (1 token ≈ 4 characters)
func estimateTokens(text string) int {
return len(text) / 4
}
10 changes: 9 additions & 1 deletion cmd/cli/root.go
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,11 @@ var creatCommitMsg = &cobra.Command{
Use: ".",
Short: "Create Commit Message",
RunE: func(cmd *cobra.Command, args []string) error {
CreateCommitMsg()
dryRun, err := cmd.Flags().GetBool("dry-run")
if err != nil {
return err
}
CreateCommitMsg(dryRun)
return nil
},
}
Expand All @@ -68,6 +72,10 @@ func init() {
// Cobra also supports local flags, which will only run
// when this action is called directly.
rootCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")

// Add --dry-run flag to the commit command
creatCommitMsg.Flags().Bool("dry-run", false, "Preview the prompt that would be sent to the LLM without making an API call")

rootCmd.AddCommand(creatCommitMsg)
rootCmd.AddCommand(llmCmd)
llmCmd.AddCommand(llmSetupCmd)
Expand Down
Loading