diff --git a/README.md b/README.md
index 5035074..08b4ff9 100644
--- a/README.md
+++ b/README.md
@@ -29,6 +29,7 @@ Looking to contribute? Check out:
 
 ✨ **AI-Powered Commit Messages** - Automatically generate meaningful commit messages
 🔄 **Multiple LLM Support** - Choose between Google Gemini, Grok, Claude, ChatGPT, or Ollama (local)
+🧪 **Dry Run Mode** - Preview prompts without making API calls
 📝 **Context-Aware** - Analyzes staged and unstaged changes
 📋 **Auto-Copy to Clipboard** - Generated messages are automatically copied for instant use
 🎛️ **Interactive Review Flow** - Accept, regenerate with new styles, or open the message in your editor before committing
@@ -112,6 +113,27 @@ Or if running from source:
 go run cmd/commit-msg/main.go .
 ```
 
+### Preview Mode (Dry Run)
+
+Preview what would be sent to the LLM without making an API call:
+
+```bash
+commit . --dry-run
+```
+
+This displays:
+- The LLM provider that would be used
+- The exact prompt that would be sent
+- File statistics and change summary
+- Estimated token count
+- All of the above, without consuming API credits or sharing data
+
+Perfect for:
+- 🐛 **Debugging** - See exactly what prompt is being sent
+- 💰 **Cost Control** - Review before consuming API credits
+- 🔒 **Privacy** - Verify what data would be shared with external APIs
+- 🧪 **Development** - Test prompt changes without API calls
+
 ### Setup LLM and API Key
 
 ```bash
diff --git a/cmd/cli/createMsg.go b/cmd/cli/createMsg.go
index 14bc4c5..d32713e 100644
--- a/cmd/cli/createMsg.go
+++ b/cmd/cli/createMsg.go
@@ -26,7 +26,8 @@ import (
 
 // CreateCommitMsg launches the interactive flow for reviewing, regenerating,
 // editing, and accepting AI-generated commit messages in the current repo.
-func CreateCommitMsg() {
+// If dryRun is true, it displays the prompt without making an API call.
+func CreateCommitMsg(dryRun bool) {
 	// Validate COMMIT_LLM and required API keys
 	useLLM, err := store.DefaultLLMKey()
 	if err != nil {
@@ -94,6 +95,13 @@
 		return
 	}
 
+	// Handle dry-run mode: display what would be sent to the LLM without making an API call
+	if dryRun {
+		pterm.Println()
+		displayDryRunInfo(commitLLM, config, changes, apiKey)
+		return
+	}
+
 	pterm.Println()
 	spinnerGenerating, err := pterm.DefaultSpinner.
 		WithSequence("⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏").
@@ -226,6 +234,22 @@ var (
 	}
 	errSelectionCancelled = errors.New("selection cancelled")
 )
+// resolveOllamaConfig returns the URL and model for Ollama, using environment variables as fallbacks
+func resolveOllamaConfig(apiKey string) (url, model string) {
+	url = apiKey
+	if strings.TrimSpace(url) == "" {
+		url = os.Getenv("OLLAMA_URL")
+		if url == "" {
+			url = "http://localhost:11434/api/generate"
+		}
+	}
+	model = os.Getenv("OLLAMA_MODEL")
+	if model == "" {
+		model = "llama3.1"
+	}
+	return url, model
+}
+
 
 func generateMessage(provider types.LLMProvider, config *types.Config, changes string, apiKey string, opts *types.GenerationOptions) (string, error) {
 	switch provider {
@@ -238,17 +262,7 @@
 	case types.ProviderGroq:
 		return groq.GenerateCommitMessage(config, changes, apiKey, opts)
 	case types.ProviderOllama:
-		url := apiKey
-		if strings.TrimSpace(url) == "" {
-			url = os.Getenv("OLLAMA_URL")
-			if url == "" {
-				url = "http://localhost:11434/api/generate"
-			}
-		}
-		model := os.Getenv("OLLAMA_MODEL")
-		if model == "" {
-			model = "llama3.1"
-		}
+		url, model := resolveOllamaConfig(apiKey)
 		return ollama.GenerateCommitMessage(config, changes, url, model, opts)
 	default:
 		return grok.GenerateCommitMessage(config, changes, apiKey, opts)
@@ -425,3 +439,90 @@ func displayProviderError(provider types.LLMProvider, err error) {
 		pterm.Error.Printf("LLM API error: %v\n", err)
 	}
 }
+
+// displayDryRunInfo shows what would be sent to the LLM without making an API call
+func displayDryRunInfo(provider types.LLMProvider, config *types.Config, changes string, apiKey string) {
+	pterm.DefaultHeader.WithFullWidth().
+		WithBackgroundStyle(pterm.NewStyle(pterm.BgBlue)).
+		WithTextStyle(pterm.NewStyle(pterm.FgWhite, pterm.Bold)).
+		Println("DRY RUN MODE - Preview Only")
+
+	pterm.Println()
+	pterm.Info.Println("This is a dry-run. No API call will be made to the LLM provider.")
+	pterm.Println()
+
+	// Display provider information
+	pterm.DefaultSection.Println("LLM Provider Configuration")
+	providerInfo := [][]string{
+		{"Provider", provider.String()},
+	}
+
+	// Add provider-specific info
+	switch provider {
+	case types.ProviderOllama:
+		url, model := resolveOllamaConfig(apiKey)
+		providerInfo = append(providerInfo, []string{"Ollama URL", url})
+		providerInfo = append(providerInfo, []string{"Model", model})
+	case types.ProviderGrok:
+		providerInfo = append(providerInfo, []string{"API Endpoint", config.GrokAPI})
+		providerInfo = append(providerInfo, []string{"API Key", maskAPIKey(apiKey)})
+	default:
+		providerInfo = append(providerInfo, []string{"API Key", maskAPIKey(apiKey)})
+	}
+
+	pterm.DefaultTable.WithHasHeader(false).WithData(providerInfo).Render()
+
+	pterm.Println()
+
+	// Build and display the prompt
+	opts := &types.GenerationOptions{Attempt: 1}
+	prompt := types.BuildCommitPrompt(changes, opts)
+
+	pterm.DefaultSection.Println("Prompt That Would Be Sent")
+	pterm.Println()
+
+	// Display prompt in a box
+	promptBox := pterm.DefaultBox.
+		WithTitle("Full LLM Prompt").
+		WithTitleTopCenter().
+		WithBoxStyle(pterm.NewStyle(pterm.FgCyan))
+	promptBox.Println(prompt)
+
+	pterm.Println()
+
+	// Display changes statistics
+	pterm.DefaultSection.Println("Changes Summary")
+	linesCount := len(strings.Split(changes, "\n"))
+	charsCount := len(changes)
+
+	statsData := [][]string{
+		{"Total Lines", fmt.Sprintf("%d", linesCount)},
+		{"Total Characters", fmt.Sprintf("%d", charsCount)},
+		{"Prompt Size (approx)", fmt.Sprintf("%d tokens", estimateTokens(prompt))},
+	}
+	pterm.DefaultTable.WithHasHeader(false).WithData(statsData).Render()
+
+	pterm.Println()
+	pterm.Success.Println("Dry-run complete. To generate the actual commit message, run again without the --dry-run flag.")
+}
+
+// maskAPIKey masks the API key for display purposes
+func maskAPIKey(apiKey string) string {
+	if len(apiKey) == 0 {
+		return "[NOT SET]"
+	}
+	// Don't mask URLs (used by Ollama)
+	if strings.HasPrefix(apiKey, "http://") || strings.HasPrefix(apiKey, "https://") {
+		return apiKey
+	}
+	if len(apiKey) <= 8 {
+		return strings.Repeat("*", len(apiKey))
+	}
+	// Show first 4 and last 4 characters
+	return apiKey[:4] + strings.Repeat("*", len(apiKey)-8) + apiKey[len(apiKey)-4:]
+}
+
+// estimateTokens provides a rough estimate of token count (1 token ≈ 4 characters)
+func estimateTokens(text string) int {
+	return len(text) / 4
+}
diff --git a/cmd/cli/root.go b/cmd/cli/root.go
index 9b61366..fe6b258 100644
--- a/cmd/cli/root.go
+++ b/cmd/cli/root.go
@@ -53,7 +53,11 @@ var creatCommitMsg = &cobra.Command{
 	Use:   ".",
 	Short: "Create Commit Message",
 	RunE: func(cmd *cobra.Command, args []string) error {
-		CreateCommitMsg()
+		dryRun, err := cmd.Flags().GetBool("dry-run")
+		if err != nil {
+			return err
+		}
+		CreateCommitMsg(dryRun)
 		return nil
 	},
 }
@@ -68,6 +72,10 @@ func init() {
 	// Cobra also supports local flags, which will only run
 	// when this action is called directly.
 	rootCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
+
+	// Add --dry-run flag to the commit command
+	creatCommitMsg.Flags().Bool("dry-run", false, "Preview the prompt that would be sent to the LLM without making an API call")
+
 	rootCmd.AddCommand(creatCommitMsg)
 	rootCmd.AddCommand(llmCmd)
 	llmCmd.AddCommand(llmSetupCmd)
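
A quick way to pin down the behavior of the new helpers is an in-package test. The sketch below is hypothetical and not part of this patch; it assumes `maskAPIKey` and `estimateTokens` land in package `cli` exactly as defined above, and the file name is illustrative.

```go
// cmd/cli/createMsg_test.go (hypothetical file name)
package cli

import "testing"

// Table-driven check of maskAPIKey against the rules in the patch:
// empty -> "[NOT SET]", URLs pass through unmasked, keys of 8 chars or
// fewer are fully masked, longer keys keep the first and last 4 chars.
func TestMaskAPIKey(t *testing.T) {
	cases := []struct{ name, in, want string }{
		{"empty key", "", "[NOT SET]"},
		{"short key fully masked", "abc123", "******"},
		{"long key keeps first and last 4", "sk-1234567890abcd", "sk-1*********abcd"},
		{"ollama URL passed through", "http://localhost:11434/api/generate", "http://localhost:11434/api/generate"},
	}
	for _, c := range cases {
		if got := maskAPIKey(c.in); got != c.want {
			t.Errorf("%s: maskAPIKey(%q) = %q, want %q", c.name, c.in, got, c.want)
		}
	}
}

// estimateTokens uses the rough 1 token ≈ 4 characters heuristic,
// so an 8-character string should estimate to 2 tokens.
func TestEstimateTokens(t *testing.T) {
	if got := estimateTokens("abcdefgh"); got != 2 {
		t.Errorf("estimateTokens(%q) = %d, want 2", "abcdefgh", got)
	}
}
```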