diff --git a/init.lua b/init.lua
index e7bfcba45e0..f48c4160e2e 100644
--- a/init.lua
+++ b/init.lua
@@ -105,7 +105,7 @@ vim.o.number = true
 vim.o.relativenumber = true
 
 -- Enable mouse mode, can be useful for resizing splits for example!
-vim.o.mouse = 'a'
+vim.o.mouse = 'r'
 
 -- Don't show the mode, since it's already in the status line
 vim.o.showmode = false
@@ -837,12 +837,16 @@ require('lazy').setup({
          --  <c-k>: Toggle signature help
          --
          -- See :h blink-cmp-config-keymap for defining your own keymap
-          preset = 'super-tab',
+          preset = 'enter',
          -- For more advanced Luasnip keymaps (e.g. selecting choice nodes, expansion) see:
          --    https://github.com/L3MON4D3/LuaSnip?tab=readme-ov-file#keymaps
        },
 
        appearance = {
+          -- format = function(entry, item)
+          --   item.menu = entry.source.name
+          --   return item
+          -- end,
          -- 'mono' (default) for 'Nerd Font Mono' or 'normal' for 'Nerd Font'
          -- Adjusts spacing to ensure icons are aligned
          nerd_font_variant = 'normal',
@@ -870,6 +874,24 @@ require('lazy').setup({
            Keyword = '󰻾',
            Constant = '󰏿',
 
+            Minuet = '󰙯',
+            -- Minuet = '🤖',
+
+            fallback = '󰜚',
+
+            -- And if I want to see whether I can
+            -- and once again
+
+            claude = '󰋦',
+            openai = '󱢆',
+            openai_compatible = '󱢆',
+            codestral = '󱎥',
+            gemini = '',
+            Groq = '',
+            Openrouter = '󱂇',
+            ollama = '󰳆',
+            Deepseek = '',
+
            Snippet = '󱄽',
            Color = '󰏘',
            File = '󰈔',
@@ -882,10 +904,38 @@ require('lazy').setup({
        },
 
        completion = {
+          trigger = { show_on_keyword = true },
+          list = { selection = { preselect = true } },
          -- By default, you may press `<c-space>` to show the documentation.
          -- Optionally, set `auto_show = true` to show the documentation after a delay.
-          documentation = { auto_show = false, auto_show_delay_ms = 500 },
+          documentation = { auto_show = true, auto_show_delay_ms = 500 },
          -- trigger = { prefetch_on_insert = false },
+          menu = {
+            draw = {
+              columns = {
+                { 'kind_icon', gap = 1 },
+                { 'label', 'label_description', gap = 1 },
+                { 'kind' },
+              },
+              components = {
+                kind = {
+                  text = function(ctx)
+                    return '[' .. ctx.kind .. ']'
+                  end,
+                  highlight = 'BlinkCmpKind',
+                },
+                -- source_icon = {
+                --   -- don't truncate source_icon
+                --   ellipsis = false,
+                --   text = function(ctx)
+                --     return ctx.source_name:lower()
+                --     -- return source_icons[ctx.source_name:lower()] or '󰜚'
+                --   end,
+                --   highlight = 'BlinkCmpSource',
+                -- },
+              },
+            },
+          },
        },
 
        sources = {
@@ -895,7 +945,7 @@ require('lazy').setup({
            copilot = {
              name = 'copilot',
              module = 'blink-cmp-copilot',
-              score_offset = 100,
+              score_offset = 200,
              async = true,
              transform_items = function(_, items)
                local CompletionItemKind = require('blink.cmp.types').CompletionItemKind
@@ -913,8 +963,17 @@ require('lazy').setup({
              async = true,
              -- Should match minuet.config.request_timeout * 1000,
              -- since minuet.config.request_timeout is in seconds
-              timeout_ms = 3000,
-              score_offset = 50, -- Gives minuet higher priority among suggestions
+              timeout_ms = 10000,
+              score_offset = 100, -- Gives minuet higher priority among suggestions
+              transform_items = function(_, items)
+                local K = require('blink.cmp.types').CompletionItemKind
+                local idx = #K + 1
+                K[idx] = 'Minuet'
+                for _, it in ipairs(items) do
+                  it.kind = idx
+                end
+                return items
+              end,
            },
          },
        },
diff --git a/lua/custom/plugins/minuet-ai.lua b/lua/custom/plugins/minuet-ai.lua
index 0615af07d4b..f3d67f4280f 100644
--- a/lua/custom/plugins/minuet-ai.lua
+++ b/lua/custom/plugins/minuet-ai.lua
@@ -7,46 +7,64 @@ return {
  },
  config = function()
    require('minuet').setup {
-      -- completion = { keyword_length = 1 },
-      -- n_completions = 1, -- recommend for local model for resource saving
-      -- I recommend you start with a small context window firstly, and gradually
-      -- increase it based on your local computing power.
-      -- context_window = 1024,
-      -- notify = 'debug',
      request_timeout = 30000,
      context_window = 768,
-      -- provider = 'openai_compatible',
-      -- provider_options = {
-      --   openai_compatible = {
-      --     api_key = 'PWD',
-      --     name = 'lmstudio',
-      --     end_point = 'http://m4max.local:1234/v1/chat/completions',
-      --     -- end_point = 'http://localhost:1234/v1/chat/completions',
-      --     model = 'qwen2.5-coder-3b-instruct',
-      --     -- model = 'deepseek-r1-distill-qwen-7b',
-      --     optional = {
-      --       max_tokens = 256,
-      --       top_p = 0.9,
-      --     },
-      --   },
-      -- },
-      provider = 'openai_fim_compatible',
-      n_completions = 1, -- recommend for local model for resource saving
-      -- I recommend you start with a small context window firstly, and gradually
-      -- increase it based on your local computing power.
+      -- notify = 'debug', -- Enabled debug notifications
+      provider = 'openai_compatible', -- Using standard OpenAI provider
      provider_options = {
-        openai_fim_compatible = {
+        openai = {
+          api_key = 'OPENAI_API_KEY',
+          name = 'OpenAI',
+          end_point = 'https://api.openai.com/v1/chat/completions', -- Standard OpenAI chat endpoint
+          model = 'gpt-3.5-turbo', -- Standard chat model
+          optional = {
+            max_tokens = 256,
+            top_p = 0.9,
+          },
+        },
+        openai_compatible = {
+          stream = true,
          api_key = 'TERM',
-          name = 'Ollama',
-          -- end_point = 'http://localhost:11434/v1/completions',
-          end_point = 'http://m4max.local:11434/v1/completions',
-          model = 'qwen2.5-coder:7b',
+          name = 'ollama',
+          end_point = 'http://m4max.local:1234/v1/chat/completions',
+          model = 'qwen2.5-coder-3b-instruct',
          optional = {
            max_tokens = 256,
            top_p = 0.9,
          },
        },
+        gemini = {
+          model = 'gemini-2.0-flash',
+          system = 'see [Prompt] section for the default value',
+          few_shots = 'see [Prompt] section for the default value',
+          chat_input = 'see [Prompt] section for the default value',
+          stream = true,
+          api_key = 'GEMINI_API_KEY',
+          end_point = 'https://generativelanguage.googleapis.com/v1beta/models',
+          optional = {
+            generationConfig = {
+              maxOutputTokens = 256,
+              -- When using `gemini-2.5-flash`, it is recommended to entirely
+              -- disable thinking for faster completion retrieval.
+              thinkingConfig = {
+                thinkingBudget = 0,
+              },
+            },
+            safetySettings = {
+              {
+                -- HARM_CATEGORY_HATE_SPEECH,
+                -- HARM_CATEGORY_HARASSMENT
+                -- HARM_CATEGORY_SEXUALLY_EXPLICIT
+                category = 'HARM_CATEGORY_DANGEROUS_CONTENT',
+                -- BLOCK_NONE
+                threshold = 'BLOCK_ONLY_HIGH',
+              },
+            },
+          },
+        },
      },
+      -- Other options like n_completions can be added here if needed for OpenAI
+      -- n_completions = 1,
    }
  end,
}
diff --git a/lua/custom/plugins/rustaceanvim.lua b/lua/custom/plugins/rustaceanvim.lua
index ad523815e60..385c03de152 100644
--- a/lua/custom/plugins/rustaceanvim.lua
+++ b/lua/custom/plugins/rustaceanvim.lua
@@ -2,37 +2,4 @@ return {
  'mrcjkb/rustaceanvim',
  version = '^6',
  lazy = false,
-  -- config = function(_, _)
-  --   vim.g.rustaceanvim = {
-  --     -- Plugin configuration
-  --     tools = {
-  --     },
-  --     -- LSP configuration
-  --     server = {
-  --       on_attach = function(client, buffer)
-  --         require("lsp_on_attach")(client, buffer)
-  --
-  --         -- Enable codelens refresh
-  --         vim.api.nvim_create_autocmd({ "BufEnter", "CursorHold", "InsertLeave" }, {
-  --           buffer = buffer,
-  --           callback = function()
-  --             vim.lsp.codelens.refresh({ bufnr = buffer })
-  --           end,
-  --         })
-  --       end,
-  --       default_settings = {
-  --         -- rust-analyzer language server configuration
-  --         ['rust-analyzer'] = {
-  --           assist = {
-  --             importMergeBehaviour = "full",
-  --           },
-  --         },
-  --       },
-  --
-  --     },
-  --     -- DAP configuration
-  --     dap = {
-  --     },
-  --   }
-  -- end
}
diff --git a/lua/kickstart/plugins/debug.lua b/lua/kickstart/plugins/debug.lua
index 753cb0cedd3..e36533a3bcf 100644
--- a/lua/kickstart/plugins/debug.lua
+++ b/lua/kickstart/plugins/debug.lua
@@ -95,6 +95,7 @@ return {
 
      ensure_installed = {
        -- Update this to ensure that you have the debuggers for the langs you want
        'delve',
+        'codelldb', -- Added for Rust debugging
      },
    }
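
Note on the new `transform_items` hook for minuet in init.lua: it mirrors the copilot recipe above it. `CompletionItemKind` from `blink.cmp.types` is a plain array of kind names, so appending a name and pointing `item.kind` at its index is enough to give minuet results their own `Minuet` kind (and the matching `kind_icons` glyph). One caveat: the hook runs on every completion response, and `#K + 1` appends a fresh 'Minuet' entry each time, so the table grows slowly. A minimal idempotent sketch of the same technique; only the helper name is invented here:

  -- Look up (or register exactly once) a custom completion-item kind.
  -- Hypothetical helper; relies only on CompletionItemKind being an
  -- array of kind names, as in the copilot snippet in the diff.
  local function custom_kind_index(name)
    local kinds = require('blink.cmp.types').CompletionItemKind
    for i, kind_name in ipairs(kinds) do
      if kind_name == name then
        return i -- already registered by an earlier request
      end
    end
    kinds[#kinds + 1] = name
    return #kinds
  end

  -- Drop-in body for the minuet transform_items hook:
  transform_items = function(_, items)
    local idx = custom_kind_index 'Minuet'
    for _, item in ipairs(items) do
      item.kind = idx -- retag so kind_icons['Minuet'] and the [kind] column apply
    end
    return items
  end,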
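Note on the minuet provider settings: minuet reads `api_key` as the name of an environment variable rather than the key itself, so `OPENAI_API_KEY` and `GEMINI_API_KEY` must be exported before starting Neovim; `api_key = 'TERM'` is the usual dummy for local endpoints that need no auth, since TERM is defined in any interactive shell. Also note a unit mismatch: per the comment kept in init.lua, `request_timeout` is in seconds, so `request_timeout = 30000` is over eight hours; `request_timeout = 10` would line up with the blink source's `timeout_ms = 10000`. A small sketch for catching a missing key at startup; `require_env` is a hypothetical helper, not a minuet API:

  -- Hypothetical helper: warn early when a provider's key variable is unset.
  local function require_env(name)
    if (os.getenv(name) or '') == '' then
      vim.notify(('minuet: $%s is not set'):format(name), vim.log.levels.WARN)
    end
    return name -- minuet expects the variable *name*, not its value
  end

  -- Usage inside provider_options, e.g.:
  --   api_key = require_env 'GEMINI_API_KEY',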
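Finally, the `codelldb` entry in debug.lua pairs with the rustaceanvim cleanup: recent rustaceanvim releases detect a mason-installed codelldb on their own, which is why the commented-out config block could go. If auto-detection ever fails, rustaceanvim exposes `get_codelldb_adapter`; the sketch below wires it up by hand, with mason paths that are assumptions based on its standard layout (the liblldb file name varies by OS):

  -- Fallback sketch only; normally rustaceanvim finds mason's codelldb itself.
  local mason = vim.fn.stdpath 'data' .. '/mason'
  vim.g.rustaceanvim = {
    dap = {
      adapter = require('rustaceanvim.config').get_codelldb_adapter(
        mason .. '/bin/codelldb',
        -- liblldb.dylib on macOS; assumed path, adjust for your platform
        mason .. '/packages/codelldb/extension/lldb/lib/liblldb.so'
      ),
    },
  }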