71 changes: 65 additions & 6 deletions init.lua
@@ -105,7 +105,7 @@ vim.o.number = true
vim.o.relativenumber = true

-- Enable mouse mode, can be useful for resizing splits for example!
vim.o.mouse = 'a'
vim.o.mouse = 'r'
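-- NOTE: 'r' only enables the mouse for hit-enter and more-prompt prompts;
-- use 'a' to enable it in all modes (see :h 'mouse')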

-- Don't show the mode, since it's already in the status line
vim.o.showmode = false
@@ -837,12 +837,16 @@ require('lazy').setup({
-- <c-k>: Toggle signature help
--
-- See :h blink-cmp-config-keymap for defining your own keymap
preset = 'super-tab',
preset = 'enter',
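-- 'enter' accepts the completion with <CR>; the previous 'super-tab'
-- preset accepted with <Tab>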

-- For more advanced Luasnip keymaps (e.g. selecting choice nodes, expansion) see:
-- https://github.com/L3MON4D3/LuaSnip?tab=readme-ov-file#keymaps
},
appearance = {
-- format = function(entry, item)
-- item.menu = entry.source.name
-- return item
-- end,
-- 'mono' (default) for 'Nerd Font Mono' or 'normal' for 'Nerd Font'
-- Adjusts spacing to ensure icons are aligned
nerd_font_variant = 'normal',
@@ -870,6 +874,24 @@ require('lazy').setup({
Keyword = '󰻾',
Constant = '󰏿',

Minuet = '󰙯',
-- Minuet = '🤖',

fallback = '󰜚',

-- And if I want to see whether I can
-- and once again

claude = '󰋦',
openai = '󱢆',
openai_compatible = '󱢆',
codestral = '󱎥',
gemini = '',
Groq = '',
Openrouter = '󱂇',
ollama = '󰳆',
Deepseek = '',

Snippet = '󱄽',
Color = '󰏘',
File = '󰈔',
@@ -882,10 +904,38 @@ require('lazy').setup({
},

completion = {
trigger = { show_on_keyword = true },
list = { selection = { preselect = true } },
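-- With preselect = true the first item is selected as soon as the menu
-- opens, so <CR> accepts it right away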
-- By default, you may press `<c-space>` to show the documentation.
-- Optionally, set `auto_show = true` to show the documentation after a delay.
documentation = { auto_show = false, auto_show_delay_ms = 500 },
documentation = { auto_show = true, auto_show_delay_ms = 500 },
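-- auto_show = true pops up the documentation automatically after the
-- 500 ms delay instead of waiting for <c-space>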
-- trigger = { prefetch_on_insert = false },
menu = {
draw = {
columns = {
{ 'kind_icon', gap = 1 },
{ 'label', 'label_description', gap = 1 },
{ 'kind' },
},
components = {
kind = {
text = function(ctx)
return '[' .. ctx.kind .. ']'
end,
highlight = 'BlinkCmpKind',
},
-- source_icon = {
-- -- don't truncate source_icon
-- ellipsis = false,
-- text = function(ctx)
-- return ctx.source_name:lower()
-- -- return source_icons[ctx.source_name:lower()] or '󰜚'
-- end,
-- highlight = 'BlinkCmpSource',
-- },
},
},
},
},

sources = {
@@ -895,7 +945,7 @@ require('lazy').setup({
copilot = {
name = 'copilot',
module = 'blink-cmp-copilot',
score_offset = 100,
score_offset = 200,
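-- A higher score_offset ranks copilot suggestions above other sources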
async = true,
transform_items = function(_, items)
local CompletionItemKind = require('blink.cmp.types').CompletionItemKind
@@ -913,8 +963,17 @@ require('lazy').setup({
async = true,
-- Should match minuet.config.request_timeout * 1000,
-- since minuet.config.request_timeout is in seconds
timeout_ms = 3000,
score_offset = 50, -- Gives minuet higher priority among suggestions
timeout_ms = 10000,
score_offset = 100, -- Gives minuet higher priority among suggestions
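-- Note: copilot's score_offset (200) still outranks minuet's (100)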
transform_items = function(_, items)
-- Mirrors the copilot transform above: register a custom 'Minuet'
-- completion-item kind so the Minuet icon defined in
-- appearance.kind_icons is shown for these items
local K = require('blink.cmp.types').CompletionItemKind
local idx = #K + 1
K[idx] = 'Minuet'
for _, it in ipairs(items) do
it.kind = idx
end
return items
end,
},
},
},
78 changes: 48 additions & 30 deletions lua/custom/plugins/minuet-ai.lua
@@ -7,46 +7,64 @@ return {
},
config = function()
require('minuet').setup {
-- completion = { keyword_length = 1 },
-- n_completions = 1, -- recommended for local models to save resources
-- I recommend starting with a small context window and gradually
-- increasing it based on your local computing power.
-- context_window = 1024,
-- notify = 'debug',
request_timeout = 10, -- in seconds; blink's minuet timeout_ms (10000) should equal this * 1000
context_window = 768,
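-- Amount of surrounding buffer text sent with each request; keep this
-- small for local models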
-- provider = 'openai_compatible',
-- provider_options = {
-- openai_compatible = {
-- api_key = 'PWD',
-- name = 'lmstudio',
-- end_point = 'http://m4max.local:1234/v1/chat/completions',
-- -- end_point = 'http://localhost:1234/v1/chat/completions',
-- model = 'qwen2.5-coder-3b-instruct',
-- -- model = 'deepseek-r1-distill-qwen-7b',
-- optional = {
-- max_tokens = 256,
-- top_p = 0.9,
-- },
-- },
-- },
provider = 'openai_fim_compatible',
n_completions = 1, -- recommended for local models to save resources
-- I recommend starting with a small context window and gradually
-- increasing it based on your local computing power.
-- notify = 'debug', -- Enable debug notifications
provider = 'openai_compatible', -- Use an OpenAI-compatible endpoint (here a local server)
provider_options = {
openai_fim_compatible = {
openai = {
api_key = 'OPENAI_API_KEY',
name = 'OpenAI',
end_point = 'https://api.openai.com/v1/chat/completions', -- Standard OpenAI chat endpoint
model = 'gpt-3.5-turbo', -- Standard chat model
optional = {
max_tokens = 256,
top_p = 0.9,
},
},
openai_compatible = {
stream = true,
api_key = 'TERM',
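-- minuet treats api_key as the name of an environment variable; a local
-- server needs no real key, so any variable that is always set (like
-- TERM) works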
name = 'Ollama',
-- end_point = 'http://localhost:11434/v1/completions',
end_point = 'http://m4max.local:11434/v1/completions',
model = 'qwen2.5-coder:7b',
name = 'ollama',
end_point = 'http://m4max.local:1234/v1/chat/completions',
model = 'qwen2.5-coder-3b-instruct',
optional = {
max_tokens = 256,
top_p = 0.9,
},
},
gemini = {
model = 'gemini-2.0-flash',
system = 'see [Prompt] section for the default value',
few_shots = 'see [Prompt] section for the default value',
chat_input = 'see [Prompt] section for the default value',
stream = true,
api_key = 'GEMINI_API_KEY',
end_point = 'https://generativelanguage.googleapis.com/v1beta/models',
optional = {
generationConfig = {
maxOutputTokens = 256,
-- When using `gemini-2.5-flash`, it is recommended to entirely
-- disable thinking for faster completion retrieval.
thinkingConfig = {
thinkingBudget = 0,
},
},
safetySettings = {
{
-- HARM_CATEGORY_HATE_SPEECH,
-- HARM_CATEGORY_HARASSMENT
-- HARM_CATEGORY_SEXUALLY_EXPLICIT
category = 'HARM_CATEGORY_DANGEROUS_CONTENT',
-- BLOCK_NONE
threshold = 'BLOCK_ONLY_HIGH',
},
},
},
},
},
-- Other options like n_completions can be added here if needed for OpenAI
-- n_completions = 1,
}
end,
}
33 changes: 0 additions & 33 deletions lua/custom/plugins/rustaceanvim.lua
@@ -2,37 +2,4 @@ return {
'mrcjkb/rustaceanvim',
version = '^6',
lazy = false,
-- config = function(_, _)
-- vim.g.rustaceanvim = {
-- -- Plugin configuration
-- tools = {
-- },
-- -- LSP configuration
-- server = {
-- on_attach = function(client, buffer)
-- require("lsp_on_attach")(client, buffer)
--
-- -- Enable codelens refresh
-- vim.api.nvim_create_autocmd({ "BufEnter", "CursorHold", "InsertLeave" }, {
-- buffer = buffer,
-- callback = function()
-- vim.lsp.codelens.refresh({ bufnr = buffer })
-- end,
-- })
-- end,
-- default_settings = {
-- -- rust-analyzer language server configuration
-- ['rust-analyzer'] = {
-- assist = {
-- importMergeBehaviour = "full",
-- },
-- },
-- },
--
-- },
-- -- DAP configuration
-- dap = {
-- },
-- }
-- end
}
1 change: 1 addition & 0 deletions lua/kickstart/plugins/debug.lua
@@ -95,6 +95,7 @@ return {
ensure_installed = {
-- Update this to ensure that you have the debuggers for the langs you want
'delve',
'codelldb', -- Added for Rust debugging
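-- rustaceanvim can pick up codelldb as its debug adapter; it also works
-- for C and C++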
},
}
