From 4292c2f21534e07724fef3c312ddaf8e61d895cd Mon Sep 17 00:00:00 2001
From: pika
Date: Tue, 25 Jun 2024 22:25:51 +0200
Subject: [PATCH] added gen.nvim llm

---
 lua/pika/core/keymaps.lua   |  3 +++
 lua/pika/plugins/genvim.lua | 34 ++++++++++++++++++++++++++++++++++
 2 files changed, 37 insertions(+)
 create mode 100644 lua/pika/plugins/genvim.lua

diff --git a/lua/pika/core/keymaps.lua b/lua/pika/core/keymaps.lua
index b88730a..1a4268d 100644
--- a/lua/pika/core/keymaps.lua
+++ b/lua/pika/core/keymaps.lua
@@ -89,3 +89,6 @@ map("n", "<leader>L", ":LoremIpsum ")
 -- window management
 map("n", "<leader>sv", "<C-w>v", { desc = "Split window vertically" }) -- split window vertically
 map("n", "<leader>sh", "<C-w>s", { desc = "Split window horizontally" }) -- split window horizontally
+
+-- ─< genvim keymaps (ai enhanced, coding!) >───────────────────────────────────────────
+map({ "n", "v" }, "<leader>a", ":Gen<CR>")
diff --git a/lua/pika/plugins/genvim.lua b/lua/pika/plugins/genvim.lua
new file mode 100644
index 0000000..de77b6a
--- /dev/null
+++ b/lua/pika/plugins/genvim.lua
@@ -0,0 +1,34 @@
+return {
+	{
+		"David-Kunz/gen.nvim",
+		opts = {
+			model = "mistral", -- The default model to use.
+			host = "10.0.0.4", -- The host running the Ollama service.
+			port = "11434", -- The port on which the Ollama service is listening.
+			quit_map = "q", -- set keymap to close the response window
+			retry_map = "<c-r>", -- set keymap to re-send the current prompt
+			init = function(options)
+				pcall(io.popen, "ollama serve > /dev/null 2>&1 &")
+			end,
+			-- Function to initialize Ollama
+			command = function(options)
+				local body = { model = options.model, stream = true }
+				return "curl --silent --no-buffer -X POST http://"
+					.. options.host
+					.. ":"
+					.. options.port
+					.. "/api/chat -d $body"
+			end,
+			-- The command for the Ollama service. You can use placeholders $prompt, $model and $body (shellescaped).
+			-- This can also be a command string.
+			-- The executed command must return a JSON object with { response, context }
+			-- (context property is optional).
+			-- list_models = '<omitted lua function>', -- Retrieves a list of model names
+			display_mode = "float", -- The display mode. Can be "float" or "split" or "horizontal-split".
+			show_prompt = false, -- Shows the prompt submitted to Ollama.
+			show_model = true, -- Displays which model you are using at the beginning of your chat session.
+			no_auto_close = false, -- Never closes the window automatically.
+			debug = false, -- Prints errors and the command which is run.
+		},
+	},
+}
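
Note (not part of the patch): beyond the opts above, gen.nvim also exposes a
prompts table for registering custom prompts at runtime. A minimal sketch,
assuming the plugin is already loaded; the prompt name "Fix_Grammar" and its
prompt text are illustrative choices, while $text is gen.nvim's documented
placeholder for the current selection:

	-- e.g. in a config() hook or any file sourced after gen.nvim loads
	require("gen").prompts["Fix_Grammar"] = {
		-- $text is substituted with the visual selection before sending
		prompt = "Fix the grammar in the following text, output only the result:\n$text",
		replace = true, -- write the model's answer over the selection
	}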
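
Usage: with the new mapping, <leader>a in normal or visual mode runs :Gen and
opens gen.nvim's prompt picker. A specific built-in prompt such as Review_Code
can also be bound directly; a sketch reusing the map() helper from keymaps.lua
(the <leader>ar key is an illustrative choice):

	-- hypothetical extra binding, not part of this patch
	map("v", "<leader>ar", ":Gen Review_Code<CR>")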