From 40e4ba0c4f0a07b2158c02e32b70c8dcf608ffa2 Mon Sep 17 00:00:00 2001
From: gptlang
Date: Sat, 27 Jul 2024 00:58:56 +0800
Subject: [PATCH] use gpt-4o by default!

---
 README.md                    | 2 +-
 lua/CopilotChat/config.lua   | 2 +-
 lua/CopilotChat/copilot.lua  | 2 +-
 lua/CopilotChat/init.lua     | 3 +++
 lua/CopilotChat/tiktoken.lua | 2 +-
 5 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index 077b326b..bcc97b98 100644
--- a/README.md
+++ b/README.md
@@ -198,7 +198,7 @@ Also see [here](/lua/CopilotChat/config.lua):
   allow_insecure = false, -- Allow insecure server connections
 
   system_prompt = prompts.COPILOT_INSTRUCTIONS, -- System prompt to use
-  model = 'gpt-4', -- GPT model to use, 'gpt-3.5-turbo' or 'gpt-4'
+  model = 'gpt-4o', -- GPT model to use, 'gpt-3.5-turbo', 'gpt-4', or 'gpt-4o'
   temperature = 0.1, -- GPT temperature
 
   question_header = '## User ', -- Header to use for user questions
diff --git a/lua/CopilotChat/config.lua b/lua/CopilotChat/config.lua
index c962264e..5fa09cd7 100644
--- a/lua/CopilotChat/config.lua
+++ b/lua/CopilotChat/config.lua
@@ -83,7 +83,7 @@
   allow_insecure = false, -- Allow insecure server connections
 
   system_prompt = prompts.COPILOT_INSTRUCTIONS, -- System prompt to use
-  model = 'gpt-4', -- GPT model to use, 'gpt-3.5-turbo' or 'gpt-4'
+  model = 'gpt-4o-2024-05-13', -- GPT model to use, 'gpt-3.5-turbo', 'gpt-4', or 'gpt-4o-2024-05-13'
   temperature = 0.1, -- GPT temperature
 
   question_header = '## User ', -- Header to use for user questions
diff --git a/lua/CopilotChat/copilot.lua b/lua/CopilotChat/copilot.lua
index 01031f98..eebf1314 100644
--- a/lua/CopilotChat/copilot.lua
+++ b/lua/CopilotChat/copilot.lua
@@ -353,7 +353,7 @@ function Copilot:ask(prompt, opts)
   local start_row = opts.start_row or 0
   local end_row = opts.end_row or 0
   local system_prompt = opts.system_prompt or prompts.COPILOT_INSTRUCTIONS
-  local model = opts.model or 'gpt-4'
+  local model = opts.model or 'gpt-4o-2024-05-13'
   local temperature = opts.temperature or 0.1
   local on_done = opts.on_done
   local on_progress = opts.on_progress
diff --git a/lua/CopilotChat/init.lua b/lua/CopilotChat/init.lua
index b7df7523..1e39bc0f 100644
--- a/lua/CopilotChat/init.lua
+++ b/lua/CopilotChat/init.lua
@@ -606,6 +606,9 @@ function M.setup(config)
   end
 
   M.config = vim.tbl_deep_extend('force', default_config, config or {})
+  if M.config.model == 'gpt-4o' then
+    M.config.model = 'gpt-4o-2024-05-13'
+  end
 
   if state.copilot then
     state.copilot:stop()
diff --git a/lua/CopilotChat/tiktoken.lua b/lua/CopilotChat/tiktoken.lua
index f7639548..08087e8d 100644
--- a/lua/CopilotChat/tiktoken.lua
+++ b/lua/CopilotChat/tiktoken.lua
@@ -22,7 +22,7 @@ end
 local function load_tiktoken_data(done, model)
   local tiktoken_url = 'https://openaipublic.blob.core.windows.net/encodings/cl100k_base.tiktoken'
   -- If model is gpt-4o, use o200k_base.tiktoken
-  if model == 'gpt-4o' then
+  if model == 'gpt-4o-2024-05-13' then
     tiktoken_url = 'https://openaipublic.blob.core.windows.net/encodings/o200k_base.tiktoken'
   end
   local async
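
Note (not part of the patch): a minimal sketch of what the new branch in M.setup means for callers, assuming the plugin's standard require('CopilotChat').setup() entry point; the opts shown are illustrative, not a definitive configuration.

  -- Omitting `model` now selects 'gpt-4o-2024-05-13', the new default in
  -- config.lua and copilot.lua. Passing the bare alias 'gpt-4o' (as the
  -- README suggests) also works: M.setup rewrites it to the dated snapshot,
  -- which in turn makes load_tiktoken_data fetch the o200k_base encoding
  -- instead of cl100k_base.
  require('CopilotChat').setup({
    model = 'gpt-4o', -- normalized to 'gpt-4o-2024-05-13' by M.setup
  })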