Skip to content

Commit

Permalink
new role syntax
Browse files Browse the repository at this point in the history
  • Loading branch information
madox2 committed Dec 16, 2024
1 parent b81a3c7 commit 2a418ad
Show file tree
Hide file tree
Showing 9 changed files with 223 additions and 81 deletions.
39 changes: 15 additions & 24 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -196,20 +196,14 @@ let g:vim_ai_roles_config_file = '/path/to/my/roles.ini'

[grammar]
prompt = fix spelling and grammar

[grammar.options]
temperature = 0.4

[grammar]
prompt = fix spelling and grammar
config.options.temperature = 0.4

[o1-mini]
[o1-mini.options]
stream = 0
model = o1-mini
max_completion_tokens = 25000
temperature = 1
initial_prompt =
config.options.stream = 0
config.options.model = o1-mini
config.options.max_completion_tokens = 25000
config.options.temperature = 1
config.options.initial_prompt =
```

Now you can select text and run it with command `:AIEdit /grammar`.
Expand Down Expand Up @@ -429,22 +423,19 @@ Then you set up a custom role that points to the OpenRouter endpoint:

```ini
[gemini]
[gemini.options]
token_file_path = ~/.config/vim-ai-openrouter.token
endpoint_url = https://openrouter.ai/api/v1/chat/completions
model = google/gemini-exp-1121:free
config.options.token_file_path = ~/.config/vim-ai-openrouter.token
config.options.endpoint_url = https://openrouter.ai/api/v1/chat/completions
config.options.model = google/gemini-exp-1121:free

[llama]
[llama.options]
token_file_path = ~/.config/vim-ai-openrouter.token
endpoint_url = https://openrouter.ai/api/v1/chat/completions
model = meta-llama/llama-3.3-70b-instruct
config.options.token_file_path = ~/.config/vim-ai-openrouter.token
config.options.endpoint_url = https://openrouter.ai/api/v1/chat/completions
config.options.model = meta-llama/llama-3.3-70b-instruct

[claude]
[claude.options]
token_file_path = ~/.config/vim-ai-openrouter.token
endpoint_url = https://openrouter.ai/api/v1/chat/completions
model = anthropic/claude-3.5-haiku
config.options.token_file_path = ~/.config/vim-ai-openrouter.token
config.options.endpoint_url = https://openrouter.ai/api/v1/chat/completions
config.options.model = anthropic/claude-3.5-haiku
```

Now you can use the role:
Expand Down
5 changes: 1 addition & 4 deletions doc/vim-ai.txt
Original file line number Diff line number Diff line change
Expand Up @@ -198,10 +198,7 @@ Example of a role: >

[grammar]
prompt = fix spelling and grammar

[grammar.options]
temperature = 0.4

config.options.temperature = 0.4

Now you can select text and run it with command `:AIEdit /grammar`.
See roles-example.ini for more examples.
Expand Down
97 changes: 68 additions & 29 deletions py/context.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,63 @@ def merge_deep(objects):
merge_deep_recursive(result, o)
return result

def is_deprecated_role_syntax(roles, role):
    """Return True if `role` is defined using the deprecated section syntax.

    The old syntax used dedicated INI sections such as `[myrole.options]` or
    `[myrole.ui-chat]`; the new syntax nests the config with dotted keys
    inside the role's own section (e.g. `config.options.temperature`).

    :param roles: mapping of section name -> section (e.g. a ConfigParser)
    :param role: role name as written in the role file
    """
    deprecated_sections = (
        'options', 'options-complete', 'options-edit', 'options-chat',
        'ui', 'ui-complete', 'ui-edit', 'ui-chat',
    )
    # any() short-circuits on the first deprecated section found
    return any(f"{role}.{section}" in roles for section in deprecated_sections)

def load_roles_with_deprecated_syntax(roles, role):
    """Build the new role structure from a role defined with deprecated syntax.

    Maps the legacy `[role.options*]` / `[role.ui*]` sections onto the
    `role_default` / `role_complete` / `role_edit` / `role_chat` structure
    produced by the new parser, so both syntaxes yield the same shape.

    :param roles: mapping of section name -> section (e.g. a ConfigParser)
    :param role: role name as written in the role file
    """
    prompt = dict(roles[role]).get('prompt', '')

    def _make_role(suffix):
        # suffix is '' for the common config, or '-complete'/'-edit'/'-chat'
        # for the command-specific sections; missing sections become {}
        return {
            'prompt': prompt,
            'config': {
                'options': dict(roles.get(f"{role}.options{suffix}", {})),
                'ui': dict(roles.get(f"{role}.ui{suffix}", {})),
            },
        }

    return {
        'role_default': _make_role(''),
        'role_complete': _make_role('-complete'),
        'role_edit': _make_role('-edit'),
        'role_chat': _make_role('-chat'),
    }

def parse_role_section(role):
    """Parse a flat section with dotted keys into a nested dict.

    E.g. {'config.options.temperature': '0.4'} becomes
    {'config': {'options': {'temperature': '0.4'}}}; keys without dots are
    kept at the top level unchanged.

    :param role: mapping of key -> value (e.g. a ConfigParser section)
    """
    result = {}
    for key in role.keys():
        # everything before the last dot is nesting path, the rest is the leaf
        *structure, primitive = key.split('.')
        obj = result
        for path in structure:
            # create intermediate dicts on demand while descending
            obj = obj.setdefault(path, {})
        obj[primitive] = role.get(key)
    return result

def load_role_config(role):
roles_config_path = os.path.expanduser(vim.eval("g:vim_ai_roles_config_file"))
if not os.path.exists(roles_config_path):
Expand All @@ -38,34 +95,14 @@ def load_role_config(role):
if not role in roles:
raise Exception(f"Role `{role}` not found")

options = roles.get(f"{role}.options", {})
options_complete = roles.get(f"{role}.options-complete", {})
options_edit = roles.get(f"{role}.options-edit", {})
options_chat = roles.get(f"{role}.options-chat", {})

ui = roles.get(f"{role}.ui", {})
ui_complete = roles.get(f"{role}.ui-complete", {})
ui_edit = roles.get(f"{role}.ui-edit", {})
ui_chat = roles.get(f"{role}.ui-chat", {})
if is_deprecated_role_syntax(roles, role):
return load_roles_with_deprecated_syntax(roles, role)

return {
'role': dict(roles[role]),
'config_default': {
'options': dict(options),
'ui': dict(ui),
},
'config_complete': {
'options': dict(options_complete),
'ui': dict(ui_complete),
},
'config_edit': {
'options': dict(options_edit),
'ui': dict(ui_edit),
},
'config_chat': {
'options': dict(options_chat),
'ui': dict(ui_chat),
},
'role_default': parse_role_section(roles.get(role, {})),
'role_complete': parse_role_section(roles.get(f"{role}.complete", {})),
'role_edit': parse_role_section(roles.get(f"{role}.edit", {})),
'role_chat': parse_role_section(roles.get(f"{role}.chat", {})),
}

def parse_role_names(prompt):
Expand All @@ -87,9 +124,11 @@ def parse_prompt_and_role_config(user_instruction, command_type):
last_role = roles[-1]
user_prompt = user_instruction[user_instruction.index(last_role) + len(last_role):].strip() # strip roles

role_configs = merge_deep([load_role_config(role) for role in roles])
config = merge_deep([role_configs['config_default'], role_configs['config_' + command_type]])
role_prompt = role_configs['role'].get('prompt', '')
parsed_role = merge_deep([load_role_config(role) for role in roles])
role_default = parsed_role['role_default']
role_command = parsed_role['role_' + command_type]
config = merge_deep([role_default.get('config', {}), role_command.get('config', {})])
role_prompt = role_default.get('prompt') or role_command.get('prompt', '')
return user_prompt, role_prompt, config

def make_selection_prompt(user_selection, user_prompt, role_prompt, selection_boundary):
Expand Down
4 changes: 4 additions & 0 deletions py/roles.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,8 @@
import vim
import os

# When running under pytest, the Vim runtime is not present to inject the
# plugin's helper names, so pull them in from the utils module explicitly.
# NOTE(review): wildcard import is deliberate here to mirror the names that
# the Vim runtime normally provides at module scope.
if "PYTEST_VERSION" in os.environ:
    from utils import *

# Sentinel checked elsewhere to confirm this module was loaded.
roles_py_imported = True

Expand Down
28 changes: 13 additions & 15 deletions roles-example.ini
Original file line number Diff line number Diff line change
Expand Up @@ -7,27 +7,25 @@
[grammar]
prompt = fix spelling and grammar

# common options for all commands (complete, edit, chat)
[refactor]
prompt =
You are a Clean Code expert, I have the following code,
please refactor it in a more clean and concise way so that my colleagues
can maintain the code more easily. Also, explain why you want to refactor
the code so that I can add the explanation to the Pull Request.
# common options for all commands (complete, edit, chat)
[refactor.options]
temperature = 0.4
config.options.temperature = 0.4
# command specific options:
[refactor.options-chat]
model = gpt-4o
[refactor.options-complete]
model = gpt-4
[refactor.options-edit]
model = gpt-4
[refactor.chat]
config.options.model = gpt-4o
[refactor.complete]
config.options.model = gpt-4
[refactor.edit]
config.options.model = gpt-4

[o1-mini]
[o1-mini.options]
stream = 0
model = o1-mini
max_completion_tokens = 25000
temperature = 1
initial_prompt =
config.options.stream = 0
config.options.model = o1-mini
config.options.max_completion_tokens = 25000
config.options.temperature = 1
config.options.initial_prompt =
5 changes: 4 additions & 1 deletion tests/context_test.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import vim
from context import make_ai_context, make_prompt

default_config = {
"engine": "chat",
"options": {
"model": "gpt-4o",
"endpoint_url": "https://api.openai.com/v1/chat/completions",
Expand Down Expand Up @@ -81,6 +81,7 @@ def test_role_config_different_commands():
assert 'preset_tab' == actual_config['ui']['open_chat_command']
assert 'hello' == actual_prompt
assert 'https://localhost/chat' == actual_config['options']['endpoint_url']
assert 'chat' == actual_config['engine']

context = make_ai_context({ **base, 'command_type': 'complete' })
actual_config = context['config']
Expand All @@ -89,6 +90,7 @@ def test_role_config_different_commands():
assert '0' == actual_config['ui']['paste_mode']
assert 'hello' == actual_prompt
assert 'https://localhost/complete' == actual_config['options']['endpoint_url']
assert 'complete' == actual_config['engine']

context = make_ai_context({ **base, 'command_type': 'edit' })
actual_config = context['config']
Expand All @@ -97,6 +99,7 @@ def test_role_config_different_commands():
assert '0' == actual_config['ui']['paste_mode']
assert 'hello' == actual_prompt
assert 'https://localhost/edit' == actual_config['options']['endpoint_url']
assert 'complete' == actual_config['engine']

def test_multiple_role_configs():
context = make_ai_context({
Expand Down
83 changes: 83 additions & 0 deletions tests/deprecated_role_syntax_test.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,83 @@
from context import make_ai_context, make_prompt

# Baseline plugin configuration passed to make_ai_context() in every test;
# role files loaded during the tests override these values.
# NOTE(review): values are strings because they originate from Vim/INI
# settings — presumably parsed downstream; confirm against context.py.
default_config = {
    "options": {
        "model": "gpt-4o",
        "endpoint_url": "https://api.openai.com/v1/chat/completions",
        "max_tokens": "0",
        "max_completion_tokens": "0",
        "temperature": "1",
        "request_timeout": "20",
        "stream": "1",
        "enable_auth": "1",
        "token_file_path": "",
        "selection_boundary": "",
        "initial_prompt": "You are a general assistant.",
    },
    "ui": {
        "open_chat_command": "preset_below",
        "scratch_buffer_keep_open": "0",
        "populate_options": "0",
        "code_syntax_enabled": "1",
        "paste_mode": "1",
    },
}

def test_role_config():
    """A role defined with the deprecated syntax resolves for the chat command."""
    ctx = make_ai_context({
        'config_default': default_config,
        'config_extension': {},
        'user_instruction': '/deprecated-test-role-simple user instruction',
        'user_selection': 'selected text',
        'command_type': 'chat',
    })
    cfg = ctx['config']
    prompt = ctx['prompt']
    assert cfg['options']['model'] == 'o1-preview'
    assert prompt == 'simple role prompt:\nuser instruction:\nselected text'

def test_role_config_different_commands():
    """Common deprecated-syntax config applies to chat, complete and edit alike."""
    base = {
        'config_default': default_config,
        'config_extension': {},
        'user_instruction': '/deprecated-test-role hello',
        'user_selection': '',
    }
    expected_endpoints = [
        ('chat', 'https://localhost/chat'),
        ('complete', 'https://localhost/complete'),
        ('edit', 'https://localhost/edit'),
    ]
    for command_type, endpoint in expected_endpoints:
        ctx = make_ai_context({ **base, 'command_type': command_type })
        cfg = ctx['config']
        # shared options/ui come from the role's common sections
        assert cfg['options']['model'] == 'model-common'
        assert cfg['ui']['paste_mode'] == '0'
        assert ctx['prompt'] == 'hello'
        # endpoint is overridden per command type
        assert cfg['options']['endpoint_url'] == endpoint
        if command_type == 'chat':
            assert cfg['ui']['open_chat_command'] == 'preset_tab'

def test_multiple_role_configs():
    """Two deprecated-syntax roles in one instruction combine their configs."""
    ctx = make_ai_context({
        'config_default': default_config,
        'config_extension': {},
        'user_instruction': '/deprecated-test-role /deprecated-test-role-simple hello',
        'user_selection': '',
        'command_type': 'chat',
    })
    cfg = ctx['config']
    # the later role's model wins; the earlier role's endpoint is kept
    assert cfg['options']['model'] == 'o1-preview'
    assert cfg['options']['endpoint_url'] == 'https://localhost/chat'
    assert ctx['prompt'] == 'simple role prompt:\nhello'
Loading

8 comments on commit 2a418ad

@Konfekt
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Just wondering if this only affects the config files or also the internal dictionary (as used here) ?

@madox2
Copy link
Owner Author

@madox2 madox2 commented on 2a418ad Dec 17, 2024

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Old syntax is deprecated but still supported. If you want to use new syntax, it probably affects the internal dictionary. New planned features (like custom providers) may work with new syntax only

@Konfekt
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Would you mind documenting the differences at some point? I could pipe the diff through an AI and propose a PR to this end, but likely you've got something similar already in mind?

@madox2
Copy link
Owner Author

@madox2 madox2 commented on 2a418ad Dec 17, 2024

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@Konfekt
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Thank you,

Could you give a hint on how this changes the internal dictionaries for example at https://github.com/madox2/vim-ai?tab=readme-ov-file#configuration?

They seem to be working as before, but I suppose the current model options such as

\  "options": {
\    "model": "o1-mini",
\    "max_tokens": 0,
\    "initial_prompt": g:vim_ai_edit_initial_prompt,
\  },

will eventually have to be changed as they have been deprecated?

@madox2
Copy link
Owner Author

@madox2 madox2 commented on 2a418ad Dec 17, 2024

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

no, configuration structure is not going to change. what has changed is the format of role file and how it is parsed

@Konfekt
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This o1-mini role

[o1-mini]
options.stream = 0
options.model = o1-mini
options.max_completion_tokens = 25000
options.temperature = 1
options.initial_prompt =

is a role that reuses the generic prompt and only changes the model settings?
Outsiders might imagine a role to be defined by a prompt (plus additional options), so it looks a bit surprising at first to see a role named like a model without a prompt

@Konfekt
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I see that this role was already there before, so the question comes too late, I beg your pardon

Please sign in to comment.