# CodeGate Example Configuration

# Network settings
port: 8989 # Port to listen on (1-65535)
proxy_port: 8990 # Proxy port to listen on (1-65535)
host: "localhost"            # Host to bind to (use "0.0.0.0" to listen on all interfaces)

# Logging configuration
log_level: "INFO" # One of: ERROR, WARNING, INFO, DEBUG
# Note: This configuration can be overridden by:
# 1. CLI arguments (--port, --proxy-port, --host, --log-level)
# 2. Environment variables (CODEGATE_APP_PORT, CODEGATE_APP_PROXY_PORT, CODEGATE_APP_HOST, CODEGATE_APP_LOG_LEVEL)
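#
# Example override (illustrative only; the "serve" subcommand name is an
# assumption, but the flag and variable names are those listed above):
#   CODEGATE_APP_LOG_LEVEL=DEBUG codegate serve --port 9090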

# Provider URLs
provider_urls:
openai: "https://api.openai.com/v1"
anthropic: "https://api.anthropic.com/v1"
  vllm: "http://localhost:8000"          # Base URL without the /v1 path; it is appended automatically
# Note: Provider URLs can be overridden by environment variables:
# CODEGATE_PROVIDER_OPENAI_URL
# CODEGATE_PROVIDER_ANTHROPIC_URL
# CODEGATE_PROVIDER_VLLM_URL
# Or by CLI flags:
# --vllm-url
# --openai-url
# --anthropic-url
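#
# Example override (illustrative): point the vLLM provider at a remote host.
# The hostname below is a placeholder, not a real endpoint:
#   export CODEGATE_PROVIDER_VLLM_URL="http://inference.internal:8000"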

# Certificate configuration
certs_dir: "./certs" # Directory for certificate files
ca_cert: "ca.crt" # CA certificate file name
ca_key: "ca.key" # CA key file name
server_cert: "server.crt" # Server certificate file name
server_key: "server.key" # Server key file name
# Note: Certificate configuration can be overridden by:
# 1. CLI arguments (--certs-dir, --ca-cert, --ca-key, --server-cert, --server-key)
# 2. Environment variables:
# CODEGATE_CERTS_DIR
# CODEGATE_CA_CERT
# CODEGATE_CA_KEY
# CODEGATE_SERVER_CERT
# CODEGATE_SERVER_KEY
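#
# Example (illustrative): generate a self-signed CA matching the file names
# above with plain openssl. This is only a sketch; CodeGate may provide its
# own certificate tooling:
#   openssl req -x509 -newkey rsa:4096 -nodes -keyout ./certs/ca.key \
#     -out ./certs/ca.crt -subj "/CN=CodeGate CA" -days 365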

# Embedding model configuration

####
# Inference model configuration
##

# Model to use for chatting
chat_model_path: "./codegate_volume/models/qwen2.5-coder-1.5b-instruct-q5_k_m.gguf"
# Context length of the model
chat_model_n_ctx: 32768
# Number of layers to offload to GPU. If -1, all layers are offloaded.
chat_model_n_gpu_layers: -1
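#
# Example (illustrative): a quick sanity check that the GGUF model loads with
# the settings above. Assumes llama-cpp-python is the inference library, which
# accepts the same n_ctx / n_gpu_layers parameters:
#   python -c "from llama_cpp import Llama; Llama(model_path='./codegate_volume/models/qwen2.5-coder-1.5b-instruct-q5_k_m.gguf', n_ctx=32768, n_gpu_layers=-1)"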