# .env.example — copy to .env and fill in the values before running the application
# ===========================
# 🔑 API KEYS & LLM PROVIDERS
# ===========================
# LLM Providers Configuration (JSON format)
# Each provider entry contains: providerName, endpoint, apiKey, desc, and name
# providerName: unique identifier used in model records (e.g., "openai", "deepseek")
# endpoint: API endpoint URL
# apiKey: authentication key for the provider
# desc: description of the provider
# name: display name shown in the UI
LLM_PROVIDERS='[
{
"providerName": "openai",
"endpoint": "https://api.openai.com/v1/",
"apiKey": "",
"desc": "OpenAI Official API",
"name": "OpenAI"
},
{
"providerName": "deepseek",
"endpoint": "https://api.deepseek.com/",
"apiKey": "",
"desc": "DeepSeek API",
"name": "DeepSeek"
},
{
"providerName": "bielik",
"endpoint": "http://your-bielik-endpoint:4000/",
"apiKey": "",
"desc": "Bielik Institute Hosting",
"name": "Bielik"
},
{
"providerName": "replicate",
"endpoint": "https://api.replicate.com/v1/",
"apiKey": "",
"desc": "Replicate API",
"name": "Replicate"
},
{
"providerName": "anthropic",
"endpoint": "https://api.anthropic.com/v1/",
"apiKey": "",
"desc": "Anthropic Claude API",
"name": "Anthropic"
},
{
"providerName": "google",
"endpoint": "https://generativelanguage.googleapis.com/v1beta/",
"apiKey": "",
"desc": "Google Gemini API",
"name": "Google"
},
{
"providerName": "fp",
"endpoint": "",
"apiKey": "",
"desc": "Feature Preview Provider",
"name": "Feature Preview"
},
{
"providerName": "pcss",
"endpoint": "https://llm.hpc.pcss.pl",
"apiKey": "",
"desc": "PCSS HPC LLM Service",
"name": "PCSS",
"requireCustomLlmProvider": true
}
]'
# Model to Provider Mapping (JSON format)
# Maps each model to its provider and defines a unique identifier/alias
# modelName: the actual model name used in API calls (e.g., "gpt-5-mini", "deepseek-chat")
# providerName: must match a providerName from LLM_PROVIDERS
# modelIdentifier: unique alias/identifier for referencing this model in env vars
# (e.g., "gpt-5-mini" can be referenced as "openai-gpt5" vs "deepseek-gpt5")
# This allows you to use the same model from different providers:
# OPENAI_CHAT_MODEL_CURRENT=openai-gpt5
# DEEPSEEK_CHAT_MODEL_CURRENT=deepseek-gpt5
#
# Optional model configuration fields:
# supportsToolChoice: false - Indicates model doesn't support automatic tool calling
# assistantsToolChoiceOverride: "none"|"auto"|"required" - Override tool_choice for Assistants API
# assistantsFallbackModel: "model-identifier" - Use another model to decide which tools to call
# When set, the fallback model analyzes queries and decides if tools
# (code_interpreter, retrieval) are needed, while the main model does
# the actual research work. This allows vLLM models without tool support
# to benefit from intelligent tool routing via a capable fallback model.
MODEL_PROVIDER_MAP='[
{
"modelName": "gpt-5-mini",
"providerName": "openai",
"modelIdentifier": "openai-gpt5-mini"
},
{
"modelName": "gpt-5.2",
"providerName": "openai",
"modelIdentifier": "openai-gpt5.2"
},
{
"modelName": "gpt-4o-mini-transcribe",
"providerName": "openai",
"modelIdentifier": "openai-gpt4o-mini-transcribe"
},
{
"modelName": "claude-3-5-sonnet-20241022",
"providerName": "anthropic",
"modelIdentifier": "claude-sonnet-3.5"
},
{
"modelName": "claude-3-opus-20240229",
"providerName": "anthropic",
"modelIdentifier": "claude-opus-3"
},
{
"modelName": "claude-3-haiku-20240307",
"providerName": "anthropic",
"modelIdentifier": "claude-haiku-3"
},
{
"modelName": "gemini-2.0-flash-exp",
"providerName": "google",
"modelIdentifier": "gemini-2.0-flash"
},
{
"modelName": "gemini-1.5-pro",
"providerName": "google",
"modelIdentifier": "gemini-1.5-pro"
},
{
"modelName": "gemini-1.5-flash",
"providerName": "google",
"modelIdentifier": "gemini-1.5-flash"
},
{
"modelName": "deepseek-chat",
"providerName": "deepseek",
"modelIdentifier": "deepseek-chat"
},
{
"modelName": "bielik-11b-v2.3-instruct",
"providerName": "bielik",
"modelIdentifier": "bielik-11b-instruct"
},
{
"modelName": "gpt-oss_120b",
"providerName": "pcss",
"modelIdentifier": "pcss_gpt-oss_120b",
"supportsToolChoice": false,
"assistantsToolChoiceOverride": "none"
},
{
"modelName": "Qwen3-VL-235B-A22B-Instruct",
"providerName": "pcss",
"modelIdentifier": "pcss_Qwen3-VL-235B-A22B-Instruct",
"supportsToolChoice": false,
"assistantsToolChoiceOverride": "none"
},
{
"modelName": "DeepSeek-V3.1-vLLM",
"providerName": "pcss",
"modelIdentifier": "pcss_DeepSeek-V3.1-vLLM",
"supportsToolChoice": false
}
]'
# ===========================
# MODEL PROFILE CONFIGURATION
# ===========================
# Which profile to use at startup: legacy/current
MODEL_PROFILE=current
# Backend type: completions or responses
LLM_BACKEND=completions
# Default fallback model for tool choice decisions
# Used when a model has supportsToolChoice=false but no specific assistantsFallbackModel configured
# The fallback model analyzes queries and decides if tools (code_interpreter, retrieval) are needed
DEFAULT_ASSISTANTS_FALLBACK_MODEL=openai-gpt5-mini
# Model profiles are now defined in profiles.json
# Use MODEL_PROFILE to select which profile to use (e.g., current, current_mixed, legacy)
# You can add custom profiles in profiles.json with your preferred model combinations
# ===========================
# EXTERNAL SERVICES API KEYS
# ===========================
# SerpAPI key (required for web search)
SERPAPI_API_KEY=your_serpapi_api_key_here
# Shodan API key: used by shodan.Shodan() (required for web search)
SHODAN_API_KEY=your_shodan_api_key_here
# NVD (National Vulnerability Database, US government) API key
NVD_API_KEY=your_nvd_api_key_here
# WPScan API token: limited to 25 scans per day
WPSCAN_API_TOKEN=your_api_token_here
# ===========================
# 🗣️ AUDIO SETTINGS
# ===========================
# Enable speech-to-text (microphone input)
# Set to "true" to use speech_recognition with PyAudio, or "false" to disable microphone input.
ENABLE_LISTEN=false
# Enable text-to-speech (audio output)
# Set to "true" to allow pyttsx3 or playsound3 to speak responses aloud.
ENABLE_SPEAK=true
# Enable clipboard (Read/Write)
ENABLE_CLIPBOARD=true
# Enable screenshot, vision, ocr etc.
ENABLE_VISUAL_PERCEPTION=false
# ===========================
# LOG SETTINGS
# ===========================
LOG_LEVEL=DEBUG
# ===========================
# LLM SETTINGS
# ===========================
# Warning Threshold
CONTEXT_WARN_THRESHOLD=0.75
# ===========================
# Core SETTINGS
# ===========================
REDACT_MODE=off
# User types "redactedIP", ECHO sends "127.0.0.1" to tools/LLM
REDACT_MAP='{"redactedIP": "127.0.0.1", "redactedOrg": "MegaCorp S.A."}'
SHOW_EXTRA_WINDOWS=false
# How many entries will be remembered
HISTORY_ENTRIES_LIMIT=100
# ===========================
# WEBSOCKET LOG STREAM
# ===========================
# Enable/disable WebSocket log streaming
WEBSOCKET_LOG_ENABLED=false
# Where to bind the WebSocket server
WEBSOCKET_LOG_HOST=127.0.0.1
WEBSOCKET_LOG_PORT=9876