martynka committed
Commit 51090d9 · verified · 1 Parent(s): cc078c4

Create confg.yaml

Files changed (1):
  1. confg.yaml +593 -0
confg.yaml ADDED
@@ -0,0 +1,593 @@
+ version: 1.2.4
+
+ cache: false
+
+ interface:
+   privacyPolicy:
+     externalUrl: 'https://go.easierit.org/privacy'
+     openNewTab: true
+   termsOfService:
+     externalUrl: 'https://go.easierit.org/tos'
+     openNewTab: true
+
+ registration:
+   socialLogins: ["openid"]
+
+ endpoints:
+   agents:
+     capabilities:
+       - "file_search"
+       - "actions"
+       - "artifacts"
+       - "chain"
+   custom:
+
+     # glhf.chat
+     # Model list (auth header required): https://glhf.chat/models
+     - name: "EasierIT.glhf"
+       iconURL: "https://glhf.chat/apple-touch-icon.png"
+       apiKey: "${glhf}"
+       baseURL: "https://glhf.chat/api/openai/v1"
+       models:
+         default:
+           - hf:NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO
+           - hf:Qwen/QwQ-32B-Preview
+           - hf:Qwen/Qwen2.5-72B-Instruct
+           - hf:Qwen/Qwen2.5-7B-Instruct
+           - hf:Qwen/Qwen2.5-Coder-32B-Instruct
+           - hf:anthracite-org/magnum-v4-12b
+           - hf:deepseek-ai/DeepSeek-R1
+           - hf:deepseek-ai/DeepSeek-R1-Distill-Llama-70B
+           - hf:deepseek-ai/DeepSeek-V3
+           - hf:deepseek-ai/DeepSeek-V3-0324
+           - hf:google/gemma-2-27b-it
+           - hf:google/gemma-2-9b-it
+           - hf:huihui-ai/Llama-3.3-70B-Instruct-abliterated
+           - hf:meta-llama/Llama-3.1-405B-Instruct
+           - hf:meta-llama/Llama-3.1-70B-Instruct
+           - hf:meta-llama/Llama-3.1-8B-Instruct
+           - hf:meta-llama/Llama-3.2-11B-Vision-Instruct
+           - hf:meta-llama/Llama-3.2-3B-Instruct
+           - hf:meta-llama/Llama-3.2-90B-Vision-Instruct
+           - hf:meta-llama/Llama-3.3-70B-Instruct
+           - hf:meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8
+           - hf:meta-llama/Llama-4-Scout-17B-16E-Instruct
+           - hf:mistralai/Mistral-7B-Instruct-v0.3
+           - hf:mistralai/Mixtral-8x22B-Instruct-v0.1
+           - hf:mistralai/Mixtral-8x7B-Instruct-v0.1
+           - hf:nvidia/Llama-3.1-Nemotron-70B-Instruct-HF
+           - hf:upstage/SOLAR-10.7B-Instruct-v1.0
+         fetch: false
+       titleConvo: true
+       titleModel: "current_model"
+     # groq
+     # Model list: https://console.groq.com/settings/limits
+     - name: "groq"
+       apiKey: "${groq_key}"
+       baseURL: "https://api.groq.com/openai/v1/"
+       models:
+         default:
+           - allam-2-7b
+           - deepseek-r1-distill-llama-70b
+           - gemma2-9b-it
+           - llama-3.1-8b-instant
+           - llama-3.3-70b-versatile
+           - llama-guard-3-8b
+           - llama3-70b-8192
+           - llama3-8b-8192
+           - meta-llama/llama-4-maverick-17b-128e-instruct
+           - meta-llama/llama-4-scout-17b-16e-instruct
+           - mistral-saba-24b
+           - playai-tts
+           - playai-tts-arabic
+           - qwen-qwq-32b
+         fetch: false
+       titleConvo: true
+       titleModel: "mixtral-8x7b-32768"
+       modelDisplayLabel: "groq"
+
+     # HuggingFace
+     # https://huggingface.co/settings/tokens
+     - name: 'HuggingFace'
+       apiKey: '${hf_token}'
+       baseURL: 'https://api-inference.huggingface.co/v1'
+       models:
+         default:
+           - AIDC-AI/Marco-o1
+           - CohereLabs/c4ai-command-r-plus
+           - CohereLabs/c4ai-command-r-v01
+           - HuggingFaceH4/zephyr-7b-alpha
+           - HuggingFaceH4/zephyr-7b-beta
+           - HuggingFaceTB/SmolLM2-1.7B-Instruct
+           - Intel/neural-chat-7b-v3-1
+           - MiniMaxAI/MiniMax-Text-01
+           - NexaAIDev/Octopus-v2
+           - NovaSky-AI/Sky-T1-32B-Preview
+           - Open-Orca/Mistral-7B-OpenOrca
+           - PygmalionAI/pygmalion-6b
+           - Qwen/QwQ-32B
+           - Qwen/QwQ-32B-Preview
+           - Qwen/Qwen2-72B-Instruct
+           - Qwen/Qwen2-7B-Instruct
+           - Qwen/Qwen2.5-72B-Instruct
+           - Qwen/Qwen2.5-7B-Instruct
+           - Qwen/Qwen2.5-Coder-32B-Instruct
+           - TinyLlama/TinyLlama-1.1B-Chat-v1.0
+           - berkeley-nest/Starling-LM-7B-alpha
+           - cognitivecomputations/dolphin-2.5-mixtral-8x7b
+           - databricks/dbrx-base
+           - databricks/dbrx-instruct
+           - deepseek-ai/DeepSeek-Coder-V2-Instruct
+           - deepseek-ai/DeepSeek-R1
+           - deepseek-ai/DeepSeek-R1-Distill-Llama-70B
+           - deepseek-ai/DeepSeek-R1-Distill-Llama-8B
+           - deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B
+           - deepseek-ai/DeepSeek-R1-Distill-Qwen-14B
+           - deepseek-ai/DeepSeek-R1-Distill-Qwen-32B
+           - deepseek-ai/DeepSeek-R1-Distill-Qwen-7B
+           - deepseek-ai/DeepSeek-R1-Zero
+           - deepseek-ai/DeepSeek-V2.5
+           - deepseek-ai/DeepSeek-V3
+           - deepseek-ai/DeepSeek-V3-0324
+           - deepseek-ai/deepseek-coder-33b-instruct
+           - google/gemma-2-27b-it
+           - google/gemma-2-2b-it
+           - google/gemma-2-9b-it
+           - google/gemma-2b-it
+           - google/gemma-7b-it
+           - gradientai/Llama-3-8B-Instruct-Gradient-1048k
+           - jinaai/ReaderLM-v2
+           - jinaai/reader-lm-1.5b
+           - manycore-research/SpatialLM-Llama-1B
+           - mattshumer/Reflection-Llama-3.1-70B
+           - meta-llama/Llama-2-13b-chat-hf
+           - meta-llama/Llama-2-70b-chat-hf
+           - meta-llama/Llama-2-7b-chat-hf
+           - meta-llama/Llama-3.1-405B-Instruct
+           - meta-llama/Llama-3.1-70B-Instruct
+           - meta-llama/Llama-3.1-8B-Instruct
+           - meta-llama/Llama-3.2-1B-Instruct
+           - meta-llama/Llama-3.2-3B-Instruct
+           - meta-llama/Llama-3.3-70B-Instruct
+           - meta-llama/Meta-Llama-3-70B-Instruct
+           - meta-llama/Meta-Llama-3-8B-Instruct
+           - microsoft/Phi-3-mini-128k-instruct
+           - microsoft/Phi-3-mini-4k-instruct
+           - microsoft/Phi-3-vision-128k-instruct
+           - microsoft/Phi-3.5-MoE-instruct
+           - microsoft/Phi-3.5-mini-instruct
+           - microsoft/phi-4
+           - mistralai/Codestral-22B-v0.1
+           - mistralai/Mistral-7B-Instruct-v0.1
+           - mistralai/Mistral-7B-Instruct-v0.2
+           - mistralai/Mistral-7B-Instruct-v0.3
+           - mistralai/Mistral-Nemo-Instruct-2407
+           - mistralai/Mistral-Small-24B-Instruct-2501
+           - mistralai/Mixtral-8x22B-Instruct-v0.1
+           - mistralai/Mixtral-8x7B-Instruct-v0.1
+           - nvidia/Llama-3.1-Nemotron-70B-Instruct-HF
+           - nvidia/Llama3-ChatQA-1.5-8B
+           - openchat/openchat_3.5
+           - perplexity-ai/r1-1776
+           - shenzhi-wang/Llama3-8B-Chinese-Chat
+           - teknium/OpenHermes-2.5-Mistral-7B
+           - tiiuae/falcon-180B-chat
+           - tiiuae/falcon-7b-instruct
+           - unsloth/DeepSeek-R1-GGUF
+           - upstage/SOLAR-10.7B-Instruct-v1.0
+         fetch: false
+       titleConvo: true
+       titleModel: "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO"
+       dropParams:
+         - "top_p"
+
+     # kluster.ai
+     # https://platform.kluster.ai/apikeys
+     - name: "Kluster"
+       iconURL: "https://platform.kluster.ai/cropped-fav-1-144x144.png"
+       apiKey: "${Kluster_key}"
+       baseURL: "https://api.kluster.ai/v1/"
+       models:
+         default:
+           - Qwen/Qwen2.5-VL-7B-Instruct
+           - deepseek-ai/DeepSeek-R1
+           - deepseek-ai/DeepSeek-V3
+           - deepseek-ai/DeepSeek-V3-0324
+           - google/gemma-3-27b-it
+           - klusterai/Meta-Llama-3.1-405B-Instruct-Turbo
+           - klusterai/Meta-Llama-3.1-8B-Instruct-Turbo
+           - klusterai/Meta-Llama-3.3-70B-Instruct-Turbo
+           - meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8
+           - meta-llama/Llama-4-Scout-17B-16E-Instruct
+         fetch: false
+       titleConvo: true
+       titleModel: 'klusterai/Meta-Llama-3.1-8B-Instruct-Turbo'
+       modelDisplayLabel: 'Kluster'
+
+     # Mistral AI API
+     # Model list: https://docs.mistral.ai/getting-started/models/
+     - name: "Mistral"
+       apiKey: "${mistral_key}"
+       baseURL: "https://api.mistral.ai/v1"
+       models:
+         default:
+           - codestral-2405
+           - codestral-2411-rc5
+           - codestral-2412
+           - codestral-2501
+           - codestral-latest
+           - codestral-mamba-2407
+           - codestral-mamba-latest
+           - ministral-3b-2410
+           - ministral-3b-latest
+           - ministral-8b-2410
+           - ministral-8b-latest
+           - mistral-embed
+           - mistral-large-2402
+           - mistral-large-2407
+           - mistral-large-2411
+           - mistral-large-latest
+           - mistral-large-pixtral-2411
+           - mistral-medium
+           - mistral-medium-2312
+           - mistral-medium-latest
+           - mistral-moderation-2411
+           - mistral-moderation-latest
+           - mistral-ocr-2503
+           - mistral-ocr-latest
+           - mistral-saba-2502
+           - mistral-saba-latest
+           - mistral-small
+           - mistral-small-2312
+           - mistral-small-2402
+           - mistral-small-2409
+           - mistral-small-2501
+           - mistral-small-2503
+           - mistral-small-latest
+           - mistral-tiny
+           - mistral-tiny-2312
+           - mistral-tiny-2407
+           - mistral-tiny-latest
+           - open-codestral-mamba
+           - open-mistral-7b
+           - open-mistral-nemo
+           - open-mistral-nemo-2407
+           - open-mixtral-8x22b
+           - open-mixtral-8x22b-2404
+           - open-mixtral-8x7b
+           - pixtral-12b
+           - pixtral-12b-2409
+           - pixtral-12b-latest
+           - pixtral-large-2411
+           - pixtral-large-latest
+         fetch: false
+       titleConvo: true
+       titleMethod: "completion"
+       titleModel: "mistral-tiny"
+       summarize: false
+       summaryModel: "mistral-tiny"
+       forcePrompt: false
+       modelDisplayLabel: "Mistral"
+       dropParams:
+         - "stop"
+         - "user"
+         - "frequency_penalty"
+         - "presence_penalty"
+
+
+     # NVIDIA
+     # https://build.nvidia.com/explore/discover
+     - name: "Nvidia"
+       iconURL: "https://raw.githubusercontent.com/LibreChat-AI/librechat-config-yaml/refs/heads/main/icons/nvidia.png"
+       apiKey: "${gpu_key}"
+       baseURL: "https://integrate.api.nvidia.com/v1/"
+       models:
+         default:
+           - 01-ai/yi-large
+           - abacusai/dracarys-llama-3.1-70b-instruct
+           - adept/fuyu-8b
+           - ai21labs/jamba-1.5-large-instruct
+           - ai21labs/jamba-1.5-mini-instruct
+           - aisingapore/sea-lion-7b-instruct
+           - baai/bge-m3
+           - baichuan-inc/baichuan2-13b-chat
+           - bigcode/starcoder2-15b
+           - bigcode/starcoder2-7b
+           - databricks/dbrx-instruct
+           - deepseek-ai/deepseek-coder-6.7b-instruct
+           - deepseek-ai/deepseek-r1
+           - deepseek-ai/deepseek-r1-distill-llama-8b
+           - deepseek-ai/deepseek-r1-distill-qwen-14b
+           - deepseek-ai/deepseek-r1-distill-qwen-32b
+           - deepseek-ai/deepseek-r1-distill-qwen-7b
+           - google/codegemma-1.1-7b
+           - google/codegemma-7b
+           - google/deplot
+           - google/gemma-2-27b-it
+           - google/gemma-2-2b-it
+           - google/gemma-2-9b-it
+           - google/gemma-2b
+           - google/gemma-3-12b-it
+           - google/gemma-3-1b-it
+           - google/gemma-3-27b-it
+           - google/gemma-3-4b-it
+           - google/gemma-7b
+           - google/paligemma
+           - google/recurrentgemma-2b
+           - google/shieldgemma-9b
+           - ibm/granite-3.0-3b-a800m-instruct
+           - ibm/granite-3.0-8b-instruct
+           - ibm/granite-34b-code-instruct
+           - ibm/granite-8b-code-instruct
+           - ibm/granite-guardian-3.0-8b
+           - igenius/colosseum_355b_instruct_16k
+           - igenius/italia_10b_instruct_16k
+           - institute-of-science-tokyo/llama-3.1-swallow-70b-instruct-v0.1
+           - institute-of-science-tokyo/llama-3.1-swallow-8b-instruct-v0.1
+           - mediatek/breeze-7b-instruct
+           - meta/codellama-70b
+           - meta/llama-3.1-405b-instruct
+           - meta/llama-3.1-70b-instruct
+           - meta/llama-3.1-8b-instruct
+           - meta/llama-3.2-11b-vision-instruct
+           - meta/llama-3.2-1b-instruct
+           - meta/llama-3.2-3b-instruct
+           - meta/llama-3.2-90b-vision-instruct
+           - meta/llama-3.3-70b-instruct
+           - meta/llama-4-maverick-17b-128e-instruct
+           - meta/llama-4-scout-17b-16e-instruct
+           - meta/llama2-70b
+           - meta/llama3-70b-instruct
+           - meta/llama3-8b-instruct
+           - microsoft/kosmos-2
+           - microsoft/phi-3-medium-128k-instruct
+           - microsoft/phi-3-medium-4k-instruct
+           - microsoft/phi-3-mini-128k-instruct
+           - microsoft/phi-3-mini-4k-instruct
+           - microsoft/phi-3-small-128k-instruct
+           - microsoft/phi-3-small-8k-instruct
+           - microsoft/phi-3-vision-128k-instruct
+           - microsoft/phi-3.5-mini-instruct
+           - microsoft/phi-3.5-moe-instruct
+           - microsoft/phi-3.5-vision-instruct
+           - microsoft/phi-4-mini-instruct
+           - microsoft/phi-4-multimodal-instruct
+           - mistralai/codestral-22b-instruct-v0.1
+           - mistralai/mamba-codestral-7b-v0.1
+           - mistralai/mathstral-7b-v0.1
+           - mistralai/mistral-7b-instruct-v0.2
+           - mistralai/mistral-7b-instruct-v0.3
+           - mistralai/mistral-large
+           - mistralai/mistral-large-2-instruct
+           - mistralai/mistral-small-24b-instruct
+           - mistralai/mixtral-8x22b-instruct-v0.1
+           - mistralai/mixtral-8x22b-v0.1
+           - mistralai/mixtral-8x7b-instruct-v0.1
+           - nv-mistralai/mistral-nemo-12b-instruct
+           - nvidia/embed-qa-4
+           - nvidia/llama-3.1-nemoguard-8b-content-safety
+           - nvidia/llama-3.1-nemoguard-8b-topic-control
+           - nvidia/llama-3.1-nemotron-51b-instruct
+           - nvidia/llama-3.1-nemotron-70b-instruct
+           - nvidia/llama-3.1-nemotron-70b-reward
+           - nvidia/llama-3.1-nemotron-nano-8b-v1
+           - nvidia/llama-3.1-nemotron-ultra-253b-v1
+           - nvidia/llama-3.2-nv-embedqa-1b-v1
+           - nvidia/llama-3.2-nv-embedqa-1b-v2
+           - nvidia/llama-3.3-nemotron-super-49b-v1
+           - nvidia/llama3-chatqa-1.5-70b
+           - nvidia/llama3-chatqa-1.5-8b
+           - nvidia/mistral-nemo-minitron-8b-8k-instruct
+           - nvidia/mistral-nemo-minitron-8b-base
+           - nvidia/nemoretriever-parse
+           - nvidia/nemotron-4-340b-instruct
+           - nvidia/nemotron-4-340b-reward
+           - nvidia/nemotron-4-mini-hindi-4b-instruct
+           - nvidia/nemotron-mini-4b-instruct
+           - nvidia/neva-22b
+           - nvidia/nv-embed-v1
+           - nvidia/nv-embedcode-7b-v1
+           - nvidia/nv-embedqa-e5-v5
+           - nvidia/nv-embedqa-mistral-7b-v2
+           - nvidia/nvclip
+           - nvidia/usdcode-llama-3.1-70b-instruct
+           - nvidia/vila
+           - qwen/qwen2-7b-instruct
+           - qwen/qwen2.5-7b-instruct
+           - qwen/qwen2.5-coder-32b-instruct
+           - qwen/qwen2.5-coder-7b-instruct
+           - qwen/qwq-32b
+           - rakuten/rakutenai-7b-chat
+           - rakuten/rakutenai-7b-instruct
+           - snowflake/arctic-embed-l
+           - thudm/chatglm3-6b
+           - tiiuae/falcon3-7b-instruct
+           - tokyotech-llm/llama-3-swallow-70b-instruct-v0.1
+           - upstage/solar-10.7b-instruct
+           - writer/palmyra-creative-122b
+           - writer/palmyra-fin-70b-32k
+           - writer/palmyra-med-70b
+           - writer/palmyra-med-70b-32k
+           - yentinglin/llama-3-taiwan-70b-instruct
+           - zyphra/zamba2-7b-instruct
+         fetch: false
+       titleConvo: true
+       titleModel: "nvidia/nemotron-mini-4b-instruct"
+       modelDisplayLabel: "Nvidia"
+
+     # Unify
+     # Model list: https://unify.ai/chat
+     - name: "Unify"
+       apiKey: "${unify_key}"
+       baseURL: "https://api.unify.ai/v0/"
+       models:
+         default:
+           - chatgpt-4o-latest@openai
+           - claude-3-haiku@anthropic
+           - claude-3-haiku@aws-bedrock
+           - claude-3-haiku@vertex-ai
+           - claude-3-opus@anthropic
+           - claude-3-opus@aws-bedrock
+           - claude-3-opus@vertex-ai
+           - claude-3-sonnet@anthropic
+           - claude-3-sonnet@aws-bedrock
+           - claude-3.5-haiku@anthropic
+           - claude-3.5-haiku@aws-bedrock
+           - claude-3.5-haiku@replicate
+           - claude-3.5-haiku@vertex-ai
+           - claude-3.5-sonnet-20240620@anthropic
+           - claude-3.5-sonnet-20240620@aws-bedrock
+           - claude-3.5-sonnet-20240620@vertex-ai
+           - claude-3.5-sonnet@anthropic
+           - claude-3.5-sonnet@aws-bedrock
+           - claude-3.5-sonnet@replicate
+           - claude-3.5-sonnet@vertex-ai
+           - claude-3.7-sonnet@anthropic
+           - claude-3.7-sonnet@aws-bedrock
+           - claude-3.7-sonnet@replicate
+           - claude-3.7-sonnet@vertex-ai
+           - command-r-plus@aws-bedrock
+           - deepseek-r1@aws-bedrock
+           - deepseek-r1@deepinfra
+           - deepseek-r1@deepseek
+           - deepseek-r1@fireworks-ai
+           - deepseek-r1@replicate
+           - deepseek-r1@together-ai
+           - deepseek-v3-0324@deepinfra
+           - deepseek-v3-0324@fireworks-ai
+           - deepseek-v3@deepinfra
+           - deepseek-v3@deepseek
+           - deepseek-v3@fireworks-ai
+           - deepseek-v3@replicate
+           - deepseek-v3@together-ai
+           - gemini-1.5-flash-001@vertex-ai
+           - gemini-1.5-flash-002@vertex-ai
+           - gemini-1.5-flash@vertex-ai
+           - gemini-1.5-pro-001@vertex-ai
+           - gemini-1.5-pro-002@vertex-ai
+           - gemini-1.5-pro@vertex-ai
+           - gemini-2.0-flash-lite@vertex-ai
+           - gemini-2.0-flash@vertex-ai
+           - gemini-2.5-pro@vertex-ai
+           - gemma-2-27b-it@together-ai
+           - gemma-2-9b-it@groq
+           - gemma-2-9b-it@lepton-ai
+           - gemma-3-12b-it@deepinfra
+           - gemma-3-27b-it@deepinfra
+           - gemma-3-4b-it@deepinfra
+           - gpt-3.5-turbo@openai
+           - gpt-4-turbo@openai
+           - gpt-4.5-preview@openai
+           - gpt-4@openai
+           - gpt-4o-2024-05-13@openai
+           - gpt-4o-2024-08-06@openai
+           - gpt-4o-2024-11-20@openai
+           - gpt-4o-mini-search-preview@openai
+           - gpt-4o-mini@openai
+           - gpt-4o-search-preview@openai
+           - gpt-4o@openai
+           - grok-2-vision@xai
+           - grok-2@xai
+           - grok-3-beta@xai
+           - grok-3-fast-beta@xai
+           - grok-3-mini-beta@xai
+           - grok-3-mini-fast-beta@xai
+           - llama-3-70b-chat@aws-bedrock
+           - llama-3-70b-chat@deepinfra
+           - llama-3-70b-chat@fireworks-ai
+           - llama-3-70b-chat@groq
+           - llama-3-70b-chat@replicate
+           - llama-3-8b-chat@aws-bedrock
+           - llama-3-8b-chat@deepinfra
+           - llama-3-8b-chat@groq
+           - llama-3-8b-chat@replicate
+           - llama-3.1-405b-chat@aws-bedrock
+           - llama-3.1-405b-chat@deepinfra
+           - llama-3.1-405b-chat@fireworks-ai
+           - llama-3.1-405b-chat@replicate
+           - llama-3.1-405b-chat@together-ai
+           - llama-3.1-405b-chat@vertex-ai
+           - llama-3.1-70b-chat@aws-bedrock
+           - llama-3.1-70b-chat@deepinfra
+           - llama-3.1-70b-chat@fireworks-ai
+           - llama-3.1-70b-chat@together-ai
+           - llama-3.1-70b-chat@vertex-ai
+           - llama-3.1-8b-chat@aws-bedrock
+           - llama-3.1-8b-chat@deepinfra
+           - llama-3.1-8b-chat@fireworks-ai
+           - llama-3.1-8b-chat@groq
+           - llama-3.1-8b-chat@lepton-ai
+           - llama-3.1-8b-chat@together-ai
+           - llama-3.1-8b-chat@vertex-ai
+           - llama-3.1-nemotron-70b-chat@deepinfra
+           - llama-3.2-11b-chat@deepinfra
+           - llama-3.2-11b-chat@together-ai
+           - llama-3.2-11b-chat@vertex-ai
+           - llama-3.2-1b-chat@aws-bedrock
+           - llama-3.2-1b-chat@deepinfra
+           - llama-3.2-1b-chat@lepton-ai
+           - llama-3.2-3b-chat@aws-bedrock
+           - llama-3.2-3b-chat@deepinfra
+           - llama-3.2-3b-chat@lepton-ai
+           - llama-3.2-3b-chat@together-ai
+           - llama-3.2-90b-chat@deepinfra
+           - llama-3.2-90b-chat@together-ai
+           - llama-3.2-90b-chat@vertex-ai
+           - llama-3.3-70b-chat@aws-bedrock
+           - llama-3.3-70b-chat@deepinfra
+           - llama-3.3-70b-chat@fireworks-ai
+           - llama-3.3-70b-chat@groq
+           - llama-3.3-70b-chat@lepton-ai
+           - llama-3.3-70b-chat@together-ai
+           - llama-4-maverick-instruct@deepinfra
+           - llama-4-maverick-instruct@fireworks-ai
+           - llama-4-maverick-instruct@groq
+           - llama-4-maverick-instruct@replicate
+           - llama-4-maverick-instruct@together-ai
+           - llama-4-scout-instruct@deepinfra
+           - llama-4-scout-instruct@fireworks-ai
+           - llama-4-scout-instruct@groq
+           - llama-4-scout-instruct@replicate
+           - llama-4-scout-instruct@together-ai
+           - ministral-3b@mistral-ai
+           - ministral-8b@mistral-ai
+           - mistral-7b-instruct-v0.3@deepinfra
+           - mistral-7b-instruct-v0.3@lepton-ai
+           - mistral-7b-instruct-v0.3@together-ai
+           - mistral-large@mistral-ai
+           - mistral-large@vertex-ai
+           - mistral-nemo@deepinfra
+           - mistral-nemo@lepton-ai
+           - mistral-nemo@mistral-ai
+           - mistral-nemo@vertex-ai
+           - mistral-small@deepinfra
+           - mistral-small@mistral-ai
+           - mistral-small@together-ai
+           - mixtral-8x22b-instruct-v0.1@fireworks-ai
+           - mixtral-8x7b-instruct-v0.1@deepinfra
+           - mixtral-8x7b-instruct-v0.1@lepton-ai
+           - mixtral-8x7b-instruct-v0.1@together-ai
+           - o1-mini@openai
+           - o1-pro@openai
+           - o1@openai
+           - o3-mini@openai
+           - qwen-2-72b-instruct@together-ai
+           - qwen-2.5-72b-instruct@deepinfra
+           - qwen-2.5-72b-instruct@fireworks-ai
+           - qwen-2.5-72b-instruct@together-ai
+           - qwen-2.5-7b-instruct@deepinfra
+           - qwen-2.5-7b-instruct@together-ai
+           - qwen-2.5-coder-32b-instruct@deepinfra
+           - qwen-2.5-coder-32b-instruct@together-ai
+           - qwen-qwq-32b@deepinfra
+           - qwen-qwq-32b@fireworks-ai
+           - qwen-qwq-32b@groq
+           - qwen-qwq-32b@together-ai
+         fetch: false
+       titleConvo: true
+       titleModel: "gpt-4o-mini@openai"
+       dropParams:
+         - "stop"
+         - "user"
+         - "frequency_penalty"
+         - "presence_penalty"
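Note on the `${...}` placeholders: LibreChat resolves values such as `${glhf}`, `${groq_key}`, `${hf_token}`, `${Kluster_key}`, `${mistral_key}`, `${gpu_key}`, and `${unify_key}` from environment variables when it loads this file. A minimal sketch of how those variables could be supplied in a docker-compose deployment is below; the `api` service name, the mount path, and the dummy key values are assumptions for illustration, not part of this commit.

    # docker-compose.override.yml (sketch only; service name, mount path, and keys are placeholders)
    services:
      api:
        volumes:
          # mount this commit's confg.yaml where LibreChat looks for its config
          - ./confg.yaml:/app/librechat.yaml
        environment:
          # variable names must match the ${...} placeholders used in the apiKey fields
          - glhf=replace-with-glhf-chat-key
          - groq_key=replace-with-groq-key
          - hf_token=replace-with-huggingface-token
          - Kluster_key=replace-with-kluster-ai-key
          - mistral_key=replace-with-mistral-key
          - gpu_key=replace-with-nvidia-api-key
          - unify_key=replace-with-unify-key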