martynka committed on
Commit fa8066c · verified · 1 Parent(s): 14d4ebe

Update config.yaml

Files changed (1)
  1. config.yaml +10 -476
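
For orientation: on the additions side, this commit keeps the agents capabilities list and the speech voices entry; everything else is removal. The sketch below reconstructs the capabilities block from the + lines of the diff (the viewer strips leading whitespace, so the exact indentation is an assumption):

endpoints:
  agents:
    capabilities:
      - "file_search"
      - "actions"
      - "artifacts"
      - "chain"

The speech/TTS block likewise keeps voices: ["Celeste-PlayAI"]. The removals are a few blank-line tidy-ups near the top of the file, the Kluster, Mistral, Nvidia and Unify custom endpoints, and most of the HuggingFace model list, which together account for the -476 side of the diff stat.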
config.yaml CHANGED
@@ -10,10 +10,9 @@ interface:
10
  externalUrl: 'https://go.easierit.org/tos'
11
  openNewTab: true
12
  runCode: false
13
-
14
  registration:
15
  socialLogins: ["openid"]
16
-
17
  #modelSpecs:
18
  # list:
19
  # - name: "gpt-4.1-mini"
@@ -70,14 +69,14 @@ registration:
70
  # model: "o3-mini"
71
  # max_tokens: 100000
72
  # reasoning_effort: "high"
73
-
74
  endpoints:
75
  agents:
76
- capabilities:
77
- - "file_search"
78
- - "actions"
79
- - "artifacts"
80
- - "chain"
 
81
  custom:
82
  # - name: "Github Models"
83
  # iconURL: https://github.githubassets.com/assets/GitHub-Mark-ea2971cee799.png
@@ -146,8 +145,6 @@ endpoints:
146
  - hf:upstage/SOLAR-10.7B-Instruct-v1.0
147
  titleConvo: true
148
  titleModel: "current_model"
149
- # groq
150
- # Model list: https://console.groq.com/settings/limits
151
  - name: "groq"
152
  apiKey: "${groq_key}"
153
  baseURL: "https://api.groq.com/openai/v1/"
@@ -170,9 +167,6 @@ endpoints:
170
  titleConvo: true
171
  titleModel: "mixtral-8x7b-32768"
172
  modelDisplayLabel: "groq"
173
-
174
- # HuggingFace
175
- # https://huggingface.co/settings/tokens
176
  - name: 'HuggingFace'
177
  apiKey: "${hf_token}"
178
  baseURL: 'https://api-inference.huggingface.co/v1'
@@ -180,497 +174,35 @@ endpoints:
180
  default:
181
  - AIDC-AI/Marco-o1
182
  - CohereLabs/c4ai-command-r-plus
183
- - CohereLabs/c4ai-command-r-v01
184
  - HuggingFaceH4/zephyr-7b-alpha
185
- - HuggingFaceH4/zephyr-7b-beta
186
  - HuggingFaceTB/SmolLM2-1.7B-Instruct
187
  - Intel/neural-chat-7b-v3-1
188
  - MiniMaxAI/MiniMax-Text-01
189
  - NexaAIDev/Octopus-v2
190
  - NovaSky-AI/Sky-T1-32B-Preview
191
- - Open-Orca/Mistral-7B-OpenOrca
192
- - PygmalionAI/pygmalion-6b
193
  - Qwen/QwQ-32B
194
  - Qwen/QwQ-32B-Preview
195
  - Qwen/Qwen2-72B-Instruct
196
- - Qwen/Qwen2-7B-Instruct
197
- - Qwen/Qwen2.5-72B-Instruct
198
  - Qwen/Qwen2.5-7B-Instruct
199
- - Qwen/Qwen2.5-Coder-32B-Instruct
200
  - TinyLlama/TinyLlama-1.1B-Chat-v1.0
201
  - berkeley-nest/Starling-LM-7B-alpha
202
  - cognitivecomputations/dolphin-2.5-mixtral-8x7b
203
  - databricks/dbrx-base
204
- - databricks/dbrx-instruct
205
  - deepseek-ai/DeepSeek-Coder-V2-Instruct
206
  - deepseek-ai/DeepSeek-R1
207
- - deepseek-ai/DeepSeek-R1-Distill-Llama-70B
208
- - deepseek-ai/DeepSeek-R1-Distill-Llama-8B
209
- - deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B
210
- - deepseek-ai/DeepSeek-R1-Distill-Qwen-14B
211
- - deepseek-ai/DeepSeek-R1-Distill-Qwen-32B
212
- - deepseek-ai/DeepSeek-R1-Distill-Qwen-7B
213
- - deepseek-ai/DeepSeek-R1-Zero
214
- - deepseek-ai/DeepSeek-V2.5
215
  - deepseek-ai/DeepSeek-V3
216
- - deepseek-ai/DeepSeek-V3-0324
217
- - deepseek-ai/deepseek-coder-33b-instruct
218
  - google/gemma-2-27b-it
219
- - google/gemma-2-2b-it
220
- - google/gemma-2-9b-it
221
- - google/gemma-2b-it
222
- - google/gemma-7b-it
223
- - gradientai/Llama-3-8B-Instruct-Gradient-1048k
224
- - jinaai/ReaderLM-v2
225
- - jinaai/reader-lm-1.5b
226
- - manycore-research/SpatialLM-Llama-1B
227
- - mattshumer/Reflection-Llama-3.1-70B
228
  - meta-llama/Llama-2-13b-chat-hf
229
  - meta-llama/Llama-2-70b-chat-hf
230
  - meta-llama/Llama-2-7b-chat-hf
231
  - meta-llama/Llama-3.1-405B-Instruct
232
- - meta-llama/Llama-3.1-70B-Instruct
233
- - meta-llama/Llama-3.1-8B-Instruct
234
- - meta-llama/Llama-3.2-1B-Instruct
235
- - meta-llama/Llama-3.2-3B-Instruct
236
- - meta-llama/Llama-3.3-70B-Instruct
237
- - meta-llama/Meta-Llama-3-70B-Instruct
238
- - meta-llama/Meta-Llama-3-8B-Instruct
239
  - microsoft/Phi-3-mini-128k-instruct
240
- - microsoft/Phi-3-mini-4k-instruct
241
- - microsoft/Phi-3-vision-128k-instruct
242
- - microsoft/Phi-3.5-MoE-instruct
243
- - microsoft/Phi-3.5-mini-instruct
244
- - microsoft/phi-4
245
- - mistralai/Codestral-22B-v0.1
246
- - mistralai/Mistral-7B-Instruct-v0.1
247
- - mistralai/Mistral-7B-Instruct-v0.2
248
- - mistralai/Mistral-7B-Instruct-v0.3
249
- - mistralai/Mistral-Nemo-Instruct-2407
250
- - mistralai/Mistral-Small-24B-Instruct-2501
251
- - mistralai/Mixtral-8x22B-Instruct-v0.1
252
- - mistralai/Mixtral-8x7B-Instruct-v0.1
253
  - nvidia/Llama-3.1-Nemotron-70B-Instruct-HF
254
  - nvidia/Llama3-ChatQA-1.5-8B
255
  - openchat/openchat_3.5
256
  - perplexity-ai/r1-1776
257
- - shenzhi-wang/Llama3-8B-Chinese-Chat
258
- - teknium/OpenHermes-2.5-Mistral-7B
259
- - tiiuae/falcon-180B-chat
260
- - tiiuae/falcon-7b-instruct
261
- - unsloth/DeepSeek-R1-GGUF
262
- - upstage/SOLAR-10.7B-Instruct-v1.0
263
  titleConvo: true
264
  titleModel: "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO"
265
- dropParams:
266
- - "top_p"
267
-
268
- # kluster.ai
269
- # https://platform.kluster.ai/apikeys
270
- - name: "Kluster"
271
- iconURL: "https://platform.kluster.ai/cropped-fav-1-144x144.png"
272
- apiKey: "${Kluster_key}"
273
- baseURL: "https://api.kluster.ai/v1/"
274
- models:
275
- default:
276
- - Qwen/Qwen2.5-VL-7B-Instruct
277
- - deepseek-ai/DeepSeek-R1
278
- - deepseek-ai/DeepSeek-V3
279
- - deepseek-ai/DeepSeek-V3-0324
280
- - google/gemma-3-27b-it
281
- - klusterai/Meta-Llama-3.1-405B-Instruct-Turbo
282
- - klusterai/Meta-Llama-3.1-8B-Instruct-Turbo
283
- - klusterai/Meta-Llama-3.3-70B-Instruct-Turbo
284
- - meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8
285
- - meta-llama/Llama-4-Scout-17B-16E-Instruct
286
- titleConvo: true
287
- titleModel: 'klusterai/Meta-Llama-3.1-8B-Instruct-Turbo'
288
- modelDisplayLabel: 'Kluster'
289
-
290
- # Mistral AI API
291
- # Model list: https://docs.mistral.ai/getting-started/models/
292
- - name: "Mistral"
293
- apiKey: "${mistral_key}"
294
- baseURL: "https://api.mistral.ai/v1"
295
- models:
296
- default:
297
- - codestral-2405
298
- - codestral-2411-rc5
299
- - codestral-2412
300
- - codestral-2501
301
- - codestral-latest
302
- - codestral-mamba-2407
303
- - codestral-mamba-latest
304
- - ministral-3b-2410
305
- - ministral-3b-latest
306
- - ministral-8b-2410
307
- - ministral-8b-latest
308
- - mistral-embed
309
- - mistral-large-2402
310
- - mistral-large-2407
311
- - mistral-large-2411
312
- - mistral-large-latest
313
- - mistral-large-pixtral-2411
314
- - mistral-medium
315
- - mistral-medium-2312
316
- - mistral-medium-latest
317
- - mistral-moderation-2411
318
- - mistral-moderation-latest
319
- - mistral-ocr-2503
320
- - mistral-ocr-latest
321
- - mistral-saba-2502
322
- - mistral-saba-latest
323
- - mistral-small
324
- - mistral-small-2312
325
- - mistral-small-2402
326
- - mistral-small-2409
327
- - mistral-small-2501
328
- - mistral-small-2503
329
- - mistral-small-latest
330
- - mistral-tiny
331
- - mistral-tiny-2312
332
- - mistral-tiny-2407
333
- - mistral-tiny-latest
334
- - open-codestral-mamba
335
- - open-mistral-7b
336
- - open-mistral-nemo
337
- - open-mistral-nemo-2407
338
- - open-mixtral-8x22b
339
- - open-mixtral-8x22b-2404
340
- - open-mixtral-8x7b
341
- - pixtral-12b
342
- - pixtral-12b-2409
343
- - pixtral-12b-latest
344
- - pixtral-large-2411
345
- - pixtral-large-latest
346
- titleConvo: true
347
- titleMethod: "completion"
348
- titleModel: "mistral-tiny"
349
- summarize: false
350
- summaryModel: "mistral-tiny"
351
- forcePrompt: false
352
- modelDisplayLabel: "Mistral"
353
- dropParams:
354
- - "stop"
355
- - "user"
356
- - "frequency_penalty"
357
- - "presence_penalty"
358
-
359
-
360
- # NVIDIA
361
- # https://build.nvidia.com/explore/discover
362
- - name: "Nvidia"
363
- iconURL: "https://raw.githubusercontent.com/LibreChat-AI/librechat-config-yaml/refs/heads/main/icons/nvidia.png"
364
- apiKey: "${gpu_key}"
365
- baseURL: "https://integrate.api.nvidia.com/v1/"
366
- models:
367
- default:
368
- - 01-ai/yi-large
369
- - abacusai/dracarys-llama-3.1-70b-instruct
370
- - adept/fuyu-8b
371
- - ai21labs/jamba-1.5-large-instruct
372
- - ai21labs/jamba-1.5-mini-instruct
373
- - aisingapore/sea-lion-7b-instruct
374
- - baai/bge-m3
375
- - baichuan-inc/baichuan2-13b-chat
376
- - bigcode/starcoder2-15b
377
- - bigcode/starcoder2-7b
378
- - databricks/dbrx-instruct
379
- - deepseek-ai/deepseek-coder-6.7b-instruct
380
- - deepseek-ai/deepseek-r1
381
- - deepseek-ai/deepseek-r1-distill-llama-8b
382
- - deepseek-ai/deepseek-r1-distill-qwen-14b
383
- - deepseek-ai/deepseek-r1-distill-qwen-32b
384
- - deepseek-ai/deepseek-r1-distill-qwen-7b
385
- - google/codegemma-1.1-7b
386
- - google/codegemma-7b
387
- - google/deplot
388
- - google/gemma-2-27b-it
389
- - google/gemma-2-2b-it
390
- - google/gemma-2-9b-it
391
- - google/gemma-2b
392
- - google/gemma-3-12b-it
393
- - google/gemma-3-1b-it
394
- - google/gemma-3-27b-it
395
- - google/gemma-3-4b-it
396
- - google/gemma-7b
397
- - google/paligemma
398
- - google/recurrentgemma-2b
399
- - google/shieldgemma-9b
400
- - ibm/granite-3.0-3b-a800m-instruct
401
- - ibm/granite-3.0-8b-instruct
402
- - ibm/granite-34b-code-instruct
403
- - ibm/granite-8b-code-instruct
404
- - ibm/granite-guardian-3.0-8b
405
- - igenius/colosseum_355b_instruct_16k
406
- - igenius/italia_10b_instruct_16k
407
- - institute-of-science-tokyo/llama-3.1-swallow-70b-instruct-v0.1
408
- - institute-of-science-tokyo/llama-3.1-swallow-8b-instruct-v0.1
409
- - mediatek/breeze-7b-instruct
410
- - meta/codellama-70b
411
- - meta/llama-3.1-405b-instruct
412
- - meta/llama-3.1-70b-instruct
413
- - meta/llama-3.1-8b-instruct
414
- - meta/llama-3.2-11b-vision-instruct
415
- - meta/llama-3.2-1b-instruct
416
- - meta/llama-3.2-3b-instruct
417
- - meta/llama-3.2-90b-vision-instruct
418
- - meta/llama-3.3-70b-instruct
419
- - meta/llama-4-maverick-17b-128e-instruct
420
- - meta/llama-4-scout-17b-16e-instruct
421
- - meta/llama2-70b
422
- - meta/llama3-70b-instruct
423
- - meta/llama3-8b-instruct
424
- - microsoft/kosmos-2
425
- - microsoft/phi-3-medium-128k-instruct
426
- - microsoft/phi-3-medium-4k-instruct
427
- - microsoft/phi-3-mini-128k-instruct
428
- - microsoft/phi-3-mini-4k-instruct
429
- - microsoft/phi-3-small-128k-instruct
430
- - microsoft/phi-3-small-8k-instruct
431
- - microsoft/phi-3-vision-128k-instruct
432
- - microsoft/phi-3.5-mini-instruct
433
- - microsoft/phi-3.5-moe-instruct
434
- - microsoft/phi-3.5-vision-instruct
435
- - microsoft/phi-4-mini-instruct
436
- - microsoft/phi-4-multimodal-instruct
437
- - mistralai/codestral-22b-instruct-v0.1
438
- - mistralai/mamba-codestral-7b-v0.1
439
- - mistralai/mathstral-7b-v0.1
440
- - mistralai/mistral-7b-instruct-v0.2
441
- - mistralai/mistral-7b-instruct-v0.3
442
- - mistralai/mistral-large
443
- - mistralai/mistral-large-2-instruct
444
- - mistralai/mistral-small-24b-instruct
445
- - mistralai/mixtral-8x22b-instruct-v0.1
446
- - mistralai/mixtral-8x22b-v0.1
447
- - mistralai/mixtral-8x7b-instruct-v0.1
448
- - nv-mistralai/mistral-nemo-12b-instruct
449
- - nvidia/embed-qa-4
450
- - nvidia/llama-3.1-nemoguard-8b-content-safety
451
- - nvidia/llama-3.1-nemoguard-8b-topic-control
452
- - nvidia/llama-3.1-nemotron-51b-instruct
453
- - nvidia/llama-3.1-nemotron-70b-instruct
454
- - nvidia/llama-3.1-nemotron-70b-reward
455
- - nvidia/llama-3.1-nemotron-nano-8b-v1
456
- - nvidia/llama-3.1-nemotron-ultra-253b-v1
457
- - nvidia/llama-3.2-nv-embedqa-1b-v1
458
- - nvidia/llama-3.2-nv-embedqa-1b-v2
459
- - nvidia/llama-3.3-nemotron-super-49b-v1
460
- - nvidia/llama3-chatqa-1.5-70b
461
- - nvidia/llama3-chatqa-1.5-8b
462
- - nvidia/mistral-nemo-minitron-8b-8k-instruct
463
- - nvidia/mistral-nemo-minitron-8b-base
464
- - nvidia/nemoretriever-parse
465
- - nvidia/nemotron-4-340b-instruct
466
- - nvidia/nemotron-4-340b-reward
467
- - nvidia/nemotron-4-mini-hindi-4b-instruct
468
- - nvidia/nemotron-mini-4b-instruct
469
- - nvidia/neva-22b
470
- - nvidia/nv-embed-v1
471
- - nvidia/nv-embedcode-7b-v1
472
- - nvidia/nv-embedqa-e5-v5
473
- - nvidia/nv-embedqa-mistral-7b-v2
474
- - nvidia/nvclip
475
- - nvidia/usdcode-llama-3.1-70b-instruct
476
- - nvidia/vila
477
- - qwen/qwen2-7b-instruct
478
- - qwen/qwen2.5-7b-instruct
479
- - qwen/qwen2.5-coder-32b-instruct
480
- - qwen/qwen2.5-coder-7b-instruct
481
- - qwen/qwq-32b
482
- - rakuten/rakutenai-7b-chat
483
- - rakuten/rakutenai-7b-instruct
484
- - snowflake/arctic-embed-l
485
- - thudm/chatglm3-6b
486
- - tiiuae/falcon3-7b-instruct
487
- - tokyotech-llm/llama-3-swallow-70b-instruct-v0.1
488
- - upstage/solar-10.7b-instruct
489
- - writer/palmyra-creative-122b
490
- - writer/palmyra-fin-70b-32k
491
- - writer/palmyra-med-70b
492
- - writer/palmyra-med-70b-32k
493
- - yentinglin/llama-3-taiwan-70b-instruct
494
- - zyphra/zamba2-7b-instruct
495
- titleConvo: true
496
- titleModel: "nvidia/nemotron-mini-4b-instruct"
497
- modelDisplayLabel: "Nvidia"
498
-
499
- # Unify
500
- # Model list: https://unify.ai/chat
501
- - name: "Unify"
502
- apiKey: "${unify_key}"
503
- baseURL: "https://api.unify.ai/v0/"
504
- models:
505
- default:
506
- - chatgpt-4o-latest@openai
507
- - claude-3-haiku@anthropic
508
- - claude-3-haiku@aws-bedrock
509
- - claude-3-haiku@vertex-ai
510
- - claude-3-opus@anthropic
511
- - claude-3-opus@aws-bedrock
512
- - claude-3-opus@vertex-ai
513
- - claude-3-sonnet@anthropic
514
- - claude-3-sonnet@aws-bedrock
515
- - claude-3.5-haiku@anthropic
516
- - claude-3.5-haiku@aws-bedrock
517
- - claude-3.5-haiku@replicate
518
- - claude-3.5-haiku@vertex-ai
519
- - claude-3.5-sonnet-20240620@anthropic
520
- - claude-3.5-sonnet-20240620@aws-bedrock
521
- - claude-3.5-sonnet-20240620@vertex-ai
522
- - claude-3.5-sonnet@anthropic
523
- - claude-3.5-sonnet@aws-bedrock
524
- - claude-3.5-sonnet@replicate
525
- - claude-3.5-sonnet@vertex-ai
526
- - claude-3.7-sonnet@anthropic
527
- - claude-3.7-sonnet@aws-bedrock
528
- - claude-3.7-sonnet@replicate
529
- - claude-3.7-sonnet@vertex-ai
530
- - command-r-plus@aws-bedrock
531
- - deepseek-r1@aws-bedrock
532
- - deepseek-r1@deepinfra
533
- - deepseek-r1@deepseek
534
- - deepseek-r1@fireworks-ai
535
- - deepseek-r1@replicate
536
- - deepseek-r1@together-ai
537
- - deepseek-v3-0324@deepinfra
538
- - deepseek-v3-0324@fireworks-ai
539
- - deepseek-v3@deepinfra
540
- - deepseek-v3@deepseek
541
- - deepseek-v3@fireworks-ai
542
- - deepseek-v3@replicate
543
- - deepseek-v3@together-ai
544
- - gemini-1.5-flash-001@vertex-ai
545
- - gemini-1.5-flash-002@vertex-ai
546
- - gemini-1.5-flash@vertex-ai
547
- - gemini-1.5-pro-001@vertex-ai
548
- - gemini-1.5-pro-002@vertex-ai
549
- - gemini-1.5-pro@vertex-ai
550
- - gemini-2.0-flash-lite@vertex-ai
551
- - gemini-2.0-flash@vertex-ai
552
- - gemini-2.5-pro@vertex-ai
553
- - gemma-2-27b-it@together-ai
554
- - gemma-2-9b-it@groq
555
- - gemma-2-9b-it@lepton-ai
556
- - gemma-3-12b-it@deepinfra
557
- - gemma-3-27b-it@deepinfra
558
- - gemma-3-4b-it@deepinfra
559
- - gpt-3.5-turbo@openai
560
- - gpt-4-turbo@openai
561
- - gpt-4.5-preview@openai
562
- - gpt-4@openai
563
- - gpt-4o-2024-05-13@openai
564
- - gpt-4o-2024-08-06@openai
565
- - gpt-4o-2024-11-20@openai
566
- - gpt-4o-mini-search-preview@openai
567
- - gpt-4o-mini@openai
568
- - gpt-4o-search-preview@openai
569
- - gpt-4o@openai
570
- - grok-2-vision@xai
571
- - grok-2@xai
572
- - grok-3-beta@xai
573
- - grok-3-fast-beta@xai
574
- - grok-3-mini-beta@xai
575
- - grok-3-mini-fast-beta@xai
576
- - llama-3-70b-chat@aws-bedrock
577
- - llama-3-70b-chat@deepinfra
578
- - llama-3-70b-chat@fireworks-ai
579
- - llama-3-70b-chat@groq
580
- - llama-3-70b-chat@replicate
581
- - llama-3-8b-chat@aws-bedrock
582
- - llama-3-8b-chat@deepinfra
583
- - llama-3-8b-chat@groq
584
- - llama-3-8b-chat@replicate
585
- - llama-3.1-405b-chat@aws-bedrock
586
- - llama-3.1-405b-chat@deepinfra
587
- - llama-3.1-405b-chat@fireworks-ai
588
- - llama-3.1-405b-chat@replicate
589
- - llama-3.1-405b-chat@together-ai
590
- - llama-3.1-405b-chat@vertex-ai
591
- - llama-3.1-70b-chat@aws-bedrock
592
- - llama-3.1-70b-chat@deepinfra
593
- - llama-3.1-70b-chat@fireworks-ai
594
- - llama-3.1-70b-chat@together-ai
595
- - llama-3.1-70b-chat@vertex-ai
596
- - llama-3.1-8b-chat@aws-bedrock
597
- - llama-3.1-8b-chat@deepinfra
598
- - llama-3.1-8b-chat@fireworks-ai
599
- - llama-3.1-8b-chat@groq
600
- - llama-3.1-8b-chat@lepton-ai
601
- - llama-3.1-8b-chat@together-ai
602
- - llama-3.1-8b-chat@vertex-ai
603
- - llama-3.1-nemotron-70b-chat@deepinfra
604
- - llama-3.2-11b-chat@deepinfra
605
- - llama-3.2-11b-chat@together-ai
606
- - llama-3.2-11b-chat@vertex-ai
607
- - llama-3.2-1b-chat@aws-bedrock
608
- - llama-3.2-1b-chat@deepinfra
609
- - llama-3.2-1b-chat@lepton-ai
610
- - llama-3.2-3b-chat@aws-bedrock
611
- - llama-3.2-3b-chat@deepinfra
612
- - llama-3.2-3b-chat@lepton-ai
613
- - llama-3.2-3b-chat@together-ai
614
- - llama-3.2-90b-chat@deepinfra
615
- - llama-3.2-90b-chat@together-ai
616
- - llama-3.2-90b-chat@vertex-ai
617
- - llama-3.3-70b-chat@aws-bedrock
618
- - llama-3.3-70b-chat@deepinfra
619
- - llama-3.3-70b-chat@fireworks-ai
620
- - llama-3.3-70b-chat@groq
621
- - llama-3.3-70b-chat@lepton-ai
622
- - llama-3.3-70b-chat@together-ai
623
- - llama-4-maverick-instruct@deepinfra
624
- - llama-4-maverick-instruct@fireworks-ai
625
- - llama-4-maverick-instruct@groq
626
- - llama-4-maverick-instruct@replicate
627
- - llama-4-maverick-instruct@together-ai
628
- - llama-4-scout-instruct@deepinfra
629
- - llama-4-scout-instruct@fireworks-ai
630
- - llama-4-scout-instruct@groq
631
- - llama-4-scout-instruct@replicate
632
- - llama-4-scout-instruct@together-ai
633
- - ministral-3b@mistral-ai
634
- - ministral-8b@mistral-ai
635
- - mistral-7b-instruct-v0.3@deepinfra
636
- - mistral-7b-instruct-v0.3@lepton-ai
637
- - mistral-7b-instruct-v0.3@together-ai
638
- - mistral-large@mistral-ai
639
- - mistral-large@vertex-ai
640
- - mistral-nemo@deepinfra
641
- - mistral-nemo@lepton-ai
642
- - mistral-nemo@mistral-ai
643
- - mistral-nemo@vertex-ai
644
- - mistral-small@deepinfra
645
- - mistral-small@mistral-ai
646
- - mistral-small@together-ai
647
- - mixtral-8x22b-instruct-v0.1@fireworks-ai
648
- - mixtral-8x7b-instruct-v0.1@deepinfra
649
- - mixtral-8x7b-instruct-v0.1@lepton-ai
650
- - mixtral-8x7b-instruct-v0.1@together-ai
651
- - o1-mini@openai
652
- - o1-pro@openai
653
- - o1@openai
654
- - o3-mini@openai
655
- - qwen-2-72b-instruct@together-ai
656
- - qwen-2.5-72b-instruct@deepinfra
657
- - qwen-2.5-72b-instruct@fireworks-ai
658
- - qwen-2.5-72b-instruct@together-ai
659
- - qwen-2.5-7b-instruct@deepinfra
660
- - qwen-2.5-7b-instruct@together-ai
661
- - qwen-2.5-coder-32b-instruct@deepinfra
662
- - qwen-2.5-coder-32b-instruct@together-ai
663
- - qwen-qwq-32b@deepinfra
664
- - qwen-qwq-32b@fireworks-ai
665
- - qwen-qwq-32b@groq
666
- - qwen-qwq-32b@together-ai
667
- titleConvo: true
668
- titleModel: "gpt-4o-mini@openai"
669
- dropParams:
670
- - "stop"
671
- - "user"
672
- - "frequency_penalty"
673
- - "presence_penalty"
674
 
675
  speech:
676
  speechTab:
@@ -688,4 +220,6 @@ speech:
688
  url: "https://api.groq.com/openai/v1/audio/speech"
689
  apiKey: "${groq_key}"
690
  model: "playai-tts"
691
- voices: ["Celeste-PlayAI"]
 
 
 