|
import streamlit as st |
|
from secretsload import load_stsecrets |
|
|
|
# Load API credentials from Streamlit's secrets store via the project helper.
# Presumably returns the credential dict/object the model client expects —
# see secretsload.load_stsecrets for the exact keys (not visible here).
credentials = load_stsecrets()
|
|
|
|
|
# ---------------------------------------------------------------------------
# Model and vector-index selection
# ---------------------------------------------------------------------------
ACTIVE_MODEL = 0  # presumably a 0-based selector between SELECTED_MODEL_1/2 — TODO confirm in consumer code

ACTIVE_INDEX = 0  # presumably selects between VECTOR_INDEX_1/2 — TODO confirm

# Inference mode label consumed by the app.
TYPE = "chat"

SELECTED_MODEL_1 = "meta-llama/llama-3-1-70b-instruct"

SELECTED_MODEL_2 = "mistralai/mistral-large"

# NOTE(review): looks like a TLS-certificate-verification flag passed to the
# API client — confirm; disabled verification is unsafe outside local dev.
VERIFY = False

# Prompt-template identifiers, one per model family above.
PROMPT_TEMPLATE_1 = "llama3-instruct (llama-3, 3.1 & 3.2) - system"

PROMPT_TEMPLATE_2 = "mistral & mixtral v2 tokenizer - system segmented"

# When True, the prompt-template syntax is presumably applied to messages
# before sending them to the model — TODO confirm against the consuming app.
BAKE_IN_PROMPT_SYNTAX = True

# Display names for the three bot personas.
BOT_1_NAME = "PATH-er B."

BOT_2_NAME = "MOD-ther S."

BOT_3_NAME = "SYS-ter V."

# System prompts live in Streamlit secrets rather than source control;
# str() normalizes whatever type the secrets store returns.
BOT_1_PROMPT = str(st.secrets["system_prompt_1"])

BOT_2_PROMPT = str(st.secrets["system_prompt_2"])

BOT_3_PROMPT = str(st.secrets["system_prompt_3"])

# Vector (retrieval) index ids, also sourced from secrets.
VECTOR_INDEX_1 = str(st.secrets["vector_index_id_1"])

VECTOR_INDEX_2 = str(st.secrets["vector_index_id_2"])

# ---------------------------------------------------------------------------
# Generation parameters
# ---------------------------------------------------------------------------
DECODING_METHOD = "greedy"  # NOTE(review): sampling knobs below are typically ignored under greedy decoding — confirm client behavior

MAX_NEW_TOKENS = 850

MIN_NEW_TOKENS = 1

REPETITION_PENALTY = 1.0  # 1.0 = no repetition penalty

# Generation stops when either sequence is emitted: llama-3's <|end_of_text|>
# or the </s> EOS marker used by mistral-family models.
STOP_SEQUENCES = ["<|end_of_text|>","</s>"]

# Sampling parameters (effective only when a sampling decoding method is
# selected — TODO confirm against the client API).
TEMPERATURE = 0.7

TOP_P = 1.0

TOP_K = 50
|
# Feature flags. Booleans replace the previous 1/0 ints for clarity; since
# bool is a subclass of int, existing checks such as `== 1`, `== 0`, or plain
# truthiness continue to behave identically.
DISPLAY_CHAT_HISTORY = True   # presumably: show prior chat turns in the UI — TODO confirm
TOKEN_CAPTURE_ENABLED = False  # presumably: token-usage capture/logging off — TODO confirm