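# CODEFUSSION ☄: a Streamlit chat app that lets the user talk to one of three
# instruction-tuned models hosted on the Hugging Face Inference API, with the
# conversation history persisted to a local pickle file.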
import streamlit as st
from huggingface_hub import InferenceClient
import os
import pickle
st.title("CODEFUSSION ☄")
base_url = "https://api-inference.huggingface.co/models/"
API_KEY = os.environ.get('HUGGINGFACE_API_KEY')
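# Map each persona shown in the UI to the hosted Inference API endpoint of
# its underlying model.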
model_links = {
    "LegacyLift🚀": base_url + "mistralai/Mistral-7B-Instruct-v0.2",
    "ModernMigrate⭐": base_url + "mistralai/Mixtral-8x7B-Instruct-v0.1",
    "RetroRecode🔄": base_url + "microsoft/Phi-3-mini-4k-instruct"
}
model_info = {
    "LegacyLift🚀": {
        'description': """The LegacyLift model is a **Large Language Model (LLM)** built for question-and-answer interactions.\n\nIt is best suited to lightweight problem-solving, content writing, and daily tips.\n""",
        'logo': './11.jpg'
    },
    "ModernMigrate⭐": {
        'description': """The ModernMigrate model is a **Large Language Model (LLM)** built for question-and-answer interactions.\n\nIt excels at coding, logical reasoning, and high-speed inference.\n""",
        'logo': './2.jpg'
    },
    "RetroRecode🔄": {
        'description': """The RetroRecode model is a **Large Language Model (LLM)** built for question-and-answer interactions.\n\nIt is best suited to critical development, practical knowledge, and serverless inference.\n""",
        'logo': './3.jpg'
    },
}
def format_prompt(message, conversation_history, custom_instructions=None):
    """Assemble a Mistral-style [INST] prompt from the current message,
    the running conversation, and optional custom instructions."""
    prompt = ""
    if custom_instructions:
        prompt += f"[INST] {custom_instructions} [/INST]"

    # Add the conversation history to the prompt
    prompt += "[CONV_HISTORY]\n"
    for role, content in conversation_history:
        prompt += f"{role.upper()}: {content}\n"
    prompt += "[/CONV_HISTORY]"

    # Add the current message
    prompt += f"[INST] {message} [/INST]"
    return prompt
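# Example of the prompt this produces (one prior exchange assumed for
# illustration; the history contents are hypothetical):
#   [INST] Act like a Human in conversation [/INST][CONV_HISTORY]
#   USER: hello
#   ASSISTANT: Hi! How can I help?
#   [/CONV_HISTORY][INST] convert this COBOL to Python [/INST]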
def reset_conversation():
    '''Clears the in-memory chat and wipes the saved history file.'''
    st.session_state.conversation = []
    st.session_state.messages = []
    save_conversation_history([])
def load_conversation_history():
    history_file = "conversation_history.pickle"
    if os.path.exists(history_file):
        with open(history_file, "rb") as f:
            conversation_history = pickle.load(f)
    else:
        conversation_history = []
    return conversation_history
def save_conversation_history(conversation_history):
    history_file = "conversation_history.pickle"
    with open(history_file, "wb") as f:
        pickle.dump(conversation_history, f)
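# Note: conversation_history.pickle lives on the app's local disk, so it is
# shared by every visitor to the Space and is lost whenever the Space restarts.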
models = list(model_links.keys())
selected_model = st.sidebar.selectbox("Select Model", models)
temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, 0.5)
st.sidebar.button('Reset Chat', on_click=reset_conversation) # Reset button
st.sidebar.write(f"You're now chatting with **{selected_model}**")
st.sidebar.markdown(model_info[selected_model]['description'])
st.sidebar.image(model_info[selected_model]['logo'])
st.sidebar.markdown("\*Generating the code might go slow if you are using low power resources \*")
if "prev_option" not in st.session_state:
st.session_state.prev_option = selected_model
if st.session_state.prev_option != selected_model:
st.session_state.messages = []
st.session_state.prev_option = selected_model
repo_id = model_links[selected_model]
st.subheader(selected_model)
# Load the conversation history from the file
st.session_state.messages = load_conversation_history()
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
if prompt := st.chat_input(f"Hi, I'm {selected_model}. How can I help you today?"):
    custom_instruction = "Act like a Human in conversation"

    # Show and record the user's message
    with st.chat_message("user"):
        st.markdown(prompt)
    st.session_state.messages.append({"role": "user", "content": prompt})

    conversation_history = [(message["role"], message["content"]) for message in st.session_state.messages]
    formatted_text = format_prompt(prompt, conversation_history, custom_instruction)

    # Budget the response length; the whitespace split is only a rough
    # proxy for the true token count.
    max_new_tokens = 3000
    if selected_model != "RetroRecode🔄":
        input_tokens = len(formatted_text.split())
        max_tokens = {"LegacyLift🚀": 32000, "ModernMigrate⭐": 8192}
        max_new_tokens = max(max_tokens[selected_model] - input_tokens, 1)

    with st.chat_message("assistant"):
        # Pass the API token so the hosted endpoint accepts the request
        client = InferenceClient(model=model_links[selected_model], token=API_KEY)
        output = client.text_generation(
            formatted_text,
            temperature=temp_values,
            max_new_tokens=max_new_tokens,
            stream=True
        )
        response = st.write_stream(output)
    st.session_state.messages.append({"role": "assistant", "content": response})

    # Save the updated conversation history to the file
    save_conversation_history(st.session_state.messages)
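# To run locally (file name and token value assumed for illustration):
#   export HUGGINGFACE_API_KEY=hf_your_token_here
#   streamlit run app.py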