Update app.py
app.py CHANGED
```diff
@@ -51,7 +51,7 @@ def reset_conversation():
     '''
     st.session_state.conversation = []
     st.session_state.messages = []
-    save_conversation_history([])
+    save_conversation_history([])
     return None
 
 def load_conversation_history():
```
```diff
@@ -68,14 +68,6 @@ def save_conversation_history(conversation_history):
     with open(history_file, "wb") as f:
         pickle.dump(conversation_history, f)
 
-# Ensure session state is initialized
-if "conversation" not in st.session_state:
-    st.session_state.conversation = []
-if "messages" not in st.session_state:
-    st.session_state.messages = []
-if "prev_option" not in st.session_state:
-    st.session_state.prev_option = None
-
 models = [key for key in model_links.keys()]
 selected_model = st.sidebar.selectbox("Select Model", models)
 temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, (0.5))
```
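The save side of the persistence helpers is visible above; the load side is not shown in this diff. A minimal sketch of what it presumably looks like, assuming `history_file` is the same pickle path used by `save_conversation_history` and that a missing or corrupt file should yield an empty history:

```python
import os
import pickle

history_file = "conversation_history.pkl"  # assumption: the real path is defined elsewhere in app.py

def load_conversation_history():
    # Return an empty history when the file is missing or unreadable,
    # so a fresh Space starts with a clean chat instead of crashing.
    if not os.path.exists(history_file):
        return []
    try:
        with open(history_file, "rb") as f:
            return pickle.load(f)
    except (pickle.UnpicklingError, EOFError):
        return []
```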
```diff
@@ -87,6 +79,9 @@ st.sidebar.image(model_info[selected_model]['logo'])
 
 st.sidebar.markdown("\*Generating the code might go slow if you are using low power resources \*")
 
+if "prev_option" not in st.session_state:
+    st.session_state.prev_option = selected_model
+
 if st.session_state.prev_option != selected_model:
     st.session_state.messages = []
     st.session_state.prev_option = selected_model
```
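This guard replaces the top-level initialization removed in the previous hunk. Streamlit re-runs the whole script on every interaction, so `st.session_state.prev_option` must exist before the comparison below reads it; seeding it with the current selection (rather than the old `None`) also stops the very first run from needlessly clearing the chat. Since `st.session_state` is dict-like, the same initialization can be written more compactly:

```python
# Equivalent one-liner: create the key only if it does not exist yet.
st.session_state.setdefault("prev_option", selected_model)
```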
```diff
@@ -95,8 +90,7 @@ repo_id = model_links[selected_model]
 st.subheader(f'{selected_model}')
 
 # Load the conversation history from the file
-
-st.session_state.messages = load_conversation_history()
+st.session_state.messages = load_conversation_history()
 
 for message in st.session_state.messages:
     with st.chat_message(message["role"]):
```
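Note that this assignment runs on every rerun, so whatever the pickle file holds overwrites `st.session_state.messages` each time the script executes. If file and session ever fall out of sync, a hypothetical guard would hydrate from disk only once per session:

```python
# Hypothetical variant: load messages from disk only on a session's first
# run, leaving in-memory state untouched on later reruns.
if "messages" not in st.session_state:
    st.session_state.messages = load_conversation_history()
```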
```diff
@@ -112,13 +106,20 @@ if prompt := st.chat_input(f"Hi I'm {selected_model}, How can I help you today?"
 
     formated_text = format_promt(prompt, conversation_history, custom_instruction)
 
+    max_tokens = {
+        "LegacyLift🚀": 32000,
+        "ModernMigrate⭐": 8192,
+        "RetroRecode🔄": 4096
+    }
+    max_new_tokens = max_tokens[selected_model] - len(formated_text.split())
+
     with st.chat_message("assistant"):
         client = InferenceClient(
             model=model_links[selected_model], )
         output = client.text_generation(
             formated_text,
             temperature=temp_values,  # 0.5
-            max_new_tokens=
+            max_new_tokens=max_new_tokens,
             stream=True
         )
         response = st.write_stream(output)
```
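The new budget arithmetic treats `len(formated_text.split())` as the prompt's token count. Whitespace words and model tokens diverge, especially for code and non-English text, so the computed `max_new_tokens` can overshoot the model's real context window. A tokenizer-based sketch, assuming the repo ids in `model_links` ship tokenizers; `remaining_budget` is a hypothetical helper name:

```python
from transformers import AutoTokenizer

def remaining_budget(prompt_text: str, repo_id: str, context_window: int) -> int:
    # Count actual model tokens rather than whitespace-separated words.
    tokenizer = AutoTokenizer.from_pretrained(repo_id)
    used = len(tokenizer.encode(prompt_text))
    # Never request a non-positive number of new tokens.
    return max(context_window - used, 1)
```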
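For reference, a self-contained sketch of the streaming call this hunk wires up. With `stream=True` and the default `details=False`, `huggingface_hub`'s `InferenceClient.text_generation` yields plain text chunks, which is exactly the kind of iterator `st.write_stream` consumes. The repo id and prompt below are placeholders:

```python
from huggingface_hub import InferenceClient

client = InferenceClient(model="mistralai/Mistral-7B-Instruct-v0.2")  # placeholder repo id
stream = client.text_generation(
    "Summarize this COBOL paragraph in one sentence.",
    temperature=0.5,
    max_new_tokens=128,
    stream=True,  # yields str chunks as they are generated
)
for chunk in stream:
    print(chunk, end="", flush=True)
```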