Update app.py
app.py
CHANGED
@@ -110,14 +110,15 @@ def search_glossary(query):
     filename = generate_filename(query + ' --- ' + response, "md")
     create_file(filename, query, response, should_save)
 
-    st.write('## 🔍 Running with Llama.') # -------------------------------------------------------------------------------------------------
-    response2 = StreamLLMChatResponse(query)
+    #st.write('## 🔍 Running with Llama.') # -------------------------------------------------------------------------------------------------
+    #response2 = StreamLLMChatResponse(query)
     #st.write(response2)
 
-    filename_txt = generate_filename(query + ' --- ' + response2, "md")
-    create_file(filename_txt, query, response2, should_save)
+    #filename_txt = generate_filename(query + ' --- ' + response2, "md")
+    #create_file(filename_txt, query, response2, should_save)
 
-    all = '# Query: ' + query + '# Response: ' + response + '# Response2: ' + response2
+    #all = '# Query: ' + query + '# Response: ' + response + '# Response2: ' + response2
+    all = '# Query: ' + query + '# Response: ' + response
 
     filename_txt2 = generate_filename(query + ' --- ' + all, "md")
     create_file(filename_txt2, query, all, should_save)
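This hunk comments out the second (Llama) response inside search_glossary() and keeps only the GPT response in the saved markdown. As a rough sketch of the resulting save path, the snippet below uses hypothetical stand-ins for generate_filename() and create_file(); their real implementations are not part of this diff and may differ.

```python
import re
from datetime import datetime

def generate_filename(prompt: str, file_type: str) -> str:
    # Assumed behavior: timestamp plus a sanitized slice of the prompt.
    # The real generate_filename() in app.py is not shown in this diff.
    stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    safe = re.sub(r"[^\w\s-]", "", prompt)[:50].strip().replace(" ", "_")
    return f"{stamp}_{safe}.{file_type}"

def create_file(filename: str, prompt: str, response: str, should_save: bool) -> None:
    # Assumed behavior: write prompt and response to a markdown file when saving is enabled.
    if not should_save:
        return
    with open(filename, "w", encoding="utf-8") as f:
        f.write(f"# Prompt\n{prompt}\n\n# Response\n{response}\n")

# After this commit only the GPT response is combined and saved (no Llama response2):
query = "What is a glossary?"
response = "A glossary is an alphabetical list of terms with definitions."
should_save = True

all = '# Query: ' + query + '# Response: ' + response  # mirrors new line 121; shadows builtin all(), as in the original
filename_txt2 = generate_filename(query + ' --- ' + all, "md")
create_file(filename_txt2, query, all, should_save)
```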
@@ -1062,15 +1063,15 @@ def main():
 
     # Llama versus GPT Battle!
     all=""
-    try:
-        st.write('🔍Running with Llama.')
-        response = StreamLLMChatResponse(file_contents)
-        filename = generate_filename(user_prompt, "md")
-        create_file(filename, file_contents, response, should_save)
-        all=response
+    #try:
+    #    st.write('🔍Running with Llama.')
+    #    response = StreamLLMChatResponse(file_contents)
+    #    filename = generate_filename(user_prompt, "md")
+    #    create_file(filename, file_contents, response, should_save)
+    #    all=response
     #SpeechSynthesis(response)
-    except:
-        st.markdown('Llama is sleeping. Restart ETA 30 seconds.')
+    #except:
+    #    st.markdown('Llama is sleeping. Restart ETA 30 seconds.')
 
     # gpt
     try:
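The second hunk disables the Llama side of the "Llama versus GPT Battle" in main() by commenting out its try/except block. The sketch below reconstructs that guarded-call pattern with a hypothetical stand-in for StreamLLMChatResponse(); it only illustrates the fallback behavior the now-commented code had, not the app's actual endpoint logic.

```python
import streamlit as st

def StreamLLMChatResponse(prompt: str) -> str:
    # Hypothetical stand-in: the real function streams a completion from a hosted
    # Llama endpoint. Raising here simulates the endpoint being down ("sleeping").
    raise ConnectionError("Llama endpoint unavailable")

file_contents = "Example document contents loaded by main()"
all = ""  # shadows the builtin all(), as in the original code

try:
    st.write('🔍Running with Llama.')
    response = StreamLLMChatResponse(file_contents)
    all = response
except Exception:
    # The original used a bare except; catching Exception keeps the same fallback.
    st.markdown('Llama is sleeping. Restart ETA 30 seconds.')
```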