Update app.py
app.py CHANGED
@@ -178,7 +178,7 @@ def display_glossary_grid(roleplaying_glossary):
         "🎲": lambda k: f"https://huggingface.co/spaces/awacke1/GraphicAINovel?q={quote(k)}",  # this url plus query!
         "π": lambda k: f"https://huggingface.co/spaces/awacke1/GraphicAINovel?q={quote(PromptPrefix)}{quote(k)}",  # this url plus query!
         "π": lambda k: f"https://huggingface.co/spaces/awacke1/GraphicAINovel?q={quote(PromptPrefix2)}{quote(k)}",  # this url plus query!
-        "
+        "🎬": lambda k: f"https://huggingface.co/spaces/awacke1/GraphicAINovel?q={quote(PromptPrefix3)}{quote(k)}",  # this url plus query!
     }
 
     for category, details in roleplaying_glossary.items():
@@ -201,7 +201,7 @@ def display_glossary_entity(k):
         "🎲": lambda k: f"https://huggingface.co/spaces/awacke1/GraphicAINovel?q={quote(k)}",  # this url plus query!
         "π": lambda k: f"https://huggingface.co/spaces/awacke1/GraphicAINovel?q={quote(PromptPrefix)}{quote(k)}",  # this url plus query!
         "π": lambda k: f"https://huggingface.co/spaces/awacke1/GraphicAINovel?q={quote(PromptPrefix2)}{quote(k)}",  # this url plus query!
-        "
+        "🎬": lambda k: f"https://huggingface.co/spaces/awacke1/GraphicAINovel?q={quote(PromptPrefix3)}{quote(k)}",  # this url plus query!
     }
     links_md = ' '.join([f"[{emoji}]({url(k)})" for emoji, url in search_urls.items()])
     st.markdown(f"{k} {links_md}", unsafe_allow_html=True)
@@ -450,20 +450,24 @@ def search_glossary(query):
     st.write(f"- {query}")
 
     all=""
-
-    query2 = PromptPrefix + query
+
+    #query2 = PromptPrefix + query
+    query2 = query
     response = chat_with_model(query2)
+    all = query + ' ' + response
+    filename = generate_filename(response, "md")
+    create_file(filename, query, response, should_save)
 
-    query3 = PromptPrefix2 + query + ' for story outline of method steps: ' + response  # Add prompt preface for coding task behavior
-    response2 = chat_with_model(query3)
+    #query3 = PromptPrefix2 + query + ' for story outline of method steps: ' + response  # Add prompt preface for coding task behavior
+    #response2 = chat_with_model(query3)
 
-    query4 = PromptPrefix3 + query + ' using this streamlit python programspecification to define features. Create entities for each variable and generate UI with HTML5 and JS that matches the streamlit program: ' + response2  # Add prompt preface for coding task behavior
-    response3 = chat_with_model(query4)
+    #query4 = PromptPrefix3 + query + ' using this streamlit python programspecification to define features. Create entities for each variable and generate UI with HTML5 and JS that matches the streamlit program: ' + response2  # Add prompt preface for coding task behavior
+    #response3 = chat_with_model(query4)
 
-    all = query + ' ' + response + ' ' + response2 + ' ' + response3
+    #all = query + ' ' + response + ' ' + response2 + ' ' + response3
 
-    filename = generate_filename(all, "md")
-    create_file(filename, query, all, should_save)
+    #filename = generate_filename(all, "md")
+    #create_file(filename, query, all, should_save)
 
     SpeechSynthesis(all)
     return all
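
For context, a minimal self-contained sketch of the pattern the first two hunks extend: a dict mapping emoji keys to URL-building lambdas, joined into one row of markdown links per glossary term. The prefix values and the 📖/📝 keys below are hypothetical stand-ins (two of the real emoji keys did not survive encoding in the diff view above); only the 🎲 and 🎬 entries and the join/markdown lines mirror app.py.

# Sketch only: prefix strings and two emoji keys are placeholders, not the app's values.
import streamlit as st
from urllib.parse import quote

PromptPrefix = "Write a graphic novel scene about "   # placeholder value
PromptPrefix2 = "Outline story method steps for "     # placeholder value
PromptPrefix3 = "Generate an HTML5/JS UI for "        # placeholder value
BASE = "https://huggingface.co/spaces/awacke1/GraphicAINovel?q="

search_urls = {
    "🎲": lambda k: f"{BASE}{quote(k)}",                        # raw term
    "📖": lambda k: f"{BASE}{quote(PromptPrefix)}{quote(k)}",   # term with prefix 1
    "📝": lambda k: f"{BASE}{quote(PromptPrefix2)}{quote(k)}",  # term with prefix 2
    "🎬": lambda k: f"{BASE}{quote(PromptPrefix3)}{quote(k)}",  # entry this commit adds
}

def display_glossary_entity(k):
    # One markdown row: the term followed by clickable emoji links.
    links_md = ' '.join([f"[{emoji}]({url(k)})" for emoji, url in search_urls.items()])
    st.markdown(f"{k} {links_md}", unsafe_allow_html=True)

Because each value is a lambda, the f-string is evaluated per term at render time, so every emoji link carries the current term (and its prompt prefix) URL-encoded in the query string.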
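
The third hunk reduces search_glossary to a single model call. A hedged sketch of the resulting flow is below; only the call shapes come from the diff, while chat_with_model, generate_filename, create_file, and SpeechSynthesis are the app's own helpers and get placeholder bodies here.

import streamlit as st

should_save = True  # set elsewhere in app.py; placeholder here

def chat_with_model(prompt):
    # Placeholder: the app calls its LLM backend here.
    return f"(model response to: {prompt})"

def generate_filename(text, ext):
    # Placeholder: derive a simple filename from the response text.
    return f"{text[:24].strip().replace(' ', '_')}.{ext}"

def create_file(filename, prompt, content, save):
    # Placeholder: persist prompt + response when saving is enabled.
    if save:
        with open(filename, "w", encoding="utf-8") as f:
            f.write(prompt + "\n\n" + content)

def SpeechSynthesis(text):
    # Placeholder: the app renders browser speech synthesis for this text.
    st.write(text)

def search_glossary(query):
    st.write(f"- {query}")
    query2 = query                      # PromptPrefix is no longer prepended
    response = chat_with_model(query2)  # single call replaces the three-step chain
    all_text = query + ' ' + response   # the diff keeps the name `all`
    filename = generate_filename(response, "md")
    create_file(filename, query, response, should_save)
    SpeechSynthesis(all_text)
    return all_text

The outline and HTML5/JS generation steps (query3/query4 and their responses) stay in the file as comments rather than being deleted, so the longer chain can be re-enabled later.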