awacke1 committed
Commit e4177a6 · verified · 1 Parent(s): b53800c

Update app.py

Files changed (1):
  1. app.py  +9 -8
app.py CHANGED
@@ -252,7 +252,7 @@ def FileSidebar():
     # Llama versus GPT Battle!
     all=""
     try:
-        st.write('🔍Running with Llama.')
+        #st.write('🔍Running with Llama.')
         response = StreamLLMChatResponse(file_contents)
         filename = generate_filename(user_prompt, "md")
         create_file(filename, file_contents, response, should_save)
@@ -263,7 +263,7 @@ def FileSidebar():
 
     # gpt
     try:
-        st.write('🔍Running with GPT.')
+        #st.write('🔍Running with GPT.')
         response2 = chat_with_model(user_prompt, file_contents, model_choice)
         filename2 = generate_filename(file_contents, choice)
         create_file(filename2, user_prompt, response, should_save)
@@ -277,7 +277,7 @@ def FileSidebar():
 
     if next_action=='search':
         file_content_area = st.text_area("File Contents:", file_contents, height=500)
-        st.write('🔍Running with Llama and GPT.')
+        #st.write('🔍Running with Llama and GPT.')
 
         user_prompt = file_contents
 
@@ -295,7 +295,7 @@ def FileSidebar():
 
         # gpt
         try:
-            st.write('🔍Running with GPT.')
+            #st.write('🔍Running with GPT.')
             response2 = chat_with_model(user_prompt, file_contents, model_choice)
             filename2 = generate_filename(file_contents, choice)
             create_file(filename2, user_prompt, response, should_save)
@@ -407,14 +407,15 @@ def search_glossary(query):
 
     query2 = PromptPrefix + query # Add prompt preface for method step task behavior
     # st.write('## ' + query2)
-    st.write('## 🔍 Running with GPT.') # -------------------------------------------------------------------------------------------------
+    # st.write('## 🔍 Running with GPT.') # -------------------------------------------------------------------------------------------------
    response = chat_with_model(query2)
    filename = generate_filename(query2 + ' --- ' + response, "md")
    create_file(filename, query, response, should_save)
-
+    SpeechSynthesis(response)
+
     query3 = PromptPrefix2 + query + ' creating streamlit functions that implement outline of method steps below: ' + response # Add prompt preface for coding task behavior
     # st.write('## ' + query3)
-    st.write('## 🔍 Coding with GPT.') # -------------------------------------------------------------------------------------------------
+    # st.write('## 🔍 Coding with GPT.') # -------------------------------------------------------------------------------------------------
     response2 = chat_with_model(query3)
     filename_txt = generate_filename(query + ' --- ' + response2, "py")
     create_file(filename_txt, query, response2, should_save)
@@ -524,7 +525,7 @@ def create_search_url_google(keyword):
     return base_url + keyword.replace(' ', '+')
 
 def create_search_url_ai(keyword):
-    base_url = "https://huggingface.co/spaces/awacke1/MixableWordGameAI?q="
+    base_url = "https://huggingface.co/spaces/awacke1/GraphicNovelAI?q="
     return base_url + keyword.replace(' ', '+')
 
 def display_images_and_wikipedia_summaries():
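
Note: beyond commenting out the st.write progress messages and repointing the AI search URL to the GraphicNovelAI Space, the one functional addition in this commit is the SpeechSynthesis(response) call in search_glossary, which reads the GPT response aloud. The helper itself is not part of this diff; the sketch below is only a guess at how such a helper is commonly written in Streamlit, using streamlit.components.v1 to invoke the browser Web Speech API (the actual implementation in app.py may differ).

import json
import streamlit.components.v1 as components

def SpeechSynthesis(text):
    # Hypothetical sketch: speak `text` in the visitor's browser via the Web Speech API.
    # json.dumps escapes quotes and newlines so the text embeds safely in JavaScript.
    escaped = json.dumps(text)
    components.html(
        f"""
        <script>
            var msg = new SpeechSynthesisUtterance({escaped});
            window.speechSynthesis.speak(msg);
        </script>
        """,
        height=0,  # no visible widget; the component only runs the script
    )

With a helper along these lines, the added SpeechSynthesis(response) call plays the generated method-steps summary immediately after create_file saves it, without adding any extra UI.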