saritha committed
Commit 2653cec · verified · 1 Parent(s): 6319d8d

Update app.py

Files changed (1)
  1. app.py +18 -35
app.py CHANGED
@@ -1,5 +1,3 @@
-import os
-
 from groq import Groq
 import gradio as gr
 
@@ -59,44 +57,29 @@ def response_from_mistral(query):
 # )
 # iface.launch()
 
+def chat_with_models(text):
+    llama_response = response_from_llam3(text)
+    mistral_response = response_from_mistral(text)
+
+    return llama_response, mistral_response
+
+
 with gr.Blocks() as demo:
     gr.Markdown("<h1>🚀 Mistral 7B vs LLama3 8B 🦙</h1>")
     gr.Markdown("<h3> 🕹️ Type your questions or prompts related to Ayurveda and see how each model responds to the same input 👾 </h3>")
-
-    example_questions = [
-        'What is importance of fasting according to Ayurveda?',
-        'What are the medicinal values of Tulsi?',
-        'What are the three different doshas?',
-        'What is the ideal diet according to Ayurveda?'
-    ]
-
-    input_text = gr.Textbox(label="Enter your prompt here:", placeholder="Type something...", lines=2)
-    example_dropdown = gr.Dropdown(label="Select an Example Question", choices=example_questions)
-    submit_button = gr.Button("Submit")
-
+    with gr.Row():
+        input_text = gr.Textbox(label="Enter your prompt here:", placeholder="Type something...", lines=2)
+        submit_button = gr.Button("Submit")
     output_llama = gr.Textbox(label="Llama 3 8B 👾", placeholder="", lines=10, interactive=False)
     output_mistral = gr.Textbox(label="Mistral 7B 🌠", placeholder="", lines=10, interactive=False)
+    examples=[
+        ['What is importance of fasting according to Ayurveda?'],
+        ['What are the medicinal values of Tusli?'],
+        ['What are the three different doshas?'],
+        ['What is the ideal diet according to ayurveda?']
+    ],
 
-    def set_example_question(choice):
-        input_text.value = choice
-
-    example_dropdown.change(set_example_question)
-
-    def chat_with_models():
-        llama_response = response_from_llam3(input_text.value)
-        mistral_response = response_from_mistral(input_text.value)
-        output_llama.value = llama_response
-        output_mistral.value = mistral_response
-
-    submit_button.click(chat_with_models)
+    submit_button.click(fn=chat_with_models, inputs=input_text, outputs=[output_llama,output_mistral])
 
 if __name__ == "__main__":
-    demo.launch(share=True)
-
-
-
-
-
-
-
-
+    demo.launch()
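
The substantive change here is how results reach the UI: the removed handler assigned to output_llama.value and output_mistral.value, which does not refresh rendered components in Gradio Blocks, while the new chat_with_models(text) returns both responses so submit_button.click(fn=..., inputs=..., outputs=...) can route them to the two textboxes. One loose end remains: the added examples=[...] literal (with its trailing comma) is never attached to any component, so the example prompts that the deleted dropdown used to offer no longer appear in the app. The following is a minimal sketch, not part of this commit, of how the same layout could expose those prompts again through gr.Examples; the response_from_llam3 / response_from_mistral functions are lightweight stand-ins for the Groq-backed helpers defined earlier in app.py.

    import gradio as gr

    def response_from_llam3(query):
        # Stand-in for the Groq-backed helper in app.py.
        return f"[Llama 3 8B would answer] {query}"

    def response_from_mistral(query):
        # Stand-in for the Groq-backed helper in app.py.
        return f"[Mistral 7B would answer] {query}"

    def chat_with_models(text):
        # Same shape as the committed handler: query both models and return
        # both answers so Gradio routes each one to its output textbox.
        return response_from_llam3(text), response_from_mistral(text)

    with gr.Blocks() as demo:
        gr.Markdown("<h1>Mistral 7B vs LLama3 8B</h1>")
        with gr.Row():
            input_text = gr.Textbox(label="Enter your prompt here:", lines=2)
            submit_button = gr.Button("Submit")
        output_llama = gr.Textbox(label="Llama 3 8B", lines=10, interactive=False)
        output_mistral = gr.Textbox(label="Mistral 7B", lines=10, interactive=False)

        # Hypothetical replacement for the unused examples=[...] literal:
        # clicking a row copies that prompt into input_text.
        gr.Examples(
            examples=[
                ["What is importance of fasting according to Ayurveda?"],
                ["What are the medicinal values of Tulsi?"],
                ["What are the three different doshas?"],
                ["What is the ideal diet according to Ayurveda?"],
            ],
            inputs=input_text,
        )

        submit_button.click(fn=chat_with_models, inputs=input_text,
                            outputs=[output_llama, output_mistral])

    if __name__ == "__main__":
        demo.launch()

Wiring the prompts through gr.Examples only fills the textbox; the user still presses Submit, so the app keeps the single click handler this commit introduces.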