# Gradio demo: compare Ayurveda Q&A answers from two Groq-hosted models
# (Llama 3 70B and Mixtral 8x7B) side by side.
import os
from groq import Groq
import gradio as gr
# Groq API client; the key is read from the `api_key_gorq` environment variable.
client = Groq(api_key=os.getenv('api_key_gorq'))
def response_from_llam3(query):
    """Ask the Llama 3 70B model (via Groq) an Ayurveda question.

    Parameters
    ----------
    query : str
        The user's question; interpolated into the user message.

    Returns
    -------
    str
        The assistant's reply from the first completion choice.
    """
    messages = [
        {
            "role": "system",
            # Fixed typos from the original prompt ("helpul", "plently",
            # "namste", "Ayur Veda") so the model receives clean instructions.
            "content": (
                "You are a helpful Assistant who has plenty of knowledge on "
                "Ayurveda. If the message is Hi or any greeting, say "
                "'Namaste, how can I assist you?'"
            ),
        },
        {
            "role": "user",
            "content": f"What is the answer to {query}",
        },
    ]
    response = client.chat.completions.create(
        messages=messages,
        model="llama3-70b-8192",
    )
    return response.choices[0].message.content
def response_from_mistral(query):
    """Ask the Mixtral 8x7B model (via Groq) an Ayurveda question.

    Parameters
    ----------
    query : str
        The user's question; interpolated into the user message.

    Returns
    -------
    str
        The assistant's reply from the first completion choice.
    """
    messages = [
        {
            "role": "system",
            # Fixed typos from the original prompt ("helpul", "plently",
            # "namste", "Ayur Veda") so the model receives clean instructions.
            "content": (
                "You are a helpful Assistant who has plenty of knowledge on "
                "Ayurveda. If the message is Hi or any greeting, say "
                "'Namaste, how can I assist you?'"
            ),
        },
        {
            "role": "user",
            "content": f"What is the answer to {query}",
        },
    ]
    response = client.chat.completions.create(
        messages=messages,
        model="mixtral-8x7b-32768",
    )
    return response.choices[0].message.content
# iface = gr.Interface(
# fn=response_from_llam3,
# inputs="text",
# outputs="text",
# examples=[
# ['What is importance of fasting according to Ayurveda?'],
# ['What are the medicinal values of Tusli?'],
# ['What are the three different doshas?'],
# ['What is the ideal diet according to ayurveda?']
# ],
# cache_examples=False,
# )
# iface.launch()
def chat_with_models(text):
    """Send the same prompt to both models and return their replies.

    Returns a ``(llama_reply, mistral_reply)`` tuple of strings.
    """
    return response_from_llam3(text), response_from_mistral(text)
with gr.Blocks() as demo:
    # Headings corrected to name the models actually queried below
    # (llama3-70b-8192 and mixtral-8x7b-32768), not "7B"/"8B" variants.
    gr.Markdown("<h1>๐ Mixtral 8x7B vs Llama 3 70B ๐ฆ</h1>")
    gr.Markdown("<h3> ๐น๏ธ Compare the performance and responses of two powerful models, Mixtral 8x7B and Llama 3 70B instruct. Type your questions or prompts below and see how each model responds to the same input ๐พ </h3>")
    with gr.Row():
        input_text = gr.Textbox(label="Enter your prompt here:", placeholder="Type something...", lines=2)
    submit_button = gr.Button("Submit")
    output_llama = gr.Textbox(label="Llama 3 70B ๐พ", placeholder="", lines=10, interactive=False)
    output_mistral = gr.Textbox(label="Mixtral 8x7B ๐ ", placeholder="", lines=10, interactive=False)
    # Bug fix: the original bound these examples to an unused variable (and the
    # trailing comma made it a one-element tuple). Wire them into the UI so
    # clicking an example fills the prompt box.
    gr.Examples(
        examples=[
            ['What is importance of fasting according to Ayurveda?'],
            ['What are the medicinal values of Tusli?'],
            ['What are the three different doshas?'],
            ['What is the ideal diet according to ayurveda?'],
        ],
        inputs=input_text,
    )
    submit_button.click(fn=chat_with_models, inputs=input_text, outputs=[output_llama, output_mistral])

if __name__ == "__main__":
    demo.launch()