saritha committed on
Commit 843623c · verified · 1 Parent(s): 029eb38

Update app.py

Files changed (1)
  1. app.py +21 -81
app.py CHANGED
@@ -3,42 +3,6 @@ import os
 from groq import Groq
 import gradio as gr
 
-DESCRIPTION = '''
-<div>
-<h1 style="text-align: center;">Meta Llama3 8B</h1>
-<p>This Space demonstrates the instruction-tuned model <a href="https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct"><b>Meta Llama3 8b Chat</b></a>. Meta Llama3 is the new open LLM and comes in two sizes: 8b and 70b. Feel free to play with it, or duplicate to run privately!</p>
-<p>🔎 For more details about the Llama3 release and how to use the model with <code>transformers</code>, take a look <a href="https://huggingface.co/blog/llama3">at our blog post</a>.</p>
-<p>🦕 Looking for an even more powerful model? Check out the <a href="https://huggingface.co/chat/"><b>Hugging Chat</b></a> integration for Meta Llama 3 70b</p>
-</div>
-'''
-
-LICENSE = """
-<p/>
----
-Built with Meta Llama 3
-"""
-
-PLACEHOLDER = """
-<div style="padding: 30px; text-align: center; display: flex; flex-direction: column; align-items: center;">
-   <img src="https://ysharma-dummy-chat-app.hf.space/file=/tmp/gradio/8e75e61cc9bab22b7ce3dec85ab0e6db1da5d107/Meta_lockup_positive%20primary_RGB.jpg" style="width: 80%; max-width: 550px; height: auto; opacity: 0.55; ">
-   <h1 style="font-size: 28px; margin-bottom: 2px; opacity: 0.55;">Meta llama3</h1>
-   <p style="font-size: 18px; margin-bottom: 2px; opacity: 0.65;">Ask me anything related to Ayur Veda</p>
-</div>
-"""
-
-
-css = """
-h1 {
-  text-align: center;
-  display: block;
-}
-#duplicate-button {
-  margin: auto;
-  color: white;
-  background: #1565c0;
-  border-radius: 100vh;
-}
-"""
 
 client = Groq(
     api_key =os.getenv('api_key_gorq')
@@ -61,48 +25,14 @@ def response_from_llam3(query):
 
     )
     return response.choices[0].message.content
-# iface = gr.Interface(
-#     fn=response_from_llam3,
-#     inputs="text",
-#     outputs="text",
-#     examples=[
-#         ['What is importance of fasting according to Ayurveda?'],
-#         ['What are the medicinal values of Tusli?'],
-#         ['What are the three different doshas?'],
-#         ['What is the ideal diet according to ayurveda?']
-#     ],
-#     cache_examples=False,
-# )
-# iface.launch()
 
-# Gradio block
-chatbot=gr.Chatbot(height=450, placeholder=PLACEHOLDER, label='Gradio ChatInterface')
-
-with gr.Blocks(fill_height=True, css=css) as demo:
 
-    gr.Markdown(DESCRIPTION)
-    gr.DuplicateButton(value="Duplicate Space for private use", elem_id="duplicate-button")
-    gr.ChatInterface(
-        fn=response_from_llam3,
-        chatbot=chatbot,
-        fill_height=True,
-        inputs="text",
-        outputs="text",
-        additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
-        # additional_inputs=[
-        #     gr.Slider(minimum=0,
-        #               maximum=1,
-        #               step=0.1,
-        #               value=0.95,
-        #               label="Temperature",
-        #               render=False),
-        #     gr.Slider(minimum=128,
-        #               maximum=4096,
-        #               step=1,
-        #               value=512,
-        #               label="Max new tokens",
-        #               render=False ),
-        # ],
+iface = gr.Interface(
+    title="🚀 Ayurveda Mate 🦙",
+    description="🕹️ Type your questions or prompts below and see how each model responds to the same input 👾",
+    fn=response_from_llam3,
+    inputs="text",
+    outputs="text",
     examples=[
         ['What is importance of fasting according to Ayurveda?'],
         ['What are the medicinal values of Tusli?'],
@@ -110,9 +40,19 @@ with gr.Blocks(fill_height=True, css=css) as demo:
         ['What is the ideal diet according to ayurveda?']
     ],
     cache_examples=False,
-    )
-
-    gr.Markdown(LICENSE)
+)
+iface.launch()
+
+# with gr.Blocks() as demo:
+#     gr.Markdown("<h1>🚀 Ayurveda Mate 🦙</h1>")
+#     gr.Markdown("<h3> 🕹️ Type your questions or prompts below and see how each model responds to the same input 👾 </h3>")
+#     with gr.Row():
+#         input_text = gr.Textbox(label="Enter your prompt here:", placeholder="Type something...", lines=2)
+#         submit_button = gr.Button("Submit")
+#         output_llama = gr.Textbox(label="Llama 3 8B 👾", placeholder="", lines=10, interactive=False)
+#         output_mistral = gr.Textbox(label="Mistral 7B 🌠", placeholder="", lines=10, interactive=False)
 
-if __name__ == "__main__":
-    demo.launch()
+#     submit_button.click(fn=chat_with_models, inputs=input_text, outputs=[output_llama, output_mistral])
+
+# if __name__ == "__main__":
+#     demo.launch()
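The body of `response_from_llam3` (new lines 9–24) is unchanged by this commit and not shown in the hunks above. For context, below is a minimal sketch of how such a handler typically calls the Groq chat-completions client; the message list and the `llama3-8b-8192` model id are illustrative assumptions, not taken from the commit.

```python
import os

from groq import Groq

# Client construction mirrors the unchanged context lines in the diff;
# the key is read from the Space's `api_key_gorq` secret.
client = Groq(
    api_key=os.getenv('api_key_gorq')
)

def response_from_llam3(query):
    # Assumed request body: the actual messages and model id used by the Space
    # are not part of this diff.
    response = client.chat.completions.create(
        messages=[
            {"role": "user", "content": query},
        ],
        model="llama3-8b-8192",  # placeholder Groq model id
    )
    return response.choices[0].message.content
```

Wired into the `gr.Interface` added above, a handler like this returns the model's reply text for each submitted prompt.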