import os

from groq import Groq
import gradio as gr

DESCRIPTION = '''
<div>
<h1 style="text-align: center;">Meta Llama 3</h1>
<p>This Space demonstrates an Ayurveda-focused chat assistant built on the instruction-tuned <a href="https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct"><b>Meta Llama 3</b></a> family, served here through the Groq API (<code>llama3-70b-8192</code>). Meta Llama 3 is an open LLM that comes in two sizes: 8B and 70B. Feel free to play with it, or duplicate the Space to run it privately!</p>
<p>🔎 For more details about the Llama 3 release and how to use the model with <code>transformers</code>, take a look at <a href="https://huggingface.co/blog/llama3">the blog post</a>.</p>
<p>🦕 Looking for an even more powerful model? Check out the <a href="https://huggingface.co/chat/"><b>Hugging Chat</b></a> integration for Meta Llama 3 70B.</p>
</div>
'''

LICENSE = """
<p/>
---
Built with Meta Llama 3
"""

PLACEHOLDER = """
<div style="padding: 30px; text-align: center; display: flex; flex-direction: column; align-items: center;">
   <img src="https://ysharma-dummy-chat-app.hf.space/file=/tmp/gradio/8e75e61cc9bab22b7ce3dec85ab0e6db1da5d107/Meta_lockup_positive%20primary_RGB.jpg" style="width: 80%; max-width: 550px; height: auto; opacity: 0.55;  "> 
   <h1 style="font-size: 28px; margin-bottom: 2px; opacity: 0.55;">Meta llama3</h1>
   <p style="font-size: 18px; margin-bottom: 2px; opacity: 0.65;">Ask me anything related to Ayur Veda</p>
</div>
"""


css = """
h1 {
  text-align: center;
  display: block;
}
#duplicate-button {
  margin: auto;
  color: white;
  background: #1565c0;
  border-radius: 100vh;
}
"""

# The Groq API key is read from the environment (on Hugging Face Spaces this is
# typically configured as a repository secret named `api_key_gorq`).
client = Groq(
    api_key=os.getenv('api_key_gorq')
)


def response_from_llam3(query, history=None):
    """Answer a user query with Llama 3 70B served by Groq.

    `history` is accepted (and ignored) because gr.ChatInterface calls the
    function with both the new message and the chat history.
    """
    messages = [
        {
            "role": "system",
            "content": (
                "You are a helpful assistant with plenty of knowledge of Ayurveda. "
                "If the message is 'Hi' or any other greeting, say 'Namaste, how can I assist you?'"
            ),
        },
        {
            "role": "user",
            "content": f"What is the answer to {query}",
        },
    ]

    response = client.chat.completions.create(
        messages=messages,
        model="llama3-70b-8192",
    )
    return response.choices[0].message.content
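

# --- Optional streaming variant (illustrative sketch, not wired up by default) ---
# gr.ChatInterface also accepts a generator function and renders each yielded
# string as the partial reply, and the Groq SDK exposes an OpenAI-compatible
# `stream=True` flag. To try it, pass `fn=response_from_llam3_stream` to
# gr.ChatInterface below instead of `fn=response_from_llam3`.
def response_from_llam3_stream(query, history=None):
    messages = [
        {
            "role": "system",
            "content": (
                "You are a helpful assistant with plenty of knowledge of Ayurveda. "
                "If the message is 'Hi' or any other greeting, say 'Namaste, how can I assist you?'"
            ),
        },
        {"role": "user", "content": f"What is the answer to {query}"},
    ]
    stream = client.chat.completions.create(
        messages=messages,
        model="llama3-70b-8192",
        stream=True,
    )
    partial = ""
    for chunk in stream:
        delta = chunk.choices[0].delta.content
        if delta:
            partial += delta
            yield partial  # ChatInterface shows the reply growing as chunks arrive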

# Gradio UI
chatbot = gr.Chatbot(height=450, placeholder=PLACEHOLDER, label='Gradio ChatInterface')

with gr.Blocks(fill_height=True, css=css) as demo:
    gr.Markdown(DESCRIPTION)
    gr.DuplicateButton(value="Duplicate Space for private use", elem_id="duplicate-button")
    gr.ChatInterface(
        fn=response_from_llam3,
        chatbot=chatbot,
        fill_height=True,
        additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
        # Sampling controls could be exposed here once response_from_llam3 accepts them:
        # additional_inputs=[
        #     gr.Slider(minimum=0, maximum=1, step=0.1, value=0.95,
        #               label="Temperature", render=False),
        #     gr.Slider(minimum=128, maximum=4096, step=1, value=512,
        #               label="Max new tokens", render=False),
        # ],
        examples=[
            ['What is the importance of fasting according to Ayurveda?'],
            ['What are the medicinal values of Tulsi?'],
            ['What are the three different doshas?'],
            ['What is the ideal diet according to Ayurveda?'],
        ],
        cache_examples=False,
    )
    
    gr.Markdown(LICENSE)
    
if __name__ == "__main__":
    demo.launch()
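
# To run locally (a sketch of the assumed setup, not part of the original file):
#   pip install groq gradio
#   export api_key_gorq=<your Groq API key>
#   python app.py          # assuming this file is saved as app.py
# Gradio serves the demo at http://127.0.0.1:7860 by default.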