itsanurag commited on
Commit
9ea9df4
·
verified ·
1 Parent(s): d668346

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +109 -0
app.py ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from huggingface_hub import InferenceClient
import gradio as gr
from langchain_community.tools import DuckDuckGoSearchRun

# Client for the hosted HF Inference API endpoint of the Mixtral instruct model;
# used below by generate() for streaming text generation.
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")

# Initialize DuckDuckGo search tool (LangChain community wrapper);
# generate() queries it after the model reply to surface web results.
duckduckgo_search = DuckDuckGoSearchRun()
9
+
10
def format_prompt(message, history):
    """Build a Mixtral-instruct prompt from past (user, bot) turns plus a new message.

    Each past exchange becomes "[INST] user [/INST] bot</s> "; the new
    message is appended as a final open "[INST] ... [/INST]" turn.
    """
    pieces = ["<s>"]
    for past_user, past_bot in history:
        pieces.append(f"[INST] {past_user} [/INST]")
        pieces.append(f" {past_bot}</s> ")
    pieces.append(f"[INST] {message} [/INST]")
    return "".join(pieces)
17
+
18
def generate(prompt, history, system_prompt, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0):
    """Stream a Mixtral reply for `prompt`, then append DuckDuckGo results.

    Parameters mirror the ChatInterface additional inputs: `system_prompt`
    is prepended to the user message; the remaining parameters are sampling
    controls forwarded to the Inference API.

    Yields progressively longer strings. NOTE(review): gr.ChatInterface
    replaces the displayed message with each yielded value, so every yield
    must contain the full text so far — the original code yielded the bare
    search result last, which clobbered the model's entire answer.
    """
    # The API rejects temperature == 0; clamp to a small positive floor.
    temperature = max(float(temperature), 1e-2)
    top_p = float(top_p)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=int(max_new_tokens),  # slider may deliver a float
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,  # fixed seed for reproducible sampling
    )

    formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)

    # Stream tokens from the model, re-yielding the accumulated answer.
    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
    output = ""
    for response in stream:
        output += response.token.text
        yield output

    # Best-effort web search: a network/search failure must not destroy the
    # already-streamed model answer.
    try:
        search_result = duckduckgo_search.run(prompt)
    except Exception:
        search_result = ""

    # Append (not replace) the search results to the model's answer.
    if search_result:
        yield output + "\n\n" + search_result
    else:
        yield output + "\n\n" + "Sorry, I couldn't find any relevant information."
50
+
51
# Extra ChatInterface controls; their values are passed positionally to
# generate() after (message, history): system prompt, temperature,
# max new tokens, top-p, repetition penalty.
additional_inputs = [
    gr.Textbox(
        interactive=True,
        max_lines=1,
        label="System Prompt",
    ),
    gr.Slider(
        minimum=0.0,
        maximum=1.0,
        value=0.9,
        step=0.05,
        interactive=True,
        label="Temperature",
        info="Higher values produce more diverse outputs",
    ),
    gr.Slider(
        minimum=0,
        maximum=1048,
        value=256,
        step=64,
        interactive=True,
        label="Max new tokens",
        info="The maximum numbers of new tokens",
    ),
    gr.Slider(
        minimum=0.0,
        maximum=1,
        value=0.90,
        step=0.05,
        interactive=True,
        label="Top-p (nucleus sampling)",
        info="Higher values sample more low-probability tokens",
    ),
    gr.Slider(
        minimum=1.0,
        maximum=2.0,
        value=1.2,
        step=0.05,
        interactive=True,
        label="Repetition penalty",
        info="Penalize repeated tokens",
    ),
]
94
+
95
# Sample questions shown under the chat box; the five trailing Nones leave
# the additional inputs (system prompt + sampling sliders) at their defaults.
_example_questions = [
    "I'm planning a vacation to Japan. Can you suggest a one-week itinerary including must-visit places and local cuisines to try?",
    "What are some best tourist places to visit in Lajpat nagar, Delhi?",
    "Ronaldo or Messi?",
    "Can you explain how the QuickSort algorithm works and provide a Python implementation?",
]
examples = [[question, None, None, None, None, None] for question in _example_questions]
101
+
102
# Assemble the streaming chat UI around generate() and start the server.
chat_ui = gr.ChatInterface(
    fn=generate,
    chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
    additional_inputs=additional_inputs,
    title="🤗 friday2.0 🤗 WELCOME TO OPEN-SOURCE FREEDOM",
    examples=examples,
    concurrency_limit=20,
)
chat_ui.launch(show_api=False, share=True)