Update app.py
app.py CHANGED
@@ -1,20 +1,21 @@
 from huggingface_hub import InferenceClient
 import gradio as gr
 import random
+from langchain_community.tools import DuckDuckGoSearchRun
 
 API_URL = "https://api-inference.huggingface.co/models/"
+client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.1")
 
-
-
-)
+# Initialize DuckDuckGo search tool
+duckduckgo_search = DuckDuckGoSearchRun()
 
 def format_prompt(message, history):
-
-
-
-
-
-
+    prompt = "<s>"
+    for user_prompt, bot_response in history:
+        prompt += f"[INST] {user_prompt} [/INST]"
+        prompt += f" {bot_response}</s> "
+    prompt += f"[INST] {message} [/INST]"
+    return prompt
 
 def generate(prompt, history, temperature=0.9, max_new_tokens=512, top_p=0.95, repetition_penalty=1.0):
     temperature = float(temperature)
@@ -38,9 +39,9 @@ def generate(prompt, history, temperature=0.9, max_new_tokens=512, top_p=0.95, repetition_penalty=1.0):
 
     for response in stream:
         output += response.token.text
+        # Yield model's response first
         yield output
-
-
+
     # Now, perform DuckDuckGo search and yield results
     search_result = duckduckgo_search.run(prompt)
     if search_result:
@@ -48,7 +49,6 @@ def generate(prompt, history, temperature=0.9, max_new_tokens=512, top_p=0.95, repetition_penalty=1.0):
     else:
         yield "Sorry, I couldn't find any relevant information."
 
-
 additional_inputs=[
     gr.Slider(
         label="Temperature",
@@ -101,4 +101,4 @@ with gr.Blocks(css=customCSS) as demo:
         additional_inputs=additional_inputs,
     )
 
-demo.queue().launch(debug=True)
+demo.queue().launch(debug=True)
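The new format_prompt builds a Mistral-instruct prompt string from the chat history. As a quick illustration of what it returns (the history and message below are made-up examples, not from the Space):

# format_prompt as added in the diff, exercised with example inputs
def format_prompt(message, history):
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt

history = [("What is Gradio?", "Gradio is a Python library for building ML demos.")]
print(format_prompt("Does it support streaming?", history))
# <s>[INST] What is Gradio? [/INST] Gradio is a Python library for building ML demos.</s> [INST] Does it support streaming? [/INST]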
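The hunks elide the part of generate that creates `stream`, so the exact call is not shown. A minimal sketch of the flow the diff implies, assuming the standard huggingface_hub streaming API (a reconstruction under those assumptions, not the Space's exact code):

from huggingface_hub import InferenceClient
from langchain_community.tools import DuckDuckGoSearchRun

client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.1")
duckduckgo_search = DuckDuckGoSearchRun()

def generate(prompt, history, temperature=0.9, max_new_tokens=512, top_p=0.95, repetition_penalty=1.0):
    # With stream=True and details=True, text_generation yields token objects
    # whose .token.text matches the `response.token.text` access in the diff.
    stream = client.text_generation(
        format_prompt(prompt, history),
        temperature=float(temperature),
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        stream=True,
        details=True,
    )
    output = ""
    for response in stream:
        output += response.token.text
        yield output  # stream the model's answer token by token
    # Then run the web search on the raw user prompt, as the diff does
    search_result = duckduckgo_search.run(prompt)
    if search_result:
        yield search_result
    else:
        yield "Sorry, I couldn't find any relevant information."

One caveat with this pattern: each yield from a Gradio chat handler replaces the currently displayed response, so the search result overwrites the streamed answer in the chat window rather than appearing after it.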
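The remaining fragments (additional_inputs, the Temperature slider, gr.Blocks(css=customCSS), and the final launch line) suggest a gr.ChatInterface wired to generate. A sketch of that wiring; every slider setting except the label, and the customCSS value, are assumptions, since the diff shows only fragments:

import gradio as gr

additional_inputs = [
    gr.Slider(
        label="Temperature",
        value=0.9,       # assumed default, matching generate()'s signature
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
    ),
]

customCSS = ""  # placeholder; the Space defines its own CSS

with gr.Blocks(css=customCSS) as demo:
    gr.ChatInterface(
        generate,  # the streaming generator sketched above
        additional_inputs=additional_inputs,
    )

demo.queue().launch(debug=True)  # queue() enables the generator (streaming) handler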