Spaces:
Runtime error
Runtime error
Upload with huggingface_hub
Browse files- app.py +45 -23
- qa.pmpt.tpl +1 -1
- requirements.txt +2 -1
app.py
CHANGED
@@ -1,12 +1,19 @@
|
|
1 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
2 |
|
3 |
-
#
|
4 |
-
# Notebook](https://github.com/openai/openai-cookbook/blob/main/examples/Question_answering_using_embeddings.ipynb).
|
5 |
|
6 |
import datasets
|
7 |
import numpy as np
|
8 |
-
|
9 |
-
from
|
10 |
|
11 |
# We use Hugging Face Datasets as the database by assigning
|
12 |
# a FAISS index.
|
@@ -17,30 +24,45 @@ olympics.add_faiss_index("embeddings")
|
|
17 |
|
18 |
# Fast KNN retieval prompt
|
19 |
|
|
|
|
|
|
|
|
|
|
|
20 |
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
|
|
|
|
|
|
|
26 |
|
27 |
-
#
|
28 |
|
29 |
|
30 |
-
|
31 |
-
|
|
|
|
|
|
|
32 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
33 |
|
34 |
-
with start_chain("qa") as backend:
|
35 |
-
question = "Who won the 2020 Summer Olympics men's high jump?"
|
36 |
-
prompt = KNNPrompt(backend.OpenAIEmbed()).chain(QAPrompt(backend.OpenAI()))
|
37 |
-
result = prompt(question)
|
38 |
-
print(result)
|
39 |
|
40 |
-
# + tags=["hide_inp"]
|
41 |
-
QAPrompt().show(
|
42 |
-
{"question": "Who won the race?", "docs": ["doc1", "doc2", "doc3"]}, "Joe Bob"
|
43 |
-
)
|
44 |
-
# -
|
45 |
|
46 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# + tags=["hide_inp"]
# Markdown description rendered at the top of the Gradio demo.
# Fix: "embeedding" -> "embedding" (typo in the user-facing text).
desc = """
### Question Answering with Retrieval

Chain that answers questions with embedding based retrieval. [[Code](https://github.com/srush/MiniChain/blob/main/examples/qa.py)]

(Adapted from [OpenAI Notebook](https://github.com/openai/openai-cookbook/blob/main/examples/Question_answering_using_embeddings.ipynb).)
"""
# -
|
10 |
|
11 |
+
# $
|
|
|
12 |
|
13 |
import datasets
|
14 |
import numpy as np
|
15 |
+
from minichain import prompt, show, OpenAIEmbed, OpenAI
|
16 |
+
from manifest import Manifest
|
17 |
|
18 |
# We use Hugging Face Datasets as the database by assigning
|
19 |
# a FAISS index.
|
|
|
24 |
|
25 |
# Fast KNN retrieval prompt
|
26 |
|
27 |
+
@prompt(OpenAIEmbed())
def get_neighbors(model, inp, k):
    """Embed `inp` and return the contents of its `k` nearest documents.

    `model` is the OpenAI embedding backend injected by `@prompt`;
    `olympics` is the module-level HF dataset with a FAISS index on
    its "embeddings" column (see the `add_faiss_index` call above).
    """
    query_vec = np.array(model(inp))
    hits = olympics.get_nearest_examples("embeddings", query_vec, k)
    return hits.examples["content"]
|
32 |
|
33 |
+
@prompt(OpenAI(), template_file="qa.pmpt.tpl")
def get_result(model, query, neighbors):
    """Fill the QA template with the question and retrieved docs, then call the LLM."""
    template_vars = {"question": query, "docs": neighbors}
    return model(template_vars)
|
37 |
|
38 |
+
def qa(query, k=3):
    """Answer `query` by retrieving the `k` nearest documents and prompting the LLM.

    `k` defaults to 3, matching the previously hard-coded neighbor count,
    so existing single-argument callers (e.g. the Gradio `show` wiring)
    behave exactly as before.
    """
    neighbors = get_neighbors(query, k)
    return get_result(query, neighbors)
|
41 |
|
42 |
+
# $
|
43 |
|
44 |
|
45 |
+
# Example questions offered in the Gradio demo (the last one is
# deliberately unanswerable from the Olympics corpus).
questions = ["Who won the 2020 Summer Olympics men's high jump?",
             "Why was the 2020 Summer Olympics originally postponed?",
             "In the 2020 Summer Olympics, how many gold medals did the country which won the most medals win?",
             "What is the total number of medals won by France?",
             "What is the tallest mountain in the world?"]

# Extract the demo's displayed source: the section of this file between
# the "# $" markers, with the marker comment characters stripped.
# Fix: use a context manager so the file handle is closed instead of leaked.
with open("qa.py", "r") as _src:
    _code = _src.read().split("$")[1].strip().strip("#").strip()

gradio = show(qa,
              examples=questions,
              subprompts=[get_neighbors, get_result],
              description=desc,
              code=_code,
              )

if __name__ == "__main__":
    gradio.launch()
|
59 |
|
|
|
|
|
|
|
|
|
|
|
60 |
|
|
|
|
|
|
|
|
|
|
|
61 |
|
62 |
+
# # + tags=["hide_inp"]
|
63 |
+
# QAPrompt().show(
|
64 |
+
# {"question": "Who won the race?", "docs": ["doc1", "doc2", "doc3"]}, "Joe Bob"
|
65 |
+
# )
|
66 |
+
# # -
|
67 |
+
|
68 |
+
# show_log("qa.log")
|
qa.pmpt.tpl
CHANGED
@@ -8,5 +8,5 @@ Context:
|
|
8 |
|
9 |
Q: {{question}}
|
10 |
|
11 |
-
A:
|
12 |
|
|
|
8 |
|
9 |
Q: {{question}}
|
10 |
|
11 |
+
A:
|
12 |
|
requirements.txt
CHANGED
@@ -1,3 +1,4 @@
|
|
1 |
-
gradio
|
2 |
git+https://github.com/srush/minichain@gradio
|
3 |
manifest-ml
|
|
|
|
1 |
+
gradio==3.21.0
|
2 |
git+https://github.com/srush/minichain@gradio
|
3 |
manifest-ml
|
4 |
+
faiss-cpu
|