add interface
Files changed:
- .ipynb_checkpoints/app-checkpoint.py +17 -3
- .ipynb_checkpoints/requirements-checkpoint.txt +1 -0
- app.py +17 -3
- requirements.txt +1 -0
.ipynb_checkpoints/app-checkpoint.py CHANGED

@@ -1,7 +1,21 @@
+import os
+API_TOKEN = os.getenv('API_TOKEN')
+
 import gradio as gr
+import requests
+
+API_URL = "https://api-inference.huggingface.co/models/tlkh/flan-t5-paraphrase-classify-explain"
+headers = {"Authorization": f"Bearer {API_TOKEN}"}
 
-def greet(name):
-    return "Hello " + name + "!!"
+def query(payload):
+    response = requests.post(API_URL, headers=headers, json=payload)
+    return response.json()
+
+def infer(s1, s2):
+    model_input = "Classify and explain the relationship between this pair of sentences: <S1> "+s1+" </S1><S2> "+s2+" </S2>"
+    data = query(payload)
+    return data
 
-demo = gr.Interface(fn=greet, inputs="text", outputs="text")
+demo = gr.Interface(fn=infer, inputs=["text", "text"], outputs="text")
+
 demo.launch()
.ipynb_checkpoints/requirements-checkpoint.txt CHANGED

@@ -1,2 +1,3 @@
 gradio
 transformers
+requests
app.py CHANGED

@@ -1,7 +1,21 @@
+import os
+API_TOKEN = os.getenv('API_TOKEN')
+
 import gradio as gr
+import requests
+
+API_URL = "https://api-inference.huggingface.co/models/tlkh/flan-t5-paraphrase-classify-explain"
+headers = {"Authorization": f"Bearer {API_TOKEN}"}
 
-def greet(name):
-    return "Hello " + name + "!!"
+def query(payload):
+    response = requests.post(API_URL, headers=headers, json=payload)
+    return response.json()
+
+def infer(s1, s2):
+    model_input = "Classify and explain the relationship between this pair of sentences: <S1> "+s1+" </S1><S2> "+s2+" </S2>"
+    data = query(payload)
+    return data
 
-demo = gr.Interface(fn=greet, inputs="text", outputs="text")
+demo = gr.Interface(fn=infer, inputs=["text", "text"], outputs="text")
+
 demo.launch()
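Note: as committed, infer() calls query(payload) but payload is never defined, so the Space will raise a NameError on the first request (the same code is mirrored in the .ipynb_checkpoints copy). A minimal sketch of a working app.py, assuming the hosted Inference API's usual {"inputs": ...} request payload and a [{"generated_text": ...}] response shape for this text2text model (both are assumptions, not confirmed by the commit):

import os

import gradio as gr
import requests

API_TOKEN = os.getenv("API_TOKEN")
API_URL = "https://api-inference.huggingface.co/models/tlkh/flan-t5-paraphrase-classify-explain"
headers = {"Authorization": f"Bearer {API_TOKEN}"}


def query(payload):
    # POST the JSON payload to the hosted Inference API and return the decoded JSON response.
    response = requests.post(API_URL, headers=headers, json=payload)
    return response.json()


def infer(s1, s2):
    # Build the prompt exactly as in the commit, then wrap it in the {"inputs": ...}
    # envelope the Inference API expects (assumption about the request format).
    model_input = (
        "Classify and explain the relationship between this pair of sentences: "
        f"<S1> {s1} </S1><S2> {s2} </S2>"
    )
    data = query({"inputs": model_input})
    # Text2text endpoints typically return [{"generated_text": "..."}]; fall back to the
    # raw JSON if the shape differs (assumption about the response format).
    if isinstance(data, list) and data and "generated_text" in data[0]:
        return data[0]["generated_text"]
    return str(data)


demo = gr.Interface(fn=infer, inputs=["text", "text"], outputs="text")
demo.launch()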
requirements.txt CHANGED

@@ -1,2 +1,3 @@
 gradio
 transformers
+requests