Commit 90efa34 · modified api
Parent(s): 5c3b51c
client.py CHANGED

@@ -11,7 +11,7 @@ file = st.file_uploader("Choose a file", type=["pdf"])
 if st.button("Submit"):
     if file is not None:
         files = {"file": (file.name, file, file.type)}
-        response = requests.post("
+        response = requests.post("https://ahmed-eisa-genai-service.hf.space/upload", files=files)
         st.write(response.text)
     else:
         st.write("No file uploaded.")

@@ -29,9 +29,10 @@ if prompt := st.chat_input("Write your prompt in this input field"):
     with st.chat_message("user"):
         st.text(prompt)

-    response = requests.
-
-
+    response = requests.post(
+        "https://ahmed-eisa-genai-service.hf.space/generate/text",
+        json={"prompt": prompt}
+    )
     response.raise_for_status()

     with st.chat_message("assistant"):
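The two endpoints touched in client.py can also be exercised outside Streamlit, which is handy for checking the Space after a deploy. The snippet below is a minimal sketch, not part of the commit: the base URL and payload shapes (multipart file for /upload, a JSON body with a "prompt" field for /generate/text) are taken from the new lines above, while the file name and prompt text are illustrative only.

import requests

BASE_URL = "https://ahmed-eisa-genai-service.hf.space"  # Space URL from the diff

# Upload a PDF the same way the Streamlit client does.
with open("example.pdf", "rb") as f:  # hypothetical local file
    files = {"file": ("example.pdf", f, "application/pdf")}
    upload_resp = requests.post(f"{BASE_URL}/upload", files=files)
upload_resp.raise_for_status()
print(upload_resp.text)

# Call the text-generation endpoint with a JSON prompt, mirroring the chat path.
gen_resp = requests.post(
    f"{BASE_URL}/generate/text",
    json={"prompt": "Summarize the uploaded document."},
)
gen_resp.raise_for_status()
print(gen_resp.text)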
main.py CHANGED

@@ -10,7 +10,7 @@ from uuid import uuid4
 import time
 from datetime import datetime, timezone
 import csv
-from dependencies import get_urls_content
+from dependencies import get_urls_content,get_rag_content
 from schemas import TextModelResponse,TextModelRequest
 import shutil, uuid
 from upload import save_file

@@ -73,7 +73,7 @@ def root_controller():
 @app.post("/generate/text")
 async def serve_language_model_controller(request: Request,
                                           body: TextModelRequest ,
-                                          urls_content: str = Depends(get_urls_content)) -> TextModelResponse:
+                                          urls_content: str = Depends(get_urls_content), rag_content: str = Depends(get_rag_content)) -> TextModelResponse:
     prompt = body.prompt + " " + urls_content
     output = generate_text(models["text"], prompt, body.temperature)
     return TextModelResponse(content=output, ip=request.client.host)
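This commit wires get_rag_content into the /generate/text controller as a second FastAPI dependency, but its implementation lives in dependencies.py (not shown here), and in this hunk rag_content is not yet appended to the prompt; only urls_content is. As a rough sketch only, a dependency of that shape could look like the following — the name and the str return type come from the diff, while everything else (upload directory, naive keyword filter) is assumed.

# dependencies.py -- hypothetical sketch of a get_rag_content dependency.
from pathlib import Path

from schemas import TextModelRequest

UPLOAD_DIR = Path("uploads")  # assumed location used by upload.save_file


async def get_rag_content(body: TextModelRequest) -> str:
    """Return document text to ground the prompt; empty string if none found.

    A FastAPI dependency may declare the same body model as the endpoint,
    so this runs once per /generate/text request without re-reading the body.
    """
    chunks: list[str] = []
    for path in UPLOAD_DIR.glob("*.txt"):  # assumed pre-extracted text files
        text = path.read_text(errors="ignore")
        # Naive relevance filter: keep files sharing any word with the prompt.
        if any(word.lower() in text.lower() for word in body.prompt.split()):
            chunks.append(text)
    return "\n\n".join(chunks)

If the retrieved text is meant to reach the model, the controller would also need to fold rag_content into the prompt, e.g. prompt = body.prompt + " " + urls_content + " " + rag_content.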