Update app.py
app.py
CHANGED
@@ -1,11 +1,12 @@
-from fastapi import FastAPI
-
-
-
+from fastapi import FastAPI
+from fastapi.responses import StreamingResponse
+from starlette.responses import HTMLResponse
 from llama_cpp import Llama
 import time
 import os
 
+app = FastAPI()
+
 llm = Llama(model_path="llama-2-7b-chat.Q3_K_S.gguf", n_ctx=2048, n_batch=512, use_mlock=True, n_threads=8)
 time.sleep(8)
 os.system("cls")
@@ -13,8 +14,7 @@ print("Chatbot by Aritra Roy & DVLH")
 import warnings
 warnings.filterwarnings("ignore")
 
-
-async def chat_endpoint(ask: str = Form(...)):
+def generate_responses(ask):
     prompt = f"Llama-2-Chat [INST] <<SYS>>You're an assistant named Tusti. You are developed by Aritra Roy. Don't share any false information.<</SYS>> {ask} [/INST]"
 
     output_stream = llm(prompt, max_tokens=1024, echo=False, temperature=0.2, top_p=0.1, stream=True)
@@ -25,13 +25,37 @@ async def chat_endpoint(ask: str = Form(...)):
             chunk = next(output_stream)
             if chunk.get('choices') and chunk['choices'][0].get('text'):
                 response_text_chunk = chunk['choices'][0]['text']
-
-
+                yield response_text_chunk
+
         except StopIteration:
             break
+
     except StopIteration:
         pass
 
+@app.get("/", response_class=HTMLResponse)
+def home():
+    return """
+    <html>
+        <head>
+            <title>Chatbot Streaming</title>
+        </head>
+        <body>
+            <h1>Chatbot Streaming</h1>
+            <form action="/ask/" method="post">
+                <label for="ask">You:</label>
+                <input type="text" id="ask" name="ask" required>
+                <button type="submit">Ask</button>
+            </form>
+        </body>
+    </html>
+    """
+
+@app.post("/ask/")
+def ask_endpoint(ask: str):
+    return StreamingResponse(generate_responses(ask))
+
 if __name__ == "__main__":
     import uvicorn
-
+
+    uvicorn.run(app, host="0.0.0.0", port=7856)
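
The core pattern this commit introduces is handing a plain Python generator to FastAPI's StreamingResponse, which flushes each yielded chunk to the client as it is produced. A stripped-down sketch of that pattern with a stub generator in place of the llama_cpp model (the route, names, and stub text here are illustrative, not part of the commit):

import uvicorn
from fastapi import FastAPI
from fastapi.responses import StreamingResponse

app = FastAPI()

def token_stream(ask: str):
    # Stand-in for the llm(..., stream=True) iterator in the commit,
    # where each chunk exposes chunk["choices"][0]["text"].
    for word in f"You asked: {ask}".split():
        yield word + " "

@app.post("/demo/")
def demo(ask: str):
    # text/plain lets clients render the chunks as they arrive.
    return StreamingResponse(token_stream(ask), media_type="text/plain")

if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7856)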
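
When exercising the new endpoint, note that ask_endpoint declares ask: str without Form(...), so FastAPI binds it as a query parameter rather than reading the form body that the home page posts. A hypothetical client call against a locally running copy of the Space (URL, port, and question are assumptions):

import requests

# POST with `ask` in the query string, matching the `ask: str` signature.
with requests.post(
    "http://localhost:7856/ask/",
    params={"ask": "Who developed you?"},
    stream=True,  # consume the StreamingResponse incrementally
) as resp:
    resp.raise_for_status()
    resp.encoding = "utf-8"  # the response carries no explicit charset
    for chunk in resp.iter_content(chunk_size=None, decode_unicode=True):
        print(chunk, end="", flush=True)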