# HTMLResponse is not exported from the fastapi top-level package (it lives in
# fastapi.responses) and was unused, as was the time import; both are dropped.
from fastapi import FastAPI, Form
from fastapi.responses import StreamingResponse

app = FastAPI()
def generate_responses(prompt):
    # Imported lazily so the server process can start before llama_cpp loads.
    from llama_cpp import Llama

    # NOTE: the model is reloaded on every request, which is slow; loading it
    # once at module level would avoid the repeated startup cost.
    llm = Llama(
        model_path="llama-2-7b-chat.Q3_K_S.gguf",
        n_ctx=2048,
        n_batch=512,
        use_mlock=True,
        n_threads=8,
    )
    output_stream = llm(
        prompt,
        max_tokens=1024,
        echo=False,
        temperature=0.2,
        top_p=0.1,
        stream=True,
    )
    # With stream=True, llama_cpp yields completion chunks as they are
    # generated. A plain for loop replaces the original nested try/except
    # around next(), since iteration already handles StopIteration.
    for chunk in output_stream:
        if chunk.get("choices") and chunk["choices"][0].get("text"):
            yield chunk["choices"][0]["text"]
@app.post("/chat")
async def chat(input_text: str = Form(...)):
    # Llama-2 chat format: system prompt inside <<SYS>> tags, user turn
    # wrapped in [INST] ... [/INST].
    prompt = (
        "Llama-2-Chat [INST] <<SYS>>You're an assistant named Tusti. "
        "You were developed by Aritra Roy. Don't share any false information."
        f"<</SYS>> {input_text} [/INST]"
    )
    return StreamingResponse(generate_responses(prompt), media_type="text/plain")
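
# Example client call (a sketch, assuming the server is running locally on
# port 7860): -N stops curl from buffering, so streamed tokens print as they
# arrive rather than all at once when the response completes.
#
#   curl -N -X POST -d "input_text=Who are you?" http://localhost:7860/chat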
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860)