Abhaykoul committed on
Commit
1667a30
·
verified ·
1 Parent(s): 4f61861

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +21 -1
app.py CHANGED
@@ -3,7 +3,7 @@ from fastapi.responses import StreamingResponse
3
  import uvicorn
4
  from v1 import v1
5
  from v2 import v2
6
-
7
  app = FastAPI()
8
 
9
  @app.get("/Search/pro")
@@ -28,6 +28,26 @@ async def v2_chat(prompt: str):
28
  yield f"data: {chunk}\n\n"
29
 
30
  return StreamingResponse(response_generator(), media_type="text/event-stream")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
31
 
32
  if __name__ == "__main__":
33
  uvicorn.run(app, host="0.0.0.0", port=8000)
 
3
  import uvicorn
4
  from v1 import v1
5
  from v2 import v2
6
+ from chatv1 import CHATv1
7
  app = FastAPI()
8
 
9
  @app.get("/Search/pro")
 
28
  yield f"data: {chunk}\n\n"
29
 
30
  return StreamingResponse(response_generator(), media_type="text/event-stream")
31
@app.get("/chatv1")
async def chat_endpoint_get(
    user_prompt: str = Query(..., description="User's prompt"),
    system_prompt: Optional[str] = Query("You are a helpful AI assistant.", description="System prompt to set AI behavior")
):
    """Stream a CHATv1 reply to *user_prompt* as server-sent events (GET).

    Query params:
        user_prompt: the user's message (required).
        system_prompt: optional system instruction steering the model.

    Returns:
        StreamingResponse emitting one ``data: <chunk>`` SSE frame per
        chunk yielded by ``CHATv1.chat``.
    """
    def stream_chunks():
        # A fresh model instance per request; each chat chunk is wrapped
        # in an SSE "data:" frame terminated by a blank line.
        engine = CHATv1()
        for piece in engine.chat(user_prompt, system_prompt):
            yield f"data: {piece}\n\n"

    return StreamingResponse(stream_chunks(), media_type="text/event-stream")
42
+
43
@app.post("/chatv1")
async def chat_endpoint_post(request: ChatRequest):
    """Stream a CHATv1 reply for a posted prompt as server-sent events (POST).

    Body:
        request: ChatRequest carrying ``user_prompt`` and ``system_prompt``.

    Returns:
        StreamingResponse emitting one ``data: <chunk>`` SSE frame per
        chunk yielded by ``CHATv1.chat``.
    """
    # NOTE(review): ChatRequest is not defined or imported anywhere in this
    # diff — presumably declared in file lines not shown; confirm it exists
    # with user_prompt/system_prompt fields, otherwise this endpoint raises
    # NameError at import time.
    def stream_chunks():
        engine = CHATv1()
        for piece in engine.chat(request.user_prompt, request.system_prompt):
            yield f"data: {piece}\n\n"

    return StreamingResponse(stream_chunks(), media_type="text/event-stream")
51
 
52
# Entry point: serve the app on all interfaces, port 8000, when run directly.
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)