ak0601 committed on
Commit
00f5488
·
verified ·
1 Parent(s): 702426b

Upload 2 files

Browse files
Files changed (2) hide show
  1. Dockerfile +16 -0
  2. app.py +91 -0
Dockerfile ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
# you will also find guides on how best to write your Dockerfile

FROM python:3.9

# Run as a dedicated non-root user (recommended for Hugging Face Spaces).
RUN useradd -m -u 1000 user
USER user
# Make user-level pip installs (~/.local/bin) resolvable on PATH.
ENV PATH="/home/user/.local/bin:$PATH"

WORKDIR /app

# Copy and install requirements first so this layer is cached
# across code-only changes.
COPY --chown=user ./requirements.txt requirements.txt
RUN pip install --no-cache-dir --upgrade -r requirements.txt

# Copy the application source into the image.
COPY --chown=user . /app
# Spaces route traffic to port 7860; serve the FastAPI app there.
CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
app.py ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import FastAPI, Request, Form, UploadFile, File
2
+ from fastapi.templating import Jinja2Templates
3
+ from fastapi.responses import HTMLResponse, RedirectResponse
4
+ from fastapi.staticfiles import StaticFiles
5
+ from dotenv import load_dotenv
6
+ import os, io
7
+ from PIL import Image
8
+ import markdown
9
+ import google.generativeai as genai
10
+
11
# --- Application setup (module-level side effects) ---

# Load environment variables from a local .env file, if present.
load_dotenv()

# SECURITY FIX: the original code embedded a literal Google API key as a
# fallback. Never commit credentials; if that key was ever pushed, revoke
# and rotate it. The key must now come from the environment.
API_KEY = os.getenv("GOOGLE_API_KEY")
if not API_KEY:
    raise RuntimeError(
        "GOOGLE_API_KEY is not set; export it or add it to a .env file."
    )
genai.configure(api_key=API_KEY)

app = FastAPI()
templates = Jinja2Templates(directory="templates")
app.mount("/static", StaticFiles(directory="static"), name="static")

model = genai.GenerativeModel('gemini-2.0-flash')

# Global single-session chat state: the Gemini chat object plus a parallel
# list of {"role", "content"} dicts used only for rendering the template.
# NOTE(review): module-level state is shared by ALL clients of this app —
# acceptable for a single-user demo Space, unsafe for multi-user use.
chat = None
chat_history = []
25
+
26
@app.get("/", response_class=HTMLResponse)
async def root(request: Request):
    """Render the chat page with the conversation accumulated so far."""
    context = {"request": request, "chat_history": chat_history}
    return templates.TemplateResponse("index.html", context)
32
+
33
@app.post("/", response_class=HTMLResponse)
async def handle_input(
    request: Request,
    user_input: str = Form(...),
    image: UploadFile = File(None)
):
    """Handle one chat form submission (text plus an optional image).

    Appends the user's turn and the model's reply (or an error notice) to
    the module-level ``chat_history``, then redirects back to ``/`` using
    Post-Redirect-Get so a browser refresh does not resubmit the form.
    """
    global chat, chat_history

    # Lazily create the Gemini chat session on first use.
    if chat is None:
        chat = model.start_chat(history=[])

    parts = []
    if user_input:
        parts.append(user_input)

    # Text shown in the UI for this user turn.
    user_message = user_input

    # FIX: content_type can be None when no file was uploaded, and calling
    # .startswith on None raises AttributeError — guard it explicitly.
    if image and image.content_type and image.content_type.startswith("image/"):
        data = await image.read()
        try:
            img = Image.open(io.BytesIO(data))
            parts.append(img)
            user_message += " [Image uploaded]"  # Indicate image in chat history
        except Exception as e:
            chat_history.append({
                "role": "model",
                "content": markdown.markdown(f"**Error loading image:** {e}")
            })
            return RedirectResponse("/", status_code=303)

    # FIX: nothing usable to send (blank text and no valid image) — bail out
    # early instead of calling the API with an empty parts list.
    if not parts:
        return RedirectResponse("/", status_code=303)

    # Record the user's turn for display.
    chat_history.append({"role": "user", "content": user_message})

    try:
        # Send text (and optional PIL image) to the Gemini chat session.
        resp = chat.send_message(parts)
        # NOTE(review): success replies are stored as raw markdown text while
        # the error paths store rendered HTML via markdown.markdown() —
        # confirm which form the template expects before unifying.
        raw = resp.text
        chat_history.append({"role": "model", "content": raw})

    except Exception as e:
        err = f"**Error:** {e}"
        chat_history.append({
            "role": "model",
            "content": markdown.markdown(err)
        })

    # Post-Redirect-Get
    return RedirectResponse("/", status_code=303)
84
+
85
# Reset the conversation: drop the Gemini session and wipe the UI history.
@app.post("/new")
async def new_chat():
    """Start a fresh chat, then bounce the browser back to the main page."""
    global chat, chat_history
    chat_history.clear()
    chat = None
    return RedirectResponse("/", status_code=303)