Rohit Rajpoot committed
Commit fe86603 · 1 Parent(s): 07c9c90

Update app.py

Files changed (4)
  1. Dockerfile +21 -0
  2. app.py +37 -0
  3. requirements.txt +4 -0
  4. training.txt +0 -0
Dockerfile ADDED
@@ -0,0 +1,21 @@
+ FROM python:3.9-slim
+
+ # Set working directory
+ WORKDIR /app
+
+ # Install system dependencies
+ RUN apt-get update && apt-get install -y build-essential
+
+ # Copy requirements and install Python packages
+ COPY requirements.txt .
+ RUN pip install --upgrade pip
+ RUN pip install -r requirements.txt
+
+ # Copy the rest of the app
+ COPY . .
+
+ # Expose Streamlit port
+ EXPOSE 8501
+
+ # Run the app
+ CMD ["streamlit", "run", "app.py", "--server.port=8501", "--server.address=0.0.0.0"]
app.py ADDED
@@ -0,0 +1,37 @@
+ import os
+
+ os.environ["MPLCONFIGDIR"] = "/tmp"  # Prevent matplotlib config errors
+ os.environ["STREAMLIT_BROWSER_GATHER_USAGE_STATS"] = "false"
+ os.environ["STREAMLIT_SERVER_HEADLESS"] = "true"
+
+ import streamlit as st
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+ import torch
+
+ # Title and UI
+ st.set_page_config(page_title="DeepSeek-R1 Chatbot", page_icon="🤖")
+ st.title("🧠 DeepSeek-R1 CPU Chatbot")
+ st.caption("Running entirely on CPU using Hugging Face Transformers")
+
+
+ # Load the model and tokenizer
+ @st.cache_resource
+ def load_model():
+     tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/DeepSeek-Coder-1.3B-base")
+     model = AutoModelForCausalLM.from_pretrained("deepseek-ai/DeepSeek-Coder-1.3B-base")
+     return tokenizer, model
+
+
+ tokenizer, model = load_model()
+
+ # Prompt input
+ user_input = st.text_area("📥 Enter your prompt here:", "Explain what a neural network is.")
+
+ if st.button("🧠 Generate Response"):
+     with st.spinner("Thinking..."):
+         inputs = tokenizer(user_input, return_tensors="pt")
+         outputs = model.generate(**inputs, max_new_tokens=100)
+         response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+     st.markdown("### 🤖 Response:")
+     st.write(response)
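app.py passes only max_new_tokens=100 to model.generate(), so decoding falls back to the greedy default. Below is a minimal standalone sketch of the same generation step with sampling switched on; do_sample, temperature, and top_p are standard transformers generate() arguments, and the specific values are illustrative assumptions rather than settings from this commit.

from transformers import AutoTokenizer, AutoModelForCausalLM

# Same model as app.py, loaded once outside any UI code.
tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/DeepSeek-Coder-1.3B-base")
model = AutoModelForCausalLM.from_pretrained("deepseek-ai/DeepSeek-Coder-1.3B-base")

inputs = tokenizer("Explain what a neural network is.", return_tensors="pt")
outputs = model.generate(
    **inputs,
    max_new_tokens=100,  # same cap as app.py
    do_sample=True,      # sample instead of greedy decoding
    temperature=0.7,     # illustrative value
    top_p=0.9,           # illustrative value
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))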
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ transformers==4.52.3
+ torch==2.7.0
+ streamlit==1.45.1
+ nltk
training.txt ADDED
File without changes