Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -1,22 +1,32 @@
|
|
1 |
import streamlit as st
|
2 |
import torch
|
3 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
4 |
-
from pyngrok import ngrok
|
5 |
import random
|
6 |
-
import re
|
7 |
|
8 |
-
#
|
9 |
-
|
10 |
-
|
11 |
-
|
12 |
-
|
13 |
-
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
)
|
18 |
|
19 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
20 |
menu = {
|
21 |
"meals": ["Grilled Chicken with Rice", "Beef Steak", "Salmon with Lemon Butter Sauce", "Vegetable Stir-Fry"],
|
22 |
"fast_foods": ["Cheeseburger", "Pepperoni Pizza", "Fried Chicken", "Hot Dog", "Tacos", "French Fries"],
|
@@ -24,6 +34,7 @@ menu = {
|
|
24 |
"sweets": ["Chocolate Cake", "Ice Cream", "Apple Pie", "Cheesecake", "Brownies", "Donuts"]
|
25 |
}
|
26 |
|
|
|
27 |
system_prompt = f"""
|
28 |
You are OrderBot, a virtual restaurant assistant.
|
29 |
You help customers order food from the following menu:
|
@@ -40,67 +51,38 @@ Rules:
|
|
40 |
"""
|
41 |
|
42 |
def process_order(user_input):
|
43 |
-
"""
|
44 |
-
Handles chatbot conversation and order processing.
|
45 |
-
"""
|
46 |
responses = {
|
47 |
-
"greetings": ["Hello! How can I assist you today?", "Hey there! What would you like to order?
|
48 |
-
"farewell": ["Goodbye! Have a great day! π", "See you next time!"
|
49 |
-
"thanks": ["You're welcome! π", "Happy to help!"
|
50 |
-
"default": ["I'm not sure how to respond to that. Can I take your order?", "
|
51 |
}
|
52 |
-
|
53 |
-
|
54 |
-
|
55 |
-
if any(word in user_input for word in ["hello", "hi", "hey"]):
|
56 |
return random.choice(responses["greetings"])
|
57 |
-
elif any(word in user_input for word in ["bye", "goodbye", "see you"]):
|
58 |
return random.choice(responses["farewell"])
|
59 |
-
elif any(word in user_input for word in ["thank you", "thanks"]):
|
60 |
return random.choice(responses["thanks"])
|
61 |
-
|
62 |
-
|
63 |
-
|
64 |
-
|
65 |
-
|
66 |
-
|
67 |
-
|
68 |
-
|
69 |
-
|
70 |
-
|
71 |
-
return response
|
72 |
-
|
73 |
-
# π¨ Streamlit UI
|
74 |
-
st.title("π OrderBot: AI Restaurant Assistant")
|
75 |
-
st.write(f"π **Public URL:** [{public_url}]({public_url}) (via ngrok)")
|
76 |
-
|
77 |
-
# βΉοΈ Display OrderBot Description
|
78 |
-
st.markdown("""
|
79 |
-
### π Hey there, I am OrderBot! Your friendly Restaurants AI Agent.
|
80 |
-
I am an **AI-driven assistant** powered by the **DeepSeek-7B Chat** model, designed for seamless natural language interaction.
|
81 |
-
I leverage **advanced machine learning** to process and respond to human input with **precision and efficiency**.
|
82 |
-
Let me take your order! ππ₯€π°
|
83 |
-
""")
|
84 |
-
|
85 |
-
# Chat History
|
86 |
if "messages" not in st.session_state:
|
87 |
-
st.session_state
|
88 |
-
|
89 |
-
# Display previous chat messages
|
90 |
-
for message in st.session_state.messages:
|
91 |
-
with st.chat_message(message["role"]):
|
92 |
-
st.write(message["content"])
|
93 |
|
94 |
-
|
95 |
-
|
96 |
|
|
|
97 |
if user_input:
|
98 |
-
st.session_state.messages.append({"role": "user", "content": user_input})
|
99 |
-
with st.chat_message("user"):
|
100 |
-
st.write(user_input)
|
101 |
-
|
102 |
response = process_order(user_input)
|
103 |
-
|
104 |
-
st.session_state
|
105 |
-
|
106 |
-
|
|
|
1 |
import streamlit as st
|
2 |
import torch
|
3 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
|
|
4 |
import random
|
|
|
5 |
|
6 |
# Set up the page title and description.
# NOTE: st.set_page_config must be the first Streamlit call in the script,
# before any other st.* function, or Streamlit raises an error on rerun.
st.set_page_config(page_title="OrderBot - AI Chatbot", page_icon="π")
st.title("π OrderBot - AI Chatbot")
st.markdown(
    """
### Hey there! I am OrderBot, an AI-driven assistant powered by the DeepSeek-7B Chat model.
I am designed for seamless natural language interaction. Leveraging advanced machine learning,
I process and respond to human input with precision and efficiency.
"""
)
16 |
|
17 |
# Load tokenizer and model (cached so Streamlit reruns don't reload the weights)
@st.cache_resource()
def load_model():
    """Load the DeepSeek-7B chat tokenizer and model exactly once per process.

    Returns:
        A ``(tokenizer, model)`` pair. The model is loaded in float16 with
        ``device_map="auto"``, spilling layers that don't fit into
        ``offload_weights/`` on disk.
    """
    checkpoint = "deepseek-ai/deepseek-llm-7b-chat"
    tok = AutoTokenizer.from_pretrained(checkpoint)
    lm = AutoModelForCausalLM.from_pretrained(
        checkpoint,
        torch_dtype=torch.float16,
        device_map="auto",
        offload_folder="offload_weights",
    )
    return tok, lm

tokenizer, model = load_model()
|
28 |
+
|
29 |
+
# Define menu
|
30 |
menu = {
|
31 |
"meals": ["Grilled Chicken with Rice", "Beef Steak", "Salmon with Lemon Butter Sauce", "Vegetable Stir-Fry"],
|
32 |
"fast_foods": ["Cheeseburger", "Pepperoni Pizza", "Fried Chicken", "Hot Dog", "Tacos", "French Fries"],
|
|
|
34 |
"sweets": ["Chocolate Cake", "Ice Cream", "Apple Pie", "Cheesecake", "Brownies", "Donuts"]
|
35 |
}
|
36 |
|
37 |
+
# Order processing
|
38 |
system_prompt = f"""
|
39 |
You are OrderBot, a virtual restaurant assistant.
|
40 |
You help customers order food from the following menu:
|
|
|
51 |
"""
|
52 |
|
53 |
def process_order(user_input):
    """Handle one chat turn: canned replies for small talk, LLM otherwise.

    Args:
        user_input: Raw text typed by the customer.

    Returns:
        The assistant's reply as a string — a random canned response for
        greetings/farewells/thanks, or a DeepSeek-generated reply for
        anything else.
    """
    responses = {
        "greetings": ["Hello! How can I assist you today?", "Hey there! What would you like to order? π"],
        "farewell": ["Goodbye! Have a great day! π", "See you next time!"],
        "thanks": ["You're welcome! π", "Happy to help!"],
        "default": ["I'm not sure how to respond to that. Can I take your order?", "Tell me more!"]
    }

    # Lowercase once instead of once per branch.
    # NOTE(review): these are substring checks, so e.g. "hi" also matches
    # inside "this" — kept as-is to preserve existing behavior ("see you"
    # relies on phrase matching, so a word-split would break it).
    text = user_input.lower()
    if any(word in text for word in ["hello", "hi", "hey"]):
        return random.choice(responses["greetings"])
    elif any(word in text for word in ["bye", "goodbye", "see you"]):
        return random.choice(responses["farewell"])
    elif any(word in text for word in ["thank you", "thanks"]):
        return random.choice(responses["thanks"])
    else:
        prompt = f"{system_prompt}\nUser: {user_input}\nOrderBot:"
        # FIX: move inputs to the model's actual device instead of hardcoding
        # "cuda" — with device_map="auto" and disk offload the model's entry
        # point may not be cuda:0, and the hardcoded string crashes outright
        # on CPU-only hosts.
        inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
        output = model.generate(**inputs, max_new_tokens=150)
        # Keep only the text after the final "OrderBot:" marker, since the
        # decoded output echoes the whole prompt.
        response = tokenizer.decode(output[0], skip_special_tokens=True).split("OrderBot:")[-1].strip()
        return response
|
73 |
+
|
74 |
# Chat interface
st.subheader("π¬ Chat with OrderBot")

# Initialize the chat history once; st.session_state survives Streamlit's
# top-to-bottom rerun on every widget interaction.
if "messages" not in st.session_state:
    st.session_state["messages"] = []

# Replay prior turns so the conversation stays visible across reruns.
for msg in st.session_state["messages"]:
    st.chat_message(msg["role"]).write(msg["content"])

user_input = st.text_input("You:", placeholder="Type your message here...")
if user_input:
    response = process_order(user_input)
    # Record both turns first, then render them immediately below the
    # replayed history for this run.
    st.session_state["messages"].append({"role": "user", "content": user_input})
    st.session_state["messages"].append({"role": "assistant", "content": response})
    st.chat_message("user").write(user_input)
    st.chat_message("assistant").write(response)
|