sunbal7 commited on
Commit
6648f74
·
verified ·
1 Parent(s): d33305b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +199 -103
app.py CHANGED
@@ -1,104 +1,200 @@
1
- import os
2
- import fitz # PyMuPDF for PDF processing
3
- import faiss
4
- import numpy as np
5
  import streamlit as st
6
- from langchain.text_splitter import RecursiveCharacterTextSplitter
7
- from sentence_transformers import SentenceTransformer
8
- from groq import Groq
9
- from dotenv import load_dotenv
10
-
11
- # Load API key
12
- load_dotenv()
13
- GROQ_API_KEY = os.getenv("GROQ_API_KEY")
14
-
15
- # Initialize Groq client
16
- client = Groq(api_key=GROQ_API_KEY)
17
-
18
- # Load sentence transformer model for embedding
19
- embedding_model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
20
-
21
- def extract_text_from_pdf(pdf_path):
22
- """Extract text from a PDF file using PyMuPDF."""
23
- doc = fitz.open(pdf_path)
24
- text = ""
25
- for page in doc:
26
- text += page.get_text("text") + "\n"
27
- return text.strip()
28
-
29
- def create_text_chunks(text, chunk_size=500, chunk_overlap=100):
30
- """Split text into chunks of specified size with overlap."""
31
- text_splitter = RecursiveCharacterTextSplitter(
32
- chunk_size=chunk_size,
33
- chunk_overlap=chunk_overlap
34
- )
35
- chunks = text_splitter.split_text(text)
36
- return chunks
37
-
38
- def create_faiss_index(chunks):
39
- """Generate embeddings for text chunks and store them in FAISS."""
40
- embeddings = embedding_model.encode(chunks, convert_to_numpy=True)
41
- dimension = embeddings.shape[1]
42
-
43
- index = faiss.IndexFlatL2(dimension) # L2 (Euclidean) distance
44
- index.add(embeddings) # Add embeddings to FAISS index
45
-
46
- return index, embeddings, chunks
47
-
48
- def retrieve_similar_chunks(query, index, embeddings, chunks, top_k=3):
49
- """Retrieve the most relevant text chunks using FAISS."""
50
- query_embedding = embedding_model.encode([query], convert_to_numpy=True)
51
- distances, indices = index.search(query_embedding, top_k)
52
-
53
- results = [chunks[idx] for idx in indices[0]]
54
- return results
55
-
56
- def query_groq_api(query, context):
57
- """Send the query along with retrieved context to Groq API."""
58
- prompt = f"Use the following context to answer the question:\n\n{context}\n\nQuestion: {query}\nAnswer:"
59
-
60
- chat_completion = client.chat.completions.create(
61
- messages=[{"role": "user", "content": prompt}],
62
- model="llama-3.3-70b-versatile",
63
- )
64
-
65
- return chat_completion.choices[0].message.content
66
-
67
- # Streamlit UI
68
- st.title("πŸ“š RAG-based PDF Query Application")
69
- st.write("Upload a PDF and ask questions!")
70
-
71
- # File Upload
72
- uploaded_file = st.file_uploader("Upload PDF", type="pdf")
73
-
74
- if uploaded_file is not None:
75
- pdf_path = "uploaded_document.pdf"
76
-
77
- # Save file temporarily
78
- with open(pdf_path, "wb") as f:
79
- f.write(uploaded_file.getbuffer())
80
-
81
- # Process the PDF
82
- st.write("Processing PDF...")
83
- text = extract_text_from_pdf(pdf_path)
84
- chunks = create_text_chunks(text)
85
- index, embeddings, chunk_texts = create_faiss_index(chunks)
86
-
87
- st.success("PDF processed! Now you can ask questions.")
88
-
89
- # User Query
90
- query = st.text_input("Ask a question about the PDF:")
91
-
92
- if st.button("Get Answer"):
93
- if query:
94
- # Retrieve top chunks
95
- relevant_chunks = retrieve_similar_chunks(query, index, embeddings, chunk_texts)
96
- context = "\n\n".join(relevant_chunks)
97
-
98
- # Query Groq API
99
- response = query_groq_api(query, context)
100
-
101
- st.subheader("Answer:")
102
- st.write(response)
103
- else:
104
- st.warning("Please enter a question.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import random
import re
import time

import streamlit as st
from streamlit.components.v1 import html
5
+
6
# Page configuration (must run before any other Streamlit UI call).
_PAGE_SETTINGS = {
    "page_title": "Emotion Mirror Chatbot",
    "page_icon": "😊",
    "layout": "centered",
    "initial_sidebar_state": "collapsed",
}
st.set_page_config(**_PAGE_SETTINGS)
13
+
14
# Inject the light-purple theme. Streamlit exposes no fine-grained theming
# API here, so the palette is defined as CSS custom properties and applied
# via raw <style> markup — hence unsafe_allow_html=True. The .user-message /
# .bot-message class names must match the f"{role}-message" classes built
# in the chat-rendering loop below.
st.markdown("""
<style>
    :root {
        --primary: #b19cd9;
        --background: #f5f0ff;
        --secondary-background: #e6e0fa;
        --text: #4a4a4a;
        --font: "Arial", sans-serif;
    }

    body {
        background-color: var(--background);
        color: var(--text);
        font-family: var(--font);
    }

    .stTextInput>div>div>input {
        background-color: var(--secondary-background) !important;
        color: var(--text) !important;
    }

    .stButton>button {
        background-color: var(--primary) !important;
        color: white !important;
        border: none;
        border-radius: 8px;
        padding: 8px 16px;
    }

    .stMarkdown {
        font-family: monospace !important;
        font-size: 16px !important;
    }

    .chat-message {
        padding: 12px;
        border-radius: 12px;
        margin: 8px 0;
        max-width: 80%;
    }

    .user-message {
        background-color: var(--secondary-background);
        margin-left: auto;
        text-align: right;
    }

    .bot-message {
        background-color: var(--primary);
        color: white;
        margin-right: auto;
    }

    .face-container {
        text-align: center;
        padding: 20px;
        background: white;
        border-radius: 16px;
        box-shadow: 0 4px 12px rgba(0,0,0,0.1);
        margin: 20px 0;
    }
</style>
""", unsafe_allow_html=True)
78
+
79
# Emotion keyword databases — membership in these sets drives detect_emotion().
POSITIVE_WORDS = {"happy", "awesome", "great", "joy", "excited", "good", "wonderful", "fantastic", "amazing"}
NEGATIVE_WORDS = {"sad", "depressed", "angry", "cry", "lonely", "bad", "terrible", "awful", "miserable"}
# Supportive follow-ups; one is chosen at random when a "sad" message is detected.
HELP_RESPONSES = [
    "Would you like to talk about it? 💬",
    "I'm here to listen 💙",
    "Want some uplifting quotes? 📜",
    "Would a virtual hug help? 🤗",
    "Let's focus on something positive 🌈"
]

# ASCII Art Library — one framed face per detectable emotion.
# Keys must match the strings returned by detect_emotion().
FACES = {
    "happy": r"""
    ╔════════╗
    😄 AWESOME!
    ╚════════╝
    """,
    "sad": r"""
    ╔════════╗
    😢 SAD DAY?
    ╚════════╝
    """,
    "neutral": r"""
    ╔════════╗
    😐 HELLO
    ╚════════╝
    """,
    "love": r"""
    ╔════════╗
    😍 LOVELY!
    ╚════════╝
    """
}
113
+
114
# Celebration effect, fired when the user expresses a positive emotion.
def confetti_effect():
    """Render a client-side confetti burst in the browser.

    Injects the canvas-confetti CDN script plus a small driver that fires
    five overlapping bursts; everything runs in the browser via
    streamlit.components.v1.html, so this function has no Python-side result.

    NOTE(review): the CDN src reads "[email protected]" — this looks like a
    scraper-mangled "canvas-confetti@<version>" URL; confirm and restore the
    real package path, otherwise the script tag 404s and no confetti shows.
    """
    script = """
    <script src="https://cdn.jsdelivr.net/npm/[email protected]/dist/confetti.browser.min.js"></script>
    <script>
    const count = 200;
    const defaults = {
        origin: { y: 0.7 }
    };

    function fire(particleRatio, opts) {
        confetti(Object.assign({}, defaults, opts, {
            particleCount: Math.floor(count * particleRatio)
        }));
    }

    fire(0.25, { spread: 26, startVelocity: 55 });
    fire(0.2, { spread: 60 });
    fire(0.35, { spread: 100, decay: 0.91, scalar: 0.8 });
    fire(0.1, { spread: 120, startVelocity: 25, decay: 0.92, scalar: 1.2 });
    fire(0.1, { spread: 120, startVelocity: 45 });
    </script>
    """
    html(script)
138
+
139
# Emotion detection function
def detect_emotion(text, positive_words=None, negative_words=None):
    """Classify *text* into one of four coarse emotions.

    Args:
        text: the user's raw chat message.
        positive_words: optional iterable overriding POSITIVE_WORDS
            (defaults to the module-level set).
        negative_words: optional iterable overriding NEGATIVE_WORDS
            (defaults to the module-level set).

    Returns:
        One of "happy", "sad", "love" or "neutral". Positive keywords are
        checked first, so mixed input like "happy but sad" reports "happy".

    Keyword matching is done on whole words so that e.g. "unhappy" is not
    mistaken for "happy" and "badge" does not trigger "bad" (the previous
    substring test did exactly that). The love/heart check deliberately
    remains a substring test so inputs like "lovely" keep triggering the
    love face.
    """
    pos = POSITIVE_WORDS if positive_words is None else set(positive_words)
    neg = NEGATIVE_WORDS if negative_words is None else set(negative_words)

    lowered = text.lower()
    # Tokenize into lowercase words (apostrophes kept for contractions).
    words = set(re.findall(r"[a-z']+", lowered))

    if words & pos:
        return "happy"
    if words & neg:
        return "sad"
    if "love" in lowered or "heart" in lowered:
        return "love"
    return "neutral"
149
+
150
# One-time session bootstrap: the chat transcript plus the emotion currently
# mirrored by the ASCII face. Guarded so reruns keep the existing state.
if "messages" not in st.session_state:
    st.session_state["messages"] = []
    st.session_state["current_emotion"] = "neutral"
154
+
155
# --- Header ---------------------------------------------------------------
st.title("✨ Emotion Mirror Chatbot")
st.markdown("I'm a reactive AI agent that mirrors your emotions! Try words like *happy*, *sad*, or *awesome*")

# Current ASCII face, framed by the white .face-container card.
face_art = FACES[st.session_state.current_emotion]
with st.container():
    st.markdown(
        f"<div class='face-container'>\n{face_art}\n</div>",
        unsafe_allow_html=True,
    )

# Replay the transcript; the CSS class is derived from the message role
# ("user" -> .user-message, "bot" -> .bot-message).
for msg in st.session_state.messages:
    role = msg["role"]
    with st.chat_message(role):
        st.markdown(
            f"<div class='chat-message {role}-message'>{msg['content']}</div>",
            unsafe_allow_html=True,
        )
169
+
170
# --- Chat input handling --------------------------------------------------
if prompt := st.chat_input("How are you feeling today?"):
    # Record the user's message first so it survives the rerun below.
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Mirror the detected emotion in the ASCII face on the next render.
    emotion = detect_emotion(prompt)
    st.session_state.current_emotion = emotion

    # Build the bot reply: the matching face plus a short reaction.
    if emotion == "happy":
        response = FACES["happy"] + "\n\n🌟 That's wonderful to hear!"
        confetti_effect()
    elif emotion == "sad":
        response = FACES["sad"] + "\n\n" + random.choice(HELP_RESPONSES)
    elif emotion == "love":
        response = FACES["love"] + "\n\n💖 Love is in the air!"
    else:
        response = FACES["neutral"] + "\n\nTell me more about your feelings..."

    # Add bot response to chat history.
    st.session_state.messages.append({"role": "bot", "content": response})

    # Rerun to update the display. st.experimental_rerun() was deprecated in
    # Streamlit 1.27 and removed in 1.37; st.rerun() is the supported call.
    st.rerun()
195
+
196
# Reset button: wipe the transcript and return the face to neutral.
if st.button("Reset Conversation"):
    st.session_state.messages = []
    st.session_state.current_emotion = "neutral"
    # st.experimental_rerun() was removed in Streamlit 1.37 — use st.rerun().
    st.rerun()