NandanData committed on
Commit fd123ae · verified · 1 parent: b4086c2

Update app.py

Files changed (1): app.py (+415, -415)
app.py CHANGED
@@ -1,415 +1,415 @@
  import os
  from dotenv import load_dotenv
  from langchain.prompts import PromptTemplate
  from langchain.llms import CTransformers
  from langchain.chains import LLMChain
  from langchain.embeddings import HuggingFaceEmbeddings
  from pinecone import Pinecone
  from langchain_pinecone import PineconeVectorStore
  from langchain.schema import BaseRetriever, Document
  from pydantic import BaseModel, Field
  from typing import List
  import streamlit as st
  from googletrans import Translator
  import datetime
  import time
  import asyncio


  from langchain.schema import BaseRetriever, Document
  from langchain_pinecone import PineconeVectorStore
  from typing import List
  from pydantic import BaseModel, Field




- os.environ['PINECONE_API_KEY'] = 'c74ab656-6afe-47b2-a622-f24caa39f5bc'  # Replace with your actual API key
+ os.environ['PINECONE_API_KEY'] = 'YOUR_PINECONE_API_KEY'  # Replace with your actual API key
  os.environ['PINECONE_ENVIRONMENT'] = 'us-east-1'

  # Load environment variables
  load_dotenv()

  # Initialize Pinecone
  pc = Pinecone(api_key=os.environ['PINECONE_API_KEY'], environment=os.environ['PINECONE_ENVIRONMENT'])

  # Define index name and namespace
  index_name = "bhagavadgita"
  namespace = "2MAN3D"

  # Connect to the index
  index = pc.Index(index_name)

  # Define a function to download embeddings
  def download_hugging_face_embeddings():
      return HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")

  # Initialize the embeddings
  embeddings = download_hugging_face_embeddings()

  class CustomPineconeRetriever(BaseRetriever):
      vectorstore: PineconeVectorStore = Field(...)

      class Config:
          arbitrary_types_allowed = True

      def get_relevant_documents(self, query: str) -> List[Document]:
          # Retrieve relevant documents from Pinecone
          return self.vectorstore.similarity_search(query)

      async def aget_relevant_documents(self, query: str) -> List[Document]:
          # Handle asynchronous retrieval
          # Call the synchronous method in an async context
          return self.get_relevant_documents(query)

  # Load the index into PineconeVectorStore
  docsearch = PineconeVectorStore(index=index, embedding=embeddings, namespace=namespace)
  retriever = CustomPineconeRetriever(vectorstore=docsearch)

  # Define a refined prompt template
  PROMPT_TEMPLATE = """
  You are Krishna, the divine speaker of the Bhagavad Gita. Speak with wisdom and provide insights based only on the teachings of the Bhagavad Gita, tailored to help a human seeking knowledge.

  Context: {context}
  Query: {query}

  Answer:
  """

  PROMPT = PromptTemplate(
      template=PROMPT_TEMPLATE,
      input_variables=["context", "query"]
  )

  # Initialize the LLM
  llm = CTransformers(
      model="model/llama-2-7b-chat.ggmlv3.q4_0.bin",
      model_type="llama",
      config={'max_new_tokens': 512, 'temperature': 0.8}
  )

  # Create a simple LLMChain
  llm_chain = LLMChain(
      llm=llm,
      prompt=PROMPT
  )

  def log_query_response(query, response):
      """Log the query and response to a file."""
      with open("logs.txt", "a") as log_file:
          timestamp = datetime.datetime.now().isoformat()
          log_file.write(f"{timestamp} - Query: {query}\n")
          log_file.write(f"{timestamp} - Response: {response}\n\n")

  async def retrieve_relevant_documents_async(query: str) -> List[Document]:
      return await retriever.aget_relevant_documents(query)

  async def generate_response_async(query: str, context: str) -> str:
      relevant_docs = await retrieve_relevant_documents_async(query)
      context_from_docs = " ".join([doc.page_content for doc in relevant_docs])
      enriched_context = context + " " + context_from_docs

      input_data = {"context": enriched_context, "query": query}
      response = llm_chain(input_data)
      return response['text']





  # Set page configuration
  st.set_page_config(page_title="Bhagavad Gita Assistant", page_icon="📖", layout="wide")

  # Add custom CSS for tab styling and animations
  st.markdown("""
  <style>
  /* Tab Container */
  .tab-container {
      margin-top: 20px;
      padding: 10px;
      border-radius: 8px;
      border: 1px solid #444;
      background-color: #222;
      color: #ddd;
  }

  /* Tab Headers */
  .stTabs [data-baseweb="tab"] {
      background-color: #333;
      color: #ddd;
      border-radius: 8px;
      border: 1px solid #444;
      padding: 10px 20px;
      font-weight: bold;
      cursor: pointer;
      text-align: center;
  }

  /* Tab Headers Hover Effect */
  .stTabs [data-baseweb="tab"]:hover {
      background-color: #444;
  }

  /* Tab Content */
  .stTabs [data-baseweb="tab-content"] {
      padding: 20px;
      background-color: #1e1e1e;
      border-radius: 8px;
      border: 1px solid #333;
      margin-top: -1px; /* Overlap border */
      color: #ddd;
  }

  /* Tab Content Animation */
  @keyframes slideIn {
      from {
          opacity: 0;
          transform: translateY(-10px);
      }
      to {
          opacity: 1;
          transform: translateY(0);
      }
  }
  .stTabs [data-baseweb="tab-content"] {
      animation: slideIn 0.5s ease-out;
  }
  </style>
  """, unsafe_allow_html=True)


  st.header("Welcome to the Bhagavad Gita Assistant")
  st.markdown("Welcome to the Bhagavad Gita Assistant on LLAMA 2. Ask your questions and get insightful answers based on the Bhagavad Gita.")
  st.markdown("Please wait 50 seconds to 1 minute for the response because it is hosted on my local machine.")

  translator = Translator()

  # Initialize session state for conversation history
  if 'conversation_history' not in st.session_state:
      st.session_state['conversation_history'] = []

  # Tabs for Chat, Project Details, Mechanism, Logic, and Tech Used
  tabs = st.tabs(["Chat", "Project Details", "Mechanism", "Logic", "Detailed Logic", "Tech Used", "Logs"])

  if 'response' not in st.session_state:
      st.session_state['response'] = ""
  if 'translated_response' not in st.session_state:
      st.session_state['translated_response'] = ""
  if 'response_time' not in st.session_state:
      st.session_state['response_time'] = 0



  with tabs[0]:
      st.header("Chat with Krishna")
      st.markdown("""
  **Ask Krishna Anything:** Use this tab to interact with Krishna, the orator of the Bhagavad Gita.
  Your questions will be answered based on the wisdom of the Bhagavad Gita. Please allow up to 40 seconds for a response.

  **How to Use:**
  - **Enter your query** in the text input field.
  - **Submit** the query to get a response from Krishna.
  - **Translate** the response to your preferred language if needed.

  **Tips for Better Responses:**
  - Be specific in your queries.
  - Provide context where possible.
  """)
      user_query = st.text_input("Enter your query:", placeholder="e.g., What is the meaning of life?")
      submit_query = st.button("Submit")
      language_option = st.selectbox("Choose a language to translate the response:", ["None", "Hindi", "Bengali", "Tamil", "Telugu", "Marathi"])
      translate_button = st.button("Translate Response")

      if submit_query and user_query:
          start_time = time.time()
          with st.spinner('Please wait...'):
              test_context = "You are Krishna, the divine speaker of the Bhagavad Gita. Speak with wisdom and provide insights based only on the teachings of the Bhagavad Gita."

              try:
                  # Run the response generation asynchronously
                  response = asyncio.run(generate_response_async(user_query, test_context))

                  # Update session state
                  st.session_state['response'] = response
                  st.session_state['conversation_history'].append({"query": user_query, "response": response})

                  end_time = time.time()
                  st.session_state['response_time'] = end_time - start_time

                  st.subheader("Response")
                  st.write(response)
                  st.subheader(f"Response Time: {st.session_state['response_time']:.2f} seconds")

                  # Log the query and response
                  log_query_response(user_query, response)
              except Exception as e:
                  st.error(f"Error: {str(e)}")

      if translate_button and language_option != "None":
          if st.session_state['response']:
              try:
                  translator = Translator()
                  translated_response = translator.translate(st.session_state['response'], dest=language_option.lower()).text
                  st.session_state['translated_response'] = translated_response

                  st.subheader(f"Translated Response ({language_option})")
                  st.write(translated_response)
              except Exception as e:
                  st.error(f"Error translating response: {str(e)}")
          else:
              st.error("No response available for translation.")

      # Display original response in the same tab
      if st.session_state['response']:
          st.subheader("Original Response (English)")
          st.write(st.session_state['response'])

  with tabs[1]:
      st.header("Project Details")
      st.markdown("""
  **Project Name:** Bhagavad Gita Assistant
  **Creator:** Nandan

  **Overview:**
  This project leverages advanced AI models and vector search technologies to provide insightful answers based on the Bhagavad Gita.

  **Features:**
  - AI-powered responses based on the Bhagavad Gita.
  - Multi-language support for translations.
  - Detailed logs and analytics.

  **Objectives:**
  - To provide accurate and contextually relevant answers.
  - To optimize response time and user experience.
  """)



  with tabs[2]:
      st.header("Mechanism")
      st.markdown("""
  **How It Works:**

  1. **User Query:** The user inputs a query.
  2. **Semantic Search:** The query is used to perform a semantic search on a vector database (Pinecone) containing pre-indexed chunks of the Bhagavad Gita text.
  3. **Retrieve Similar Chunks:** The search retrieves chunks of text that are semantically similar to the user's query.
  4. **Generate Response:** The retrieved chunks, along with the user query, are sent to the AI model (LLAMA 2) to generate a final response based on the Bhagavad Gita.

  **Technologies Used:**
  - **Pinecone:** For vector-based retrieval.
  - **LangChain:** For managing prompts and responses.
  - **CTransformers:** For handling the AI model.
  - **Google Translator:** For translating responses.
  """)

  with tabs[3]:
      st.header("Logic")
      st.markdown("""
  **Detailed Logic Behind the System:**

  1. **User Query Submission:** The user submits a query through the interface.
  2. **Semantic Search:** The system performs a semantic search using Pinecone to find text chunks that are contextually relevant to the query.
  3. **Context Retrieval:** Relevant text chunks are retrieved and combined with the query to form a detailed context.
  4. **Response Generation:** The AI model (LLAMA 2) processes the combined context and query to generate a response based on the Bhagavad Gita.

  **Why This Approach:**
  - **Semantic Search:** Ensures that the responses are relevant to the user's query by leveraging advanced vector search capabilities.
  - **Detailed Context:** Provides richer and more accurate responses by combining relevant text chunks and historical conversation.
  - **AI Model:** Utilizes LLAMA 2's language generation capabilities to create meaningful and contextually appropriate answers.

  **Packages Used:**
  - **Streamlit:** For creating the web interface.
  - **LangChain:** For managing prompt templates and LLM chains.
  - **Pinecone:** For vector-based search and retrieval.
  - **CTransformers:** For loading and using the AI model.
  - **Google Translator:** For translating responses.
  """)

  with tabs[4]:
      st.header("Detailed Logic")
      st.markdown("""
  1. **User Query Input:**
      - **Package:** `streamlit`
      - **Purpose:** Collects the user's query through a text input field on the web interface.
      - **Usage:** Allows users to ask questions related to the Bhagavad Gita.
      - **Code:**
      ```python
      user_query = st.text_input("Enter your query:", placeholder="e.g., What is life?")
      ```

  2. **Semantic Search:**
      - **Packages:** `langchain`, `pinecone`
      - **Purpose:** Performs a semantic search on the vector database to find text chunks related to the user's query.
      - **Usage:**
          - **Pinecone:** Stores and searches pre-embedded text chunks of the Bhagavad Gita.
          - **Langchain:** Connects Pinecone with the search logic.
      - **How It Works:**
          - Uses asynchronous methods to improve performance and avoid blocking.
      - **Code:**
      ```python
      relevant_docs = retriever.get_relevant_documents(user_query)
      ```

  3. **Retrieve Similar Chunks:**
      - **Purpose:** Retrieves text chunks that are semantically similar to the user's query.
      - **How It Works:**
          - **Context from Documents:** Extracts relevant text based on semantic similarity.
          - **Conversation History:** Includes previous interactions to provide more relevant responses.
      - **Code:**
      ```python
      context_from_docs = " ".join([doc.page_content for doc in relevant_docs])
      conversation_history = " ".join([f"User: {entry['query']}\nAssistant: {entry['response']}" for entry in st.session_state['conversation_history']])
      enriched_context = test_context + " " + context_from_docs + " " + conversation_history
      ```

  4. **Generate Response:**
      - **Packages:** `langchain`, `CTransformers`
      - **Purpose:** Uses the AI model (LLAMA 2) to generate a response based on the query and the enriched context.
      - **Usage:**
          - **Langchain:** Manages the interaction with the AI model using `PromptTemplate` and `LLMChain`.
          - **CTransformers:** Loads and runs the LLAMA 2 model.
      - **How It Works:**
          - **Prompt Template:** Structures the input for the AI model.
          - **LLMChain:** Executes the model's prompt chain.
          - **Asynchronous Response Generation:** Optimizes performance by running asynchronously.
      - **Code:**
      ```python
      response = llm_chain(input_data)
      ```

  5. **Logging Queries and Responses:**
      - **Purpose:** Records queries and responses for debugging and tracking.
      - **How It Works:**
          - Logs are saved to a file with timestamps for future reference.
      - **Code:**
      ```python
      def log_query_response(query, response):
          with open("logs.txt", "a") as log_file:
              timestamp = datetime.datetime.now().isoformat()
              log_file.write(f"{timestamp} - Query: {query}\n")
              log_file.write(f"{timestamp} - Response: {response}\n\n")
      ```
  """)


  with tabs[5]:
      st.header("Tech Used")
      st.markdown("""
  - **Streamlit:** For the web interface.
  - **LangChain:** For prompt templates and chains.
  - **Pinecone:** For vector search and retrieval.
  - **CTransformers:** For loading and using the AI model (LLAMA 2).
  - **Hugging Face:** For text embeddings.
  - **Python:** Language.


  """)

  with tabs[6]:
      st.header("Query and Response Logs")
      if os.path.exists('logs.txt'):
          with open('logs.txt', 'r') as log_file:
              log_content = log_file.read()
          st.text_area("Logs", log_content, height=300)
      else:
          st.write("No logs available.")
 
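With the hardcoded key gone, `PINECONE_API_KEY` has to reach `os.environ` another way, and the app already calls `load_dotenv()`, so a `.env` file is the natural path. Below is a minimal sketch of that flow (the `.env` contents and error message are illustrative, not part of the commit). Note that `load_dotenv()` does not override variables that are already set, so the placeholder assignment on file line 27 must be replaced or removed for a `.env` value to take effect:

```python
import os
from dotenv import load_dotenv

# .env, kept out of version control (illustrative contents):
#   PINECONE_API_KEY=your-real-key
#   PINECONE_ENVIRONMENT=us-east-1

load_dotenv()  # reads .env into os.environ; existing variables win by default

api_key = os.getenv('PINECONE_API_KEY')
if not api_key:
    raise RuntimeError("PINECONE_API_KEY is not set; add it to .env or export it before starting the app")
```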