Huzaifa367 commited on
Commit
d31d2c2
·
verified ·
1 Parent(s): 4f3cdb9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +28 -46
app.py CHANGED
@@ -19,25 +19,24 @@ def text_to_speech(text):
19
  os.remove(temp_filename)
20
 
21
  def get_pdf_text(pdf_docs):
22
- text=""
23
  for pdf in pdf_docs:
24
- pdf_reader= PdfReader(pdf)
25
  for page in pdf_reader.pages:
26
- text+= page.extract_text()
27
- return text
28
 
29
  def get_text_chunks(text):
30
  text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
31
  chunks = text_splitter.split_text(text)
32
  return chunks
33
-
34
  def get_vector_store(text_chunks, api_key):
35
  embeddings = HuggingFaceInferenceAPIEmbeddings(api_key=api_key, model_name="sentence-transformers/all-MiniLM-l6-v2")
36
  vector_store = FAISS.from_texts(text_chunks, embedding=embeddings)
37
  vector_store.save_local("faiss_index")
38
 
39
  def get_conversational_chain():
40
-
41
  prompt_template = """
42
  Answer the question as detailed as possible from the provided context, make sure to provide all the details, if the answer is not in
43
  provided context just say, "answer is not available in the context", don't provide the wrong answer\n\n
@@ -45,73 +44,56 @@ def get_conversational_chain():
45
  Question: \n{question}\n
46
  Answer:
47
  """
48
-
49
  model = ChatGroq(temperature=0, groq_api_key=os.environ["groq_api_key"], model_name="llama3-8b-8192")
50
-
51
  prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
52
  chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)
53
-
54
  return chain
55
 
56
  def user_input(user_question, api_key):
57
  embeddings = HuggingFaceInferenceAPIEmbeddings(api_key=api_key, model_name="sentence-transformers/all-MiniLM-l6-v2")
58
-
59
  new_db = FAISS.load_local("faiss_index", embeddings, allow_dangerous_deserialization=True)
60
  docs = new_db.similarity_search(user_question)
61
-
62
  chain = get_conversational_chain()
63
-
64
- response = chain(
65
- {"input_documents":docs, "question": user_question}
66
- , return_only_outputs=True)
67
-
68
- print(response) # Debugging line
69
-
70
  st.write("Replies:")
71
  if isinstance(response["output_text"], str):
72
  response_list = [response["output_text"]]
73
  else:
74
  response_list = response["output_text"]
75
-
76
  for text in response_list:
77
  st.write(text)
78
  # Convert text to speech for each response
79
  text_to_speech(text)
80
 
81
  def main():
82
- st.set_page_config(layout="centered")
83
  st.header("Chat with DOCS")
84
  st.markdown("<h1 style='font-size:20px;'>ChatBot by Muhammad Huzaifa</h1>", unsafe_allow_html=True)
85
  api_key = st.secrets["inference_api_key"]
86
-
87
- with st.sidebar:
88
- st.header("Chat with PDF")
89
-
90
- pdf_docs = st.file_uploader("Upload your PDF Files and Click on the Submit Button", accept_multiple_files=True, type=["pdf"])
91
-
92
- if st.button("Submit") and pdf_docs:
93
- with st.spinner("Processing..."):
 
 
94
  raw_text = get_pdf_text(pdf_docs)
95
  text_chunks = get_text_chunks(raw_text)
96
  get_vector_store(text_chunks, api_key)
97
- st.success("Processing Complete")
98
-
99
- if pdf_docs:
100
- user_question = st.text_input("Ask a question from the Docs")
101
- if user_question:
102
- user_input(user_question, api_key)
103
- else:
104
- st.write("Please upload PDF documents to proceed.")
105
-
106
  if raw_text:
107
- get_pdf_text(pdf_docs)
108
- # # Check if any document is uploaded
109
- # if pdf_docs:
110
- # user_question = st.text_input("Ask a question from the Docs")
111
- # if user_question:
112
- # user_input(user_question, api_key)
113
- # else:
114
- # st.write("Please upload a document first to ask questions.")
115
-
116
  if __name__ == "__main__":
117
  main()
 
19
  os.remove(temp_filename)
20
 
21
def get_pdf_text(pdf_docs):
    """Extract and concatenate the text of every page in the uploaded PDFs.

    Args:
        pdf_docs: iterable of file-like objects accepted by ``PdfReader``
            (e.g. Streamlit ``UploadedFile`` instances).

    Returns:
        A single string containing the text of all pages, in order.
    """
    text = ""
    for pdf in pdf_docs:
        pdf_reader = PdfReader(pdf)
        for page in pdf_reader.pages:
            # extract_text() returns None for pages with no extractable
            # text (e.g. scanned images); fall back to "" so the
            # concatenation never raises TypeError.
            text += page.extract_text() or ""
    return text
28
 
29
def get_text_chunks(text):
    """Split *text* into overlapping chunks suitable for embedding.

    Uses 1000-character chunks with a 200-character overlap so context
    is preserved across chunk boundaries.
    """
    splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
    return splitter.split_text(text)
33
+
34
def get_vector_store(text_chunks, api_key):
    """Embed *text_chunks* and persist a FAISS index to ./faiss_index.

    Args:
        text_chunks: list of strings to embed.
        api_key: HuggingFace Inference API key for the embedding model.
    """
    embedder = HuggingFaceInferenceAPIEmbeddings(
        api_key=api_key,
        model_name="sentence-transformers/all-MiniLM-l6-v2",
    )
    index = FAISS.from_texts(text_chunks, embedding=embedder)
    index.save_local("faiss_index")
38
 
39
def get_conversational_chain():
    """Build a 'stuff' question-answering chain backed by Groq llama3-8b-8192.

    The prompt instructs the model to answer only from the supplied
    context and to say so explicitly when the answer is absent.

    Returns:
        A loaded QA chain expecting ``input_documents`` and ``question``.
    """
    prompt_template = """
    Answer the question as detailed as possible from the provided context, make sure to provide all the details, if the answer is not in
    provided context just say, "answer is not available in the context", don't provide the wrong answer\n\n
    Context:\n {context}?\n
    Question: \n{question}\n

    Answer:
    """
    # NOTE(review): the Groq key is read from the environment, not from the
    # api_key used for embeddings — confirm both secrets are configured.
    model = ChatGroq(
        temperature=0,
        groq_api_key=os.environ["groq_api_key"],
        model_name="llama3-8b-8192",
    )
    prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
    return load_qa_chain(model, chain_type="stuff", prompt=prompt)
51
 
52
def user_input(user_question, api_key):
    """Answer *user_question* from the saved FAISS index and speak the replies.

    Args:
        user_question: the question typed by the user.
        api_key: HuggingFace Inference API key for the embedding model.
    """
    embedder = HuggingFaceInferenceAPIEmbeddings(
        api_key=api_key,
        model_name="sentence-transformers/all-MiniLM-l6-v2",
    )
    # NOTE(review): allow_dangerous_deserialization trusts the local pickle;
    # acceptable only because the index is written by this same app.
    db = FAISS.load_local("faiss_index", embedder, allow_dangerous_deserialization=True)
    relevant_docs = db.similarity_search(user_question)

    qa_chain = get_conversational_chain()
    response = qa_chain({"input_documents": relevant_docs, "question": user_question}, return_only_outputs=True)

    st.write("Replies:")
    output = response["output_text"]
    replies = [output] if isinstance(output, str) else output
    for reply in replies:
        st.write(reply)
        # Convert text to speech for each response
        text_to_speech(reply)
67
 
68
def main():
    """Streamlit entry point: upload PDFs, index them, then answer questions.

    Layout: sidebar for upload, col1 for controls/status, col2 for the
    extracted text preview.
    """
    st.set_page_config(layout="wide")
    st.header("Chat with DOCS")
    st.markdown("<h1 style='font-size:20px;'>ChatBot by Muhammad Huzaifa</h1>", unsafe_allow_html=True)
    api_key = st.secrets["inference_api_key"]

    # Sidebar column for file upload
    st.sidebar.header("Chat with PDF")
    pdf_docs = st.sidebar.file_uploader("Upload your PDF Files", accept_multiple_files=True, type=["pdf"])

    # Main column for displaying extracted text and user interaction
    col1, col2 = st.columns([1, 2])

    # Streamlit reruns this script on every widget interaction, so the
    # extracted text must survive reruns in session_state; the original kept
    # it in a local and raised NameError on `raw_text` after any rerun in
    # which Submit was not pressed.
    if "raw_text" not in st.session_state:
        st.session_state.raw_text = ""

    if pdf_docs and col1.button("Submit"):
        # st.spinner is a module-level context manager; a column
        # DeltaGenerator does not reliably expose it, so use the module form.
        with st.spinner("Processing..."):
            raw_text = get_pdf_text(pdf_docs)
            text_chunks = get_text_chunks(raw_text)
            get_vector_store(text_chunks, api_key)
        st.session_state.raw_text = raw_text
        col1.success("Processing Complete")

    # Show the question box once an index has been built. The original gated
    # this on a second `col1.success(...)` call, which duplicated the banner
    # and conditioned on its (always-truthy) return value.
    if st.session_state.raw_text:
        user_question = st.text_input("Ask a question from the Docs")
        if user_question:
            user_input(user_question, api_key)
        with col2:
            st.subheader("Extracted Text from PDF:")
            st.text(st.session_state.raw_text)


if __name__ == "__main__":
    main()