Huzaifa367 committed on
Commit
b8d7031
·
verified ·
1 Parent(s): 7243115

Delete summarizer.py

Browse files
Files changed (1) hide show
  1. summarizer.py +0 -100
summarizer.py DELETED
@@ -1,100 +0,0 @@
1
- import streamlit as st
2
- from PyPDF2 import PdfReader
3
- from langchain.text_splitter import RecursiveCharacterTextSplitter
4
- from langchain_groq import ChatGroq
5
- from langchain_community.embeddings import HuggingFaceInferenceAPIEmbeddings
6
- from langchain.vectorstores import FAISS
7
- from langchain.chains.question_answering import load_qa_chain
8
- from langchain.prompts import PromptTemplate
9
- import tempfile
10
- from gtts import gTTS
11
- import os
12
-
13
def text_to_speech(text):
    """Synthesize *text* to an MP3 with gTTS and play it in the Streamlit app.

    The temp file is created with delete=False so it survives closing the
    handle; the handle is closed *before* gTTS writes to the path (required
    on Windows, where an open NamedTemporaryFile cannot be reopened by name),
    and the file is always removed afterwards, even if saving or playback
    raises.

    Parameters
    ----------
    text : str
        The reply text to speak (English voice).
    """
    tts = gTTS(text=text, lang='en')
    audio_file = tempfile.NamedTemporaryFile(suffix=".mp3", delete=False)
    temp_filename = audio_file.name
    # fix: the original leaked this handle; close it so gTTS can write the path.
    audio_file.close()
    try:
        tts.save(temp_filename)
        st.audio(temp_filename, format='audio/mp3')
    finally:
        # fix: guarantee cleanup even when save()/st.audio() raises.
        os.remove(temp_filename)
20
-
21
def get_pdf_text(pdf_docs):
    """Concatenate the extracted text of every page of every uploaded PDF.

    Parameters
    ----------
    pdf_docs : iterable
        File-like objects (e.g. Streamlit uploads) readable by PyPDF2.PdfReader.

    Returns
    -------
    str
        All page text joined in order; "" for an empty input.
    """
    parts = []
    for pdf in pdf_docs:
        pdf_reader = PdfReader(pdf)
        for page in pdf_reader.pages:
            # fix: extract_text() returns None for pages with no text layer
            # (e.g. scanned images); the original `text += ...` raised
            # TypeError on such pages.
            parts.append(page.extract_text() or "")
    # join once instead of quadratic string +=
    return "".join(parts)
28
-
29
def get_text_chunks(text):
    """Split *text* into 1000-character chunks with 200-character overlap.

    Returns the list of chunk strings produced by langchain's
    RecursiveCharacterTextSplitter.
    """
    splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
    return splitter.split_text(text)
33
-
34
def get_vector_store(text_chunks, api_key):
    """Embed *text_chunks* via the HF Inference API and persist a FAISS
    index to the local "faiss_index" directory.

    Parameters
    ----------
    text_chunks : list[str]
        Chunked document text to embed.
    api_key : str
        HuggingFace Inference API key.
    """
    embedder = HuggingFaceInferenceAPIEmbeddings(
        api_key=api_key,
        model_name="sentence-transformers/all-MiniLM-l6-v2",
    )
    index = FAISS.from_texts(text_chunks, embedding=embedder)
    index.save_local("faiss_index")
38
-
39
def get_conversational_chain():
    """Build and return a "stuff" QA chain backed by Groq llama3-8b-8192.

    Reads the Groq key from the ``groq_api_key`` environment variable
    (KeyError if unset, as in the original).
    """
    prompt_template = """
    Answer the question as detailed as possible from the provided context, make sure to provide all the details, if the answer is not in
    provided context just say, "answer is not available in the context", don't provide the wrong answer\n\n
    Context:\n {context}?\n
    Question: \n{question}\n
    Answer:
    """

    # Prompt first, then model: the chain stuffs all retrieved docs into
    # {context} and asks {question}.
    qa_prompt = PromptTemplate(
        template=prompt_template,
        input_variables=["context", "question"],
    )
    llm = ChatGroq(
        temperature=0,
        groq_api_key=os.environ["groq_api_key"],
        model_name="llama3-8b-8192",
    )
    return load_qa_chain(llm, chain_type="stuff", prompt=qa_prompt)
55
-
56
def user_input(user_question, api_key):
    """Answer *user_question* from the persisted FAISS index and render each
    reply in the Streamlit app as text plus synthesized speech.

    Parameters
    ----------
    user_question : str
        The question typed by the user.
    api_key : str
        HuggingFace Inference API key for the embedder.
    """
    embeddings = HuggingFaceInferenceAPIEmbeddings(api_key=api_key, model_name="sentence-transformers/all-MiniLM-l6-v2")

    # NOTE(review): allow_dangerous_deserialization unpickles the local index;
    # acceptable only because "faiss_index" is produced by this app itself,
    # never from user-supplied files — confirm that stays true.
    new_db = FAISS.load_local("faiss_index", embeddings, allow_dangerous_deserialization=True)
    docs = new_db.similarity_search(user_question)

    chain = get_conversational_chain()

    response = chain(
        {"input_documents": docs, "question": user_question},
        return_only_outputs=True)
    # fix: removed leftover debug `print(response)` from the production path.

    st.write("Replies:")
    # output_text may be a single string or a list of strings; normalize.
    if isinstance(response["output_text"], str):
        response_list = [response["output_text"]]
    else:
        response_list = response["output_text"]

    for text in response_list:
        st.write(text)
        # Convert text to speech for each response
        text_to_speech(text)
80
-
81
- def main():
82
-
83
- st.set_page_config(layout="centered")
84
- st.header("Chat with DOCS")
85
- st.markdown("<h1 style='font-size:20px;'>ChatBot by Muhammad Huzaifa</h1>", unsafe_allow_html=True)
86
- api_key = st.secrets["inference_api_key"]
87
-
88
-
89
- with st.sidebar:
90
- st.header("Chat with PDF")
91
- # st.title("Menu:")
92
- if st.button("Chat With PDF"):
93
- st.switch_page('/app.py')
94
-
95
- # Check if any document is uploaded
96
- if pdf_docs:
97
- user_question = st.text_input("Ask a question from the Docs")
98
- if user_question:
99
- user_input(user_question, api_key)
100
- else: