Hugging Face Spaces listing — app build currently failing ("Build error"). Source below.
| import streamlit as st | |
| from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline | |
| from langchain_community.embeddings import HuggingFaceEmbeddings | |
| from langchain_community.vectorstores import FAISS | |
| from langchain.llms import HuggingFacePipeline | |
| from langchain.chains import RetrievalQA | |
# Model repo/path for the LaMini-T5-738M seq2seq checkpoint used by load_llm().
# NOTE(review): a bare name like this resolves relative to the working dir or the
# HF Hub — confirm the model files are actually available at build time.
checkpoint = "LaMini-T5-738M"
def load_llm():
    """Build a LangChain LLM wrapper around a local LaMini-T5 pipeline.

    Loads the tokenizer and seq2seq model from ``checkpoint`` and wraps
    them in a HuggingFace ``text2text-generation`` pipeline with mild
    sampling (max 256 tokens, temperature 0.3, top-p 0.95).

    Returns:
        HuggingFacePipeline: a LangChain-compatible LLM.
    """
    lm_tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    lm_model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)

    # Generation settings kept in one place for readability.
    generation_kwargs = {
        "max_length": 256,
        "do_sample": True,
        "temperature": 0.3,
        "top_p": 0.95,
    }
    text2text_pipe = pipeline(
        'text2text-generation',
        model=lm_model,
        tokenizer=lm_tokenizer,
        **generation_kwargs,
    )
    return HuggingFacePipeline(pipeline=text2text_pipe)
def qa_llm():
    """Assemble the RetrievalQA chain: local LLM + FAISS retriever.

    Returns:
        RetrievalQA: a "stuff"-type chain over the persisted FAISS index
        that also returns the retrieved source documents.
    """
    llm = load_llm()
    embeddings = HuggingFaceEmbeddings(
        model_name="sentence-transformers/all-MiniLM-L6-v2"
    )
    # FIX: recent langchain-community versions refuse to unpickle a saved
    # FAISS index unless deserialization is explicitly opted into; without
    # this flag load_local() raises at startup (a likely cause of the
    # Space's "Build error"). Safe here only because "faiss_index" is a
    # locally created, trusted artifact — never enable this for indexes
    # from untrusted sources.
    db = FAISS.load_local(
        "faiss_index", embeddings, allow_dangerous_deserialization=True
    )
    retriever = db.as_retriever()
    qa = RetrievalQA.from_chain_type(
        llm=llm,
        chain_type="stuff",
        retriever=retriever,
        return_source_documents=True,
    )
    return qa
# Lazily-built singleton QA chain: loading the T5 model and the FAISS index
# is expensive, so build the chain once per process instead of per question.
_QA_CHAIN = None


def process_answer(instruction):
    """Answer *instruction* against the indexed PDF content.

    Args:
        instruction (str): the user's question, passed straight to the
            RetrievalQA chain.

    Returns:
        tuple: ``(answer, generated_text)`` where *answer* is the chain's
        ``"result"`` string and *generated_text* is the full chain output
        (including the retrieved source documents).
    """
    global _QA_CHAIN
    # FIX: previously the whole chain (model + embeddings + index) was
    # rebuilt on every call; reuse it across questions.
    if _QA_CHAIN is None:
        _QA_CHAIN = qa_llm()
    generated_text = _QA_CHAIN(instruction)
    answer = generated_text['result']
    return answer, generated_text
def main():
    """Streamlit entry point: render the PDF Q&A UI and answer on demand."""
    # NOTE(review): the title's trailing characters look mojibake'd
    # ("π¦π" — probably emoji mangled by a bad encoding round-trip);
    # kept byte-identical pending confirmation of the intended glyphs.
    st.title("Search Your PDF π¦π")
    with st.expander("About the App"):
        st.markdown(
            """
            This is a Generative AI powered Question and Answering app that responds to questions about your PDF File.
            """
        )
    question = st.text_area("Enter your Question")
    if st.button("Ask"):
        # FIX: don't run the (slow) QA chain on an empty/whitespace question.
        if not question.strip():
            st.warning("Please enter a question first.")
            return
        st.info("Your Question: " + question)
        st.info("Your Answer")
        answer, metadata = process_answer(question)
        st.write(answer)
        st.write(metadata)


if __name__ == '__main__':
    main()