# PDF Q&A chatbot demo: Gradio UI over a placeholder LLM / PDF-extraction pipeline.
import gradio as gr
import langchain.llms as llms
import langchain.llms.prompts as prompts
import langchain.pipelines as pipelines
from langchain.llms.responses import ResponseItem
def process_pdf(pdf_file):
    """Extract structured information from an uploaded PDF.

    Args:
        pdf_file (bytes): The uploaded PDF file content.

    Returns:
        dict: The information extracted from the PDF.

    Raises:
        NotImplementedError: If no extraction backend has been wired in.
    """
    # `extract_information_from_pdf` is a placeholder — plug in your preferred
    # extraction library here (e.g. Camelot, PyMuPDF, PDFMiner.Six).
    try:
        extracted_data = extract_information_from_pdf(pdf_file)
    except NameError:
        # Without this guard the placeholder call surfaces as a confusing
        # NameError; fail with an explicit, actionable message instead.
        raise NotImplementedError(
            "No PDF extraction backend is configured: define "
            "extract_information_from_pdf() or replace this call with your "
            "preferred extraction pipeline."
        ) from None
    return extracted_data
def answer_question(question, context, llm):
    """Answer a user's question from PDF-derived context via the LLaMA3 model.

    Args:
        question (str): The user's question.
        context (dict): Information previously extracted from the PDF.
        llm (llms.BaseLLM): The LLaMA3 language model instance.

    Returns:
        ResponseItem: Answer, score, and retrieval details for the question.
    """
    # Build the retrieval-augmented prompt. The template is left empty here;
    # substitute your own, e.g.
    # "The document says [RETRIEVAL]. Can you answer this question based on it: {question}?"
    rag_prompt = prompts.get_rag_answering_prompt(
        question,
        context,
        retrieval_template="",
    )
    generations = llm.run(rag_prompt, wait_for_sequences=True)
    # The model returns a sequence of candidates; the first is the best one.
    best = generations[0]
    return best
def chatbot(pdf_file, message, chat_history):
    """Handle one chat turn: optionally ingest a PDF, answer the question.

    Args:
        pdf_file (bytes, optional): Uploaded PDF content, or None.
        message (str): The user's question (may be empty).
        chat_history (list): Previous chat messages; mutated in place.

    Returns:
        list: The updated chat history including this turn's exchange.
    """
    # Bug fix: `context` was previously bound only inside the PDF branch,
    # so asking a question without uploading a PDF raised UnboundLocalError.
    context = {}
    if pdf_file is not None:
        context = process_pdf(pdf_file)
        chat_history.append("**You uploaded a PDF.**")
    if message:
        # Access the LLaMA3 model (replace with your setup).
        llm = llms.get_llm("facebook/bart-base")  # example model id
        response = answer_question(message, context, llm)
        chat_history.append(f"**User:** {message}")
        chat_history.append(f"**Chatbot:** {response.generated_text}")
    return chat_history
# Gradio interface setup.
# Fixes over the original wiring:
#   - chatbot() takes three arguments, but only two inputs were declared,
#     which raised a TypeError on every submission; chat history is now
#     threaded through gr.State so it persists across turns.
#   - gr.File has no type="pdf"; "binary" delivers bytes, matching what
#     process_pdf() expects.
#   - gr.Textbox takes `lines`, not `multiline`.
interface = gr.Interface(
    fn=chatbot,
    inputs=[
        gr.File(type="binary", label="Upload PDF (optional)"),
        gr.Textbox(label="Ask a question"),
        gr.State([]),  # chat_history carried between calls
    ],
    outputs=gr.Textbox(lines=10, label="Conversation"),
    title="PDF Q&A Chatbot with LLaMA3",
    description="Ask questions about the uploaded PDF or provide an empty file to use example content.",
)

if __name__ == "__main__":
    interface.launch()