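"""Q&A From a File: upload a PDF or DOCX, then ask questions answered by a
Groq-hosted LLM using the most relevant chunks of the document (simple RAG).

Run with: streamlit run app.py  (the filename here is illustrative)
Requires the Groq API key in the "Groq_Api" environment variable.
"""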
import os
import streamlit as st
import PyPDF2
import docx
from sentence_transformers import SentenceTransformer, util
from groq import Groq
from langchain.text_splitter import RecursiveCharacterTextSplitter

# Set up Groq API
client = Groq(api_key=os.environ.get("Groq_Api"))

# Load embedding model
embedder = SentenceTransformer("all-MiniLM-L6-v2")
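# NOTE: Streamlit reruns this whole script on every interaction, so the model
# is reloaded each run; wrapping model loading in @st.cache_resource avoids that.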

# Title and UI
st.set_page_config(page_title="Q&A From a File", page_icon="📖")
st.title("📖 Q&A From a File")

# File Upload
uploaded_file = st.file_uploader("Upload a PDF or DOCX file", type=["pdf", "docx"])

if uploaded_file:
    text = ""
    
    # Extract text from PDF
    if uploaded_file.type == "application/pdf":
        pdf_reader = PyPDF2.PdfReader(uploaded_file)
        for page in pdf_reader.pages:
            # extract_text() can return None for image-only pages
            text += (page.extract_text() or "") + "\n"
    
    # Extract text from DOCX
    elif uploaded_file.type == "application/vnd.openxmlformats-officedocument.wordprocessingml.document":
        doc = docx.Document(uploaded_file)
        for para in doc.paragraphs:
            text += para.text + "\n"
    
    # Chunking the text
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=500, chunk_overlap=50
    )
    chunks = text_splitter.split_text(text)
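    # `chunks` is the retrieval corpus: overlapping ~500-character passages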
    
    # Embed chunks
    embeddings = embedder.encode(chunks, convert_to_tensor=True)
    
    # Query Input
    user_query = st.text_input("Ask a question about the file:")
    if user_query:
        # Embed the query and retrieve the most similar chunks; without this
        # step the model never sees the document at all
        query_embedding = embedder.encode(user_query, convert_to_tensor=True)
        hits = util.semantic_search(query_embedding, embeddings, top_k=3)[0]
        context = "\n\n".join(chunks[hit["corpus_id"]] for hit in hits)

        # Query Groq API with the retrieved context
        prompt = (
            "Answer the question using only the context below.\n\n"
            f"Context:\n{context}\n\nQuestion: {user_query}"
        )
        chat_completion = client.chat.completions.create(
            messages=[{"role": "user", "content": prompt}],
            model="llama-3.3-70b-versatile",
        )
        
        # Display answer
        st.subheader("Answer:")
        st.write(chat_completion.choices[0].message.content)