import streamlit as st
import torch
from transformers import LayoutLMv3Processor, LayoutLMv3ForTokenClassification
from PIL import Image
import io
import json
import pandas as pd
import plotly.express as px
import numpy as np
from typing import Dict, Any
import logging
import pytesseract
import re
from openai import OpenAI
import os
from dotenv import load_dotenv
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Load environment variables
load_dotenv()
# Initialize OpenAI client for Perplexity
client = OpenAI(
    api_key=os.getenv('PERPLEXITY_API_KEY'),
    base_url="https://api.perplexity.ai"
)
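
# Note: Perplexity serves an OpenAI-compatible chat-completions API, which is why the
# official openai client can be pointed at it via base_url. PERPLEXITY_API_KEY must be
# present in the environment (e.g. in the .env file loaded above) or requests will fail.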
# Initialize LayoutLM model
@st.cache_resource
def load_model():
    model_name = "microsoft/layoutlmv3-base"
    processor = LayoutLMv3Processor.from_pretrained(model_name)
    model = LayoutLMv3ForTokenClassification.from_pretrained(model_name)
    return processor, model
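
def layoutlm_token_predictions(image):
    """Sketch only: how the cached LayoutLMv3 processor/model could be run on a page image.

    This helper is an illustrative assumption and is not called anywhere in the app yet;
    the base checkpoint loaded above is not fine-tuned, so its token labels are not
    meaningful until the model is trained on a labeled document dataset.
    """
    processor, model = load_model()
    # The processor's built-in OCR (apply_ocr=True by default) extracts words and boxes.
    encoding = processor(image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**encoding)
    # One predicted class id per token.
    return outputs.logits.argmax(-1).squeeze().tolist()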
def extract_json_from_llm_output(llm_result):
    match = re.search(r'\{.*\}', llm_result, re.DOTALL)
    if match:
        return match.group(0)
    return None
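
# Example (hypothetical) usage: the LLM sometimes wraps its JSON in prose or markdown
# fences, so the helper above isolates the first {...} block before parsing, e.g.:
#   payload = extract_json_from_llm_output(llm_result)
#   data = json.loads(payload) if payload else {}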
def extract_fields(image_path):
    # OCR the page image
    text = pytesseract.image_to_string(Image.open(image_path))

    # Display OCR output for debugging
    st.subheader("Raw OCR Output")
    st.code(text)

    # Regex patterns for the target fields
    patterns = {
        "name": r"Mrs\s+\w+\s+\w+",
        "date": r"Date[:\s]+([\d/]+)",
        "product": r"\d+\s+\w+.*Style\s+\d+",
        "amount_paid": r"Total Paid\s+\$?([\d.,]+)",
        "receipt_no": r"Receipt No\.?\s*:?\s*(\d+)"
    }

    results = {}
    for field, pattern in patterns.items():
        match = re.search(pattern, text, re.IGNORECASE)
        if match:
            results[field] = match.group(1) if match.groups() else match.group(0)
        else:
            results[field] = None
    return results
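
# Hypothetical example: for a matching receipt, extract_fields might return
#   {"name": "Mrs Jane Smith", "date": "12/05/2024", "product": "1 Dress Style 102",
#    "amount_paid": "59.99", "receipt_no": "000123"}
# Fields whose pattern does not match are returned as None.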
def extract_with_perplexity_llm(ocr_text):
    prompt = f"""
    Extract the following fields from this receipt text:
    - name
    - date
    - product
    - amount_paid
    - receipt_no
    Text:
    \"\"\"{ocr_text}\"\"\"
    Return the result as a JSON object with those fields.
    """
    messages = [
        {
            "role": "system",
            "content": "You are an AI assistant that extracts structured information from text."
        },
        {
            "role": "user",
            "content": prompt
        }
    ]
    response = client.chat.completions.create(
        model="sonar-pro",
        messages=messages
    )
    return response.choices[0].message.content
def main():
    st.set_page_config(
        page_title="FormIQ - Intelligent Document Parser",
        page_icon="π",
        layout="wide"
    )

    st.title("FormIQ: Intelligent Document Parser")
    st.markdown("""
    Upload your documents to extract and validate information using advanced AI models.
    """)

    # Sidebar
    with st.sidebar:
        st.header("Settings")
        document_type = st.selectbox(
            "Document Type",
            options=["invoice", "receipt", "form"],
            index=0
        )
        confidence_threshold = st.slider(
            "Confidence Threshold",
            min_value=0.0,
            max_value=1.0,
            value=0.5,
            step=0.05
        )
        st.markdown("---")
        st.markdown("### About")
        st.markdown("""
        FormIQ uses LayoutLMv3 and Perplexity AI to extract and validate information from documents.
        """)

    # Main content
    uploaded_file = st.file_uploader(
        "Upload Document",
        type=["png", "jpg", "jpeg", "pdf"],
        help="Upload a document image to process"
    )
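
    # Note: PIL's Image.open below cannot read PDFs; a PDF upload would first need to be
    # rasterized to an image (for example with a library such as pdf2image) before OCR.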
    if uploaded_file is not None:
        # Display uploaded image
        image = Image.open(uploaded_file)
        st.image(image, caption="Uploaded Document", width=600)

        # Process button
        if st.button("Process Document"):
            with st.spinner("Processing document..."):
                try:
                    # Save the uploaded file to a temporary location
                    # (convert to RGB so images with an alpha channel can be written as JPEG)
                    temp_path = "temp_uploaded_image.jpg"
                    image.convert("RGB").save(temp_path)

                    # Extract fields using OCR + regex
                    fields = extract_fields(temp_path)

                    # Extract structured data with the Perplexity LLM
                    with st.spinner("Extracting structured data with Perplexity LLM..."):
                        try:
                            llm_result = extract_with_perplexity_llm(pytesseract.image_to_string(Image.open(temp_path)))
                            st.subheader("Structured Data (Perplexity LLM)")
                            st.code(llm_result, language="json")

                            # Display the regex-extracted fields alongside the LLM output
                            st.subheader("Extracted Fields")
                            fields_df = pd.DataFrame([fields])
                            st.dataframe(fields_df)
                        except Exception as e:
                            st.error(f"LLM extraction failed: {e}")
                except Exception as e:
                    logger.error(f"Error processing document: {str(e)}")
                    st.error(f"Error processing document: {str(e)}")
if __name__ == "__main__":
    main()