import os
import gradio as gr
import pandas as pd
import plotly.express as px
from lexoid.api import parse
# Parser backends accepted by lexoid.api.parse; used as the UI dropdown choices.
parser_options = ["LLM_PARSE", "STATIC_PARSE", "AUTO"]
def run_parser(
    file,
    parser_type,
    model,
    pages_per_split,
    max_processes,
    as_pdf,
    x_tolerance,
    y_tolerance,
    save_dir,
    page_nums,
    router_priority,
    framework,
    temperature,
    depth,
    google_api_key,
    openai_api_key,
    huggingfacehub_api_token,
    together_api_key,
    openrouter_api_key,
    anthropic_api_key,
    fireworks_api_key,
    mistral_api_key,
):
    """Export the provided API keys to the environment and parse the document.

    Args:
        file: Uploaded document. With ``gr.File(type="filepath")`` this is a
            plain path string; older Gradio versions pass a tempfile-like
            object exposing ``.name``.
        parser_type: One of ``parser_options`` ("LLM_PARSE", "STATIC_PARSE",
            "AUTO"), forwarded to ``lexoid.api.parse``.
        model .. depth: Optional parse options forwarded as keyword arguments;
            ``None`` values are dropped so lexoid's own defaults apply.
        page_nums: Comma-separated page numbers as text, e.g. ``"1,3,5"``.
        *_api_key / *_api_token: Provider credentials; only non-empty values
            are written to ``os.environ`` so existing configuration is never
            clobbered by blank textboxes.

    Returns:
        str: Parsed content (raw text or joined segment contents), the
        stringified result dict as a fallback, or a user-facing error message
        for missing files / malformed page numbers.
    """
    if file is None:
        return "Please upload a file to parse."

    # Only export keys the user actually supplied; blank textboxes yield ""
    # and must not overwrite keys already present in the environment.
    api_keys = {
        "GOOGLE_API_KEY": google_api_key,
        "OPENAI_API_KEY": openai_api_key,
        "HUGGINGFACEHUB_API_TOKEN": huggingfacehub_api_token,
        "TOGETHER_API_KEY": together_api_key,
        "OPENROUTER_API_KEY": openrouter_api_key,
        "ANTHROPIC_API_KEY": anthropic_api_key,
        "FIREWORKS_API_KEY": fireworks_api_key,
        "MISTRAL_API_KEY": mistral_api_key,
    }
    for env_name, value in api_keys.items():
        if value:
            os.environ[env_name] = value

    # Validate the page-number list up front so a typo produces a readable
    # message instead of an unhandled ValueError in the UI.
    try:
        parsed_page_nums = (
            [int(num.strip()) for num in page_nums.split(",")] if page_nums else None
        )
    except ValueError:
        return "Invalid page numbers: use a comma-separated list such as 1,3,5."

    kwargs = {
        "model": model,
        "pages_per_split": pages_per_split,
        "max_processes": max_processes,
        "as_pdf": as_pdf,
        "x_tolerance": x_tolerance,
        "y_tolerance": y_tolerance,
        # Treat an empty textbox as "no save dir" rather than saving to "".
        "save_dir": save_dir or None,
        "page_nums": parsed_page_nums,
        "router_priority": router_priority,
        "framework": framework,
        "temperature": temperature,
        "depth": depth,
    }
    # Drop unset options so lexoid falls back to its own defaults.
    kwargs = {k: v for k, v in kwargs.items() if v is not None}

    # gr.File(type="filepath") yields a plain path string; fall back to
    # `.name` for tempfile-object inputs from older Gradio versions.
    path = file if isinstance(file, str) else file.name

    result = parse(path=path, parser_type=parser_type, **kwargs)
    if "raw" in result:
        return result["raw"]
    if "segments" in result:
        return "\n\n".join(seg.get("content", "") for seg in result["segments"])
    return str(result)
# Top-level Gradio UI: components created inside this context are laid out in
# declaration order, so statement order here is load-bearing.
with gr.Blocks(title="Lexoid Document Parser") as app:
    gr.Markdown(
        "## π Lexoid Document Parser\nUpload a document and customize how you'd like to parse it."
    )

    # Primary inputs: the document itself plus the core parser configuration.
    with gr.Row():
        file_input = gr.File(
            label="Upload Document",
            file_types=[".pdf", ".docx", ".html", ".txt"],
            type="filepath",
        )
        parser_type = gr.Dropdown(
            choices=parser_options, value="AUTO", label="Parser Type"
        )
        model_input = gr.Textbox(value="gemini-2.0-flash", label="LLM ID")
        framework = gr.Textbox(
            value="pdfplumber",
            label="Static Framework",
            placeholder="e.g., pdfplumber, slate",
        )

    # Less common tuning knobs, collapsed by default. Number fields default to
    # None so run_parser can drop them and defer to lexoid's own defaults.
    with gr.Accordion("Advanced Options", open=False):
        pages_per_split = gr.Slider(
            minimum=1, maximum=20, value=4, step=1, label="Pages per Split"
        )
        max_processes = gr.Slider(
            minimum=1, maximum=16, value=4, step=1, label="Max Parallel Processes"
        )
        as_pdf = gr.Checkbox(label="Convert to PDF before parsing")
        x_tolerance = gr.Number(label="X-axis Tolerance", value=None)
        y_tolerance = gr.Number(label="Y-axis Tolerance", value=None)
        save_dir = gr.Textbox(
            label="Save Directory",
            placeholder="Path to save intermediate files (optional)",
        )
        page_nums = gr.Textbox(
            label="Page Numbers",
            placeholder="Comma-separated page numbers (e.g., 1,3,5)",
        )
        router_priority = gr.Dropdown(
            choices=["speed", "accuracy"], value="accuracy", label="Router Priority"
        )
        temperature = gr.Number(label="LLM Temperature", value=None)
        depth = gr.Number(label="Recursive Depth", value=None)

    # Per-provider credentials; run_parser exports these to os.environ.
    with gr.Row():
        google_api_key = gr.Textbox(
            label="Google API Key", placeholder="Enter Google API Key"
        )
        openai_api_key = gr.Textbox(
            label="OpenAI API Key", placeholder="Enter OpenAI API Key"
        )
        huggingfacehub_api_token = gr.Textbox(
            label="HuggingFaceHub API Token",
            placeholder="Enter HuggingFaceHub API Token",
        )
        together_api_key = gr.Textbox(
            label="Together API Key", placeholder="Enter Together API Key"
        )
        openrouter_api_key = gr.Textbox(
            label="OpenRouter API Key", placeholder="Enter OpenRouter API Key"
        )
        anthropic_api_key = gr.Textbox(
            label="Anthropic API Key", placeholder="Enter Anthropic API Key"
        )
        fireworks_api_key = gr.Textbox(
            label="Fireworks API Key", placeholder="Enter Fireworks API Key"
        )
        mistral_api_key = gr.Textbox(
            label="Mistral API Key", placeholder="Enter Mistral API Key"
        )

    output = gr.Markdown(label="Parsed Output")
    parse_button = gr.Button("Parse Document")

    # Wire the button to run_parser; this inputs list must stay in the same
    # order as run_parser's positional parameters.
    parse_button.click(
        fn=run_parser,
        inputs=[
            file_input,
            parser_type,
            model_input,
            pages_per_split,
            max_processes,
            as_pdf,
            x_tolerance,
            y_tolerance,
            save_dir,
            page_nums,
            router_priority,
            framework,
            temperature,
            depth,
            google_api_key,
            openai_api_key,
            huggingfacehub_api_token,
            together_api_key,
            openrouter_api_key,
            anthropic_api_key,
            fireworks_api_key,
            mistral_api_key,
        ],
        outputs=output,
    )

    # Leaderboard table, loaded once at app construction from leaderboard.csv.
    # NOTE(review): read at import time — the CSV must sit in the working
    # directory or app startup fails; confirm deployment layout.
    df = pd.read_csv("leaderboard.csv")
    # Sort df by `sequence_matcher` in descending order and use new index as "Rank"
    df = df.sort_values(by="sequence_matcher", ascending=False).reset_index(drop=True)
    df.index += 1
    df.index.name = "Rank"
    leaderboard = gr.Dataframe(
        value=df,
        label="Leaderboard",
    )

    # Per-document score comparison, grouped bars by model.
    df = pd.read_csv("document_results.csv")
    fig = px.bar(
        df,
        x="Input File",
        y="sequence_matcher",
        color="model",
        labels={
            "Input File": "Document",
            "sequence_matcher": "Sequence Matcher Score",
            "model": "Model",
        },
        barmode="group",
        title="Sequence Matcher Scores by Document and Model",
    )
    gr.Plot(value=fig)

app.launch()