htrflow_mcp / app.py
import json
import os
import tempfile
from pathlib import Path
from typing import Optional, Literal

import gradio as gr
import spaces
from htrflow.pipeline.pipeline import Pipeline
from htrflow.volume.volume import Collection
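# Default export format and the set of serializers offered in the UI.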
DEFAULT_OUTPUT = "alto"
CHOICES = ["txt", "alto", "page", "json"]
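# Predefined HTRflow pipeline templates: single-page letters and two-page spreads,
# each paired with an English (microsoft/trocr-base-handwritten) or historical
# Swedish (Riksarkivet) text recognition model.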
PIPELINE_CONFIGS = {
"letter_english": {
"steps": [
{
"step": "Segmentation",
"settings": {
"model": "yolo",
"model_settings": {"model": "Riksarkivet/yolov9-lines-within-regions-1"},
"generation_settings": {"batch_size": 8},
},
},
{
"step": "TextRecognition",
"settings": {
"model": "TrOCR",
"model_settings": {"model": "microsoft/trocr-base-handwritten"},
"generation_settings": {"batch_size": 16},
},
},
{"step": "OrderLines"},
]
},
"letter_swedish": {
"steps": [
{
"step": "Segmentation",
"settings": {
"model": "yolo",
"model_settings": {"model": "Riksarkivet/yolov9-lines-within-regions-1"},
"generation_settings": {"batch_size": 8},
},
},
{
"step": "TextRecognition",
"settings": {
"model": "TrOCR",
"model_settings": {"model": "Riksarkivet/trocr-base-handwritten-hist-swe-2"},
"generation_settings": {"batch_size": 16},
},
},
{"step": "OrderLines"},
]
},
"spread_english": {
"steps": [
{
"step": "Segmentation",
"settings": {
"model": "yolo",
"model_settings": {"model": "Riksarkivet/yolov9-regions-1"},
"generation_settings": {"batch_size": 4},
},
},
{
"step": "Segmentation",
"settings": {
"model": "yolo",
"model_settings": {"model": "Riksarkivet/yolov9-lines-within-regions-1"},
"generation_settings": {"batch_size": 8},
},
},
{
"step": "TextRecognition",
"settings": {
"model": "TrOCR",
"model_settings": {"model": "microsoft/trocr-base-handwritten"},
"generation_settings": {"batch_size": 16},
},
},
{"step": "ReadingOrderMarginalia", "settings": {"two_page": True}},
]
},
"spread_swedish": {
"steps": [
{
"step": "Segmentation",
"settings": {
"model": "yolo",
"model_settings": {"model": "Riksarkivet/yolov9-regions-1"},
"generation_settings": {"batch_size": 4},
},
},
{
"step": "Segmentation",
"settings": {
"model": "yolo",
"model_settings": {"model": "Riksarkivet/yolov9-lines-within-regions-1"},
"generation_settings": {"batch_size": 8},
},
},
{
"step": "TextRecognition",
"settings": {
"model": "TrOCR",
"model_settings": {"model": "Riksarkivet/trocr-base-handwritten-hist-swe-2"},
"generation_settings": {"batch_size": 16},
},
},
{"step": "ReadingOrderMarginalia", "settings": {"two_page": True}},
]
},
}
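# spaces.GPU requests a ZeroGPU slot for the duration of each call when the app
# runs on Hugging Face Spaces hardware.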
@spaces.GPU
def process_htr(
    image_path: str,
    document_type: Literal["letter_english", "letter_swedish", "spread_english", "spread_swedish"] = "letter_swedish",
    output_format: Literal["txt", "alto", "page", "json"] = DEFAULT_OUTPUT,
    custom_settings: Optional[str] = None,
) -> str:
"""
Process handwritten text recognition and return extracted text with specified format file.
Args:
image_path (str): Path to the image file to process
document_type (str): Type of document processing template to use
output_format (str): Output format for the processed file
custom_settings (str): Optional custom pipeline settings as JSON
Returns:
str: The path to the output file or error message
"""
if not image_path:
return "Error: No image provided"
try:
original_filename = Path(image_path).stem or "output"
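        # When provided, custom_settings must be a JSON object with the same shape as the
        # entries in PIPELINE_CONFIGS (a top-level "steps" list of pipeline step dicts).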
if custom_settings:
try:
config = json.loads(custom_settings)
except json.JSONDecodeError:
return "Error: Invalid JSON in custom_settings parameter"
else:
config = PIPELINE_CONFIGS[document_type]
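        # Wrap the single input image in an HTRflow Collection and build the pipeline
        # from the chosen (or custom) configuration.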
collection = Collection([image_path])
pipeline = Pipeline.from_config(config)
try:
processed_collection = pipeline.run(collection)
except Exception as pipeline_error:
return f"Error: Pipeline execution failed: {str(pipeline_error)}"
temp_dir = Path(tempfile.mkdtemp())
export_dir = temp_dir / output_format
processed_collection.save(directory=str(export_dir), serializer=output_format)
output_file_path = None
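        # HTRflow chooses its own file names on export; rename the first exported file
        # so the download keeps the original image's base name.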
for root, _, files in os.walk(export_dir):
for file in files:
old_path = os.path.join(root, file)
file_ext = Path(file).suffix
new_filename = f"{original_filename}.{output_format}" if not file_ext else f"{original_filename}{file_ext}"
new_path = os.path.join(root, new_filename)
os.rename(old_path, new_path)
output_file_path = new_path
break
if output_file_path and os.path.exists(output_file_path):
return output_file_path
else:
return "Error: Failed to generate output file"
except Exception as e:
return f"Error: HTR processing failed: {str(e)}"
def extract_text_from_collection(collection: Collection) -> str:
text_lines = []
for page in collection.pages:
for node in page.traverse():
if hasattr(node, "text") and node.text:
text_lines.append(node.text)
return "\n".join(text_lines)
def create_htrflow_mcp_server():
demo = gr.Interface(
fn=process_htr,
inputs=[
gr.Image(type="filepath", label="Upload Image or Enter URL"),
gr.Dropdown(choices=["letter_english", "letter_swedish", "spread_english", "spread_swedish"], value="letter_swedish", label="Document Type"),
gr.Dropdown(choices=CHOICES, value=DEFAULT_OUTPUT, label="Output Format"),
gr.Textbox(label="Custom Settings (JSON)", placeholder="Optional custom pipeline settings", value=""),
],
outputs=gr.File(label="Download Output File"),
title="HTRflow MCP Server",
description="Process handwritten text from uploaded file or URL and get output file in specified format",
api_name="process_htr",
)
return demo
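# Sketch (untested) of calling the running app programmatically with gradio_client;
# the URL and file name below are placeholders, not part of this project.
#
#   from gradio_client import Client, handle_file
#
#   client = Client("http://127.0.0.1:7860")
#   output_path = client.predict(
#       handle_file("letter.jpg"),  # image_path
#       "letter_swedish",           # document_type
#       "alto",                     # output_format
#       "",                         # custom_settings (empty string = use the template)
#       api_name="/process_htr",
#   )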
if __name__ == "__main__":
demo = create_htrflow_mcp_server()
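    # mcp_server=True exposes the interface's API endpoint as an MCP tool alongside the web UI.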
demo.launch(mcp_server=True, share=False, debug=True)