# Receipt RAG demo: OCR a receipt with EasyOCR, index the text with LlamaIndex.
import os
import easyocr
import gradio as gr
from PIL import Image
from llama_index.core import Settings
from llama_index.llms.gemini import Gemini
from llama_index.core import Document, VectorStoreIndex
from llama_index.embeddings.gemini import GeminiEmbedding
# English-only OCR reader (downloads EasyOCR detection/recognition models on first use).
reader = easyocr.Reader(['en'])
# Gemini LLM and embedding model; both read the API key from the GEMINI_API_KEY
# env var (None if unset — calls will fail at request time, not here).
llm = Gemini(api_key=os.getenv('GEMINI_API_KEY'), model_name="models/gemini-2.0-flash")
gemini_embedding_model = GeminiEmbedding(api_key=os.getenv('GEMINI_API_KEY'), model_name="models/embedding-001")
# Register both globally so VectorStoreIndex picks them up without explicit wiring.
Settings.llm = llm
Settings.embed_model = gemini_embedding_model
def inference(img_path, width_ths):
    """OCR a receipt image, persist a RAG index of the text, and return the text.

    Args:
        img_path: Image input from the Gradio component; passed straight
            through to EasyOCR's ``readtext`` (path or array — TODO confirm
            which Gradio delivers here).
        width_ths: Horizontal distance threshold for merging adjacent
            bounding boxes into one text line.

    Returns:
        The extracted text, one detected line per row.
    """
    detected_lines = reader.readtext(
        img_path,
        detail=0,           # return plain strings, no boxes/confidences
        slope_ths=0.7,
        ycenter_ths=0.9,
        height_ths=0.8,
        width_ths=width_ths,
        add_margin=0.2,
    )
    text = "\n".join(detected_lines)
    # Embed the OCR text and persist the index so a later querying step can
    # reload it from disk. NOTE(review): each call overwrites the same
    # directory — presumably intentional for a single-receipt demo.
    index = VectorStoreIndex.from_documents([Document(text=text)])
    index.storage_context.persist(persist_dir="./receiptsembeddings")
    return text
# Gradio front end: upload a receipt, tune the box-merge threshold, see the OCR text.
title = "Receipt RAG"
description = "A simple Gradio interface to query receipts using RAG"
# Example inputs: (image path, width_ths) pairs shown under the interface.
examples = [
    ["data/receipt_00000.JPG", 7.7],
    ["data/receipt_00001.jpg", 7.7],
]
demo = gr.Interface(
    fn=inference,
    inputs=[
        gr.Image(width=320, height=320, label="Input Receipt"),
        gr.Slider(0, 10, 7.7, 0.1, label="Width Threshold to merge bounding boxes"),
    ],
    outputs=[gr.Textbox(label="OCR Output", type="text")],
    title=title,
    description=description,
    examples=examples,
)
demo.launch()