import gradio as gr
from transformers import pipeline, AutoProcessor, AutoModelForCausalLM
from datasets import load_dataset
import torch
import numpy as np
# Load the BLIP model for image captioning
caption_model = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")

# Load the SpeechT5 model for text-to-speech
synthesiser = pipeline("text-to-speech", model="microsoft/speecht5_tts")
# Load the Florence-2 model for OCR
ocr_device = "cuda:0" if torch.cuda.is_available() else "cpu"
ocr_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
ocr_model = AutoModelForCausalLM.from_pretrained(
    "microsoft/Florence-2-large", torch_dtype=ocr_dtype, trust_remote_code=True
).to(ocr_device)
ocr_processor = AutoProcessor.from_pretrained("microsoft/Florence-2-large", trust_remote_code=True)
# Load a speaker embedding (x-vector) for SpeechT5's voice
embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
speaker_embedding = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)
def process_image(image):
    try:
        # Generate a caption from the image
        caption = caption_model(image)[0]['generated_text']

        # Convert the caption to speech
        speech = synthesiser(
            caption,
            forward_params={"speaker_embeddings": speaker_embedding}
        )

        # Extract text (OCR) using Florence-2
        inputs = ocr_processor(text="<OCR>", images=image, return_tensors="pt").to(ocr_device, ocr_dtype)
        generated_ids = ocr_model.generate(
            input_ids=inputs["input_ids"],
            pixel_values=inputs["pixel_values"],
            max_new_tokens=4096,
            num_beams=3,
            do_sample=False
        )
        # Decode and post-process the OCR output, per the Florence-2 model card
        generated_text = ocr_processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
        extracted_text = ocr_processor.post_process_generation(
            generated_text, task="<OCR>", image_size=(image.width, image.height)
        )["<OCR>"]
        # Package the audio as (sample_rate, np.ndarray), the format gr.Audio expects
        audio = np.array(speech["audio"])
        rate = speech["sampling_rate"]

        # Return audio, caption, and extracted text
        return (rate, audio), caption, extracted_text
    except Exception as e:
        return None, f"Error: {str(e)}", ""
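# Quick local sanity check of process_image (a sketch, not part of the Space UI;
# "sample.jpg" is a placeholder path):
#   from PIL import Image
#   (rate, audio), caption, text = process_image(Image.open("sample.jpg"))
#   print(caption)
#   print(text)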
# Gradio interface
iface = gr.Interface(
    fn=process_image,
    inputs=gr.Image(type='pil', label="Upload an Image"),
    outputs=[
        gr.Audio(label="Generated Audio"),
        gr.Textbox(label="Generated Caption"),
        gr.Textbox(label="Extracted Text (OCR)")
    ],
    title="SeeSay with SpeechT5 and Florence-2 OCR",
    description="Upload an image to generate a caption, hear it read aloud via SpeechT5 speech synthesis, and extract any embedded text with Florence-2 OCR."
)

iface.launch()
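# Optional: persist the synthesized audio for offline listening (a sketch using
# rate/audio from the sanity check above; assumes scipy is installed, and
# "caption.wav" is a placeholder filename):
#   from scipy.io import wavfile
#   wavfile.write("caption.wav", rate, audio)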