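"""Studymaker2 Gradio app.

Uploads a PPTX file, extracts its text and slide images via ppt_parser, and answers
questions about the content with meta-llama/Llama-4-Scout-17B-16E-Instruct.
"""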
import os
import json
from PIL import Image
import torch
import gradio as gr
from transformers import (
    BlipImageProcessor,
    AutoTokenizer,
    Llama4ForConditionalGeneration,
)
from ppt_parser import transfer_to_structure

# ✅ Load Hugging Face token
hf_token = os.getenv("HF_TOKEN")
model_id = "meta-llama/Llama-4-Scout-17B-16E-Instruct"

# ✅ Load image processor, tokenizer, and model manually
image_processor = BlipImageProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
tokenizer = AutoTokenizer.from_pretrained(model_id, token=hf_token)
model = Llama4ForConditionalGeneration.from_pretrained(
    model_id,
    token=hf_token,
    attn_implementation="flex_attention",
    device_map="auto",
    torch_dtype=torch.bfloat16,
)

# ✅ Global state
extracted_text = ""
image_paths = []

def extract_text_from_pptx_json(parsed_json: dict) -> str:
    """Collect all paragraph text from the slide JSON produced by transfer_to_structure."""
    text = ""
    for slide in parsed_json.values():
        for shape in slide.values():
            if shape.get("type") == "group":
                for group_shape in shape.get("group_content", {}).values():
                    if group_shape.get("type") == "text":
                        for para_key, para in group_shape.items():
                            if para_key.startswith("paragraph_"):
                                text += para.get("text", "") + "\n"
            elif shape.get("type") == "text":
                for para_key, para in shape.items():
                    if para_key.startswith("paragraph_"):
                        text += para.get("text", "") + "\n"
    return text.strip()
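
# The traversal above assumes transfer_to_structure() returns JSON shaped roughly like
# the following (keys are illustrative; the exact schema is defined in ppt_parser):
# {
#     "slide_1": {
#         "shape_1": {"type": "text", "paragraph_1": {"text": "..."}},
#         "shape_2": {"type": "group", "group_content": {"shape_3": {"type": "text", "paragraph_1": {"text": "..."}}}},
#     }
# }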

def handle_pptx_upload(pptx_file):
    """Parse the uploaded PPTX, cache the extracted text and image paths, and return the text."""
    global extracted_text, image_paths
    tmp_path = pptx_file.name
    parsed_json_str, image_paths = transfer_to_structure(tmp_path, "images")
    parsed_json = json.loads(parsed_json_str)
    extracted_text = extract_text_from_pptx_json(parsed_json)
    return extracted_text or "No readable text found in slides."

def ask_llama(question):
    """Answer a question about the uploaded slides with Llama-4-Scout."""
    global extracted_text, image_paths
    if not extracted_text and not image_paths:
        return "Please upload and extract a PPTX file first."
    if not image_paths:
        return "No slide images were extracted from the PPTX, so there is no image to send to the model."
    # ✅ Use the first image only (you can expand to multiple with batching)
    image = Image.open(image_paths[0]).convert("RGB")
    vision_inputs = image_processor(images=image, return_tensors="pt").to(model.device)
    prompt = f"<|user|>\n{extracted_text}\n\nQuestion: {question}<|end|>\n<|assistant|>\n"
    text_inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    with torch.no_grad():
        output = model.generate(
            input_ids=text_inputs["input_ids"],
            pixel_values=vision_inputs["pixel_values"],
            max_new_tokens=256,
        )
    # Decode only the newly generated tokens, skipping the echoed prompt.
    response = tokenizer.decode(output[0][text_inputs["input_ids"].shape[-1]:], skip_special_tokens=True)
    return response.strip()
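
# Illustrative sketch only (not wired into the UI above): Hugging Face image processors
# also accept a list of images, so every extracted slide image could be encoded in one
# batch instead of only the first. The helper name below is hypothetical.
def encode_all_slide_images(paths):
    images = [Image.open(p).convert("RGB") for p in paths]
    return image_processor(images=images, return_tensors="pt").to(model.device)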

# ✅ Gradio UI
with gr.Blocks() as demo:
    gr.Markdown("## 🧠 Llama-4-Scout Multimodal Study Assistant")

    pptx_input = gr.File(label="📂 Upload PPTX File", file_types=[".pptx"])
    extract_btn = gr.Button("📜 Extract Text + Slides")
    extracted_output = gr.Textbox(label="📄 Slide Text", lines=10, interactive=False)
    extract_btn.click(handle_pptx_upload, inputs=[pptx_input], outputs=[extracted_output])

    question = gr.Textbox(label="❓ Ask a Question")
    ask_btn = gr.Button("💬 Ask Scout")
    ai_answer = gr.Textbox(label="🤖 Llama Answer", lines=6)
    ask_btn.click(ask_llama, inputs=[question], outputs=[ai_answer])
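
# Running locally: HF_TOKEN must hold a Hugging Face token with access to the gated
# meta-llama/Llama-4-Scout-17B-16E-Instruct repository; `python app.py` then launches
# the Gradio interface.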

if __name__ == "__main__":
    demo.launch()