import json
import os

import google.generativeai as genai
import gradio as gr
from transformers import pipeline

from ppt_parser import transfer_to_structure  # updated and working

# ✅ Google Gemini API key (read from the environment, e.g. a Space secret,
# rather than hardcoding the key in the source)
GOOGLE_API_KEY = os.environ.get("GOOGLE_API_KEY", "")
genai.configure(api_key=GOOGLE_API_KEY)

# ✅ Load models
summarizer = pipeline("summarization", model="facebook/bart-large-cnn")
gemini_model = genai.GenerativeModel("models/gemini-1.5-flash")

# ✅ Global variable to hold the extracted text
extracted_text = ""


# ✅ Flatten the extracted JSON into plain text
def extract_text_from_pptx_json(parsed_json: dict) -> str:
    """Walk the slide -> shape -> paragraph structure returned by
    transfer_to_structure and concatenate all text, one paragraph per line.

    Text shapes keep their paragraphs under keys named "paragraph_*";
    group shapes nest further text shapes under "group_content".
    """
    text = ""
    for _slide_key, slide in parsed_json.items():
        for _shape_key, shape in slide.items():
            if shape.get("type") == "group":
                # Groups nest their child shapes one level deeper
                group = shape.get("group_content", {})
                for _, group_shape in group.items():
                    if group_shape.get("type") == "text":
                        for para_key, para in group_shape.items():
                            if para_key.startswith("paragraph_"):
                                text += para.get("text", "") + "\n"
            elif shape.get("type") == "text":
                for para_key, para in shape.items():
                    if para_key.startswith("paragraph_"):
                        text += para.get("text", "") + "\n"
    return text.strip()


# ✅ Main file handler
def handle_pptx_upload(pptx_file):
    global extracted_text
    tmp_path = pptx_file.name  # use .name to avoid the NamedString error on Spaces
    parsed_json_str, _ = transfer_to_structure(tmp_path, "images")
    parsed_json = json.loads(parsed_json_str)
    extracted_text = extract_text_from_pptx_json(parsed_json)
    return extracted_text or "No readable text found in slides."


# ✅ Summary generator
def summarize_text():
    if not extracted_text:
        return "Please upload and extract text from a PPTX file first."
    # BART accepts roughly 1024 tokens, so truncate longer decks instead of erroring
    summary = summarizer(
        extracted_text,
        max_length=200,
        min_length=50,
        do_sample=False,
        truncation=True,
    )[0]["summary_text"]
    return summary


# ✅ Gemini-powered Q&A
def clarify_concept(question):
    if not extracted_text:
        return "Please upload and extract text from a PPTX file first."
    prompt = f"Context:\n{extracted_text}\n\nQuestion: {question}"
    response = gemini_model.generate_content(prompt)
    return response.text if response else "No response from Gemini."


# ✅ Gradio UI
with gr.Blocks() as demo:
    gr.Markdown("## 🧠 AI-Powered Study Assistant for PowerPoint Lectures")

    pptx_input = gr.File(label="📂 Upload PPTX File", file_types=[".pptx"])  # explicit file type filter fixes mobile upload
    extract_btn = gr.Button("📜 Extract & Summarize")
    extracted_output = gr.Textbox(label="📄 Extracted Text", lines=10, interactive=False)
    summary_output = gr.Textbox(label="📝 Summary", interactive=False)

    # Chain the handlers with .then() so summarization only runs after
    # extraction has populated the global extracted_text
    extract_btn.click(
        handle_pptx_upload, inputs=[pptx_input], outputs=[extracted_output]
    ).then(summarize_text, outputs=[summary_output])

    question = gr.Textbox(label="❓ Ask a Question")
    ask_btn = gr.Button("💬 Ask Gemini")
    ai_answer = gr.Textbox(label="🤖 Gemini Answer", lines=4)
    ask_btn.click(clarify_concept, inputs=[question], outputs=[ai_answer])

# ✅ Launch the app (no share=True needed on Spaces)
if __name__ == "__main__":
    demo.launch()