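"""AI-powered study assistant for PowerPoint lectures (Gradio app).

Parses an uploaded .pptx with ppt_parser, summarizes the extracted slide text
with facebook/bart-large-cnn, and answers follow-up questions about the
content with Gemini.
"""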
import gradio as gr
import google.generativeai as genai
from transformers import pipeline
import json
from tempfile import NamedTemporaryFile
from ppt_parser import transfer_to_structure # <- FIXED import
# Use your Gemini API key
GOOGLE_API_KEY = "AIzaSyA8fWpwJE21zxpuN8Fi8Qx9-iwx3d_AZiw"
genai.configure(api_key=GOOGLE_API_KEY)
summarizer = pipeline("summarization", model="facebook/bart-large-cnn")
gemini_model = genai.GenerativeModel("models/gemini-1.5-flash")
extracted_text = ""
def extract_text_from_pptx_json(parsed_json: dict) -> str:
    """Collect the text of every paragraph in the ppt_parser JSON structure."""
    extracted_text = ""
    for slide_key, slide in parsed_json.items():
        for shape_key, shape in slide.items():
            if shape.get('type') == 'group':
                # Grouped shapes nest their members under 'group_content'.
                group = shape.get('group_content', {})
                for _, group_shape in group.items():
                    if group_shape.get('type') == 'text':
                        for para_key, para in group_shape.items():
                            if para_key.startswith("paragraph_"):
                                extracted_text += para.get("text", "") + "\n"
            elif shape.get('type') == 'text':
                for para_key, para in shape.items():
                    if para_key.startswith("paragraph_"):
                        extracted_text += para.get("text", "") + "\n"
    return extracted_text.strip()
def handle_pptx_upload(pptx_file):
    global extracted_text
    if pptx_file is None:
        return "Please upload a PPTX file first."
    # Depending on the Gradio version, gr.File hands the callback either a
    # file path (str) or a temp-file wrapper; copy the upload to a fresh
    # temporary .pptx before parsing.
    src_path = pptx_file if isinstance(pptx_file, str) else pptx_file.name
    with open(src_path, "rb") as src, NamedTemporaryFile(delete=False, suffix=".pptx") as tmp:
        tmp.write(src.read())
        tmp_path = tmp.name
    parsed_json_str, _, _ = transfer_to_structure(tmp_path, "images")
    parsed_json = json.loads(parsed_json_str)
    extracted_text = extract_text_from_pptx_json(parsed_json)
    return extracted_text or "No readable text found in slides."
def summarize_text():
    global extracted_text
    if not extracted_text:
        return "Please upload and extract text from a PPTX file first."
    # bart-large-cnn only accepts ~1024 tokens, so truncate longer decks.
    summary = summarizer(extracted_text, max_length=200, min_length=50,
                         do_sample=False, truncation=True)[0]['summary_text']
    return summary
def clarify_concept(question):
    global extracted_text
    if not extracted_text:
        return "Please upload and extract text from a PPTX file first."
    prompt = f"Context:\n{extracted_text}\n\nQuestion: {question}"
    response = gemini_model.generate_content(prompt)
    return response.text if response else "No response from Gemini."
# Gradio Interface
with gr.Blocks() as demo:
    gr.Markdown("## AI-Powered Study Assistant for PowerPoint Lectures")

    pptx_input = gr.File(label="Upload Lecture File (.pptx)", file_types=[".pptx"])
    extract_btn = gr.Button("Extract & Summarize")
    extracted_output = gr.Textbox(label="Extracted Text", lines=10, interactive=False)
    summary_output = gr.Textbox(label="Summary", interactive=False)

    # Run extraction first, then summarize the freshly extracted text.
    extract_btn.click(handle_pptx_upload, inputs=[pptx_input], outputs=[extracted_output]).then(
        summarize_text, outputs=[summary_output]
    )

    question = gr.Textbox(label="Ask a Question")
    ask_btn = gr.Button("Ask Gemini")
    ai_answer = gr.Textbox(label="Gemini Answer", lines=4)
    ask_btn.click(clarify_concept, inputs=[question], outputs=[ai_answer])
if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860, share=True)