Open-GAMMA / app-BACKUP.py
#!/usr/bin/env python
import os
import re
import json
import requests
from collections.abc import Iterator
from threading import Thread
import tempfile
import random
from typing import Dict, List, Tuple, Optional
import shutil
import gradio as gr
from loguru import logger
import pandas as pd
import PyPDF2
from PIL import Image
from gradio_client import Client
import time
# Check that the python-pptx library is available
try:
from pptx import Presentation
from pptx.util import Inches, Pt
from pptx.enum.text import PP_ALIGN, MSO_ANCHOR
from pptx.dml.color import RGBColor
from pptx.enum.shapes import MSO_SHAPE
from pptx.chart.data import CategoryChartData
from pptx.enum.chart import XL_CHART_TYPE, XL_LEGEND_POSITION
PPTX_AVAILABLE = True
except ImportError:
PPTX_AVAILABLE = False
logger.warning("python-pptx ๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ๊ฐ€ ์„ค์น˜๋˜์ง€ ์•Š์•˜์Šต๋‹ˆ๋‹ค. pip install python-pptx")
##############################################################################
# API Configuration
##############################################################################
FRIENDLI_TOKEN = os.environ.get("FRIENDLI_TOKEN")
if not FRIENDLI_TOKEN:
raise ValueError("Please set FRIENDLI_TOKEN environment variable")
FRIENDLI_MODEL_ID = "dep89a2fld32mcm"
FRIENDLI_API_URL = "https://api.friendli.ai/dedicated/v1/chat/completions"
# SERPHouse API key
SERPHOUSE_API_KEY = os.getenv("SERPHOUSE_API_KEY", "")
if not SERPHOUSE_API_KEY:
logger.warning("SERPHOUSE_API_KEY not set. Web search functionality will be limited.")
##############################################################################
# AI Image Generation API Configuration - FLUX API
##############################################################################
FLUX_API_URL = "http://211.233.58.201:7896"
FLUX_API_ENABLED = False
flux_api_client = None
def initialize_flux_api():
"""FLUX API ์ดˆ๊ธฐํ™”"""
global FLUX_API_ENABLED, flux_api_client
try:
logger.info("Connecting to FLUX API...")
flux_api_client = Client(FLUX_API_URL)
FLUX_API_ENABLED = True
logger.info("FLUX API connected successfully")
return True
except Exception as e:
logger.error(f"Failed to connect to FLUX API: {e}")
FLUX_API_ENABLED = False
return False
##############################################################################
# Diagram Generation API Configuration
##############################################################################
DIAGRAM_API_URL = "http://211.233.58.201:7860" # ChartGPT API URL
DIAGRAM_API_ENABLED = False
diagram_api_client = None
def initialize_diagram_api():
"""๋‹ค์ด์–ด๊ทธ๋žจ ์ƒ์„ฑ API ์ดˆ๊ธฐํ™”"""
global DIAGRAM_API_ENABLED, diagram_api_client
try:
logger.info("Connecting to Diagram Generation API...")
diagram_api_client = Client(DIAGRAM_API_URL)
DIAGRAM_API_ENABLED = True
logger.info("Diagram API connected successfully")
return True
except Exception as e:
logger.error(f"Failed to connect to Diagram API: {e}")
DIAGRAM_API_ENABLED = False
return False
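# Note: both initializers are expected to be called once at startup (outside this
# excerpt), e.g. initialize_flux_api(); initialize_diagram_api(). Each call flips
# the corresponding *_ENABLED flag on success.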
##############################################################################
# Design Themes and Color Schemes
##############################################################################
DESIGN_THEMES = {
"professional": {
"name": "ํ”„๋กœํŽ˜์…”๋„",
"colors": {
"primary": RGBColor(46, 134, 171), # #2E86AB
"secondary": RGBColor(162, 59, 114), # #A23B72
"accent": RGBColor(241, 143, 1), # #F18F01
"background": RGBColor(250, 250, 250), # #FAFAFA - Lighter background
"text": RGBColor(44, 44, 44), # #2C2C2C - Darker text for better contrast
},
"fonts": {
"title": "Arial",
"subtitle": "Arial",
"body": "Calibri"
}
},
"modern": {
"name": "๋ชจ๋˜",
"colors": {
"primary": RGBColor(114, 9, 183), # #7209B7
"secondary": RGBColor(247, 37, 133), # #F72585
"accent": RGBColor(76, 201, 240), # #4CC9F0
"background": RGBColor(252, 252, 252), # #FCFCFC - Very light background
"text": RGBColor(40, 40, 40), # #282828 - Dark text
},
"fonts": {
"title": "Arial",
"subtitle": "Arial",
"body": "Helvetica"
}
},
"nature": {
"name": "์ž์—ฐ",
"colors": {
"primary": RGBColor(45, 106, 79), # #2D6A4F
"secondary": RGBColor(82, 183, 136), # #52B788
"accent": RGBColor(181, 233, 185), # #B5E9B9 - Softer accent
"background": RGBColor(248, 252, 248), # #F8FCF8 - Light green tint
"text": RGBColor(27, 38, 44), # #1B262C
},
"fonts": {
"title": "Georgia",
"subtitle": "Verdana",
"body": "Calibri"
}
},
"creative": {
"name": "ํฌ๋ฆฌ์—์ดํ‹ฐ๋ธŒ",
"colors": {
"primary": RGBColor(255, 0, 110), # #FF006E
"secondary": RGBColor(251, 86, 7), # #FB5607
"accent": RGBColor(255, 190, 11), # #FFBE0B
"background": RGBColor(255, 248, 240), # #FFF8F0 - Light warm background
"text": RGBColor(33, 33, 33), # #212121 - Dark text on light bg
},
"fonts": {
"title": "Impact",
"subtitle": "Arial",
"body": "Segoe UI"
}
},
"minimal": {
"name": "๋ฏธ๋‹ˆ๋ฉ€",
"colors": {
"primary": RGBColor(55, 55, 55), # #373737 - Softer than pure black
"secondary": RGBColor(120, 120, 120), # #787878
"accent": RGBColor(0, 122, 255), # #007AFF - Blue accent
"background": RGBColor(252, 252, 252), # #FCFCFC
"text": RGBColor(33, 33, 33), # #212121
},
"fonts": {
"title": "Helvetica",
"subtitle": "Helvetica",
"body": "Arial"
}
}
}
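# Usage sketch (illustrative), mirroring create_advanced_ppt_from_content() below:
#   theme = DESIGN_THEMES.get("modern", DESIGN_THEMES["professional"])
#   accent = theme["colors"]["accent"]     # RGBColor(76, 201, 240)
#   title_font = theme["fonts"]["title"]   # "Arial"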
##############################################################################
# Slide Layout Types
##############################################################################
SLIDE_LAYOUTS = {
"title": 0, # ์ œ๋ชฉ ์Šฌ๋ผ์ด๋“œ
"title_content": 1, # ์ œ๋ชฉ๊ณผ ๋‚ด์šฉ
"section_header": 2, # ์„น์…˜ ํ—ค๋”
"two_content": 3, # 2๋‹จ ๋ ˆ์ด์•„์›ƒ
"comparison": 4, # ๋น„๊ต ๋ ˆ์ด์•„์›ƒ
"title_only": 5, # ์ œ๋ชฉ๋งŒ
"blank": 6 # ๋นˆ ์Šฌ๋ผ์ด๋“œ
}
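# Illustrative lookup (these indices follow the default python-pptx template):
#   layout = prs.slide_layouts[SLIDE_LAYOUTS["title_content"]]   # index 1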
##############################################################################
# Emoji Bullet Points Mapping
##############################################################################
def has_emoji(text: str) -> bool:
"""Check if text already contains emoji"""
# Check for common emoji unicode ranges
for char in text[:3]: # Check first 3 characters
code = ord(char)
# Common emoji ranges
if (0x1F300 <= code <= 0x1F9FF) or \
(0x2600 <= code <= 0x26FF) or \
(0x2700 <= code <= 0x27BF) or \
(0x1F000 <= code <= 0x1F02F) or \
(0x1F0A0 <= code <= 0x1F0FF) or \
(0x1F100 <= code <= 0x1F1FF):
return True
return False
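# Example (illustrative): has_emoji("🚀 Launch plan") -> True; has_emoji("Launch plan") -> False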
def get_emoji_for_content(text: str) -> str:
"""Get relevant emoji based on content"""
text_lower = text.lower()
# Technology
if any(word in text_lower for word in ['ai', '์ธ๊ณต์ง€๋Šฅ', 'ml', '๋จธ์‹ ๋Ÿฌ๋‹', '๋”ฅ๋Ÿฌ๋‹', 'deep learning']):
return '๐Ÿค–'
elif any(word in text_lower for word in ['๋ฐ์ดํ„ฐ', 'data', '๋ถ„์„', 'analysis', 'ํ†ต๊ณ„']):
return '๐Ÿ“Š'
elif any(word in text_lower for word in ['์ฝ”๋“œ', 'code', 'ํ”„๋กœ๊ทธ๋ž˜๋ฐ', 'programming', '๊ฐœ๋ฐœ']):
return '๐Ÿ’ป'
elif any(word in text_lower for word in ['ํด๋ผ์šฐ๋“œ', 'cloud', '์„œ๋ฒ„', 'server']):
return 'โ˜๏ธ'
elif any(word in text_lower for word in ['๋ณด์•ˆ', 'security', '์•ˆ์ „', 'safety']):
return '๐Ÿ”’'
elif any(word in text_lower for word in ['๋„คํŠธ์›Œํฌ', 'network', '์—ฐ๊ฒฐ', 'connection', '์ธํ„ฐ๋„ท']):
return '๐ŸŒ'
elif any(word in text_lower for word in ['๋ชจ๋ฐ”์ผ', 'mobile', '์Šค๋งˆํŠธํฐ', 'smartphone', '์•ฑ']):
return '๐Ÿ“ฑ'
# Business
elif any(word in text_lower for word in ['์„ฑ์žฅ', 'growth', '์ฆ๊ฐ€', 'increase', '์ƒ์Šน']):
return '๐Ÿ“ˆ'
elif any(word in text_lower for word in ['๋ชฉํ‘œ', 'goal', 'target', 'ํƒ€๊ฒŸ', '๋ชฉ์ ']):
return '๐ŸŽฏ'
elif any(word in text_lower for word in ['๋ˆ', 'money', '๋น„์šฉ', 'cost', '์˜ˆ์‚ฐ', 'budget', '์ˆ˜์ต']):
return '๐Ÿ’ฐ'
elif any(word in text_lower for word in ['ํŒ€', 'team', 'ํ˜‘์—…', 'collaboration', 'ํ˜‘๋ ฅ']):
return '๐Ÿ‘ฅ'
elif any(word in text_lower for word in ['์‹œ๊ฐ„', 'time', '์ผ์ •', 'schedule', '๊ธฐํ•œ']):
return 'โฐ'
elif any(word in text_lower for word in ['์•„์ด๋””์–ด', 'idea', '์ฐฝ์˜', 'creative', 'ํ˜์‹ ']):
return '๐Ÿ’ก'
elif any(word in text_lower for word in ['์ „๋žต', 'strategy', '๊ณ„ํš', 'plan']):
return '๐Ÿ“‹'
elif any(word in text_lower for word in ['์„ฑ๊ณต', 'success', '๋‹ฌ์„ฑ', 'achieve']):
return '๐Ÿ†'
# Education
elif any(word in text_lower for word in ['ํ•™์Šต', 'learning', '๊ต์œก', 'education', '๊ณต๋ถ€']):
return '๐Ÿ“š'
elif any(word in text_lower for word in ['์—ฐ๊ตฌ', 'research', '์กฐ์‚ฌ', 'study', '์‹คํ—˜']):
return '๐Ÿ”ฌ'
elif any(word in text_lower for word in ['๋ฌธ์„œ', 'document', '๋ณด๊ณ ์„œ', 'report']):
return '๐Ÿ“„'
elif any(word in text_lower for word in ['์ •๋ณด', 'information', '์ง€์‹', 'knowledge']):
return '๐Ÿ“–'
# Communication
elif any(word in text_lower for word in ['์†Œํ†ต', 'communication', '๋Œ€ํ™”', 'conversation']):
return '๐Ÿ’ฌ'
elif any(word in text_lower for word in ['์ด๋ฉ”์ผ', 'email', '๋ฉ”์ผ', 'mail']):
return '๐Ÿ“ง'
elif any(word in text_lower for word in ['์ „ํ™”', 'phone', 'call', 'ํ†ตํ™”']):
return '๐Ÿ“ž'
elif any(word in text_lower for word in ['ํšŒ์˜', 'meeting', '๋ฏธํŒ…', '์ปจํผ๋Ÿฐ์Šค']):
return '๐Ÿ‘”'
# Nature/Environment
elif any(word in text_lower for word in ['ํ™˜๊ฒฝ', 'environment', '์ž์—ฐ', 'nature']):
return '๐ŸŒฑ'
elif any(word in text_lower for word in ['์ง€์†๊ฐ€๋Šฅ', 'sustainable', '์นœํ™˜๊ฒฝ', 'eco']):
return 'โ™ป๏ธ'
elif any(word in text_lower for word in ['์—๋„ˆ์ง€', 'energy', '์ „๋ ฅ', 'power']):
return 'โšก'
elif any(word in text_lower for word in ['์ง€๊ตฌ', 'earth', '์„ธ๊ณ„', 'world']):
return '๐ŸŒ'
# Process/Steps
elif any(word in text_lower for word in ['ํ”„๋กœ์„ธ์Šค', 'process', '์ ˆ์ฐจ', 'procedure', '๋‹จ๊ณ„']):
return '๐Ÿ”„'
elif any(word in text_lower for word in ['์ฒดํฌ', 'check', 'ํ™•์ธ', 'verify', '๊ฒ€์ฆ']):
return 'โœ…'
elif any(word in text_lower for word in ['์ฃผ์˜', 'warning', '๊ฒฝ๊ณ ', 'caution']):
return 'โš ๏ธ'
elif any(word in text_lower for word in ['์ค‘์š”', 'important', 'ํ•ต์‹ฌ', 'key', 'ํ•„์ˆ˜']):
return 'โญ'
elif any(word in text_lower for word in ['์งˆ๋ฌธ', 'question', '๋ฌธ์˜', 'ask']):
return 'โ“'
elif any(word in text_lower for word in ['ํ•ด๊ฒฐ', 'solution', '๋‹ต', 'answer']):
return '๐Ÿ’ฏ'
# Actions
elif any(word in text_lower for word in ['์‹œ์ž‘', 'start', '์ถœ๋ฐœ', 'begin']):
return '๐Ÿš€'
elif any(word in text_lower for word in ['์™„๋ฃŒ', 'complete', '์ข…๋ฃŒ', 'finish']):
return '๐Ÿ'
elif any(word in text_lower for word in ['๊ฐœ์„ ', 'improve', 'ํ–ฅ์ƒ', 'enhance']):
return '๐Ÿ”ง'
elif any(word in text_lower for word in ['๋ณ€ํ™”', 'change', '๋ณ€๊ฒฝ', 'modify']):
return '๐Ÿ”„'
# Industries
elif any(word in text_lower for word in ['์˜๋ฃŒ', 'medical', '๋ณ‘์›', 'hospital', '๊ฑด๊ฐ•']):
return '๐Ÿฅ'
elif any(word in text_lower for word in ['๊ธˆ์œต', 'finance', '์€ํ–‰', 'bank']):
return '๐Ÿฆ'
elif any(word in text_lower for word in ['์ œ์กฐ', 'manufacturing', '๊ณต์žฅ', 'factory']):
return '๐Ÿญ'
elif any(word in text_lower for word in ['๋†์—…', 'agriculture', '๋†์žฅ', 'farm']):
return '๐ŸŒพ'
# Emotion/Status
elif any(word in text_lower for word in ['ํ–‰๋ณต', 'happy', '๊ธฐ์จ', 'joy']):
return '๐Ÿ˜Š'
elif any(word in text_lower for word in ['์œ„ํ—˜', 'danger', 'risk', '๋ฆฌ์Šคํฌ']):
return 'โšก'
elif any(word in text_lower for word in ['์ƒˆ๋กœ์šด', 'new', '์‹ ๊ทœ', 'novel']):
return 'โœจ'
# Numbers
elif text_lower.startswith(('์ฒซ์งธ', 'first', '1.', '์ฒซ๋ฒˆ์งธ', '์ฒซ ๋ฒˆ์งธ')):
return '1๏ธโƒฃ'
elif text_lower.startswith(('๋‘˜์งธ', 'second', '2.', '๋‘๋ฒˆ์งธ', '๋‘ ๋ฒˆ์งธ')):
return '2๏ธโƒฃ'
elif text_lower.startswith(('์…‹์งธ', 'third', '3.', '์„ธ๋ฒˆ์งธ', '์„ธ ๋ฒˆ์งธ')):
return '3๏ธโƒฃ'
elif text_lower.startswith(('๋„ท์งธ', 'fourth', '4.', '๋„ค๋ฒˆ์งธ', '๋„ค ๋ฒˆ์งธ')):
return '4๏ธโƒฃ'
elif text_lower.startswith(('๋‹ค์„ฏ์งธ', 'fifth', '5.', '๋‹ค์„ฏ๋ฒˆ์งธ', '๋‹ค์„ฏ ๋ฒˆ์งธ')):
return '5๏ธโƒฃ'
# Default
else:
return 'โ–ถ๏ธ'
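# Example (illustrative): get_emoji_for_content("data analysis overview") returns the
# chart emoji; text with no keyword match falls back to the default bullet emoji above.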
##############################################################################
# Diagram Type Detection
##############################################################################
def detect_diagram_type(title: str, content: str) -> Optional[str]:
"""์Šฌ๋ผ์ด๋“œ ๋‚ด์šฉ์„ ๋ถ„์„ํ•˜์—ฌ ์ ์ ˆํ•œ ๋‹ค์ด์–ด๊ทธ๋žจ ํƒ€์ž… ๊ฒฐ์ •"""
combined_text = f"{title} {content}".lower()
# Process Flow keywords
if any(word in combined_text for word in ['ํ”„๋กœ์„ธ์Šค', 'process', '์ ˆ์ฐจ', 'procedure', '๋‹จ๊ณ„', 'step', 'flow', 'ํ๋ฆ„', '์›Œํฌํ”Œ๋กœ์šฐ', 'workflow']):
return "Process Flow"
# WBS keywords
elif any(word in combined_text for word in ['wbs', '์ž‘์—…๋ถ„ํ•ด', 'ํ”„๋กœ์ ํŠธ', 'project', '์—…๋ฌด๋ถ„ํ•ด', 'breakdown', '๊ตฌ์กฐ๋„']):
return "WBS Diagram"
# Concept Map keywords
elif any(word in combined_text for word in ['๊ฐœ๋…', 'concept', '๊ด€๊ณ„', 'relationship', '์—ฐ๊ด€', 'connection', '๋งˆ์ธ๋“œ๋งต', 'mindmap']):
return "Concept Map"
# Radial Diagram keywords
elif any(word in combined_text for word in ['์ค‘์‹ฌ', 'central', '๋ฐฉ์‚ฌํ˜•', 'radial', 'ํ•ต์‹ฌ', 'core', '์ฃผ์š”', 'main']):
return "Radial Diagram"
# Synoptic Chart keywords
elif any(word in combined_text for word in ['๊ฐœ์š”', 'overview', '์ „์ฒด', 'overall', '์š”์•ฝ', 'summary', '์‹œ๋†‰ํ‹ฑ', 'synoptic']):
return "Synoptic Chart"
return None
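# Example (illustrative): detect_diagram_type("Onboarding Process", "step 1, step 2")
# returns "Process Flow"; slides with no matching keywords return None.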
##############################################################################
# Generate Diagram JSON using LLM
##############################################################################
def generate_diagram_json(title: str, content: str, diagram_type: str) -> Optional[str]:
"""LLM์„ ์‚ฌ์šฉํ•˜์—ฌ ๋‹ค์ด์–ด๊ทธ๋žจ์šฉ JSON ์ƒ์„ฑ"""
if not FRIENDLI_TOKEN:
return None
# JSON structure guide per diagram type
json_guides = {
"Concept Map": """Generate a JSON for a concept map with the EXACT following structure:
{
"central_node": "Main Topic",
"nodes": [
{
"id": "node1",
"label": "First Concept",
"relationship": "is part of",
"subnodes": [
{
"id": "node1_1",
"label": "Sub Concept 1",
"relationship": "includes",
"subnodes": []
}
]
}
]
}""",
"Process Flow": """Generate a JSON for a process flow diagram with the EXACT following structure:
{
"start_node": "Start Process",
"nodes": [
{"id": "step1", "label": "First Step", "type": "process"},
{"id": "step2", "label": "Decision Point", "type": "decision"},
{"id": "end", "label": "End Process", "type": "end"}
],
"connections": [
{"from": "start_node", "to": "step1", "label": "Begin"},
{"from": "step1", "to": "step2", "label": "Next"},
{"from": "step2", "to": "end", "label": "Complete"}
]
}""",
"WBS Diagram": """Generate a JSON for a WBS diagram with the EXACT following structure:
{
"project_title": "Project Name",
"phases": [
{
"id": "phase1",
"label": "Phase 1",
"tasks": [
{
"id": "task1_1",
"label": "Task 1.1",
"subtasks": []
}
]
}
]
}""",
"Radial Diagram": """Generate a JSON for a radial diagram with the EXACT following structure:
{
"central_node": "Central Concept",
"nodes": [
{
"id": "branch1",
"label": "Branch 1",
"relationship": "connected to",
"subnodes": []
}
]
}""",
"Synoptic Chart": """Generate a JSON for a synoptic chart with the EXACT following structure:
{
"central_node": "Chart Title",
"nodes": [
{
"id": "phase1",
"label": "Phase 1 Name",
"relationship": "starts with",
"subnodes": []
}
]
}"""
}
system_prompt = f"""You are a helpful assistant that generates JSON structures for diagrams.
{json_guides.get(diagram_type, '')}
Important rules:
1. Generate ONLY valid JSON without any explanation or markdown formatting
2. The JSON must follow the EXACT structure shown above
3. Create content based on the provided title and content
4. Use the user's language (Korean or English) for the content values
5. Keep it simple with 3-5 main nodes/steps"""
messages = [
{"role": "system", "content": system_prompt},
{"role": "user", "content": f"Create a {diagram_type} JSON for:\nTitle: {title}\nContent: {content}"}
]
headers = {
"Authorization": f"Bearer {FRIENDLI_TOKEN}",
"Content-Type": "application/json"
}
payload = {
"model": FRIENDLI_MODEL_ID,
"messages": messages,
"max_tokens": 1000,
"temperature": 0.7,
"stream": False
}
try:
response = requests.post(FRIENDLI_API_URL, headers=headers, json=payload, timeout=30)
if response.status_code == 200:
response_data = response.json()
if 'choices' in response_data and len(response_data['choices']) > 0:
content = response_data['choices'][0]['message']['content']
# Extract JSON from response
content = content.strip()
if content.startswith("```json"):
content = content[7:]
if content.startswith("```"):
content = content[3:]
if content.endswith("```"):
content = content[:-3]
# Validate JSON
json.loads(content) # This will raise exception if invalid
return content
except Exception as e:
logger.error(f"Error generating diagram JSON: {e}")
return None
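# The JSON string returned above is handed to generate_diagram_via_api(); callers
# treat a None return as "no diagram for this slide".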
##############################################################################
# Generate Diagram using API
##############################################################################
def generate_diagram_via_api(json_data: str, diagram_type: str) -> Optional[str]:
"""๋‹ค์ด์–ด๊ทธ๋žจ API๋ฅผ ํ†ตํ•ด ๋‹ค์ด์–ด๊ทธ๋žจ ์ƒ์„ฑ"""
if not DIAGRAM_API_ENABLED or not diagram_api_client:
return None
try:
# Call the API
result = diagram_api_client.predict(
prompt_input=f"Generate {diagram_type}",  # prompt
diagram_type_select=diagram_type,  # diagram type
design_type_select="None",  # design type: none
output_format_radio="png",  # PNG format
use_search_checkbox=False,  # no web search
api_name="/generate_with_llm"
)
# Extract the image path from the result
if isinstance(result, tuple) and len(result) > 0:
image_path = result[0]
if image_path and os.path.exists(image_path):
# Copy to a temporary file
with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as tmp:
shutil.copy2(image_path, tmp.name)
return tmp.name
return None
except Exception as e:
logger.error(f"Failed to generate diagram via API: {e}")
return None
##############################################################################
# FLUX Image Generation Functions
##############################################################################
def generate_flux_prompt(title: str, content: str) -> str:
"""์Šฌ๋ผ์ด๋“œ ๋‚ด์šฉ์„ ๊ธฐ๋ฐ˜์œผ๋กœ FLUX ์ด๋ฏธ์ง€ ํ”„๋กฌํ”„ํŠธ ์ƒ์„ฑ"""
# LLM์„ ์‚ฌ์šฉํ•˜์—ฌ ํ”„๋กฌํ”„ํŠธ ์ƒ์„ฑ
system_prompt = """You are an expert at creating visual prompts for AI image generation.
Create a concise, visually descriptive prompt in English based on the slide content.
The prompt should describe a professional, modern illustration that represents the key concepts.
Keep it under 100 words and focus on visual elements, style, and mood.
Output ONLY the prompt without any explanation."""
messages = [
{"role": "system", "content": system_prompt},
{"role": "user", "content": f"Create an image prompt for:\nTitle: {title}\nContent: {content[:500]}"}
]
headers = {
"Authorization": f"Bearer {FRIENDLI_TOKEN}",
"Content-Type": "application/json"
}
payload = {
"model": FRIENDLI_MODEL_ID,
"messages": messages,
"max_tokens": 200,
"temperature": 0.8,
"stream": False
}
try:
response = requests.post(FRIENDLI_API_URL, headers=headers, json=payload, timeout=30)
if response.status_code == 200:
response_data = response.json()
if 'choices' in response_data and len(response_data['choices']) > 0:
prompt = response_data['choices'][0]['message']['content'].strip()
return f"Professional business presentation slide illustration: {prompt}, modern clean style, corporate colors, white background"
except Exception as e:
logger.error(f"Error generating FLUX prompt: {e}")
# Fallback prompt
return f"Professional business presentation illustration about {title}, modern minimalist style, clean design, corporate colors"
##############################################################################
# Icon and Shape Mappings
##############################################################################
SHAPE_ICONS = {
"๋ชฉํ‘œ": MSO_SHAPE.STAR_5_POINT,
"ํ”„๋กœ์„ธ์Šค": MSO_SHAPE.BLOCK_ARC,
"์„ฑ์žฅ": MSO_SHAPE.UP_ARROW,
"์•„์ด๋””์–ด": MSO_SHAPE.LIGHTNING_BOLT,
"์ฒดํฌ": MSO_SHAPE.RECTANGLE,
"์ฃผ์˜": MSO_SHAPE.DIAMOND,
"์งˆ๋ฌธ": MSO_SHAPE.OVAL,
"๋ถ„์„": MSO_SHAPE.PENTAGON,
"์‹œ๊ฐ„": MSO_SHAPE.DONUT,
"ํŒ€": MSO_SHAPE.HEXAGON,
}
##############################################################################
# File Processing Constants
##############################################################################
MAX_FILE_SIZE = 30 * 1024 * 1024 # 30MB
MAX_CONTENT_CHARS = 6000
##############################################################################
# Improved Keyword Extraction
##############################################################################
def extract_keywords(text: str, top_k: int = 5) -> str:
"""
Extract keywords: supports English and Korean
"""
stop_words = {'์€', '๋Š”', '์ด', '๊ฐ€', '์„', '๋ฅผ', '์˜', '์—', '์—์„œ',
'the', 'is', 'at', 'on', 'in', 'a', 'an', 'and', 'or', 'but'}
text = re.sub(r"[^a-zA-Z0-9๊ฐ€-ํžฃ\s]", "", text)
tokens = text.split()
key_tokens = [
token for token in tokens
if token.lower() not in stop_words and len(token) > 1
][:top_k]
return " ".join(key_tokens)
##############################################################################
# File Size Validation
##############################################################################
def validate_file_size(file_path: str) -> bool:
"""Check if file size is within limits"""
try:
file_size = os.path.getsize(file_path)
return file_size <= MAX_FILE_SIZE
except:
return False
##############################################################################
# Web Search Function
##############################################################################
def do_web_search(query: str, use_korean: bool = False) -> str:
"""
Search web and return top 20 organic results
"""
if not SERPHOUSE_API_KEY:
return "Web search unavailable. API key not configured."
try:
url = "https://api.serphouse.com/serp/live"
params = {
"q": query,
"domain": "google.com",
"serp_type": "web",
"device": "desktop",
"lang": "ko" if use_korean else "en",
"num": "20"
}
headers = {
"Authorization": f"Bearer {SERPHOUSE_API_KEY}"
}
logger.info(f"Calling SerpHouse API... Query: {query}")
response = requests.get(url, headers=headers, params=params, timeout=30)
response.raise_for_status()
data = response.json()
# Parse results
results = data.get("results", {})
organic = None
if isinstance(results, dict) and "organic" in results:
organic = results["organic"]
elif isinstance(results, dict) and "results" in results:
if isinstance(results["results"], dict) and "organic" in results["results"]:
organic = results["results"]["organic"]
elif "organic" in data:
organic = data["organic"]
if not organic:
return "No search results found or unexpected API response structure."
max_results = min(20, len(organic))
limited_organic = organic[:max_results]
summary_lines = []
for idx, item in enumerate(limited_organic, start=1):
title = item.get("title", "No title")
link = item.get("link", "#")
snippet = item.get("snippet", "No description")
displayed_link = item.get("displayed_link", link)
summary_lines.append(
f"### Result {idx}: {title}\n\n"
f"{snippet}\n\n"
f"**Source**: [{displayed_link}]({link})\n\n"
f"---\n"
)
instructions = """
# Web Search Results
Below are the search results. Use this information when answering questions:
1. Reference the title, content, and source links
2. Explicitly cite sources in your answer (e.g., "According to source X...")
3. Include actual source links in your response
4. Synthesize information from multiple sources
"""
search_results = instructions + "\n".join(summary_lines)
return search_results
except requests.exceptions.Timeout:
logger.error("Web search timeout")
return "Web search timed out. Please try again."
except requests.exceptions.RequestException as e:
logger.error(f"Web search network error: {e}")
return "Network error during web search."
except Exception as e:
logger.error(f"Web search failed: {e}")
return f"Web search failed: {str(e)}"
##############################################################################
# File Analysis Functions
##############################################################################
def analyze_csv_file(path: str) -> str:
"""Analyze CSV file with size validation and encoding handling"""
if not validate_file_size(path):
return f"โš ๏ธ Error: File size exceeds {MAX_FILE_SIZE/1024/1024:.1f}MB limit."
try:
encodings = ['utf-8', 'cp949', 'euc-kr', 'latin-1']
df = None
for encoding in encodings:
try:
df = pd.read_csv(path, encoding=encoding, nrows=50)
break
except UnicodeDecodeError:
continue
if df is None:
return f"Failed to read CSV: Unsupported encoding"
total_rows = len(pd.read_csv(path, encoding=encoding, usecols=[0]))
if df.shape[1] > 10:
df = df.iloc[:, :10]
summary = f"**Data size**: {total_rows} rows x {df.shape[1]} columns\n"
summary += f"**Showing**: Top {min(50, total_rows)} rows\n"
summary += f"**Columns**: {', '.join(df.columns)}\n\n"
# Extract data for charts
chart_data = {
"columns": list(df.columns),
"sample_data": df.head(10).to_dict('records')
}
df_str = df.to_string()
if len(df_str) > MAX_CONTENT_CHARS:
df_str = df_str[:MAX_CONTENT_CHARS] + "\n...(truncated)..."
return f"**[CSV File: {os.path.basename(path)}]**\n\n{summary}{df_str}\n\nCHART_DATA:{json.dumps(chart_data)}"
except Exception as e:
logger.error(f"CSV read error: {e}")
return f"Failed to read CSV file ({os.path.basename(path)}): {str(e)}"
def analyze_txt_file(path: str) -> str:
"""Analyze text file with automatic encoding detection"""
if not validate_file_size(path):
return f"โš ๏ธ Error: File size exceeds {MAX_FILE_SIZE/1024/1024:.1f}MB limit."
encodings = ['utf-8', 'cp949', 'euc-kr', 'latin-1', 'utf-16']
for encoding in encodings:
try:
with open(path, "r", encoding=encoding) as f:
text = f.read()
file_size = os.path.getsize(path)
size_info = f"**File size**: {file_size/1024:.1f}KB\n\n"
if len(text) > MAX_CONTENT_CHARS:
text = text[:MAX_CONTENT_CHARS] + "\n...(truncated)..."
return f"**[TXT File: {os.path.basename(path)}]**\n\n{size_info}{text}"
except UnicodeDecodeError:
continue
return f"Failed to read text file ({os.path.basename(path)}): Unsupported encoding"
def pdf_to_markdown(pdf_path: str) -> str:
"""Convert PDF to markdown with improved error handling"""
if not validate_file_size(pdf_path):
return f"โš ๏ธ Error: File size exceeds {MAX_FILE_SIZE/1024/1024:.1f}MB limit."
text_chunks = []
try:
with open(pdf_path, "rb") as f:
reader = PyPDF2.PdfReader(f)
total_pages = len(reader.pages)
max_pages = min(5, total_pages)
text_chunks.append(f"**Total pages**: {total_pages}")
text_chunks.append(f"**Showing**: First {max_pages} pages\n")
for page_num in range(max_pages):
try:
page = reader.pages[page_num]
page_text = page.extract_text() or ""
page_text = page_text.strip()
if page_text:
if len(page_text) > MAX_CONTENT_CHARS // max_pages:
page_text = page_text[:MAX_CONTENT_CHARS // max_pages] + "...(truncated)"
text_chunks.append(f"## Page {page_num+1}\n\n{page_text}\n")
except Exception as e:
text_chunks.append(f"## Page {page_num+1}\n\nFailed to read page: {str(e)}\n")
if total_pages > max_pages:
text_chunks.append(f"\n...({max_pages}/{total_pages} pages shown)...")
except Exception as e:
logger.error(f"PDF read error: {e}")
return f"Failed to read PDF file ({os.path.basename(pdf_path)}): {str(e)}"
full_text = "\n".join(text_chunks)
if len(full_text) > MAX_CONTENT_CHARS:
full_text = full_text[:MAX_CONTENT_CHARS] + "\n...(truncated)..."
return f"**[PDF File: {os.path.basename(pdf_path)}]**\n\n{full_text}"
##############################################################################
# AI Image Generation Functions using FLUX API
##############################################################################
def generate_cover_image_prompt(topic: str, slides_data: list) -> str:
"""PPT ์ฃผ์ œ์™€ ๋‚ด์šฉ์„ ๊ธฐ๋ฐ˜์œผ๋กœ ํ‘œ์ง€ ์ด๋ฏธ์ง€ ํ”„๋กฌํ”„ํŠธ ์ƒ์„ฑ"""
# ์ฃผ์š” ํ‚ค์›Œ๋“œ ์ถ”์ถœ
keywords = extract_keywords(topic, top_k=3).split()
# ์ฃผ์ œ ๋ถ„์„์„ ํ†ตํ•œ ์Šคํƒ€์ผ ๊ฒฐ์ •
style = "modern professional"
topic_lower = topic.lower()
if any(word in topic_lower for word in ['๊ธฐ์ˆ ', 'tech', 'ai', '์ธ๊ณต์ง€๋Šฅ', 'digital', '๋””์ง€ํ„ธ']):
style = "futuristic technology"
elif any(word in topic_lower for word in ['๋น„์ฆˆ๋‹ˆ์Šค', 'business', '๊ฒฝ์˜', 'management']):
style = "corporate business"
elif any(word in topic_lower for word in ['๊ต์œก', 'education', 'ํ•™์Šต', 'learning']):
style = "educational inspiring"
elif any(word in topic_lower for word in ['ํ™˜๊ฒฝ', 'environment', '์ž์—ฐ', 'nature']):
style = "nature eco-friendly"
elif any(word in topic_lower for word in ['์˜๋ฃŒ', 'medical', '๊ฑด๊ฐ•', 'health']):
style = "medical healthcare"
elif any(word in topic_lower for word in ['๊ธˆ์œต', 'finance', 'ํˆฌ์ž', 'investment']):
style = "financial professional"
# Build the English prompt for FLUX
prompt = f"Professional presentation cover design, {style} theme, abstract geometric shapes representing {' '.join(keywords)}, modern minimalist style, gradient background, clean composition, high quality, corporate design"
return prompt
def generate_conclusion_image_prompt(title: str, content: str) -> str:
"""๊ฒฐ๋ก  ์Šฌ๋ผ์ด๋“œ์šฉ ์ด๋ฏธ์ง€ ํ”„๋กฌํ”„ํŠธ ์ƒ์„ฑ"""
# ์ฃผ์š” ํ‚ค์›Œ๋“œ ์ถ”์ถœ
keywords = extract_keywords(f"{title} {content}", top_k=5).split()
# ๊ฒฐ๋ก /ํ•˜์ด๋ผ์ดํŠธ ๊ด€๋ จ ํ‚ค์›Œ๋“œ ๊ฐ์ง€
conclusion_keywords = ['๊ฒฐ๋ก ', 'conclusion', '์š”์•ฝ', 'summary', 'ํ•ต์‹ฌ', 'key',
'์ค‘์š”', 'important', '๋ฏธ๋ž˜', 'future', '์ „๋ง', 'outlook']
# Decide the prompt style
if any(word in title.lower() + content.lower() for word in conclusion_keywords):
style = "inspirational conclusion with bright future vision"
else:
style = "professional summary visualization"
# Build the FLUX prompt
prompt = f"Professional presentation conclusion slide, {style}, highlighting {' '.join(keywords[:3])}, modern abstract visualization, uplifting mood, corporate design, high quality"
return prompt
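# NOTE: the keyword-based generate_flux_prompt below redefines (and so overrides)
# the LLM-based version defined earlier in this file.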
def generate_flux_prompt(title: str, content: str) -> str:
"""์Šฌ๋ผ์ด๋“œ ๋‚ด์šฉ์„ ๊ธฐ๋ฐ˜์œผ๋กœ FLUX ์ด๋ฏธ์ง€ ํ”„๋กฌํ”„ํŠธ ์ƒ์„ฑ"""
# ์ฃผ์š” ํ‚ค์›Œ๋“œ ์ถ”์ถœ
keywords = extract_keywords(f"{title} {content}", top_k=4).split()
# ๋‚ด์šฉ ๋ถ„์„์„ ํ†ตํ•œ ์Šคํƒ€์ผ ๊ฒฐ์ •
content_lower = (title + " " + content).lower()
style = "professional business"
if any(word in content_lower for word in ['data', '๋ฐ์ดํ„ฐ', 'analysis', '๋ถ„์„', 'chart', '์ฐจํŠธ']):
style = "data visualization analytics"
elif any(word in content_lower for word in ['process', 'ํ”„๋กœ์„ธ์Šค', 'workflow', '์›Œํฌํ”Œ๋กœ์šฐ']):
style = "workflow process diagram"
elif any(word in content_lower for word in ['team', 'ํŒ€', 'collaboration', 'ํ˜‘์—…']):
style = "teamwork collaboration"
elif any(word in content_lower for word in ['growth', '์„ฑ์žฅ', 'increase', '์ฆ๊ฐ€']):
style = "growth success metrics"
# Build the FLUX prompt
prompt = f"Professional presentation slide illustration, {style} theme, featuring {' '.join(keywords[:3])}, modern business graphics, clean corporate design, white background, high quality"
return prompt
def generate_flux_image_via_api(prompt: str) -> Optional[str]:
"""FLUX API๋ฅผ ํ†ตํ•ด ์ด๋ฏธ์ง€ ์ƒ์„ฑ"""
if not FLUX_API_ENABLED or not flux_api_client:
logger.warning("FLUX API is not available")
return None
try:
logger.info(f"Generating FLUX image with prompt: {prompt[:100]}...")
result = flux_api_client.predict(
prompt=prompt,
width=768,
height=768,
guidance=3.5,
inference_steps=8,
seed=random.randint(1, 1000000),
do_img2img=False,
init_image=None,
image2image_strength=0.8,
resize_img=True,
api_name="/generate_image"
)
if isinstance(result, tuple) and len(result) > 0:
image_path = result[0]
if image_path and os.path.exists(image_path):
# Convert to PNG
with Image.open(image_path) as img:
png_tmp = tempfile.NamedTemporaryFile(delete=False, suffix=".png")
img.save(png_tmp.name, format="PNG")
logger.info(f"FLUX image generated and saved to {png_tmp.name}")
return png_tmp.name
return None
except Exception as e:
logger.error(f"Failed to generate FLUX image: {e}")
return None
def generate_ai_cover_image_via_flux(topic: str, slides_data: list) -> Optional[str]:
"""FLUX API๋ฅผ ํ†ตํ•ด AI ํ‘œ์ง€ ์ด๋ฏธ์ง€ ์ƒ์„ฑ"""
if not FLUX_API_ENABLED:
return None
prompt = generate_cover_image_prompt(topic, slides_data)
return generate_flux_image_via_api(prompt)
##############################################################################
# PPT Generation Functions - FIXED VERSION
##############################################################################
def parse_llm_ppt_response(response: str, layout_style: str = "consistent") -> list:
"""Parse LLM response to extract slide content - COMPLETELY FIXED VERSION"""
slides = []
# Debug: inspect the full response
logger.info(f"Parsing LLM response, total length: {len(response)}")
logger.debug(f"First 500 chars: {response[:500]}")
# Try JSON parsing first
try:
json_match = re.search(r'\[[\s\S]*\]', response)
if json_match:
slides_data = json.loads(json_match.group())
return slides_data
except:
pass
# Split by slide markers and process each section
# A more robust regex for detecting slide delimiters
slide_pattern = r'(?:^|\n)(?:์Šฌ๋ผ์ด๋“œ|Slide)\s*\d+|(?:^|\n)\d+[\.)](?:\s|$)'
# Split the response into slide sections
sections = re.split(slide_pattern, response, flags=re.MULTILINE | re.IGNORECASE)
# Remove a leading empty section
if sections and not sections[0].strip():
sections = sections[1:]
logger.info(f"Found {len(sections)} potential slide sections")
for idx, section in enumerate(sections):
if not section.strip():
continue
logger.debug(f"Processing section {idx}: {section[:100]}...")
slide = {
'title': '',
'content': '',
'notes': '',
'layout': 'title_content',
'chart_data': None
}
# Extract title, content, and notes within the section
lines = section.strip().split('\n')
current_part = None
title_lines = []
content_lines = []
notes_lines = []
for line in lines:
line = line.strip()
if not line:
continue
# Detect the title section
if line.startswith('์ œ๋ชฉ:') or line.startswith('Title:'):
current_part = 'title'
title_text = line.split(':', 1)[1].strip() if ':' in line else ''
if title_text:
title_lines.append(title_text)
# Detect the content section
elif line.startswith('๋‚ด์šฉ:') or line.startswith('Content:'):
current_part = 'content'
content_text = line.split(':', 1)[1].strip() if ':' in line else ''
if content_text:
content_lines.append(content_text)
# Detect the notes section
elif line.startswith('๋…ธํŠธ:') or line.startswith('Notes:') or line.startswith('๋ฐœํ‘œ์ž ๋…ธํŠธ:'):
current_part = 'notes'
notes_text = line.split(':', 1)[1].strip() if ':' in line else ''
if notes_text:
notes_lines.append(notes_text)
# Otherwise, route the line based on the current part
else:
if current_part == 'title' and not title_lines:
title_lines.append(line)
elif current_part == 'content':
content_lines.append(line)
elif current_part == 'notes':
notes_lines.append(line)
elif not current_part and not title_lines:
# Treat the first line as the title
title_lines.append(line)
current_part = 'content'  # subsequent lines go into the content
elif not current_part:
content_lines.append(line)
# Populate the slide data
slide['title'] = ' '.join(title_lines).strip()
slide['content'] = '\n'.join(content_lines).strip()
slide['notes'] = ' '.join(notes_lines).strip()
# Clean up the title
slide['title'] = re.sub(r'^(์Šฌ๋ผ์ด๋“œ|Slide)\s*\d+\s*[:๏ผš\-]?\s*', '', slide['title'], flags=re.IGNORECASE)
slide['title'] = re.sub(r'^(์ œ๋ชฉ|Title)\s*[:๏ผš]\s*', '', slide['title'], flags=re.IGNORECASE)
# Only keep slides that have a title or content
if slide['title'] or slide['content']:
logger.info(f"Slide {len(slides)+1}: Title='{slide['title'][:30]}...', Content length={len(slide['content'])}")
slides.append(slide)
# If the primary parsing produced nothing, try a simpler fallback
if not slides:
logger.warning("Primary parsing failed, trying fallback method...")
# Split on blank lines
sections = response.split('\n\n')
for section in sections:
lines = section.strip().split('\n')
if len(lines) >= 2:  # needs at least a title and some content
slide = {
'title': lines[0].strip(),
'content': '\n'.join(lines[1:]).strip(),
'notes': '',
'layout': 'title_content',
'chart_data': None
}
# Clean up the title
slide['title'] = re.sub(r'^(์Šฌ๋ผ์ด๋“œ|Slide)\s*\d+\s*[:๏ผš\-]?\s*', '', slide['title'], flags=re.IGNORECASE)
if slide['title'] and slide['content']:
slides.append(slide)
logger.info(f"Total slides parsed: {len(slides)}")
return slides
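# Expected LLM output sketch (illustrative), one block per slide, using the
# markers this parser recognizes:
#   Slide 1
#   Title: Introduction
#   Content: - first point
#   - second point
#   Notes: speaker notes here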
def force_font_size(text_frame, font_size_pt: int, theme: Dict):
"""Force font size for all paragraphs and runs in a text frame"""
if not text_frame:
return
try:
# Ensure paragraphs exist
if not hasattr(text_frame, 'paragraphs'):
return
for paragraph in text_frame.paragraphs:
try:
# Set paragraph level font
if hasattr(paragraph, 'font'):
paragraph.font.size = Pt(font_size_pt)
paragraph.font.name = theme['fonts']['body']
paragraph.font.color.rgb = theme['colors']['text']
# Set run level font (most important for actual rendering)
if hasattr(paragraph, 'runs'):
for run in paragraph.runs:
run.font.size = Pt(font_size_pt)
run.font.name = theme['fonts']['body']
run.font.color.rgb = theme['colors']['text']
# If paragraph has no runs but has text, create a run
if paragraph.text and (not hasattr(paragraph, 'runs') or len(paragraph.runs) == 0):
# Force creation of runs by modifying text
temp_text = paragraph.text
paragraph.text = temp_text # This creates runs
if hasattr(paragraph, 'runs'):
for run in paragraph.runs:
run.font.size = Pt(font_size_pt)
run.font.name = theme['fonts']['body']
run.font.color.rgb = theme['colors']['text']
except Exception as e:
logger.warning(f"Error setting font for paragraph: {e}")
continue
except Exception as e:
logger.warning(f"Error in force_font_size: {e}")
def apply_theme_to_slide(slide, theme: Dict, layout_type: str = 'title_content'):
"""Apply design theme to a slide with consistent styling"""
# Add colored background shape for all slides
bg_shape = slide.shapes.add_shape(
MSO_SHAPE.RECTANGLE, 0, 0, Inches(10), Inches(5.625)
)
bg_shape.fill.solid()
# Use lighter background for content slides
if layout_type in ['title_content', 'two_content', 'comparison']:
# Light background with subtle gradient effect
bg_shape.fill.fore_color.rgb = theme['colors']['background']
# Add accent strip at top
accent_strip = slide.shapes.add_shape(
MSO_SHAPE.RECTANGLE, 0, 0, Inches(10), Inches(0.5)
)
accent_strip.fill.solid()
accent_strip.fill.fore_color.rgb = theme['colors']['primary']
accent_strip.line.fill.background()
# Add bottom accent
bottom_strip = slide.shapes.add_shape(
MSO_SHAPE.RECTANGLE, 0, Inches(5.125), Inches(10), Inches(0.5)
)
bottom_strip.fill.solid()
bottom_strip.fill.fore_color.rgb = theme['colors']['secondary']
bottom_strip.fill.transparency = 0.7
bottom_strip.line.fill.background()
else:
# Section headers get primary color background
bg_shape.fill.fore_color.rgb = theme['colors']['primary']
bg_shape.line.fill.background()
# Move background shapes to back
slide.shapes._spTree.remove(bg_shape._element)
slide.shapes._spTree.insert(2, bg_shape._element)
# Apply title formatting if exists
if slide.shapes.title:
try:
title = slide.shapes.title
if title.text_frame and title.text_frame.paragraphs:
for paragraph in title.text_frame.paragraphs:
paragraph.font.name = theme['fonts']['title']
paragraph.font.bold = True
# UPDATED: Increased font sizes for better readability
if layout_type == 'section_header':
paragraph.font.size = Pt(28) # Increased from 20
paragraph.font.color.rgb = RGBColor(255, 255, 255)
paragraph.alignment = PP_ALIGN.CENTER
else:
paragraph.font.size = Pt(24) # Increased from 18
paragraph.font.color.rgb = theme['colors']['primary']
paragraph.alignment = PP_ALIGN.LEFT
except Exception as e:
logger.warning(f"Title formatting failed: {e}")
# Apply content formatting with improved readability
# NOTE: Do NOT add emojis here - they will be added in create_advanced_ppt_from_content
for shape in slide.shapes:
if shape.has_text_frame and shape != slide.shapes.title:
try:
text_frame = shape.text_frame
# Set text frame margins for better spacing
text_frame.margin_left = Inches(0.25)
text_frame.margin_right = Inches(0.25)
text_frame.margin_top = Inches(0.1)
text_frame.margin_bottom = Inches(0.1)
# Only apply font formatting, no content modification
if text_frame.text.strip():
# Use force_font_size helper to ensure font is applied
force_font_size(text_frame, 16, theme) # Increased from 12
for paragraph in text_frame.paragraphs:
# Add line spacing for better readability
paragraph.space_after = Pt(4) # Increased from 3
paragraph.line_spacing = 1.2 # Increased from 1.1
except Exception as e:
logger.warning(f"Content formatting failed: {e}")
def add_gradient_background(slide, color1: RGBColor, color2: RGBColor):
"""Add gradient-like background to slide using shapes"""
# Note: python-pptx doesn't directly support gradients in backgrounds,
# so we'll create a gradient effect using overlapping shapes
left = top = 0
width = Inches(10)
height = Inches(5.625)
# Add base color rectangle
shape1 = slide.shapes.add_shape(
MSO_SHAPE.RECTANGLE, left, top, width, height
)
shape1.fill.solid()
shape1.fill.fore_color.rgb = color1
shape1.line.fill.background()
# Add semi-transparent overlay for gradient effect
shape2 = slide.shapes.add_shape(
MSO_SHAPE.RECTANGLE, left, top, width, Inches(2.8)
)
shape2.fill.solid()
shape2.fill.fore_color.rgb = color2
shape2.fill.transparency = 0.5
shape2.line.fill.background()
# Move shapes to back
slide.shapes._spTree.remove(shape1._element)
slide.shapes._spTree.remove(shape2._element)
slide.shapes._spTree.insert(2, shape1._element)
slide.shapes._spTree.insert(3, shape2._element)
def add_decorative_shapes(slide, theme: Dict):
"""Add decorative shapes to enhance visual appeal"""
try:
# Add corner accent circle
shape1 = slide.shapes.add_shape(
MSO_SHAPE.OVAL,
Inches(9.3), Inches(4.8),
Inches(0.7), Inches(0.7)
)
shape1.fill.solid()
shape1.fill.fore_color.rgb = theme['colors']['accent']
shape1.fill.transparency = 0.3
shape1.line.fill.background()
# Add smaller accent
shape2 = slide.shapes.add_shape(
MSO_SHAPE.OVAL,
Inches(0.1), Inches(0.1),
Inches(0.4), Inches(0.4)
)
shape2.fill.solid()
shape2.fill.fore_color.rgb = theme['colors']['secondary']
shape2.fill.transparency = 0.5
shape2.line.fill.background()
except Exception as e:
logger.warning(f"Failed to add decorative shapes: {e}")
def create_chart_slide(slide, chart_data: Dict, theme: Dict):
"""Create a chart on the slide based on data"""
try:
# Add chart
x, y, cx, cy = Inches(1), Inches(2), Inches(8), Inches(4.5)
# Prepare chart data
chart_data_obj = CategoryChartData()
# Simple bar chart example
if 'columns' in chart_data and 'sample_data' in chart_data:
# Use first numeric column for chart
numeric_cols = []
for col in chart_data['columns']:
try:
# Check if column has numeric data
float(chart_data['sample_data'][0].get(col, 0))
numeric_cols.append(col)
except:
pass
if numeric_cols:
categories = [str(row.get(chart_data['columns'][0], ''))
for row in chart_data['sample_data'][:5]]
chart_data_obj.categories = categories
for col in numeric_cols[:3]: # Max 3 series
values = [float(row.get(col, 0))
for row in chart_data['sample_data'][:5]]
chart_data_obj.add_series(col, values)
chart = slide.shapes.add_chart(
XL_CHART_TYPE.COLUMN_CLUSTERED, x, y, cx, cy, chart_data_obj
).chart
# Style the chart
chart.has_legend = True
chart.legend.position = XL_LEGEND_POSITION.BOTTOM
except Exception as e:
logger.warning(f"Chart creation failed: {e}")
# If chart fails, add a text placeholder instead
textbox = slide.shapes.add_textbox(x, y, cx, cy)
text_frame = textbox.text_frame
text_frame.text = "๋ฐ์ดํ„ฐ ์ฐจํŠธ (์ฐจํŠธ ์ƒ์„ฑ ์‹คํŒจ)"
text_frame.paragraphs[0].font.size = Pt(16) # Increased font size
text_frame.paragraphs[0].font.color.rgb = theme['colors']['secondary']
def create_advanced_ppt_from_content(
slides_data: list,
topic: str,
theme_name: str,
include_charts: bool = False,
include_ai_image: bool = False,
include_diagrams: bool = False,
include_flux_images: bool = False
) -> str:
"""Create advanced PPT file with consistent visual design and AI-generated visuals"""
if not PPTX_AVAILABLE:
raise ImportError("python-pptx library is required")
prs = Presentation()
theme = DESIGN_THEMES.get(theme_name, DESIGN_THEMES['professional'])
# Set slide size (16:9)
prs.slide_width = Inches(10)
prs.slide_height = Inches(5.625)
# โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€
# 1) Create the title (cover) slide - revised layout
# โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€
title_slide_layout = prs.slide_layouts[0]
slide = prs.slides.add_slide(title_slide_layout)
# Background gradient
add_gradient_background(slide, theme['colors']['primary'], theme['colors']['secondary'])
# Place the title and subtitle at the top center first
title_shape = slide.shapes.title
subtitle_shape = slide.placeholders[1] if len(slide.placeholders) > 1 else None
if title_shape:
title_shape.left = Inches(0.5)
title_shape.width = prs.slide_width - Inches(1)
title_shape.top = Inches(0.8)  # place near the top
title_shape.height = Inches(1.2)
tf = title_shape.text_frame
tf.clear()
tf.text = topic
p = tf.paragraphs[0]
p.font.name = theme['fonts']['title']
p.font.size = Pt(36)
p.font.bold = True
p.font.color.rgb = RGBColor(255, 255, 255)
p.alignment = PP_ALIGN.CENTER
if subtitle_shape:
subtitle_shape.left = Inches(0.5)
subtitle_shape.width = prs.slide_width - Inches(1)
subtitle_shape.top = Inches(2.0)  # place below the title
subtitle_shape.height = Inches(0.9)
tf2 = subtitle_shape.text_frame
tf2.clear()
tf2.text = f"์ž๋™ ์ƒ์„ฑ๋œ ํ”„๋ ˆ์  ํ…Œ์ด์…˜ โ€ข ์ด {len(slides_data)}์žฅ"
p2 = tf2.paragraphs[0]
p2.font.name = theme['fonts']['subtitle']
p2.font.size = Pt(20)
p2.font.color.rgb = RGBColor(255, 255, 255)
p2.alignment = PP_ALIGN.CENTER
# Place the AI image on the right
if include_ai_image and FLUX_API_ENABLED:
logger.info("Generating AI cover image via FLUX API...")
ai_image_path = generate_ai_cover_image_via_flux(topic, slides_data)
if ai_image_path and os.path.exists(ai_image_path):
try:
img = Image.open(ai_image_path)
img_width, img_height = img.size
# Place the image on the right
max_width = Inches(4)
max_height = Inches(3.5)
ratio = img_height / img_width
img_w = max_width
img_h = max_width * ratio
if img_h > max_height:
img_h = max_height
img_w = max_height / ratio
# Right-side placement
left = prs.slide_width - img_w - Inches(0.5)
top = (prs.slide_height - img_h) / 2
pic = slide.shapes.add_picture(ai_image_path, left, top, width=img_w, height=img_h)
pic.shadow.inherit = False
pic.shadow.visible = True
pic.shadow.blur_radius = Pt(15)
pic.shadow.distance = Pt(8)
pic.shadow.angle = 45
# Add a small caption below the image
caption_box = slide.shapes.add_textbox(
left, top + img_h + Inches(0.1),
img_w, Inches(0.3)
)
caption_tf = caption_box.text_frame
caption_tf.text = "AI Generated"
caption_p = caption_tf.paragraphs[0]
caption_p.font.size = Pt(10)
caption_p.font.color.rgb = RGBColor(255, 255, 255)
caption_p.alignment = PP_ALIGN.CENTER
try:
os.unlink(ai_image_path)
except Exception as e:
logger.warning(f"Temp image delete failed: {e}")
except Exception as e:
logger.error(f"Failed to add cover image: {e}")
# Decorative elements
add_decorative_shapes(slide, theme)
# Add content slides with consistent design
for i, slide_data in enumerate(slides_data):
layout_type = slide_data.get('layout', 'title_content')
# Log slide creation
logger.info(f"Creating slide {i+1}: {slide_data.get('title', 'No title')}")
logger.debug(f"Content length: {len(slide_data.get('content', ''))}")
# Choose appropriate layout
if layout_type == 'section_header' and len(prs.slide_layouts) > 2:
slide_layout = prs.slide_layouts[2]
elif layout_type == 'two_content' and len(prs.slide_layouts) > 3:
slide_layout = prs.slide_layouts[3]
elif layout_type == 'comparison' and len(prs.slide_layouts) > 4:
slide_layout = prs.slide_layouts[4]
else:
slide_layout = prs.slide_layouts[1] if len(prs.slide_layouts) > 1 else prs.slide_layouts[0]
slide = prs.slides.add_slide(slide_layout)
# Apply theme to EVERY slide for consistency
apply_theme_to_slide(slide, theme, layout_type)
# Set title
if slide.shapes.title:
slide.shapes.title.text = slide_data.get('title', '์ œ๋ชฉ ์—†์Œ')
# IMMEDIATELY set title font size after setting text
try:
title_text_frame = slide.shapes.title.text_frame
if title_text_frame and title_text_frame.paragraphs:
for paragraph in title_text_frame.paragraphs:
if layout_type == 'section_header':
paragraph.font.size = Pt(28) # Increased from 20
paragraph.font.color.rgb = RGBColor(255, 255, 255)
paragraph.alignment = PP_ALIGN.CENTER
else:
paragraph.font.size = Pt(24) # Increased from 18
paragraph.font.color.rgb = theme['colors']['primary']
paragraph.font.bold = True
paragraph.font.name = theme['fonts']['title']
except Exception as e:
logger.warning(f"Title font sizing failed: {e}")
# Detect if this slide should have a diagram or image
slide_title = slide_data.get('title', '')
slide_content = slide_data.get('content', '')
# Detect conclusion/highlight slides
is_conclusion_slide = any(word in slide_title.lower() for word in
['๊ฒฐ๋ก ', 'conclusion', '์š”์•ฝ', 'summary', 'ํ•ต์‹ฌ', 'key',
'๋งˆ๋ฌด๋ฆฌ', 'closing', '์ •๋ฆฌ', 'takeaway', '์‹œ์‚ฌ์ ', 'implication'])
# Decide whether to generate a diagram or an image
should_add_visual = False
visual_type = None
# Conclusion slides always get a FLUX image
if is_conclusion_slide and include_flux_images and FLUX_API_ENABLED:
should_add_visual = True
visual_type = ('flux_conclusion', None)
elif include_diagrams:
diagram_type = detect_diagram_type(slide_title, slide_content)
if diagram_type:
should_add_visual = True
visual_type = ('diagram', diagram_type)
elif not should_add_visual and include_flux_images and i % 3 == 0:  # FLUX image on every third slide
should_add_visual = True
visual_type = ('flux', None)
# Apply a left/right layout when a visual element is present
if should_add_visual and layout_type not in ['section_header']:
# Text on the left
left_box = slide.shapes.add_textbox(
Inches(0.5), Inches(1.5), Inches(4.5), Inches(3.5)
)
left_tf = left_box.text_frame
left_tf.clear()
left_tf.text = slide_content
left_tf.word_wrap = True
force_font_size(left_tf, 14, theme)
# Apply emoji bullets
for paragraph in left_tf.paragraphs:
text = paragraph.text.strip()
if text and text.startswith(('-', 'โ€ข', 'โ—')) and not has_emoji(text):
clean_text = text.lstrip('-โ€ขโ— ')
emoji = get_emoji_for_content(clean_text)
paragraph.text = f"{emoji} {clean_text}"
force_font_size(left_tf, 14, theme)
# Visual element on the right
visual_added = False
if visual_type[0] == 'diagram':
# Generate a diagram
logger.info(f"Generating {visual_type[1]} for slide {i+1}")
diagram_json = generate_diagram_json(slide_title, slide_content, visual_type[1])
if diagram_json:
diagram_path = generate_diagram_via_api(diagram_json, visual_type[1])
if diagram_path and os.path.exists(diagram_path):
try:
# Add the diagram image
pic = slide.shapes.add_picture(
diagram_path,
Inches(5.2), Inches(1.5),
width=Inches(4.3), height=Inches(3.0)
)
visual_added = True
# Delete the temporary file
os.unlink(diagram_path)
except Exception as e:
logger.error(f"Failed to add diagram: {e}")
elif visual_type[0] == 'flux_conclusion':
# Generate a FLUX image for the conclusion slide
logger.info(f"Generating conclusion FLUX image for slide {i+1}")
conclusion_prompt = generate_conclusion_image_prompt(slide_title, slide_content)
flux_image_path = generate_flux_image_via_api(conclusion_prompt)
if flux_image_path and os.path.exists(flux_image_path):
try:
# Add the FLUX image
pic = slide.shapes.add_picture(
flux_image_path,
Inches(5.2), Inches(1.5),
width=Inches(4.3), height=Inches(3.0)
)
visual_added = True
# Add an image caption
caption_box = slide.shapes.add_textbox(
Inches(5.2), Inches(4.6), Inches(4.3), Inches(0.3)
)
caption_tf = caption_box.text_frame
caption_tf.text = "Key Takeaway Visualization"
caption_p = caption_tf.paragraphs[0]
caption_p.font.size = Pt(10)
caption_p.font.color.rgb = theme['colors']['secondary']
caption_p.alignment = PP_ALIGN.CENTER
# Delete the temporary file
os.unlink(flux_image_path)
except Exception as e:
logger.error(f"Failed to add conclusion FLUX image: {e}")
elif visual_type[0] == 'flux':
# Generate a FLUX image
logger.info(f"Generating FLUX image for slide {i+1}")
flux_prompt = generate_flux_prompt(slide_title, slide_content)
flux_image_path = generate_flux_image_via_api(flux_prompt)
if flux_image_path and os.path.exists(flux_image_path):
try:
# Add the FLUX image
pic = slide.shapes.add_picture(
flux_image_path,
Inches(5.2), Inches(1.5),
width=Inches(4.3), height=Inches(3.0)
)
visual_added = True
# Delete the temporary file
os.unlink(flux_image_path)
except Exception as e:
logger.error(f"Failed to add FLUX image: {e}")
# Add a placeholder if no visual element was added
if not visual_added:
placeholder_box = slide.shapes.add_textbox(
Inches(5.2), Inches(2.5), Inches(4.3), Inches(1.0)
)
placeholder_tf = placeholder_box.text_frame
placeholder_tf.text = f"{visual_type[1] if visual_type[0] == 'diagram' else 'Visual'} Placeholder"
placeholder_tf.paragraphs[0].font.size = Pt(14)
placeholder_tf.paragraphs[0].font.color.rgb = theme['colors']['secondary']
placeholder_tf.paragraphs[0].alignment = PP_ALIGN.CENTER
else:
# ๊ธฐ๋ณธ ๋ ˆ์ด์•„์›ƒ (์‹œ๊ฐ์  ์š”์†Œ ์—†์Œ)
if layout_type == 'section_header':
# Section header content handling
content = slide_data.get('content', '')
if content:
logger.info(f"Adding content to section header slide {i+1}: {content[:50]}...")
textbox = slide.shapes.add_textbox(
Inches(1), Inches(3.5), Inches(8), Inches(1.5)
)
tf = textbox.text_frame
tf.clear()
tf.text = content
tf.word_wrap = True
for paragraph in tf.paragraphs:
paragraph.font.name = theme['fonts']['body']
paragraph.font.size = Pt(16)
paragraph.font.color.rgb = RGBColor(255, 255, 255)
paragraph.alignment = PP_ALIGN.CENTER
# Add decorative line
line = slide.shapes.add_shape(
MSO_SHAPE.RECTANGLE, Inches(3), Inches(3.2), Inches(4), Pt(4)
)
line.fill.solid()
line.fill.fore_color.rgb = RGBColor(255, 255, 255)
line.line.fill.background()
elif layout_type == 'two_content':
content = slide_data.get('content', '')
if content:
logger.info(f"Creating two-column layout for slide {i+1}")
content_lines = content.split('\n')
mid_point = len(content_lines) // 2
# Left column
left_box = slide.shapes.add_textbox(
Inches(0.5), Inches(1.5), Inches(4.5), Inches(3.5)
)
left_tf = left_box.text_frame
left_tf.clear()
left_content = '\n'.join(content_lines[:mid_point])
if left_content:
left_tf.text = left_content
left_tf.word_wrap = True
force_font_size(left_tf, 14, theme)
# Apply emoji bullets
for paragraph in left_tf.paragraphs:
text = paragraph.text.strip()
if text and text.startswith(('-', 'โ€ข', 'โ—')) and not has_emoji(text):
clean_text = text.lstrip('-โ€ขโ— ')
emoji = get_emoji_for_content(clean_text)
paragraph.text = f"{emoji} {clean_text}"
force_font_size(left_tf, 14, theme)
# Right column
right_box = slide.shapes.add_textbox(
Inches(5), Inches(1.5), Inches(4.5), Inches(3.5)
)
right_tf = right_box.text_frame
right_tf.clear()
right_content = '\n'.join(content_lines[mid_point:])
if right_content:
right_tf.text = right_content
right_tf.word_wrap = True
force_font_size(right_tf, 14, theme)
# Apply emoji bullets
for paragraph in right_tf.paragraphs:
text = paragraph.text.strip()
if text and text.startswith(('-', 'โ€ข', 'โ—')) and not has_emoji(text):
clean_text = text.lstrip('-โ€ขโ— ')
emoji = get_emoji_for_content(clean_text)
paragraph.text = f"{emoji} {clean_text}"
force_font_size(right_tf, 14, theme)
else:
# Regular content
content = slide_data.get('content', '')
logger.info(f"Slide {i+1} - Content to add: '{content[:100]}...' (length: {len(content)})")
if include_charts and slide_data.get('chart_data'):
create_chart_slide(slide, slide_data['chart_data'], theme)
if content and content.strip():
textbox = slide.shapes.add_textbox(
Inches(0.5), # left
Inches(1.5), # top
Inches(9), # width
Inches(3.5) # height
)
tf = textbox.text_frame
tf.clear()
tf.text = content.strip()
tf.word_wrap = True
tf.margin_left = Inches(0.1)
tf.margin_right = Inches(0.1)
tf.margin_top = Inches(0.05)
tf.margin_bottom = Inches(0.05)
force_font_size(tf, 16, theme)
for p_idx, paragraph in enumerate(tf.paragraphs):
if paragraph.text.strip():
text = paragraph.text.strip()
if text.startswith(('-', 'โ€ข', 'โ—')) and not has_emoji(text):
clean_text = text.lstrip('-โ€ขโ— ')
emoji = get_emoji_for_content(clean_text)
paragraph.text = f"{emoji} {clean_text}"
if paragraph.runs:
for run in paragraph.runs:
run.font.size = Pt(16)
run.font.name = theme['fonts']['body']
run.font.color.rgb = theme['colors']['text']
else:
paragraph.font.size = Pt(16)
paragraph.font.name = theme['fonts']['body']
paragraph.font.color.rgb = theme['colors']['text']
paragraph.space_before = Pt(6)
paragraph.space_after = Pt(6)
paragraph.line_spacing = 1.3
logger.info(f"Successfully added content to slide {i+1}")
else:
logger.warning(f"Slide {i+1} has no content or empty content")
# Add slide notes if available
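# (Accessing slide.notes_slide below creates the notes page on demand in python-pptx, so no existence check is needed.)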
if slide_data.get('notes'):
try:
notes_slide = slide.notes_slide
notes_text_frame = notes_slide.notes_text_frame
notes_text_frame.text = slide_data.get('notes', '')
except Exception as e:
logger.warning(f"Failed to add slide notes: {e}")
# Add slide number with better design
slide_number_bg = slide.shapes.add_shape(
MSO_SHAPE.ROUNDED_RECTANGLE,
Inches(8.3), Inches(5.0), Inches(1.5), Inches(0.5)
)
slide_number_bg.fill.solid()
slide_number_bg.fill.fore_color.rgb = theme['colors']['primary']
slide_number_bg.fill.transparency = 0.8
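# NOTE: python-pptx exposes no public fill-transparency API, so the assignment above is effectively a no-op; real transparency would require editing the underlying XML alpha value.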
slide_number_bg.line.fill.background()
slide_number_box = slide.shapes.add_textbox(
Inches(8.3), Inches(5.05), Inches(1.5), Inches(0.4)
)
slide_number_frame = slide_number_box.text_frame
slide_number_frame.text = f"{i + 1} / {len(slides_data)}"
slide_number_frame.paragraphs[0].font.size = Pt(10) # Increased from 8
slide_number_frame.paragraphs[0].font.color.rgb = RGBColor(255, 255, 255)
slide_number_frame.paragraphs[0].font.bold = False
slide_number_frame.paragraphs[0].alignment = PP_ALIGN.CENTER
# Add subtle design element on alternating slides
if i % 2 == 0:
accent_shape = slide.shapes.add_shape(
MSO_SHAPE.OVAL,
Inches(9.6), Inches(0.1),
Inches(0.2), Inches(0.2)
)
accent_shape.fill.solid()
accent_shape.fill.fore_color.rgb = theme['colors']['accent']
accent_shape.line.fill.background()
# Add thank you slide with consistent design
thank_you_layout = prs.slide_layouts[5] if len(prs.slide_layouts) > 5 else prs.slide_layouts[0]
thank_you_slide = prs.slides.add_slide(thank_you_layout)
# Apply gradient background
add_gradient_background(thank_you_slide, theme['colors']['secondary'], theme['colors']['primary'])
if thank_you_slide.shapes.title:
thank_you_slide.shapes.title.text = "๊ฐ์‚ฌํ•ฉ๋‹ˆ๋‹ค"
try:
if thank_you_slide.shapes.title.text_frame and thank_you_slide.shapes.title.text_frame.paragraphs:
thank_you_slide.shapes.title.text_frame.paragraphs[0].font.size = Pt(36) # Increased from 28
thank_you_slide.shapes.title.text_frame.paragraphs[0].font.bold = True
thank_you_slide.shapes.title.text_frame.paragraphs[0].font.color.rgb = RGBColor(255, 255, 255)
thank_you_slide.shapes.title.text_frame.paragraphs[0].alignment = PP_ALIGN.CENTER
except Exception as e:
logger.warning(f"Thank you slide styling failed: {e}")
# Add contact or additional info placeholder
info_box = thank_you_slide.shapes.add_textbox(
Inches(2), Inches(3.5), Inches(6), Inches(1)
)
info_tf = info_box.text_frame
info_tf.text = "AI๋กœ ์ƒ์„ฑ๋œ ํ”„๋ ˆ์  ํ…Œ์ด์…˜"
info_tf.paragraphs[0].font.size = Pt(18) # Increased from 14
info_tf.paragraphs[0].font.color.rgb = RGBColor(255, 255, 255)
info_tf.paragraphs[0].alignment = PP_ALIGN.CENTER
# Save to temporary file
with tempfile.NamedTemporaryFile(delete=False, suffix=".pptx") as tmp_file:
prs.save(tmp_file.name)
return tmp_file.name
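# The .pptx is written to a NamedTemporaryFile with delete=False and its path is returned; Gradio serves that path for download, and cleanup is left to the OS temp directory.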
##############################################################################
# Streaming Response Handler for PPT Generation - IMPROVED VERSION
##############################################################################
def generate_ppt_content(topic: str, num_slides: int, additional_context: str, use_korean: bool = False, layout_style: str = "consistent") -> Iterator[str]:
"""Generate PPT content using LLM with clearer format"""
# Layout instructions based on style
layout_instructions = ""
if layout_style == "varied":
layout_instructions = """
์Šฌ๋ผ์ด๋“œ ๋ ˆ์ด์•„์›ƒ์„ ๋‹ค์–‘ํ•˜๊ฒŒ ๊ตฌ์„ฑํ•ด์ฃผ์„ธ์š”:
- ๋งค 5๋ฒˆ์งธ ์Šฌ๋ผ์ด๋“œ๋Š” '์„น์…˜ ๊ตฌ๋ถ„' ์Šฌ๋ผ์ด๋“œ๋กœ ๋งŒ๋“ค์–ด์ฃผ์„ธ์š”
- ๋น„๊ต๋‚˜ ๋Œ€์กฐ ๋‚ด์šฉ์ด ์žˆ์œผ๋ฉด '๋น„๊ต' ๋ ˆ์ด์•„์›ƒ์„ ์‚ฌ์šฉํ•˜์„ธ์š”
- ๋‚ด์šฉ์ด ๋งŽ์œผ๋ฉด 2๋‹จ ๊ตฌ์„ฑ์„ ๊ณ ๋ คํ•˜์„ธ์š”
"""
elif layout_style == "consistent":
layout_instructions = """
์ผ๊ด€๋œ ๋ ˆ์ด์•„์›ƒ์„ ์œ ์ง€ํ•ด์ฃผ์„ธ์š”:
- ๋ชจ๋“  ์Šฌ๋ผ์ด๋“œ๋Š” ๋™์ผํ•œ ๊ตฌ์กฐ๋กœ ์ž‘์„ฑ
- ์ œ๋ชฉ๊ณผ ๊ธ€๋จธ๋ฆฌ ๊ธฐํ˜ธ ํ˜•์‹ ํ†ต์ผ
- ๊ฐ„๊ฒฐํ•˜๊ณ  ๋ช…ํ™•ํ•œ ๊ตฌ์„ฑ
"""
# ๋” ๋ช…ํ™•ํ•œ ์‹œ์Šคํ…œ ํ”„๋กฌํ”„ํŠธ
if use_korean:
system_prompt = f"""๋‹น์‹ ์€ ์ „๋ฌธ์ ์ธ PPT ํ”„๋ ˆ์  ํ…Œ์ด์…˜ ์ž‘์„ฑ ์ „๋ฌธ๊ฐ€์ž…๋‹ˆ๋‹ค.
์ฃผ์–ด์ง„ ์ฃผ์ œ์— ๋Œ€ํ•ด ์ •ํ™•ํžˆ {num_slides}์žฅ์˜ ์Šฌ๋ผ์ด๋“œ ๋‚ด์šฉ์„ ์ž‘์„ฑํ•ด์ฃผ์„ธ์š”.
**๋ฐ˜๋“œ์‹œ ์•„๋ž˜ ํ˜•์‹์„ ์ •ํ™•ํžˆ ๋”ฐ๋ผ์ฃผ์„ธ์š”:**
์Šฌ๋ผ์ด๋“œ 1
์ œ๋ชฉ: [์Šฌ๋ผ์ด๋“œ ์ œ๋ชฉ - "์Šฌ๋ผ์ด๋“œ 1" ๊ฐ™์€ ๋ฒˆํ˜ธ๋Š” ํฌํ•จํ•˜์ง€ ๋งˆ์„ธ์š”]
๋‚ด์šฉ:
- ์ฒซ ๋ฒˆ์งธ ํ•ต์‹ฌ ํฌ์ธํŠธ
- ๋‘ ๋ฒˆ์งธ ํ•ต์‹ฌ ํฌ์ธํŠธ
- ์„ธ ๋ฒˆ์งธ ํ•ต์‹ฌ ํฌ์ธํŠธ
- ๋„ค ๋ฒˆ์งธ ํ•ต์‹ฌ ํฌ์ธํŠธ
- ๋‹ค์„ฏ ๋ฒˆ์งธ ํ•ต์‹ฌ ํฌ์ธํŠธ
๋…ธํŠธ: [๋ฐœํ‘œ์ž๊ฐ€ ์ด ์Šฌ๋ผ์ด๋“œ๋ฅผ ์„ค๋ช…ํ•  ๋•Œ ์‚ฌ์šฉํ•  ๊ตฌ์–ด์ฒด ์Šคํฌ๋ฆฝํŠธ]
์Šฌ๋ผ์ด๋“œ 2
์ œ๋ชฉ: [์Šฌ๋ผ์ด๋“œ ์ œ๋ชฉ]
๋‚ด์šฉ:
- ์ฒซ ๋ฒˆ์งธ ํ•ต์‹ฌ ํฌ์ธํŠธ
- ๋‘ ๋ฒˆ์งธ ํ•ต์‹ฌ ํฌ์ธํŠธ
- ์„ธ ๋ฒˆ์งธ ํ•ต์‹ฌ ํฌ์ธํŠธ
- ๋„ค ๋ฒˆ์งธ ํ•ต์‹ฌ ํฌ์ธํŠธ
- ๋‹ค์„ฏ ๋ฒˆ์งธ ํ•ต์‹ฌ ํฌ์ธํŠธ
๋…ธํŠธ: [๋ฐœํ‘œ ์Šคํฌ๋ฆฝํŠธ]
(์ด๋Ÿฐ ์‹์œผ๋กœ ์Šฌ๋ผ์ด๋“œ {num_slides}๊นŒ์ง€ ๊ณ„์†)
{layout_instructions}
**์ค‘์š” ์ง€์นจ:**
1. ๊ฐ ์Šฌ๋ผ์ด๋“œ๋Š” "์Šฌ๋ผ์ด๋“œ ์ˆซ์ž"๋กœ ์‹œ์ž‘
2. ์ œ๋ชฉ: ๋’ค์— ์‹ค์ œ ์ œ๋ชฉ ์ž‘์„ฑ (๋ฒˆํ˜ธ ์ œ์™ธ)
3. ๋‚ด์šฉ: ๋’ค์— ์ •ํ™•ํžˆ 5๊ฐœ์˜ ๊ธ€๋จธ๋ฆฌ ๊ธฐํ˜ธ ํฌ์ธํŠธ
4. ๋…ธํŠธ: ๋’ค์— ๋ฐœํ‘œ ์Šคํฌ๋ฆฝํŠธ
5. ๊ฐ ์„น์…˜ ์‚ฌ์ด์— ๋นˆ ์ค„ ์—†์Œ
6. ์ด {num_slides}์žฅ ์ž‘์„ฑ
7. ๊ฐ ํฌ์ธํŠธ๋Š” '-' ๊ธฐํ˜ธ๋กœ ์‹œ์ž‘ํ•˜์„ธ์š” (์ด๋ชจ์ง€๋Š” ์ž๋™์œผ๋กœ ์ถ”๊ฐ€๋ฉ๋‹ˆ๋‹ค)
8. ๋…ธํŠธ๋Š” ํ•ด๋‹น ์Šฌ๋ผ์ด๋“œ์˜ ๋‚ด์šฉ์„ ๋ฐœํ‘œ์ž๊ฐ€ ์ฒญ์ค‘์—๊ฒŒ ์„ค๋ช…ํ•˜๋Š” ๊ตฌ์–ด์ฒด ๋Œ€๋ณธ์œผ๋กœ ์ž‘์„ฑํ•˜์„ธ์š”"""
else:
system_prompt = f"""You are a professional PPT presentation expert.
Create content for exactly {num_slides} slides on the given topic.
**You MUST follow this exact format:**
Slide 1
Title: [Slide title - do NOT include "Slide 1" in the title]
Content:
- First key point
- Second key point
- Third key point
- Fourth key point
- Fifth key point
Notes: [Speaker script in conversational style for explaining this slide]
Slide 2
Title: [Slide title]
Content:
- First key point
- Second key point
- Third key point
- Fourth key point
- Fifth key point
Notes: [Speaker script]
(Continue this way until Slide {num_slides})
**Important instructions:**
1. Each slide starts with "Slide number"
2. Title: followed by the actual title (no numbers)
3. Content: followed by exactly 5 bullet points
4. Notes: followed by speaker script
5. No empty lines between sections
6. Create exactly {num_slides} slides
7. Start each point with '-' (emojis will be added automatically)
8. Notes should be a speaker script explaining the slide content in conversational style"""
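# The downstream parser (parse_llm_ppt_response, defined earlier in this file) is assumed to key off the
# literal "Slide N" / "Title:" / "Content:" / "Notes:" markers requested above, e.g.:
#   Slide 3
#   Title: Market Overview
#   Content:
#   - First key point
#   Notes: Speaker script...
# so any change to this prompt format should be mirrored in the parser.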
# Append reference material (uploaded file contents and/or web search results) to the system prompt
if additional_context:
system_prompt += f"\n\n์ฐธ๊ณ  ์ •๋ณด:\n{additional_context}"
# Prepare messages
user_prompt = f"์ฃผ์ œ: {topic}\n\n์œ„์—์„œ ์„ค๋ช…ํ•œ ํ˜•์‹์— ๋งž์ถฐ ์ •ํ™•ํžˆ {num_slides}์žฅ์˜ PPT ์Šฌ๋ผ์ด๋“œ ๋‚ด์šฉ์„ ์ž‘์„ฑํ•ด์ฃผ์„ธ์š”. ๊ฐ ์Šฌ๋ผ์ด๋“œ๋งˆ๋‹ค 5๊ฐœ์˜ ํ•ต์‹ฌ ํฌ์ธํŠธ์™€ ํ•จ๊ป˜, ๋ฐœํ‘œ์ž๊ฐ€ ์ฒญ์ค‘์—๊ฒŒ ํ•ด๋‹น ๋‚ด์šฉ์„ ์„ค๋ช…ํ•˜๋Š” ๊ตฌ์–ด์ฒด ๋ฐœํ‘œ ๋Œ€๋ณธ์„ ๋…ธํŠธ๋กœ ์ž‘์„ฑํ•ด์ฃผ์„ธ์š”."
if not use_korean:
user_prompt = f"Topic: {topic}\n\nPlease create exactly {num_slides} PPT slides following the format described above. Include exactly 5 key points per slide, and write speaker notes as a conversational script explaining the content to the audience."
messages = [
{"role": "system", "content": system_prompt},
{"role": "user", "content": user_prompt}
]
# Call LLM API
headers = {
"Authorization": f"Bearer {FRIENDLI_TOKEN}",
"Content-Type": "application/json"
}
payload = {
"model": FRIENDLI_MODEL_ID,
"messages": messages,
"max_tokens": min(4000, num_slides * 300), # More tokens for 5 points + notes
"top_p": 0.9,
"temperature": 0.8,
"stream": True,
"stream_options": {
"include_usage": True
}
}
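# The Friendli endpoint is assumed to stream OpenAI-compatible Server-Sent Events, i.e. lines such as
#   data: {"choices": [{"delta": {"content": "..."}}]}
# followed by a terminating
#   data: [DONE]
# which is exactly what the loop below accumulates.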
try:
response = requests.post(
FRIENDLI_API_URL,
headers=headers,
json=payload,
stream=True,
timeout=60
)
response.raise_for_status()
full_response = ""
for line in response.iter_lines():
if line:
line_text = line.decode('utf-8')
if line_text.startswith("data: "):
data_str = line_text[6:]
if data_str == "[DONE]":
break
try:
data = json.loads(data_str)
if "choices" in data and len(data["choices"]) > 0:
delta = data["choices"][0].get("delta", {})
content = delta.get("content", "")
if content:
full_response += content
yield full_response
except json.JSONDecodeError:
logger.warning(f"JSON parsing failed: {data_str}")
continue
except Exception as e:
logger.error(f"LLM API error: {str(e)}")
yield f"โš ๏ธ Error generating content: {str(e)}"
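# Usage sketch: the generator yields the progressively accumulated text, so callers simply keep the last value, e.g.
#   final_text = ""
#   for partial in generate_ppt_content("AI trends", 5, "", use_korean=False):
#       final_text = partial
# (this mirrors how generate_ppt below consumes it).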
##############################################################################
# Main PPT Generation Function - IMPROVED VERSION with Enhanced Features
##############################################################################
def generate_ppt(
topic: str,
num_slides: int = 10,
use_web_search: bool = False,
use_korean: bool = True,
reference_files: list = None,
design_theme: str = "professional",
font_style: str = "modern",
layout_style: str = "consistent",
include_charts: bool = False,
include_ai_image: bool = False,
include_diagrams: bool = False,
include_flux_images: bool = False
) -> Iterator[tuple]:
"""Main function to generate PPT with advanced design and enhanced visuals"""
if not PPTX_AVAILABLE:
# This function is a generator, so validation errors must be yielded; a bare `return value` would be swallowed and nothing would reach the UI
yield None, "โŒ python-pptx ๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ๊ฐ€ ์„ค์น˜๋˜์ง€ ์•Š์•˜์Šต๋‹ˆ๋‹ค.\n\n์„ค์น˜ ๋ช…๋ น: pip install python-pptx", ""
return
if not topic.strip():
yield None, "โŒ PPT ์ฃผ์ œ๋ฅผ ์ž…๋ ฅํ•ด์ฃผ์„ธ์š”.", ""
return
if num_slides < 3 or num_slides > 20:
yield None, "โŒ ์Šฌ๋ผ์ด๋“œ ์ˆ˜๋Š” 3์žฅ ์ด์ƒ 20์žฅ ์ดํ•˜๋กœ ์„ค์ •ํ•ด์ฃผ์„ธ์š”.", ""
return
try:
# Initialize the FLUX API (for the cover image)
if include_ai_image and not FLUX_API_ENABLED:
yield None, "๐Ÿ”„ FLUX ์ด๋ฏธ์ง€ ์ƒ์„ฑ API์— ์—ฐ๊ฒฐํ•˜๋Š” ์ค‘...", ""
if initialize_flux_api():
yield None, "โœ… FLUX API ์—ฐ๊ฒฐ ์„ฑ๊ณต!", ""
else:
include_ai_image = False
yield None, "โš ๏ธ FLUX API ์—ฐ๊ฒฐ ์‹คํŒจ. AI ์ด๋ฏธ์ง€ ์—†์ด ์ง„ํ–‰ํ•ฉ๋‹ˆ๋‹ค.", ""
# Initialize the diagram generation API
if include_diagrams and not DIAGRAM_API_ENABLED:
yield None, "๐Ÿ”„ ๋‹ค์ด์–ด๊ทธ๋žจ ์ƒ์„ฑ API์— ์—ฐ๊ฒฐํ•˜๋Š” ์ค‘...", ""
if initialize_diagram_api():
yield None, "โœ… ๋‹ค์ด์–ด๊ทธ๋žจ API ์—ฐ๊ฒฐ ์„ฑ๊ณต!", ""
else:
include_diagrams = False
yield None, "โš ๏ธ ๋‹ค์ด์–ด๊ทธ๋žจ API ์—ฐ๊ฒฐ ์‹คํŒจ. ๋‹ค์ด์–ด๊ทธ๋žจ ์—†์ด ์ง„ํ–‰ํ•ฉ๋‹ˆ๋‹ค.", ""
# Initialize the FLUX API (for per-slide images)
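# (Cover images and per-slide images share the same FLUX client, so this second initialization only runs if the cover-image step above was skipped or failed.)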
if include_flux_images and not FLUX_API_ENABLED:
yield None, "๐Ÿ”„ FLUX ์ด๋ฏธ์ง€ ์ƒ์„ฑ API์— ์—ฐ๊ฒฐํ•˜๋Š” ์ค‘...", ""
if initialize_flux_api():
yield None, "โœ… FLUX API ์—ฐ๊ฒฐ ์„ฑ๊ณต!", ""
else:
include_flux_images = False
yield None, "โš ๏ธ FLUX API ์—ฐ๊ฒฐ ์‹คํŒจ. ์Šฌ๋ผ์ด๋“œ ์ด๋ฏธ์ง€ ์—†์ด ์ง„ํ–‰ํ•ฉ๋‹ˆ๋‹ค.", ""
# Process reference files if provided
additional_context = ""
chart_data = None
if reference_files:
file_contents = []
for file_path in reference_files:
if file_path.lower().endswith(".csv"):
csv_content = analyze_csv_file(file_path)
file_contents.append(csv_content)
# Extract chart data if available
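# (chart_data is assumed to be the JSON payload that analyze_csv_file appends after its "CHART_DATA:" marker, in whatever shape create_chart_slide expects, e.g. category labels plus numeric series.)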
if "CHART_DATA:" in csv_content:
chart_json = csv_content.split("CHART_DATA:")[1]
try:
chart_data = json.loads(chart_json)
except json.JSONDecodeError:
logger.warning("CHART_DATA block in CSV analysis is not valid JSON; continuing without chart data")
elif file_path.lower().endswith(".txt"):
file_contents.append(analyze_txt_file(file_path))
elif file_path.lower().endswith(".pdf"):
file_contents.append(pdf_to_markdown(file_path))
if file_contents:
additional_context = "\n\n".join(file_contents)
# Web search if enabled
if use_web_search:
search_query = extract_keywords(topic, top_k=5)
search_results = do_web_search(search_query, use_korean=use_korean)
if not search_results.startswith("Web search"):
additional_context += f"\n\n{search_results}"
# Generate PPT content
llm_response = ""
for response in generate_ppt_content(topic, num_slides, additional_context, use_korean, layout_style):
llm_response = response
yield None, f"๐Ÿ“ ์ƒ์„ฑ ์ค‘...\n\n{response}", response
# Parse LLM response
slides_data = parse_llm_ppt_response(llm_response, layout_style)
# Debug: log the parsed content of each slide
logger.info("=== Parsed Slides Debug Info ===")
for i, slide in enumerate(slides_data):
logger.info(f"Slide {i+1}:")
logger.info(f" Title: {slide.get('title', 'NO TITLE')}")
logger.info(f" Content: {slide.get('content', 'NO CONTENT')[:100]}...")
logger.info(f" Content Length: {len(slide.get('content', ''))}")
logger.info("---")
# Add chart data to relevant slides if available
if chart_data and include_charts:
for slide in slides_data:
if '๋ฐ์ดํ„ฐ' in slide.get('title', '') or 'data' in slide.get('title', '').lower():
slide['chart_data'] = chart_data
break
# Debug logging
logger.info(f"Parsed {len(slides_data)} slides from LLM response")
logger.info(f"Design theme: {design_theme}, Layout style: {layout_style}")
logger.info(f"Include diagrams: {include_diagrams}, Include FLUX images: {include_flux_images}")
if not slides_data:
# Show the raw response for debugging
error_msg = "โŒ PPT ๋‚ด์šฉ ํŒŒ์‹ฑ์— ์‹คํŒจํ–ˆ์Šต๋‹ˆ๋‹ค.\n\n"
error_msg += "LLM ์‘๋‹ต์„ ํ™•์ธํ•ด์ฃผ์„ธ์š”:\n"
error_msg += "=" * 50 + "\n"
error_msg += llm_response[:500] + "..." if len(llm_response) > 500 else llm_response
yield None, error_msg, llm_response
return
# Notify the user which AI visuals (cover image, diagrams, slide images) are being generated
visual_features = []
if include_ai_image and FLUX_API_ENABLED:
visual_features.append("AI 3D ํ‘œ์ง€ ์ด๋ฏธ์ง€")
if include_diagrams and DIAGRAM_API_ENABLED:
visual_features.append("๋‹ค์ด์–ด๊ทธ๋žจ")
if include_flux_images and FLUX_API_ENABLED:
visual_features.append("AI ์ƒ์„ฑ ์ด๋ฏธ์ง€")
if visual_features:
yield None, f"๐Ÿ“ ์Šฌ๋ผ์ด๋“œ ์ƒ์„ฑ ์™„๋ฃŒ!\n\n๐ŸŽจ ์ƒ์„ฑ ์ค‘: {', '.join(visual_features)}... (์‹œ๊ฐ„์ด ์†Œ์š”๋  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค)", llm_response
# Create PPT file with advanced design
ppt_path = create_advanced_ppt_from_content(
slides_data,
topic,
design_theme,
include_charts,
include_ai_image,
include_diagrams,
include_flux_images
)
success_msg = f"โœ… PPT ํŒŒ์ผ์ด ์„ฑ๊ณต์ ์œผ๋กœ ์ƒ์„ฑ๋˜์—ˆ์Šต๋‹ˆ๋‹ค!\n\n"
success_msg += f"๐Ÿ“Š ์ฃผ์ œ: {topic}\n"
success_msg += f"๐Ÿ“„ ์Šฌ๋ผ์ด๋“œ ์ˆ˜: {len(slides_data)}์žฅ\n"
success_msg += f"๐ŸŽจ ๋””์ž์ธ ํ…Œ๋งˆ: {DESIGN_THEMES[design_theme]['name']}\n"
success_msg += f"๐Ÿ“ ๋ ˆ์ด์•„์›ƒ ์Šคํƒ€์ผ: {layout_style}\n"
if include_ai_image and FLUX_API_ENABLED:
success_msg += f"๐Ÿ–ผ๏ธ AI ์ƒ์„ฑ ํ‘œ์ง€ ์ด๋ฏธ์ง€ ํฌํ•จ\n"
if include_diagrams and DIAGRAM_API_ENABLED:
success_msg += f"๐Ÿ“Š AI ์ƒ์„ฑ ๋‹ค์ด์–ด๊ทธ๋žจ ํฌํ•จ\n"
if include_flux_images and FLUX_API_ENABLED:
success_msg += f"๐ŸŽจ AI ์ƒ์„ฑ ์Šฌ๋ผ์ด๋“œ ์ด๋ฏธ์ง€ ํฌํ•จ\n"
success_msg += f"๐Ÿ“ ์ƒ์„ฑ๋œ ์Šฌ๋ผ์ด๋“œ:\n"
for i, slide in enumerate(slides_data[:5]): # Show first 5 slides
success_msg += f" {i+1}. {slide.get('title', '์ œ๋ชฉ ์—†์Œ')} [{slide.get('layout', 'standard')}]\n"
if slide.get('notes'):
success_msg += f" ๐Ÿ’ก ๋…ธํŠธ: {slide.get('notes', '')[:50]}...\n"
if len(slides_data) > 5:
success_msg += f" ... ์™ธ {len(slides_data) - 5}์žฅ"
yield ppt_path, success_msg, llm_response
except Exception as e:
logger.error(f"PPT generation error: {str(e)}")
import traceback
error_details = traceback.format_exc()
logger.error(f"Error details: {error_details}")
yield None, f"โŒ PPT ์ƒ์„ฑ ์ค‘ ์˜ค๋ฅ˜ ๋ฐœ์ƒ: {str(e)}\n\n์ƒ์„ธ ์˜ค๋ฅ˜:\n{error_details}", ""
##############################################################################
# Gradio UI
##############################################################################
css = """
/* Full width UI */
.gradio-container {
background: rgba(255, 255, 255, 0.98);
padding: 40px 50px;
margin: 30px auto;
width: 100% !important;
max-width: 1400px !important;
border-radius: 20px;
box-shadow: 0 10px 30px rgba(0, 0, 0, 0.1);
}
/* Background */
body {
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
margin: 0;
padding: 0;
font-family: 'Segoe UI', 'Helvetica Neue', Arial, sans-serif;
}
/* Title styling */
h1 {
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
background-clip: text;
-webkit-background-clip: text;
-webkit-text-fill-color: transparent;
font-weight: 700;
margin-bottom: 10px;
}
/* Button styles */
button.primary {
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
border: none;
color: white !important;
font-weight: 600;
padding: 15px 30px !important;
font-size: 18px !important;
transition: all 0.3s ease;
text-transform: uppercase;
letter-spacing: 1px;
}
button.primary:hover {
transform: translateY(-3px);
box-shadow: 0 8px 20px rgba(102, 126, 234, 0.4);
}
/* Input styles */
.textbox, textarea, input[type="text"], input[type="number"] {
border: 2px solid #e5e7eb;
border-radius: 12px;
padding: 15px;
font-size: 16px;
transition: all 0.3s ease;
background: white;
}
.textbox:focus, textarea:focus, input[type="text"]:focus {
border-color: #667eea;
outline: none;
box-shadow: 0 0 0 3px rgba(102, 126, 234, 0.1);
}
/* Card style */
.card {
background: white;
border-radius: 16px;
padding: 25px;
box-shadow: 0 4px 15px rgba(0, 0, 0, 0.08);
margin-bottom: 25px;
border: 1px solid rgba(102, 126, 234, 0.1);
}
/* Dropdown styles */
.dropdown {
border: 2px solid #e5e7eb;
border-radius: 12px;
padding: 12px;
background: white;
transition: all 0.3s ease;
}
.dropdown:hover {
border-color: #667eea;
}
/* Slider styles */
.gr-slider input[type="range"] {
background: linear-gradient(to right, #667eea 0%, #764ba2 100%);
height: 8px;
border-radius: 4px;
}
/* File upload area */
.file-upload {
border: 3px dashed #667eea;
border-radius: 16px;
padding: 40px;
text-align: center;
transition: all 0.3s ease;
background: rgba(102, 126, 234, 0.02);
}
.file-upload:hover {
border-color: #764ba2;
background: rgba(102, 126, 234, 0.05);
transform: scale(1.01);
}
/* Checkbox styles */
input[type="checkbox"] {
width: 20px;
height: 20px;
margin-right: 10px;
cursor: pointer;
}
/* Tab styles */
.tabs {
border-radius: 12px;
overflow: hidden;
margin-bottom: 20px;
}
.tab-nav {
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
padding: 5px;
}
.tab-nav button {
background: transparent;
color: white;
border: none;
padding: 10px 20px;
margin: 0 5px;
border-radius: 8px;
transition: all 0.3s ease;
}
.tab-nav button.selected {
background: white;
color: #667eea;
}
/* Section headers */
.section-header {
font-size: 20px;
font-weight: 600;
color: #667eea;
margin: 20px 0 15px 0;
padding-bottom: 10px;
border-bottom: 2px solid rgba(102, 126, 234, 0.2);
}
/* Status box styling */
.status-box {
background: linear-gradient(135deg, rgba(102, 126, 234, 0.1) 0%, rgba(118, 75, 162, 0.1) 100%);
border-radius: 12px;
padding: 20px;
}
/* Preview box styling */
.preview-box {
background: #f8f9fa;
border-radius: 12px;
padding: 20px;
font-family: 'Courier New', monospace;
font-size: 13px;
line-height: 1.5;
max-height: 500px;
overflow-y: auto;
}
"""
with gr.Blocks(css=css, title="AI PPT Generator Pro") as demo:
gr.Markdown(
"""
# ๐ŸŽฏ AI ๊ธฐ๋ฐ˜ PPT ์ž๋™ ์ƒ์„ฑ ์‹œ์Šคํ…œ Pro
๊ณ ๊ธ‰ ๋””์ž์ธ ํ…Œ๋งˆ์™€ ๋ ˆ์ด์•„์›ƒ์„ ํ™œ์šฉํ•œ ์ „๋ฌธ์ ์ธ ํ”„๋ ˆ์  ํ…Œ์ด์…˜์„ ์ž๋™์œผ๋กœ ์ƒ์„ฑํ•ฉ๋‹ˆ๋‹ค.
FLUX AI๋กœ ์ƒ์„ฑํ•œ ๊ณ ํ’ˆ์งˆ ์ด๋ฏธ์ง€์™€ ๋‹ค์ด์–ด๊ทธ๋žจ์„ ํฌํ•จํ•˜์—ฌ ์‹œ๊ฐ์ ์œผ๋กœ ํ’๋ถ€ํ•œ PPT๋ฅผ ๋งŒ๋“ญ๋‹ˆ๋‹ค.
"""
)
with gr.Row():
with gr.Column(scale=2):
topic_input = gr.Textbox(
label="๐Ÿ“Œ PPT ์ฃผ์ œ",
placeholder="์˜ˆ: ์ธ๊ณต์ง€๋Šฅ์˜ ๋ฏธ๋ž˜์™€ ์‚ฐ์—… ์ ์šฉ ์‚ฌ๋ก€",
lines=2,
elem_classes="card"
)
with gr.Row():
with gr.Column():
num_slides = gr.Slider(
label="๐Ÿ“„ ์Šฌ๋ผ์ด๋“œ ์ˆ˜",
minimum=3,
maximum=20,
step=1,
value=10,
info="์ƒ์„ฑํ•  ์Šฌ๋ผ์ด๋“œ ๊ฐœ์ˆ˜ (3-20์žฅ)"
)
with gr.Column():
use_korean = gr.Checkbox(
label="๐Ÿ‡ฐ๐Ÿ‡ท ํ•œ๊ตญ์–ด",
value=True,
info="ํ•œ๊ตญ์–ด๋กœ ์ƒ์„ฑ"
)
use_web_search = gr.Checkbox(
label="๐Ÿ” ์›น ๊ฒ€์ƒ‰",
value=False,
info="์ตœ์‹  ์ •๋ณด ๊ฒ€์ƒ‰"
)
# Design Options Section
gr.Markdown("<div class='section-header'>๐ŸŽจ ๋””์ž์ธ ์˜ต์…˜</div>")
with gr.Row():
design_theme = gr.Dropdown(
label="๋””์ž์ธ ํ…Œ๋งˆ",
choices=[
("ํ”„๋กœํŽ˜์…”๋„ (ํŒŒ๋ž‘/ํšŒ์ƒ‰)", "professional"),
("๋ชจ๋˜ (๋ณด๋ผ/ํ•‘ํฌ)", "modern"),
("์ž์—ฐ (์ดˆ๋ก/๊ฐˆ์ƒ‰)", "nature"),
("ํฌ๋ฆฌ์—์ดํ‹ฐ๋ธŒ (๋‹ค์ฑ„๋กœ์šด)", "creative"),
("๋ฏธ๋‹ˆ๋ฉ€ (ํ‘๋ฐฑ)", "minimal")
],
value="professional",
elem_classes="dropdown"
)
layout_style = gr.Dropdown(
label="๋ ˆ์ด์•„์›ƒ ์Šคํƒ€์ผ",
choices=[
("์ผ๊ด€๋œ ๋ ˆ์ด์•„์›ƒ", "consistent"),
("๋‹ค์–‘ํ•œ ๋ ˆ์ด์•„์›ƒ", "varied"),
("๋ฏธ๋‹ˆ๋ฉ€ ๋ ˆ์ด์•„์›ƒ", "minimal")
],
value="consistent",
elem_classes="dropdown"
)
with gr.Row():
font_style = gr.Dropdown(
label="ํฐํŠธ ์Šคํƒ€์ผ",
choices=[
("๋ชจ๋˜", "modern"),
("ํด๋ž˜์‹", "classic"),
("์บ์ฃผ์–ผ", "casual")
],
value="modern",
elem_classes="dropdown"
)
include_charts = gr.Checkbox(
label="๐Ÿ“Š ์ฐจํŠธ ํฌํ•จ",
value=False,
info="CSV ๋ฐ์ดํ„ฐ๊ฐ€ ์žˆ์„ ๊ฒฝ์šฐ ์ฐจํŠธ ์ƒ์„ฑ"
)
# Visual Enhancement Options
gr.Markdown("<div class='section-header'>๐Ÿ–ผ๏ธ ์‹œ๊ฐ์  ํ–ฅ์ƒ ์˜ต์…˜</div>")
with gr.Row():
include_ai_image = gr.Checkbox(
label="๐Ÿ–ผ๏ธ AI ํ‘œ์ง€ ์ด๋ฏธ์ง€",
value=False,
info="FLUX๋กœ ์ƒ์„ฑํ•œ ํ‘œ์ง€ ์ด๋ฏธ์ง€ ์ถ”๊ฐ€"
)
include_diagrams = gr.Checkbox(
label="๐Ÿ“Š AI ๋‹ค์ด์–ด๊ทธ๋žจ",
value=False,
info="๋‚ด์šฉ์— ๋งž๋Š” ๋‹ค์ด์–ด๊ทธ๋žจ ์ž๋™ ์ƒ์„ฑ"
)
include_flux_images = gr.Checkbox(
label="๐ŸŽจ ์Šฌ๋ผ์ด๋“œ ์ด๋ฏธ์ง€",
value=False,
info="์ผ๋ถ€ ์Šฌ๋ผ์ด๋“œ์— FLUX ์ด๋ฏธ์ง€ ์ถ”๊ฐ€"
)
reference_files = gr.File(
label="๐Ÿ“Ž ์ฐธ๊ณ  ์ž๋ฃŒ (์„ ํƒ์‚ฌํ•ญ)",
file_types=[".pdf", ".csv", ".txt"],
file_count="multiple",
elem_classes="file-upload"
)
generate_btn = gr.Button(
"๐Ÿš€ PPT ์ƒ์„ฑํ•˜๊ธฐ",
variant="primary",
size="lg"
)
with gr.Column(scale=1):
download_file = gr.File(
label="๐Ÿ“ฅ ์ƒ์„ฑ๋œ PPT ๋‹ค์šด๋กœ๋“œ",
interactive=False,
elem_classes="card"
)
status_text = gr.Textbox(
label="๐Ÿ“Š ์ƒ์„ฑ ์ƒํƒœ",
lines=10,
interactive=False,
elem_classes="status-box"
)
with gr.Row():
content_preview = gr.Textbox(
label="๐Ÿ“ ์ƒ์„ฑ๋œ ๋‚ด์šฉ ๋ฏธ๋ฆฌ๋ณด๊ธฐ",
lines=20,
interactive=False,
visible=True,
elem_classes="preview-box"
)
gr.Markdown(
"""
### ๐Ÿ“‹ ์‚ฌ์šฉ ๋ฐฉ๋ฒ•
1. **PPT ์ฃผ์ œ ์ž…๋ ฅ**: ๊ตฌ์ฒด์ ์ธ ์ฃผ์ œ์ผ์ˆ˜๋ก ๋” ์ข‹์€ ๊ฒฐ๊ณผ
2. **์Šฌ๋ผ์ด๋“œ ์ˆ˜ ์„ ํƒ**: 3-20์žฅ ๋ฒ”์œ„์—์„œ ์„ ํƒ
3. **๋””์ž์ธ ํ…Œ๋งˆ ์„ ํƒ**: 5๊ฐ€์ง€ ์ „๋ฌธ์ ์ธ ํ…Œ๋งˆ ์ค‘ ์„ ํƒ
4. **์‹œ๊ฐ์  ์˜ต์…˜ ์„ค์ •**: AI ์ด๋ฏธ์ง€, ๋‹ค์ด์–ด๊ทธ๋žจ, FLUX ์ด๋ฏธ์ง€ ์ถ”๊ฐ€
5. **์ฐธ๊ณ  ์ž๋ฃŒ ์—…๋กœ๋“œ**: PDF, CSV, TXT ํŒŒ์ผ ์ง€์›
6. **์ƒ์„ฑ ๋ฒ„ํŠผ ํด๋ฆญ**: AI๊ฐ€ ์ž๋™์œผ๋กœ PPT ์ƒ์„ฑ
### ๐ŸŽจ ์ƒˆ๋กœ์šด ๊ธฐ๋Šฅ
- **FLUX AI ํ‘œ์ง€ ์ด๋ฏธ์ง€**: FLUX API๋ฅผ ํ†ตํ•œ ๊ณ ํ’ˆ์งˆ ํ‘œ์ง€ ์ด๋ฏธ์ง€ (์šฐ์ธก ๋ฐฐ์น˜)
- **AI ๋‹ค์ด์–ด๊ทธ๋žจ**: ๋‚ด์šฉ ๋ถ„์„ ํ›„ ์ ์ ˆํ•œ ๋‹ค์ด์–ด๊ทธ๋žจ ์ž๋™ ์ƒ์„ฑ
- **FLUX ์Šฌ๋ผ์ด๋“œ ์ด๋ฏธ์ง€**: ์ฃผ์š” ์Šฌ๋ผ์ด๋“œ์— AI ์ƒ์„ฑ ์ด๋ฏธ์ง€ ์ถ”๊ฐ€
- **๊ฒฐ๋ก  ์Šฌ๋ผ์ด๋“œ ๊ฐ•์กฐ**: ๊ฒฐ๋ก /์š”์•ฝ ์Šฌ๋ผ์ด๋“œ์— ํŠน๋ณ„ํ•œ FLUX ์ด๋ฏธ์ง€ ์ถ”๊ฐ€
- **์ขŒ-์šฐ ๋ ˆ์ด์•„์›ƒ**: ํ…์ŠคํŠธ๋Š” ์ขŒ์ธก, ์‹œ๊ฐ์  ์š”์†Œ๋Š” ์šฐ์ธก ๋ฐฐ์น˜
### ๐Ÿ’ก ๊ณ ๊ธ‰ ํŒ
- ํ‘œ์ง€์˜ ์ œ๋ชฉ/๋ถ€์ œ๋Š” ์ค‘์•™ ์ƒ๋‹จ, AI ์ด๋ฏธ์ง€๋Š” ์šฐ์ธก์— ๋ฐฐ์น˜๋ฉ๋‹ˆ๋‹ค
- ๊ฒฐ๋ก /์š”์•ฝ ์Šฌ๋ผ์ด๋“œ๋Š” ์ž๋™์œผ๋กœ ๊ฐ์ง€๋˜์–ด ํŠน๋ณ„ํ•œ ์ด๋ฏธ์ง€๊ฐ€ ์ถ”๊ฐ€๋ฉ๋‹ˆ๋‹ค
- FLUX ์ด๋ฏธ์ง€๋Š” ์Šฌ๋ผ์ด๋“œ ๋‚ด์šฉ์„ ๋ถ„์„ํ•˜์—ฌ ๊ด€๋ จ ์ด๋ฏธ์ง€๋ฅผ ์ƒ์„ฑํ•ฉ๋‹ˆ๋‹ค
"""
)
# Examples
gr.Examples(
examples=[
["์ธ๊ณต์ง€๋Šฅ์˜ ๋ฏธ๋ž˜์™€ ์‚ฐ์—… ์ ์šฉ ์‚ฌ๋ก€", 10, False, True, [], "professional", "modern", "consistent", False, True, True, False],
["2024๋…„ ๋””์ง€ํ„ธ ๋งˆ์ผ€ํŒ… ํŠธ๋ Œ๋“œ", 12, True, True, [], "modern", "modern", "consistent", False, True, True, True],
["๊ธฐํ›„๋ณ€ํ™”์™€ ์ง€์†๊ฐ€๋Šฅํ•œ ๋ฐœ์ „", 15, True, True, [], "nature", "classic", "consistent", False, True, True, True],
["์Šคํƒ€ํŠธ์—… ์‚ฌ์—…๊ณ„ํš์„œ", 8, False, True, [], "creative", "modern", "varied", False, True, True, True],
],
inputs=[topic_input, num_slides, use_web_search, use_korean, reference_files,
design_theme, font_style, layout_style, include_charts, include_ai_image,
include_diagrams, include_flux_images],
)
# Event handler
generate_btn.click(
fn=generate_ppt,
inputs=[
topic_input,
num_slides,
use_web_search,
use_korean,
reference_files,
design_theme,
font_style,
layout_style,
include_charts,
include_ai_image,
include_diagrams,
include_flux_images
],
outputs=[download_file, status_text, content_preview]
)
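# Because generate_ppt is a generator, Gradio streams each yielded (file, status, preview) tuple into the three outputs above while generation is in progress.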
# Initialize APIs on startup
if __name__ == "__main__":
# Try to initialize APIs
if FLUX_API_URL:
initialize_flux_api()
if DIAGRAM_API_URL:
initialize_diagram_api()
demo.launch()