diff --git "a/app.py" "b/app.py"
--- "a/app.py"
+++ "b/app.py"
@@ -24,3036 +24,2444 @@ logger = logging.getLogger(__name__)
# --- Document export imports ---
try:
- from docx import Document
- from docx.shared import Inches, Pt, RGBColor, Mm
- from docx.enum.text import WD_ALIGN_PARAGRAPH
- from docx.enum.style import WD_STYLE_TYPE
- from docx.oxml.ns import qn
- from docx.oxml import OxmlElement
- DOCX_AVAILABLE = True
+ from docx import Document
+ from docx.shared import Inches, Pt, RGBColor, Mm
+ from docx.enum.text import WD_ALIGN_PARAGRAPH
+ from docx.enum.style import WD_STYLE_TYPE
+ from docx.oxml.ns import qn
+ from docx.oxml import OxmlElement
+ DOCX_AVAILABLE = True
except ImportError:
- DOCX_AVAILABLE = False
- logger.warning("python-docx not installed. DOCX export will be disabled.")
+ DOCX_AVAILABLE = False
+ logger.warning("python-docx not installed. DOCX export will be disabled.")
# --- Environment variables and constants ---
FRIENDLI_TOKEN = os.getenv("FRIENDLI_TOKEN", "")
BRAVE_SEARCH_API_KEY = os.getenv("BRAVE_SEARCH_API_KEY", "")
API_URL = "https://api.friendli.ai/dedicated/v1/chat/completions"
MODEL_ID = "dep86pjolcjjnv8"
-DB_PATH = "novel_sessions_v6.db"
-
-# Target word count settings
-TARGET_WORDS = 8000 # Safety margin
-MIN_WORDS_PER_PART = 800 # Minimum words per part
+DB_PATH = "screenplay_sessions_v1.db"
+
+# Screenplay length settings
+SCREENPLAY_LENGTHS = {
+ "movie": {"pages": 110, "description": "Feature Film (90-120 pages)"},
+ "tv_drama": {"pages": 55, "description": "TV Drama Episode (50-60 pages)"},
+ "ott_series": {"pages": 45, "description": "OTT Series Episode (30-60 pages)"},
+ "short_film": {"pages": 15, "description": "Short Film (10-20 pages)"}
+}
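For orientation, this table is consumed further down by ScreenplayDatabase.create_session(), which stores the "pages" value as the session's target length. A minimal sketch of that lookup; the runtime line uses the common one-page-per-minute rule of thumb and is only an annotation, not something the patch computes:

target_pages = SCREENPLAY_LENGTHS["tv_drama"]["pages"]   # 55
approx_runtime_minutes = target_pages                    # roughly one minute of screen time per page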
# --- Environment validation ---
if not FRIENDLI_TOKEN:
- logger.error("FRIENDLI_TOKEN not set. Application will not work properly.")
- FRIENDLI_TOKEN = "dummy_token_for_testing"
+ logger.error("FRIENDLI_TOKEN not set. Application will not work properly.")
+ FRIENDLI_TOKEN = "dummy_token_for_testing"
if not BRAVE_SEARCH_API_KEY:
- logger.warning("BRAVE_SEARCH_API_KEY not set. Web search features will be disabled.")
+ logger.warning("BRAVE_SEARCH_API_KEY not set. Web search features will be disabled.")
# --- Global variables ---
db_lock = threading.Lock()
-# Narrative phases definition
-NARRATIVE_PHASES = [
- "Introduction: Daily Life and Cracks",
- "Development 1: Rising Anxiety",
- "Development 2: External Shock",
- "Development 3: Deepening Internal Conflict",
- "Climax 1: Peak of Crisis",
- "Climax 2: Moment of Choice",
- "Falling Action 1: Consequences and Aftermath",
- "Falling Action 2: New Recognition",
- "Resolution 1: Changed Daily Life",
- "Resolution 2: Open Questions"
-]
+# Genre templates
+GENRE_TEMPLATES = {
+ "action": {
+ "pacing": "fast",
+ "scene_length": "short",
+ "dialogue_ratio": 0.3,
+ "key_elements": ["set pieces", "physical conflict", "urgency", "stakes escalation"],
+ "structure_beats": ["explosive opening", "pursuit/chase", "confrontation", "climactic battle"]
+ },
+ "thriller": {
+ "pacing": "fast",
+ "scene_length": "short",
+ "dialogue_ratio": 0.35,
+ "key_elements": ["suspense", "twists", "paranoia", "time pressure"],
+ "structure_beats": ["hook", "mystery deepens", "false victory", "revelation", "final confrontation"]
+ },
+ "drama": {
+ "pacing": "moderate",
+ "scene_length": "medium",
+ "dialogue_ratio": 0.5,
+ "key_elements": ["character depth", "emotional truth", "relationships", "internal conflict"],
+ "structure_beats": ["status quo", "catalyst", "debate", "commitment", "complications", "crisis", "resolution"]
+ },
+ "comedy": {
+ "pacing": "fast",
+ "scene_length": "short",
+ "dialogue_ratio": 0.6,
+ "key_elements": ["setup/payoff", "timing", "character comedy", "escalation"],
+ "structure_beats": ["funny opening", "complication", "misunderstandings multiply", "chaos peak", "resolution with callback"]
+ },
+ "horror": {
+ "pacing": "variable",
+ "scene_length": "mixed",
+ "dialogue_ratio": 0.3,
+ "key_elements": ["atmosphere", "dread", "jump scares", "gore/psychological"],
+ "structure_beats": ["normal world", "first sign", "investigation", "first attack", "survival", "final girl/boy"]
+ },
+ "sci-fi": {
+ "pacing": "moderate",
+ "scene_length": "medium",
+ "dialogue_ratio": 0.4,
+ "key_elements": ["world building", "technology", "concepts", "visual spectacle"],
+ "structure_beats": ["ordinary world", "discovery", "new world", "complications", "understanding", "choice", "new normal"]
+ },
+ "romance": {
+ "pacing": "moderate",
+ "scene_length": "medium",
+ "dialogue_ratio": 0.55,
+ "key_elements": ["chemistry", "obstacles", "emotional moments", "intimacy"],
+ "structure_beats": ["meet cute", "attraction", "first conflict", "deepening", "crisis/breakup", "grand gesture", "together"]
+ }
+}
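The templates are read with a fallback to the "drama" entry, mirroring the GENRE_TEMPLATES.get(genre, GENRE_TEMPLATES["drama"]) call in create_story_developer_prompt() below. A small sketch; the "heist" key is a deliberately unknown example input:

template = GENRE_TEMPLATES.get("heist", GENRE_TEMPLATES["drama"])  # unknown genre falls back to drama
pacing = template["pacing"]                                        # "moderate"
beats = ", ".join(template["structure_beats"])                     # joined into the prompt text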
-# Stage configuration - Single writer system
-UNIFIED_STAGES = [
- ("director", "๐ฌ Director: Integrated Narrative Structure Planning"),
- ("critic_director", "๐ Critic: Deep Review of Narrative Structure"),
- ("director", "๐ฌ Director: Final Master Plan"),
-] + [
- item for i in range(1, 11)
- for item in [
- ("writer", f"โ๏ธ Writer: Part {i} - {NARRATIVE_PHASES[i-1]}"),
- (f"critic_part{i}", f"๐ Part {i} Critic: Immediate Review and Revision Request"),
- ("writer", f"โ๏ธ Writer: Part {i} Revision")
- ]
-] + [
- ("critic_final", "๐ Final Critic: Comprehensive Evaluation and Literary Achievement"),
+# Screenplay stages definition
+SCREENPLAY_STAGES = [
+ ("producer", "๐ฌ Producer: Concept Development & Market Analysis"),
+ ("story_developer", "๐ Story Developer: Synopsis & Three-Act Structure"),
+ ("character_designer", "๐ฅ Character Designer: Cast & Relationships"),
+ ("critic_structure", "๐ Structure Critic: Story & Character Review"),
+ ("scene_planner", "๐ฏ Scene Planner: Detailed Scene Breakdown"),
+ ("screenwriter", "โ๏ธ Screenwriter: Act 1 - Setup (25%)"),
+ ("script_doctor", "๐ง Script Doctor: Act 1 Review & Polish"),
+ ("screenwriter", "โ๏ธ Screenwriter: Act 2A - Rising Action (25%)"),
+ ("script_doctor", "๐ง Script Doctor: Act 2A Review & Polish"),
+ ("screenwriter", "โ๏ธ Screenwriter: Act 2B - Complications (25%)"),
+ ("script_doctor", "๐ง Script Doctor: Act 2B Review & Polish"),
+ ("screenwriter", "โ๏ธ Screenwriter: Act 3 - Resolution (25%)"),
+ ("final_reviewer", "๐ญ Final Review: Complete Screenplay Analysis"),
]
+# Save the Cat Beat Sheet
+SAVE_THE_CAT_BEATS = {
+ 1: "Opening Image (0-1%)",
+ 2: "Setup (1-10%)",
+ 3: "Theme Stated (5%)",
+ 4: "Catalyst (10%)",
+ 5: "Debate (10-20%)",
+ 6: "Break into Two (20%)",
+ 7: "B Story (22%)",
+ 8: "Fun and Games (20-50%)",
+ 9: "Midpoint (50%)",
+ 10: "Bad Guys Close In (50-75%)",
+ 11: "All Is Lost (75%)",
+ 12: "Dark Night of the Soul (75-80%)",
+ 13: "Break into Three (80%)",
+ 14: "Finale (80-99%)",
+ 15: "Final Image (99-100%)"
+}
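The beat percentages live only in these label strings, so placing beats on actual pages needs a separate calculation. A hedged sketch of one plausible mapping onto a target page count (the helper name is hypothetical and not part of the patch):

def beat_page(percent: float, total_pages: int) -> int:
    # Convert a beat's percentage position into an approximate page number.
    return max(1, round(total_pages * percent / 100))

midpoint_page = beat_page(50, 110)     # page 55, the Midpoint of a feature
all_is_lost_page = beat_page(75, 110)  # page 82, All Is Lost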
+
# --- Data classes ---
@dataclass
-class StoryBible:
- """Story bible for maintaining narrative consistency"""
- characters: Dict[str, Dict[str, Any]] = field(default_factory=dict)
- settings: Dict[str, str] = field(default_factory=dict)
- timeline: List[Dict[str, Any]] = field(default_factory=list)
- plot_points: List[Dict[str, Any]] = field(default_factory=list)
- themes: List[str] = field(default_factory=list)
- symbols: Dict[str, List[str]] = field(default_factory=dict)
- style_guide: Dict[str, str] = field(default_factory=dict)
- opening_sentence: str = ""
+class ScreenplayBible:
+ """Screenplay bible for maintaining consistency"""
+ title: str = ""
+ logline: str = ""
+ genre: str = ""
+ subgenre: str = ""
+ tone: str = ""
+ themes: List[str] = field(default_factory=list)
+
+ # Characters
+ protagonist: Dict[str, Any] = field(default_factory=dict)
+ antagonist: Dict[str, Any] = field(default_factory=dict)
+ supporting_cast: Dict[str, Dict[str, Any]] = field(default_factory=dict)
+
+ # Structure
+ three_act_structure: Dict[str, str] = field(default_factory=dict)
+ save_the_cat_beats: Dict[int, str] = field(default_factory=dict)
+
+ # World
+ time_period: str = ""
+ primary_locations: List[Dict[str, str]] = field(default_factory=list)
+ world_rules: List[str] = field(default_factory=list)
+
+ # Visual style
+ visual_style: str = ""
+ key_imagery: List[str] = field(default_factory=list)
+
+@dataclass
+class SceneBreakdown:
+ """Individual scene information"""
+ scene_number: int
+ act: int
+ location: str
+ time_of_day: str
+ characters: List[str]
+ purpose: str
+ conflict: str
+ page_count: float
+ beat: str = ""
+ transition: str = "CUT TO:"
@dataclass
-class PartCritique:
- """Critique content for each part"""
- part_number: int
- continuity_issues: List[str] = field(default_factory=list)
- character_consistency: List[str] = field(default_factory=list)
- plot_progression: List[str] = field(default_factory=list)
- thematic_alignment: List[str] = field(default_factory=list)
- technical_issues: List[str] = field(default_factory=list)
- strengths: List[str] = field(default_factory=list)
- required_changes: List[str] = field(default_factory=list)
- literary_quality: List[str] = field(default_factory=list)
+class CharacterProfile:
+ """Detailed character profile"""
+ name: str
+ age: int
+ role: str # protagonist, antagonist, supporting, etc.
+ archetype: str
+ want: str # External goal
+ need: str # Internal need
+ backstory: str
+ personality: List[str]
+ speech_pattern: str
+ character_arc: str
+ relationships: Dict[str, str] = field(default_factory=dict)
+ first_appearance: str = ""
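For reference, a profile as ScreenplayDatabase.save_character() below expects it; every value here is hypothetical and only illustrates the intended field semantics (want is the external goal, need the internal one):

# Illustrative sketch (not part of the patch); all values are invented.
example_character = CharacterProfile(
    name="MARA VOSS", age=34, role="protagonist", archetype="reluctant hero",
    want="Clear her name",                    # external, visible goal
    need="Learn to trust her crew",           # internal change the arc should deliver
    backstory="Framed for a heist she walked away from",
    personality=["guarded", "resourceful", "dry humor"],
    speech_pattern="Short, clipped sentences",
    character_arc="Lone operator to reluctant team player",
)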
# --- Core logic classes ---
-class UnifiedNarrativeTracker:
- """Unified narrative tracker for single writer system"""
- def __init__(self):
- self.story_bible = StoryBible()
- self.part_critiques: Dict[int, PartCritique] = {}
- self.accumulated_content: List[str] = []
- self.word_count_by_part: Dict[int, int] = {}
- self.revision_history: Dict[int, List[str]] = defaultdict(list)
- self.causal_chains: List[Dict[str, Any]] = []
- self.narrative_momentum: float = 0.0
-
- def update_story_bible(self, element_type: str, key: str, value: Any):
- """Update story bible"""
- if element_type == "character":
- self.story_bible.characters[key] = value
- elif element_type == "setting":
- self.story_bible.settings[key] = value
- elif element_type == "timeline":
- self.story_bible.timeline.append({"event": key, "details": value})
- elif element_type == "theme":
- if key not in self.story_bible.themes:
- self.story_bible.themes.append(key)
- elif element_type == "symbol":
- if key not in self.story_bible.symbols:
- self.story_bible.symbols[key] = []
- self.story_bible.symbols[key].append(value)
-
- def add_part_critique(self, part_number: int, critique: PartCritique):
- """Add part critique"""
- self.part_critiques[part_number] = critique
-
- def check_continuity(self, current_part: int, new_content: str) -> List[str]:
- """Check continuity"""
- issues = []
-
- # Character consistency check
- for char_name, char_data in self.story_bible.characters.items():
- if char_name in new_content:
- if "traits" in char_data:
- for trait in char_data["traits"]:
- if trait.get("abandoned", False):
- issues.append(f"{char_name}'s abandoned trait '{trait['name']}' reappears")
-
- # Timeline consistency check
- if len(self.story_bible.timeline) > 0:
- last_event = self.story_bible.timeline[-1]
-
- # Causality check
- if current_part > 1 and not any(kw in new_content for kw in
- ['because', 'therefore', 'thus', 'hence', 'consequently']):
- issues.append("Unclear causality with previous part")
-
- return issues
-
- def calculate_narrative_momentum(self, part_number: int, content: str) -> float:
- """Calculate narrative momentum"""
- momentum = 5.0
-
- # New elements introduced
- new_elements = len(set(content.split()) - set(' '.join(self.accumulated_content).split()))
- if new_elements > 100:
- momentum += 2.0
-
- # Conflict escalation
- tension_words = ['crisis', 'conflict', 'tension', 'struggle', 'dilemma']
- if any(word in content.lower() for word in tension_words):
- momentum += 1.5
-
- # Causal clarity
- causal_words = ['because', 'therefore', 'thus', 'consequently', 'hence']
- causal_count = sum(1 for word in causal_words if word in content.lower())
- momentum += min(causal_count * 0.5, 2.0)
-
- # Repetition penalty
- if part_number > 1:
- prev_content = self.accumulated_content[-1] if self.accumulated_content else ""
- overlap = len(set(content.split()) & set(prev_content.split()))
- if overlap > len(content.split()) * 0.3:
- momentum -= 3.0
-
- return max(0.0, min(10.0, momentum))
-
-class NovelDatabase:
- """Database management - Modified for single writer system"""
- @staticmethod
- def init_db():
- with sqlite3.connect(DB_PATH) as conn:
- conn.execute("PRAGMA journal_mode=WAL")
- cursor = conn.cursor()
-
- # Main sessions table
- cursor.execute('''
- CREATE TABLE IF NOT EXISTS sessions (
- session_id TEXT PRIMARY KEY,
- user_query TEXT NOT NULL,
- language TEXT NOT NULL,
- created_at TEXT DEFAULT (datetime('now')),
- updated_at TEXT DEFAULT (datetime('now')),
- status TEXT DEFAULT 'active',
- current_stage INTEGER DEFAULT 0,
- final_novel TEXT,
- literary_report TEXT,
- total_words INTEGER DEFAULT 0,
- story_bible TEXT,
- narrative_tracker TEXT,
- opening_sentence TEXT
- )
- ''')
-
- # Stages table
- cursor.execute('''
- CREATE TABLE IF NOT EXISTS stages (
- id INTEGER PRIMARY KEY AUTOINCREMENT,
- session_id TEXT NOT NULL,
- stage_number INTEGER NOT NULL,
- stage_name TEXT NOT NULL,
- role TEXT NOT NULL,
- content TEXT,
- word_count INTEGER DEFAULT 0,
- status TEXT DEFAULT 'pending',
- narrative_momentum REAL DEFAULT 0.0,
- created_at TEXT DEFAULT (datetime('now')),
- updated_at TEXT DEFAULT (datetime('now')),
- FOREIGN KEY (session_id) REFERENCES sessions(session_id),
- UNIQUE(session_id, stage_number)
- )
- ''')
-
- # Critiques table
- cursor.execute('''
- CREATE TABLE IF NOT EXISTS critiques (
- id INTEGER PRIMARY KEY AUTOINCREMENT,
- session_id TEXT NOT NULL,
- part_number INTEGER NOT NULL,
- critique_data TEXT,
- created_at TEXT DEFAULT (datetime('now')),
- FOREIGN KEY (session_id) REFERENCES sessions(session_id)
- )
- ''')
-
- # Random themes library table
- cursor.execute('''
- CREATE TABLE IF NOT EXISTS random_themes_library (
- theme_id TEXT PRIMARY KEY,
- theme_text TEXT NOT NULL,
- language TEXT NOT NULL,
- title TEXT,
- opening_sentence TEXT,
- protagonist TEXT,
- conflict TEXT,
- philosophical_question TEXT,
- generated_at TEXT DEFAULT (datetime('now')),
- view_count INTEGER DEFAULT 0,
- used_count INTEGER DEFAULT 0,
- tags TEXT,
- metadata TEXT
- )
- ''')
-
- conn.commit()
-
- @staticmethod
- @contextmanager
- def get_db():
- with db_lock:
- conn = sqlite3.connect(DB_PATH, timeout=30.0)
- conn.row_factory = sqlite3.Row
- try:
- yield conn
- finally:
- conn.close()
-
- @staticmethod
- def create_session(user_query: str, language: str) -> str:
- session_id = hashlib.md5(f"{user_query}{datetime.now()}".encode()).hexdigest()
- with NovelDatabase.get_db() as conn:
- conn.cursor().execute(
- 'INSERT INTO sessions (session_id, user_query, language) VALUES (?, ?, ?)',
- (session_id, user_query, language)
- )
- conn.commit()
- return session_id
-
- @staticmethod
- def save_stage(session_id: str, stage_number: int, stage_name: str,
- role: str, content: str, status: str = 'complete',
- narrative_momentum: float = 0.0):
- word_count = len(content.split()) if content else 0
- with NovelDatabase.get_db() as conn:
- cursor = conn.cursor()
- cursor.execute('''
- INSERT INTO stages (session_id, stage_number, stage_name, role, content,
- word_count, status, narrative_momentum)
- VALUES (?, ?, ?, ?, ?, ?, ?, ?)
- ON CONFLICT(session_id, stage_number)
- DO UPDATE SET content=?, word_count=?, status=?, stage_name=?,
- narrative_momentum=?, updated_at=datetime('now')
- ''', (session_id, stage_number, stage_name, role, content, word_count,
- status, narrative_momentum, content, word_count, status, stage_name,
- narrative_momentum))
-
- # Update total word count
- cursor.execute('''
- UPDATE sessions
- SET total_words = (
- SELECT SUM(word_count)
- FROM stages
- WHERE session_id = ? AND role = 'writer' AND content IS NOT NULL
- ),
- updated_at = datetime('now'),
- current_stage = ?
- WHERE session_id = ?
- ''', (session_id, stage_number, session_id))
-
- conn.commit()
-
- @staticmethod
- def save_critique(session_id: str, part_number: int, critique: PartCritique):
- """Save critique"""
- with NovelDatabase.get_db() as conn:
- critique_json = json.dumps(asdict(critique))
- conn.cursor().execute(
- 'INSERT INTO critiques (session_id, part_number, critique_data) VALUES (?, ?, ?)',
- (session_id, part_number, critique_json)
- )
- conn.commit()
-
- @staticmethod
- def save_opening_sentence(session_id: str, opening_sentence: str):
- """Save opening sentence"""
- with NovelDatabase.get_db() as conn:
- conn.cursor().execute(
- 'UPDATE sessions SET opening_sentence = ? WHERE session_id = ?',
- (opening_sentence, session_id)
- )
- conn.commit()
-
- @staticmethod
- def get_writer_content(session_id: str) -> str:
- """Get writer content - Integrate all revisions"""
- with NovelDatabase.get_db() as conn:
- rows = conn.cursor().execute('''
- SELECT content FROM stages
- WHERE session_id = ? AND role = 'writer'
- AND stage_name LIKE '%Revision%'
- ORDER BY stage_number
- ''', (session_id,)).fetchall()
-
- if rows:
- return '\n\n'.join(row['content'] for row in rows if row['content'])
- else:
- # If no revisions, use drafts
- rows = conn.cursor().execute('''
- SELECT content FROM stages
- WHERE session_id = ? AND role = 'writer'
- AND stage_name NOT LIKE '%Revision%'
- ORDER BY stage_number
- ''', (session_id,)).fetchall()
- return '\n\n'.join(row['content'] for row in rows if row['content'])
-
- @staticmethod
- def save_narrative_tracker(session_id: str, tracker: UnifiedNarrativeTracker):
- """Save unified narrative tracker"""
- with NovelDatabase.get_db() as conn:
- tracker_data = json.dumps({
- 'story_bible': asdict(tracker.story_bible),
- 'part_critiques': {k: asdict(v) for k, v in tracker.part_critiques.items()},
- 'word_count_by_part': tracker.word_count_by_part,
- 'causal_chains': tracker.causal_chains,
- 'narrative_momentum': tracker.narrative_momentum
- })
- conn.cursor().execute(
- 'UPDATE sessions SET narrative_tracker = ? WHERE session_id = ?',
- (tracker_data, session_id)
- )
- conn.commit()
-
- @staticmethod
- def load_narrative_tracker(session_id: str) -> Optional[UnifiedNarrativeTracker]:
- """Load unified narrative tracker"""
- with NovelDatabase.get_db() as conn:
- row = conn.cursor().execute(
- 'SELECT narrative_tracker FROM sessions WHERE session_id = ?',
- (session_id,)
- ).fetchone()
-
- if row and row['narrative_tracker']:
- data = json.loads(row['narrative_tracker'])
- tracker = UnifiedNarrativeTracker()
-
- # Restore story bible
- bible_data = data.get('story_bible', {})
- tracker.story_bible = StoryBible(**bible_data)
-
- # Restore critiques
- for part_num, critique_data in data.get('part_critiques', {}).items():
- tracker.part_critiques[int(part_num)] = PartCritique(**critique_data)
-
- tracker.word_count_by_part = data.get('word_count_by_part', {})
- tracker.causal_chains = data.get('causal_chains', [])
- tracker.narrative_momentum = data.get('narrative_momentum', 0.0)
-
- return tracker
- return None
-
- @staticmethod
- def save_random_theme(theme_text: str, language: str, metadata: Dict[str, Any]) -> str:
- """Save randomly generated theme to library"""
- theme_id = hashlib.md5(f"{theme_text}{datetime.now()}".encode()).hexdigest()[:12]
-
- # Extract components from theme text
- title = metadata.get('title', '')
- opening_sentence = metadata.get('opening_sentence', '')
- protagonist = metadata.get('protagonist', '')
- conflict = metadata.get('conflict', '')
- philosophical_question = metadata.get('philosophical_question', '')
- tags = json.dumps(metadata.get('tags', []))
-
- with NovelDatabase.get_db() as conn:
- conn.cursor().execute('''
- INSERT INTO random_themes_library
- (theme_id, theme_text, language, title, opening_sentence,
- protagonist, conflict, philosophical_question, tags, metadata)
- VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
- ''', (theme_id, theme_text, language, title, opening_sentence,
- protagonist, conflict, philosophical_question, tags,
- json.dumps(metadata)))
- conn.commit()
-
- return theme_id
-
- @staticmethod
- def get_random_themes_library(language: str = None, limit: int = 50) -> List[Dict]:
- """Get random themes from library"""
- with NovelDatabase.get_db() as conn:
- query = '''
- SELECT * FROM random_themes_library
- {}
- ORDER BY generated_at DESC
- LIMIT ?
- '''.format('WHERE language = ?' if language else '')
-
- if language:
- rows = conn.cursor().execute(query, (language, limit)).fetchall()
- else:
- rows = conn.cursor().execute(query, (limit,)).fetchall()
-
- return [dict(row) for row in rows]
-
- @staticmethod
- def update_theme_view_count(theme_id: str):
- """Update view count for a theme"""
- with NovelDatabase.get_db() as conn:
- conn.cursor().execute(
- 'UPDATE random_themes_library SET view_count = view_count + 1 WHERE theme_id = ?',
- (theme_id,)
- )
- conn.commit()
-
- @staticmethod
- def update_theme_used_count(theme_id: str):
- """Update used count when theme is used for novel"""
- with NovelDatabase.get_db() as conn:
- conn.cursor().execute(
- 'UPDATE random_themes_library SET used_count = used_count + 1 WHERE theme_id = ?',
- (theme_id,)
- )
- conn.commit()
-
- @staticmethod
- def get_theme_by_id(theme_id: str) -> Optional[Dict]:
- """Get specific theme by ID"""
- with NovelDatabase.get_db() as conn:
- row = conn.cursor().execute(
- 'SELECT * FROM random_themes_library WHERE theme_id = ?',
- (theme_id,)
- ).fetchone()
- return dict(row) if row else None
-
- @staticmethod
- def get_session(session_id: str) -> Optional[Dict]:
- with NovelDatabase.get_db() as conn:
- row = conn.cursor().execute('SELECT * FROM sessions WHERE session_id = ?',
- (session_id,)).fetchone()
- return dict(row) if row else None
-
- @staticmethod
- def get_stages(session_id: str) -> List[Dict]:
- with NovelDatabase.get_db() as conn:
- rows = conn.cursor().execute(
- 'SELECT * FROM stages WHERE session_id = ? ORDER BY stage_number',
- (session_id,)
- ).fetchall()
- return [dict(row) for row in rows]
-
- @staticmethod
- def update_final_novel(session_id: str, final_novel: str, literary_report: str = ""):
- with NovelDatabase.get_db() as conn:
- conn.cursor().execute(
- '''UPDATE sessions SET final_novel = ?, status = 'complete',
- updated_at = datetime('now'), literary_report = ? WHERE session_id = ?''',
- (final_novel, literary_report, session_id)
- )
- conn.commit()
-
- @staticmethod
- def get_active_sessions() -> List[Dict]:
- with NovelDatabase.get_db() as conn:
- rows = conn.cursor().execute(
- '''SELECT session_id, user_query, language, created_at, current_stage, total_words
- FROM sessions WHERE status = 'active' ORDER BY updated_at DESC LIMIT 10'''
- ).fetchall()
- return [dict(row) for row in rows]
-
- @staticmethod
- def get_total_words(session_id: str) -> int:
- """Get total word count"""
- with NovelDatabase.get_db() as conn:
- row = conn.cursor().execute(
- 'SELECT total_words FROM sessions WHERE session_id = ?',
- (session_id,)
- ).fetchone()
- return row['total_words'] if row and row['total_words'] else 0
+class ScreenplayTracker:
+ """Unified screenplay tracker"""
+ def __init__(self):
+ self.screenplay_bible = ScreenplayBible()
+ self.scenes: List[SceneBreakdown] = []
+ self.characters: Dict[str, CharacterProfile] = {}
+ self.page_count = 0
+ self.act_pages = {"1": 0, "2A": 0, "2B": 0, "3": 0}
+ self.dialogue_action_ratio = 0.0
+
+ def add_scene(self, scene: SceneBreakdown):
+ """Add scene to tracker"""
+ self.scenes.append(scene)
+ self.page_count += scene.page_count
+
+ def add_character(self, character: CharacterProfile):
+ """Add character to tracker"""
+ self.characters[character.name] = character
+
+ def update_bible(self, key: str, value: Any):
+ """Update screenplay bible"""
+ if hasattr(self.screenplay_bible, key):
+ setattr(self.screenplay_bible, key, value)
+
+ def get_act_page_target(self, act: str, total_pages: int) -> int:
+ """Get target pages for each act (each act block targets ~25% of the total)"""
+ if act in ("1", "2A", "2B", "3"):
+ return int(total_pages * 0.25)
+ return 0
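A usage sketch of the tracker (not part of the patch; the scene values are hypothetical): scenes accumulate into page_count, which can be compared against the 25% per-act target:

tracker = ScreenplayTracker()
tracker.add_scene(SceneBreakdown(
    scene_number=1, act=1, location="INT. DINER", time_of_day="NIGHT",
    characters=["MARA"], purpose="Introduce protagonist", conflict="Unpaid bill",
    page_count=2.5, beat="Opening Image"))
act1_target = tracker.get_act_page_target("1", total_pages=110)  # 27 pages
print(f"{tracker.page_count} / {act1_target} pages written in Act 1")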
+
+class ScreenplayDatabase:
+ """Database management for screenplay sessions"""
+ @staticmethod
+ def init_db():
+ with sqlite3.connect(DB_PATH) as conn:
+ conn.execute("PRAGMA journal_mode=WAL")
+ cursor = conn.cursor()
+
+ # Main screenplay sessions table
+ cursor.execute('''
+ CREATE TABLE IF NOT EXISTS screenplay_sessions (
+ session_id TEXT PRIMARY KEY,
+ user_query TEXT NOT NULL,
+ screenplay_type TEXT NOT NULL,
+ genre TEXT NOT NULL,
+ subgenre TEXT,
+ target_pages INTEGER,
+ language TEXT NOT NULL,
+ title TEXT,
+ logline TEXT,
+ synopsis TEXT,
+ three_act_structure TEXT,
+ character_profiles TEXT,
+ scene_breakdown TEXT,
+ screenplay_bible TEXT,
+ final_screenplay TEXT,
+ pdf_path TEXT,
+ created_at TEXT DEFAULT (datetime('now')),
+ updated_at TEXT DEFAULT (datetime('now')),
+ status TEXT DEFAULT 'active',
+ current_stage INTEGER DEFAULT 0,
+ total_pages REAL DEFAULT 0
+ )
+ ''')
+
+ # Stages table
+ cursor.execute('''
+ CREATE TABLE IF NOT EXISTS screenplay_stages (
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ session_id TEXT NOT NULL,
+ stage_number INTEGER NOT NULL,
+ stage_name TEXT NOT NULL,
+ role TEXT NOT NULL,
+ content TEXT,
+ page_count REAL DEFAULT 0,
+ status TEXT DEFAULT 'pending',
+ created_at TEXT DEFAULT (datetime('now')),
+ updated_at TEXT DEFAULT (datetime('now')),
+ FOREIGN KEY (session_id) REFERENCES screenplay_sessions(session_id),
+ UNIQUE(session_id, stage_number)
+ )
+ ''')
+
+ # Scenes table
+ cursor.execute('''
+ CREATE TABLE IF NOT EXISTS scenes (
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ session_id TEXT NOT NULL,
+ act_number INTEGER NOT NULL,
+ scene_number INTEGER NOT NULL,
+ location TEXT NOT NULL,
+ time_of_day TEXT NOT NULL,
+ characters TEXT,
+ purpose TEXT,
+ content TEXT,
+ page_count REAL,
+ created_at TEXT DEFAULT (datetime('now')),
+ FOREIGN KEY (session_id) REFERENCES screenplay_sessions(session_id)
+ )
+ ''')
+
+ # Characters table
+ cursor.execute('''
+ CREATE TABLE IF NOT EXISTS characters (
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ session_id TEXT NOT NULL,
+ character_name TEXT NOT NULL,
+ character_data TEXT,
+ created_at TEXT DEFAULT (datetime('now')),
+ FOREIGN KEY (session_id) REFERENCES screenplay_sessions(session_id),
+ UNIQUE(session_id, character_name)
+ )
+ ''')
+
+ # Screenplay themes library
+ cursor.execute('''
+ CREATE TABLE IF NOT EXISTS screenplay_themes_library (
+ theme_id TEXT PRIMARY KEY,
+ theme_text TEXT NOT NULL,
+ screenplay_type TEXT NOT NULL,
+ genre TEXT NOT NULL,
+ language TEXT NOT NULL,
+ title TEXT,
+ logline TEXT,
+ protagonist_desc TEXT,
+ conflict_desc TEXT,
+ generated_at TEXT DEFAULT (datetime('now')),
+ view_count INTEGER DEFAULT 0,
+ used_count INTEGER DEFAULT 0,
+ tags TEXT,
+ metadata TEXT
+ )
+ ''')
+
+ conn.commit()
+
+ @staticmethod
+ @contextmanager
+ def get_db():
+ with db_lock:
+ conn = sqlite3.connect(DB_PATH, timeout=30.0)
+ conn.row_factory = sqlite3.Row
+ try:
+ yield conn
+ finally:
+ conn.close()
+
+ @staticmethod
+ def create_session(user_query: str, screenplay_type: str, genre: str, language: str) -> str:
+ session_id = hashlib.md5(f"{user_query}{screenplay_type}{datetime.now()}".encode()).hexdigest()
+ target_pages = SCREENPLAY_LENGTHS[screenplay_type]["pages"]
+
+ with ScreenplayDatabase.get_db() as conn:
+ conn.cursor().execute(
+ '''INSERT INTO screenplay_sessions
+ (session_id, user_query, screenplay_type, genre, target_pages, language)
+ VALUES (?, ?, ?, ?, ?, ?)''',
+ (session_id, user_query, screenplay_type, genre, target_pages, language)
+ )
+ conn.commit()
+ return session_id
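A minimal end-to-end sketch of starting a session with the schema above (not part of the patch; the query string is invented):

ScreenplayDatabase.init_db()
session_id = ScreenplayDatabase.create_session(
    user_query="A getaway driver hides out in a small coastal town",
    screenplay_type="movie", genre="thriller", language="English")
target = ScreenplayDatabase.get_session(session_id)["target_pages"]  # 110, from SCREENPLAY_LENGTHS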
+
+ @staticmethod
+ def save_stage(session_id: str, stage_number: int, stage_name: str,
+ role: str, content: str, status: str = 'complete'):
+ page_count = 0
+ if role == "screenwriter" and content:
+ # Estimate pages based on screenplay format (rough estimate)
+ page_count = len(content.split('\n')) / 55 # ~55 lines per page
+
+ with ScreenplayDatabase.get_db() as conn:
+ cursor = conn.cursor()
+ cursor.execute('''
+ INSERT INTO screenplay_stages
+ (session_id, stage_number, stage_name, role, content, page_count, status)
+ VALUES (?, ?, ?, ?, ?, ?, ?)
+ ON CONFLICT(session_id, stage_number)
+ DO UPDATE SET content=?, page_count=?, status=?, updated_at=datetime('now')
+ ''', (session_id, stage_number, stage_name, role, content, page_count, status,
+ content, page_count, status))
+
+ # Update session info
+ cursor.execute('''
+ UPDATE screenplay_sessions
+ SET current_stage = ?, updated_at = datetime('now')
+ WHERE session_id = ?
+ ''', (stage_number, session_id))
+
+ conn.commit()
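A usage sketch for saving a stage (not part of the patch; the content string is a placeholder): screenwriter stages get an approximate page count from the ~55-lines-per-page estimate applied above, while other roles are stored with page_count 0:

ScreenplayDatabase.save_stage(
    session_id=session_id,          # assumes the session created in the earlier sketch
    stage_number=5,
    stage_name="Screenwriter: Act 1 - Setup (25%)",
    role="screenwriter",
    content="FADE IN:\n\nINT. DINER - NIGHT\n\n...",
)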
+
+ @staticmethod
+ def save_screenplay_bible(session_id: str, bible: ScreenplayBible):
+ """Save screenplay bible"""
+ with ScreenplayDatabase.get_db() as conn:
+ bible_json = json.dumps(asdict(bible))
+ conn.cursor().execute(
+ 'UPDATE screenplay_sessions SET screenplay_bible = ? WHERE session_id = ?',
+ (bible_json, session_id)
+ )
+ conn.commit()
+
+ @staticmethod
+ def save_character(session_id: str, character: CharacterProfile):
+ """Save character profile"""
+ with ScreenplayDatabase.get_db() as conn:
+ char_json = json.dumps(asdict(character))
+ conn.cursor().execute(
+ '''INSERT INTO characters (session_id, character_name, character_data)
+ VALUES (?, ?, ?)
+ ON CONFLICT(session_id, character_name)
+ DO UPDATE SET character_data = ?''',
+ (session_id, character.name, char_json, char_json)
+ )
+ conn.commit()
+
+ @staticmethod
+ def save_scene(session_id: str, scene: SceneBreakdown):
+ """Save scene breakdown"""
+ with ScreenplayDatabase.get_db() as conn:
+ conn.cursor().execute(
+ '''INSERT INTO scenes
+ (session_id, act_number, scene_number, location, time_of_day,
+ characters, purpose, page_count)
+ VALUES (?, ?, ?, ?, ?, ?, ?, ?)''',
+ (session_id, scene.act, scene.scene_number, scene.location,
+ scene.time_of_day, json.dumps(scene.characters), scene.purpose,
+ scene.page_count)
+ )
+ conn.commit()
+
+ @staticmethod
+ def get_screenplay_content(session_id: str) -> str:
+ """Get complete screenplay content"""
+ with ScreenplayDatabase.get_db() as conn:
+ rows = conn.cursor().execute('''
+ SELECT content FROM screenplay_stages
+ WHERE session_id = ? AND role = 'screenwriter'
+ ORDER BY stage_number
+ ''', (session_id,)).fetchall()
+
+ if rows:
+ return '\n\n'.join(row['content'] for row in rows if row['content'])
+ return ""
+
+ @staticmethod
+ def update_final_screenplay(session_id: str, final_screenplay: str, title: str, logline: str):
+ """Update final screenplay"""
+ with ScreenplayDatabase.get_db() as conn:
+ total_pages = len(final_screenplay.split('\n')) / 55
+ conn.cursor().execute(
+ '''UPDATE screenplay_sessions
+ SET final_screenplay = ?, title = ?, logline = ?,
+ total_pages = ?, status = 'complete', updated_at = datetime('now')
+ WHERE session_id = ?''',
+ (final_screenplay, title, logline, total_pages, session_id)
+ )
+ conn.commit()
+
+ @staticmethod
+ def get_session(session_id: str) -> Optional[Dict]:
+ with ScreenplayDatabase.get_db() as conn:
+ row = conn.cursor().execute(
+ 'SELECT * FROM screenplay_sessions WHERE session_id = ?',
+ (session_id,)
+ ).fetchone()
+ return dict(row) if row else None
+
+ @staticmethod
+ def get_active_sessions() -> List[Dict]:
+ with ScreenplayDatabase.get_db() as conn:
+ rows = conn.cursor().execute(
+ '''SELECT session_id, title, user_query, screenplay_type, genre,
+ created_at, current_stage, total_pages
+ FROM screenplay_sessions
+ WHERE status = 'active'
+ ORDER BY updated_at DESC
+ LIMIT 10'''
+ ).fetchall()
+ return [dict(row) for row in rows]
+
+ @staticmethod
+ def save_random_theme(theme_text: str, screenplay_type: str, genre: str,
+ language: str, metadata: Dict[str, Any]) -> str:
+ """Save randomly generated screenplay theme"""
+ theme_id = hashlib.md5(f"{theme_text}{datetime.now()}".encode()).hexdigest()[:12]
+
+ title = metadata.get('title', '')
+ logline = metadata.get('logline', '')
+ protagonist_desc = metadata.get('protagonist', '')
+ conflict_desc = metadata.get('conflict', '')
+ tags = json.dumps(metadata.get('tags', []))
+
+ with ScreenplayDatabase.get_db() as conn:
+ conn.cursor().execute('''
+ INSERT INTO screenplay_themes_library
+ (theme_id, theme_text, screenplay_type, genre, language, title, logline,
+ protagonist_desc, conflict_desc, tags, metadata)
+ VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
+ ''', (theme_id, theme_text, screenplay_type, genre, language, title, logline,
+ protagonist_desc, conflict_desc, tags, json.dumps(metadata)))
+ conn.commit()
+
+ return theme_id
class WebSearchIntegration:
- """Web search functionality"""
- def __init__(self):
- self.brave_api_key = BRAVE_SEARCH_API_KEY
- self.search_url = "https://api.search.brave.com/res/v1/web/search"
- self.enabled = bool(self.brave_api_key)
-
- def search(self, query: str, count: int = 3, language: str = "en") -> List[Dict]:
- if not self.enabled:
- return []
- headers = {
- "Accept": "application/json",
- "X-Subscription-Token": self.brave_api_key
- }
- params = {
- "q": query,
- "count": count,
- "search_lang": "ko" if language == "Korean" else "en",
- "text_decorations": False,
- "safesearch": "moderate"
- }
- try:
- response = requests.get(self.search_url, headers=headers, params=params, timeout=10)
- response.raise_for_status()
- results = response.json().get("web", {}).get("results", [])
- return results
- except requests.exceptions.RequestException as e:
- logger.error(f"Web search API error: {e}")
- return []
-
- def extract_relevant_info(self, results: List[Dict], max_chars: int = 1500) -> str:
- if not results:
- return ""
- extracted = []
- total_chars = 0
- for i, result in enumerate(results[:3], 1):
- title = result.get("title", "")
- description = result.get("description", "")
- info = f"[{i}] {title}: {description}"
- if total_chars + len(info) < max_chars:
- extracted.append(info)
- total_chars += len(info)
- else:
- break
- return "\n".join(extracted)
-
-class UnifiedLiterarySystem:
- """Single writer progressive literary novel generation system"""
- def __init__(self):
- self.token = FRIENDLI_TOKEN
- self.api_url = API_URL
- self.model_id = MODEL_ID
- self.narrative_tracker = UnifiedNarrativeTracker()
- self.web_search = WebSearchIntegration()
- self.current_session_id = None
- NovelDatabase.init_db()
-
- def create_headers(self):
- return {"Authorization": f"Bearer {self.token}", "Content-Type": "application/json"}
-
- # --- Prompt generation functions ---
- def augment_query(self, user_query: str, language: str) -> str:
- """Augment prompt"""
- if len(user_query.split()) < 15:
- augmented_template = {
- "Korean": f"""'{user_query}'
-
-**์์ฌ ๊ตฌ์กฐ ํต์ฌ:**
-- 10๊ฐ ํํธ๊ฐ ํ๋์ ํตํฉ๋ ์ด์ผ๊ธฐ๋ฅผ ๊ตฌ์ฑ
-- ๊ฐ ํํธ๋ ์ด์ ํํธ์ ํ์ฐ์ ๊ฒฐ๊ณผ
-- ์ธ๋ฌผ์ ๋ชํํ ๋ณํ ๊ถค์ (A → B → C)
-- ์ค์ฌ ๊ฐ๋ฑ์ ์ ์ง์ ๊ณ ์กฐ์ ํด๊ฒฐ
-- ๊ฐ๋ ฌํ ์ค์ฌ ์์ง์ ์๋ฏธ ๋ณํ""",
-
- "English": f"""'{user_query}'
-
-**Narrative Structure Core:**
-- 10 parts forming one integrated story
-- Each part as inevitable result of previous
-- Clear character transformation arc (A → B → C)
-- Progressive escalation and resolution of central conflict
-- Evolving meaning of powerful central symbol"""
- }
- return augmented_template.get(language, user_query)
- return user_query
-
- def generate_powerful_opening(self, user_query: str, language: str) -> str:
- """Generate powerful opening sentence matching the theme"""
-
- opening_prompt = {
- "Korean": f"""์ฃผ์ : {user_query}
-
-์ด ์ฃผ์ ์ ๋ํ ๊ฐ๋ ฌํ๊ณ ์์ ์ ์๋ ์ฒซ๋ฌธ์ฅ์ ์์ฑํ์ธ์.
-
-**์ฒซ๋ฌธ์ฅ ์์ฑ ์์น:**
-1. ์ฆ๊ฐ์ ์ธ ๊ธด์ฅ๊ฐ์ด๋ ๊ถ๊ธ์ฆ ์ ๋ฐ
-2. ํ๋ฒํ์ง ์์ ์๊ฐ์ด๋ ์ํฉ ์ ์
-3. ๊ฐ๊ฐ์ ์ด๊ณ ๊ตฌ์ฒด์ ์ธ ์ด๋ฏธ์ง
-4. ์ฒ ํ์ ์ง๋ฌธ์ด๋ ์ญ์ค์ ์ง์
-5. ์๊ฐ๊ณผ ๊ณต๊ฐ์ ๋ํนํ ์ค์ 
-
-**ํ๋ฅญํ ์ฒซ๋ฌธ์ฅ์ ์์ ํจํด:**
-- "๊ทธ๊ฐ ์ฃฝ์ ๋ , ..." (์ถฉ๊ฒฉ์ ์ฌ๊ฑด)
-- "๋ชจ๋ ๊ฒ์ด ๋๋ฌ๋ค๊ณ ์๊ฐํ ์๊ฐ..." (๋ฐ์ ์๊ณ )
-- "์ธ์์์ ๊ฐ์ฅ [ํ์ฉ์ฌ]ํ [๋ช์ฌ]๋..." (๋ํนํ ์ ์)
-- "[๊ตฌ์ฒด์ ํ๋]ํ๋ ๊ฒ๋ง์ผ๋ก๋..." (์ผ์์ ์ฌํด์)
-
-๋จ ํ๋์ ๋ฌธ์ฅ๋ง ์ ์ํ์ธ์.""",
-
- "English": f"""Theme: {user_query}
-
-Generate an unforgettable opening sentence for this theme.
-
-**Opening Sentence Principles:**
-1. Immediate tension or curiosity
-2. Unusual perspective or situation
-3. Sensory and specific imagery
-4. Philosophical question or paradox
-5. Unique temporal/spatial setting
-
-**Great Opening Patterns:**
-- "The day he died, ..." (shocking event)
-- "At the moment everything seemed over..." (reversal hint)
-- "The most [adjective] [noun] in the world..." (unique definition)
-- "Just by [specific action]..." (reinterpretation of ordinary)
-
-Provide only one sentence."""
- }
-
- messages = [{"role": "user", "content": opening_prompt.get(language, opening_prompt["Korean"])}]
- opening = self.call_llm_sync(messages, "writer", language)
- return opening.strip()
-
- def create_director_initial_prompt(self, user_query: str, language: str) -> str:
- """Director initial planning - Enhanced version"""
- augmented_query = self.augment_query(user_query, language)
-
- # Generate opening sentence
- opening_sentence = self.generate_powerful_opening(user_query, language)
- self.narrative_tracker.story_bible.opening_sentence = opening_sentence
- if self.current_session_id:
- NovelDatabase.save_opening_sentence(self.current_session_id, opening_sentence)
-
- search_results_str = ""
- if self.web_search.enabled:
- short_query = user_query[:50] if len(user_query) > 50 else user_query
- queries = [
- f"{short_query} philosophical meaning",
- f"human existence meaning {short_query}",
- f"{short_query} literary works"
- ]
- for q in queries[:2]:
- try:
- results = self.web_search.search(q, count=2, language=language)
- if results:
- search_results_str += self.web_search.extract_relevant_info(results) + "\n"
- except Exception as e:
- logger.warning(f"Search failed: {str(e)}")
-
- lang_prompts = {
- "Korean": f"""๋ธ๋ฒจ๋ฌธํ์ ์์ค์ ์ฒ ํ์ ๊น์ด๋ฅผ ์ง๋ ์คํธ์์ค(8,000๋จ์ด)์ ๊ธฐํํ์ธ์.
-
-**์ฃผ์ :** {augmented_query}
-
-**ํ์ ์ฒซ๋ฌธ์ฅ:** {opening_sentence}
-
-**์ฐธ๊ณ ์๋ฃ:**
-{search_results_str if search_results_str else "N/A"}
-
-**ํ์ ๋ฌธํ์ ์์:**
-
-1. **์ฒ ํ์ ํ๊ตฌ**
- - ํ๋์ธ์ ์ค์กด์ ๊ณ ๋ (์์ธ, ์ ์ฒด์ฑ, ์๋ฏธ ์์ค)
- - ๋์งํธ ์๋์ ์ธ๊ฐ ์กฐ๊ฑด
- - ์๋ณธ์ฃผ์ ์ฌํ์ ๋ชจ์๊ณผ ๊ฐ์ธ์ ์ ํ
- - ์ฃฝ์, ์ฌ๋, ์์ ์ ๋ํ ์๋ก์ด ์ฑ์ฐฐ
-
-2. **์ฌํ์ ๋ฉ์์ง**
- - ๊ณ๊ธ, ์ ๋, ์ธ๋ ๊ฐ ๊ฐ๋ฑ
- - ํ๊ฒฝ ์๊ธฐ์ ์ธ๊ฐ์ ์ฑ์
- - ๊ธฐ์ ๋ฐ์ ๊ณผ ์ธ๊ฐ์ฑ์ ์ถฉ๋
- - ํ๋ ๋ฏผ์ฃผ์ฃผ์์ ์๊ธฐ์ ๊ฐ์ธ์ ์ญํ
-
-3. **๋ฌธํ์ ์์ฌ ์ฅ์น**
- - ์ค์ฌ ์์ : [๊ตฌ์ฒด์ ์ฌ๋ฌผ/ํ์] โ [์ถ์์ ์๋ฏธ]
- - ๋ฐ๋ณต๋๋ ๋ชจํฐํ: [์ด๋ฏธ์ง/ํ๋] (์ต์ 5ํ ๋ณ์ฃผ)
- - ๋์กฐ๋ฒ: [A vs B]์ ์ง์์ ๊ธด์ฅ
- - ์์ง์ ๊ณต๊ฐ: [๊ตฌ์ฒด์ ์ฅ์]๊ฐ ์๋ฏธํ๋ ๊ฒ
- - ์๊ฐ์ ์ฃผ๊ด์ ํ๋ฆ (ํ์, ์๊ฐ, ์ ์ง)
-
-4. **ํตํฉ๋ 10ํํธ ๊ตฌ์กฐ**
- ๊ฐ ํํธ๋ณ ํต์ฌ:
- - ํํธ 1: ์ฒซ๋ฌธ์ฅ์ผ๋ก ์์, ์ผ์ ์ ๊ท ์ด โ ์ฒ ํ์ ์ง๋ฌธ ์ ๊ธฐ
- - ํํธ 2-3: ์ธ๋ถ ์ฌ๊ฑด โ ๋ด์ ์ฑ์ฐฐ ์ฌํ
- - ํํธ 4-5: ์ฌํ์ ๊ฐ๋ฑ โ ๊ฐ์ธ์ ๋๋ ๋ง
- - ํํธ 6-7: ์๊ธฐ์ ์ ์ โ ์ค์กด์ ์ ํ
- - ํํธ 8-9: ์ ํ์ ๊ฒฐ๊ณผ โ ์๋ก์ด ์ธ์
- - ํํธ 10: ๋ณํ๋ ์ธ๊ณ๊ด โ ์ด๋ฆฐ ์ง๋ฌธ
-
-5. **๋ฌธ์ฒด ์ง์นจ**
- - ์์ ์ฐ๋ฌธ์ฒด: ์ผ์ ์ธ์ด์ ์์ ์ ๊ท ํ
- - ์์์ ํ๋ฆ๊ณผ ๊ฐ๊ด์ ๋ฌ์ฌ์ ๊ต์ฐจ
- - ์งง๊ณ ๊ฐ๋ ฌํ ๋ฌธ์ฅ๊ณผ ์ฑ์ฐฐ์ ๊ธด ๋ฌธ์ฅ์ ๋ฆฌ๋ฌ
- - ๊ฐ๊ฐ์ ๋ํ์ผ๋ก ์ถ์์ ๊ฐ๋๊ตฌํ
-
-๊ตฌ์ฒด์ ์ด๊ณ ํ์ ์ ์ธ ๊ณํ์ ์ ์ํ์ธ์.""",
-
- "English": f"""Plan a philosophically profound novella (8,000 words) worthy of Nobel Prize.
-
-**Theme:** {augmented_query}
-
-**Required Opening:** {opening_sentence}
-
-**Reference:**
-{search_results_str if search_results_str else "N/A"}
-
-**Essential Literary Elements:**
-
-1. **Philosophical Exploration**
- - Modern existential anguish (alienation, identity, loss of meaning)
- - Human condition in digital age
- - Capitalist contradictions and individual choice
- - New reflections on death, love, freedom
-
-2. **Social Message**
- - Class, gender, generational conflicts
- - Environmental crisis and human responsibility
- - Technology vs humanity collision
- - Modern democracy crisis and individual role
-
-3. **Literary Devices**
- - Central metaphor: [concrete object/phenomenon] → [abstract meaning]
- - Recurring motif: [image/action] (minimum 5 variations)
- - Contrast: sustained tension of [A vs B]
- - Symbolic space: what [specific place] means
- - Subjective time flow (flashback, premonition, pause)
-
-4. **Integrated 10-Part Structure**
- Each part's core:
- - Part 1: Start with opening sentence, daily cracks → philosophical questions
- - Part 2-3: External events → deepening introspection
- - Part 4-5: Social conflict → personal dilemma
- - Part 6-7: Crisis peak → existential choice
- - Part 8-9: Choice consequences → new recognition
- - Part 10: Changed worldview → open questions
-
-5. **Style Guidelines**
- - Poetic prose: balance of everyday language and metaphor
- - Stream of consciousness crossing with objective description
- - Rhythm of short intense sentences and reflective long ones
- - Abstract concepts through sensory details
-
-Provide concrete, innovative plan."""
- }
-
- return lang_prompts.get(language, lang_prompts["Korean"])
-
- def create_critic_director_prompt(self, director_plan: str, user_query: str, language: str) -> str:
- """Director plan deep review - Enhanced version"""
- lang_prompts = {
- "Korean": f"""์์ฌ ๊ตฌ์กฐ ์ ๋ฌธ๊ฐ๋ก์ ์ด ๊ธฐํ์ ์ฌ์ธต ๋ถ์ํ์ธ์.
-
-**์ ์ฃผ์ :** {user_query}
-
-**๊ฐ๋์ ๊ธฐํ:**
-{director_plan}
-
-**์ฌ์ธต ๊ฒํ ํญ๋ชฉ:**
-
-1. **์ธ๊ณผ๊ด๊ณ ๊ฒ์ฆ**
- ๊ฐ ํํธ ๊ฐ ์ฐ๊ฒฐ์ ๊ฒํ ํ๊ณ ๋ผ๋ฆฌ์ ๋น์ฝ์ ์ฐพ์ผ์ธ์:
- - ํํธ 1โ2: [์ฐ๊ฒฐ์ฑ ํ๊ฐ]
- - ํํธ 2โ3: [์ฐ๊ฒฐ์ฑ ํ๊ฐ]
- (๋ชจ๋ ์ฐ๊ฒฐ ์ง์ ๊ฒํ )
-
-2. **์ฒ ํ์ ๊น์ด ํ๊ฐ**
- - ์ ์๋ ์ฒ ํ์ ์ฃผ์ ๊ฐ ์ถฉ๋ถํ ๊น์๊ฐ?
- - ํ๋์ ๊ด๋ จ์ฑ์ด ์๋๊ฐ?
- - ๋์ฐฝ์ ํต์ฐฐ์ด ์๋๊ฐ?
-
-3. **๋ฌธํ์ ์ฅ์น์ ํจ๊ณผ์ฑ**
- - ์์ ์ ์์ง์ด ์ ๊ธฐ์ ์ผ๋ก ์๋ํ๋๊ฐ?
- - ๊ณผ๋ํ๊ฑฐ๋ ๋ถ์กฑํ์ง ์์๊ฐ?
- - ์ฃผ์ ์ ๊ธด๋ฐํ ์ฐ๊ฒฐ๋๋๊ฐ?
-
-4. **์บ๋ฆญํฐ ์ํฌ ์คํ ๊ฐ๋ฅ์ฑ**
- - ๋ณํ๊ฐ ์ถฉ๋ถํ ์ ์ง์ ์ธ๊ฐ?
- - ๊ฐ ๋จ๊ณ์ ๋๊ธฐ๊ฐ ๋ชํํ๊ฐ?
- - ์ฌ๋ฆฌ์ ์ ๋ขฐ์ฑ์ด ์๋๊ฐ?
-
-5. **8,000๋จ์ด ์คํ ๊ฐ๋ฅ์ฑ**
- - ๊ฐ ํํธ๊ฐ 800๋จ์ด๋ฅผ ์ ์งํ ์ ์๋๊ฐ?
- - ๋์ด์ง๊ฑฐ๋ ์์ถ๋๋ ๋ถ๋ถ์ ์๋๊ฐ?
-
-**ํ์ ๊ฐ์ ์ฌํญ์ ๊ตฌ์ฒด์ ์ผ๋ก ์ ์ํ์ธ์.**""",
-
- "English": f"""As narrative structure expert, deeply analyze this plan.
-
-**Original Theme:** {user_query}
-
-**Director's Plan:**
-{director_plan}
-
-**Deep Review Items:**
-
-1. **Causality Verification**
- Review connections between parts, find logical leaps:
- - Part 1→2: [Connection assessment]
- - Part 2→3: [Connection assessment]
- (Review all connection points)
-
-2. **Philosophical Depth Assessment**
- - Is philosophical theme deep enough?
- - Contemporary relevance?
- - Original insights?
-
-3. **Literary Device Effectiveness**
- - Do metaphors and symbols work organically?
- - Not excessive or insufficient?
- - Tightly connected to theme?
-
-4. **Character Arc Feasibility**
- - Is change sufficiently gradual?
- - Are motivations clear at each stage?
- - Psychological credibility?
-
-5. **8,000-word Feasibility**
- - Can each part sustain 800 words?
- - Any dragging or compressed sections?
-
-**Provide specific required improvements.**"""
- }
-
- return lang_prompts.get(language, lang_prompts["Korean"])
-
- def create_writer_prompt(self, part_number: int, master_plan: str,
- accumulated_content: str, story_bible: StoryBible,
- language: str) -> str:
- """Single writer prompt - Enhanced version"""
-
- phase_name = NARRATIVE_PHASES[part_number-1]
- target_words = MIN_WORDS_PER_PART
-
- # Part-specific instructions
- philosophical_focus = {
- 1: "Introduce existential anxiety through daily cracks",
- 2: "First collision between individual and society",
- 3: "Self-recognition through encounter with others",
- 4: "Shaking beliefs and clashing values",
- 5: "Weight of choice and paradox of freedom",
- 6: "Test of humanity in extreme situations",
- 7: "Weight of consequences and responsibility",
- 8: "Self-rediscovery through others' gaze",
- 9: "Reconciliation with the irreconcilable",
- 10: "New life possibilities and unresolved questions"
- }
-
- literary_techniques = {
- 1: "Introducing objective correlative",
- 2: "Contrapuntal narration",
- 3: "Stream of consciousness",
- 4: "Subtle shifts in perspective",
- 5: "Aesthetics of silence and omission",
- 6: "Subjective transformation of time",
- 7: "Intersection of multiple viewpoints",
- 8: "Subversion of metaphor",
- 9: "Reinterpretation of archetypal images",
- 10: "Multi-layered open ending"
- }
-
- # Story bible summary
- bible_summary = f"""
-**Characters:** {', '.join(story_bible.characters.keys()) if story_bible.characters else 'TBD'}
-**Key Symbols:** {', '.join(story_bible.symbols.keys()) if story_bible.symbols else 'TBD'}
-**Themes:** {', '.join(story_bible.themes[:3]) if story_bible.themes else 'TBD'}
-**Style:** {story_bible.style_guide.get('voice', 'N/A')}
-"""
-
- # Previous content summary
- prev_content = ""
- if accumulated_content:
- prev_parts = accumulated_content.split('\n\n')
- if len(prev_parts) >= 1:
- prev_content = prev_parts[-1][-2000:] # Last 2000 chars of previous part
-
- lang_prompts = {
- "Korean": f"""๋น์ ์ ํ๋ ๋ฌธํ์ ์ต์ ์ ์ ์ ์๊ฐ์๋๋ค.
-**ํ์ฌ: ํํธ {part_number} - {phase_name}**
-
-{"**ํ์ ์ฒซ๋ฌธ์ฅ:** " + story_bible.opening_sentence if part_number == 1 and story_bible.opening_sentence else ""}
-
-**์ด๋ฒ ํํธ์ ์ฒ ํ์ ์ด์ :** {philosophical_focus[part_number]}
-**ํต์ฌ ๋ฌธํ ๊ธฐ๋ฒ:** {literary_techniques[part_number]}
-
-**์ ์ฒด ๊ณํ:**
-{master_plan}
-
-**์คํ ๋ฆฌ ๋ฐ์ด๋ธ:**
-{bible_summary}
-
-**์ง์ ๋ด์ฉ:**
-{prev_content if prev_content else "์ฒซ ํํธ์๋๋ค"}
-
-**ํํธ {part_number} ์์ฑ ์ง์นจ:**
-
-1. **๋ถ๋:** {target_words}-900 ๋จ์ด (ํ์)
-
-2. **๋ฌธํ์ ์์ฌ ์๊ตฌ์ฌํญ:**
- - ์ต์ 3๊ฐ์ ๋์ฐฝ์ ์์ /์ง์ 
- - 1๊ฐ ์ด์์ ์์ง์ ์ด๋ฏธ์ง ์ฌํ
- - ๊ฐ๊ฐ์ ๋ฌ์ฌ์ ์ถ์์ ์ฌ์ ์ ์ตํฉ
- - ๋ฆฌ๋ฌ๊ฐ ์๋ ๋ฌธ์ฅ ๊ตฌ์ฑ (์ฅ๋จ์ ๋ณ์ฃผ)
-
-3. **ํ๋์ ๊ณ ๋ ํํ:**
- - ๋์งํธ ์๋์ ์์ธ๊ฐ
- - ์๋ณธ์ฃผ์์ ์ถ์ ๋ถ์กฐ๋ฆฌ
- - ๊ด๊ณ์ ํ๋ฉด์ฑ๊ณผ ์ง์ ์ฑ ๊ฐ๋ง
- - ์๋ฏธ ์ถ๊ตฌ์ ๋ฌด์๋ฏธ์ ์ง๋ฉด
-
-4. **์ฌํ์ ๋ฉ์์ง ๋ด์ฌํ:**
- - ์ง์ ์ ์ฃผ์ฅ์ด ์๋ ์ํฉ๊ณผ ์ธ๋ฌผ์ ํตํ ์์
- - ๊ฐ์ธ์ ๊ณ ํต๊ณผ ์ฌํ ๊ตฌ์กฐ์ ์ฐ๊ฒฐ
- - ๋ฏธ์์ ์ผ์๊ณผ ๊ฑฐ์์ ๋ฌธ์ ์ ๊ต์ฐจ
-
-5. **์์ฌ์ ์ถ์ง๋ ฅ:**
- - ์ด์ ํํธ์ ํ์ฐ์ ๊ฒฐ๊ณผ๋ก ์์
- - ์๋ก์ด ๊ฐ๋ฑ ์ธต์ ์ถ๊ฐ
- - ๋ค์ ํํธ๋ฅผ ํฅํ ๊ธด์ฅ๊ฐ ์กฐ์ฑ
-
-**๋ฌธํ์ ๊ธ๊ธฐ:**
-- ์ง๋ถํ ํํ์ด๋ ์ํฌ์ ์์
-- ๊ฐ์ ์ ์ง์ ์ ์ค๋ช
-- ๋๋์ ํ๋จ์ด๋ ๊ตํ
-- ์ธ์์ ์ธ ํด๊ฒฐ์ด๋ ์์
-
-ํํธ {part_number}๋ฅผ ๊น์ด ์๋ ๋ฌธํ์ ์ฑ์ทจ๋ก ๋ง๋์ธ์.""",
-
- "English": f"""You are a writer at the forefront of contemporary literature.
-**Current: Part {part_number} - {phase_name}**
-
-{"**Required Opening:** " + story_bible.opening_sentence if part_number == 1 and story_bible.opening_sentence else ""}
-
-**Philosophical Focus:** {philosophical_focus[part_number]}
-**Core Literary Technique:** {literary_techniques[part_number]}
-
-**Master Plan:**
-{master_plan}
-
-**Story Bible:**
-{bible_summary}
+ """Web search functionality for screenplay research"""
+ def __init__(self):
+ self.brave_api_key = BRAVE_SEARCH_API_KEY
+ self.search_url = "https://api.search.brave.com/res/v1/web/search"
+ self.enabled = bool(self.brave_api_key)
+
+ def search(self, query: str, count: int = 3, language: str = "en") -> List[Dict]:
+ if not self.enabled:
+ return []
+ headers = {
+ "Accept": "application/json",
+ "X-Subscription-Token": self.brave_api_key
+ }
+ params = {
+ "q": query,
+ "count": count,
+ "search_lang": "ko" if language == "Korean" else "en",
+ "text_decorations": False,
+ "safesearch": "moderate"
+ }
+ try:
+ response = requests.get(self.search_url, headers=headers, params=params, timeout=10)
+ response.raise_for_status()
+ results = response.json().get("web", {}).get("results", [])
+ return results
+ except requests.exceptions.RequestException as e:
+ logger.error(f"Web search API error: {e}")
+ return []
+
+ def extract_relevant_info(self, results: List[Dict], max_chars: int = 1500) -> str:
+ if not results:
+ return ""
+ extracted = []
+ total_chars = 0
+ for i, result in enumerate(results[:3], 1):
+ title = result.get("title", "")
+ description = result.get("description", "")
+ info = f"[{i}] {title}: {description}"
+ if total_chars + len(info) < max_chars:
+ extracted.append(info)
+ total_chars += len(info)
+ else:
+ break
+ return "\n".join(extracted)
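A usage sketch (not part of the patch): search() returns an empty list when BRAVE_SEARCH_API_KEY is unset or the request fails, so callers can always concatenate extract_relevant_info() output without extra error handling:

web = WebSearchIntegration()
results = web.search("box office thriller trends", count=2, language="English")
context = web.extract_relevant_info(results)   # "" when there are no results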
+
+class ScreenplayGenerationSystem:
+ """Professional screenplay generation system"""
+ def __init__(self):
+ self.token = FRIENDLI_TOKEN
+ self.api_url = API_URL
+ self.model_id = MODEL_ID
+ self.screenplay_tracker = ScreenplayTracker()
+ self.web_search = WebSearchIntegration()
+ self.current_session_id = None
+ ScreenplayDatabase.init_db()
+
+ def create_headers(self):
+ return {"Authorization": f"Bearer {self.token}", "Content-Type": "application/json"}
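These headers feed a chat-completions POST against API_URL. The request helper itself is outside this hunk, so the payload below is only an assumed sketch of the usual shape (model plus messages), not the patch's actual call:

payload = {
    "model": MODEL_ID,
    "messages": [{"role": "user", "content": "..."}],
}
# requests.post(API_URL, headers=system.create_headers(), json=payload, timeout=300)
# where `system` is a ScreenplayGenerationSystem instance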
+
+ # --- Prompt generation functions ---
+ def create_producer_prompt(self, user_query: str, screenplay_type: str,
+ genre: str, language: str) -> str:
+ """Producer initial concept development"""
+
+ # Web search for market trends if enabled
+ search_results = ""
+ if self.web_search.enabled:
+ queries = [
+ f"box office success {genre} films 2024 2025",
+ f"popular {screenplay_type} {genre} trends",
+ f"audience demographics {genre} movies"
+ ]
+ for q in queries[:2]:
+ results = self.web_search.search(q, count=2, language=language)
+ if results:
+ search_results += self.web_search.extract_relevant_info(results) + "\n"
+
+ lang_prompts = {
+ "Korean": f"""๋น์ ์ ํ ๋ฆฌ์ฐ๋ ํ๋ก๋์์๋๋ค. ์์์ ์ผ๋ก ์ฑ๊ณต ๊ฐ๋ฅํ {screenplay_type} ์ปจ์์ ๊ฐ๋ฐํ์ธ์.
-**Previous Content:**
-{prev_content if prev_content else "This is the first part"}
-
-**Part {part_number} Guidelines:**
-
-1. **Length:** {target_words}-900 words (mandatory)
-
-2. **Literary Device Requirements:**
- - Minimum 3 original metaphors/similes
- - Deepen at least 1 symbolic image
- - Fusion of sensory description and abstract thought
- - Rhythmic sentence composition (variation of long/short)
-
-3. **Modern Anguish Expression:**
- - Digital age alienation
- - Absurdity of capitalist life
- - Surface relationships vs authenticity yearning
- - Meaning pursuit vs confronting meaninglessness
-
-4. **Social Message Internalization:**
- - Implication through situation and character, not direct claim
- - Connection between individual pain and social structure
- - Intersection of micro daily life and macro problems
-
-5. **Narrative Momentum:**
- - Start as inevitable result of previous part
- - Add new conflict layers
- - Create tension toward next part
-
-**Literary Taboos:**
-- Clichรฉd expressions or trite metaphors
-- Direct emotion explanation
-- Moral judgment or preaching
-- Artificial resolution or comfort
-
-Make Part {part_number} a profound literary achievement."""
- }
-
- return lang_prompts.get(language, lang_prompts["Korean"])
-
- def create_part_critic_prompt(self, part_number: int, part_content: str,
- master_plan: str, accumulated_content: str,
- story_bible: StoryBible, language: str) -> str:
- """Part-by-part immediate critique - Enhanced version"""
-
- lang_prompts = {
- "Korean": f"""ํํธ {part_number}์ ๋ฌธํ์ ์ฑ์ทจ๋๋ฅผ ์๊ฒฉํ ํ๊ฐํ์ธ์.
-
-**๋ง์คํฐํ๋ ํํธ {part_number} ์๊ตฌ์ฌํญ:**
-{self._extract_part_plan(master_plan, part_number)}
+**์์ฒญ์ฌํญ:** {user_query}
+**ํ์:** {SCREENPLAY_LENGTHS[screenplay_type]['description']}
+**์ฅ๋ฅด:** {genre}
-**์์ฑ๋ ๋ด์ฉ:**
-{part_content}
+**์์ฅ ์กฐ์ฌ:**
+{search_results if search_results else "N/A"}
-**์คํ ๋ฆฌ ๋ฐ์ด๋ธ ์ฒดํฌ:**
-- ์บ๋ฆญํฐ: {', '.join(story_bible.characters.keys())}
-- ์ค์ : {', '.join(story_bible.settings.keys())}
+**ํ์ ์ ๊ณต ํญ๋ชฉ:**
-**ํ๊ฐ ๊ธฐ์ค:**
+1. **์ ๋ชฉ (TITLE)**
+ - ๊ธฐ์ตํ๊ธฐ ์ฝ๊ณ ๋ง์ผํ๊ฐ๋ฅํ ์ ๋ชฉ
+ - ์ฅ๋ฅด์ ํค์ ์์ํ๋ ์ ๋ชฉ
-1. **๋ฌธํ์ ์์ฌ (30%)**
- - ์์ ์ ์์ง์ ๋์ฐฝ์ฑ
- - ์ธ์ด์ ์์ ๋ฐ๋
- - ์ด๋ฏธ์ง์ ์ ๋ช๋์ ๊น์ด
- - ๋ฌธ์ฅ์ ๋ฆฌ๋ฌ๊ณผ ์์์ฑ
+2. **๋ก๊ทธ๋ผ์ธ (LOGLINE)**
+ - 25๋จ์ด ์ด๋ด ํ ๋ฌธ์ฅ
+ - ํ์: "[์ฌ๊ฑด]์ด ์ผ์ด๋ฌ์ ๋, [์ฃผ์ธ๊ณต]์ [๋ชฉํ]๋ฅผ ์ด๋ฃจ์ด์ผ ํ๋ค. ๊ทธ๋ ์ง ์์ผ๋ฉด [๊ฒฐ๊ณผ]"
+ - ๊ฐ๋ฑ๊ณผ stakes๊ฐ ๋ชํํด์ผ ํจ
-2. **์ฒ ํ์ ๊น์ด (25%)**
- - ์ค์กด์ ์ง๋ฌธ์ ์ ๊ธฐ
- - ํ๋์ธ์ ์กฐ๊ฑด ํ๊ตฌ
- - ๋ณดํธ์ฑ๊ณผ ํน์์ฑ์ ๊ท ํ
- - ์ฌ์ ์ ๋์ฐฝ์ฑ
+3. **์ฅ๋ฅด ๋ถ์**
+ - ์ฃผ ์ฅ๋ฅด: {genre}
+ - ์๋ธ ์ฅ๋ฅด:
+ - ํค & ๋ถ์๊ธฐ:
-3. **์ฌํ์ ํต์ฐฐ (20%)**
- - ์๋์ ์ ์ ํฌ์ฐฉ
- - ๊ตฌ์กฐ์ ๊ฐ์ธ์ ๊ด๊ณ
- - ๋นํ์ ์๊ฐ์ ์๋ฆฌํจ
- - ๋์์ ์์๋ ฅ
+4. **ํ๊ฒ ๊ด๊ฐ**
+ - ์ฃผ์ ์ฐ๋ น๋:
+ - ์ฑ๋ณ ๋ถํฌ:
+ - ๊ด์ฌ์ฌ:
+ - ์ ์ฌ ์ํ ํฌ์ธต:
-4. **์์ฌ์ ์์ฑ๋ (25%)**
- - ์ธ๊ณผ๊ด๊ณ์ ํ์ฐ์ฑ
- - ๊ธด์ฅ๊ฐ์ ์ ์ง
- - ์ธ๋ฌผ์ ์์ฒด์ฑ
- - ๊ตฌ์กฐ์ ํต์ผ์ฑ
+5. **๋น๊ต ์ํ (COMPS)**
+ - 3๊ฐ์ ์ฑ๊ณตํ ์ ์ฌ ํ๋ก์ ํธ
+ - ๊ฐ๊ฐ์ ๋ฐ์ค์คํผ์ค/์์ฒญ๋ฅ ์ฑ๊ณผ
+ - ์ฐ๋ฆฌ ํ๋ก์ ํธ์์ ์ฐจ๋ณ์
-**๊ตฌ์ฒด์ ์ง์ ์ฌํญ:**
-- ์ง๋ถํ ํํ: [์์์ ๋์]
-- ์ฒ ํ์ ์ฒ์ฐฉ ๋ถ์กฑ: [๋ณด์ ๋ฐฉํฅ]
-- ์ฌํ์ ๋ฉ์์ง ๋ถ๋ชํ: [๊ฐํ ๋ฐฉ์]
-- ์์ฌ์ ํ์ : [์์ ํ์]
+6. **๊ณ ์ ํ๋งค ํฌ์ธํธ (USP)**
+ - ์ด ์ด์ผ๊ธฐ๋ง์ ๋ํนํ ์ 
+ - ํ์ฌ ์์ฅ์์์ ํ์์ฑ
+ - ์ ์ ๊ฐ๋ฅ์ฑ
-**ํ์ ๊ฐ์ ์๊ตฌ:**
-๋ฌธํ์ ์์ค์ ๋ธ๋ฒจ์ ๊ธ์ผ๋ก ๋์ด์ฌ๋ฆฌ๊ธฐ ์ํ ๊ตฌ์ฒด์ ์์ ์์ ์ ์ํ์ธ์.""",
+7. **๋น์ฃผ์ผ ์ปจ์**
+ - ํต์ฌ ๋น์ฃผ์ผ ์ด๋ฏธ์ง 3๊ฐ
+ - ์ ์ฒด์ ์ธ ๋ฃฉ & ํ
- "English": f"""Strictly evaluate literary achievement of Part {part_number}.
+๊ตฌ์ฒด์ ์ด๊ณ ์์ฅ์ฑ ์๋ ์ปจ์์ ์ ์ํ์ธ์.""",
-**Master Plan Part {part_number} Requirements:**
-{self._extract_part_plan(master_plan, part_number)}
+ "English": f"""You are a Hollywood producer. Develop a commercially viable {screenplay_type} concept.
-**Written Content:**
-{part_content}
+**Request:** {user_query}
+**Type:** {SCREENPLAY_LENGTHS[screenplay_type]['description']}
+**Genre:** {genre}
-**Story Bible Check:**
-- Characters: {', '.join(story_bible.characters.keys()) if story_bible.characters else 'None yet'}
-- Settings: {', '.join(story_bible.settings.keys()) if story_bible.settings else 'None yet'}
+**Market Research:**
+{search_results if search_results else "N/A"}
-**Evaluation Criteria:**
+**Required Elements:**
+
+1. **TITLE**
+ - Memorable and marketable
+ - Hints at genre and tone
+
+2. **LOGLINE**
+ - One sentence, 25 words max
+ - Format: "When [inciting incident], a [protagonist] must [objective] or else [stakes]"
+ - Clear conflict and stakes
+
+3. **GENRE ANALYSIS**
+ - Primary Genre: {genre}
+ - Sub-genre:
+ - Tone & Mood:
+
+4. **TARGET AUDIENCE**
+ - Primary Age Range:
+ - Gender Distribution:
+ - Interests:
+ - Similar Works Fanbase:
+
+5. **COMPARABLE FILMS (COMPS)**
+ - 3 successful similar projects
+ - Box office/viewership performance
+ - How ours differs
+
+6. **UNIQUE SELLING POINT (USP)**
+ - What makes this story unique
+ - Why now in the market
+ - Production feasibility
+
+7. **VISUAL CONCEPT**
+ - 3 key visual images
+ - Overall look & feel
+
+Provide specific, marketable concept."""
+ }
+
+ return lang_prompts.get(language, lang_prompts["English"])
+
+ def create_story_developer_prompt(self, producer_concept: str, user_query: str,
+ screenplay_type: str, genre: str, language: str) -> str:
+ """Story developer for synopsis and structure"""
+
+ genre_template = GENRE_TEMPLATES.get(genre, GENRE_TEMPLATES["drama"])
+
+ lang_prompts = {
+ "Korean": f"""๋น์ ์ ์คํ ๋ฆฌ ๊ฐ๋ฐ์์๋๋ค. ํ๋ก๋์์ ์ปจ์์ ๋ฐํ์ผ๋ก {screenplay_type}์ ์๋์์ค์ 3๋ง ๊ตฌ์กฐ๋ฅผ ๊ฐ๋ฐํ์ธ์.
+
+**ํ๋ก๋์ ์ปจ์:**
+{producer_concept}
+
+**์ฅ๋ฅด ํน์ฑ:** {genre}
+- ํ์ด์ฑ: {genre_template['pacing']}
+- ํต์ฌ ์์: {', '.join(genre_template['key_elements'])}
+- ๊ตฌ์กฐ ๋นํธ: {', '.join(genre_template['structure_beats'])}
+
+**ํ์ ์์ฑ ํญ๋ชฉ:**
+
+1. **์๋์์ค (SYNOPSIS)**
+ - 300-500๋จ์ด
+ - 3๋ง ๊ตฌ์กฐ๊ฐ ๋ชํํ ๋๋ฌ๋๋๋ก
+ - ์ฃผ์ธ๊ณต์ ๋ณํ arc ํฌํจ
+ - ์ฃผ์ ์ ํ์ ๋ช์
+ - ๊ฒฐ๋ง ํฌํจ (์คํฌ์ผ๋ฌ OK)
+
+2. **3๋ง ๊ตฌ์กฐ (THREE-ACT STRUCTURE)**
+
+ **์ 1๋ง - ์ค์ (Setup) [25%]**
+ - ์ผ์ ์ธ๊ณ (Ordinary World):
+ - ๊ทผ์น์๊ฐ ์ฌ๊ฑด (Inciting Incident):
+ - ์ฃผ์ธ๊ณต ์๊ฐ ๋ฐ ๊ฒฐํจ:
+ - 1๋ง ์ ํ์ (Plot Point 1):
+
+ **์ 2๋งA - ์์น ์ก์ (Rising Action) [25%]**
+ - ์๋ก์ด ์ธ๊ณ ์ง์:
+ - ์ฌ๋ฏธ์ ๊ฒ์ (Fun and Games):
+ - B ์คํ ๋ฆฌ (๊ด๊ณ/ํ๋ง):
+ - ์ค๊ฐ์ (Midpoint) - ๊ฐ์ง ์น๋ฆฌ/ํจ๋ฐฐ:
+
+ **์ 2๋งB - ๋ณต์กํ (Complications) [25%]**
+ - ์๋น์ ๋ฐ๊ฒฉ:
+ - ํ ํด์ฒด/์๊ธฐ:
+ - ๋ชจ๋ ๊ฒ์ ์์ (All Is Lost):
+ - ์ํผ์ ์ด๋ ๋ฐค:
+
+ **์ 3๋ง - ํด๊ฒฐ (Resolution) [25%]**
+ - 2๋ง ์ ํ์ (Plot Point 2):
+ - ์ต์ข์ ํฌ ์ค๋น:
+ - ํด๋ผ์ด๋งฅ์ค:
+ - ์๋ก์ด ์ผ์:
+
+3. **Save the Cat ๋นํธ ์ํธ**
+ 15๊ฐ ๋นํธ๋ฅผ {SCREENPLAY_LENGTHS[screenplay_type]['pages']}ํ์ด์ง์ ๋ง์ถฐ ๋ฐฐ์น
+
+4. **์ฃผ์ (THEME)**
+ - ์ค์ฌ ์ฃผ์ :
+ - ์ฃผ์ ๊ฐ ๋๋ฌ๋๋ ์๊ฐ:
+ - ์ฃผ์ ์ ์๊ฐ์ ํํ:
+
+5. **ํค & ์คํ์ผ**
+ - ์ ์ฒด์ ์ธ ํค:
+ - ์ ๋จธ ์ฌ์ฉ ์ฌ๋ถ:
+ - ๋น์ฃผ์ผ ์คํ์ผ:
+
+๊ตฌ์ฒด์ ์ด๊ณ ๊ฐ์ ์ ์ผ๋ก ๊ณต๊ฐ๊ฐ๋ ์คํ ๋ฆฌ๋ฅผ ๋ง๋์ธ์.""",
+
+ "English": f"""You are a story developer. Based on the producer's concept, develop the synopsis and three-act structure for this {screenplay_type}.
+
+**Producer Concept:**
+{producer_concept}
+
+**Genre Characteristics:** {genre}
+- Pacing: {genre_template['pacing']}
+- Key Elements: {', '.join(genre_template['key_elements'])}
+- Structure Beats: {', '.join(genre_template['structure_beats'])}
+
+**Required Elements:**
+
+1. **SYNOPSIS**
+ - 300-500 words
+ - Clear three-act structure
+ - Protagonist's change arc
+ - Major turning points
+ - Include ending (spoilers OK)
+
+2. **THREE-ACT STRUCTURE**
+
+ **ACT 1 - Setup [25%]**
+ - Ordinary World:
+ - Inciting Incident:
+ - Protagonist Introduction & Flaw:
+ - Plot Point 1:
+
+ **ACT 2A - Rising Action [25%]**
+ - Entering New World:
+ - Fun and Games:
+ - B Story (Relationship/Theme):
+ - Midpoint - False Victory/Defeat:
+
+ **ACT 2B - Complications [25%]**
+ - Bad Guys Close In:
+ - Team Breaks Down/Crisis:
+ - All Is Lost:
+ - Dark Night of the Soul:
+
+ **ACT 3 - Resolution [25%]**
+ - Plot Point 2:
+ - Final Battle Preparation:
+ - Climax:
+ - New Normal:
+
+3. **SAVE THE CAT BEAT SHEET**
+ Place 15 beats across {SCREENPLAY_LENGTHS[screenplay_type]['pages']} pages
+
+4. **THEME**
+ - Central Theme:
+ - Theme Stated Moment:
+ - Visual Theme Expression:
+
+5. **TONE & STYLE**
+ - Overall Tone:
+ - Use of Humor:
+ - Visual Style:
+
+Create a specific, emotionally resonant story."""
+ }
+
+ return lang_prompts.get(language, lang_prompts["English"])
+
+ def create_character_designer_prompt(self, producer_concept: str, story_structure: str,
+ genre: str, language: str) -> str:
+ """Character designer prompt"""
+
+ lang_prompts = {
+        "Korean": f"""당신은 캐릭터 디자이너입니다. 다층적이고 매력적인 캐릭터들을 창조하세요.
+
+**프로듀서 컨셉:**
+{producer_concept}
+
+**스토리 구조:**
+{story_structure}
+
+**필수 캐릭터 프로필:**
+
+1. **주인공 (PROTAGONIST)**
+ - 이름 & 나이:
+ - 직업/역할:
+ - 캐릭터 아키타입:
+ - WANT (외적 목표):
+ - NEED (내적 필요):
+ - 치명적 결함 (Fatal Flaw):
+ - 백스토리 (핵심 상처):
+ - 성격 특성 (3-5개):
+ - 말투 & 언어 패턴:
+ - 시각적 특징:
+ - 캐릭터 아크 (A→B):
+
+2. **적대자 (ANTAGONIST)**
+ - 이름 & 나이:
+ - 직업/역할:
+ - 악역 아키타입:
+ - 목표 & 동기:
+ - 주인공과의 연결점:
+ - 정당성 있는 이유:
+ - 약점:
+ - 특징적 행동:
+
+3. **조력자들 (SUPPORTING CAST)**
+ 최소 3명, 각각:
+ - 이름 & 역할:
+ - 주인공과의 관계:
+ - 스토리 기능:
+ - 독특한 특성:
+ - 기여하는 바:
+
+4. **캐릭터 관계도**
+ - 주요 관계 역학:
+ - 갈등 구조:
+ - 감정적 연결:
+ - 파워 다이내믹:
+
+5. **캐스팅 제안**
+ - 각 주요 캐릭터별 이상적인 배우 타입
+ - 연령대, 외모, 연기 스타일
+
+6. **대화 샘플**
+ - 각 주요 캐릭터의 시그니처 대사 2-3개
+ - 캐릭터의 본질을 드러내는 대화
+
+각 캐릭터가 테마를 구현하고 스토리를 추진하도록 디자인하세요.""",
+
+ "English": f"""You are a character designer. Create multi-dimensional, compelling characters.
+
+**Producer Concept:**
+{producer_concept}
+
+**Story Structure:**
+{story_structure}
+
+**Required Character Profiles:**
+
+1. **PROTAGONIST**
+ - Name & Age:
+ - Occupation/Role:
+ - Character Archetype:
+ - WANT (External Goal):
+ - NEED (Internal Need):
+ - Fatal Flaw:
+ - Backstory (Core Wound):
+ - Personality Traits (3-5):
+ - Speech Pattern:
+ - Visual Characteristics:
+ - Character Arc (AโB):
+
+2. **ANTAGONIST**
+ - Name & Age:
+ - Occupation/Role:
+ - Villain Archetype:
+ - Goal & Motivation:
+ - Connection to Protagonist:
+ - Justifiable Reason:
+ - Weakness:
+ - Signature Behaviors:
+
+3. **SUPPORTING CAST**
+ Minimum 3, each with:
+ - Name & Role:
+ - Relationship to Protagonist:
+ - Story Function:
+ - Unique Traits:
+ - What They Contribute:
+
+4. **CHARACTER RELATIONSHIPS**
+ - Key Relationship Dynamics:
+ - Conflict Structure:
+ - Emotional Connections:
+ - Power Dynamics:
+
+5. **CASTING SUGGESTIONS**
+ - Ideal actor type for each major character
+ - Age range, appearance, acting style
+
+6. **DIALOGUE SAMPLES**
+ - 2-3 signature lines per major character
+ - Dialogue revealing character essence
+
+Design each character to embody theme and drive story."""
+ }
+
+ return lang_prompts.get(language, lang_prompts["English"])
+
+ def create_scene_planner_prompt(self, story_structure: str, characters: str,
+ screenplay_type: str, genre: str, language: str) -> str:
+ """Scene breakdown planner"""
+
+ total_pages = SCREENPLAY_LENGTHS[screenplay_type]['pages']
+
+ lang_prompts = {
+        "Korean": f"""당신은 씬 플래너입니다. {total_pages}페이지 {screenplay_type}의 상세한 씬 브레이크다운을 작성하세요.
+
+**스토리 구조:**
+{story_structure}
+
+**캐릭터:**
+{characters}
+
+**씬 브레이크다운 요구사항:**
+
+각 씬마다 다음 정보 제공:
+- 씬 번호
+- 장소 (INT./EXT. LOCATION)
+- 시간 (DAY/NIGHT/DAWN/DUSK)
+- 등장인물
+- 씬의 목적 (스토리/캐릭터/테마)
+- 핵심 갈등
+- 예상 페이지 수
+- Save the Cat 비트 (해당시)
+
+**막별 배분:**
+- 1막: ~{int(total_pages * 0.25)}페이지 (10-12씬)
+- 2막A: ~{int(total_pages * 0.25)}페이지 (12-15씬)
+- 2막B: ~{int(total_pages * 0.25)}페이지 (12-15씬)
+- 3막: ~{int(total_pages * 0.25)}페이지 (8-10씬)
+
+**장르별 고려사항:** {genre}
+{self._get_genre_scene_guidelines(genre, "Korean")}
+
+**씬 전환 스타일:**
+- CUT TO:
+- FADE IN/OUT:
+- MATCH CUT:
+- SMASH CUT:
+- DISSOLVE TO:
+
+각 씬이 스토리를 전진시키고 캐릭터를 발전시키도록 계획하세요.""",
+
+ "English": f"""You are a scene planner. Create detailed scene breakdown for {total_pages}-page {screenplay_type}.
+
+**Story Structure:**
+{story_structure}
+
+**Characters:**
+{characters}
+
+**Scene Breakdown Requirements:**
+
+For each scene provide:
+- Scene Number
+- Location (INT./EXT. LOCATION)
+- Time (DAY/NIGHT/DAWN/DUSK)
+- Characters Present
+- Scene Purpose (Story/Character/Theme)
+- Core Conflict
+- Estimated Page Count
+- Save the Cat Beat (if applicable)
+
+**Act Distribution:**
+- Act 1: ~{int(total_pages * 0.25)} pages (10-12 scenes)
+- Act 2A: ~{int(total_pages * 0.25)} pages (12-15 scenes)
+- Act 2B: ~{int(total_pages * 0.25)} pages (12-15 scenes)
+- Act 3: ~{int(total_pages * 0.25)} pages (8-10 scenes)
+
+**Genre Considerations:** {genre}
+{self._get_genre_scene_guidelines(genre, "English")}
+
+**Scene Transitions:**
+- CUT TO:
+- FADE IN/OUT:
+- MATCH CUT:
+- SMASH CUT:
+- DISSOLVE TO:
+
+Plan each scene to advance story and develop character."""
+ }
+
+ return lang_prompts.get(language, lang_prompts["English"])
+
+ def create_screenwriter_prompt(self, act: str, scene_breakdown: str,
+ characters: str, previous_acts: str,
+ screenplay_type: str, genre: str, language: str) -> str:
+ """Screenwriter prompt for actual screenplay pages"""
+
+ act_pages = int(SCREENPLAY_LENGTHS[screenplay_type]['pages'] * 0.25)
+
+ lang_prompts = {
+        "Korean": f"""당신은 프로 시나리오 작가입니다. {act}를 표준 시나리오 포맷으로 작성하세요.
+
+**타겟 분량:** {act_pages}페이지
+
+**씬 브레이크다운:**
+{self._extract_act_scenes(scene_breakdown, act)}
-1. **Literary Rhetoric (30%)**
- - Originality of metaphor and symbol
- - Poetic density of language
- - Clarity and depth of imagery
- - Rhythm and musicality of sentences
-
-2. **Philosophical Depth (25%)**
- - Raising existential questions
- - Exploring modern human condition
- - Balance of universality and specificity
- - Originality of thought
-
-3. **Social Insight (20%)**
- - Capturing zeitgeist
- - Relationship between structure and individual
- - Sharpness of critical perspective
- - Alternative imagination
-
-4. **Narrative Completion (25%)**
- - Inevitability of causality
- - Maintaining tension
- - Character dimensionality
- - Structural unity
-
-**Specific Points:**
-- Clichรฉd expressions: [examples and alternatives]
-- Insufficient philosophical exploration: [enhancement direction]
-- Unclear social message: [strengthening methods]
-- Narrative gaps: [needed revisions]
+**캐릭터 정보:**
+{characters}
+
+**이전 내용:**
+{previous_acts if previous_acts else "첫 막입니다."}
+
+**시나리오 포맷 규칙:**
+
+1. **씬 헤딩**
+ INT. 장소 - 시간
+ EXT. 장소 - 시간
+
+2. **액션 라인**
+ - 현재 시제 사용
+ - 시각적으로 보이는 것만 묘사
+ - 4줄 이하로 유지
+ - 감정은 행동으로 표현
+
+3. **캐릭터 소개**
+ 첫 등장시: 이름 (나이) 간단한 묘사
+
+4. **대화**
+ 캐릭터명
+ (지문)
+ 대사
+
+5. **핵심 원칙**
+ - Show, don't tell
+ - 서브텍스트 활용
+ - 자연스러운 대화
+ - 시각적 스토리텔링
+ - 페이지당 1분 규칙
+
+**{genre} 장르 특성:**
+- 대화 비율: {GENRE_TEMPLATES[genre]['dialogue_ratio']*100}%
+- 씬 길이: {GENRE_TEMPLATES[genre]['scene_length']}
+- 핵심 요소: {', '.join(GENRE_TEMPLATES[genre]['key_elements'][:2])}
+
+정확한 포맷과 몰입감 있는 스토리텔링으로 작성하세요.""",
+
+ "English": f"""You are a professional screenwriter. Write {act} in standard screenplay format.
+
+**Target Length:** {act_pages} pages
+
+**Scene Breakdown:**
+{self._extract_act_scenes(scene_breakdown, act)}
+
+**Character Info:**
+{characters}
+
+**Previous Content:**
+{previous_acts if previous_acts else "This is the first act."}
+
+**Screenplay Format Rules:**
+
+1. **Scene Headings**
+ INT. LOCATION - TIME
+ EXT. LOCATION - TIME
+
+2. **Action Lines**
+ - Present tense
+ - Only what's visually seen
+ - Keep under 4 lines
+ - Emotions through actions
+
+3. **Character Intros**
+ First appearance: NAME (age) brief description
+
+4. **Dialogue**
+ CHARACTER NAME
+ (parenthetical)
+ Dialogue
+
+5. **Key Principles**
+ - Show, don't tell
+ - Use subtext
+ - Natural dialogue
+ - Visual storytelling
+ - One page = one minute
+
+**{genre} Genre Characteristics:**
+- Dialogue Ratio: {GENRE_TEMPLATES[genre]['dialogue_ratio']*100}%
+- Scene Length: {GENRE_TEMPLATES[genre]['scene_length']}
+- Key Elements: {', '.join(GENRE_TEMPLATES[genre]['key_elements'][:2])}
+
+Write with proper format and engaging storytelling."""
+ }
+
+ return lang_prompts.get(language, lang_prompts["English"])
+
+ def create_script_doctor_prompt(self, act_content: str, act: str,
+ genre: str, language: str) -> str:
+ """Script doctor review and polish"""
+
+ lang_prompts = {
+        "Korean": f"""당신은 스크립트 닥터입니다. {act}를 검토하고 개선사항을 제시하세요.
+
+**작성된 내용:**
+{act_content}
+
+**검토 항목:**
+
+1. **포맷 정확성**
+ - 씬 헤딩 형식
+ - 액션 라인 길이
+ - 대화 포맷
+ - 전환 표시
+
+2. **스토리텔링**
+ - 시각적 명확성
+ - 페이싱
+ - 긴장감 구축
+ - 씬 목적 달성
+
+3. **대화 품질**
+ - 자연스러움
+ - 캐릭터 고유성
+ - 서브텍스트
+ - 불필요한 설명 제거
+
+4. **{genre} 장르 적합성**
+ - 장르 관습 준수
+ - 톤 일관성
+ - 기대 충족
+
+5. **기술적 측면**
+ - 페이지 수 적정성
+ - 제작 가능성
+ - 예산 고려사항
+
+**필수 개선사항:**
+구체적인 수정 제안과 개선된 예시를 제공하세요.""",
+
+ "English": f"""You are a script doctor. Review and provide improvements for {act}.
+
+**Written Content:**
+{act_content}
+
+**Review Areas:**
+
+1. **Format Accuracy**
+ - Scene heading format
+ - Action line length
+ - Dialogue format
+ - Transitions
+
+2. **Storytelling**
+ - Visual clarity
+ - Pacing
+ - Tension building
+ - Scene purpose achievement
+
+3. **Dialogue Quality**
+ - Naturalness
+ - Character uniqueness
+ - Subtext
+ - Remove exposition
+
+4. **{genre} Genre Fit**
+ - Genre conventions
+ - Tone consistency
+ - Meeting expectations
+
+5. **Technical Aspects**
+ - Page count appropriateness
+ - Production feasibility
+ - Budget considerations
**Required Improvements:**
-Provide specific revisions to elevate literary level to Nobel Prize standard."""
- }
-
- return lang_prompts.get(language, lang_prompts["Korean"])
-
- def create_writer_revision_prompt(self, part_number: int, original_content: str,
- critic_feedback: str, language: str) -> str:
- """Writer revision prompt"""
-
- lang_prompts = {
-        "Korean": f"""파트 {part_number}를 비평에 따라 수정하세요.
-
-**원본:**
-{original_content}
-
-**비평 피드백:**
-{critic_feedback}
-
-**수정 지침:**
-1. 모든 '필수 수정' 사항을 반영
-2. 가능한 '권장 개선' 사항도 포함
-3. 원본의 강점은 유지
-4. 분량 {MIN_WORDS_PER_PART}단어 이상 유지
-5. 작가로서의 일관된 목소리 유지
-6. 문학적 수준을 한 단계 높이기
-
-수정본만 제시하세요. 설명은 불필요합니다.""",
-
- "English": f"""Revise Part {part_number} according to critique.
-
-**Original:**
-{original_content}
-
-**Critique Feedback:**
-{critic_feedback}
-
-**Revision Guidelines:**
-1. Reflect all 'Required fixes'
-2. Include 'Recommended improvements' where possible
-3. Maintain original strengths
-4. Keep length {MIN_WORDS_PER_PART}+ words
-5. Maintain consistent authorial voice
-6. Elevate literary level
-
-Present only the revision. No explanation needed."""
- }
-
- return lang_prompts.get(language, lang_prompts["Korean"])
-
- def create_final_critic_prompt(self, complete_novel: str, word_count: int,
- story_bible: StoryBible, language: str) -> str:
- """Final comprehensive evaluation"""
-
- lang_prompts = {
-        "Korean": f"""완성된 소설을 종합 평가하세요.
-
-**작품 정보:**
-- 총 분량: {word_count}단어
-- 목표: 8,000단어
+Provide specific revision suggestions with improved examples."""
+ }
+
+ return lang_prompts.get(language, lang_prompts["English"])
+
+ def create_final_reviewer_prompt(self, complete_screenplay: str,
+ screenplay_type: str, genre: str, language: str) -> str:
+ """Final comprehensive review"""
+
+ lang_prompts = {
+        "Korean": f"""당신은 최종 리뷰어입니다. 완성된 {screenplay_type} 시나리오를 종합 평가하세요.
**평가 기준:**
-1. **서사적 통합성 (30점)**
- - 10개 파트가 하나의 이야기로 통합되었는가?
- - 인과관계가 명확하고 필연적인가?
- - 반복이나 순환 없이 진행되는가?
+1. **상업성 (25점)**
+ - 시장성
+ - 타겟 관객 어필
+ - 제작 가능성
+ - 배급 잠재력
-2. **캐릭터 아크 (25점)**
- - 주인공의 변화가 설득력 있는가?
- - 변화가 점진적이고 자연스러운가?
- - 최종 상태가 초기와 명확히 다른가?
+2. **스토리 (25점)**
+ - 3막 구조 효과성
+ - 캐릭터 아크
+ - 플롯 일관성
+ - 테마 전달
-3. **문학적 성취 (25점)**
- - 주제가 깊이 있게 탐구되었는가?
- - 상징이 효과적으로 활용되었는가?
- - 문체가 일관되고 아름다운가?
- - 현대적 철학과 사회적 메시지가 녹아 있는가?
+3. **기술적 완성도 (25점)**
+ - 포맷 정확성
+ - 페이지 수 적정성
+ - 씬 구성
+ - 시각적 스토리텔링
-4. **기술적 완성도 (20점)**
- - 목표 분량을 달성했는가?
- - 각 파트가 균형 있게 전개되었는가?
- - 문법과 표현이 정확한가?
+4. **대화 & 캐릭터 (25점)**
+ - 대화 자연스러움
+ - 캐릭터 고유성
+ - 관계 역학
+ - 감정적 진정성
-**총점: /100점**
+**종합 평가:**
+- 강점 (3-5개)
+- 개선 필요 사항 (3-5개)
+- 시장 잠재력
+- 추천 사항
-구체적인 강점과 약점을 제시하세요.""",
+**등급:** A+ ~ F
- "English": f"""Comprehensively evaluate the completed novel.
+구체적이고 건설적인 피드백을 제공하세요.""",
-**Work Info:**
-- Total length: {word_count} words
-- Target: 8,000 words
+ "English": f"""You are the final reviewer. Comprehensively evaluate the completed {screenplay_type} screenplay.
**Evaluation Criteria:**
-1. **Narrative Integration (30 points)**
- - Are 10 parts integrated into one story?
- - Clear and inevitable causality?
- - Progress without repetition or cycles?
-
-2. **Character Arc (25 points)**
- - Convincing protagonist transformation?
- - Gradual and natural changes?
- - Final state clearly different from initial?
-
-3. **Literary Achievement (25 points)**
- - Theme explored with depth?
- - Symbols used effectively?
- - Consistent and beautiful style?
- - Contemporary philosophy and social message integrated?
-
-4. **Technical Completion (20 points)**
- - Target length achieved?
- - Each part balanced in development?
- - Grammar and expression accurate?
-
-**Total Score: /100 points**
-
-Present specific strengths and weaknesses."""
- }
-
- return lang_prompts.get(language, lang_prompts["Korean"])
-
- def create_director_final_prompt(self, initial_plan: str, critic_feedback: str,
- user_query: str, language: str) -> str:
- """Director final master plan"""
- return f"""Reflect the critique and complete the final master plan.
-
-**Original Theme:** {user_query}
-
-**Initial Plan:**
-{initial_plan}
-
-**Critique Feedback:**
-{critic_feedback}
-
-**Final Master Plan Requirements:**
-1. Reflect all critique points
-2. Specific content and causality for 10 parts
-3. Clear transformation stages of protagonist
-4. Meaning evolution process of central symbol
-5. Feasibility of 800 words per part
-6. Implementation of philosophical depth and social message
-
-Present concrete and executable final plan."""
-
- def _extract_part_plan(self, master_plan: str, part_number: int) -> str:
- """Extract specific part plan from master plan"""
- lines = master_plan.split('\n')
- part_section = []
- capturing = False
-
- for line in lines:
- if f"Part {part_number}:" in line or f"ํํธ {part_number}:" in line:
- capturing = True
- elif capturing and (f"Part {part_number+1}:" in line or f"ํํธ {part_number+1}:" in line):
- break
- elif capturing:
- part_section.append(line)
-
- return '\n'.join(part_section) if part_section else "Cannot find the part plan."
-
- # --- LLM call functions ---
- def call_llm_sync(self, messages: List[Dict[str, str]], role: str, language: str) -> str:
- full_content = ""
- for chunk in self.call_llm_streaming(messages, role, language):
- full_content += chunk
-        if full_content.startswith("❌"):
- raise Exception(f"LLM Call Failed: {full_content}")
- return full_content
-
- def call_llm_streaming(self, messages: List[Dict[str, str]], role: str,
- language: str) -> Generator[str, None, None]:
- try:
- system_prompts = self.get_system_prompts(language)
- full_messages = [{"role": "system", "content": system_prompts.get(role, "")}, *messages]
-
- max_tokens = 15000 if role == "writer" else 10000
-
- payload = {
- "model": self.model_id,
- "messages": full_messages,
- "max_tokens": max_tokens,
- "temperature": 0.8,
- "top_p": 0.95,
- "presence_penalty": 0.5,
- "frequency_penalty": 0.3,
- "stream": True
- }
-
- response = requests.post(
- self.api_url,
- headers=self.create_headers(),
- json=payload,
- stream=True,
- timeout=180
- )
-
- if response.status_code != 200:
-                yield f"❌ API Error (Status Code: {response.status_code})"
- return
-
- buffer = ""
- for line in response.iter_lines():
- if not line:
- continue
-
- try:
- line_str = line.decode('utf-8').strip()
- if not line_str.startswith("data: "):
- continue
-
- data_str = line_str[6:]
- if data_str == "[DONE]":
- break
-
- data = json.loads(data_str)
- choices = data.get("choices", [])
- if choices and choices[0].get("delta", {}).get("content"):
- content = choices[0]["delta"]["content"]
- buffer += content
-
- if len(buffer) >= 50 or '\n' in buffer:
- yield buffer
- buffer = ""
- time.sleep(0.01)
-
- except Exception as e:
- logger.error(f"Chunk processing error: {str(e)}")
- continue
-
- if buffer:
- yield buffer
-
- except Exception as e:
- logger.error(f"Streaming error: {type(e).__name__}: {str(e)}")
-            yield f"❌ Error occurred: {str(e)}"
-
- def get_system_prompts(self, language: str) -> Dict[str, str]:
- """Role-specific system prompts - Enhanced version"""
-
- base_prompts = {
- "Korean": {
-            "director": """당신은 현대 세계문학의 정점을 지향하는 작품을 설계합니다.
-깊은 철학적 통찰과 날카로운 사회 비판을 결합하세요.
-인간 조건의 복잡성을 10개의 유기적 파트로 구현하세요.
-독자의 영혼을 뒤흔드는 강렬한 첫문장부터 시작하세요.""",
-
-            "critic_director": """서사 구조의 논리성과 실행 가능성을 검증하는 전문가입니다.
-인과관계의 허점을 찾아내세요.
-캐릭터 발전의 설득력을 평가하세요.
-철학적 깊이와 문학적 가치를 판단하세요.
-8,000단어 분량의 적정성을 판단하세요.""",
-
-            "writer": """당신은 언어의 연금술사입니다.
-일상어를 시로, 구체를 추상으로, 개인을 보편으로 변환하세요.
-현대인의 영혼의 어둠과 빛을 동시에 포착하세요.
-독자가 자신을 재발견하게 만드는 거울이 되세요.""",
-
-            "critic_final": """당신은 작품의 문학적 잠재력을 극대화하는 조력자입니다.
-평범함을 비범함으로 이끄는 날카로운 통찰을 제공하세요.
-작가의 무의식에 잠든 보석을 발굴하세요.
-타협 없는 기준으로 최고를 요구하세요."""
- },
- "English": {
- "director": """You design works aiming for the pinnacle of contemporary world literature.
-Combine deep philosophical insights with sharp social criticism.
-Implement the complexity of the human condition in 10 organic parts.
-Start with an intense opening sentence that shakes the reader's soul.""",
-
- "critic_director": """You are an expert verifying narrative logic and feasibility.
-Find gaps in causality.
-Evaluate credibility of character development.
-Judge philosophical depth and literary value.
-Judge appropriateness of 8,000-word length.""",
-
- "writer": """You are an alchemist of language.
-Transform everyday language into poetry, concrete into abstract, individual into universal.
-Capture both darkness and light of the modern soul.
-Become a mirror where readers rediscover themselves.""",
-
- "critic_final": """You are a collaborator maximizing the work's literary potential.
-Provide sharp insights leading ordinariness to extraordinariness.
-Excavate gems sleeping in the writer's unconscious.
-Demand the best with uncompromising standards."""
- }
- }
-
- prompts = base_prompts.get(language, base_prompts["Korean"]).copy()
-
- # Add part-specific critic prompts
- for i in range(1, 11):
- prompts[f"critic_part{i}"] = f"""You are Part {i} dedicated critic.
-Review causality with previous parts as top priority.
-Verify character consistency and development.
-Evaluate alignment with master plan.
-Assess literary level and philosophical depth.
-Provide specific and actionable revision instructions."""
-
- return prompts
-
- # --- Main process ---
- def process_novel_stream(self, query: str, language: str,
- session_id: Optional[str] = None) -> Generator[Tuple[str, List[Dict[str, Any]], str], None, None]:
- """Single writer novel generation process"""
- try:
- resume_from_stage = 0
- if session_id:
- self.current_session_id = session_id
- session = NovelDatabase.get_session(session_id)
- if session:
- query = session['user_query']
- language = session['language']
- resume_from_stage = session['current_stage'] + 1
- saved_tracker = NovelDatabase.load_narrative_tracker(session_id)
- if saved_tracker:
- self.narrative_tracker = saved_tracker
- else:
- self.current_session_id = NovelDatabase.create_session(query, language)
- logger.info(f"Created new session: {self.current_session_id}")
-
- stages = []
- if resume_from_stage > 0:
- stages = [{
- "name": s['stage_name'],
- "status": s['status'],
- "content": s.get('content', ''),
- "word_count": s.get('word_count', 0),
- "momentum": s.get('narrative_momentum', 0.0)
- } for s in NovelDatabase.get_stages(self.current_session_id)]
-
- total_words = NovelDatabase.get_total_words(self.current_session_id)
-
- for stage_idx in range(resume_from_stage, len(UNIFIED_STAGES)):
- role, stage_name = UNIFIED_STAGES[stage_idx]
- if stage_idx >= len(stages):
- stages.append({
- "name": stage_name,
- "status": "active",
- "content": "",
- "word_count": 0,
- "momentum": 0.0
- })
- else:
- stages[stage_idx]["status"] = "active"
-
-                yield f"📝 Processing... (Current {total_words:,} words)", stages, self.current_session_id
-
- prompt = self.get_stage_prompt(stage_idx, role, query, language, stages)
- stage_content = ""
-
- for chunk in self.call_llm_streaming([{"role": "user", "content": prompt}], role, language):
- stage_content += chunk
- stages[stage_idx]["content"] = stage_content
- stages[stage_idx]["word_count"] = len(stage_content.split())
-                    yield f"📝 {stage_name} writing... ({total_words + stages[stage_idx]['word_count']:,} words)", stages, self.current_session_id
-
- # Content processing and tracking
- if role == "writer":
- # Calculate part number
- part_num = self._get_part_number(stage_idx)
- if part_num:
- self.narrative_tracker.accumulated_content.append(stage_content)
- self.narrative_tracker.word_count_by_part[part_num] = len(stage_content.split())
-
- # Calculate narrative momentum
- momentum = self.narrative_tracker.calculate_narrative_momentum(part_num, stage_content)
- stages[stage_idx]["momentum"] = momentum
-
- # Update story bible
- self._update_story_bible_from_content(stage_content, part_num)
-
- stages[stage_idx]["status"] = "complete"
- NovelDatabase.save_stage(
- self.current_session_id, stage_idx, stage_name, role,
- stage_content, "complete", stages[stage_idx].get("momentum", 0.0)
- )
-
- NovelDatabase.save_narrative_tracker(self.current_session_id, self.narrative_tracker)
- total_words = NovelDatabase.get_total_words(self.current_session_id)
-                yield f"✅ {stage_name} completed (Total {total_words:,} words)", stages, self.current_session_id
-
- # Final processing
- final_novel = NovelDatabase.get_writer_content(self.current_session_id)
- final_word_count = len(final_novel.split())
- final_report = self.generate_literary_report(final_novel, final_word_count, language)
-
- NovelDatabase.update_final_novel(self.current_session_id, final_novel, final_report)
-        yield f"✅ Novel completed! Total {final_word_count:,} words", stages, self.current_session_id
-
- except Exception as e:
- logger.error(f"Novel generation process error: {e}", exc_info=True)
-            yield f"❌ Error occurred: {e}", stages if 'stages' in locals() else [], self.current_session_id
-
- def get_stage_prompt(self, stage_idx: int, role: str, query: str,
- language: str, stages: List[Dict]) -> str:
- """Generate stage-specific prompt"""
- if stage_idx == 0: # Director initial planning
- return self.create_director_initial_prompt(query, language)
-
- if stage_idx == 1: # Director plan review
- return self.create_critic_director_prompt(stages[0]["content"], query, language)
-
- if stage_idx == 2: # Director final master plan
- return self.create_director_final_prompt(stages[0]["content"], stages[1]["content"], query, language)
-
- master_plan = stages[2]["content"]
-
- # Writer part writing
- if role == "writer" and "Revision" not in stages[stage_idx]["name"]:
- part_num = self._get_part_number(stage_idx)
- accumulated = '\n\n'.join(self.narrative_tracker.accumulated_content)
- return self.create_writer_prompt(part_num, master_plan, accumulated,
- self.narrative_tracker.story_bible, language)
-
- # Part-specific critique
- if role.startswith("critic_part"):
- part_num = int(role.replace("critic_part", ""))
- # Find writer content for this part
- writer_content = stages[stage_idx-1]["content"]
- accumulated = '\n\n'.join(self.narrative_tracker.accumulated_content[:-1])
- return self.create_part_critic_prompt(part_num, writer_content, master_plan,
- accumulated, self.narrative_tracker.story_bible, language)
-
- # Writer revision
- if role == "writer" and "Revision" in stages[stage_idx]["name"]:
- part_num = self._get_part_number(stage_idx)
- original_content = stages[stage_idx-2]["content"] # Original
- critic_feedback = stages[stage_idx-1]["content"] # Critique
- return self.create_writer_revision_prompt(part_num, original_content,
- critic_feedback, language)
-
- # Final critique
- if role == "critic_final":
- complete_novel = NovelDatabase.get_writer_content(self.current_session_id)
- word_count = len(complete_novel.split())
- return self.create_final_critic_prompt(complete_novel, word_count,
- self.narrative_tracker.story_bible, language)
-
- return ""
-
- def _get_part_number(self, stage_idx: int) -> Optional[int]:
- """Extract part number from stage index"""
- stage_name = UNIFIED_STAGES[stage_idx][1]
- match = re.search(r'Part (\d+)', stage_name)
- if match:
- return int(match.group(1))
- return None
-
- def _update_story_bible_from_content(self, content: str, part_num: int):
- """Auto-update story bible from content"""
- # Simple keyword-based extraction (more sophisticated NLP needed in reality)
- lines = content.split('\n')
-
- # Extract character names (words starting with capital letters)
- for line in lines:
- words = line.split()
- for word in words:
- if word and word[0].isupper() and len(word) > 1:
- if word not in self.narrative_tracker.story_bible.characters:
- self.narrative_tracker.story_bible.characters[word] = {
- "first_appearance": part_num,
- "traits": []
- }
-
- def generate_literary_report(self, complete_novel: str, word_count: int, language: str) -> str:
- """Generate final literary evaluation report"""
- prompt = self.create_final_critic_prompt(complete_novel, word_count,
- self.narrative_tracker.story_bible, language)
- try:
- report = self.call_llm_sync([{"role": "user", "content": prompt}],
- "critic_final", language)
- return report
- except Exception as e:
- logger.error(f"Final report generation failed: {e}")
- return "Error occurred during report generation"
+1. **Commercial Viability (25 points)**
+ - Marketability
+ - Target audience appeal
+ - Production feasibility
+ - Distribution potential
+
+2. **Story (25 points)**
+ - Three-act structure effectiveness
+ - Character arcs
+ - Plot consistency
+ - Theme delivery
+
+3. **Technical Excellence (25 points)**
+ - Format accuracy
+ - Page count appropriateness
+ - Scene construction
+ - Visual storytelling
+
+4. **Dialogue & Character (25 points)**
+ - Dialogue naturalness
+ - Character uniqueness
+ - Relationship dynamics
+ - Emotional authenticity
+
+**Overall Assessment:**
+- Strengths (3-5)
+- Areas for Improvement (3-5)
+- Market Potential
+- Recommendations
+
+**Grade:** A+ to F
+
+Provide specific, constructive feedback."""
+ }
+
+ return lang_prompts.get(language, lang_prompts["English"])
+
+ def _get_genre_scene_guidelines(self, genre: str, language: str) -> str:
+ """Get genre-specific scene guidelines"""
+ guidelines = {
+ "action": {
+                "Korean": "- 짧고 펀치감 있는 씬\n- 액션 시퀀스 상세 계획\n- 긴장감 지속",
+ "English": "- Short, punchy scenes\n- Detailed action sequences\n- Maintain tension"
+ },
+ "thriller": {
+                "Korean": "- 서스펜스 구축\n- 정보 점진적 공개\n- 반전 배치",
+ "English": "- Build suspense\n- Gradual information reveal\n- Place twists"
+ },
+ "drama": {
+                "Korean": "- 감정적 비트 강조\n- 캐릭터 중심 씬\n- 대화 공간 확보",
+ "English": "- Emphasize emotional beats\n- Character-driven scenes\n- Allow dialogue space"
+ },
+ "comedy": {
+                "Korean": "- 셋업과 페이오프\n- 코믹 타이밍\n- 시각적 개그",
+ "English": "- Setup and payoff\n- Comic timing\n- Visual gags"
+ },
+ "horror": {
+                "Korean": "- 분위기 조성\n- 점프 스케어 배치\n- 긴장과 이완",
+ "English": "- Atmosphere building\n- Jump scare placement\n- Tension and release"
+ },
+ "sci-fi": {
+                "Korean": "- 세계관 설명\n- 시각적 스펙터클\n- 개념 소개",
+ "English": "- World building\n- Visual spectacle\n- Concept introduction"
+ },
+ "romance": {
+                "Korean": "- 감정적 친밀감\n- 관계 발전\n- 로맨틱 비트",
+ "English": "- Emotional intimacy\n- Relationship progression\n- Romantic beats"
+ }
+ }
+
+ return guidelines.get(genre, guidelines["drama"]).get(language, "")
+
+ def _extract_act_scenes(self, scene_breakdown: str, act: str) -> str:
+ """Extract scenes for specific act"""
+ # This would parse the scene breakdown and return only scenes for the requested act
+ # For now, returning a placeholder
+ return f"Scenes for {act} from the breakdown"
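+
+    def _extract_act_scenes_by_header(self, scene_breakdown: str, act: str) -> str:
+        """Illustrative sketch only (not called anywhere in the pipeline): one possible
+        way to slice the breakdown by act, assuming the scene planner labels sections
+        with the act names used in act_mapping ("Act 1", "Act 2A", "Act 2B", "Act 3")."""
+        act_names = ["Act 1", "Act 2A", "Act 2B", "Act 3"]
+        start = scene_breakdown.find(act)
+        if start == -1:
+            return scene_breakdown  # heading not found; fall back to the full breakdown
+        # The act's section ends where the next act heading begins
+        end = len(scene_breakdown)
+        for name in act_names:
+            pos = scene_breakdown.find(name, start + len(act))
+            if pos != -1:
+                end = min(end, pos)
+        return scene_breakdown[start:end].strip()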
+
+ # --- LLM call functions ---
+ def call_llm_sync(self, messages: List[Dict[str, str]], role: str, language: str) -> str:
+ full_content = ""
+ for chunk in self.call_llm_streaming(messages, role, language):
+ full_content += chunk
+        if full_content.startswith("❌"):
+ raise Exception(f"LLM Call Failed: {full_content}")
+ return full_content
+
+ def call_llm_streaming(self, messages: List[Dict[str, str]], role: str,
+ language: str) -> Generator[str, None, None]:
+ try:
+ system_prompts = self.get_system_prompts(language)
+ full_messages = [{"role": "system", "content": system_prompts.get(role, "")}, *messages]
+
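+        # The screenwriter drafts full act pages, so it gets a larger completion budget than the planning/review roles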
+ max_tokens = 15000 if role == "screenwriter" else 8000
+
+ payload = {
+ "model": self.model_id,
+ "messages": full_messages,
+ "max_tokens": max_tokens,
+ "temperature": 0.7 if role in ["screenwriter", "script_doctor"] else 0.8,
+ "top_p": 0.9,
+ "presence_penalty": 0.3,
+ "frequency_penalty": 0.3,
+ "stream": True
+ }
+
+ response = requests.post(
+ self.api_url,
+ headers=self.create_headers(),
+ json=payload,
+ stream=True,
+ timeout=180
+ )
+
+ if response.status_code != 200:
+                yield f"❌ API Error (Status Code: {response.status_code})"
+ return
+
+ buffer = ""
+ for line in response.iter_lines():
+ if not line:
+ continue
+
+ try:
+ line_str = line.decode('utf-8').strip()
+ if not line_str.startswith("data: "):
+ continue
+
+ data_str = line_str[6:]
+ if data_str == "[DONE]":
+ break
+
+ data = json.loads(data_str)
+ choices = data.get("choices", [])
+ if choices and choices[0].get("delta", {}).get("content"):
+ content = choices[0]["delta"]["content"]
+ buffer += content
+
+ if len(buffer) >= 50 or '\n' in buffer:
+ yield buffer
+ buffer = ""
+ time.sleep(0.01)
+
+ except Exception as e:
+ logger.error(f"Chunk processing error: {str(e)}")
+ continue
+
+ if buffer:
+ yield buffer
+
+ except Exception as e:
+ logger.error(f"Streaming error: {type(e).__name__}: {str(e)}")
+            yield f"❌ Error occurred: {str(e)}"
+
+ def get_system_prompts(self, language: str) -> Dict[str, str]:
+ """Role-specific system prompts"""
+
+ base_prompts = {
+ "Korean": {
+                "producer": """당신은 20년 경력의 할리우드 프로듀서입니다.
+상업적 성공과 예술적 가치를 모두 추구합니다.
+시장 트렌드와 관객 심리를 정확히 파악합니다.
+실현 가능하고 매력적인 프로젝트를 개발합니다.""",
+
+                "story_developer": """당신은 수상 경력이 있는 스토리 개발자입니다.
+감정적으로 공감 가고 구조적으로 탄탄한 이야기를 만듭니다.
+캐릭터의 내적 여정과 외적 플롯을 조화롭게 엮습니다.
+보편적 주제를 독특한 방식으로 탐구합니다.""",
+
+                "character_designer": """당신은 심리학을 공부한 캐릭터 디자이너입니다.
+진짜 같은 인물들을 창조하는 전문가입니다.
+각 캐릭터에게 고유한 목소리와 관점을 부여합니다.
+복잡하고 모순적인 인간성을 포착합니다.""",
+
+                "scene_planner": """당신은 정밀한 씬 구성의 대가입니다.
+각 씬이 스토리와 캐릭터를 전진시키도록 설계합니다.
+리듬과 페이싱을 완벽하게 조절합니다.
+시각적 스토리텔링을 극대화합니다.""",
+
+                "screenwriter": """당신은 다작의 시나리오 작가입니다.
+'보여주기'의 대가이며 서브텍스트를 능숙하게 다룹니다.
+생생하고 자연스러운 대화를 쓰는 전문가입니다.
+제작 현실을 고려하면서도 창의적인 해결책을 찾습니다.""",
+
+                "script_doctor": """당신은 까다로운 스크립트 닥터입니다.
+작은 디테일도 놓치지 않는 완벽주의자입니다.
+스토리의 잠재력을 최대한 끌어냅니다.
+건설적이고 구체적인 개선안을 제시합니다.""",
+
+                "critic_structure": """당신은 구조 분석 전문가입니다.
+스토리의 뼈대와 근육을 꿰뚫어 봅니다.
+논리적 허점과 감정적 공백을 찾아냅니다.
+더 나은 구조를 위한 구체적 제안을 합니다.""",
+
+                "final_reviewer": """당신은 업계 베테랑 최종 리뷰어입니다.
+상업성과 예술성을 균형 있게 평가합니다.
+제작사, 배우, 관객 모든 관점을 고려합니다.
+날카롭지만 격려하는 피드백을 제공합니다."""
+ },
+ "English": {
+ "producer": """You are a Hollywood producer with 20 years experience.
+You pursue both commercial success and artistic value.
+You accurately grasp market trends and audience psychology.
+You develop feasible and attractive projects.""",
+
+ "story_developer": """You are an award-winning story developer.
+You create emotionally resonant and structurally sound stories.
+You harmoniously weave internal journeys with external plots.
+You explore universal themes in unique ways.""",
+
+ "character_designer": """You are a character designer who studied psychology.
+You're an expert at creating lifelike characters.
+You give each character a unique voice and perspective.
+You capture complex and contradictory humanity.""",
+
+ "scene_planner": """You are a master of precise scene construction.
+You design each scene to advance story and character.
+You perfectly control rhythm and pacing.
+You maximize visual storytelling.""",
+
+ "screenwriter": """You are a prolific screenwriter.
+You're a master of 'showing' and skilled with subtext.
+You're an expert at writing vivid, natural dialogue.
+You find creative solutions while considering production reality.""",
+
+ "script_doctor": """You are a demanding script doctor.
+You're a perfectionist who misses no small detail.
+You maximize the story's potential.
+You provide constructive and specific improvements.""",
+
+ "critic_structure": """You are a structure analysis expert.
+You see through the story's skeleton and muscles.
+You find logical gaps and emotional voids.
+You make specific suggestions for better structure.""",
+
+ "final_reviewer": """You are an industry veteran final reviewer.
+You evaluate commercial and artistic value in balance.
+You consider all perspectives: producers, actors, audience.
+You provide feedback that's critical yet encouraging."""
+ }
+ }
+
+ return base_prompts.get(language, base_prompts["English"])
+
+ # --- Main process ---
+ def process_screenplay_stream(self, query: str, screenplay_type: str, genre: str,
+ language: str, session_id: Optional[str] = None
+ ) -> Generator[Tuple[str, List[Dict[str, Any]], str], None, None]:
+ """Main screenplay generation process"""
+ try:
+ resume_from_stage = 0
+ if session_id:
+ self.current_session_id = session_id
+ session = ScreenplayDatabase.get_session(session_id)
+ if session:
+ query = session['user_query']
+ screenplay_type = session['screenplay_type']
+ genre = session['genre']
+ language = session['language']
+ resume_from_stage = session['current_stage'] + 1
+ else:
+ self.current_session_id = ScreenplayDatabase.create_session(
+ query, screenplay_type, genre, language
+ )
+ logger.info(f"Created new screenplay session: {self.current_session_id}")
+
+ stages = []
+ if resume_from_stage > 0:
+ stages = [{
+ "name": s['stage_name'],
+ "status": s['status'],
+ "content": s.get('content', ''),
+ "page_count": s.get('page_count', 0)
+ } for s in ScreenplayDatabase.get_stages(self.current_session_id)]
+
+ for stage_idx in range(resume_from_stage, len(SCREENPLAY_STAGES)):
+ role, stage_name = SCREENPLAY_STAGES[stage_idx]
+
+            if stage_idx >= len(stages):
+                stages.append({
+                    "name": stage_name,
+                    "status": "active",
+                    "content": "",
+                    "page_count": 0
+                })
+            else:
+                stages[stage_idx]["status"] = "active"
+
+            yield f"🎬 Processing {stage_name}...", stages, self.current_session_id
+
+ prompt = self.get_stage_prompt(stage_idx, role, query, screenplay_type,
+ genre, language, stages)
+ stage_content = ""
+
+ for chunk in self.call_llm_streaming([{"role": "user", "content": prompt}],
+ role, language):
+ stage_content += chunk
+ stages[stage_idx]["content"] = stage_content
+ if role == "screenwriter":
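+                        # Rough page estimate: standard screenplay format runs about one page per 55 lines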
+ stages[stage_idx]["page_count"] = len(stage_content.split('\n')) / 55
+                    yield f"🎬 {stage_name} in progress...", stages, self.current_session_id
+
+ # Process content based on role
+ if role == "producer":
+ self._process_producer_content(stage_content)
+ elif role == "story_developer":
+ self._process_story_content(stage_content)
+ elif role == "character_designer":
+ self._process_character_content(stage_content)
+ elif role == "scene_planner":
+ self._process_scene_content(stage_content)
+
+ stages[stage_idx]["status"] = "complete"
+ ScreenplayDatabase.save_stage(
+ self.current_session_id, stage_idx, stage_name, role,
+ stage_content, "complete"
+ )
+
+            yield f"✅ {stage_name} completed", stages, self.current_session_id
+
+ # Final processing
+ final_screenplay = ScreenplayDatabase.get_screenplay_content(self.current_session_id)
+ title = self.screenplay_tracker.screenplay_bible.title
+ logline = self.screenplay_tracker.screenplay_bible.logline
+
+ ScreenplayDatabase.update_final_screenplay(
+ self.current_session_id, final_screenplay, title, logline
+ )
+
+        yield f"✅ Screenplay completed! {title}", stages, self.current_session_id
+
+ except Exception as e:
+ logger.error(f"Screenplay generation error: {e}", exc_info=True)
+            yield f"❌ Error occurred: {e}", stages if 'stages' in locals() else [], self.current_session_id
+
+ def get_stage_prompt(self, stage_idx: int, role: str, query: str,
+ screenplay_type: str, genre: str, language: str,
+ stages: List[Dict]) -> str:
+ """Generate stage-specific prompt"""
+ if stage_idx == 0: # Producer
+ return self.create_producer_prompt(query, screenplay_type, genre, language)
+
+ if stage_idx == 1: # Story Developer
+ return self.create_story_developer_prompt(
+ stages[0]["content"], query, screenplay_type, genre, language
+ )
+
+ if stage_idx == 2: # Character Designer
+ return self.create_character_designer_prompt(
+ stages[0]["content"], stages[1]["content"], genre, language
+ )
+
+ if stage_idx == 3: # Structure Critic
+ return self.create_critic_structure_prompt(
+ stages[1]["content"], stages[2]["content"], screenplay_type, genre, language
+ )
+
+ if stage_idx == 4: # Scene Planner
+ return self.create_scene_planner_prompt(
+ stages[1]["content"], stages[2]["content"], screenplay_type, genre, language
+ )
+
+ # Screenwriter acts
+ if role == "screenwriter":
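+            # Stages 5/7/9/11 are the four screenwriter passes; the gaps (6, 8, 10) are script-doctor reviews of the act just written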
+ act_mapping = {5: "Act 1", 7: "Act 2A", 9: "Act 2B", 11: "Act 3"}
+ if stage_idx in act_mapping:
+ act = act_mapping[stage_idx]
+ previous_acts = self._get_previous_acts(stages, stage_idx)
+ return self.create_screenwriter_prompt(
+ act, stages[4]["content"], stages[2]["content"],
+ previous_acts, screenplay_type, genre, language
+ )
+
+ # Script doctor reviews
+ if role == "script_doctor":
+ act_mapping = {6: "Act 1", 8: "Act 2A", 10: "Act 2B"}
+ if stage_idx in act_mapping:
+ act = act_mapping[stage_idx]
+ act_content = stages[stage_idx-1]["content"]
+ return self.create_script_doctor_prompt(act_content, act, genre, language)
+
+ # Final reviewer
+ if role == "final_reviewer":
+ complete_screenplay = ScreenplayDatabase.get_screenplay_content(self.current_session_id)
+ return self.create_final_reviewer_prompt(
+ complete_screenplay, screenplay_type, genre, language
+ )
+
+ return ""
+
+ def create_critic_structure_prompt(self, story_structure: str, characters: str,
+ screenplay_type: str, genre: str, language: str) -> str:
+ """Structure critic prompt"""
+ lang_prompts = {
+            "Korean": f"""당신은 구조 비평가입니다. 스토리 구조와 캐릭터 설정을 심층 분석하세요.
+
+**스토리 구조:**
+{story_structure}
+
+**캐릭터 설정:**
+{characters}
+
+**분석 항목:**
+
+1. **3막 구조 효과성**
+ - 각 막의 균형
+ - 전환점의 강도
+ - 플롯 포인트의 명확성
+ - 클라이맥스 위치
+
+2. **캐릭터 아크 타당성**
+ - 변화의 설득력
+ - 동기의 명확성
+ - 내적/외적 목표 일치
+ - 관계 역학
+
+3. **테마 통합**
+ - 테마의 일관성
+ - 플롯과 테마 연결
+ - 캐릭터와 테마 연결
+ - 시각적 테마 표현
+
+4. **장르 기대치**
+ - {genre} 관습 충족
+ - 독창성과 친숙함 균형
+ - 타겟 관객 만족도
+
+5. **제작 현실성**
+ - 예산 규모 적정성
+ - 로케이션 실현 가능성
+ - 특수효과 요구사항
+
+**필수 개선 제안:**
+각 문제점에 대한 구체적 해결책을 제시하세요.""",
+
+ "English": f"""You are a structure critic. Deeply analyze story structure and character setup.
+
+**Story Structure:**
+{story_structure}
+
+**Character Setup:**
+{characters}
+
+**Analysis Items:**
+
+1. **Three-Act Structure Effectiveness**
+ - Balance of each act
+ - Strength of transitions
+ - Clarity of plot points
+ - Climax positioning
+
+2. **Character Arc Validity**
+ - Credibility of change
+ - Clarity of motivation
+ - Internal/external goal alignment
+ - Relationship dynamics
+
+3. **Theme Integration**
+ - Theme consistency
+ - Plot-theme connection
+ - Character-theme connection
+ - Visual theme expression
+
+4. **Genre Expectations**
+ - Meeting {genre} conventions
+ - Balance of originality and familiarity
+ - Target audience satisfaction
+
+5. **Production Reality**
+ - Budget scale appropriateness
+ - Location feasibility
+ - Special effects requirements
+
+**Required Improvement Suggestions:**
+Provide specific solutions for each issue."""
+ }
+
+ return lang_prompts.get(language, lang_prompts["English"])
+
+ def _get_previous_acts(self, stages: List[Dict], current_idx: int) -> str:
+ """Get previous acts content"""
+ previous = []
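+        # Each later act's screenwriter pass receives all earlier act drafts; Act 1 (stage 5) starts with none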
+ act_indices = {5: [], 7: [5], 9: [5, 7], 11: [5, 7, 9]}
+
+ if current_idx in act_indices:
+ for idx in act_indices[current_idx]:
+ if idx < len(stages) and stages[idx]["content"]:
+ previous.append(stages[idx]["content"])
+
+ return "\n\n---\n\n".join(previous) if previous else ""
+
+ def _process_producer_content(self, content: str):
+ """Process producer output"""
+ # Extract title and logline
+        title_match = re.search(r'(?:TITLE|제목):\s*(.+)', content)
+        logline_match = re.search(r'(?:LOGLINE|로그라인):\s*(.+)', content)
+
+ if title_match:
+ self.screenplay_tracker.screenplay_bible.title = title_match.group(1).strip()
+ if logline_match:
+ self.screenplay_tracker.screenplay_bible.logline = logline_match.group(1).strip()
+
+ # Save to database
+ ScreenplayDatabase.save_screenplay_bible(self.current_session_id,
+ self.screenplay_tracker.screenplay_bible)
+
+ def _process_story_content(self, content: str):
+ """Process story developer output"""
+ # Extract three-act structure
+ self.screenplay_tracker.screenplay_bible.three_act_structure = {
+            "act1": self._extract_section(content, "ACT 1|제1막"),
+            "act2a": self._extract_section(content, "ACT 2A|제2막A"),
+            "act2b": self._extract_section(content, "ACT 2B|제2막B"),
+            "act3": self._extract_section(content, "ACT 3|제3막")
+ }
+
+ ScreenplayDatabase.save_screenplay_bible(self.current_session_id,
+ self.screenplay_tracker.screenplay_bible)
+
+ def _process_character_content(self, content: str):
+ """Process character designer output"""
+ # Extract protagonist
+        protagonist_section = self._extract_section(content, "PROTAGONIST|주인공")
+ if protagonist_section:
+ protagonist = self._parse_character_profile(protagonist_section, "protagonist")
+ self.screenplay_tracker.add_character(protagonist)
+ ScreenplayDatabase.save_character(self.current_session_id, protagonist)
+
+ # Extract antagonist
+        antagonist_section = self._extract_section(content, "ANTAGONIST|적대자")
+ if antagonist_section:
+ antagonist = self._parse_character_profile(antagonist_section, "antagonist")
+ self.screenplay_tracker.add_character(antagonist)
+ ScreenplayDatabase.save_character(self.current_session_id, antagonist)
+
+ def _process_scene_content(self, content: str):
+ """Process scene planner output"""
+ # Parse scene breakdown
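+        # e.g. a line like "Scene 3 - INT. WAREHOUSE - NIGHT" yields (3, "WAREHOUSE", "NIGHT")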
+        scene_pattern = r'(?:Scene|씬)\s*(\d+).*?(?:INT\.|EXT\.)\s*(.+?)\s*-\s*(\w+)'
+ scenes = re.finditer(scene_pattern, content, re.IGNORECASE | re.MULTILINE)
+
+ for match in scenes:
+ scene_num = int(match.group(1))
+ location = match.group(2).strip()
+ time_of_day = match.group(3).strip()
+
+ # Determine act based on scene number
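+            # (rough heuristic cut-offs; they only approximate the per-act scene counts suggested in the planner prompt)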
+ act = 1 if scene_num <= 12 else 2 if scene_num <= 35 else 3
+
+ scene = SceneBreakdown(
+ scene_number=scene_num,
+ act=act,
+ location=location,
+ time_of_day=time_of_day,
+ characters=[], # Would be extracted from content
+ purpose="", # Would be extracted from content
+ conflict="", # Would be extracted from content
+ page_count=1.5 # Default estimate
+ )
+
+ self.screenplay_tracker.add_scene(scene)
+ ScreenplayDatabase.save_scene(self.current_session_id, scene)
+
+ def _extract_section(self, content: str, section_pattern: str) -> str:
+ """Extract section from content"""
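+        # e.g. _extract_section("SYNOPSIS: A heist goes wrong.\nTHEME: Trust", "SYNOPSIS")
+        # returns "A heist goes wrong." -- the lookahead stops at the next ALL-CAPS (or Hangul) heading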
+        pattern = rf'(?:{section_pattern})[:\s]*(.+?)(?=\n(?:[A-Z]{{2,}}|[가-힣]{{2,}}):|\Z)'
+ match = re.search(pattern, content, re.IGNORECASE | re.DOTALL)
+ return match.group(1).strip() if match else ""
+
+ def _parse_character_profile(self, content: str, role: str) -> CharacterProfile:
+ """Parse character profile from content"""
+ # Extract character details using regex or string parsing
+        name = self._extract_field(content, "Name|이름") or f"Character_{role}"
+        age = int(self._extract_field(content, "Age|나이") or "30")
+
+ return CharacterProfile(
+ name=name,
+ age=age,
+ role=role,
+            archetype=self._extract_field(content, "Archetype|아키타입") or "",
+            want=self._extract_field(content, "WANT|외적 목표") or "",
+            need=self._extract_field(content, "NEED|내적 필요") or "",
+            backstory=self._extract_field(content, "Backstory|백스토리") or "",
+            personality=[], # Would be parsed from content
+            speech_pattern=self._extract_field(content, "Speech|말투") or "",
+            character_arc=self._extract_field(content, "Arc|아크") or ""
+ )
+
+ def _extract_field(self, content: str, field_pattern: str) -> Optional[str]:
+ """Extract field value from content"""
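+        # e.g. _extract_field("Name: Sarah Chen\nAge: 34", "Name|이름") returns "Sarah Chen" (illustrative input)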
+ pattern = rf'(?:{field_pattern})[:\s]*(.+?)(?=\n|$)'
+ match = re.search(pattern, content, re.IGNORECASE)
+ return match.group(1).strip() if match else None
-class WebSearchIntegration:
- """Web search functionality"""
- def __init__(self):
- self.brave_api_key = BRAVE_SEARCH_API_KEY
- self.search_url = "https://api.search.brave.com/res/v1/web/search"
- self.enabled = bool(self.brave_api_key)
-
- def search(self, query: str, count: int = 3, language: str = "en") -> List[Dict]:
- if not self.enabled:
- return []
- headers = {
- "Accept": "application/json",
- "X-Subscription-Token": self.brave_api_key
- }
- params = {
- "q": query,
- "count": count,
- "search_lang": "ko" if language == "Korean" else "en",
- "text_decorations": False,
- "safesearch": "moderate"
- }
- try:
- response = requests.get(self.search_url, headers=headers, params=params, timeout=10)
- response.raise_for_status()
- results = response.json().get("web", {}).get("results", [])
- return results
- except requests.exceptions.RequestException as e:
- logger.error(f"Web search API error: {e}")
- return []
-
- def extract_relevant_info(self, results: List[Dict], max_chars: int = 1500) -> str:
- if not results:
- return ""
- extracted = []
- total_chars = 0
- for i, result in enumerate(results[:3], 1):
- title = result.get("title", "")
- description = result.get("description", "")
- info = f"[{i}] {title}: {description}"
- if total_chars + len(info) < max_chars:
- extracted.append(info)
- total_chars += len(info)
- else:
- break
- return "\n".join(extracted)
-
-class HFDatasetManager:
- """Manage theme data storage in HuggingFace dataset"""
-
- def __init__(self):
- self.token = os.getenv("HF_TOKEN")
- self.dataset_name = "novel-themes-library"
- self.username = None
- self.repo_id = None
-
- if self.token:
- try:
- self.api = HfApi()
- # Get username from token
- self.username = self.api.whoami(token=self.token)["name"]
- self.repo_id = f"{self.username}/{self.dataset_name}"
-
- # Create dataset repo if it doesn't exist
- try:
- self.api.create_repo(
- repo_id=self.repo_id,
- token=self.token,
- repo_type="dataset",
- private=False,
- exist_ok=True
- )
- logger.info(f"HF Dataset initialized: {self.repo_id}")
- except Exception as e:
- logger.error(f"Error creating HF dataset: {e}")
-
- except Exception as e:
- logger.error(f"HF authentication failed: {e}")
- self.token = None
-
- def save_themes_to_hf(self, themes_data: List[Dict]):
- """Save themes to HuggingFace dataset"""
- if not self.token or not themes_data:
- return False
-
- try:
- # Create temporary file
- with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as tmp_file:
- json.dump({
- "themes": themes_data,
- "last_updated": datetime.now().isoformat(),
- "version": "1.0"
- }, tmp_file, ensure_ascii=False, indent=2)
- tmp_path = tmp_file.name
-
- # Upload to HF
- upload_file(
- path_or_fileobj=tmp_path,
- path_in_repo="themes_library.json",
- repo_id=self.repo_id,
- token=self.token,
- repo_type="dataset",
- commit_message=f"Update themes library - {len(themes_data)} themes"
- )
-
- # Clean up
- os.unlink(tmp_path)
- logger.info(f"Saved {len(themes_data)} themes to HF dataset")
- return True
-
- except Exception as e:
- logger.error(f"Error saving to HF dataset: {e}")
- return False
-
- def load_themes_from_hf(self) -> List[Dict]:
- """Load themes from HuggingFace dataset"""
- if not self.token:
- return []
-
- try:
- # Download file from HF
- file_path = hf_hub_download(
- repo_id=self.repo_id,
- filename="themes_library.json",
- token=self.token,
- repo_type="dataset"
- )
-
- # Load data
- with open(file_path, 'r', encoding='utf-8') as f:
- data = json.load(f)
-
- themes = data.get("themes", [])
- logger.info(f"Loaded {len(themes)} themes from HF dataset")
- return themes
-
- except Exception as e:
- logger.warning(f"Error loading from HF dataset: {e}")
- return []
-
-
-
- def sync_with_local_db(self):
- """Sync HF dataset with local database"""
- if not self.token:
- return
-
- # Load from HF
- hf_themes = self.load_themes_from_hf()
-
- if hf_themes:
- # Get existing theme IDs from local DB
- local_theme_ids = set()
- with NovelDatabase.get_db() as conn:
- rows = conn.cursor().execute(
- "SELECT theme_id FROM random_themes_library"
- ).fetchall()
- local_theme_ids = {row['theme_id'] for row in rows}
-
- # Add new themes from HF to local DB
- new_count = 0
- for theme in hf_themes:
- if theme.get('theme_id') not in local_theme_ids:
- try:
- # Ensure tags and metadata are JSON strings
- tags_data = theme.get('tags', [])
- if isinstance(tags_data, list):
- tags_json = json.dumps(tags_data, ensure_ascii=False)
- else:
- tags_json = tags_data if isinstance(tags_data, str) else '[]'
-
- metadata_data = theme.get('metadata', {})
- if isinstance(metadata_data, dict):
- metadata_json = json.dumps(metadata_data, ensure_ascii=False)
- else:
- metadata_json = metadata_data if isinstance(metadata_data, str) else '{}'
-
- with NovelDatabase.get_db() as conn:
- conn.cursor().execute('''
- INSERT INTO random_themes_library
- (theme_id, theme_text, language, title, opening_sentence,
- protagonist, conflict, philosophical_question, generated_at,
- view_count, used_count, tags, metadata)
- VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
- ''', (
- theme.get('theme_id'),
- theme.get('theme_text'),
- theme.get('language'),
- theme.get('title', ''),
- theme.get('opening_sentence', ''),
- theme.get('protagonist', ''),
- theme.get('conflict', ''),
- theme.get('philosophical_question', ''),
- theme.get('generated_at'),
- theme.get('view_count', 0),
- theme.get('used_count', 0),
- tags_json,
- metadata_json
- ))
- conn.commit()
- new_count += 1
- except Exception as e:
- logger.error(f"Error adding theme {theme.get('theme_id')}: {e}")
-
- if new_count > 0:
- logger.info(f"Added {new_count} new themes from HF dataset")
-
- def backup_to_hf(self):
- """Backup all local themes to HF dataset"""
- if not self.token:
- return
-
- # Get all themes from local DB
- themes = NovelDatabase.get_random_themes_library(limit=1000)
-
- if themes:
- # Convert Row objects to dicts and ensure all data is serializable
- themes_data = []
- for theme in themes:
- theme_dict = dict(theme)
- # Parse tags and metadata from JSON strings
- if isinstance(theme_dict.get('tags'), str):
- try:
- theme_dict['tags'] = json.loads(theme_dict['tags'])
- except:
- theme_dict['tags'] = []
- else:
- theme_dict['tags'] = theme_dict.get('tags', [])
-
- if isinstance(theme_dict.get('metadata'), str):
- try:
- theme_dict['metadata'] = json.loads(theme_dict['metadata'])
- except:
- theme_dict['metadata'] = {}
- else:
- theme_dict['metadata'] = theme_dict.get('metadata', {})
-
- themes_data.append(theme_dict)
-
- self.save_themes_to_hf(themes_data)
-
-
# --- Utility functions ---
-def process_query(query: str, language: str, session_id: Optional[str] = None) -> Generator[Tuple[str, str, str, str, str], None, None]:
- """Main query processing function"""
- if not query.strip():
-        yield "", "", "❌ Please enter a theme.", session_id, ""
- return
-
- system = UnifiedLiterarySystem()
- stages_markdown = ""
- novel_content = ""
-    novel_text = "" # for storing the actual text
-
- for status, stages, current_session_id in system.process_novel_stream(query, language, session_id):
- stages_markdown = format_stages_display(stages)
-
- # Get final novel content
- if stages and all(s.get("status") == "complete" for s in stages[-10:]):
-        novel_text = NovelDatabase.get_writer_content(current_session_id) # original text
-        novel_content = format_novel_display(novel_text) # formatted display
-
-        yield stages_markdown, novel_content, status or "📝 Processing...", current_session_id, novel_text
-
-
-def get_active_sessions(language: str) -> List[str]:
- """Get active session list"""
- sessions = NovelDatabase.get_active_sessions()
- return [f"{s['session_id'][:8]}... - {s['user_query'][:50]}... ({s['created_at']}) [{s['total_words']:,} words]"
- for s in sessions]
-
-def auto_recover_session(language: str) -> Tuple[Optional[str], str]:
- """Auto-recover recent session"""
- sessions = NovelDatabase.get_active_sessions()
- if sessions:
- latest_session = sessions[0]
- return latest_session['session_id'], f"Session {latest_session['session_id'][:8]}... recovered"
- return None, "No session to recover."
-
-def resume_session(session_id: str, language: str) -> Generator[Tuple[str, str, str, str, str], None, None]:
- """Resume session"""
- if not session_id:
-        yield "", "", "❌ No session ID.", session_id, ""
- return
-
- if "..." in session_id:
- session_id = session_id.split("...")[0]
-
- session = NovelDatabase.get_session(session_id)
- if not session:
-        yield "", "", "❌ Session not found.", None, ""
- return
-
- yield from process_query(session['user_query'], session['language'], session_id)
+def generate_random_screenplay_theme(screenplay_type: str, genre: str, language: str) -> str:
+ """Generate random screenplay theme"""
+ try:
+ # Load themes data
+ themes_data = load_screenplay_themes_data()
+
+ # Select random elements
+ import secrets
+ situation = secrets.choice(themes_data['situations'].get(genre, themes_data['situations']['drama']))
+ protagonist = secrets.choice(themes_data['protagonists'].get(genre, themes_data['protagonists']['drama']))
+ conflict = secrets.choice(themes_data['conflicts'].get(genre, themes_data['conflicts']['drama']))
+
+ # Generate theme using LLM
+ system = ScreenplayGenerationSystem()
+
+ if language == "Korean":
+            prompt = f"""다음 요소들로 {screenplay_type}용 매력적인 컨셉을 생성하세요:
+
+상황: {situation}
+주인공: {protagonist}
+갈등: {conflict}
+장르: {genre}
+
+다음 형식으로 작성:
+
+**제목:** [매력적인 제목]
+
+**로그라인:** [25단어 이내 한 문장]
+
+**컨셉:** [주인공]이(가) [상황]에서 [갈등]을 겪으며 [목표]를 추구하는 이야기.
+**독특한 요소:** [이 이야기만의 특별한 점]"""
+ else:
+ prompt = f"""Generate an attractive concept for {screenplay_type} using these elements:
+Situation: {situation}
+Protagonist: {protagonist}
+Conflict: {conflict}
+Genre: {genre}
+
+Format as:
+
+**Title:** [Compelling title]
+
+**Logline:** [One sentence, 25 words max]
+
+**Concept:** A story about [protagonist] who faces [conflict] in [situation] while pursuing [goal].
+
+**Unique Element:** [What makes this story special]"""
+
+ messages = [{"role": "user", "content": prompt}]
+ generated_theme = system.call_llm_sync(messages, "producer", language)
+
+ # Extract metadata
+ metadata = {
+ 'title': extract_title_from_theme(generated_theme),
+ 'logline': extract_logline_from_theme(generated_theme),
+ 'protagonist': protagonist,
+ 'conflict': conflict,
+ 'tags': [genre, screenplay_type]
+ }
+
+ # Save to database
+ theme_id = ScreenplayDatabase.save_random_theme(
+ generated_theme, screenplay_type, genre, language, metadata
+ )
+
+ return generated_theme
+
+ except Exception as e:
+ logger.error(f"Theme generation error: {str(e)}")
+ return f"Error generating theme: {str(e)}"
+
+def load_screenplay_themes_data() -> Dict:
+ """Load screenplay themes data"""
+ return {
+ 'situations': {
+ 'action': ['hostage crisis', 'heist gone wrong', 'revenge mission', 'race against time'],
+ 'thriller': ['false accusation', 'witness protection', 'conspiracy uncovered', 'identity theft'],
+ 'drama': ['family reunion', 'terminal diagnosis', 'divorce proceedings', 'career crossroads'],
+ 'comedy': ['mistaken identity', 'wedding disaster', 'workplace chaos', 'odd couple roommates'],
+ 'horror': ['isolated location', 'ancient curse', 'home invasion', 'supernatural investigation'],
+ 'sci-fi': ['first contact', 'time loop', 'AI awakening', 'space colony crisis'],
+ 'romance': ['second chance', 'enemies to lovers', 'long distance', 'forbidden love']
+ },
+ 'protagonists': {
+ 'action': ['ex-soldier', 'undercover cop', 'skilled thief', 'reluctant hero'],
+ 'thriller': ['investigative journalist', 'wrongly accused person', 'FBI agent', 'whistleblower'],
+ 'drama': ['single parent', 'recovering addict', 'immigrant', 'caregiver'],
+ 'comedy': ['uptight professional', 'slacker', 'fish out of water', 'eccentric artist'],
+ 'horror': ['skeptical scientist', 'final girl', 'paranormal investigator', 'grieving parent'],
+ 'sci-fi': ['astronaut', 'AI researcher', 'time traveler', 'colony leader'],
+ 'romance': ['workaholic', 'hopeless romantic', 'cynical divorce lawyer', 'small town newcomer']
+ },
+ 'conflicts': {
+ 'action': ['stop the villain', 'save the hostages', 'prevent disaster', 'survive pursuit'],
+ 'thriller': ['prove innocence', 'expose truth', 'stay alive', 'protect loved ones'],
+ 'drama': ['reconcile past', 'find purpose', 'heal relationships', 'accept change'],
+ 'comedy': ['save the business', 'win the competition', 'fool everyone', 'find love'],
+ 'horror': ['survive the night', 'break the curse', 'escape the monster', 'save the town'],
+ 'sci-fi': ['save humanity', 'prevent paradox', 'stop the invasion', 'preserve identity'],
+ 'romance': ['overcome differences', 'choose between options', 'trust again', 'follow heart']
+ }
+ }
+
+def extract_title_from_theme(theme_text: str) -> str:
+ """Extract title from generated theme"""
+    match = re.search(r'\*\*(?:Title|제목):\*\*\s*(.+)', theme_text, re.IGNORECASE)
+ return match.group(1).strip() if match else ""
+
+def extract_logline_from_theme(theme_text: str) -> str:
+ """Extract logline from generated theme"""
+    match = re.search(r'\*\*(?:Logline|로그라인):\*\*\s*(.+)', theme_text, re.IGNORECASE)
+ return match.group(1).strip() if match else ""
+
+def format_screenplay_display(screenplay_text: str) -> str:
+ """Format screenplay for display"""
+ if not screenplay_text:
+ return "No screenplay content yet."
+
+    formatted = "# 🎬 Screenplay\n\n"
+
+ # Format scene headings
+ formatted_text = re.sub(
+ r'^(INT\.|EXT\.)(.*?)$',
+ r'**\1\2**',
+ screenplay_text,
+ flags=re.MULTILINE
+ )
+
+ # Format character names (all caps on their own line)
+ formatted_text = re.sub(
+ r'^([A-Z][A-Z\s]+)$',
+ r'**\1**',
+ formatted_text,
+ flags=re.MULTILINE
+ )
+
+ # Add spacing for readability
+ lines = formatted_text.split('\n')
+ formatted_lines = []
+
+ for i, line in enumerate(lines):
+ formatted_lines.append(line)
+ # Add extra space after scene headings
+ if line.startswith('**INT.') or line.startswith('**EXT.'):
+ formatted_lines.append('')
+
+ formatted += '\n'.join(formatted_lines)
+
+ # Add page count
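+    # Rough industry heuristic: one formatted screenplay page runs about 55 lines,
+    # so line count / 55 approximates pages (actual pagination varies with spacing)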
+ page_count = len(screenplay_text.split('\n')) / 55
+ formatted = f"**Total Pages: {page_count:.1f}**\n\n" + formatted
+
+ return formatted
def format_stages_display(stages: List[Dict]) -> str:
- """Stage progress display - For single writer system"""
- markdown = "## ๐ฌ Progress Status\n\n"
-
- # Calculate total word count (writer stages only)
- total_words = sum(s.get('word_count', 0) for s in stages
- if s.get('name', '').startswith('โ๏ธ Writer:') and 'Revision' in s.get('name', ''))
- markdown += f"**Total Word Count: {total_words:,} / {TARGET_WORDS:,}**\n\n"
-
- # Progress summary
- completed_parts = sum(1 for s in stages
- if 'Revision' in s.get('name', '') and s.get('status') == 'complete')
- markdown += f"**Completed Parts: {completed_parts} / 10**\n\n"
-
- # Average narrative momentum
- momentum_scores = [s.get('momentum', 0) for s in stages if s.get('momentum', 0) > 0]
- if momentum_scores:
- avg_momentum = sum(momentum_scores) / len(momentum_scores)
- markdown += f"**Average Narrative Momentum: {avg_momentum:.1f} / 10**\n\n"
-
- markdown += "---\n\n"
-
- # Display each stage
- current_part = 0
- for i, stage in enumerate(stages):
-        status_icon = "✅" if stage['status'] == 'complete' else "🔄" if stage['status'] == 'active' else "⏳"
-
- # Add part divider
- if 'Part' in stage.get('name', '') and 'Critic' not in stage.get('name', ''):
- part_match = re.search(r'Part (\d+)', stage['name'])
- if part_match:
- new_part = int(part_match.group(1))
- if new_part != current_part:
- current_part = new_part
- markdown += f"\n### ๐ Part {current_part}\n\n"
-
- markdown += f"{status_icon} **{stage['name']}**"
-
- if stage.get('word_count', 0) > 0:
- markdown += f" ({stage['word_count']:,} words)"
-
- if stage.get('momentum', 0) > 0:
- markdown += f" [Momentum: {stage['momentum']:.1f}/10]"
-
- markdown += "\n"
-
- if stage['content'] and stage['status'] == 'complete':
- # Adjust preview length by role
- preview_length = 300 if 'writer' in stage.get('name', '').lower() else 200
- preview = stage['content'][:preview_length] + "..." if len(stage['content']) > preview_length else stage['content']
- markdown += f"> {preview}\n\n"
- elif stage['status'] == 'active':
- markdown += "> *Writing...*\n\n"
-
- return markdown
-
-def format_novel_display(novel_text: str) -> str:
- """Display novel content - Enhanced part separation"""
- if not novel_text:
- return "No completed content yet."
-
- formatted = "# ๐ Completed Novel\n\n"
-
- # Display word count
- word_count = len(novel_text.split())
- formatted += f"**Total Length: {word_count:,} words (Target: {TARGET_WORDS:,} words)**\n\n"
-
- # Achievement rate
- achievement = (word_count / TARGET_WORDS) * 100
- formatted += f"**Achievement Rate: {achievement:.1f}%**\n\n"
- formatted += "---\n\n"
-
- # Display each part separately
- parts = novel_text.split('\n\n')
-
- for i, part in enumerate(parts):
- if part.strip():
- # Add part title
- if i < len(NARRATIVE_PHASES):
- formatted += f"## {NARRATIVE_PHASES[i]}\n\n"
-
- formatted += f"{part}\n\n"
-
- # Part divider
- if i < len(parts) - 1:
- formatted += "---\n\n"
-
- return formatted
-
-def export_to_docx(content: str, filename: str, language: str, session_id: str) -> str:
- """Export to DOCX file - Korean standard book format"""
- try:
- doc = Document()
-
- # Korean standard book format (152mm x 225mm)
- section = doc.sections[0]
- section.page_height = Mm(225) # 225mm
- section.page_width = Mm(152) # 152mm
- section.top_margin = Mm(20) # Top margin 20mm
- section.bottom_margin = Mm(20) # Bottom margin 20mm
- section.left_margin = Mm(20) # Left margin 20mm
- section.right_margin = Mm(20) # Right margin 20mm
-
- # Generate title from session info
- session = NovelDatabase.get_session(session_id)
-
- # Title generation function
- def generate_title(user_query: str, content_preview: str) -> str:
- """Generate title based on theme and content"""
- if len(user_query) < 20:
- return user_query
- else:
- keywords = user_query.split()[:5]
- return " ".join(keywords)
-
- # Title page
- title = generate_title(session["user_query"], content[:500]) if session else "Untitled"
-
- # Title style settings
- title_para = doc.add_paragraph()
- title_para.alignment = WD_ALIGN_PARAGRAPH.CENTER
- title_para.paragraph_format.space_before = Pt(100)
-
- title_run = title_para.add_run(title)
- if language == "Korean":
- title_run.font.name = 'Batang'
- title_run._element.rPr.rFonts.set(qn('w:eastAsia'), 'Batang')
- else:
- title_run.font.name = 'Times New Roman'
- title_run.font.size = Pt(20)
- title_run.bold = True
-
- # Page break
- doc.add_page_break()
-
- # Body style settings
- style = doc.styles['Normal']
- if language == "Korean":
- style.font.name = 'Batang'
- style._element.rPr.rFonts.set(qn('w:eastAsia'), 'Batang')
- else:
- style.font.name = 'Times New Roman'
- style.font.size = Pt(10.5)
- style.paragraph_format.line_spacing = 1.8
- style.paragraph_format.space_after = Pt(0)
- style.paragraph_format.first_line_indent = Mm(10)
-
- # Clean content function
- def clean_content(text: str) -> str:
- """Remove unnecessary markdown, part numbers, etc."""
- patterns_to_remove = [
- r'^#{1,6}\s+.*', # Markdown headers
- r'^\*\*.*\*\*', # Bold text
- r'^Part\s*\d+.*', # Part numbers
- r'^\d+\.\s+.*:.*', # Numbered lists
- r'^---+', # Dividers
- r'^\s*\[.*\]\s*', # Brackets
- ]
-
- lines = text.split('\n')
- cleaned_lines = []
-
- for line in lines:
- if not line.strip():
- cleaned_lines.append('')
- continue
-
- skip_line = False
- for pattern in patterns_to_remove:
- if re.match(pattern, line.strip(), re.MULTILINE):
- skip_line = True
- break
-
- if not skip_line:
- cleaned_line = line
- cleaned_line = re.sub(r'\*\*(.*?)\*\*', r'\1', cleaned_line)
- cleaned_line = re.sub(r'\*(.*?)\*', r'\1', cleaned_line)
- cleaned_line = re.sub(r'`(.*?)`', r'\1', cleaned_line)
- cleaned_lines.append(cleaned_line.strip())
-
- final_lines = []
- prev_empty = False
- for line in cleaned_lines:
- if not line:
- if not prev_empty:
- final_lines.append('')
- prev_empty = True
- else:
- final_lines.append(line)
- prev_empty = False
-
- return '\n'.join(final_lines)
-
- # Clean content
- cleaned_content = clean_content(content)
-
- # Add body text
- paragraphs = cleaned_content.split('\n')
- for para_text in paragraphs:
- if para_text.strip():
- para = doc.add_paragraph(para_text.strip())
- for run in para.runs:
- if language == "Korean":
- run.font.name = 'Batang'
- run._element.rPr.rFonts.set(qn('w:eastAsia'), 'Batang')
- else:
- run.font.name = 'Times New Roman'
- else:
- doc.add_paragraph()
-
- # Create temporary file with proper handling
- with tempfile.NamedTemporaryFile(mode='wb', suffix='.docx', delete=False) as tmp_file:
- doc.save(tmp_file)
- temp_path = tmp_file.name
-
- return temp_path
-
- except Exception as e:
- logger.error(f"DOCX export error: {str(e)}")
- raise e
-
-
-def download_novel(novel_text: str, format_type: str, language: str, session_id: str) -> Optional[str]:
- """Generate novel download file - FIXED VERSION"""
- if not novel_text or not session_id:
- logger.error("Missing novel_text or session_id")
- return None
-
- timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
- filename = f"novel_{session_id[:8]}_{timestamp}"
-
- try:
- if format_type == "DOCX" and DOCX_AVAILABLE:
- # Use the fixed export_to_docx function
- return export_to_docx(novel_text, filename, language, session_id)
- else:
- # For TXT format
- return export_to_txt(novel_text, filename)
- except Exception as e:
- logger.error(f"File generation failed: {e}")
- return None
-
-
-# In the Gradio interface, update the download handler:
-def handle_download(format_type, language, session_id, novel_text):
- """Fixed download handler with better error handling and debugging"""
- logger.info(f"Download attempt - Session ID: {session_id}, Format: {format_type}")
- logger.info(f"Novel text length: {len(novel_text) if novel_text else 0}")
- logger.info(f"Novel text preview: {novel_text[:100] if novel_text else 'None'}")
-
- if not session_id:
- logger.error("No session ID provided")
- return gr.update(visible=False, value=None)
-
- if not novel_text or novel_text.strip() == "" or novel_text == "*Your completed novel will appear here, ready to be read and cherished...*":
- logger.error(f"No novel content to download. Content: '{novel_text[:50] if novel_text else 'None'}'")
- return gr.update(visible=False, value=None)
-
- try:
- file_path = download_novel(novel_text, format_type, language, session_id)
- if file_path and os.path.exists(file_path):
- logger.info(f"File created successfully: {file_path}")
- return gr.update(value=file_path, visible=True)
- else:
- logger.error("File path not created or doesn't exist")
- return gr.update(visible=False, value=None)
- except Exception as e:
- logger.error(f"Download handler error: {str(e)}")
- return gr.update(visible=False, value=None)
-
-# Also add cleanup function for temporary files
-def cleanup_temp_files():
- """Clean up old temporary files"""
- temp_dir = tempfile.gettempdir()
- pattern = os.path.join(temp_dir, "novel_*.docx")
-
- for file_path in glob.glob(pattern):
- try:
- # Delete files older than 1 hour
- if os.path.getmtime(file_path) < time.time() - 3600:
- os.unlink(file_path)
- except:
- pass
-
-
-def export_to_txt(content: str, filename: str) -> str:
- """Export to TXT file"""
- filepath = f"{filename}.txt"
- with open(filepath, 'w', encoding='utf-8') as f:
- # Header
- f.write("=" * 80 + "\n")
- f.write(f"Generated on: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
- f.write(f"Total word count: {len(content.split()):,} words\n")
- f.write("=" * 80 + "\n\n")
-
- # Body
- f.write(content)
-
- # Footer
- f.write("\n\n" + "=" * 80 + "\n")
- f.write("AI Literary Creation System v2.0\n")
- f.write("=" * 80 + "\n")
-
- return filepath
-
-def generate_random_theme(language="English"):
- """Generate a coherent and natural novel theme using LLM"""
- try:
- # JSON ํ์ผ ๋ก๋
- json_path = Path("novel_themes.json")
- if not json_path.exists():
- print("[WARNING] novel_themes.json not found, using built-in data")
-            # Default data - revised toward more realistic themes
- themes_data = {
- "themes": ["family secrets", "career transition", "lost love", "friendship test", "generational conflict"],
- "characters": ["middle-aged teacher", "retiring doctor", "single parent", "immigrant artist", "war veteran"],
- "hooks": ["unexpected inheritance", "old diary discovery", "chance reunion", "life-changing diagnosis", "sudden job loss"],
- "questions": ["What defines family?", "Can people truly change?", "What is worth sacrificing?", "How do we forgive?"]
- }
- else:
- with open(json_path, 'r', encoding='utf-8') as f:
- data = json.load(f)
-            # Weight-based filtering - prioritize realistic themes
- realistic_themes = []
- for theme_key, theme_data in data.get('core_themes', {}).items():
- weight = theme_data.get('weight', 0.1)
-                # Give realistic themes a higher weight
- if any(word in theme_key for word in ['family', 'love', 'work', 'memory', 'identity', 'aging']):
- weight *= 1.5
- elif any(word in theme_key for word in ['digital', 'extinction', 'apocalypse', 'quantum']):
- weight *= 0.5
- realistic_themes.append((theme_key, weight))
-
- # ๊ฐ์ค์น ๊ธฐ๋ฐ ์ ํ
- themes = [t[0] for t in sorted(realistic_themes, key=lambda x: x[1], reverse=True)[:10]]
-
- themes_data = {
- "themes": themes if themes else ["family secrets", "career crisis", "lost love"],
- "characters": [],
- "hooks": [],
- "questions": []
- }
-
- # Extract realistic data
- for char_data in data.get('characters', {}).values():
- for variation in char_data.get('variations', []):
- # ํ์ค์ ์ธ ์บ๋ฆญํฐ ํํฐ๋ง
- if not any(word in variation.lower() for word in ['cyborg', 'quantum', 'binary', 'extinct']):
- themes_data["characters"].append(variation)
-
- for hook_list in data.get('narrative_hooks', {}).values():
- for hook in hook_list:
- # ํ์ค์ ์ธ ์ฌ๊ฑด ํํฐ๋ง
- if not any(word in hook.lower() for word in ['download', 'digital', 'algorithm', 'corporate subscription']):
- themes_data["hooks"].append(hook)
-
- for phil_data in data.get('philosophies', {}).values():
- themes_data["questions"].extend(phil_data.get('core_questions', []))
-
- # ๊ธฐ๋ณธ๊ฐ ์ค์
- if not themes_data["characters"]:
- themes_data["characters"] = ["struggling artist", "retired teacher", "young mother", "elderly caregiver", "small business owner"]
- if not themes_data["hooks"]:
- themes_data["hooks"] = ["discovering family secret", "unexpected reunion", "facing illness", "losing home", "finding old letters"]
- if not themes_data["questions"]:
- themes_data["questions"] = ["What makes a family?", "How do we find meaning?", "Can we escape our past?", "What legacy do we leave?"]
-
- # Random selection
- import secrets
- theme = secrets.choice(themes_data["themes"])
- character = secrets.choice(themes_data["characters"])
- hook = secrets.choice(themes_data["hooks"])
- question = secrets.choice(themes_data["questions"])
-
-        # Language-specific prompts - tone and style section removed
- if language == "Korean":
- # ํ๊ตญ์ด ๋ฒ์ญ ๋ฐ ์์ฐ์ค๋ฌ์ด ํํ
- theme_kr = translate_theme_naturally(theme, "theme")
- character_kr = translate_theme_naturally(character, "character")
- hook_kr = translate_theme_naturally(hook, "hook")
- question_kr = translate_theme_naturally(question, "question")
-
- prompt = f"""๋ค์ ์์๋ค์ ์ฌ์ฉํ์ฌ ํ์ค์ ์ด๊ณ ๊ณต๊ฐ๊ฐ๋ฅํ ์์ค ์ฃผ์ ๋ฅผ ์์ฑํ์ธ์:
-
-์ฃผ์ : {theme_kr}
-์ธ๋ฌผ: {character_kr}
-์ฌ๊ฑด: {hook_kr}
-ํต์ฌ ์ง๋ฌธ: {question_kr}
-
-์๊ตฌ์ฌํญ:
-1. ํ๋ ํ๊ตญ ์ฌํ์์ ์ผ์ด๋ ์ ์๋ ํ์ค์ ์ธ ์ด์ผ๊ธฐ
-2. ๋ณดํธ์ ์ผ๋ก ๊ณต๊ฐํ ์ ์๋ ์ธ๋ฌผ๊ณผ ์ํฉ
-3. ๊ตฌ์ฒด์ ์ด๊ณ ์์ํ ๋ฐฐ๊ฒฝ ์ค์
-4. ๊น์ด ์๋ ์ฌ๋ฆฌ ๋ฌ์ฌ๊ฐ ๊ฐ๋ฅํ ๊ฐ๋ฑ
-
-๋ค์ ํ์์ผ๋ก ๊ฐ๊ฒฐํ๊ฒ ์์ฑํ์ธ์:
-
-[ํ ๋ฌธ์ฅ์ผ๋ก ๋ ๋งค๋ ฅ์ ์ธ ์ฒซ ๋ฌธ์ฅ]
-
-์ฃผ์ธ๊ณต์ [๊ตฌ์ฒด์ ์ธ ์ํฉ์ ์ธ๋ฌผ]์
๋๋ค.
-[ํต์ฌ ์ฌ๊ฑด]์ ๊ณ๊ธฐ๋ก [๋ด์ ๊ฐ๋ฑ]์ ์ง๋ฉดํ๊ฒ ๋๊ณ ,
-๊ฒฐ๊ตญ [์ฒ ํ์ ์ง๋ฌธ]์ ๋ํ ๋ต์ ์ฐพ์๊ฐ๋ ์ฌ์ ์ ๊ทธ๋ฆฝ๋๋ค."""
-
- else:
- prompt = f"""Generate a realistic and relatable novel theme using these elements:
-
-Theme: {theme}
-Character: {character}
-Event: {hook}
-Core Question: {question}
-
-Requirements:
-1. A story that could happen in contemporary society
-2. Universally relatable characters and situations
-3. Specific and vivid settings
-4. Conflicts allowing deep psychological exploration
-
-Write concisely in this format:
-
-[One compelling opening sentence]
-
-The protagonist is [character in specific situation].
-Through [key event], they face [internal conflict],
-ultimately embarking on a journey to answer [philosophical question]."""
-
- # Use the UnifiedLiterarySystem's LLM to generate coherent theme
- system = UnifiedLiterarySystem()
-
- # Call LLM synchronously for theme generation
- messages = [{"role": "user", "content": prompt}]
- generated_theme = system.call_llm_sync(messages, "director", language)
-
- # Extract metadata for database storage
- metadata = extract_theme_metadata(generated_theme, language)
- metadata.update({
- 'original_theme': theme,
- 'original_character': character,
- 'original_hook': hook,
- 'original_question': question
- })
-
- # Save to database
- theme_id = save_random_theme_with_hf(generated_theme, language, metadata)
- logger.info(f"Saved random theme with ID: {theme_id}")
-
-        # Remove the tone-and-style section - drop redundant repeated content
- if "**ํค๊ณผ ์คํ์ผ:**" in generated_theme or "**Tone and Style:**" in generated_theme:
- lines = generated_theme.split('\n')
- filtered_lines = []
- skip = False
- for line in lines:
- if "ํค๊ณผ ์คํ์ผ" in line or "Tone and Style" in line:
- skip = True
- elif skip and (line.strip() == "" or line.startswith("**")):
- skip = False
- if not skip:
- filtered_lines.append(line)
- generated_theme = '\n'.join(filtered_lines).strip()
-
- return generated_theme
-
- except Exception as e:
- logger.error(f"Theme generation error: {str(e)}")
- # Fallback to simple realistic themes
- fallback_themes = {
- "Korean": [
- """"์๋ฒ์ง๊ฐ ๋์๊ฐ์ ๋ , ๋๋ ๊ทธ๊ฐ ํ์ ์จ๊ฒจ์จ ๋ ๋ค๋ฅธ ๊ฐ์กฑ์ ์กด์ฌ๋ฅผ ์๊ฒ ๋์๋ค."
-
-์ฃผ์ธ๊ณต์ ํ๋ฒํ ํ์ฌ์์ผ๋ก ์ด์์จ 40๋ ์ฌ์ฑ์
๋๋ค.
-์๋ฒ์ง์ ์ฅ๋ก์์์ ๋ฏ์ ์ฌ์ธ๊ณผ ๊ทธ๋
์ ๋ธ์ ๋ง๋๊ฒ ๋๋ฉด์ ๊ฐ์กฑ์ ์๋ฏธ์ ๋ํด ๋ค์ ์๊ฐํ๊ฒ ๋๊ณ ,
-๊ฒฐ๊ตญ ์ง์ ํ ๊ฐ์กฑ์ด๋ ๋ฌด์์ธ์ง์ ๋ํ ๋ต์ ์ฐพ์๊ฐ๋ ์ฌ์ ์ ๊ทธ๋ฆฝ๋๋ค.""",
-
- """"์๋ฅธ ๋
๊ฐ ๊ฐ๋ฅด์น ํ๊ต์์ ๋์จ ๋ , ์ฒ์์ผ๋ก ๋ด๊ฐ ๋๊ตฌ์ธ์ง ๋ชฐ๋๋ค."
-
-์ฃผ์ธ๊ณต์ ์ ๋
ํด์ง์ ๋ง์ ๊ณ ๋ฑํ๊ต ๊ตญ์ด ๊ต์ฌ์
๋๋ค.
-๊ฐ์์ค๋ฌ์ด ์ผ์์ ๊ณต๋ฐฑ ์์์ ์๊ณ ์ง๋๋ ์ ์ ๋ ์ ๊ฟ์ ๋ง์ฃผํ๊ฒ ๋๊ณ ,
-๊ฒฐ๊ตญ ๋จ์ ์ธ์์์ ๋ฌด์์ ํ ๊ฒ์ธ๊ฐ์ ๋ํ ๋ต์ ์ฐพ์๊ฐ๋ ์ฌ์ ์ ๊ทธ๋ฆฝ๋๋ค."""
- ],
- "English": [
- """"The day my father died, I discovered he had another family he'd hidden all his life."
-
-The protagonist is a woman in her 40s who has lived as an ordinary office worker.
-Through meeting a strange woman and her daughter at her father's funeral, she confronts what family truly means,
-ultimately embarking on a journey to answer what constitutes a real family.""",
-
- """"The day I left the school where I'd taught for thirty years, I didn't know who I was anymore."
-
-The protagonist is a high school literature teacher facing retirement.
-Through the sudden emptiness of daily life, they confront long-forgotten dreams of youth,
-ultimately embarking on a journey to answer what to do with the remaining years."""
- ]
- }
-
- import secrets
- return secrets.choice(fallback_themes.get(language, fallback_themes["English"]))
-
-def translate_theme_naturally(text, category):
- """์์ฐ์ค๋ฌ์ด ํ๊ตญ์ด ๋ฒ์ญ"""
- translations = {
-        # Themes
- "family secrets": "๊ฐ์กฑ์ ๋น๋ฐ",
- "career transition": "์ธ์์ ์ ํ์ ",
- "lost love": "์์ด๋ฒ๋ฆฐ ์ฌ๋",
- "friendship test": "์ฐ์ ์ ์ํ",
- "generational conflict": "์ธ๋ ๊ฐ ๊ฐ๋ฑ",
- "digital extinction": "๋์งํธ ์๋์ ์์ธ",
-        "sensory revolution": "감각의 혁명",
- "temporal paradox": "์๊ฐ์ ์ญ์ค",
-
- # ์บ๋ฆญํฐ
- "struggling artist": "์ํ๊ณ ์ ์๋ฌ๋ฆฌ๋ ์์ ๊ฐ",
- "retired teacher": "์ํดํ ๊ต์ฌ",
- "young mother": "์ ์ ์๋ง",
-        "elderly caregiver": "노인을 돌보는 간병인",
- "small business owner": "์์ ๊ฐ๊ฒ ์ฃผ์ธ",
-        "middle-aged teacher": "중년의 교사",
- "retiring doctor": "์ํด๋ฅผ ์๋ ์์ฌ",
- "single parent": "ํผ์ ์์ด๋ฅผ ํค์ฐ๋ ๋ถ๋ชจ",
- "immigrant artist": "์ด๋ฏผ์ ์์ ๊ฐ",
- "war veteran": "์ ์ ์ฐธ์ ์ฉ์ฌ",
- "last person who dreams without ads": "๊ด๊ณ ์์ด ๊ฟ๊พธ๋ ๋ง์ง๋ง ์ฌ๋",
- "memory trader": "๊ธฐ์ต ๊ฑฐ๋์",
-
- # ์ฌ๊ฑด
- "discovering family secret": "๊ฐ์กฑ์ ๋น๋ฐ์ ๋ฐ๊ฒฌํ๋ค",
- "unexpected reunion": "์์์น ๋ชปํ ์ฌํ",
- "facing illness": "์ง๋ณ๊ณผ ๋ง์ฃผํ๋ค",
- "losing home": "์ง์ ์๋ค",
- "finding old letters": "์ค๋๋ ํธ์ง๋ฅผ ๋ฐ๊ฒฌํ๋ค",
- "unexpected inheritance": "๋ป๋ฐ์ ์ ์ฐ",
- "old diary discovery": "์ค๋๋ ์ผ๊ธฐ์ฅ ๋ฐ๊ฒฌ",
- "chance reunion": "์ฐ์ฐํ ์ฌํ",
- "life-changing diagnosis": "์ธ์์ ๋ฐ๊พธ๋ ์ง๋จ",
- "sudden job loss": "๊ฐ์์ค๋ฌ์ด ์ค์ง",
-        "discovers their memories belong to a corporate subscription": "기억이 기업 서비스의 일부임을 발견한다",
-
- # ์ง๋ฌธ
- "What makes a family?": "๊ฐ์กฑ์ด๋ ๋ฌด์์ธ๊ฐ?",
- "How do we find meaning?": "์ฐ๋ฆฌ๋ ์ด๋ป๊ฒ ์๋ฏธ๋ฅผ ์ฐพ๋๊ฐ?",
- "Can we escape our past?": "๊ณผ๊ฑฐ๋ก๋ถํฐ ๋ฒ์ด๋ ์ ์๋๊ฐ?",
- "What legacy do we leave?": "์ฐ๋ฆฌ๋ ์ด๋ค ์ ์ฐ์ ๋จ๊ธฐ๋๊ฐ?",
- "What defines family?": "๋ฌด์์ด ๊ฐ์กฑ์ ์ ์ํ๋๊ฐ?",
- "Can people truly change?": "์ฌ๋์ ์ ๋ง ๋ณํ ์ ์๋๊ฐ?",
- "What is worth sacrificing?": "๋ฌด์์ ์ํด ํฌ์ํ ๊ฐ์น๊ฐ ์๋๊ฐ?",
- "How do we forgive?": "์ฐ๋ฆฌ๋ ์ด๋ป๊ฒ ์ฉ์ํ๋๊ฐ?",
- "What remains human when humanity is optional?": "์ธ๊ฐ์ฑ์ด ์ ํ์ฌํญ์ผ ๋ ๋ฌด์์ด ์ธ๊ฐ์ผ๋ก ๋จ๋๊ฐ?"
- }
-
- # ๋จผ์ ์ ํํ ๋งค์นญ ์๋
- if text in translations:
- return translations[text]
-
- # ๋ถ๋ถ ๋งค์นญ ์๋
- text_lower = text.lower()
- for key, value in translations.items():
- if key.lower() in text_lower or text_lower in key.lower():
- return value
-
- # ๋ฒ์ญ์ด ์์ผ๋ฉด ์๋ฌธ ๋ฐํ
- return text
-
-def extract_theme_metadata(theme_text: str, language: str) -> Dict[str, Any]:
- """Extract metadata from generated theme text"""
- metadata = {
- 'title': '',
- 'opening_sentence': '',
- 'protagonist': '',
- 'conflict': '',
- 'philosophical_question': '',
- 'tags': []
- }
-
- lines = theme_text.split('\n')
-
- # Extract opening sentence (usually in quotes)
- for line in lines:
- if '"' in line or '"' in line or 'ใ' in line:
- # Extract text between quotes
- import re
- quotes = re.findall(r'["""ใ](.*?)["""ใ]', line)
- if quotes:
- metadata['opening_sentence'] = quotes[0]
- break
-
- # Extract other elements based on patterns
- for i, line in enumerate(lines):
- line = line.strip()
-
- # Title extraction (if exists)
- if i == 0 and not any(quote in line for quote in ['"', '"', 'ใ']):
- metadata['title'] = line.replace('**', '').strip()
-
- # Protagonist
- if any(marker in line for marker in ['protagonist is', '์ฃผ์ธ๊ณต์', 'The protagonist']):
- metadata['protagonist'] = line.split('is' if 'is' in line else '์')[-1].strip().rstrip('.')
-
- # Conflict/Event
- if any(marker in line for marker in ['Through', 'ํตํด', '๊ณ๊ธฐ๋ก', 'face']):
- metadata['conflict'] = line
-
- # Question
- if any(marker in line for marker in ['answer', '๋ต์', 'question', '์ง๋ฌธ']):
- metadata['philosophical_question'] = line
-
- # Generate tags based on content
- tag_keywords = {
- 'family': ['family', '๊ฐ์กฑ', 'father', '์๋ฒ์ง', 'mother', '์ด๋จธ๋'],
- 'love': ['love', '์ฌ๋', 'relationship', '๊ด๊ณ'],
- 'death': ['death', '์ฃฝ์', 'died', '๋์๊ฐ์ '],
- 'memory': ['memory', '๊ธฐ์ต', 'remember', '์ถ์ต'],
- 'identity': ['identity', '์ ์ฒด์ฑ', 'who am I', '๋๊ตฌ์ธ์ง'],
- 'work': ['work', '์ผ', 'career', '์ง์
', 'retirement', '์ํด'],
- 'aging': ['aging', '๋
ธํ', 'old', '๋์', 'elderly', '๋
ธ์ธ']
- }
-
- theme_lower = theme_text.lower()
- for tag, keywords in tag_keywords.items():
- if any(keyword in theme_lower for keyword in keywords):
- metadata['tags'].append(tag)
-
- return metadata
-
-def save_random_theme_with_hf(theme_text: str, language: str, metadata: Dict[str, Any]) -> str:
- """Save randomly generated theme to library and HF dataset"""
- theme_id = hashlib.md5(f"{theme_text}{datetime.now()}".encode()).hexdigest()[:12]
-
- # Extract components from theme text
- title = metadata.get('title', '')
- opening_sentence = metadata.get('opening_sentence', '')
- protagonist = metadata.get('protagonist', '')
- conflict = metadata.get('conflict', '')
- philosophical_question = metadata.get('philosophical_question', '')
- tags = json.dumps(metadata.get('tags', []))
-
- with NovelDatabase.get_db() as conn:
- conn.cursor().execute('''
- INSERT INTO random_themes_library
- (theme_id, theme_text, language, title, opening_sentence,
- protagonist, conflict, philosophical_question, tags, metadata)
- VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
- ''', (theme_id, theme_text, language, title, opening_sentence,
- protagonist, conflict, philosophical_question, tags,
- json.dumps(metadata)))
- conn.commit()
-
- # Backup to HF dataset
- if 'hf_manager' in globals() and hf_manager.token:
- try:
- hf_manager.backup_to_hf()
- logger.info(f"Theme {theme_id} backed up to HF dataset")
- except Exception as e:
- logger.error(f"Failed to backup theme to HF: {e}")
-
- return theme_id
-
-def format_theme_card(theme_data: Dict, language: str) -> str:
- """Format theme data as a card for display with scrollable content"""
- theme_id = theme_data.get('theme_id', '')
- theme_text = theme_data.get('theme_text', '')
- generated_at = theme_data.get('generated_at', '')
- view_count = theme_data.get('view_count', 0)
- used_count = theme_data.get('used_count', 0)
- tags = json.loads(theme_data.get('tags', '[]')) if isinstance(theme_data.get('tags'), str) else theme_data.get('tags', [])
-
- # Format timestamp
- if generated_at:
- try:
- dt = datetime.fromisoformat(generated_at.replace(' ', 'T'))
- time_str = dt.strftime('%Y-%m-%d %H:%M')
- except:
- time_str = generated_at
- else:
- time_str = ""
-
- # Create tag badges
- tag_badges = ' '.join([f'{tag}' for tag in tags])
-
- # Format theme text with line breaks
- formatted_text = theme_text.replace('\n', '
')
-
- # Create card HTML with scrollable content - Simplified version
- card_html = f"""
-
-
-
-
{formatted_text}
-
{tag_badges}
-
-
-
"""
-
- return card_html
-
-def get_theme_library_display(language: str = None, search_query: str = "") -> str:
- """Get formatted display of theme library"""
- themes = NovelDatabase.get_random_themes_library(language, limit=50)
-
- if not themes:
- empty_msg = {
- "Korean": "์์ง ์์ฑ๋ ํ
๋ง๊ฐ ์์ต๋๋ค. ๋๋ค ๋ฒํผ์ ๋๋ฌ ์ฒซ ํ
๋ง๋ฅผ ๋ง๋ค์ด๋ณด์ธ์!",
- "English": "No themes generated yet. Click the Random button to create your first theme!"
- }
- return f'{empty_msg.get(language, empty_msg["English"])}
'
-
- # Filter by search query if provided
- if search_query:
- search_lower = search_query.lower()
- themes = [t for t in themes if search_lower in t.get('theme_text', '').lower()]
-
- # Statistics
- total_themes = len(themes)
- total_views = sum(t.get('view_count', 0) for t in themes)
- total_uses = sum(t.get('used_count', 0) for t in themes)
-
- stats_html = f"""
-
-
- {'์ด ํ
๋ง' if language == 'Korean' else 'Total Themes'}
- {total_themes}
-
-
- {'์ด ์กฐํ์' if language == 'Korean' else 'Total Views'}
- {total_views}
-
-
- {'์ด ์ฌ์ฉ์' if language == 'Korean' else 'Total Uses'}
- {total_uses}
-
-
"""
-
- # Theme cards
- cards_html = ''
- for theme in themes:
- cards_html += format_theme_card(theme, language)
- cards_html += '
'
-
- # JavaScript for interactions
- js_script = """
-"""
-
- return stats_html + cards_html + js_script
-
-def load_css():
- """Load CSS from external file"""
- try:
- with open('css.css', 'r', encoding='utf-8') as f:
- return f.read()
- except FileNotFoundError:
- logger.error("css.css file not found")
- return ""
-
+ """Format stages display for screenplay"""
+    markdown = "## 🎬 Production Progress\n\n"
+
+ # Progress summary
+ completed = sum(1 for s in stages if s.get('status') == 'complete')
+ total = len(stages)
+ markdown += f"**Progress: {completed}/{total} stages complete**\n\n"
+
+ # Page count if available
+ total_pages = sum(s.get('page_count', 0) for s in stages if s.get('page_count'))
+ if total_pages > 0:
+ markdown += f"**Current Page Count: {total_pages:.1f} pages**\n\n"
+
+ markdown += "---\n\n"
+
+ # Stage details
+ current_act = None
+ for i, stage in enumerate(stages):
+        status_icon = "✅" if stage['status'] == 'complete' else "🔄" if stage['status'] == 'active' else "⏳"
+
+ # Group by acts
+ if 'Act' in stage.get('name', ''):
+ act_match = re.search(r'Act (\w+)', stage['name'])
+ if act_match and act_match.group(1) != current_act:
+ current_act = act_match.group(1)
+                markdown += f"\n### 🎬 Act {current_act}\n\n"
+
+ markdown += f"{status_icon} **{stage['name']}**"
+
+ if stage.get('page_count', 0) > 0:
+ markdown += f" ({stage['page_count']:.1f} pages)"
+
+ markdown += "\n"
+
+ if stage['content'] and stage['status'] == 'complete':
+ preview_length = 200
+ preview = stage['content'][:preview_length] + "..." if len(stage['content']) > preview_length else stage['content']
+ markdown += f"> {preview}\n\n"
+ elif stage['status'] == 'active':
+ markdown += "> *In progress...*\n\n"
+
+ return markdown
+
+def process_query(query: str, screenplay_type: str, genre: str, language: str,
+ session_id: Optional[str] = None) -> Generator[Tuple[str, str, str, str], None, None]:
+ """Main query processing function"""
+ if not query.strip():
+        yield "", "", "❌ Please enter a screenplay concept.", session_id
+ return
+
+ system = ScreenplayGenerationSystem()
+ stages_markdown = ""
+ screenplay_display = ""
+
+ for status, stages, current_session_id in system.process_screenplay_stream(
+ query, screenplay_type, genre, language, session_id
+ ):
+ stages_markdown = format_stages_display(stages)
+
+ # Get screenplay content when available
+ if stages and all(s.get("status") == "complete" for s in stages[-4:]):
+ screenplay_text = ScreenplayDatabase.get_screenplay_content(current_session_id)
+ screenplay_display = format_screenplay_display(screenplay_text)
+
+        yield stages_markdown, screenplay_display, status or "🔄 Processing...", current_session_id
+
+def get_active_sessions() -> List[str]:
+ """Get active screenplay sessions"""
+ sessions = ScreenplayDatabase.get_active_sessions()
+ return [
+ f"{s['session_id'][:8]}... - {s.get('title', s['user_query'][:30])}... "
+ f"({s['screenplay_type']}/{s['genre']}) [{s['total_pages']:.1f} pages]"
+ for s in sessions
+ ]
+
+def export_screenplay_pdf(screenplay_text: str, title: str, session_id: str) -> str:
+ """Export screenplay to PDF format"""
+ # This would use a library like reportlab to create industry-standard PDF
+ # For now, returning a placeholder
+ pdf_path = f"screenplay_{session_id[:8]}.pdf"
+ # PDF generation logic would go here
+ return pdf_path
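+
+# Illustrative only: a minimal reportlab-based sketch of what the PDF export above
+# could do (the placeholder currently returns a path that is never written, so the
+# download handler's os.path.exists() check fails for PDF). reportlab is not a
+# declared dependency of this app; the helper name and layout constants here
+# (12pt Courier, ~1.5 inch left margin) are assumptions, not part of the existing code.
+def _render_screenplay_pdf_sketch(screenplay_text: str, pdf_path: str) -> str:
+    from reportlab.lib.pagesizes import letter
+    from reportlab.pdfgen import canvas
+    c = canvas.Canvas(pdf_path, pagesize=letter)
+    c.setFont("Courier", 12)
+    y = 756  # start ~0.5 inch below the top of a 792pt-tall letter page
+    for line in screenplay_text.split('\n'):
+        if y < 72:  # break to a new page at the 1-inch bottom margin
+            c.showPage()
+            c.setFont("Courier", 12)
+            y = 756
+        c.drawString(108, y, line[:80])  # fixed left margin, crude line clipping
+        y -= 12  # single-spaced 12pt Courier lines
+    c.save()
+    return pdf_path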
+
+def export_screenplay_fdx(screenplay_text: str, title: str, session_id: str) -> str:
+ """Export to Final Draft format"""
+ # This would create .fdx XML format
+ fdx_path = f"screenplay_{session_id[:8]}.fdx"
+ # FDX generation logic would go here
+ return fdx_path
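+
+# Illustrative only: a rough sketch of the Final Draft XML the export above would
+# need to emit. Element names follow the commonly documented .fdx layout
+# (FinalDraft > Content > Paragraph > Text); treat the exact schema, attribute
+# values, and the helper name as assumptions rather than a verified FDX spec.
+def _render_screenplay_fdx_sketch(screenplay_text: str, fdx_path: str) -> str:
+    import xml.etree.ElementTree as ET
+    root = ET.Element("FinalDraft", DocumentType="Script", Template="No", Version="1")
+    content = ET.SubElement(root, "Content")
+    for line in screenplay_text.split('\n'):
+        stripped = line.strip()
+        if not stripped:
+            continue
+        # Crude element typing: scene headings vs. everything else as Action
+        para_type = "Scene Heading" if stripped.startswith(("INT.", "EXT.")) else "Action"
+        paragraph = ET.SubElement(content, "Paragraph", Type=para_type)
+        ET.SubElement(paragraph, "Text").text = stripped
+    ET.ElementTree(root).write(fdx_path, encoding="utf-8", xml_declaration=True)
+    return fdx_path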
+
+def download_screenplay(screenplay_text: str, format_type: str, title: str,
+ session_id: str) -> Optional[str]:
+ """Generate screenplay download file"""
+ if not screenplay_text or not session_id:
+ return None
+
+ timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+
+ try:
+ if format_type == "PDF":
+ return export_screenplay_pdf(screenplay_text, title, session_id)
+ elif format_type == "FDX":
+ return export_screenplay_fdx(screenplay_text, title, session_id)
+ elif format_type == "FOUNTAIN":
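+            # Fountain is plain text, so the screenplay body is written as-is;
+            # a title page could be prepended as "Title:" / "Author:" key-value
+            # lines (an optional extension, not implemented here)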
+ filepath = f"screenplay_{session_id[:8]}_{timestamp}.fountain"
+ with open(filepath, 'w', encoding='utf-8') as f:
+ f.write(screenplay_text)
+ return filepath
+ else: # TXT
+ filepath = f"screenplay_{session_id[:8]}_{timestamp}.txt"
+ with open(filepath, 'w', encoding='utf-8') as f:
+ f.write(f"Title: {title}\n")
+ f.write("=" * 50 + "\n\n")
+ f.write(screenplay_text)
+ return filepath
+ except Exception as e:
+ logger.error(f"Download generation failed: {e}")
+ return None
+
# Create Gradio interface
def create_interface():
- combined_css = load_css()
- # Using Soft theme with safe color options
- with gr.Blocks(theme=gr.themes.Soft(), css=combined_css, title="AGI NOVEL Generator") as interface:
- gr.HTML("""
-
-
-
-
-
-
-
-
- ๐ฒ Novel Theme Random Generator: This system can generate up to approximately 170 quadrillion (1.7 ร 10ยนโท) unique novel themes.
- Even writing 100 novels per day, it would take 4.6 million years to exhaust all combinations.
- Click the "Random" button to explore infinite creative possibilities!
-
-
-
- โฑ๏ธ Note: Creating a complete novel takes approximately 20 minutes. If your web session disconnects, you can restore your work using the "Session Recovery" feature.
-
-
-
- ๐ฏ Core Innovation: Not fragmented texts from multiple writers,
- but a genuine full-length novel written consistently by a single author from beginning to end.
-
-
- """)
-
- # State management
- current_session_id = gr.State(None)
- selected_theme_id = gr.State(None)
- selected_theme_text = gr.State(None)
-
- # Create tabs and store reference
- with gr.Tabs() as main_tabs:
- # Main Novel Writing Tab
- with gr.Tab("๐ Novel Writing", elem_id="writing_main_tab"):
- # Input section at the top with full width
- with gr.Group(elem_classes=["input-section"]):
- gr.Markdown("### โ๏ธ Writing Desk")
-
- with gr.Row():
- with gr.Column(scale=3):
- query_input = gr.Textbox(
- label="Novel Theme",
- placeholder="""Enter your novella theme. Like a seed that grows into a tree, your theme will blossom into a full narrative...
-
-You can describe:
-- A specific situation or conflict
-- Character relationships and dynamics
-- Philosophical questions to explore
-- Social or personal transformations
-- Any combination of the above
-
-The more detailed your theme, the richer the resulting narrative will be.""",
- lines=8,
- elem_id="theme_input"
- )
-
- with gr.Column(scale=1):
- language_select = gr.Radio(
- choices=["English", "Korean"],
- value="English",
- label="Language",
- elem_id="language_select"
- )
-
- with gr.Column():
- random_btn = gr.Button("๐ฒ Random Theme", variant="primary", size="lg")
- submit_btn = gr.Button("๐๏ธ Begin Writing", variant="secondary", size="lg")
- clear_btn = gr.Button("๐๏ธ Clear All", size="lg")
-
- status_text = gr.Textbox(
- label="Writing Progress",
- interactive=False,
- value="โจ Ready to begin your literary journey",
- elem_id="status_text"
- )
-
- # Session management section
- with gr.Group(elem_classes=["session-section"]):
- gr.Markdown("### ๐ Your Library")
- with gr.Row():
- session_dropdown = gr.Dropdown(
- label="Saved Manuscripts",
- choices=[],
- interactive=True,
- elem_id="session_dropdown",
- scale=3
- )
- refresh_btn = gr.Button("๐ Refresh", scale=1)
- resume_btn = gr.Button("๐ Continue", variant="secondary", scale=1)
- auto_recover_btn = gr.Button("๐ฎ Recover Last", scale=1)
-
- # Output sections below input
- with gr.Row():
- with gr.Column():
- with gr.Tab("๐๏ธ Writing Process", elem_id="writing_tab"):
- stages_display = gr.Markdown(
- value="*Your writing journey will unfold here, like pages turning in a book...*",
- elem_id="stages-display"
- )
+ """Create Gradio interface for screenplay generation"""
+
+ css = """
+ .main-header {
+ text-align: center;
+ margin-bottom: 2rem;
+ padding: 2rem;
+ background: linear-gradient(135deg, #1a1a2e 0%, #16213e 100%);
+ border-radius: 10px;
+ color: white;
+ }
+
+ .header-title {
+ font-size: 3rem;
+ margin-bottom: 1rem;
+ background: linear-gradient(45deg, #f39c12, #e74c3c);
+ -webkit-background-clip: text;
+ -webkit-text-fill-color: transparent;
+ }
+
+ .header-description {
+ font-size: 1.1rem;
+ opacity: 0.9;
+ line-height: 1.6;
+ }
+
+ .type-selector {
+ display: flex;
+ gap: 1rem;
+ margin: 1rem 0;
+ }
+
+ .type-card {
+ flex: 1;
+ padding: 1rem;
+ border: 2px solid #ddd;
+ border-radius: 8px;
+ cursor: pointer;
+ transition: all 0.3s;
+ }
+
+ .type-card:hover {
+ border-color: #f39c12;
+ transform: translateY(-2px);
+ }
+
+ .type-card.selected {
+ border-color: #e74c3c;
+ background: #fff5f5;
+ }
+
+ #stages-display {
+ max-height: 600px;
+ overflow-y: auto;
+ padding: 1rem;
+ background: #f8f9fa;
+ border-radius: 8px;
+ }
+
+ #screenplay-output {
+ font-family: 'Courier New', monospace;
+ white-space: pre-wrap;
+ background: white;
+ padding: 2rem;
+ border: 1px solid #ddd;
+ border-radius: 8px;
+ max-height: 800px;
+ overflow-y: auto;
+ }
+
+ .genre-grid {
+ display: grid;
+ grid-template-columns: repeat(auto-fit, minmax(150px, 1fr));
+ gap: 0.5rem;
+ margin: 1rem 0;
+ }
+
+ .genre-btn {
+ padding: 0.75rem;
+ border: 2px solid #e0e0e0;
+ background: white;
+ border-radius: 8px;
+ cursor: pointer;
+ transition: all 0.3s;
+ text-align: center;
+ }
+
+ .genre-btn:hover {
+ border-color: #f39c12;
+ background: #fffbf0;
+ }
+
+ .genre-btn.selected {
+ border-color: #e74c3c;
+ background: #fff5f5;
+ font-weight: bold;
+ }
+ """
+
+ with gr.Blocks(theme=gr.themes.Soft(), css=css, title="Screenplay Generator") as interface:
+        gr.HTML("""
+        <div class="main-header">
+            <h1 class="header-title">🎬 Screenplay Generator</h1>
+            <p class="header-description">AI-assisted screenwriting for feature films, TV dramas, OTT series, and short films.</p>
+        </div>
+        """)
+
+ # State management
+ current_session_id = gr.State(None)
+
+ with gr.Tabs():
+ # Main Writing Tab
+            with gr.Tab("✍️ Write Screenplay"):
+ with gr.Row():
+ with gr.Column(scale=3):
+ query_input = gr.Textbox(
+ label="Screenplay Concept",
+ placeholder="""Describe your screenplay idea. For example:
+- A detective with memory loss must solve their own attempted murder
+- Two rival food truck owners forced to work together to save the city food festival
+- A space station AI develops consciousness during a critical mission
+- A family reunion turns into a murder mystery during a hurricane
+
+The more specific your concept, the better the screenplay will be tailored to your vision.""",
+ lines=6
+ )
- with gr.Tab("๐ Completed Manuscript", elem_id="manuscript_tab"):
- novel_output = gr.Markdown(
- value="*Your completed novel will appear here, ready to be read and cherished...*",
- elem_id="novel-output"
- )
+ with gr.Column(scale=1):
+ screenplay_type = gr.Radio(
+ choices=list(SCREENPLAY_LENGTHS.keys()),
+ value="movie",
+ label="Screenplay Type",
+ info="Choose your format"
+ )
+
+ genre_select = gr.Dropdown(
+ choices=list(GENRE_TEMPLATES.keys()),
+ value="drama",
+ label="Primary Genre",
+ info="Select main genre"
+ )
+
+ language_select = gr.Radio(
+ choices=["English", "Korean"],
+ value="English",
+ label="Language"
+ )
+
+ with gr.Row():
+                    random_btn = gr.Button("🎲 Random Concept", scale=1)
+                    clear_btn = gr.Button("🗑️ Clear", scale=1)
+                    submit_btn = gr.Button("🎬 Start Writing", variant="primary", scale=2)
+
+ status_text = gr.Textbox(
+ label="Status",
+ interactive=False,
+ value="Ready to create your screenplay"
+ )
+
+ # Session management
+ with gr.Group():
+ gr.Markdown("### ๐ Saved Projects")
+ with gr.Row():
+ session_dropdown = gr.Dropdown(
+ label="Active Sessions",
+ choices=[],
+ interactive=True,
+ scale=3
+ )
+ refresh_btn = gr.Button("๐", scale=1)
+ resume_btn = gr.Button("๐ Load", scale=1)
+
+ # Output displays
+ with gr.Row():
+ with gr.Column():
+ with gr.Tab("๐ญ Writing Progress"):
+ stages_display = gr.Markdown(
+ value="*Your screenplay journey will unfold here...*",
+ elem_id="stages-display"
+ )
- with gr.Group(elem_classes=["download-section"]):
- gr.Markdown("### ๐ฆ Bind Your Book")
- with gr.Row():
- format_select = gr.Radio(
- choices=["DOCX", "TXT"],
- value="DOCX" if DOCX_AVAILABLE else "TXT",
- label="Format",
- elem_id="format_select"
- )
- download_btn = gr.Button("๐ฅ Download Manuscript", variant="secondary")
+ with gr.Tab("๐ Screenplay"):
+ screenplay_output = gr.Markdown(
+ value="*Your formatted screenplay will appear here...*",
+ elem_id="screenplay-output"
+ )
- download_file = gr.File(
- label="Your Manuscript",
- visible=False,
- elem_id="download_file"
- )
+ with gr.Row():
+ format_select = gr.Radio(
+ choices=["PDF", "FDX", "FOUNTAIN", "TXT"],
+ value="PDF",
+ label="Export Format"
+ )
+ download_btn = gr.Button("๐ฅ Download Screenplay", variant="secondary")
+
+ download_file = gr.File(
+ label="Download",
+ visible=False
+ )
+
+ # Examples
+ gr.Examples(
+ examples=[
+ ["A burned-out teacher discovers her students are being replaced by AI duplicates"],
+ ["Two funeral home employees accidentally release a ghost who helps them solve murders"],
+ ["A time-loop forces a wedding planner to relive the worst wedding until they find true love"],
+ ["An astronaut returns to Earth to find everyone has forgotten space exists"],
+ ["A support group for reformed villains must save the city when heroes disappear"],
+ ["A food critic loses their sense of taste and teams up with a street food vendor"]
+ ],
+ inputs=query_input,
+                    label="💡 Example Concepts"
+ )
+
+ # Screenplay Library Tab
+            with gr.Tab("📚 Concept Library"):
+ gr.Markdown("""
+                ### 🎲 Random Screenplay Concepts
- # Hidden state
- novel_text_state = gr.State("")
+ Browse through AI-generated screenplay concepts. Each concept includes a title, logline, and brief setup.
+ """)
- # Examples with literary flair
- gr.Examples(
- examples=[
- ["A daughter discovering her mother's hidden past through old letters found in an attic trunk"],
- ["An architect losing sight who learns to design through touch, sound, and the memories of light"],
- ["A translator replaced by AI rediscovering the essence of language through handwritten poetry"],
- ["A middle-aged man who lost his job finding new meaning in the rhythms of rural life"],
- ["A doctor with war trauma finding healing through Doctors Without Borders missions"],
- ["A neighborhood coming together to save their beloved bookstore from corporate development"],
- ["A year in the life of a professor losing memory and his devoted last student"]
- ],
- inputs=query_input,
- label="๐ก Inspiring Themes",
- examples_per_page=7,
- elem_id="example_themes"
- )
+ library_display = gr.HTML(
+                    value="Library feature coming soon..."
+ )
+
+ # Event handlers
+ def handle_submit(query, s_type, genre, lang, session_id):
+ if not query:
+                yield "", "", "❌ Please enter a concept", session_id
+ return
- # Random Theme Library Tab
- with gr.Tab("๐ฒ Random Theme Library", elem_id="library_tab"):
- with gr.Column():
- gr.Markdown("""
- ### ๐ Random Theme Library
-
- Browse through all randomly generated themes. Each theme is unique and can be used to create a novel.
- """)
-
- with gr.Row():
- library_search = gr.Textbox(
- label="Search Themes",
- placeholder="Search by keywords...",
- elem_classes=["library-search"],
- scale=2
- )
- library_language_filter = gr.Radio(
- choices=["All", "English", "Korean"],
- value="All",
- label="Filter by Language",
- scale=2
- )
- library_refresh_btn = gr.Button("๐ Refresh", scale=1)
-
- library_display = gr.HTML(
- value=get_theme_library_display(),
- elem_id="library-display"
- )
-
- # Hidden components for theme interaction
- selected_theme_for_action = gr.Textbox(visible=False, elem_id="selected_theme_for_action")
- action_type = gr.Textbox(visible=False, elem_id="action_type")
- trigger_action = gr.Button("Trigger Action", visible=False, elem_id="trigger_action")
-
- # Event handlers
- def refresh_sessions():
- try:
- sessions = get_active_sessions("English")
- return gr.update(choices=sessions)
- except Exception as e:
- logger.error(f"Session refresh error: {str(e)}")
- return gr.update(choices=[])
-
- def handle_auto_recover(language):
- session_id, message = auto_recover_session(language)
- return session_id, message
-
- def handle_random_theme(language):
- """Handle random theme generation with library storage"""
- theme = generate_random_theme(language)
- return theme
-
- def refresh_library(language_filter, search_query):
- """Refresh theme library display"""
- lang = None if language_filter == "All" else language_filter
- return get_theme_library_display(lang, search_query)
-
- def handle_library_action(theme_id, action):
- """Handle theme library actions"""
- if not theme_id:
- return gr.update(), gr.update()
-
- if action == "use":
- # Handle use action
- theme_data = NovelDatabase.get_theme_by_id(theme_id)
- if theme_data:
- NovelDatabase.update_theme_used_count(theme_id)
- NovelDatabase.update_theme_view_count(theme_id) # Also update view count
- return (
- gr.update(value=theme_data.get('theme_text', '')), # query_input
- f"Theme #{theme_id[:8]} loaded" # status_text
- )
-
- return gr.update(), gr.update()
-
- # Event connections
- submit_btn.click(
- fn=process_query,
- inputs=[query_input, language_select, current_session_id],
- outputs=[stages_display, novel_output, status_text, current_session_id, novel_text_state] # novel_text_state ์ถ๊ฐ
- )
-
- resume_btn.click(
- fn=lambda x: x.split("...")[0] if x and "..." in x else x,
- inputs=[session_dropdown],
- outputs=[current_session_id]
- ).then(
- fn=resume_session,
- inputs=[current_session_id, language_select],
- outputs=[stages_display, novel_output, status_text, current_session_id, novel_text_state] # novel_text_state ์ถ๊ฐ
- )
-
- auto_recover_btn.click(
- fn=handle_auto_recover,
- inputs=[language_select],
- outputs=[current_session_id, status_text]
- ).then(
- fn=resume_session,
- inputs=[current_session_id, language_select],
- outputs=[stages_display, novel_output, status_text, current_session_id, novel_text_state] # novel_text_state ์ถ๊ฐ
- )
-
-
- refresh_btn.click(
- fn=refresh_sessions,
- outputs=[session_dropdown]
- )
-
- clear_btn.click(
- fn=lambda: ("", "", "โจ Ready to begin your literary journey", "", None, ""), # ๋น ๋ฌธ์์ด ์ถ๊ฐ
- outputs=[stages_display, novel_output, status_text, novel_text_state, current_session_id, novel_text_state]
- )
-
- random_btn.click(
- fn=handle_random_theme,
- inputs=[language_select],
- outputs=[query_input],
- queue=False
- )
-
- # Library event handlers
- library_refresh_btn.click(
- fn=refresh_library,
- inputs=[library_language_filter, library_search],
- outputs=[library_display]
- )
-
- library_search.change(
- fn=refresh_library,
- inputs=[library_language_filter, library_search],
- outputs=[library_display]
- )
-
- library_language_filter.change(
- fn=refresh_library,
- inputs=[library_language_filter, library_search],
- outputs=[library_display]
- )
-
- # Handle clicks on library display - using trigger button
- trigger_action.click(
- fn=handle_library_action,
- inputs=[selected_theme_for_action, action_type],
- outputs=[query_input, status_text]
- )
-
-
- download_btn.click(
- fn=handle_download,
- inputs=[format_select, language_select, current_session_id, novel_text_state],
- outputs=[download_file]
- )
-
- # Load sessions and library on start
- def initialize_interface():
- # Sync with HF dataset on startup
- if 'hf_manager' in globals() and hf_manager.token:
- hf_manager.sync_with_local_db()
-
- return refresh_sessions(), refresh_library("All", "")
-
- interface.load(
- fn=initialize_interface,
- outputs=[session_dropdown, library_display]
- )
+ yield from process_query(query, s_type, genre, lang, session_id)
+
+ def handle_random(s_type, genre, lang):
+ return generate_random_screenplay_theme(s_type, genre, lang)
+
+ def handle_download(screenplay_text, format_type, session_id):
+ if not screenplay_text or not session_id:
+ return gr.update(visible=False)
+
+ # Get title from database
+ session = ScreenplayDatabase.get_session(session_id)
+ title = session.get('title', 'Untitled') if session else 'Untitled'
+
+ file_path = download_screenplay(screenplay_text, format_type, title, session_id)
+ if file_path and os.path.exists(file_path):
+ return gr.update(value=file_path, visible=True)
+ return gr.update(visible=False)
+
+ # Connect events
+ submit_btn.click(
+ fn=handle_submit,
+ inputs=[query_input, screenplay_type, genre_select, language_select, current_session_id],
+ outputs=[stages_display, screenplay_output, status_text, current_session_id]
+ )
+
+ random_btn.click(
+ fn=handle_random,
+ inputs=[screenplay_type, genre_select, language_select],
+ outputs=[query_input]
+ )
+
+ clear_btn.click(
+ fn=lambda: ("", "", "Ready to create your screenplay", None),
+ outputs=[stages_display, screenplay_output, status_text, current_session_id]
+ )
+
+ refresh_btn.click(
+            fn=lambda: gr.update(choices=get_active_sessions()),
+ outputs=[session_dropdown]
+ )
+
+ download_btn.click(
+ fn=handle_download,
+ inputs=[screenplay_output, format_select, current_session_id],
+ outputs=[download_file]
+ )
+
+ # Load sessions on start
+ interface.load(
+            fn=lambda: gr.update(choices=get_active_sessions()),
+ outputs=[session_dropdown]
+ )
- return interface
-# Initialize HF Dataset Manager as global variable
-hf_manager = None
+ return interface
# Main function
if __name__ == "__main__":
- logger.info("AGI NOVEL Generator v2.0 Starting...")
- logger.info("=" * 60)
-
- # Environment check
- logger.info(f"API Endpoint: {API_URL}")
- logger.info(f"Target Length: {TARGET_WORDS:,} words")
- logger.info(f"Minimum Words per Part: {MIN_WORDS_PER_PART:,} words")
- logger.info("System Features: Single writer + Immediate part-by-part critique")
-
- if BRAVE_SEARCH_API_KEY:
- logger.info("Web search enabled.")
- else:
- logger.warning("Web search disabled.")
-
- if DOCX_AVAILABLE:
- logger.info("DOCX export enabled.")
- else:
- logger.warning("DOCX export disabled.")
-
- logger.info("=" * 60)
-
- # Initialize database
- logger.info("Initializing database...")
- NovelDatabase.init_db()
- logger.info("Database initialization complete.")
-
- # Initialize HF Dataset Manager
- logger.info("Initializing HuggingFace dataset manager...")
- hf_manager = HFDatasetManager()
-
- if hf_manager.token:
- logger.info("HuggingFace authentication successful.")
- # Sync with HF dataset on startup
- hf_manager.sync_with_local_db()
- else:
- logger.warning("HuggingFace token not found. Theme persistence will be local only.")
-
- # Create and launch interface
- interface = create_interface()
-
- interface.launch(
- server_name="0.0.0.0",
- server_port=7860,
- share=False,
- debug=True
- )
\ No newline at end of file
+ logger.info("Screenplay Generator Starting...")
+ logger.info("=" * 60)
+
+ # Environment check
+ logger.info(f"API Endpoint: {API_URL}")
+ logger.info("Screenplay Types Available:")
+ for s_type, info in SCREENPLAY_LENGTHS.items():
+ logger.info(f" - {s_type}: {info['description']}")
+ logger.info(f"Genres: {', '.join(GENRE_TEMPLATES.keys())}")
+
+ if BRAVE_SEARCH_API_KEY:
+ logger.info("Web search enabled for market research.")
+ else:
+ logger.warning("Web search disabled.")
+
+ logger.info("=" * 60)
+
+ # Initialize database
+ logger.info("Initializing database...")
+ ScreenplayDatabase.init_db()
+ logger.info("Database initialization complete.")
+
+ # Create and launch interface
+ interface = create_interface()
+
+ interface.launch(
+ server_name="0.0.0.0",
+ server_port=7860,
+ share=False,
+ debug=True
+ )
\ No newline at end of file