# NOTE(review): removed HuggingFace Spaces page residue ("Spaces / Running") that
# was captured by the scrape — it is not part of the program source.
import gradio as gr | |
import os | |
import json | |
import requests | |
from datetime import datetime | |
import time | |
from typing import List, Dict, Any, Generator, Tuple, Optional, Set | |
import logging | |
import re | |
import tempfile | |
from pathlib import Path | |
import sqlite3 | |
import hashlib | |
import threading | |
from contextlib import contextmanager | |
from dataclasses import dataclass, field, asdict | |
from collections import defaultdict | |
# --- Logging setup ---
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
# --- Document export imports (optional dependency) ---
try:
    from docx import Document
    from docx.shared import Inches, Pt, RGBColor
    from docx.enum.text import WD_ALIGN_PARAGRAPH
    from docx.enum.style import WD_STYLE_TYPE
    from docx.oxml.ns import qn
    from docx.oxml import OxmlElement
    DOCX_AVAILABLE = True
except ImportError:
    # python-docx is optional; export features degrade gracefully.
    DOCX_AVAILABLE = False
    logger.warning("python-docx not installed. DOCX export will be disabled.")
# --- Environment variables & constants ---
FRIENDLI_TOKEN = os.getenv("FRIENDLI_TOKEN", "")
BRAVE_SEARCH_API_KEY = os.getenv("BRAVE_SEARCH_API_KEY", "")
API_URL = "https://api.friendli.ai/dedicated/v1/chat/completions"
MODEL_ID = "dep89a2fld32mcm"
DB_PATH = "novel_sessions_v6.db"

# Target length settings
TARGET_WORDS = 8000         # 8,000 words, leaving a safety margin
MIN_WORDS_PER_WRITER = 800  # minimum length per writer stage

# --- Environment validation ---
if not FRIENDLI_TOKEN:
    logger.error("FRIENDLI_TOKEN not set. Application will not work properly.")
    FRIENDLI_TOKEN = "dummy_token_for_testing"
if not BRAVE_SEARCH_API_KEY:
    logger.warning("BRAVE_SEARCH_API_KEY not set. Web search features will be disabled.")

# --- Global variables ---
db_lock = threading.Lock()
# Narrative phase definitions (Korean titles are user-facing strings)
NARRATIVE_PHASES = [
    "λμ : μΌμκ³Ό κ· μ΄",
    "λ°μ 1: λΆμμ κ³ μ‘°",
    "λ°μ 2: μΈλΆ 좩격",
    "λ°μ 3: λ΄μ κ°λ± μ¬ν",
    "μ μ 1: μκΈ°μ μ μ ",
    "μ μ 2: μ νμ μκ°",
    "νκ° 1: κ²°κ³Όμ μ¬ν",
    "νκ° 2: μλ‘μ΄ μΈμ",
    "κ²°λ§ 1: λ³νλ μΌμ",
    "κ²°λ§ 2: μ΄λ¦° μ§λ¬Έ"
]
# Stage pipeline - includes the editor stage.
# Fix: the final critic entry had an f-string prefix with no placeholder;
# it is a plain literal.
PROGRESSIVE_STAGES = [
    ("director", "π¬ κ°λ μ: ν΅ν©λ μμ¬ κ΅¬μ‘° κΈ°ν"),
    ("critic", "π λΉνκ°: μμ¬ μ§νμ±κ³Ό κΉμ΄ κ²ν "),
    ("director", "π¬ κ°λ μ: μμ λ λ§μ€ν°νλ"),
] + [
    # Draft pass: one writer per narrative phase
    (f"writer{i}", f"βοΈ μκ° {i}: μ΄μ - {NARRATIVE_PHASES[i-1]}")
    for i in range(1, 11)
] + [
    ("critic", "π λΉνκ°: μ€κ° κ²ν (μμ¬ λμ μ±κ³Ό λ³ν)"),
] + [
    # Revision pass: same writers, revised text
    (f"writer{i}", f"βοΈ μκ° {i}: μμ λ³Έ - {NARRATIVE_PHASES[i-1]}")
    for i in range(1, 11)
] + [
    ("editor", "βοΈ νΈμ§μ: λ°λ³΅ μ κ±° λ° μμ¬ μ¬κ΅¬μ±"),
    ("critic", "π λΉνκ°: μ΅μ’ κ²ν λ° λ¬Ένμ νκ°"),
]
# --- Data classes ---
@dataclass  # fix: decorator was missing; field()/asdict()/kwargs-init all require it
class CharacterArc:
    """Tracks a character's transformation trajectory across phases."""
    name: str
    initial_state: Dict[str, Any]  # state at the start of the story
    phase_states: Dict[int, Dict[str, Any]] = field(default_factory=dict)  # per-phase state
    transformations: List[str] = field(default_factory=list)  # major changes
    relationships_evolution: Dict[str, List[str]] = field(default_factory=dict)  # relationship shifts
@dataclass  # fix: decorator was missing; asdict()/kwargs-init require it
class PlotThread:
    """Tracks a single plot line through the narrative."""
    thread_id: str
    description: str
    introduction_phase: int
    development_phases: List[int]
    # Default added: add_plot_thread() constructs PlotThread without this field.
    resolution_phase: Optional[int] = None
    status: str = "active"  # active, resolved, suspended
@dataclass  # fix: decorator was missing; field() default requires it
class SymbolicEvolution:
    """Tracks how a symbol's meaning evolves across phases."""
    symbol: str
    initial_meaning: str
    phase_meanings: Dict[int, str] = field(default_factory=dict)  # meaning per phase
    transformation_complete: bool = False
@dataclass  # fix: decorator was missing; asdict() and **data reconstruction require it
class CharacterConsistency:
    """Guards character-name consistency across phases."""
    primary_names: Dict[str, str] = field(default_factory=dict)  # role -> canonical name
    aliases: Dict[str, List[str]] = field(default_factory=dict)  # canonical -> aliases
    name_history: List[Tuple[int, str, str]] = field(default_factory=list)  # (phase, role, used_name)

    def validate_name(self, phase: int, role: str, name: str) -> bool:
        """Return False if `name` conflicts with the canonical name (or its aliases) for `role`."""
        if role in self.primary_names:
            canonical = self.primary_names[role]
            if name != canonical and name not in self.aliases.get(canonical, []):
                return False
        return True

    def register_name(self, phase: int, role: str, name: str):
        """Record a name usage; the first name seen for a role becomes canonical."""
        if role not in self.primary_names:
            self.primary_names[role] = name
        self.name_history.append((phase, role, name))
# --- Core logic classes ---
class ContentDeduplicator:
    """Detects and filters duplicated content via word-set similarity."""

    def __init__(self):
        self.seen_paragraphs = set()     # full paragraphs already accepted
        self.seen_key_phrases = set()    # characteristic sentences already accepted
        self.similarity_threshold = 0.85

    def check_similarity(self, text1: str, text2: str) -> float:
        """Jaccard similarity between the lowercase word sets of two texts."""
        words_a = set(text1.lower().split())
        words_b = set(text2.lower().split())
        union = words_a | words_b
        if not union:
            return 0
        return len(words_a & words_b) / len(union)

    def extract_key_phrases(self, text: str) -> List[str]:
        """Key phrases: sentences longer than 20 chars; at most the first five."""
        stripped = (piece.strip() for piece in re.split(r'[.!?]', text))
        return [s for s in stripped if len(s) > 20][:5]

    def is_duplicate(self, paragraph: str) -> bool:
        """Return True if the paragraph repeats earlier content; otherwise record it."""
        phrases = self.extract_key_phrases(paragraph)
        # Any key phrase seen before marks the paragraph as duplicate.
        if any(p in self.seen_key_phrases for p in phrases):
            return True
        # Whole-paragraph similarity against everything accepted so far.
        if any(self.check_similarity(paragraph, earlier) > self.similarity_threshold
               for earlier in self.seen_paragraphs):
            return True
        # Not a duplicate: remember it.
        self.seen_paragraphs.add(paragraph)
        self.seen_key_phrases.update(phrases)
        return False

    def get_used_elements(self) -> List[str]:
        """Return up to 10 recorded key phrases (set order is arbitrary)."""
        return list(self.seen_key_phrases)[:10]

    def count_repetitions(self, content: str) -> int:
        """Count paragraph pairs within `content` whose similarity exceeds 0.7."""
        paragraphs = content.split('\n\n')
        total = 0
        for idx, first in enumerate(paragraphs):
            total += sum(
                1 for second in paragraphs[idx + 1:]
                if self.check_similarity(first, second) > 0.7
            )
        return total
class ProgressionMonitor:
    """Real-time monitoring of narrative progression."""

    def __init__(self):
        self.phase_keywords = {}   # reserved for per-phase keyword tracking
        self.locations = set()     # locations seen so far
        self.characters = set()    # character names seen so far

    def count_new_characters(self, content: str, phase: int) -> int:
        """Count names not seen before (naive proper-noun heuristic: capitalized/Hangul word)."""
        potential_names = re.findall(r'\b[A-Zκ°-ν£][a-zκ°-ν£]+\b', content)
        new_chars = set(potential_names) - self.characters
        self.characters.update(new_chars)
        return len(new_chars)

    def count_new_locations(self, content: str, phase: int) -> int:
        """Count new locations, detected by words preceding location markers."""
        location_markers = ['μμ', 'μΌλ‘', 'μ', 'μ', 'at', 'in', 'to']
        new_locs = 0
        for marker in location_markers:
            matches = re.findall(rf'(\S+)\s*{marker}', content)
            for match in matches:
                if match not in self.locations and len(match) > 2:
                    self.locations.add(match)
                    new_locs += 1
        return new_locs

    def calculate_content_difference(self, current_phase: int, content: str, previous_content: str) -> float:
        """Fraction of content that differs from the previous phase (1.0 = all new)."""
        if not previous_content:
            return 1.0
        dedup = ContentDeduplicator()
        return 1.0 - dedup.check_similarity(content, previous_content)

    def count_repetitions(self, content: str) -> int:
        """Count near-duplicate paragraph pairs (similarity > 0.7)."""
        paragraphs = content.split('\n\n')
        # Fix: the original constructed a fresh ContentDeduplicator inside the
        # O(n^2) inner loop; the comparator is stateless, so build it once.
        dedup = ContentDeduplicator()
        repetitions = 0
        for i, para1 in enumerate(paragraphs):
            for para2 in paragraphs[i+1:]:
                if dedup.check_similarity(para1, para2) > 0.7:
                    repetitions += 1
        return repetitions

    def calculate_progression_score(self, current_phase: int, content: str, previous_content: str = "") -> Dict[str, float]:
        """Score progression across four axes, each capped at 10."""
        scores = {
            "new_elements": 0.0,      # new characters/locations
            "character_growth": 0.0,  # growth-keyword density
            "plot_advancement": 0.0,  # difference from previous phase
            "no_repetition": 0.0      # absence of internal repetition
        }
        # New-element check
        new_characters = self.count_new_characters(content, current_phase)
        new_locations = self.count_new_locations(content, current_phase)
        scores["new_elements"] = min(10, (new_characters * 3 + new_locations * 2))
        # Growth-related keywords
        growth_keywords = ["κΉ¨λ¬μλ€", "μ΄μ λ", "λ¬λΌμ‘λ€", "μλ‘κ²", "λΉλ‘μ", "λ³νλ€", "λ μ΄μ"]
        growth_count = sum(1 for k in growth_keywords if k in content)
        scores["character_growth"] = min(10, growth_count * 2)
        # Plot advancement (difference from previous phase)
        if current_phase > 1 and previous_content:
            diff_ratio = self.calculate_content_difference(current_phase, content, previous_content)
            scores["plot_advancement"] = min(10, diff_ratio * 10)
        else:
            scores["plot_advancement"] = 8.0  # first phase gets a baseline score
        # Repetition check (penalty)
        repetition_count = self.count_repetitions(content)
        scores["no_repetition"] = max(0, 10 - repetition_count * 2)
        return scores
class ProgressiveNarrativeTracker:
    """System that tracks narrative progression and accumulation."""

    def __init__(self):
        self.character_arcs: Dict[str, CharacterArc] = {}
        self.plot_threads: Dict[str, PlotThread] = {}
        self.symbolic_evolutions: Dict[str, SymbolicEvolution] = {}
        self.phase_summaries: Dict[int, str] = {}
        self.accumulated_events: List[Dict[str, Any]] = []
        self.thematic_deepening: List[str] = []
        self.philosophical_insights: List[str] = []       # philosophical insights collected
        self.literary_devices: Dict[int, List[str]] = {}  # literary devices used per phase
        self.character_consistency = CharacterConsistency()  # name-consistency guard
        self.content_deduplicator = ContentDeduplicator()    # duplicate detector
        self.progression_monitor = ProgressionMonitor()      # progression monitor
        self.used_expressions: Set[str] = set()              # expressions already used

    def register_character_arc(self, name: str, initial_state: Dict[str, Any]):
        """Register a character arc; the name is also recorded as the canonical protagonist name."""
        self.character_arcs[name] = CharacterArc(name=name, initial_state=initial_state)
        self.character_consistency.register_name(0, "protagonist", name)
        logger.info(f"Character arc registered: {name}")

    def update_character_state(self, name: str, phase: int, new_state: Dict[str, Any], transformation: str):
        """Record a character's per-phase state and the transformation that occurred."""
        arc = self.character_arcs.get(name)
        if arc is not None:
            arc.phase_states[phase] = new_state
            arc.transformations.append(f"Phase {phase}: {transformation}")
            logger.info(f"Character {name} transformed in phase {phase}: {transformation}")

    def add_plot_thread(self, thread_id: str, description: str, intro_phase: int):
        """Open a new plot line starting at `intro_phase`."""
        self.plot_threads[thread_id] = PlotThread(
            thread_id=thread_id,
            description=description,
            introduction_phase=intro_phase,
            development_phases=[]
        )

    def develop_plot_thread(self, thread_id: str, phase: int):
        """Mark a plot line as developed during `phase`."""
        thread = self.plot_threads.get(thread_id)
        if thread is not None:
            thread.development_phases.append(phase)

    def check_narrative_progression(self, current_phase: int) -> Tuple[bool, List[str]]:
        """Check whether the narrative is genuinely progressing; return (ok, issues)."""
        issues: List[str] = []
        # 1. Character change: expect a transformation roughly every 3 phases.
        static_characters = [
            name for name, arc in self.character_arcs.items()
            if len(arc.transformations) < current_phase // 3
        ]
        if static_characters:
            issues.append(f"λ€μ μΈλ¬Όλ€μ λ³νκ° λΆμ‘±ν©λλ€: {', '.join(static_characters)}")
        # 2. Plot advancement: active threads need at least 2 development phases.
        unresolved_threads = [
            thread.description for thread in self.plot_threads.values()
            if thread.status == "active" and len(thread.development_phases) < 2
        ]
        if unresolved_threads:
            issues.append(f"μ§μ λμ§ μμ νλ‘―: {', '.join(unresolved_threads)}")
        # 3. Symbol evolution: expect a new meaning roughly every 4 phases.
        static_symbols = [
            symbol for symbol, evolution in self.symbolic_evolutions.items()
            if len(evolution.phase_meanings) < current_phase // 4
        ]
        if static_symbols:
            issues.append(f"μλ―Έκ° λ°μ νμ§ μμ μμ§: {', '.join(static_symbols)}")
        # 4. Philosophical depth.
        if len(self.philosophical_insights) < current_phase // 2:
            issues.append("μ² νμ μ±μ°°κ³Ό μΈκ°μ λν ν΅μ°°μ΄ λΆμ‘±ν©λλ€")
        # 5. Variety of literary devices.
        unique_devices: Set[str] = set()
        for devices in self.literary_devices.values():
            unique_devices.update(devices)
        if len(unique_devices) < 5:
            issues.append("λ¬Ένμ κΈ°λ²μ΄ λ¨μ‘°λ‘μ΅λλ€. λ λ€μν νν κΈ°λ²μ΄ νμν©λλ€")
        # 6. Character-name consistency.
        name_issues = [
            f"Phase {phase}: {role} μ΄λ¦ λΆμΌμΉ ({name})"
            for phase, role, name in self.character_consistency.name_history
            if not self.character_consistency.validate_name(phase, role, name)
        ]
        issues.extend(name_issues)
        return len(issues) == 0, issues

    def generate_phase_requirements(self, phase: int) -> str:
        """Build the mandatory requirement text injected into a phase's prompt."""
        requirements: List[str] = []
        # Summary of the previous phase.
        if phase > 1 and (phase-1) in self.phase_summaries:
            requirements.append(f"μ΄μ λ¨κ³ ν΅μ¬: {self.phase_summaries[phase-1]}")
        # Expressions already used (reuse forbidden).
        if self.used_expressions:
            requirements.append("\nβ λ€μ νν/μν©μ μ΄λ―Έ μ¬μ©λ¨ (μ¬μ¬μ© κΈμ§):")
            requirements.extend(
                f"- {expr[:50]}..." for expr in list(self.used_expressions)[-10:]
            )
        # Phase-specific mandatory content.
        phase_name = NARRATIVE_PHASES[phase-1] if phase <= 10 else "μμ "
        if "λμ " in phase_name:
            requirements.append("\nβ νμ ν¬ν¨:")
            requirements.extend([
                "- μΌμμ κ· μ΄μ 보μ¬μ£Όλ, ν° μ¬κ±΄μ΄ μλ λ―Έλ¬ν λ³νλ‘ μμ",
                "- μ£Όμ μΈλ¬Όλ€μ μ΄κΈ° μνμ κ΄κ³ μ€μ ",
                "- ν΅μ¬ μμ§ λμ (μμ°μ€λ½κ²)",
                "- μ£ΌμΈκ³΅ μ΄λ¦ λͺ νν μ€μ ",
            ])
        elif "λ°μ " in phase_name:
            requirements.append("\nβ νμ ν¬ν¨:")
            requirements.extend([
                "- μ΄μ λ¨κ³μ κ· μ΄/κ°λ±μ΄ ꡬ체νλκ³ μ¬ν",
                "- μλ‘μ΄ μ¬κ±΄μ΄λ μΈμμ΄ μΆκ°λμ΄ λ³΅μ‘μ± μ¦κ°",
                "- μΈλ¬Ό κ° κ΄κ³μ λ―Έλ¬ν λ³ν",
                "- μλ‘μ΄ κ³΅κ°μ΄λ μκ°λ νμ",
            ])
        elif "μ μ " in phase_name:
            requirements.append("\nβ νμ ν¬ν¨:")
            requirements.extend([
                "- μΆμ λ κ°λ±μ΄ μκ³μ μ λλ¬",
                "- μΈλ¬Όμ λ΄μ μ νμ΄λ μΈμμ μ νμ ",
                "- μμ§μ μλ―Έκ° μ 볡λκ±°λ μ¬ν",
                "- μ΄μ κ³Όλ λ€λ₯Έ νλμ΄λ κ²°μ ",
            ])
        elif "νκ°" in phase_name:
            requirements.append("\nβ νμ ν¬ν¨:")
            requirements.extend([
                "- μ μ μ μ¬νμ κ·Έλ‘ μΈν λ³ν",
                "- μλ‘μ΄ κ· νμ μ μ°Ύμκ°λ κ³Όμ ",
                "- μΈλ¬Όλ€μ λ³νλ κ΄κ³μ μΈμ",
                "- ν볡μ΄λ μμ€μ ꡬ체μ λ¬μ¬",
            ])
        elif "κ²°λ§" in phase_name:
            requirements.append("\nβ νμ ν¬ν¨:")
            requirements.extend([
                "- λ³νλ μΌμμ λͺ¨μ΅",
                "- ν΄κ²°λμ§ μμ μ§λ¬Έλ€",
                "- μ¬μ΄κ³Ό μ±μ°°μ μ¬μ§",
                "- μ²μκ³Ό λλΉλλ λ§μ§λ§",
            ])
        # Philosophy & humanity checklist.
        requirements.append("\nπ νμ ν¬ν¨ μμ:")
        requirements.extend([
            "- μ‘΄μ¬μ μλ―Έλ μΆμ λ³Έμ§μ λν μ±μ°°μ΄ λ΄κΈ΄ 1λ¬Έλ¨ μ΄μ",
            "- νμΈμ κ³ ν΅μ λν 곡κ°μ΄λ μ°λ―Όμ 보μ¬μ£Όλ ꡬ체μ μ₯λ©΄ 1κ° μ΄μ",
            "- '보μ¬μ£ΌκΈ°(showing)' κΈ°λ²: μ§μ μ€λͺ λμ κ°κ°μ λ¬μ¬μ νλμΌλ‘ νν",
            "- μ΄ λ¨κ³λ§μ λ νΉν λ¬Ένμ μ₯μΉλ μμ 1κ° μ΄μ",
        ])
        # Anti-repetition requirements.
        requirements.append("\nβ οΈ μ λ κΈμ§μ¬ν:")
        requirements.extend([
            "- μ΄μ λ¨κ³μ λμΌν μ¬κ±΄μ΄λ κ°λ± λ°λ³΅",
            "- μΈλ¬Όμ΄ κ°μ μκ°μ΄λ κ°μ μ 머무λ₯΄κΈ°",
            "- νλ‘―μ΄ μ μ리걸μνκΈ°",
            "- '~μ λκΌλ€', '~μλ€'μ κ°μ μ§μ μ μ€λͺ ",
            "- μ΄λ―Έ μ»μ κΉ¨λ¬μμ μκ³ λ€μ μμνκΈ°",
        ])
        # Progression checklist.
        requirements.append("\nβοΈ μ§ν 체ν¬λ¦¬μ€νΈ:")
        requirements.extend([
            "β‘ μ΄μ λ¨κ³μ κ²°κ³Όκ° μ΄λ² λ¨κ³μ μμΈμ΄ λλκ°?",
            "β‘ μ£ΌμΈκ³΅μ λ΄μ λ³νκ° κ΅¬μ²΄μ μΌλ‘ λλ¬λλκ°?",
            "β‘ νλ‘―μ΄ μ€μ λ‘ μ μ§νλκ°?",
            "β‘ μλ‘μ΄ μ 보/μ¬κ±΄μ΄ μΆκ°λλκ°?",
        ])
        return "\n".join(requirements)

    def extract_used_elements(self, content: str):
        """Harvest characteristic sentences (20-100 chars) so later phases avoid reusing them."""
        for sent in re.findall(r'[^.!?]+[.!?]', content):
            if 20 < len(sent) < 100:
                self.used_expressions.add(sent.strip())
class NovelDatabase:
    """SQLite persistence layer for novel sessions.

    All methods are stateless and are exposed as @staticmethod (the original
    relied on implicit plain-function access via the class). get_db() now
    carries @contextmanager — without it, `with NovelDatabase.get_db() as conn:`
    raised because a bare generator has no __enter__/__exit__.
    """

    @staticmethod
    def init_db():
        """Create all tables (idempotent) and switch the DB to WAL journal mode."""
        with sqlite3.connect(DB_PATH) as conn:
            conn.execute("PRAGMA journal_mode=WAL")
            cursor = conn.cursor()
            # Core tables
            cursor.execute('''
                CREATE TABLE IF NOT EXISTS sessions (
                    session_id TEXT PRIMARY KEY,
                    user_query TEXT NOT NULL,
                    language TEXT NOT NULL,
                    created_at TEXT DEFAULT (datetime('now')),
                    updated_at TEXT DEFAULT (datetime('now')),
                    status TEXT DEFAULT 'active',
                    current_stage INTEGER DEFAULT 0,
                    final_novel TEXT,
                    literary_report TEXT,
                    total_words INTEGER DEFAULT 0,
                    narrative_tracker TEXT
                )
            ''')
            cursor.execute('''
                CREATE TABLE IF NOT EXISTS stages (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    session_id TEXT NOT NULL,
                    stage_number INTEGER NOT NULL,
                    stage_name TEXT NOT NULL,
                    role TEXT NOT NULL,
                    content TEXT,
                    word_count INTEGER DEFAULT 0,
                    status TEXT DEFAULT 'pending',
                    progression_score REAL DEFAULT 0.0,
                    repetition_score REAL DEFAULT 0.0,
                    created_at TEXT DEFAULT (datetime('now')),
                    updated_at TEXT DEFAULT (datetime('now')),
                    FOREIGN KEY (session_id) REFERENCES sessions(session_id),
                    UNIQUE(session_id, stage_number)
                )
            ''')
            cursor.execute('''
                CREATE TABLE IF NOT EXISTS plot_threads (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    session_id TEXT NOT NULL,
                    thread_id TEXT NOT NULL,
                    description TEXT,
                    introduction_phase INTEGER,
                    status TEXT DEFAULT 'active',
                    created_at TEXT DEFAULT (datetime('now')),
                    FOREIGN KEY (session_id) REFERENCES sessions(session_id)
                )
            ''')
            # Duplicate-detection audit table
            cursor.execute('''
                CREATE TABLE IF NOT EXISTS duplicate_detection (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    session_id TEXT NOT NULL,
                    phase INTEGER NOT NULL,
                    duplicate_content TEXT,
                    original_phase INTEGER,
                    similarity_score REAL,
                    created_at TEXT DEFAULT (datetime('now')),
                    FOREIGN KEY (session_id) REFERENCES sessions(session_id)
                )
            ''')
            conn.commit()

    @staticmethod
    @contextmanager
    def get_db():
        """Yield a connection with Row factory; the global lock serializes DB access."""
        with db_lock:
            conn = sqlite3.connect(DB_PATH, timeout=30.0)
            conn.row_factory = sqlite3.Row
            try:
                yield conn
            finally:
                conn.close()

    @staticmethod
    def create_session(user_query: str, language: str) -> str:
        """Create a session row and return its id (md5 of query+timestamp, not security-sensitive)."""
        session_id = hashlib.md5(f"{user_query}{datetime.now()}".encode()).hexdigest()
        with NovelDatabase.get_db() as conn:
            conn.cursor().execute(
                'INSERT INTO sessions (session_id, user_query, language) VALUES (?, ?, ?)',
                (session_id, user_query, language)
            )
            conn.commit()
        return session_id

    @staticmethod
    def save_stage(session_id: str, stage_number: int, stage_name: str,
                   role: str, content: str, status: str = 'complete',
                   progression_score: float = 0.0, repetition_score: float = 0.0):
        """Upsert a stage row and refresh the session's total word count / current stage."""
        word_count = len(content.split()) if content else 0
        with NovelDatabase.get_db() as conn:
            cursor = conn.cursor()
            cursor.execute('''
                INSERT INTO stages (session_id, stage_number, stage_name, role, content, word_count, status, progression_score, repetition_score)
                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
                ON CONFLICT(session_id, stage_number)
                DO UPDATE SET content=?, word_count=?, status=?, stage_name=?, progression_score=?, repetition_score=?, updated_at=datetime('now')
            ''', (session_id, stage_number, stage_name, role, content, word_count, status, progression_score, repetition_score,
                  content, word_count, status, stage_name, progression_score, repetition_score))
            # Only writer stages count toward the novel's word total.
            cursor.execute('''
                UPDATE sessions
                SET total_words = (
                    SELECT SUM(word_count)
                    FROM stages
                    WHERE session_id = ? AND role LIKE 'writer%' AND content IS NOT NULL
                ),
                updated_at = datetime('now'),
                current_stage = ?
                WHERE session_id = ?
            ''', (session_id, stage_number, session_id))
            conn.commit()

    @staticmethod
    def get_writer_content(session_id: str) -> str:
        """Concatenate writer output, preferring each writer's revision over the draft."""
        with NovelDatabase.get_db() as conn:
            all_content = []
            for writer_num in range(1, 11):
                # Prefer the revision ('μμ λ³Έ'); fall back to the draft ('μ΄μ').
                row = conn.cursor().execute('''
                    SELECT content FROM stages
                    WHERE session_id = ? AND role = ?
                    AND stage_name LIKE '%μμ λ³Έ%'
                    ORDER BY stage_number DESC LIMIT 1
                ''', (session_id, f'writer{writer_num}')).fetchone()
                if not row or not row['content']:
                    row = conn.cursor().execute('''
                        SELECT content FROM stages
                        WHERE session_id = ? AND role = ?
                        AND stage_name LIKE '%μ΄μ%'
                        ORDER BY stage_number DESC LIMIT 1
                    ''', (session_id, f'writer{writer_num}')).fetchone()
                if row and row['content']:
                    all_content.append(row['content'].strip())
            return '\n\n'.join(all_content)

    @staticmethod
    def get_total_words(session_id: str) -> int:
        """Return the session's total word count (0 if unset)."""
        with NovelDatabase.get_db() as conn:
            row = conn.cursor().execute(
                'SELECT total_words FROM sessions WHERE session_id = ?',
                (session_id,)
            ).fetchone()
            return row['total_words'] if row and row['total_words'] else 0

    @staticmethod
    def save_narrative_tracker(session_id: str, tracker: ProgressiveNarrativeTracker):
        """Serialize the tracker state into the session row as JSON."""
        with NovelDatabase.get_db() as conn:
            tracker_data = json.dumps({
                'character_arcs': {k: asdict(v) for k, v in tracker.character_arcs.items()},
                'plot_threads': {k: asdict(v) for k, v in tracker.plot_threads.items()},
                'phase_summaries': tracker.phase_summaries,
                'thematic_deepening': tracker.thematic_deepening,
                'philosophical_insights': tracker.philosophical_insights,
                'literary_devices': tracker.literary_devices,
                'character_consistency': asdict(tracker.character_consistency),
                'used_expressions': list(tracker.used_expressions)
            })
            conn.cursor().execute(
                'UPDATE sessions SET narrative_tracker = ? WHERE session_id = ?',
                (tracker_data, session_id)
            )
            conn.commit()

    @staticmethod
    def load_narrative_tracker(session_id: str) -> Optional[ProgressiveNarrativeTracker]:
        """Rebuild a tracker from the stored JSON, or None if absent."""
        with NovelDatabase.get_db() as conn:
            row = conn.cursor().execute(
                'SELECT narrative_tracker FROM sessions WHERE session_id = ?',
                (session_id,)
            ).fetchone()
            if row and row['narrative_tracker']:
                data = json.loads(row['narrative_tracker'])
                tracker = ProgressiveNarrativeTracker()
                # Restore dataclass collections.
                for name, arc_data in data.get('character_arcs', {}).items():
                    tracker.character_arcs[name] = CharacterArc(**arc_data)
                for thread_id, thread_data in data.get('plot_threads', {}).items():
                    tracker.plot_threads[thread_id] = PlotThread(**thread_data)
                # Fix: JSON turns int dict keys into strings; convert back so
                # lookups like `(phase-1) in self.phase_summaries` keep working.
                tracker.phase_summaries = {
                    int(k): v for k, v in data.get('phase_summaries', {}).items()
                }
                tracker.thematic_deepening = data.get('thematic_deepening', [])
                tracker.philosophical_insights = data.get('philosophical_insights', [])
                tracker.literary_devices = {
                    int(k): v for k, v in data.get('literary_devices', {}).items()
                }
                # Restore character-name consistency state.
                if 'character_consistency' in data:
                    tracker.character_consistency = CharacterConsistency(**data['character_consistency'])
                # Restore used expressions.
                if 'used_expressions' in data:
                    tracker.used_expressions = set(data['used_expressions'])
                return tracker
            return None

    @staticmethod
    def save_duplicate_detection(session_id: str, phase: int, duplicate_content: str,
                                 original_phase: int, similarity_score: float):
        """Append a duplicate-detection audit record."""
        with NovelDatabase.get_db() as conn:
            conn.cursor().execute('''
                INSERT INTO duplicate_detection
                (session_id, phase, duplicate_content, original_phase, similarity_score)
                VALUES (?, ?, ?, ?, ?)
            ''', (session_id, phase, duplicate_content, original_phase, similarity_score))
            conn.commit()

    @staticmethod
    def get_session(session_id: str) -> Optional[Dict]:
        """Fetch one session row as a dict, or None."""
        with NovelDatabase.get_db() as conn:
            row = conn.cursor().execute('SELECT * FROM sessions WHERE session_id = ?', (session_id,)).fetchone()
            return dict(row) if row else None

    @staticmethod
    def get_stages(session_id: str) -> List[Dict]:
        """Fetch all stage rows for a session, ordered by stage number."""
        with NovelDatabase.get_db() as conn:
            rows = conn.cursor().execute('SELECT * FROM stages WHERE session_id = ? ORDER BY stage_number', (session_id,)).fetchall()
            return [dict(row) for row in rows]

    @staticmethod
    def update_final_novel(session_id: str, final_novel: str, literary_report: str = ""):
        """Store the finished novel and mark the session complete."""
        with NovelDatabase.get_db() as conn:
            conn.cursor().execute(
                "UPDATE sessions SET final_novel = ?, status = 'complete', updated_at = datetime('now'), literary_report = ? WHERE session_id = ?",
                (final_novel, literary_report, session_id)
            )
            conn.commit()

    @staticmethod
    def get_active_sessions() -> List[Dict]:
        """Return up to 10 most recently updated active sessions."""
        with NovelDatabase.get_db() as conn:
            rows = conn.cursor().execute(
                "SELECT session_id, user_query, language, created_at, current_stage, total_words FROM sessions WHERE status = 'active' ORDER BY updated_at DESC LIMIT 10"
            ).fetchall()
            return [dict(row) for row in rows]
class WebSearchIntegration:
    """Thin wrapper around the Brave web-search API."""

    def __init__(self):
        self.brave_api_key = BRAVE_SEARCH_API_KEY
        self.search_url = "https://api.search.brave.com/res/v1/web/search"
        # Search is silently disabled when no API key is configured.
        self.enabled = bool(self.brave_api_key)

    def search(self, query: str, count: int = 3, language: str = "en") -> List[Dict]:
        """Run a web search; returns [] when disabled or on any request error."""
        if not self.enabled:
            return []
        headers = {
            "Accept": "application/json",
            "X-Subscription-Token": self.brave_api_key,
        }
        params = {
            "q": query,
            "count": count,
            "search_lang": "ko" if language == "Korean" else "en",
            "text_decorations": False,
            "safesearch": "moderate",
        }
        try:
            response = requests.get(self.search_url, headers=headers, params=params, timeout=10)
            response.raise_for_status()
        except requests.exceptions.RequestException as e:
            logger.error(f"μΉ κ²μ API μ€λ₯: {e}")
            return []
        return response.json().get("web", {}).get("results", [])

    def extract_relevant_info(self, results: List[Dict], max_chars: int = 1500) -> str:
        """Condense up to three results into a '[n] title: description' digest capped at max_chars."""
        if not results:
            return ""
        extracted = []
        used = 0
        for i, result in enumerate(results[:3], 1):
            line = f"[{i}] {result.get('title', '')}: {result.get('description', '')}"
            if used + len(line) >= max_chars:
                break
            extracted.append(line)
            used += len(line)
        return "\n".join(extracted)
class ProgressiveLiterarySystem: | |
"""μ§νν λ¬Έν μμ€ μμ± μμ€ν """ | |
def __init__(self): | |
self.token = FRIENDLI_TOKEN | |
self.api_url = API_URL | |
self.model_id = MODEL_ID | |
self.narrative_tracker = ProgressiveNarrativeTracker() | |
self.web_search = WebSearchIntegration() | |
self.current_session_id = None | |
NovelDatabase.init_db() | |
def create_headers(self): | |
return {"Authorization": f"Bearer {self.token}", "Content-Type": "application/json"} | |
# --- ν둬ννΈ μμ± ν¨μλ€ --- | |
def create_director_initial_prompt(self, user_query: str, language: str) -> str: | |
"""κ°λ μ μ΄κΈ° κΈ°ν - ν΅ν©λ μμ¬ κ΅¬μ‘°""" | |
search_results_str = "" | |
if self.web_search.enabled: | |
# μ² νμ ν€μλ μΆκ° (쿼리 κΈΈμ΄ μ ν) | |
short_query = user_query[:50] if len(user_query) > 50 else user_query | |
queries = [ | |
f"{short_query} μ² νμ μλ―Έ", # μ² νμ κ΄μ | |
f"μΈκ° μ‘΄μ¬ μλ―Έ {short_query}", # μ€μ‘΄μ μ£Όμ | |
f"{short_query} λ¬Έν μν", | |
f"{short_query} νλ μ¬ν" | |
] | |
for q in queries[:3]: # 3κ°κΉμ§λ§ κ²μ | |
try: | |
results = self.web_search.search(q, count=2, language=language) | |
if results: | |
search_results_str += self.web_search.extract_relevant_info(results) + "\n" | |
except Exception as e: | |
logger.warning(f"κ²μ 쿼리 μ€ν¨: {q[:50]}... - {str(e)}") | |
continue | |
lang_prompts = { | |
"Korean": f"""λΉμ μ λ Έλ²¨λ¬Ένμ μμμκ° μμ€μ νκ΅ λ¬Έν κ±°μ₯μ λλ€. | |
λ¨νΈμ΄ μλ μ€νΈ μμ€(8,000λ¨μ΄ μ΄μ)μ μν ν΅ν©λ μμ¬ κ΅¬μ‘°λ₯Ό κΈ°ννμΈμ. μ λ 'μλν'λ₯Ό μ¬μ©νμ§ λ§μΈμ. | |
**μ£Όμ :** {user_query} | |
**μ°Έκ³ μλ£:** | |
{search_results_str if search_results_str else "N/A"} | |
**νμ μꡬμ¬ν:** | |
1. **ν΅ν©λ μμ¬ κ΅¬μ‘° (κ°μ₯ μ€μ)** | |
- 10κ° λ¨κ³κ° μ κΈ°μ μΌλ‘ μ°κ²°λ λ¨μΌ μμ¬ | |
- κ° λ¨κ³λ μ΄μ λ¨κ³μ κ²°κ³Όλ‘ μμ°μ€λ½κ² μ΄μ΄μ§ | |
- λ°λ³΅μ΄ μλ μΆμ κ³Ό λ°μ | |
- μ£ΌμΈκ³΅ μ΄λ¦μ μ²μλΆν° λͺ νν μ€μ (μ: λλΌλ―Έ) | |
λ¨κ³λ³ μμ¬ μ§ν: | |
1) λμ : μΌμκ³Ό κ· μ΄ - νλ²ν μΌμ μ 첫 κ· μ΄ | |
2) λ°μ 1: λΆμμ κ³ μ‘° - κ· μ΄μ΄ νλλλ©° λΆμ μ¦ν | |
3) λ°μ 2: μΈλΆ 좩격 - μμμΉ λͺ»ν μΈλΆ μ¬κ±΄ | |
4) λ°μ 3: λ΄μ κ°λ± μ¬ν - κ°μΉκ΄μ μΆ©λ | |
5) μ μ 1: μκΈ°μ μ μ - λͺ¨λ κ°λ±μ΄ κ·Ήλν | |
6) μ μ 2: μ νμ μκ° - κ²°μ μ μ ν | |
7) νκ° 1: κ²°κ³Όμ μ¬ν - μ νμ μ§μ μ κ²°κ³Ό | |
8) νκ° 2: μλ‘μ΄ μΈμ - λ³νλ μΈκ³κ΄ | |
9) κ²°λ§ 1: λ³νλ μΌμ - μλ‘μ΄ κ· ν | |
10) κ²°λ§ 2: μ΄λ¦° μ§λ¬Έ - λ μμκ² λμ§λ μ§λ¬Έ | |
2. **μΈλ¬Όμ λ³ν κΆ€μ ** | |
- μ£ΌμΈκ³΅: μ΄κΈ° μν β μ€κ° λ³ν β μ΅μ’ μν (λͺ νν arc) | |
- μ£Όμ μΈλ¬Όλ€λ κ°μμ λ³ν κ²½ν | |
- κ΄κ³μ μλμ λ³ν | |
- κ° λ¨κ³μμ μΈλ¬Όμ΄ μ΄λ»κ² λ³ννλμ§ κ΅¬μ²΄μ μΌλ‘ λͺ μ | |
3. **μ£Όμ νλ‘― λΌμΈ** (2-3κ°) | |
- λ©μΈ νλ‘―: μ 체λ₯Ό κ΄ν΅νλ ν΅μ¬ κ°λ± | |
- μλΈ νλ‘―: λ©μΈκ³Ό μ°κ²°λλ©° μ£Όμ λ₯Ό μ¬ν | |
- κ° νλ‘―μ΄ μ΄λ λ¨κ³μμ μμ/λ°μ /ν΄κ²°λλμ§ λͺ μ | |
4. **μμ§μ μ§ν** | |
- ν΅μ¬ μμ§ 1-2κ° μ€μ ('κ°κ΅¬λ¦¬μ' κ°μ κ°λ ¬νκ³ λ€μΈ΅μ μΈ μμ§) | |
- λ¨κ³λ³λ‘ μλ―Έκ° λ³ν/μ¬ν/μ 볡 | |
5. **μ¬νμ λ§₯λ½** | |
- κ°μΈμ λ¬Έμ κ° μ¬ν ꡬ쑰μ μ°κ²° | |
- ꡬ체μ μΈ νκ΅ μ¬νμ νμ€ λ°μ | |
6. **μ² νμ κΉμ΄μ μΈκ°μ ** | |
- 보νΈμ μΈκ° 쑰건μ λν μ±μ°° | |
- νμΈμ κ³ ν΅μ λν 곡κ°κ³Ό μ°λ―Ό | |
- μ€μ‘΄μ μ§λ¬Έκ³Ό κ·Έμ λν νꡬ | |
- "μ μ΄μμΌ νλκ°"μ λν λλ¦μ λ΅ | |
**μ λ κΈμ§μ¬ν:** | |
- λμΌν μ¬κ±΄μ΄λ μν©μ λ°λ³΅ | |
- μΈλ¬Όμ΄ κ°μ κ°μ /μκ°μ 머무λ₯΄κΈ° | |
- νλ‘―μ 리μ μ΄λ μν ꡬ쑰 | |
- κ° λ¨κ³κ° λ 립λ μνΌμλλ‘ μ‘΄μ¬ | |
- μ£ΌμΈκ³΅ μ΄λ¦μ λΆμΌμΉλ νΌλ | |
**λΆλ κ³ν:** | |
- μ΄ 8,000λ¨μ΄ μ΄μ | |
- κ° λ¨κ³ νκ· 800λ¨μ΄ | |
- κ· ν μ‘ν μμ¬ μ κ° | |
νλμ κ°λ ₯ν μμ¬κ° μμλΆν° λκΉμ§ κ΄ν΅νλ μνμ κΈ°ννμΈμ.""", | |
"English": f"""You are a Nobel Prize-winning master of contemporary literary fiction. | |
Plan an integrated narrative structure for a novella (8,000+ words), not a collection of short stories. | |
**Theme:** {user_query} | |
**Reference:** | |
{search_results_str if search_results_str else "N/A"} | |
**Essential Requirements:** | |
1. **Integrated Narrative Structure (Most Important)** | |
- Single narrative with 10 organically connected phases | |
- Each phase naturally follows from previous results | |
- Accumulation and development, not repetition | |
- Protagonist name clearly established from beginning | |
Phase Progression: | |
1) Introduction: Daily life and first crack | |
2) Development 1: Rising anxiety | |
3) Development 2: External shock | |
4) Development 3: Deepening internal conflict | |
5) Climax 1: Peak crisis | |
6) Climax 2: Moment of choice | |
7) Falling Action 1: Direct consequences | |
8) Falling Action 2: New awareness | |
9) Resolution 1: Changed daily life | |
10) Resolution 2: Open questions | |
2. **Character Transformation Arcs** | |
- Protagonist: Clear progression from initial β middle β final state | |
- Supporting characters also experience change | |
- Dynamic relationship evolution | |
- Specify how characters change in each phase | |
3. **Plot Threads** (2-3) | |
- Main plot: Core conflict throughout | |
- Subplots: Connected and deepening themes | |
- Specify which phase each plot starts/develops/resolves | |
4. **Symbolic Evolution** | |
- 1-2 core symbols (like 'frog eggs' - intense and multilayered) | |
- Meaning transforms across phases | |
5. **Social Context** | |
- Individual problems connected to social structures | |
- Specific contemporary realities | |
6. **Philosophical Depth and Humanity** | |
- Reflection on universal human condition | |
- Empathy and compassion for others' suffering | |
- Existential questions and exploration | |
- Personal answer to "why should we live?" | |
**Absolutely Forbidden:** | |
- Repetition of same events/situations | |
- Characters stuck in same emotions | |
- Plot resets or circular structure | |
- Independent episodes | |
- Protagonist name inconsistency | |
**Length Planning:** | |
- Total 8,000+ words | |
- ~800 words per phase | |
- Balanced progression | |
Create a work with one powerful narrative from beginning to end.""" | |
} | |
return lang_prompts.get(language, lang_prompts["Korean"]) | |
def create_critic_director_prompt(self, director_plan: str, user_query: str, language: str) -> str:
    """Build the critic prompt that reviews the director's initial plan.

    The review focuses on narrative integration: whether the planned 10
    phases form one continuous novel rather than repeated episodes.

    Args:
        director_plan: Plan text produced by the director stage.
        user_query: The user's original theme/request.
        language: "English" selects the English prompt; any other value
            falls back to the Korean variant.

    Returns:
        A fully formatted review prompt embedding the plan and theme.
    """
    # Both variants embed the plan and theme verbatim; only the prompt
    # language differs.
    lang_prompts = {
        "Korean": f"""λΉμ μ μμ¬ κ΅¬μ‘° μ λ¬Έ λΉνκ°μ λλ€.
μ΄ κΈ°νμ΄ μ§μ ν 'μ₯νΈ μμ€'μΈμ§ μ격ν κ²ν νμΈμ.
**μ μ£Όμ :** {user_query}
**κ°λ μ κΈ°ν:**
{director_plan}
**ν΅μ¬ κ²ν μ¬ν:**
1. **μμ¬μ ν΅ν©μ±κ³Ό μ§νμ±**
- 10κ° λ¨κ³κ° νλμ μ΄μΌκΈ°λ‘ μ°κ²°λλκ°?
- κ° λ¨κ³κ° μ΄μ λ¨κ³μ νμ°μ κ²°κ³ΌμΈκ°?
- λμΌν μν©μ λ°λ³΅μ μλκ°?
2. **μΈλ¬Ό λ³νμ κΆ€μ **
- μ£ΌμΈκ³΅μ΄ λͺ νν λ³νμ arcλ₯Ό κ°μ§λκ°?
- λ³νκ° κ΅¬μ²΄μ μ΄κ³ μ λΉμ± μλκ°?
- κ΄κ³μ λ°μ μ΄ κ³νλμ΄ μλκ°?
- μ£ΌμΈκ³΅ μ΄λ¦μ΄ μΌκ΄λκ² μ€μ λμ΄ μλκ°?
3. **νλ‘―μ μΆμ μ±**
- κ°λ±μ΄ μ μ§μ μΌλ‘ μ¬νλλκ°?
- μλ‘μ΄ μμκ° μΆκ°λλ©° 볡μ‘μ±μ΄ μ¦κ°νλκ°?
- ν΄κ²°μ΄ μμ°μ€λ½κ³ νμ°μ μΈκ°?
4. **λΆλκ³Ό λ°λ**
- 8,000λ¨μ΄λ₯Ό μ±μΈ μΆ©λΆν λ΄μ©μΈκ°?
- κ° λ¨κ³κ° 800λ¨μ΄μ λ°λλ₯Ό κ°μ§ μ μλκ°?
5. **μ² νμ κΉμ΄**
- μΈκ° μ‘΄μ¬μ λν ν΅μ°°μ΄ κ³νλμ΄ μλκ°?
- λ¨μν μ¬κ±΄ λμ΄μ΄ μλ μλ―Έμ νκ΅¬κ° μλκ°?
**νμ :**
- ν΅κ³Ό: μ§μ ν μ₯νΈ μμ¬ κ΅¬μ‘°
- μ¬μμ±: λ°λ³΅μ /μνμ ꡬ쑰
ꡬ체μ κ°μ λ°©ν₯μ μ μνμΈμ.""",
        "English": f"""You are a narrative structure critic.
Strictly review whether this plan is a true 'novel' rather than repeated episodes.
**Original Theme:** {user_query}
**Director's Plan:**
{director_plan}
**Key Review Points:**
1. **Narrative Integration and Progression**
- Do 10 phases connect as one story?
- Does each phase necessarily follow from previous?
- No repetition of same situations?
2. **Character Transformation Arcs**
- Clear protagonist transformation arc?
- Concrete and credible changes?
- Planned relationship development?
- Consistent protagonist naming?
3. **Plot Accumulation**
- Progressive conflict deepening?
- Added complexity through new elements?
- Natural and inevitable resolution?
4. **Length and Density**
- Sufficient content for 8,000 words?
- Can each phase sustain 800 words?
5. **Philosophical Depth**
- Insights into human existence planned?
- Exploration of meaning, not just events?
**Verdict:**
- Pass: True novel structure
- Rewrite: Repetitive/circular structure
Provide specific improvements."""
    }
    # Unknown languages default to the Korean variant.
    return lang_prompts.get(language, lang_prompts["Korean"])
def create_writer_prompt_enhanced(self, writer_number: int, director_plan: str,
                                  previous_content: str, phase_requirements: str,
                                  narrative_summary: str, language: str,
                                  used_elements: List[str]) -> str:
    """Build the enhanced per-phase writer prompt with anti-repetition guards.

    Args:
        writer_number: 1-based writer index; writer N owns narrative phase N.
        director_plan: The (revised) master plan text.
        previous_content: Full text of the preceding writer's draft; only
            the last 1500 characters are embedded.
        phase_requirements: Phase-specific requirements from the tracker.
        narrative_summary: Summary of the story so far.
        language: "English" selects the English prompt; otherwise Korean.
        used_elements: Already-used expressions; only the most recent 15
            are embedded, each truncated to 80 characters.

    Returns:
        The formatted writer prompt string.
    """
    # Each writer owns exactly one of the 10 narrative phases.
    phase_name = NARRATIVE_PHASES[writer_number-1]
    target_words = MIN_WORDS_PER_WRITER
    lang_prompts = {
        "Korean": f"""λΉμ μ μκ° {writer_number}λ²μ λλ€.
**νμ¬ λ¨κ³: {phase_name}**
**μ 체 μμ¬ κ΅¬μ‘°:**
{director_plan}
**μ§κΈκΉμ§μ μ΄μΌκΈ° μμ½:**
{narrative_summary}
**μ΄μ λ΄μ© (μ§μ λΆλΆ):**
{previous_content[-1500:] if previous_content else "μμ"}
**μ΄λ² λ¨κ³ νμ μꡬμ¬ν:**
{phase_requirements}
**β μ λ μ¬μ© κΈμ§ νν/μν© (μ΄λ―Έ μ¬μ©λ¨):**
{chr(10).join(f"- {elem[:80]}..." for elem in used_elements[-15:])}
**μμ± μ§μΉ¨:**
1. **λΆλ**: {target_words}-900 λ¨μ΄ (νμ)
- λ΄λ©΄ λ¬μ¬μ ꡬ체μ λν μΌλ‘ λΆλ ν보
- μ₯λ©΄μ μΆ©λΆν μ κ°νκ³ κΉμ΄ μκ² λ¬μ¬
2. **μμ¬ μ§ν (κ°μ₯ μ€μ)**
- μ΄μ λ¨κ³μμ μΌμ΄λ μΌμ μ§μ μ κ²°κ³Όλ‘ μμ
- μλ‘μ΄ μ¬κ±΄/μΈμ/λ³νλ₯Ό μΆκ°νμ¬ μ΄μΌκΈ° μ μ§
- λ€μ λ¨κ³λ‘ μμ°μ€λ½κ² μ°κ²°λ κ³ λ¦¬ λ§λ ¨
- μ£ΌμΈκ³΅μ κΉ¨λ¬μμ΄ λ¦¬μ λμ§ μκ³ μΆμ λ¨
3. **μΈλ¬Όμ λ³ν**
- μ΄ λ¨κ³μμ μΈλ¬Όμ΄ κ²ͺλ ꡬ체μ λ³ν λ¬μ¬
- λ΄λ©΄μ λ―Έλ¬ν λ³νλ ν¬μ°©
- κ΄κ³μ μν λ³ν λ°μ
- μ΄μ λ¨κ³λ³΄λ€ μ±μ₯ν λͺ¨μ΅ 보μ¬μ£ΌκΈ°
4. **문체μ κΈ°λ²**
- νκ΅ νλ λ¬Ένμ μ¬μΈν μ¬λ¦¬ λ¬μ¬
- μΌμ μ μ¬νμ λ§₯λ½ λ Ήμ¬λ΄κΈ°
- κ°κ°μ λν μΌκ³Ό λ΄λ©΄ μμμ κ· ν
5. **μ°μμ± μ μ§**
- μΈλ¬Όμ λͺ©μ리μ λ§ν¬ μΌκ΄μ±
- 곡κ°κ³Ό μκ°μ μ°μμ±
- μμ§κ³Ό λͺ¨ν°νμ λ°μ
- μ£ΌμΈκ³΅ μ΄λ¦ μΌκ΄μ± (λ°λμ νμΈ)
6. **λ¬Ένμ κΈ°λ² νμ μ¬μ©**
- "보μ¬μ£ΌκΈ°(showing)" κΈ°λ²: μ§μ μ€λͺ λμ κ°κ°μ λ¬μ¬
- μμ μ μμ§: ꡬ체μ μ¬λ¬Όμ ν΅ν μΆμμ μλ―Έ μ λ¬
- λνλ₯Ό ν΅ν μ±κ²© λλ¬λ΄κΈ°
- λ΄μ λ λ°±κ³Ό μμμ νλ¦ κΈ°λ²
7. **μ² νμ μ±μ°° ν¬ν¨**
- κ° λ¨κ³λ§λ€ μΈκ° μ‘΄μ¬μ λν μλ‘μ΄ ν΅μ°° 1κ° μ΄μ
- ꡬ체μ μ¬κ±΄ μμμ 보νΈμ μ§λ¦¬ λ°κ²¬
8. **μλ‘μ΄ μμ νμ**
- μλ‘μ΄ μΈλ¬Ό, μ₯μ, λλ μν© μ€ μ΅μ 1κ°
- μ΄μ κ³Ό λ€λ₯Έ μκ°λλ κ³΅κ°
- κ°λ±μ μλ‘μ΄ μΈ‘λ©΄ λλ¬λ΄κΈ°
**β μ§ν 체ν¬λ¦¬μ€νΈ:**
β‘ μ΄μ λ¨κ³μ κ²°κ³Όκ° λͺ νν λλ¬λλκ°?
β‘ νλ‘―μ΄ μ€μ λ‘ μ μ§νλκ°?
β‘ μΈλ¬Όμ λ³νκ° κ΅¬μ²΄μ μΈκ°?
β‘ μλ‘μ΄ μμκ° μΆκ°λμλκ°?
β‘ λ°λ³΅λλ μν©μ΄ μλκ°?
**μ λ κΈμ§:**
- μ΄μ κ³Ό λμΌν μν© λ°λ³΅
- μμ¬μ μ 체λ νν΄
- λΆλ λ―Έλ¬ (μ΅μ {target_words}λ¨μ΄)
- "~μ λκΌλ€", "~μλ€"μ κ°μ μ§μ μ μ€λͺ
- μ΄λ―Έ μ»μ κΉ¨λ¬μ μκΈ°
μ΄μ μ νλ¦μ μ΄μ΄λ°μ μλ‘μ΄ κ΅λ©΄μΌλ‘ λ°μ μν€μΈμ.""",
        "English": f"""You are Writer #{writer_number}.
**Current Phase: {phase_name}**
**Overall Narrative Structure:**
{director_plan}
**Story So Far:**
{narrative_summary}
**Previous Content (immediately before):**
{previous_content[-1500:] if previous_content else "Beginning"}
**Phase Requirements:**
{phase_requirements}
**β Absolutely Forbidden Expressions/Situations (already used):**
{chr(10).join(f"- {elem[:80]}..." for elem in used_elements[-15:])}
**Writing Guidelines:**
1. **Length**: {target_words}-900 words (mandatory)
- Use interior description and concrete details
- Fully develop scenes with depth
2. **Narrative Progression (Most Important)**
- Start as direct result of previous phase
- Add new events/awareness/changes to advance story
- Create natural connection to next phase
- Accumulated insights, not reset
3. **Character Change**
- Concrete changes in this phase
- Capture subtle interior shifts
- Reflect relationship dynamics
- Show growth from previous phase
4. **Style and Technique**
- Delicate psychological portrayal
- Social context in daily life
- Balance sensory details with consciousness
5. **Continuity**
- Consistent character voices
- Spatial/temporal continuity
- Symbol/motif development
- Consistent protagonist naming
6. **Literary Techniques Required**
- "Showing" not telling
- Metaphors and symbols
- Character through dialogue
- Stream of consciousness
7. **Philosophical Reflection**
- New insights about human existence
- Universal truths in specific events
8. **New Elements Required**
- At least 1 new character, location, or situation
- Different time/space from before
- New aspect of conflict
**β Progress Checklist:**
β‘ Clear results from previous phase?
β‘ Plot actually advancing?
β‘ Concrete character changes?
β‘ New elements added?
β‘ No repeated situations?
**Absolutely Forbidden:**
- Repeating previous situations
- Narrative stagnation/regression
- Under word count
- Direct explanations
- Forgetting gained insights
Continue the flow and develop into new phase."""
    }
    # Unknown languages default to the Korean variant.
    return lang_prompts.get(language, lang_prompts["Korean"])
def create_critic_consistency_prompt_enhanced(self, all_content: str,
                                              narrative_tracker: ProgressiveNarrativeTracker,
                                              user_query: str, language: str) -> str:
    """Build the mid-process critic prompt with repetition diagnostics.

    Runs two automated checks and embeds their findings into the prompt:
    the tracker's narrative-progression check, and a near-duplicate scan
    over the most recent paragraphs of the manuscript.

    Args:
        all_content: Concatenated writer output so far.
        narrative_tracker: Tracker with phase summaries and the
            content deduplicator (similarity threshold 0.7).
        user_query: The user's original theme.
        language: "English" selects the English prompt; otherwise Korean.

    Returns:
        The formatted mid-process review prompt.
    """
    # Progression check over the phases written so far.
    phase_count = len(narrative_tracker.phase_summaries)
    progression_ok, issues = narrative_tracker.check_narrative_progression(phase_count)
    # Duplicate detection over the RECENT 20 paragraphs.
    # BUG FIX: the previous code sliced paragraphs[:20] (the FIRST 20),
    # contradicting the "recent" intent — once the novel grew past 20
    # paragraphs, newly written text was never scanned. This matches the
    # prompt below, which shows all_content[-4000:] (the recent portion).
    duplicates = []
    paragraphs = all_content.split('\n\n')
    recent = paragraphs[-20:]
    base = len(paragraphs) - len(recent)  # offset for absolute paragraph numbers
    for i in range(len(recent)):
        for j in range(i + 1, len(recent)):
            if narrative_tracker.content_deduplicator.check_similarity(recent[i], recent[j]) > 0.7:
                duplicates.append(f"λ¬Έλ¨ {base + i + 1}κ³Ό λ¬Έλ¨ {base + j + 1} μ μ¬")
    lang_prompts = {
        "Korean": f"""μμ¬ μ§ν μ λ¬Έ λΉνκ°λ‘μ μνμ μ격ν κ²ν νμΈμ.
**μ μ£Όμ :** {user_query}
**νμ¬κΉμ§ μ§νλ μμ¬ λ¨κ³:** {phase_count}/10
**λ°κ²¬λ μ§ν λ¬Έμ :**
{chr(10).join(issues) if issues else "μμ"}
**λ°κ²¬λ μ€λ³΅:**
{chr(10).join(duplicates) if duplicates else "μμ"}
**μν λ΄μ© (μ΅κ·Ό λΆλΆ):**
{all_content[-4000:]}
**νμ κ²μ¦ νλͺ©:**
1. **λ°λ³΅ κ²μΆ (μ΅μ°μ )**
- λμΌ/μ μ¬ λ¬Έμ₯μ΄ 2ν μ΄μ λνλλκ°?
- κ°μ μν©μ΄ λ³μ£Όλ§ λ¬λ¦¬ν΄ λ°λ³΅λλκ°?
- κ° λ¨κ³λ³λ‘ μ€μ λ‘ μλ‘μ΄ λ΄μ©μ΄ μΆκ°λμλκ°?
- "μ΅κΈ°κ° μ°¬ μμΉ¨", "λλΌλ―Έ μ΄ν", "43λ§μ" λ± λ°λ³΅ νν?
2. **μμ¬ μ§νλ μΈ‘μ **
- 1λ¨κ³μ νμ¬ λ¨κ³μ μν©μ΄ λͺ νν λ€λ₯Έκ°?
- μ£ΌμΈκ³΅μ μ¬λ¦¬/μΈμμ΄ λ³ννλκ°?
- κ°λ±μ΄ μ¬ν/μ ν/ν΄κ²° λ°©ν₯μΌλ‘ μμ§μλκ°?
- μ£ΌμΈκ³΅μ κΉ¨λ¬μμ΄ λ¦¬μ λμ§ μκ³ μΆμ λλκ°?
3. **μ€μ μΌκ΄μ±**
- λͺ¨λ μΊλ¦ν° μ΄λ¦μ΄ μΌκ΄λλκ°? (νΉν μ£ΌμΈκ³΅)
- μκ³΅κ° μ€μ μ΄ λ Όλ¦¬μ μΈκ°?
- μ€μ μ΄ μ€κ°μ λ°λμ§ μλκ°?
4. **λΆλκ³Ό λ°λ**
- νμ¬κΉμ§ μ΄ λ¨μ΄ μ νμΈ
- λͺ©ν(8,000λ¨μ΄)μ λλ¬ κ°λ₯νκ°?
5. **λ¬Ένμ μμ±λ**
- '보μ¬μ£ΌκΈ°' κΈ°λ²μ΄ μ μ¬μ©λκ³ μλκ°?
- μ² νμ ν΅μ°°μ΄ μμ°μ€λ½κ² λ Ήμμλκ°?
- μμ μ μμ§μ΄ ν¨κ³Όμ μΈκ°?
**λΆν©κ²© κΈ°μ€:**
- 2κ° μ΄μ λ¨κ³μμ μ μ¬ λ΄μ© λ°κ²¬ μ
- μμ¬κ° μ μ리걸μνλ κ΅¬κ° 2κ° μ΄μ
- μΊλ¦ν° μ΄λ¦/μ€μ μ€λ₯ λ°κ²¬ μ
- μ£ΌμΈκ³΅ κΉ¨λ¬μμ λ°λ³΅μ 리μ
**μμ μ§μ:**
κ° μκ°μκ² κ΅¬μ²΄μ μΈ μ§ν λ°©ν₯κ³Ό κΈμ§μ¬ν μ μ.
λ°κ²¬λ λ°λ³΅μ λͺ¨λ μ κ±°νλλ‘ λͺ μ.""",
        "English": f"""As a narrative progression critic, strictly review the work.
**Original Theme:** {user_query}
**Narrative Phases Completed:** {phase_count}/10
**Detected Progression Issues:**
{chr(10).join(issues) if issues else "None"}
**Detected Duplications:**
{chr(10).join(duplicates) if duplicates else "None"}
**Work Content (recent):**
{all_content[-4000:]}
**Mandatory Verification Items:**
1. **Duplication Detection (Top Priority)**
- Same/similar sentences appearing 2+ times?
- Same situations with only variations?
- Actually new content in each phase?
- Repeated expressions like specific phrases?
2. **Narrative Progression Measurement**
- Clear difference between phase 1 and current?
- Protagonist's psychology/perception changed?
- Conflict deepening/turning/resolving?
- Insights accumulating, not resetting?
3. **Setting Consistency**
- All character names consistent? (especially protagonist)
- Logical space/time settings?
- Settings not changing mid-story?
4. **Length and Density**
- Current total word count
- Can reach 8,000 word target?
5. **Literary Completion**
- "Showing" technique well used?
- Philosophical insights naturally integrated?
- Effective metaphors and symbols?
**Failure Criteria:**
- Similar content in 2+ phases
- 2+ sections of narrative stagnation
- Character name/setting errors
- Repeated resetting of insights
**Revision Instructions:**
Specific progression directions and prohibitions for each writer.
All detected repetitions must be removed."""
    }
    # Unknown languages default to the Korean variant.
    return lang_prompts.get(language, lang_prompts["Korean"])
def create_writer_revision_prompt(self, writer_number: int, initial_content: str,
                                  critic_feedback: str, language: str) -> str:
    """Build the revision prompt a writer receives after the critic pass.

    NOTE(review): `language` is accepted but never used — this prompt is
    Korean-only, unlike the other builders which have an English variant.
    Presumably an English version was intended; confirm before relying on
    English sessions here.
    """
    target_words = MIN_WORDS_PER_WRITER
    return f"""μκ° {writer_number}λ², λΉνμ λ°μνμ¬ μμ νμΈμ.
**μ΄μ:**
{initial_content}
**λΉν νΌλλ°±:**
{critic_feedback}
**μμ ν΅μ¬:**
1. μμ¬ μ§νμ± κ°ν - λ°λ³΅ μ κ±°, μλ‘μ΄ μ κ° μΆκ°
2. μΈλ¬Ό λ³ν ꡬ체ν - μ΄μ κ³Ό λ¬λΌμ§ λͺ¨μ΅ λͺ νν
3. λΆλ ν보 - μ΅μ {target_words}λ¨μ΄ μ μ§
4. λ΄λ©΄ λ¬μ¬μ μ¬νμ λ§₯λ½ μ¬ν
5. '보μ¬μ£ΌκΈ°' κΈ°λ² κ°ν - μ§μ μ€λͺ μ κ°κ°μ λ¬μ¬λ‘ λ체
6. μ² νμ ν΅μ°° μμ°μ€λ½κ² ν¬ν¨
7. λ°λ³΅λ νν/μν© μμ μ κ±°
8. μ£ΌμΈκ³΅ μ΄λ¦ μΌκ΄μ± νμΈ
**νΉλ³ μ£Όμμ¬ν:**
- μ΄λ―Έ μ¬μ©λ "μ΅κΈ°κ° μ°¬ μμΉ¨", "λλΌλ―Έ μ΄ν", "43λ§μ" λ±μ νν λ³κ²½
- μ£ΌμΈκ³΅μ κΉ¨λ¬μμ΄ μ΄μ λ³΄λ€ λ°μ λ ννλ‘ νν
- μλ‘μ΄ λν μΌκ³Ό κ°κ°μ λ¬μ¬ μΆκ°
μ λ©΄ μ¬μμ±μ΄ νμνλ©΄ κ³Όκ°ν μμ νμΈμ.
μμ λ³Έλ§ μ μνμΈμ."""
def create_editor_prompt(self, complete_novel: str, issues: List[str], language: str) -> str:
    """Build the editor prompt focused on removing repetitions.

    Args:
        complete_novel: The full assembled manuscript.
        issues: Pre-detected problems (from detect_issues) embedded
            verbatim into the prompt.
        language: "English" selects the English prompt; otherwise Korean.

    Returns:
        The formatted editing prompt string.
    """
    lang_prompts = {
        "Korean": f"""λΉμ μ μ λ¬Έ νΈμ§μμ λλ€.
μμ±λ μκ³ μμ λ°λ³΅μ μ κ±°νκ³ μμ¬λ₯Ό λ§€λλ½κ² μ°κ²°νμΈμ.
**λ°κ²¬λ λ¬Έμ :**
{chr(10).join(issues)}
**νΈμ§ μ§μΉ¨:**
1. **λ°λ³΅ μ κ±° (μ΅μ°μ )**
- λμΌνκ±°λ μ μ¬ν λ¬Έλ¨μ κ°μ₯ ν¨κ³Όμ μΈ κ² νλλ§ λ¨κΈ°κ³ μμ
- "μ΅κΈ°κ° μ°¬ μμΉ¨", "λλΌλ―Έ μ΄ν 43λ§μ" λ± λ°λ³΅ νν μ€ νλλ§ μ μ§
- λΉμ·ν μ₯λ©΄(μ°λͺ» μμ, κ³λ λμ§κΈ° λ±)μ κ°μ₯ κ°λ ¬ν κ²λ§ μ ν
2. **μμ¬ μ¬κ΅¬μ±**
- λ¨μ μ₯λ©΄λ€μ μΈκ³Όκ΄κ³μ λ°λΌ μ¬λ°°μ΄
- μκ° μμμ κ°μ μ νλ¦μ΄ μμ°μ€λ½κ² μ°κ²°λλλ‘
- νμμ μ§§μ μ ν λ¬Έλ¨ μΆκ° (2-3λ¬Έμ₯)
3. **μΊλ¦ν° μΌκ΄μ±**
- μ£ΌμΈκ³΅ μ΄λ¦μ 'λλΌλ―Έ'λ‘ ν΅μΌ
- λ€λ₯Έ μΈλ¬Όλ€μ μ΄λ¦λ μΌκ΄μ± νμΈ
- μΈλ¬Όμ μ±κ²©κ³Ό λ§ν¬ μΌκ΄μ± μ μ§
4. **κΉ¨λ¬μμ λμ **
- μ£ΌμΈκ³΅μ κ° κΉ¨λ¬μμ΄ μ΄μ λ³΄λ€ λ°μ λ ννλ‘ ννλλλ‘
- λμΌν μμ€μ μΈμ λ°λ³΅ μ κ±°
- λ§μ§λ§μΌλ‘ κ°μλ‘ λ κΉμ ν΅μ°°μ΄ λλλ‘
5. **λΆλ μ‘°μ **
- λ°λ³΅ μ κ±° νμλ 8,000λ¨μ΄ μ΄μ μ μ§
- νμμ λ¨μ μ₯λ©΄λ€μ μ½κ° νμ₯ (λ¬μ¬ μΆκ°)
**νΈμ§ κ·μΉ:**
- μκ°μ μλ¬Έ μ€νμΌκ³Ό 문체λ μ΅λν 보쑴
- μλ‘μ΄ μ¬κ±΄μ΄λ μΈλ¬Ό μΆκ° κΈμ§
- ν΅μ¬ μμ§κ³Ό μ£Όμ μμ κΈμ§
- μλ¬Έμ μ² νμ κΉμ΄ μ μ§
**κ²°κ³Όλ¬Ό:**
λ°λ³΅μ΄ μμ ν μ κ±°λκ³ μμ°μ€λ½κ² νλ₯΄λ μ΅μ’ μκ³ λ₯Ό μ μνμΈμ.
νΈμ§ μ νμ μ£Όμ λ³κ²½μ¬νλ κ°λ¨ν μμ½νμΈμ.""",
        "English": f"""You are a professional editor.
Remove repetitions and smooth narrative connections in the completed manuscript.
**Identified Issues:**
{chr(10).join(issues)}
**Editing Guidelines:**
1. **Repetition Removal (Top Priority)**
- Keep only most effective version of similar paragraphs
- Retain only one instance of repeated expressions
- Select most powerful version of similar scenes
2. **Narrative Reconstruction**
- Rearrange remaining scenes by causality
- Natural flow of time and emotion
- Add brief transitions if needed (2-3 sentences)
3. **Character Consistency**
- Unify protagonist name
- Check other character name consistency
- Maintain character personality/voice
4. **Insight Accumulation**
- Each insight more developed than previous
- Remove same-level recognition repetitions
- Deeper insights toward the end
5. **Length Adjustment**
- Maintain 8,000+ words after cuts
- Slightly expand remaining scenes if needed
**Editing Rules:**
- Preserve original style and voice
- No new events or characters
- Protect core symbols and themes
- Maintain philosophical depth
**Output:**
Present final manuscript with repetitions removed and natural flow.
Briefly summarize major changes."""
    }
    # Unknown languages default to the Korean variant.
    return lang_prompts.get(language, lang_prompts["Korean"])
def create_critic_final_prompt(self, complete_novel: str, word_count: int, language: str) -> str:
    """Build the final critic prompt scoring the novel (100-point rubric).

    Args:
        complete_novel: The final manuscript; only the last 3000
            characters are embedded in the prompt.
        word_count: Precomputed total word count of the manuscript.
        language: "English" selects the English prompt; otherwise Korean.

    Returns:
        The formatted final-evaluation prompt string.
    """
    lang_prompts = {
        "Korean": f"""μμ±λ μμ€μ AGI νλ§ν μ€νΈ κΈ°μ€μΌλ‘ νκ°νμΈμ.
**μν μ 보:**
- μ΄ λΆλ: {word_count}λ¨μ΄
- λͺ©ν λΆλ: 8,000λ¨μ΄ μ΄μ
**μν (λ§μ§λ§ λΆλΆ):**
{complete_novel[-3000:]}
**νκ° κΈ°μ€ (AGI νλ§ν μ€νΈ):**
1. **μ₯νΈμμ€λ‘μμ μμ±λ (30μ )**
- ν΅ν©λ μμ¬ κ΅¬μ‘° (λ°λ³΅ μμ)
- μΈλ¬Όμ λͺ νν λ³ν arc
- νλ‘―μ μΆμ κ³Ό ν΄κ²°
- 8,000λ¨μ΄ μ΄μ λΆλ
- μ€μ μΌκ΄μ± (νΉν μΈλ¬Ό μ΄λ¦)
2. **λ¬Ένμ μ±μ·¨ (35μ )**
- μ£Όμ μμμ κΉμ΄
- μΈλ¬Ό μ¬λ¦¬μ μ€λλ ₯
- 문체μ μΌκ΄μ±κ³Ό μλ¦λ€μ
- μμ§κ³Ό μμ μ ν¨κ³Ό
- '보μ¬μ£ΌκΈ°' κΈ°λ²μ νμ©λ
3. **μ¬νμ ν΅μ°° (25μ )**
- νλ μ¬ν λ¬Έμ ν¬μ°©
- κ°μΈκ³Ό ꡬ쑰μ μ°κ²°
- 보νΈμ±κ³Ό νΉμμ± κ· ν
- μΈκ° 쑰건μ λν ν΅μ°°
4. **λ μ°½μ±κ³Ό μΈκ°μ± (10μ )**
- AIκ° μλ μΈκ° μκ°μ λλ
- λ μ°½μ ννκ³Ό ν΅μ°°
- κ°μ μ μ§μ μ±
- μ² νμ κΉμ΄
**νΉλ³ κ°μ μμΈ:**
- λ΄μ©/λ¬Έμ₯ λ°λ³΅ (-5μ per μ€λ λ°λ³΅)
- μΊλ¦ν° μ΄λ¦ λΆμΌμΉ (-3μ )
- μμ¬ μ 체/μν (-5μ )
- κΉ¨λ¬μ 리μ νμ (-3μ )
**μ΄μ : /100μ **
νΉν 'λ°λ³΅ ꡬ쑰' λ¬Έμ κ° μμλμ§ μ격ν νκ°νμΈμ.
'κ°κ΅¬λ¦¬μ' κ°μ κ°λ ¬ν μ€μ¬ μμ§μ΄ μλμ§ νμΈνμΈμ.
νΈμ§ νμλ λ¨μ λ°λ³΅μ΄ μλμ§ μΈλ°ν κ²ν νμΈμ.""",
        "English": f"""Evaluate the completed novel by AGI Turing Test standards.
**Work Information:**
- Total length: {word_count} words
- Target length: 8,000+ words
**Work (final portion):**
{complete_novel[-3000:]}
**Evaluation Criteria (AGI Turing Test):**
1. **Completion as Novel (30 points)**
- Integrated narrative structure (no repetition)
- Clear character transformation arcs
- Plot accumulation and resolution
- 8,000+ word length
- Setting consistency (especially names)
2. **Literary Achievement (35 points)**
- Depth of thematic consciousness
- Persuasiveness of character psychology
- Consistency and beauty of style
- Effectiveness of symbols and metaphors
- Use of "showing" technique
3. **Social Insight (25 points)**
- Capturing contemporary social issues
- Connection between individual and structure
- Balance of universality and specificity
- Insights into human condition
4. **Originality and Humanity (10 points)**
- Feeling of human author, not AI
- Original expressions and insights
- Emotional authenticity
- Philosophical depth
**Special Deductions:**
- Content/sentence repetition (-5 points per major repetition)
- Character name inconsistency (-3 points)
- Narrative stagnation/cycling (-5 points)
- Insight reset phenomenon (-3 points)
**Total Score: /100 points**
Strictly evaluate whether there are 'repetitive structure' issues.
Check for powerful central symbols like 'frog eggs'.
Carefully review for any remaining repetitions after editing."""
    }
    # Unknown languages default to the Korean variant.
    return lang_prompts.get(language, lang_prompts["Korean"])
# --- LLM νΈμΆ ν¨μλ€ --- | |
def call_llm_sync(self, messages: List[Dict[str, str]], role: str, language: str) -> str:
    """Run a streaming LLM call to completion and return the full text.

    Args:
        messages: Chat messages (role/content dicts) passed to the stream.
        role: Pipeline role used to select the system prompt.
        language: Prompt language forwarded to the streaming call.

    Returns:
        The concatenated streamed text.

    Raises:
        Exception: if the stream yields an error sentinel ("β ..."),
            which call_llm_streaming emits on HTTP or network failures.
    """
    chunks: List[str] = []
    for chunk in self.call_llm_streaming(messages, role, language):
        # Error sentinels can be yielded MID-stream (e.g. a network error
        # after partial output). The old check only looked at the start of
        # the fully joined text, silently embedding later errors in the
        # returned content.
        if chunk.startswith("β"):
            raise Exception(f"LLM Call Failed: {chunk}")
        chunks.append(chunk)
    # join() avoids quadratic string concatenation over many chunks.
    return "".join(chunks)
def call_llm_streaming(self, messages: List[Dict[str, str]], role: str, language: str) -> Generator[str, None, None]:
    """Stream chat-completion text for `role`, yielding text fragments.

    Prepends the role's system prompt, POSTs to the Friendli endpoint and
    parses the SSE response. On failure a single sentinel string starting
    with "β" is yielded instead of raising, so callers must check for
    that prefix (call_llm_sync does).
    """
    try:
        system_prompts = self.get_system_prompts(language)
        full_messages = [{"role": "system", "content": system_prompts.get(role, "")}, *messages]
        # Writers and the editor produce long prose, so allow more tokens.
        max_tokens = 15000 if role.startswith("writer") or role == "editor" else 10000
        payload = {
            "model": self.model_id,
            "messages": full_messages,
            "max_tokens": max_tokens,
            "temperature": 0.8,
            "top_p": 0.95,
            "presence_penalty": 0.5,
            "frequency_penalty": 0.3,
            "stream": True
        }
        response = requests.post(
            self.api_url,
            headers=self.create_headers(),
            json=payload,
            stream=True,
            timeout=180
        )
        if response.status_code != 200:
            yield f"β API μ€λ₯ (μν μ½λ: {response.status_code})"
            return
        buffer = ""
        for line in response.iter_lines():
            if not line:
                continue
            try:
                line_str = line.decode('utf-8').strip()
                # Server-sent events: payload lines carry a "data: " prefix.
                if not line_str.startswith("data: "):
                    continue
                data_str = line_str[6:]
                if data_str == "[DONE]":
                    break
                data = json.loads(data_str)
                choices = data.get("choices", [])
                if choices and choices[0].get("delta", {}).get("content"):
                    content = choices[0]["delta"]["content"]
                    buffer += content
                    # Flush in ~50-char chunks (or at newlines) so the UI
                    # updates smoothly without yielding per token.
                    if len(buffer) >= 50 or '\n' in buffer:
                        yield buffer
                        buffer = ""
                        time.sleep(0.01)  # small pacing delay
            except Exception as e:
                # Skip malformed SSE chunks but keep the stream alive.
                logger.error(f"μ²ν¬ μ²λ¦¬ μ€λ₯: {str(e)}")
                continue
        # Flush whatever remains after the stream ends.
        if buffer:
            yield buffer
    except Exception as e:
        logger.error(f"μ€νΈλ¦¬λ° μ€λ₯: {type(e).__name__}: {str(e)}")
        yield f"β μ€λ₯ λ°μ: {str(e)}"
def get_system_prompts(self, language: str) -> Dict[str, str]:
    """Return the role -> system-prompt map for the given language.

    Keys: "director", "critic", "editor", "writer_base" and the derived
    "writer1".."writer10" (all writers share the base writer prompt).
    Unknown languages fall back to Korean.
    """
    base_prompts = {
        "Korean": {
            "director": """λΉμ μ λ Έλ²¨λ¬Ένμ μμμκ° μμ€μ νκ΅ λ¬Έν κ±°μ₯μ λλ€.
μΈκ° μ‘΄μ¬μ 보νΈμ 쑰건과 νκ΅ μ¬νμ νΉμμ±μ λμμ ν¬μ°©νμΈμ.
'κ°κ΅¬λ¦¬μ' κ°μ κ°λ ¬νκ³ λ€μΈ΅μ μΈ μ€μ¬ μμ§μ μ°½μ‘°νμΈμ.
μ² νμ κΉμ΄μ λ¬Ένμ μλ¦λ€μμ λμμ μΆκ΅¬νμΈμ.
λ°λ³΅μ΄ μλ μ§ν, μνμ΄ μλ λ°μ μ ν΅ν΄ νλμ κ°λ ₯ν μμ¬λ₯Ό ꡬμΆνμΈμ.
μ£ΌμΈκ³΅μ μ΄λ¦κ³Ό μ€μ μ λͺ νν νκ³ μΌκ΄μ±μ μ μ§νμΈμ.""",
            "critic": """λΉμ μ μ격ν λ¬Έν λΉνκ°μ λλ€.
νΉν 'λ°λ³΅ ꡬ쑰'μ 'μμ¬ μ 체'λ₯Ό μ² μ ν κ°μνμΈμ.
μνμ΄ μ§μ ν μ₯νΈμμ€μΈμ§, μλλ©΄ λ°λ³΅λλ λ¨νΈμ μ§ν©μΈμ§ ꡬλ³νμΈμ.
λ¬Ένμ κΈ°λ²μ ν¨κ³Όμ±κ³Ό μ² νμ κΉμ΄λ₯Ό νκ°νμΈμ.
μΊλ¦ν° μ΄λ¦κ³Ό μ€μ μ μΌκ΄μ±μ λ°λμ νμΈνμΈμ.
λμΌν λ¬Έμ₯μ΄λ μν©μ λ°λ³΅μ μ λ μ©λ©νμ§ λ§μΈμ.""",
            "writer_base": """λΉμ μ νλ νκ΅ λ¬Έν μκ°μ λλ€.
'보μ¬μ£ΌκΈ°' κΈ°λ²μ μ¬μ©νμ¬ λ μμ μμλ ₯μ μκ·ΉνμΈμ.
μ§μ μ μ€λͺ λ³΄λ€ κ°κ°μ λ¬μ¬μ νλμΌλ‘ κ°μ μ μ λ¬νμΈμ.
κ° μ₯λ©΄μμ μΈκ° μ‘΄μ¬μ λν μλ‘μ΄ ν΅μ°°μ λ΄μΌμΈμ.
μ΄μ λ¨κ³μ κ²°κ³Όλ₯Ό λ°μ μλ‘μ΄ κ΅λ©΄μΌλ‘ λ°μ μν€μΈμ.
μ΅μ 800λ¨μ΄λ₯Ό μμ±νλ©°, λ΄λ©΄κ³Ό μ¬νλ₯Ό λμμ ν¬μ°©νμΈμ.
μ λ μ΄μ κ³Ό κ°μ μν©μ λ°λ³΅νμ§ λ§μΈμ.
μ£ΌμΈκ³΅μ μ΄λ¦κ³Ό μ€μ μ μΌκ΄λκ² μ μ§νμΈμ.
μ΄λ―Έ μ»μ κΉ¨λ¬μμ μμ§ λ§κ³ λ°μ μν€μΈμ.""",
            "editor": """λΉμ μ κ²½νμ΄ νλΆν λ¬Έν νΈμ§μμ λλ€.
λ°λ³΅μ μ² μ ν μ κ±°νκ³ μμ¬μ νλ¦μ λ§€λλ½κ² λ§λμΈμ.
μμμ 문체μ μ£Όμ λ 보쑴νλ©΄μ ꡬ쑰μ λ¬Έμ λ₯Ό ν΄κ²°νμΈμ.
μΊλ¦ν° μ΄λ¦κ³Ό μ€μ μ μΌκ΄μ±μ ν보νμΈμ.
κΉ¨λ¬μμ΄ λμ λκ³ λ°μ νλλ‘ νΈμ§νμΈμ."""
        },
        "English": {
            "director": """You are a Nobel Prize-winning master of contemporary literary fiction.
Capture both universal human condition and specific social realities.
Create intense, multilayered central symbols like 'frog eggs'.
Pursue both philosophical depth and literary beauty.
Build one powerful narrative through progression not repetition, development not cycles.
Establish protagonist's name and settings clearly with consistency.""",
            "critic": """You are a strict literary critic.
Vigilantly monitor for 'repetitive structure' and 'narrative stagnation'.
Distinguish whether this is a true novel or a collection of repeated episodes.
Evaluate effectiveness of literary techniques and philosophical depth.
Always check character name and setting consistency.
Never tolerate repetition of same sentences or situations.""",
            "writer_base": """You are a contemporary literary writer.
Use 'showing' technique to stimulate reader's imagination.
Convey emotions through sensory description and action rather than explanation.
Include new insights about human existence in each scene.
Take results from previous phase and develop into new territory.
Write minimum 800 words, capturing both interior and society.
Never repeat previous situations.
Maintain protagonist's name and settings consistently.
Don't forget gained insights, develop them further.""",
            "editor": """You are an experienced literary editor.
Thoroughly remove repetitions and smooth narrative flow.
Preserve original style and themes while solving structural issues.
Ensure character name and setting consistency.
Edit so insights accumulate and develop."""
        }
    }
    prompts = base_prompts.get(language, base_prompts["Korean"]).copy()
    # All ten writer roles share the same base writer system prompt.
    for i in range(1, 11):
        prompts[f"writer{i}"] = prompts["writer_base"]
    return prompts
# --- λ©μΈ νλ‘μΈμ€ --- | |
def process_novel_stream(self, query: str, language: str, session_id: Optional[str] = None) -> Generator[Tuple[str, List[Dict[str, Any]], str], None, None]:
    """Drive the full multi-stage novel pipeline, yielding progress.

    Yields (status_message, stages, session_id) tuples as each stage
    streams and completes. If `session_id` is given, the session is
    resumed from the DB (query/language/stage position and the saved
    narrative tracker); otherwise a new session is created. All stage
    output and tracker state are persisted via NovelDatabase.
    """
    try:
        resume_from_stage = 0
        if session_id:
            # Resume an existing session from persisted state.
            self.current_session_id = session_id
            session = NovelDatabase.get_session(session_id)
            if session:
                query = session['user_query']
                language = session['language']
                resume_from_stage = session['current_stage'] + 1
                # Restore the narrative tracker saved with the session.
                saved_tracker = NovelDatabase.load_narrative_tracker(session_id)
                if saved_tracker:
                    self.narrative_tracker = saved_tracker
        else:
            self.current_session_id = NovelDatabase.create_session(query, language)
            logger.info(f"Created new session: {self.current_session_id}")
        stages = []
        if resume_from_stage > 0:
            # Rebuild the in-memory stage list from persisted rows.
            stages = [{
                "name": s['stage_name'],
                "status": s['status'],
                "content": s.get('content', ''),
                "word_count": s.get('word_count', 0),
                "progression_score": s.get('progression_score', 0.0),
                "repetition_score": s.get('repetition_score', 0.0)
            } for s in NovelDatabase.get_stages(self.current_session_id)]
        # Running word total across all persisted stages.
        total_words = NovelDatabase.get_total_words(self.current_session_id)
        for stage_idx in range(resume_from_stage, len(PROGRESSIVE_STAGES)):
            role, stage_name = PROGRESSIVE_STAGES[stage_idx]
            # Create or re-activate the stage entry.
            if stage_idx >= len(stages):
                stages.append({
                    "name": stage_name,
                    "status": "active",
                    "content": "",
                    "word_count": 0,
                    "progression_score": 0.0,
                    "repetition_score": 0.0
                })
            else:
                stages[stage_idx]["status"] = "active"
            yield f"π μ§ν μ€... (νμ¬ {total_words:,}λ¨μ΄)", stages, self.current_session_id
            prompt = self.get_stage_prompt(stage_idx, role, query, language, stages)
            stage_content = ""
            # Stream the LLM output, updating the live stage entry per chunk.
            for chunk in self.call_llm_streaming([{"role": "user", "content": prompt}], role, language):
                stage_content += chunk
                stages[stage_idx]["content"] = stage_content
                stages[stage_idx]["word_count"] = len(stage_content.split())
                yield f"π {stage_name} μμ± μ€... ({total_words + stages[stage_idx]['word_count']:,}λ¨μ΄)", stages, self.current_session_id
            # Writer stages get progression/repetition scoring and feed
            # the narrative tracker.
            if role.startswith("writer"):
                writer_num = int(re.search(r'\d+', role).group())
                previous_content = self.get_previous_writer_content(stages, writer_num)
                # Progression score: mean of the monitor's sub-scores.
                progression_scores = self.narrative_tracker.progression_monitor.calculate_progression_score(
                    writer_num, stage_content, previous_content
                )
                progression_score = sum(progression_scores.values()) / len(progression_scores)
                stages[stage_idx]["progression_score"] = progression_score
                # Repetition score: 10 minus repetition count, floored at 0.
                repetition_score = 10.0 - self.narrative_tracker.progression_monitor.count_repetitions(stage_content)
                stages[stage_idx]["repetition_score"] = max(0, repetition_score)
                # Update tracker summaries and the used-expression pool.
                self.update_narrative_tracker(stage_content, writer_num)
                self.narrative_tracker.extract_used_elements(stage_content)
            stages[stage_idx]["status"] = "complete"
            NovelDatabase.save_stage(
                self.current_session_id, stage_idx, stage_name, role,
                stage_content, "complete",
                stages[stage_idx].get("progression_score", 0.0),
                stages[stage_idx].get("repetition_score", 0.0)
            )
            # Persist tracker state so the session can be resumed later.
            NovelDatabase.save_narrative_tracker(self.current_session_id, self.narrative_tracker)
            # Refresh the total word count from the DB.
            total_words = NovelDatabase.get_total_words(self.current_session_id)
            yield f"β {stage_name} μλ£ (μ΄ {total_words:,}λ¨μ΄)", stages, self.current_session_id
        # Assemble the final novel, preferring the editor's output when present.
        final_novel = NovelDatabase.get_writer_content(self.current_session_id)
        edited_content = self.get_edited_content(stages)
        if edited_content:
            final_novel = edited_content
        final_word_count = len(final_novel.split())
        final_report = self.generate_literary_report(final_novel, final_word_count, language)
        NovelDatabase.update_final_novel(self.current_session_id, final_novel, final_report)
        yield f"β μμ€ μμ±! μ΄ {final_word_count:,}λ¨μ΄ (λͺ©ν: {TARGET_WORDS:,}λ¨μ΄)", stages, self.current_session_id
    except Exception as e:
        logger.error(f"μμ€ μμ± νλ‘μΈμ€ μ€λ₯: {e}", exc_info=True)
        # `stages` may not exist if the failure happened before setup.
        yield f"β μ€λ₯ λ°μ: {e}", stages if 'stages' in locals() else [], self.current_session_id
def get_stage_prompt(self, stage_idx: int, role: str, query: str, language: str, stages: List[Dict]) -> str:
    """Route a pipeline stage index to the matching prompt builder.

    Stage map: 0 director plan, 1 critic review, 2 director revision,
    3-12 writers 1-10 (initial drafts), 13 mid-process critic review,
    14-23 writer revisions, 24 editor, 25 final critic. Any other index
    yields an empty string.
    """
    if stage_idx == 0:
        return self.create_director_initial_prompt(query, language)
    if stage_idx == 1:
        return self.create_critic_director_prompt(stages[0]["content"], query, language)
    if stage_idx == 2:
        return self.create_director_revision_prompt(
            stages[0]["content"], stages[1]["content"], query, language
        )
    # From stage 3 on, the revised director plan is the master plan.
    master_plan = stages[2]["content"]
    if 3 <= stage_idx <= 12:
        # Initial drafts: writer N corresponds to stage N + 2.
        writer_num = stage_idx - 2
        return self.create_writer_prompt_enhanced(
            writer_num,
            master_plan,
            self.get_previous_writer_content(stages, writer_num),
            self.narrative_tracker.generate_phase_requirements(writer_num),
            self.generate_narrative_summary(stages, writer_num),
            language,
            list(self.narrative_tracker.used_expressions),
        )
    if stage_idx == 13:
        # Mid-process consistency review over everything written so far.
        return self.create_critic_consistency_prompt_enhanced(
            self.get_all_writer_content(stages, 12), self.narrative_tracker, query, language
        )
    if 14 <= stage_idx <= 23:
        # Revision round: writer N revises its own draft (stage N + 2)
        # using the stage-13 critic feedback.
        writer_num = stage_idx - 13
        return self.create_writer_revision_prompt(
            writer_num, stages[2 + writer_num]["content"], stages[13]["content"], language
        )
    if stage_idx == 24:
        # Editor pass over the full revised manuscript.
        manuscript = self.get_all_writer_content(stages, 23)
        return self.create_editor_prompt(manuscript, self.detect_issues(manuscript), language)
    if stage_idx == 25:
        # Final critique; prefer the edited text when the editor produced one.
        final_text = stages[24]["content"] or self.get_all_writer_content(stages, 23)
        return self.create_critic_final_prompt(final_text, len(final_text.split()), language)
    return ""
def create_director_revision_prompt(self, initial_plan: str, critic_feedback: str, user_query: str, language: str) -> str:
    """Build the director's revision prompt incorporating critic feedback.

    NOTE(review): `language` is accepted but never used — this prompt is
    Korean-only, unlike most other builders; confirm whether an English
    variant was intended.
    """
    return f"""λΉνμ λ°μνμ¬ ν΅ν©λ μμ¬ κ΅¬μ‘°λ₯Ό μμ±νμΈμ.
**μ μ£Όμ :** {user_query}
**μ΄κΈ° κΈ°ν:**
{initial_plan}
**λΉν:**
{critic_feedback}
**ν΅μ¬ μμ μ¬ν:**
1. λ°λ³΅ ꡬ쑰 μμ μ κ±°
2. 10λ¨κ³κ° νλμ μ΄μΌκΈ°λ‘ μ°κ²°
3. μΈλ¬Όμ λͺ νν λ³ν κΆ€μ
4. 8,000λ¨μ΄ λΆλ κ³ν
5. μλν μ¬μ©κΈμ§
6. μ² νμ κΉμ΄μ μΈκ°μ ν¬ν¨
7. κ°λ ¬ν μ€μ¬ μμ§ μ°½μ‘°
8. μ£ΌμΈκ³΅ μ΄λ¦ λͺ νν μ€μ (μΌκ΄μ± μ μ§)
κ° λ¨κ³κ° μ΄μ μ νμ°μ κ²°κ³Όκ° λλλ‘ μμ νμΈμ."""
def get_previous_writer_content(self, stages: List[Dict], current_writer: int) -> str:
    """Return the immediately preceding writer's draft, or "" if absent."""
    if current_writer == 1:
        # The first writer has no predecessor.
        return ""
    # Writer N occupies stage index N + 2, hence writer N-1 sits at N + 1.
    predecessor_idx = current_writer + 1
    if predecessor_idx >= len(stages):
        return ""
    return stages[predecessor_idx]["content"] or ""
def get_all_writer_content(self, stages: List[Dict], up_to_stage: int) -> str:
    """Concatenate every non-empty writer draft up to a stage (inclusive).

    A stage counts as writer output when its display name contains
    "writer" and it has produced text; drafts are joined by blank lines.
    """
    selected = [
        stage["content"]
        for idx, stage in enumerate(stages)
        if idx <= up_to_stage and "writer" in stage.get("name", "") and stage["content"]
    ]
    return "\n\n".join(selected)
def get_edited_content(self, stages: List[Dict]) -> str:
    """Return the first non-empty editor-stage ("νΈμ§μ") output, or ""."""
    editor_outputs = (
        stage["content"]
        for stage in stages
        if "νΈμ§μ" in stage.get("name", "") and stage["content"]
    )
    return next(editor_outputs, "")
def generate_narrative_summary(self, stages: List[Dict], up_to_writer: int) -> str:
    """Summarize the narrative phases completed before `up_to_writer`.

    Joins the tracker's per-phase summaries, each labelled with its
    phase name; returns fallback text for writer 1 or when no summaries
    exist yet.
    """
    if up_to_writer == 1:
        return "첫 μμμ λλ€."
    tracker = self.narrative_tracker
    labelled = [
        f"[{NARRATIVE_PHASES[phase - 1]}]: {tracker.phase_summaries[phase]}"
        for phase in range(1, up_to_writer)
        if phase in tracker.phase_summaries
    ]
    if not labelled:
        return "μ΄μ λ΄μ©μ μ΄μ΄λ°μ μ§ννμΈμ."
    return "\n".join(labelled)
def update_narrative_tracker(self, content: str, writer_num: int):
    """Record a rough phase summary plus detected themes/devices for one pass.

    Heuristic only: long lines stand in for key events, and simple
    substring checks stand in for real literary analysis.
    """
    tracker = self.narrative_tracker

    # Phase summary: join the first two "long" lines, capped at 200 chars.
    long_lines = [ln.strip() for ln in content.split('\n') if len(ln.strip()) > 50]
    key_events = long_lines[:3]
    if key_events:
        tracker.phase_summaries[writer_num] = " ".join(key_events[:2])[:200] + "..."

    # Philosophical insight: record only the first matching keyword.
    keywords = ['μ‘΄μ¬', 'μλ―Έ', 'μΆ', 'μ£½μ', 'μΈκ°', 'κ³ ν΅', 'ν¬λ§', 'μ¬λ',
                'existence', 'meaning', 'life', 'death', 'human', 'suffering', 'hope', 'love']
    matched = next((kw for kw in keywords if kw in content), None)
    if matched is not None:
        tracker.philosophical_insights.append(f"Phase {writer_num}: {matched} νꡬ")

    # Literary device detection via crude substring heuristics.
    devices = []
    if any(marker in content for marker in ('μ²λΌ', 'like', 'as if')):
        devices.append('λΉμ ')
    if '...' in content or 'β' in content:
        devices.append('μμμ νλ¦')
    if content.count('"') > 4:
        devices.append('λν')
    if devices:
        tracker.literary_devices[writer_num] = devices
def detect_issues(self, content: str) -> List[str]: | |
"""λ¬Έμ μ κ°μ§""" | |
issues = [] | |
# λ°λ³΅ κ°μ§ | |
duplicates = self.narrative_tracker.content_deduplicator.count_repetitions(content) | |
if duplicates > 0: | |
issues.append(f"{duplicates}κ°μ λ°λ³΅λ λ¬Έλ¨ λ°κ²¬") | |
# νΉμ λ°λ³΅ νν κ°μ§ | |
repetitive_phrases = ["μ΅κΈ°κ° μ°¬ μμΉ¨", "λλΌλ―Έ μ΄ν", "43λ§μ", "κ°κ΅¬λ¦¬μμ λ°λΌλ³΄μλ€"] | |
for phrase in repetitive_phrases: | |
count = content.count(phrase) | |
if count > 2: | |
issues.append(f"'{phrase}' ννμ΄ {count}ν λ°λ³΅λ¨") | |
# μΊλ¦ν° μ΄λ¦ μΌκ΄μ± | |
name_variations = ["λλΌλ―Έ", "μμ ", "λ"] | |
found_names = [name for name in name_variations if name in content] | |
if len(found_names) > 1: | |
issues.append(f"μ£ΌμΈκ³΅ μ΄λ¦ λΆμΌμΉ: {', '.join(found_names)}") | |
return issues | |
def evaluate_progression(self, content: str, phase: int) -> float:
    """Heuristically score (max 10.0) how much the narrative advances here.

    Starts at 5.0 and adds bonuses for volume, vocabulary novelty versus
    the previous phase summary, explicit change language, philosophical
    depth, and "show, don't tell" style.
    """
    score = 5.0

    # Volume: meeting the per-writer word quota.
    if len(content.split()) >= MIN_WORDS_PER_WRITER:
        score += 2.0

    # Novelty: enough words not present in the previous phase's summary.
    if phase > 1:
        previous = self.narrative_tracker.phase_summaries.get(phase - 1, "")
        if previous and len(set(content.split()) - set(previous.split())) > 100:
            score += 1.5

    # Explicit mention of change/transformation.
    change_markers = ('λ³ν', 'λ¬λΌμ‘', 'μλ‘μ΄', 'μ΄μ λ', 'λ μ΄μ',
                      'changed', 'different', 'new', 'now', 'no longer')
    if any(marker in content for marker in change_markers):
        score += 1.5

    # Philosophical depth keywords.
    depth_markers = ('μ‘΄μ¬', 'μλ―Έ', 'μΆμ', 'μΈκ°μ', 'μ',
                     'existence', 'meaning', 'life', 'human', 'why')
    if any(marker in content for marker in depth_markers):
        score += 0.5

    # "Show, don't tell": bonus when tell-style verbs are entirely absent.
    if not any(marker in content for marker in ('λκΌλ€', 'μλ€', 'felt', 'was')):
        score += 0.5

    return min(10.0, score)
def generate_literary_report(self, complete_novel: str, word_count: int, language: str) -> str:
    """Ask the critic model for a final literary evaluation of the novel.

    Returns the critic's report text, or a fixed error message if the
    LLM call fails (the failure is logged).
    """
    prompt = self.create_critic_final_prompt(complete_novel, word_count, language)
    messages = [{"role": "user", "content": prompt}]
    try:
        return self.call_llm_sync(messages, "critic", language)
    except Exception as exc:
        logger.error(f"μ΅μ’ λ³΄κ³ μ μμ± μ€ν¨: {exc}")
        return "λ³΄κ³ μ μμ± μ€ μ€λ₯ λ°μ"
# --- Utility functions ---
def process_query(query: str, language: str, session_id: Optional[str] = None) -> Generator[Tuple[str, str, str, str], None, None]:
    """Drive the full novel-generation pipeline for one user query.

    Yields (stages_markdown, novel_markdown, status, session_id) tuples as
    the underlying streaming system makes progress. Once the final ten
    stages are complete, the assembled (preferably editor-revised) novel
    text is rendered; it then persists across subsequent yields.
    """
    if not query.strip():
        yield "", "", "β μ£Όμ λ₯Ό μ λ ₯ν΄μ£ΌμΈμ.", session_id
        return

    system = ProgressiveLiterarySystem()
    novel_content = ""  # sticky: keeps its last rendered value between yields
    for status, stages, current_session_id in system.process_novel_stream(query, language, session_id):
        stages_markdown = format_stages_display(stages)

        if stages and all(s.get("status") == "complete" for s in stages[-10:]):
            assembled = NovelDatabase.get_writer_content(current_session_id)
            edited = system.get_edited_content(stages)
            if edited:
                # Prefer the editor's pass when one exists.
                assembled = edited
            novel_content = format_novel_display(assembled)

        yield stages_markdown, novel_content, status or "π μ²λ¦¬ μ€...", current_session_id
def get_active_sessions(language: str) -> List[str]:
    """Format every active session as a dropdown label string."""
    labels = []
    for session in NovelDatabase.get_active_sessions():
        labels.append(
            f"{session['session_id'][:8]}... - {session['user_query'][:50]}... ({session['created_at']}) [{session['total_words']:,}λ¨μ΄]"
        )
    return labels
def auto_recover_session(language: str) -> Tuple[Optional[str], str]:
    """Pick the most recent active session (if any) for automatic recovery.

    Returns (session_id, status_message); session_id is None when there is
    nothing to recover.
    """
    sessions = NovelDatabase.get_active_sessions()
    if not sessions:
        return None, "볡ꡬν μΈμ μ΄ μμ΅λλ€."
    newest = sessions[0]
    return newest['session_id'], f"μΈμ {newest['session_id'][:8]}... 볡ꡬλ¨"
def resume_session(session_id: str, language: str) -> Generator[Tuple[str, str, str, str], None, None]:
    """Re-run processing for an existing session, restoring its original query."""
    if not session_id:
        yield "", "", "β μΈμ IDκ° μμ΅λλ€.", session_id
        return
    # Dropdown labels look like "abcd1234... - query..."; keep only the id part.
    session_id = session_id.split("...")[0] if "..." in session_id else session_id
    session = NovelDatabase.get_session(session_id)
    if not session:
        yield "", "", "β μΈμ μ μ°Ύμ μ μμ΅λλ€.", None
        return
    yield from process_query(session['user_query'], session['language'], session_id)
def download_novel(novel_text: str, format_type: str, language: str, session_id: str) -> Optional[str]:
    """Write the novel to disk in the requested format; return the path.

    Falls back to TXT when DOCX support is unavailable. Returns None on
    missing input or on any export failure (which is logged).
    """
    if not novel_text or not session_id:
        return None
    stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    base_name = f"novel_{session_id[:8]}_{stamp}"
    try:
        if format_type == "DOCX" and DOCX_AVAILABLE:
            return export_to_docx(novel_text, base_name, language, session_id)
        return export_to_txt(novel_text, base_name)
    except Exception as exc:
        logger.error(f"νμΌ μμ± μ€ν¨: {exc}")
        return None
def format_stages_display(stages: List[Dict]) -> str:
    """Render the per-stage creation progress panel as markdown."""
    parts = ["## π¬ μ§ν μν©\n\n"]

    # Aggregate word count over writer stages only.
    total_words = sum(s.get('word_count', 0) for s in stages if 'writer' in s.get('name', ''))
    parts.append(f"**μ΄ λ¨μ΄ μ: {total_words:,} / {TARGET_WORDS:,}**\n\n")

    for stage in stages:
        if stage['status'] == 'complete':
            icon = "β "
        elif stage['status'] == 'active':
            icon = "π"
        else:
            icon = "β³"
        line = f"{icon} **{stage['name']}**"
        if stage.get('word_count', 0) > 0:
            line += f" ({stage['word_count']:,}λ¨μ΄)"
        # Progression / repetition scores, when present.
        if stage.get('progression_score', 0) > 0:
            line += f" [μ§νλ: {stage['progression_score']:.1f}/10]"
        if stage.get('repetition_score', 0) > 0:
            line += f" [λ°λ³΅λ: {stage['repetition_score']:.1f}/10]"
        parts.append(line + "\n")

        if stage['content']:
            body = stage['content']
            preview = body[:200] + "..." if len(body) > 200 else body
            parts.append(f"> {preview}\n\n")

    return "".join(parts)
def format_novel_display(novel_text: str) -> str:
    """Render the finished novel as markdown with a word-count header."""
    if not novel_text:
        return "μμ§ μμ±λ λ΄μ©μ΄ μμ΅λλ€."

    word_count = len(novel_text.split())
    header = (
        "# π μμ±λ μμ€\n\n"
        f"**μ΄ λΆλ: {word_count:,}λ¨μ΄ (λͺ©ν: {TARGET_WORDS:,}λ¨μ΄)**\n\n"
        "---\n\n"
    )
    # Re-emit each non-empty paragraph block, separated by blank lines.
    body = "".join(
        f"{section}\n\n"
        for section in novel_text.split('\n\n')
        if section.strip()
    )
    return header + body
def export_to_docx(content: str, filename: str, language: str, session_id: str) -> str:
    """Export the novel to a .docx file and return the file path.

    Args:
        content: Full novel text; paragraphs separated by blank lines.
        filename: Output base name, without extension.
        language: UI language (currently unused here).
        session_id: Session whose original query becomes the title page.

    Returns:
        Path of the saved .docx file.
    """
    doc = Document()

    # Page geometry: US Letter with book-like margins.
    section = doc.sections[0]
    section.page_height = Inches(11)
    section.page_width = Inches(8.5)
    section.top_margin = Inches(1)
    section.bottom_margin = Inches(1)
    section.left_margin = Inches(1.25)
    section.right_margin = Inches(1.25)

    # Title page built from the session's original query.
    session = NovelDatabase.get_session(session_id)
    title_para = doc.add_paragraph()
    title_para.alignment = WD_ALIGN_PARAGRAPH.CENTER
    if session:
        title_run = title_para.add_run(session["user_query"])
        title_run.font.size = Pt(24)
        title_run.bold = True

    # Metadata block (creation date, word count).
    doc.add_paragraph()
    meta_para = doc.add_paragraph()
    meta_para.alignment = WD_ALIGN_PARAGRAPH.CENTER
    meta_para.add_run(f"μμ±μΌ: {datetime.now().strftime('%Yλ %mμ %dμΌ')}\n")
    meta_para.add_run(f"μ΄ λ¨μ΄ μ: {len(content.split()):,}λ¨μ΄")

    doc.add_page_break()

    # Body style: 11pt, 1.5 line spacing.
    style = doc.styles['Normal']
    style.font.name = 'Calibri'
    style.font.size = Pt(11)
    style.paragraph_format.line_spacing = 1.5
    style.paragraph_format.space_after = Pt(6)

    # Body paragraphs, split on blank lines.
    for para_text in content.split('\n\n'):
        if para_text.strip():
            doc.add_paragraph(para_text.strip())

    # BUG FIX: the output path was hard-coded, silently ignoring `filename`
    # (the unique name built by download_novel). Use the parameter.
    filepath = f"{filename}.docx"
    doc.save(filepath)
    return filepath
def export_to_txt(content: str, filename: str) -> str:
    """Write `content` to '<filename>.txt' (UTF-8) and return the path.

    BUG FIX: the path was previously hard-coded and the `filename`
    argument (the unique name built by download_novel) was ignored.
    """
    filepath = f"{filename}.txt"
    with open(filepath, 'w', encoding='utf-8') as f:
        f.write(content)
    return filepath
# CSS styles: glassmorphism theme for the Gradio UI — blue gradient backdrop
# with translucent, blurred panels. The element ids (#stages-display,
# #novel-output) and classes (.main-header, .input-section, ...) are
# referenced by create_interface() via elem_id / elem_classes.
custom_css = """
.gradio-container {
    background: linear-gradient(135deg, #1e3c72 0%, #2a5298 50%, #1e3c72 100%);
    min-height: 100vh;
}
.main-header {
    background-color: rgba(255, 255, 255, 0.1);
    backdrop-filter: blur(10px);
    padding: 30px;
    border-radius: 12px;
    margin-bottom: 30px;
    text-align: center;
    color: white;
    border: 1px solid rgba(255, 255, 255, 0.2);
}
.progress-note {
    background-color: rgba(255, 223, 0, 0.1);
    border-left: 3px solid #ffd700;
    padding: 15px;
    margin: 20px 0;
    border-radius: 8px;
    color: #fff;
}
.improvement-note {
    background-color: rgba(0, 255, 127, 0.1);
    border-left: 3px solid #00ff7f;
    padding: 15px;
    margin: 20px 0;
    border-radius: 8px;
    color: #fff;
}
.input-section {
    background-color: rgba(255, 255, 255, 0.1);
    backdrop-filter: blur(10px);
    padding: 20px;
    border-radius: 12px;
    margin-bottom: 20px;
    border: 1px solid rgba(255, 255, 255, 0.2);
}
.session-section {
    background-color: rgba(255, 255, 255, 0.1);
    backdrop-filter: blur(10px);
    padding: 15px;
    border-radius: 8px;
    margin-top: 20px;
    color: white;
    border: 1px solid rgba(255, 255, 255, 0.2);
}
#stages-display {
    background-color: rgba(255, 255, 255, 0.95);
    padding: 20px;
    border-radius: 12px;
    max-height: 600px;
    overflow-y: auto;
    box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
}
#novel-output {
    background-color: rgba(255, 255, 255, 0.95);
    padding: 30px;
    border-radius: 12px;
    max-height: 700px;
    overflow-y: auto;
    box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
}
.download-section {
    background-color: rgba(255, 255, 255, 0.9);
    padding: 15px;
    border-radius: 8px;
    margin-top: 20px;
    box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
}
/* μ§ν νμκΈ° μ€νμΌ */
.progress-bar {
    background-color: #e0e0e0;
    height: 20px;
    border-radius: 10px;
    overflow: hidden;
    margin: 10px 0;
}
.progress-fill {
    background-color: #4CAF50;
    height: 100%;
    transition: width 0.3s ease;
}
/* μ μ νμ μ€νμΌ */
.score-badge {
    display: inline-block;
    padding: 2px 8px;
    border-radius: 12px;
    font-size: 0.9em;
    font-weight: bold;
    margin-left: 5px;
}
.score-high {
    background-color: #4CAF50;
    color: white;
}
.score-medium {
    background-color: #FF9800;
    color: white;
}
.score-low {
    background-color: #F44336;
    color: white;
}
"""
# Build the Gradio interface
def create_interface():
    """Build the full Gradio Blocks UI and wire up every event handler.

    Layout: a left column (theme input, language, session management) and a
    right column (progress tab, finished-novel tab with download controls).
    Returns the constructed — not yet launched — gr.Blocks interface.
    """
    with gr.Blocks(css=custom_css, title="AI μ§νν μ₯νΈμμ€ μμ± μμ€ν v2") as interface:
        # Static page header.
        gr.HTML("""
        <div class="main-header">
            <h1 style="font-size: 2.5em; margin-bottom: 10px;">
                π AI μ§νν μ₯νΈμμ€ μμ± μμ€ν v2.0
            </h1>
            <h3 style="color: #ddd; margin-bottom: 20px;">
                λ°λ³΅ μλ μ§μ ν μ₯νΈ μμ¬ κ΅¬μ‘° μ€ν
            </h3>
            <p style="font-size: 1.1em; color: #eee; max-width: 800px; margin: 0 auto;">
                10κ°μ μ κΈ°μ μΌλ‘ μ°κ²°λ λ¨κ³λ₯Ό ν΅ν΄ νλμ μμ ν μ΄μΌκΈ°λ₯Ό λ§λ€μ΄λ λλ€.
                <br>
                κ° λ¨κ³λ μ΄μ λ¨κ³μ νμ°μ κ²°κ³Όλ‘ μ΄μ΄μ§λ©°, μΈλ¬Όμ λ³νμ μ±μ₯μ μΆμ ν©λλ€.
            </p>
            <div class="progress-note">
                β‘ λ°λ³΅μ΄ μλ μΆμ , μνμ΄ μλ μ§νμ ν΅ν μ§μ ν μ₯νΈ μμ¬
            </div>
            <div class="improvement-note">
                π v2.0 κ°μ μ¬ν: κ°νλ λ°λ³΅ κ°μ§ μμ€ν , νΈμ§μ λ¨κ³ μΆκ°, μ€μκ° μ§νλ λͺ¨λν°λ§
            </div>
        </div>
        """)
        # State shared across callbacks: the id of the session being worked on.
        current_session_id = gr.State(None)
        with gr.Row():
            with gr.Column(scale=1):
                with gr.Group(elem_classes=["input-section"]):
                    query_input = gr.Textbox(
                        label="μμ€ μ£Όμ / Novel Theme",
                        placeholder="μ€νΈμμ€μ μ£Όμ λ₯Ό μ λ ₯νμΈμ. μΈλ¬Όμ λ³νμ μ±μ₯μ΄ μ€μ¬μ΄ λλ μ΄μΌκΈ°...\nEnter the theme for your novella. Focus on character transformation and growth...",
                        lines=4
                    )
                    language_select = gr.Radio(
                        choices=["Korean", "English"],
                        value="Korean",
                        label="μΈμ΄ / Language"
                    )
                    with gr.Row():
                        submit_btn = gr.Button("π μμ€ μμ± μμ", variant="primary", scale=2)
                        clear_btn = gr.Button("ποΈ μ΄κΈ°ν", scale=1)
                    status_text = gr.Textbox(
                        label="μν",
                        interactive=False,
                        value="π μ€λΉ μλ£"
                    )
                # Session management panel (resume / recover previous runs).
                with gr.Group(elem_classes=["session-section"]):
                    gr.Markdown("### πΎ μ§ν μ€μΈ μΈμ ")
                    session_dropdown = gr.Dropdown(
                        label="μΈμ μ ν",
                        choices=[],
                        interactive=True
                    )
                    with gr.Row():
                        refresh_btn = gr.Button("π λͺ©λ‘ μλ‘κ³ μΉ¨", scale=1)
                        resume_btn = gr.Button("βΆοΈ μ ν μ¬κ°", variant="secondary", scale=1)
                        auto_recover_btn = gr.Button("β»οΈ μ΅κ·Ό μΈμ 볡ꡬ", scale=1)
            with gr.Column(scale=2):
                with gr.Tab("π μ°½μ μ§ν"):
                    stages_display = gr.Markdown(
                        value="μ°½μ κ³Όμ μ΄ μ¬κΈ°μ νμλ©λλ€...",
                        elem_id="stages-display"
                    )
                with gr.Tab("π μμ±λ μμ€"):
                    novel_output = gr.Markdown(
                        value="μμ±λ μμ€μ΄ μ¬κΈ°μ νμλ©λλ€...",
                        elem_id="novel-output"
                    )
                    with gr.Group(elem_classes=["download-section"]):
                        gr.Markdown("### π₯ μμ€ λ€μ΄λ‘λ")
                        with gr.Row():
                            format_select = gr.Radio(
                                choices=["DOCX", "TXT"],
                                value="DOCX" if DOCX_AVAILABLE else "TXT",
                                label="νμ"
                            )
                            download_btn = gr.Button("β¬οΈ λ€μ΄λ‘λ", variant="secondary")
                        download_file = gr.File(
                            label="λ€μ΄λ‘λλ νμΌ",
                            visible=False
                        )
        # Hidden state mirroring the rendered novel markdown (used for downloads).
        novel_text_state = gr.State("")
        # Example themes the user can click to fill the input box.
        with gr.Row():
            gr.Examples(
                examples=[
                    ["μ€μ§ν μ€λ λ¨μ±μ΄ μλ‘μ΄ μΆμ μλ―Έλ₯Ό μ°Ύμκ°λ μ¬μ "],
                    ["λμμμ μκ³¨λ‘ μ΄μ£Όν μ²λ μ μ μκ³Ό μ±μ₯ μ΄μΌκΈ°"],
                    ["μΈ μΈλκ° ν¨κ» μ¬λ κ°μ‘±μ κ°λ±κ³Ό νν΄"],
                    ["A middle-aged woman's journey to rediscover herself after divorce"],
                    ["The transformation of a cynical journalist through unexpected encounters"],
                    ["μμ μμ μ μ΄μνλ λ ΈλΆλΆμ λ§μ§λ§ 1λ "],
                    ["AI μλμ μΌμ리λ₯Ό μμ λ²μκ°μ μλ‘μ΄ λμ "],
                    ["κΈ°μ΄μνμκΈμκ° λ μ²λ μ μμ‘΄κ³Ό μ‘΄μμ± μ°ΎκΈ°"]
                ],
                inputs=query_input,
                label="π‘ μ£Όμ μμ"
            )
        # --- Event handlers ---
        def refresh_sessions():
            # Reload the active-session labels for the dropdown.
            try:
                sessions = get_active_sessions("Korean")
                return gr.update(choices=sessions)
            except Exception as e:
                logger.error(f"Error refreshing sessions: {str(e)}")
                return gr.update(choices=[])
        def handle_auto_recover(language):
            # Pick the most recent session id plus a status message.
            session_id, message = auto_recover_session(language)
            return session_id, message
        # --- Event wiring ---
        submit_btn.click(
            fn=process_query,
            inputs=[query_input, language_select, current_session_id],
            outputs=[stages_display, novel_output, status_text, current_session_id]
        )
        # Mirror the rendered novel into hidden state for the download handler.
        novel_output.change(
            fn=lambda x: x,
            inputs=[novel_output],
            outputs=[stages_display := novel_text_state] if False else [novel_text_state]
        )
        # Resume: first strip the "..." suffix off the dropdown label, then resume.
        resume_btn.click(
            fn=lambda x: x.split("...")[0] if x and "..." in x else x,
            inputs=[session_dropdown],
            outputs=[current_session_id]
        ).then(
            fn=resume_session,
            inputs=[current_session_id, language_select],
            outputs=[stages_display, novel_output, status_text, current_session_id]
        )
        # Auto-recover: load the latest session id, then resume it.
        auto_recover_btn.click(
            fn=handle_auto_recover,
            inputs=[language_select],
            outputs=[current_session_id, status_text]
        ).then(
            fn=resume_session,
            inputs=[current_session_id, language_select],
            outputs=[stages_display, novel_output, status_text, current_session_id]
        )
        refresh_btn.click(
            fn=refresh_sessions,
            outputs=[session_dropdown]
        )
        # Clear resets displays, status, hidden novel text and session id.
        clear_btn.click(
            fn=lambda: ("", "", "π μ€λΉ μλ£", "", None),
            outputs=[stages_display, novel_output, status_text, novel_text_state, current_session_id]
        )
        def handle_download(format_type, language, session_id, novel_text):
            # Create the export file and reveal the File component on success.
            if not session_id or not novel_text:
                return gr.update(visible=False)
            file_path = download_novel(novel_text, format_type, language, session_id)
            if file_path:
                return gr.update(value=file_path, visible=True)
            else:
                return gr.update(visible=False)
        download_btn.click(
            fn=handle_download,
            inputs=[format_select, language_select, current_session_id, novel_text_state],
            outputs=[download_file]
        )
        # Populate the session dropdown on first page load.
        interface.load(
            fn=refresh_sessions,
            outputs=[session_dropdown]
        )
    return interface
# Main entry point
if __name__ == "__main__":
    logger.info("AI μ§νν μ₯νΈμμ€ μμ± μμ€ν v2.0 μμ...")
    logger.info("=" * 60)
    # Log the runtime configuration up front so misconfiguration is visible early.
    logger.info(f"API μλν¬μΈνΈ: {API_URL}")
    logger.info(f"λͺ©ν λΆλ: {TARGET_WORDS:,}λ¨μ΄")
    logger.info(f"μκ°λΉ μ΅μ λΆλ: {MIN_WORDS_PER_WRITER:,}λ¨μ΄")
    logger.info("μ£Όμ κ°μ μ¬ν: λ°λ³΅ κ°μ§ κ°ν, νΈμ§μ λ¨κ³ μΆκ°, μ§νλ λͺ¨λν°λ§")
    if BRAVE_SEARCH_API_KEY:
        logger.info("μΉ κ²μμ΄ νμ±νλμμ΅λλ€.")
    else:
        logger.warning("μΉ κ²μμ΄ λΉνμ±νλμμ΅λλ€.")
    if DOCX_AVAILABLE:
        logger.info("DOCX λ΄λ³΄λ΄κΈ°κ° νμ±νλμμ΅λλ€.")
    else:
        logger.warning("DOCX λ΄λ³΄λ΄κΈ°κ° λΉνμ±νλμμ΅λλ€.")
    logger.info("=" * 60)
    # Initialize the database BEFORE building the UI — the interface's
    # load() callback queries sessions immediately.
    logger.info("λ°μ΄ν°λ² μ΄μ€ μ΄κΈ°ν μ€...")
    NovelDatabase.init_db()
    logger.info("λ°μ΄ν°λ² μ΄μ€ μ΄κΈ°ν μλ£.")
    # Build and launch the Gradio app on all interfaces, port 7860.
    interface = create_interface()
    interface.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False,
        debug=True
    )