diff --git "a/app.py" "b/app.py"
--- "a/app.py"
+++ "b/app.py"
@@ -15,6 +15,8 @@ import threading
from contextlib import contextmanager
from dataclasses import dataclass, field, asdict
from collections import defaultdict
+import random
+from huggingface_hub import HfApi, upload_file, hf_hub_download
# --- Logging setup ---
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
@@ -252,6 +254,25 @@ class NovelDatabase:
)
''')
+ # Random themes library table
+ cursor.execute('''
+ CREATE TABLE IF NOT EXISTS random_themes_library (
+ theme_id TEXT PRIMARY KEY,
+ theme_text TEXT NOT NULL,
+ language TEXT NOT NULL,
+ title TEXT,
+ opening_sentence TEXT,
+ protagonist TEXT,
+ conflict TEXT,
+ philosophical_question TEXT,
+ generated_at TEXT DEFAULT (datetime('now')),
+ view_count INTEGER DEFAULT 0,
+ used_count INTEGER DEFAULT 0,
+ tags TEXT,
+ metadata TEXT
+ )
+ ''')
+
conn.commit()
@staticmethod
@@ -398,7 +419,80 @@ class NovelDatabase:
return tracker
return None
- # Maintain existing methods
+    @staticmethod
+    def save_random_theme(theme_text: str, language: str, metadata: Dict[str, Any]) -> str:
+        """Persist a randomly generated theme to the library table; return its 12-char id."""
+        theme_id = hashlib.md5(f"{theme_text}{datetime.now()}".encode()).hexdigest()[:12]  # md5 used for uniqueness only, not security
+
+        # Extract individual fields from the metadata dict
+        title = metadata.get('title', '')
+        opening_sentence = metadata.get('opening_sentence', '')
+        protagonist = metadata.get('protagonist', '')
+        conflict = metadata.get('conflict', '')
+        philosophical_question = metadata.get('philosophical_question', '')
+        tags = json.dumps(metadata.get('tags', []))
+
+        with NovelDatabase.get_db() as conn:
+            conn.cursor().execute('''
+                INSERT INTO random_themes_library
+                (theme_id, theme_text, language, title, opening_sentence,
+                 protagonist, conflict, philosophical_question, tags, metadata)
+                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
+            ''', (theme_id, theme_text, language, title, opening_sentence,
+                  protagonist, conflict, philosophical_question, tags,
+                  json.dumps(metadata)))
+            conn.commit()
+
+        return theme_id
+
+    @staticmethod
+    def get_random_themes_library(language: Optional[str] = None, limit: int = 50) -> List[Dict]:
+        """Return up to *limit* library themes, newest first, optionally filtered by language."""
+        with NovelDatabase.get_db() as conn:
+            query = '''
+                SELECT * FROM random_themes_library
+                {}
+                ORDER BY generated_at DESC
+                LIMIT ?
+            '''.format('WHERE language = ?' if language else '')
+
+            if language:
+                rows = conn.cursor().execute(query, (language, limit)).fetchall()
+            else:
+                rows = conn.cursor().execute(query, (limit,)).fetchall()
+
+            return [dict(row) for row in rows]
+
+    @staticmethod
+    def update_theme_view_count(theme_id: str):
+        """Increment the view counter for the given theme."""
+        with NovelDatabase.get_db() as conn:
+            conn.cursor().execute(
+                'UPDATE random_themes_library SET view_count = view_count + 1 WHERE theme_id = ?',
+                (theme_id,)
+            )
+            conn.commit()
+
+    @staticmethod
+    def update_theme_used_count(theme_id: str):
+        """Increment the usage counter when a theme is used to generate a novel."""
+        with NovelDatabase.get_db() as conn:
+            conn.cursor().execute(
+                'UPDATE random_themes_library SET used_count = used_count + 1 WHERE theme_id = ?',
+                (theme_id,)
+            )
+            conn.commit()
+
+    @staticmethod
+    def get_theme_by_id(theme_id: str) -> Optional[Dict]:
+        """Return the theme row as a dict, or None when no such id exists."""
+        with NovelDatabase.get_db() as conn:
+            row = conn.cursor().execute(
+                'SELECT * FROM random_themes_library WHERE theme_id = ?',
+                (theme_id,)
+            ).fetchone()
+            return dict(row) if row else None
+
@staticmethod
def get_session(session_id: str) -> Optional[Dict]:
with NovelDatabase.get_db() as conn:
@@ -490,7 +584,6 @@ class WebSearchIntegration:
break
return "\n".join(extracted)
-
class UnifiedLiterarySystem:
"""Single writer progressive literary novel generation system"""
def __init__(self):
@@ -1157,6 +1250,29 @@ Present specific strengths and weaknesses."""
return lang_prompts.get(language, lang_prompts["Korean"])
+    def create_director_final_prompt(self, initial_plan: str, critic_feedback: str,
+                                     user_query: str, language: str) -> str:
+        """Build the director's final master-plan prompt (NOTE(review): `language` is currently unused)."""
+        return f"""Reflect the critique and complete the final master plan.
+
+**Original Theme:** {user_query}
+
+**Initial Plan:**
+{initial_plan}
+
+**Critique Feedback:**
+{critic_feedback}
+
+**Final Master Plan Requirements:**
+1. Reflect all critique points
+2. Specific content and causality for 10 parts
+3. Clear transformation stages of protagonist
+4. Meaning evolution process of central symbol
+5. Feasibility of 800 words per part
+6. Implementation of philosophical depth and social message
+
+Present concrete and executable final plan."""
+
def _extract_part_plan(self, master_plan: str, part_number: int) -> str:
"""Extract specific part plan from master plan"""
lines = master_plan.split('\n')
@@ -1452,29 +1568,6 @@ Provide specific and actionable revision instructions."""
return ""
- def create_director_final_prompt(self, initial_plan: str, critic_feedback: str,
- user_query: str, language: str) -> str:
- """Director final master plan"""
- return f"""Reflect the critique and complete the final master plan.
-
-**Original Theme:** {user_query}
-
-**Initial Plan:**
-{initial_plan}
-
-**Critique Feedback:**
-{critic_feedback}
-
-**Final Master Plan Requirements:**
-1. Reflect all critique points
-2. Specific content and causality for 10 parts
-3. Clear transformation stages of protagonist
-4. Meaning evolution process of central symbol
-5. Feasibility of 800 words per part
-6. Implementation of philosophical depth and social message
-
-Present concrete and executable final plan."""
-
def _get_part_number(self, stage_idx: int) -> Optional[int]:
"""Extract part number from stage index"""
stage_name = UNIFIED_STAGES[stage_idx][1]
@@ -1511,28 +1604,269 @@ Present concrete and executable final plan."""
logger.error(f"Final report generation failed: {e}")
return "Error occurred during report generation"
+class WebSearchIntegration:
+    """Web search via the Brave API. NOTE(review): duplicates the WebSearchIntegration class defined earlier in this file; this later definition silently wins at import time -- consider removing one copy."""
+    def __init__(self):
+        self.brave_api_key = BRAVE_SEARCH_API_KEY
+        self.search_url = "https://api.search.brave.com/res/v1/web/search"
+        self.enabled = bool(self.brave_api_key)
+
+    def search(self, query: str, count: int = 3, language: str = "en") -> List[Dict]:
+        if not self.enabled:
+            return []
+        headers = {
+            "Accept": "application/json",
+            "X-Subscription-Token": self.brave_api_key
+        }
+        params = {
+            "q": query,
+            "count": count,
+            "search_lang": "ko" if language == "Korean" else "en",
+            "text_decorations": False,
+            "safesearch": "moderate"
+        }
+        try:
+            response = requests.get(self.search_url, headers=headers, params=params, timeout=10)
+            response.raise_for_status()
+            results = response.json().get("web", {}).get("results", [])
+            return results
+        except requests.exceptions.RequestException as e:
+            logger.error(f"Web search API error: {e}")
+            return []
+
+    def extract_relevant_info(self, results: List[Dict], max_chars: int = 1500) -> str:
+        if not results:
+            return ""
+        extracted = []
+        total_chars = 0
+        for i, result in enumerate(results[:3], 1):
+            title = result.get("title", "")
+            description = result.get("description", "")
+            info = f"[{i}] {title}: {description}"
+            if total_chars + len(info) < max_chars:
+                extracted.append(info)
+                total_chars += len(info)
+            else:
+                break
+        return "\n".join(extracted)
+
+class HFDatasetManager:
+ """Manage theme data storage in HuggingFace dataset"""
+
+ def __init__(self):
+ self.token = os.getenv("HF_TOKEN")
+ self.dataset_name = "novel-themes-library"
+ self.username = None
+ self.repo_id = None
+
+ if self.token:
+ try:
+ self.api = HfApi()
+ # Get username from token
+ self.username = self.api.whoami(token=self.token)["name"]
+ self.repo_id = f"{self.username}/{self.dataset_name}"
+
+ # Create dataset repo if it doesn't exist
+ try:
+ self.api.create_repo(
+ repo_id=self.repo_id,
+ token=self.token,
+ repo_type="dataset",
+ private=False,
+ exist_ok=True
+ )
+ logger.info(f"HF Dataset initialized: {self.repo_id}")
+ except Exception as e:
+ logger.error(f"Error creating HF dataset: {e}")
+
+ except Exception as e:
+ logger.error(f"HF authentication failed: {e}")
+ self.token = None
+
+    def save_themes_to_hf(self, themes_data: List[Dict]):
+        """Serialize themes to JSON and upload to the HF dataset repo; returns True on success."""
+        if not self.token or not themes_data:
+            return False
+
+        try:
+            # Create temporary file
+            with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as tmp_file:
+                json.dump({
+                    "themes": themes_data,
+                    "last_updated": datetime.now().isoformat(),
+                    "version": "1.0"
+                }, tmp_file, ensure_ascii=False, indent=2)
+                tmp_path = tmp_file.name
+
+            # Upload to HF
+            upload_file(
+                path_or_fileobj=tmp_path,
+                path_in_repo="themes_library.json",
+                repo_id=self.repo_id,
+                token=self.token,
+                repo_type="dataset",
+                commit_message=f"Update themes library - {len(themes_data)} themes"
+            )
+
+            # Clean up. NOTE(review): tmp file leaks if upload_file raises -- consider try/finally
+            os.unlink(tmp_path)
+            logger.info(f"Saved {len(themes_data)} themes to HF dataset")
+            return True
+
+        except Exception as e:
+            logger.error(f"Error saving to HF dataset: {e}")
+            return False
+
+ def load_themes_from_hf(self) -> List[Dict]:
+ """Load themes from HuggingFace dataset"""
+ if not self.token:
+ return []
+
+ try:
+ # Download file from HF
+ file_path = hf_hub_download(
+ repo_id=self.repo_id,
+ filename="themes_library.json",
+ token=self.token,
+ repo_type="dataset"
+ )
+
+ # Load data
+ with open(file_path, 'r', encoding='utf-8') as f:
+ data = json.load(f)
+
+ themes = data.get("themes", [])
+ logger.info(f"Loaded {len(themes)} themes from HF dataset")
+ return themes
+
+ except Exception as e:
+ logger.warning(f"Error loading from HF dataset: {e}")
+ return []
+
+ def sync_with_local_db(self):
+ """Sync HF dataset with local database"""
+ if not self.token:
+ return
+
+ # Load from HF
+ hf_themes = self.load_themes_from_hf()
+
+ if hf_themes:
+ # Get existing theme IDs from local DB
+ local_theme_ids = set()
+ with NovelDatabase.get_db() as conn:
+ rows = conn.cursor().execute(
+ "SELECT theme_id FROM random_themes_library"
+ ).fetchall()
+ local_theme_ids = {row['theme_id'] for row in rows}
+
+ # Add new themes from HF to local DB
+ new_count = 0
+ for theme in hf_themes:
+ if theme.get('theme_id') not in local_theme_ids:
+ try:
+ # Ensure tags and metadata are JSON strings
+ tags_data = theme.get('tags', [])
+ if isinstance(tags_data, list):
+ tags_json = json.dumps(tags_data, ensure_ascii=False)
+ else:
+ tags_json = tags_data if isinstance(tags_data, str) else '[]'
+
+ metadata_data = theme.get('metadata', {})
+ if isinstance(metadata_data, dict):
+ metadata_json = json.dumps(metadata_data, ensure_ascii=False)
+ else:
+ metadata_json = metadata_data if isinstance(metadata_data, str) else '{}'
+
+ with NovelDatabase.get_db() as conn:
+ conn.cursor().execute('''
+ INSERT INTO random_themes_library
+ (theme_id, theme_text, language, title, opening_sentence,
+ protagonist, conflict, philosophical_question, generated_at,
+ view_count, used_count, tags, metadata)
+ VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
+ ''', (
+ theme.get('theme_id'),
+ theme.get('theme_text'),
+ theme.get('language'),
+ theme.get('title', ''),
+ theme.get('opening_sentence', ''),
+ theme.get('protagonist', ''),
+ theme.get('conflict', ''),
+ theme.get('philosophical_question', ''),
+ theme.get('generated_at'),
+ theme.get('view_count', 0),
+ theme.get('used_count', 0),
+ tags_json,
+ metadata_json
+ ))
+ conn.commit()
+ new_count += 1
+ except Exception as e:
+ logger.error(f"Error adding theme {theme.get('theme_id')}: {e}")
+
+ if new_count > 0:
+ logger.info(f"Added {new_count} new themes from HF dataset")
+
+    def backup_to_hf(self):
+        """Backup all local themes to HF dataset"""
+        if not self.token:
+            return
+
+        # Get all themes from local DB
+        themes = NovelDatabase.get_random_themes_library(limit=1000)
+
+        if themes:
+            # Convert Row objects to dicts and ensure all data is serializable
+            themes_data = []
+            for theme in themes:
+                theme_dict = dict(theme)
+                # Parse tags and metadata from JSON strings
+                if isinstance(theme_dict.get('tags'), str):
+                    try:
+                        theme_dict['tags'] = json.loads(theme_dict['tags'])
+                    except (json.JSONDecodeError, TypeError):  # narrowed from bare except
+                        theme_dict['tags'] = []
+                else:
+                    theme_dict['tags'] = theme_dict.get('tags', [])
+
+                if isinstance(theme_dict.get('metadata'), str):
+                    try:
+                        theme_dict['metadata'] = json.loads(theme_dict['metadata'])
+                    except (json.JSONDecodeError, TypeError):  # narrowed from bare except
+                        theme_dict['metadata'] = {}
+                else:
+                    theme_dict['metadata'] = theme_dict.get('metadata', {})
+
+                themes_data.append(theme_dict)
+
+            self.save_themes_to_hf(themes_data)
+
+
# --- Utility functions ---
-def process_query(query: str, language: str, session_id: Optional[str] = None) -> Generator[Tuple[str, str, str, str], None, None]:
+def process_query(query: str, language: str, session_id: Optional[str] = None) -> Generator[Tuple[str, str, str, str, str], None, None]:
"""Main query processing function"""
if not query.strip():
- yield "", "", "❌ Please enter a theme.", session_id
+ yield "", "", "❌ Please enter a theme.", session_id, ""
return
system = UnifiedLiterarySystem()
stages_markdown = ""
novel_content = ""
+    novel_text = ""  # holds the raw novel text for download/export
for status, stages, current_session_id in system.process_novel_stream(query, language, session_id):
stages_markdown = format_stages_display(stages)
# Get final novel content
if stages and all(s.get("status") == "complete" for s in stages[-10:]):
- novel_content = NovelDatabase.get_writer_content(current_session_id)
- novel_content = format_novel_display(novel_content)
+            novel_text = NovelDatabase.get_writer_content(current_session_id)  # raw source text
+            novel_content = format_novel_display(novel_text)  # formatted for display
- yield stages_markdown, novel_content, status or "🔄 Processing...", current_session_id
+ yield stages_markdown, novel_content, status or "🔄 Processing...", current_session_id, novel_text
+
def get_active_sessions(language: str) -> List[str]:
"""Get active session list"""
@@ -1548,10 +1882,10 @@ def auto_recover_session(language: str) -> Tuple[Optional[str], str]:
return latest_session['session_id'], f"Session {latest_session['session_id'][:8]}... recovered"
return None, "No session to recover."
-def resume_session(session_id: str, language: str) -> Generator[Tuple[str, str, str, str], None, None]:
+def resume_session(session_id: str, language: str) -> Generator[Tuple[str, str, str, str, str], None, None]:
"""Resume session"""
if not session_id:
- yield "", "", "❌ No session ID.", session_id
+ yield "", "", "❌ No session ID.", session_id, ""
return
if "..." in session_id:
@@ -1559,669 +1893,1733 @@ def resume_session(session_id: str, language: str) -> Generator[Tuple[str, str,
session = NovelDatabase.get_session(session_id)
if not session:
- yield "", "", "❌ Session not found.", None
+ yield "", "", "❌ Session not found.", None, ""
return
yield from process_query(session['user_query'], session['language'], session_id)
-def download_novel(novel_text: str, format_type: str, language: str, session_id: str) -> Optional[str]:
- """Generate novel download file"""
- if not novel_text or not session_id:
- return None
-
- timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
- filename = f"novel_{session_id[:8]}_{timestamp}"
-
- try:
- if format_type == "DOCX" and DOCX_AVAILABLE:
- return export_to_docx(novel_text, filename, language, session_id)
- else:
- return export_to_txt(novel_text, filename)
- except Exception as e:
- logger.error(f"File generation failed: {e}")
- return None
+
def format_stages_display(stages: List[Dict]) -> str:
- """Stage progress display - For single writer system"""
- markdown = "## 🎬 Progress Status\n\n"
-
- # Calculate total word count (writer stages only)
- total_words = sum(s.get('word_count', 0) for s in stages
- if s.get('name', '').startswith('✍️ Writer:') and 'Revision' in s.get('name', ''))
- markdown += f"**Total Word Count: {total_words:,} / {TARGET_WORDS:,}**\n\n"
-
- # Progress summary
- completed_parts = sum(1 for s in stages
- if 'Revision' in s.get('name', '') and s.get('status') == 'complete')
- markdown += f"**Completed Parts: {completed_parts} / 10**\n\n"
-
- # Average narrative momentum
- momentum_scores = [s.get('momentum', 0) for s in stages if s.get('momentum', 0) > 0]
- if momentum_scores:
- avg_momentum = sum(momentum_scores) / len(momentum_scores)
- markdown += f"**Average Narrative Momentum: {avg_momentum:.1f} / 10**\n\n"
-
- markdown += "---\n\n"
-
- # Display each stage
- current_part = 0
- for i, stage in enumerate(stages):
- status_icon = "✅" if stage['status'] == 'complete' else "🔄" if stage['status'] == 'active' else "⏳"
-
- # Add part divider
- if 'Part' in stage.get('name', '') and 'Critic' not in stage.get('name', ''):
- part_match = re.search(r'Part (\d+)', stage['name'])
- if part_match:
- new_part = int(part_match.group(1))
- if new_part != current_part:
- current_part = new_part
- markdown += f"\n### 📚 Part {current_part}\n\n"
-
- markdown += f"{status_icon} **{stage['name']}**"
-
- if stage.get('word_count', 0) > 0:
- markdown += f" ({stage['word_count']:,} words)"
-
- if stage.get('momentum', 0) > 0:
- markdown += f" [Momentum: {stage['momentum']:.1f}/10]"
-
- markdown += "\n"
-
- if stage['content'] and stage['status'] == 'complete':
- # Adjust preview length by role
- preview_length = 300 if 'writer' in stage.get('name', '').lower() else 200
- preview = stage['content'][:preview_length] + "..." if len(stage['content']) > preview_length else stage['content']
- markdown += f"> {preview}\n\n"
- elif stage['status'] == 'active':
- markdown += "> *Writing...*\n\n"
-
- return markdown
+ """Stage progress display - For single writer system"""
+ markdown = "## 🎬 Progress Status\n\n"
+
+ # Calculate total word count (writer stages only)
+ total_words = sum(s.get('word_count', 0) for s in stages
+ if s.get('name', '').startswith('✍️ Writer:') and 'Revision' in s.get('name', ''))
+ markdown += f"**Total Word Count: {total_words:,} / {TARGET_WORDS:,}**\n\n"
+
+ # Progress summary
+ completed_parts = sum(1 for s in stages
+ if 'Revision' in s.get('name', '') and s.get('status') == 'complete')
+ markdown += f"**Completed Parts: {completed_parts} / 10**\n\n"
+
+ # Average narrative momentum
+ momentum_scores = [s.get('momentum', 0) for s in stages if s.get('momentum', 0) > 0]
+ if momentum_scores:
+ avg_momentum = sum(momentum_scores) / len(momentum_scores)
+ markdown += f"**Average Narrative Momentum: {avg_momentum:.1f} / 10**\n\n"
+
+ markdown += "---\n\n"
+
+ # Display each stage
+ current_part = 0
+ for i, stage in enumerate(stages):
+ status_icon = "✅" if stage['status'] == 'complete' else "🔄" if stage['status'] == 'active' else "⏳"
+
+ # Add part divider
+ if 'Part' in stage.get('name', '') and 'Critic' not in stage.get('name', ''):
+ part_match = re.search(r'Part (\d+)', stage['name'])
+ if part_match:
+ new_part = int(part_match.group(1))
+ if new_part != current_part:
+ current_part = new_part
+ markdown += f"\n### 📚 Part {current_part}\n\n"
+
+ markdown += f"{status_icon} **{stage['name']}**"
+
+ if stage.get('word_count', 0) > 0:
+ markdown += f" ({stage['word_count']:,} words)"
+
+ if stage.get('momentum', 0) > 0:
+ markdown += f" [Momentum: {stage['momentum']:.1f}/10]"
+
+ markdown += "\n"
+
+ if stage['content'] and stage['status'] == 'complete':
+ # Adjust preview length by role
+ preview_length = 300 if 'writer' in stage.get('name', '').lower() else 200
+ preview = stage['content'][:preview_length] + "..." if len(stage['content']) > preview_length else stage['content']
+ markdown += f"> {preview}\n\n"
+ elif stage['status'] == 'active':
+ markdown += "> *Writing...*\n\n"
+
+ return markdown
def format_novel_display(novel_text: str) -> str:
- """Display novel content - Enhanced part separation"""
- if not novel_text:
- return "No completed content yet."
-
- formatted = "# 📖 Completed Novel\n\n"
-
- # Display word count
- word_count = len(novel_text.split())
- formatted += f"**Total Length: {word_count:,} words (Target: {TARGET_WORDS:,} words)**\n\n"
-
- # Achievement rate
- achievement = (word_count / TARGET_WORDS) * 100
- formatted += f"**Achievement Rate: {achievement:.1f}%**\n\n"
- formatted += "---\n\n"
-
- # Display each part separately
- parts = novel_text.split('\n\n')
-
- for i, part in enumerate(parts):
- if part.strip():
- # Add part title
- if i < len(NARRATIVE_PHASES):
- formatted += f"## {NARRATIVE_PHASES[i]}\n\n"
-
- formatted += f"{part}\n\n"
-
- # Part divider
- if i < len(parts) - 1:
- formatted += "---\n\n"
-
- return formatted
+ """Display novel content - Enhanced part separation"""
+ if not novel_text:
+ return "No completed content yet."
+
+ formatted = "# 📖 Completed Novel\n\n"
+
+ # Display word count
+ word_count = len(novel_text.split())
+ formatted += f"**Total Length: {word_count:,} words (Target: {TARGET_WORDS:,} words)**\n\n"
+
+ # Achievement rate
+ achievement = (word_count / TARGET_WORDS) * 100
+ formatted += f"**Achievement Rate: {achievement:.1f}%**\n\n"
+ formatted += "---\n\n"
+
+ # Display each part separately
+ parts = novel_text.split('\n\n')
+
+ for i, part in enumerate(parts):
+ if part.strip():
+ # Add part title
+ if i < len(NARRATIVE_PHASES):
+ formatted += f"## {NARRATIVE_PHASES[i]}\n\n"
+
+ formatted += f"{part}\n\n"
+
+ # Part divider
+ if i < len(parts) - 1:
+ formatted += "---\n\n"
+
+ return formatted
def export_to_docx(content: str, filename: str, language: str, session_id: str) -> str:
"""Export to DOCX file - Korean standard book format"""
- doc = Document()
-
- # Korean standard book format (152mm x 225mm)
- section = doc.sections[0]
- section.page_height = Mm(225) # 225mm
- section.page_width = Mm(152) # 152mm
- section.top_margin = Mm(20) # Top margin 20mm
- section.bottom_margin = Mm(20) # Bottom margin 20mm
- section.left_margin = Mm(20) # Left margin 20mm
- section.right_margin = Mm(20) # Right margin 20mm
-
- # Generate title from session info
- session = NovelDatabase.get_session(session_id)
-
- # Title generation function
- def generate_title(user_query: str, content_preview: str) -> str:
- """Generate title based on theme and content"""
- # Simple rule-based title generation (could use LLM)
- if len(user_query) < 20:
- return user_query
+ try:
+ doc = Document()
+
+ # Korean standard book format (152mm x 225mm)
+ section = doc.sections[0]
+ section.page_height = Mm(225) # 225mm
+ section.page_width = Mm(152) # 152mm
+ section.top_margin = Mm(20) # Top margin 20mm
+ section.bottom_margin = Mm(20) # Bottom margin 20mm
+ section.left_margin = Mm(20) # Left margin 20mm
+ section.right_margin = Mm(20) # Right margin 20mm
+
+ # Generate title from session info
+ session = NovelDatabase.get_session(session_id)
+
+ # Title generation function
+ def generate_title(user_query: str, content_preview: str) -> str:
+ """Generate title based on theme and content"""
+ if len(user_query) < 20:
+ return user_query
+ else:
+ keywords = user_query.split()[:5]
+ return " ".join(keywords)
+
+ # Title page
+ title = generate_title(session["user_query"], content[:500]) if session else "Untitled"
+
+ # Title style settings
+ title_para = doc.add_paragraph()
+ title_para.alignment = WD_ALIGN_PARAGRAPH.CENTER
+ title_para.paragraph_format.space_before = Pt(100)
+
+ title_run = title_para.add_run(title)
+ if language == "Korean":
+ title_run.font.name = 'Batang'
+ title_run._element.rPr.rFonts.set(qn('w:eastAsia'), 'Batang')
else:
- # Extract key keywords from theme
- keywords = user_query.split()[:5]
- return " ".join(keywords)
-
- # Title page
- title = generate_title(session["user_query"], content[:500]) if session else "Untitled"
-
- # Title style settings
- title_para = doc.add_paragraph()
- title_para.alignment = WD_ALIGN_PARAGRAPH.CENTER
- title_para.paragraph_format.space_before = Pt(100)
-
- title_run = title_para.add_run(title)
- if language == "Korean":
- title_run.font.name = 'Batang'
- title_run._element.rPr.rFonts.set(qn('w:eastAsia'), 'Batang')
- else:
- title_run.font.name = 'Times New Roman'
- title_run.font.size = Pt(20)
- title_run.bold = True
-
- # Page break
- doc.add_page_break()
-
- # Body style settings
- style = doc.styles['Normal']
- if language == "Korean":
- style.font.name = 'Batang'
- style._element.rPr.rFonts.set(qn('w:eastAsia'), 'Batang')
- else:
- style.font.name = 'Times New Roman'
- style.font.size = Pt(10.5) # Standard size for novels
- style.paragraph_format.line_spacing = 1.8 # 180% line spacing
- style.paragraph_format.space_after = Pt(0)
- style.paragraph_format.first_line_indent = Mm(10) # 10mm indentation
-
- # Clean content - Extract pure text only
- def clean_content(text: str) -> str:
- """Remove unnecessary markdown, part numbers, etc."""
- # Remove part titles/numbers patterns
- patterns_to_remove = [
- r'^#{1,6}\s+.*', # Markdown headers
- r'^\*\*.*\*\*', # 굵은 글씨 **text**
- r'^Part\s*\d+.*', # “Part 1 …” 형식
- r'^\d+\.\s+.*:.*', # “1. 제목: …” 형식
- r'^---+', # 구분선
- r'^\s*\[.*\]\s*', # 대괄호 라벨
- ]
-
-
-
- lines = text.split('\n')
- cleaned_lines = []
+ title_run.font.name = 'Times New Roman'
+ title_run.font.size = Pt(20)
+ title_run.bold = True
- for line in lines:
- # Keep empty lines
- if not line.strip():
- cleaned_lines.append('')
- continue
+ # Page break
+ doc.add_page_break()
+
+ # Body style settings
+ style = doc.styles['Normal']
+ if language == "Korean":
+ style.font.name = 'Batang'
+ style._element.rPr.rFonts.set(qn('w:eastAsia'), 'Batang')
+ else:
+ style.font.name = 'Times New Roman'
+ style.font.size = Pt(10.5)
+ style.paragraph_format.line_spacing = 1.8
+ style.paragraph_format.space_after = Pt(0)
+ style.paragraph_format.first_line_indent = Mm(10)
+
+ # Clean content function
+ def clean_content(text: str) -> str:
+ """Remove unnecessary markdown, part numbers, etc."""
+ patterns_to_remove = [
+ r'^#{1,6}\s+.*', # Markdown headers
+ r'^\*\*.*\*\*', # Bold text
+ r'^Part\s*\d+.*', # Part numbers
+ r'^\d+\.\s+.*:.*', # Numbered lists
+ r'^---+', # Dividers
+ r'^\s*\[.*\]\s*', # Brackets
+ ]
- # Remove unnecessary lines through pattern matching
- skip_line = False
- for pattern in patterns_to_remove:
- if re.match(pattern, line.strip(), re.MULTILINE):
- skip_line = True
- break
+ lines = text.split('\n')
+ cleaned_lines = []
- if not skip_line:
- # Remove markdown emphasis
- cleaned_line = line
- cleaned_line = re.sub(r'\*\*(.*?)\*\*', r'\1', cleaned_line) # **text** -> text
- cleaned_line = re.sub(r'\*(.*?)\*', r'\1', cleaned_line) # *text* -> text
- cleaned_line = re.sub(r'`(.*?)`', r'\1', cleaned_line) # `text` -> text
- cleaned_lines.append(cleaned_line.strip())
-
- # Remove consecutive empty lines (keep only 1)
- final_lines = []
- prev_empty = False
- for line in cleaned_lines:
- if not line:
- if not prev_empty:
- final_lines.append('')
- prev_empty = True
- else:
- final_lines.append(line)
- prev_empty = False
-
- return '\n'.join(final_lines)
-
- # Clean content
- cleaned_content = clean_content(content)
-
- # Add body text
- paragraphs = cleaned_content.split('\n')
- for para_text in paragraphs:
- if para_text.strip():
- para = doc.add_paragraph(para_text.strip())
- # Reconfirm style (apply font)
- for run in para.runs:
- if language == "Korean":
- run.font.name = 'Batang'
- run._element.rPr.rFonts.set(qn('w:eastAsia'), 'Batang')
+ for line in lines:
+ if not line.strip():
+ cleaned_lines.append('')
+ continue
+
+ skip_line = False
+ for pattern in patterns_to_remove:
+ if re.match(pattern, line.strip(), re.MULTILINE):
+ skip_line = True
+ break
+
+ if not skip_line:
+ cleaned_line = line
+ cleaned_line = re.sub(r'\*\*(.*?)\*\*', r'\1', cleaned_line)
+ cleaned_line = re.sub(r'\*(.*?)\*', r'\1', cleaned_line)
+ cleaned_line = re.sub(r'`(.*?)`', r'\1', cleaned_line)
+ cleaned_lines.append(cleaned_line.strip())
+
+ final_lines = []
+ prev_empty = False
+ for line in cleaned_lines:
+ if not line:
+ if not prev_empty:
+ final_lines.append('')
+ prev_empty = True
else:
- run.font.name = 'Times New Roman'
+ final_lines.append(line)
+ prev_empty = False
+
+ return '\n'.join(final_lines)
+
+ # Clean content
+ cleaned_content = clean_content(content)
+
+ # Add body text
+ paragraphs = cleaned_content.split('\n')
+ for para_text in paragraphs:
+ if para_text.strip():
+ para = doc.add_paragraph(para_text.strip())
+ for run in para.runs:
+ if language == "Korean":
+ run.font.name = 'Batang'
+ run._element.rPr.rFonts.set(qn('w:eastAsia'), 'Batang')
+ else:
+ run.font.name = 'Times New Roman'
+ else:
+ doc.add_paragraph()
+
+ # Create temporary file with proper handling
+ with tempfile.NamedTemporaryFile(mode='wb', suffix='.docx', delete=False) as tmp_file:
+ doc.save(tmp_file)
+ temp_path = tmp_file.name
+
+ return temp_path
+
+ except Exception as e:
+ logger.error(f"DOCX export error: {str(e)}")
+ raise e
+
+
+def download_novel(novel_text: str, format_type: str, language: str, session_id: str) -> Optional[str]:
+ """Generate novel download file - FIXED VERSION"""
+ if not novel_text or not session_id:
+ logger.error("Missing novel_text or session_id")
+ return None
+
+ timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+ filename = f"novel_{session_id[:8]}_{timestamp}"
+
+ try:
+ if format_type == "DOCX" and DOCX_AVAILABLE:
+ # Use the fixed export_to_docx function
+ return export_to_docx(novel_text, filename, language, session_id)
+ else:
+ # For TXT format
+ return export_to_txt(novel_text, filename)
+ except Exception as e:
+ logger.error(f"File generation failed: {e}")
+ return None
+
+
+# In the Gradio interface, update the download handler:
+def handle_download(format_type, language, session_id, novel_text):
+ """Fixed download handler with better error handling and debugging"""
+ logger.info(f"Download attempt - Session ID: {session_id}, Format: {format_type}")
+ logger.info(f"Novel text length: {len(novel_text) if novel_text else 0}")
+ logger.info(f"Novel text preview: {novel_text[:100] if novel_text else 'None'}")
+
+ if not session_id:
+ logger.error("No session ID provided")
+ return gr.update(visible=False, value=None)
+
+ if not novel_text or novel_text.strip() == "" or novel_text == "*Your completed novel will appear here, ready to be read and cherished...*":
+ logger.error(f"No novel content to download. Content: '{novel_text[:50] if novel_text else 'None'}'")
+ return gr.update(visible=False, value=None)
+
+ try:
+ file_path = download_novel(novel_text, format_type, language, session_id)
+ if file_path and os.path.exists(file_path):
+ logger.info(f"File created successfully: {file_path}")
+ return gr.update(value=file_path, visible=True)
else:
- # Empty line for paragraph separation
- doc.add_paragraph()
+ logger.error("File path not created or doesn't exist")
+ return gr.update(visible=False, value=None)
+ except Exception as e:
+ logger.error(f"Download handler error: {str(e)}")
+ return gr.update(visible=False, value=None)
+
+# Cleanup for temporary files. NOTE(review): export_to_docx now writes tempfile-named files.
+def cleanup_temp_files():
+    """Clean up old temporary files (assumes `glob` and `time` are imported at file top -- confirm)."""
+    temp_dir = tempfile.gettempdir()
+    pattern = os.path.join(temp_dir, "novel_*.docx")  # TODO: never matches tempfile names like tmp*.docx
-    # Save file
-    filepath = f"(unknown).docx"
-    doc.save(filepath)
-    return filepath
+    for file_path in glob.glob(pattern):
+        try:
+            # Delete files older than 1 hour
+            if os.path.getmtime(file_path) < time.time() - 3600:
+                os.unlink(file_path)
+        except OSError:  # narrowed from bare except; ignore races on stat/delete
+            pass
+
def export_to_txt(content: str, filename: str) -> str:
    """Write *content* to '<filename>.txt' framed by header/footer banners.

    Args:
        content: Full novel text to export.
        filename: Output path without the extension.

    Returns:
        The path of the file that was written.
    """
    # Bug fix: the filename parameter was previously ignored.
    filepath = f"{filename}.txt"
    with open(filepath, 'w', encoding='utf-8') as f:
        # Header: generation timestamp and word count
        f.write("=" * 80 + "\n")
        f.write(f"Generated on: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
        f.write(f"Total word count: {len(content.split()):,} words\n")
        f.write("=" * 80 + "\n\n")

        # Body
        f.write(content)

        # Footer
        f.write("\n\n" + "=" * 80 + "\n")
        f.write("AI Literary Creation System v2.0\n")
        f.write("=" * 80 + "\n")

    return filepath
+
def generate_random_theme(language="English"):
    """Generate a coherent, realistic novel theme via the director LLM.

    Picks theme/character/hook/question components (from novel_themes.json
    when available, otherwise from built-in realistic pools), asks the LLM
    to weave them into a short theme blurb, persists the result to the
    theme library, and returns the blurb. Falls back to canned themes on
    any error.

    Args:
        language: "Korean" or "English"; controls both the prompt and the
            fallback pool.

    Returns:
        The generated (or fallback) theme text.
    """
    try:
        # Load component pools from the themes JSON file
        json_path = Path("novel_themes.json")
        if not json_path.exists():
            logger.warning("novel_themes.json not found, using built-in data")
            # Built-in pools biased toward realistic, relatable stories
            themes_data = {
                "themes": ["family secrets", "career transition", "lost love", "friendship test", "generational conflict"],
                "characters": ["middle-aged teacher", "retiring doctor", "single parent", "immigrant artist", "war veteran"],
                "hooks": ["unexpected inheritance", "old diary discovery", "chance reunion", "life-changing diagnosis", "sudden job loss"],
                "questions": ["What defines family?", "Can people truly change?", "What is worth sacrificing?", "How do we forgive?"]
            }
        else:
            with open(json_path, 'r', encoding='utf-8') as f:
                data = json.load(f)

            # Weighted filtering: boost grounded themes, damp speculative ones
            realistic_themes = []
            for theme_key, theme_data in data.get('core_themes', {}).items():
                weight = theme_data.get('weight', 0.1)
                if any(word in theme_key for word in ['family', 'love', 'work', 'memory', 'identity', 'aging']):
                    weight *= 1.5
                elif any(word in theme_key for word in ['digital', 'extinction', 'apocalypse', 'quantum']):
                    weight *= 0.5
                realistic_themes.append((theme_key, weight))

            # Keep the ten highest-weighted theme keys
            themes = [t[0] for t in sorted(realistic_themes, key=lambda x: x[1], reverse=True)[:10]]

            themes_data = {
                "themes": themes if themes else ["family secrets", "career crisis", "lost love"],
                "characters": [],
                "hooks": [],
                "questions": []
            }

            # Keep only grounded character variations
            for char_data in data.get('characters', {}).values():
                for variation in char_data.get('variations', []):
                    if not any(word in variation.lower() for word in ['cyborg', 'quantum', 'binary', 'extinct']):
                        themes_data["characters"].append(variation)

            # Keep only grounded narrative hooks
            for hook_list in data.get('narrative_hooks', {}).values():
                for hook in hook_list:
                    if not any(word in hook.lower() for word in ['download', 'digital', 'algorithm', 'corporate subscription']):
                        themes_data["hooks"].append(hook)

            for phil_data in data.get('philosophies', {}).values():
                themes_data["questions"].extend(phil_data.get('core_questions', []))

            # Fallback defaults when filtering emptied a pool
            if not themes_data["characters"]:
                themes_data["characters"] = ["struggling artist", "retired teacher", "young mother", "elderly caregiver", "small business owner"]
            if not themes_data["hooks"]:
                themes_data["hooks"] = ["discovering family secret", "unexpected reunion", "facing illness", "losing home", "finding old letters"]
            if not themes_data["questions"]:
                themes_data["questions"] = ["What makes a family?", "How do we find meaning?", "Can we escape our past?", "What legacy do we leave?"]

        # Random selection — uses the module-level `random` import
        # (non-cryptographic choice; no need for an inline `secrets` import).
        theme = random.choice(themes_data["themes"])
        character = random.choice(themes_data["characters"])
        hook = random.choice(themes_data["hooks"])
        question = random.choice(themes_data["questions"])

        # Language-specific prompt (tone/style section intentionally omitted)
        if language == "Korean":
            # Render each component as natural Korean before prompting
            theme_kr = translate_theme_naturally(theme, "theme")
            character_kr = translate_theme_naturally(character, "character")
            hook_kr = translate_theme_naturally(hook, "hook")
            question_kr = translate_theme_naturally(question, "question")

            prompt = f"""다음 요소들을 사용하여 현실적이고 공감가능한 소설 주제를 생성하세요:

주제: {theme_kr}
인물: {character_kr}
사건: {hook_kr}
핵심 질문: {question_kr}

요구사항:
1. 현대 한국 사회에서 일어날 수 있는 현실적인 이야기
2. 보편적으로 공감할 수 있는 인물과 상황
3. 구체적이고 생생한 배경 설정
4. 깊이 있는 심리 묘사가 가능한 갈등

다음 형식으로 간결하게 작성하세요:

[한 문장으로 된 매력적인 첫 문장]

주인공은 [구체적인 상황의 인물]입니다.
[핵심 사건]을 계기로 [내적 갈등]에 직면하게 되고,
결국 [철학적 질문]에 대한 답을 찾아가는 여정을 그립니다."""

        else:
            prompt = f"""Generate a realistic and relatable novel theme using these elements:

Theme: {theme}
Character: {character}
Event: {hook}
Core Question: {question}

Requirements:
1. A story that could happen in contemporary society
2. Universally relatable characters and situations
3. Specific and vivid settings
4. Conflicts allowing deep psychological exploration

Write concisely in this format:

[One compelling opening sentence]

The protagonist is [character in specific situation].
Through [key event], they face [internal conflict],
ultimately embarking on a journey to answer [philosophical question]."""

        # Generate the blurb with the director-role LLM
        system = UnifiedLiterarySystem()
        messages = [{"role": "user", "content": prompt}]
        generated_theme = system.call_llm_sync(messages, "director", language)

        # Persist structured metadata plus the raw components for the library
        metadata = extract_theme_metadata(generated_theme, language)
        metadata.update({
            'original_theme': theme,
            'original_character': character,
            'original_hook': hook,
            'original_question': question
        })
        theme_id = save_random_theme_with_hf(generated_theme, language, metadata)
        logger.info(f"Saved random theme with ID: {theme_id}")

        # Strip any redundant "Tone and Style" section the LLM added anyway
        if "**톤과 스타일:**" in generated_theme or "**Tone and Style:**" in generated_theme:
            lines = generated_theme.split('\n')
            filtered_lines = []
            skip = False
            for line in lines:
                if "톤과 스타일" in line or "Tone and Style" in line:
                    skip = True
                elif skip and (line.strip() == "" or line.startswith("**")):
                    skip = False
                if not skip:
                    filtered_lines.append(line)
            generated_theme = '\n'.join(filtered_lines).strip()

        return generated_theme

    except Exception as e:
        logger.error(f"Theme generation error: {str(e)}")
        # Canned realistic fallbacks per language
        fallback_themes = {
            "Korean": [
                """"아버지가 돌아가신 날, 나는 그가 평생 숨겨온 또 다른 가족의 존재를 알게 되었다."

주인공은 평범한 회사원으로 살아온 40대 여성입니다.
아버지의 장례식에서 낯선 여인과 그녀의 딸을 만나게 되면서 가족의 의미에 대해 다시 생각하게 되고,
결국 진정한 가족이란 무엇인지에 대한 답을 찾아가는 여정을 그립니다.""",

                """"서른 년간 가르친 학교에서 나온 날, 처음으로 내가 누구인지 몰랐다."

주인공은 정년퇴직을 맞은 고등학교 국어 교사입니다.
갑작스러운 일상의 공백 속에서 잊고 지냈던 젊은 날의 꿈을 마주하게 되고,
결국 남은 인생에서 무엇을 할 것인가에 대한 답을 찾아가는 여정을 그립니다."""
            ],
            "English": [
                """"The day my father died, I discovered he had another family he'd hidden all his life."

The protagonist is a woman in her 40s who has lived as an ordinary office worker.
Through meeting a strange woman and her daughter at her father's funeral, she confronts what family truly means,
ultimately embarking on a journey to answer what constitutes a real family.""",

                """"The day I left the school where I'd taught for thirty years, I didn't know who I was anymore."

The protagonist is a high school literature teacher facing retirement.
Through the sudden emptiness of daily life, they confront long-forgotten dreams of youth,
ultimately embarking on a journey to answer what to do with the remaining years."""
            ]
        }

        return random.choice(fallback_themes.get(language, fallback_themes["English"]))
+
def translate_theme_naturally(text, category):
    """Map an English theme component to a natural Korean rendering.

    Looks for an exact match first, then falls back to a case-insensitive
    substring match in either direction; returns *text* unchanged when no
    translation is known. The *category* argument is accepted for call-site
    symmetry but does not affect the lookup.
    """
    translations = {
        # Themes
        "family secrets": "가족의 비밀",
        "career transition": "인생의 전환점",
        "lost love": "잃어버린 사랑",
        "friendship test": "우정의 시험",
        "generational conflict": "세대 간 갈등",
        "digital extinction": "디지털 시대의 소외",
        "sensory revolution": "감각의 혁명",
        "temporal paradox": "시간의 역설",

        # Characters
        "struggling artist": "생활고에 시달리는 예술가",
        "retired teacher": "은퇴한 교사",
        "young mother": "젊은 엄마",
        "elderly caregiver": "노인을 돌보는 간병인",
        "small business owner": "작은 가게 주인",
        "middle-aged teacher": "중년의 교사",
        "retiring doctor": "은퇴를 앞둔 의사",
        "single parent": "혼자 아이를 키우는 부모",
        "immigrant artist": "이민자 예술가",
        "war veteran": "전쟁 참전용사",
        "last person who dreams without ads": "광고 없이 꿈꾸는 마지막 사람",
        "memory trader": "기억 거래상",

        # Events
        "discovering family secret": "가족의 비밀을 발견하다",
        "unexpected reunion": "예상치 못한 재회",
        "facing illness": "질병과 마주하다",
        "losing home": "집을 잃다",
        "finding old letters": "오래된 편지를 발견하다",
        "unexpected inheritance": "뜻밖의 유산",
        "old diary discovery": "오래된 일기장 발견",
        "chance reunion": "우연한 재회",
        "life-changing diagnosis": "인생을 바꾸는 진단",
        "sudden job loss": "갑작스러운 실직",
        "discovers their memories belong to a corporate subscription": "기억이 기업 서비스의 일부임을 발견하다",

        # Questions
        "What makes a family?": "가족이란 무엇인가?",
        "How do we find meaning?": "우리는 어떻게 의미를 찾는가?",
        "Can we escape our past?": "과거로부터 벗어날 수 있는가?",
        "What legacy do we leave?": "우리는 어떤 유산을 남기는가?",
        "What defines family?": "무엇이 가족을 정의하는가?",
        "Can people truly change?": "사람은 정말 변할 수 있는가?",
        "What is worth sacrificing?": "무엇을 위해 희생할 가치가 있는가?",
        "How do we forgive?": "우리는 어떻게 용서하는가?",
        "What remains human when humanity is optional?": "인간성이 선택사항일 때 무엇이 인간으로 남는가?"
    }

    # An exact match wins outright
    exact = translations.get(text)
    if exact is not None:
        return exact

    # Otherwise take the first (insertion-order) two-way substring match
    lowered = text.lower()
    for source, rendered in translations.items():
        key = source.lower()
        if key in lowered or lowered in key:
            return rendered

    # No translation known — hand back the original phrase
    return text
+
def extract_theme_metadata(theme_text: str, language: str) -> Dict[str, Any]:
    """Best-effort parse of a generated theme blurb into structured fields.

    Extracts the quoted opening sentence, protagonist description, conflict
    and philosophical-question lines, plus simple keyword tags. Any field
    that cannot be found is left as an empty string/list.

    Args:
        theme_text: The LLM-generated theme blurb.
        language: Present for call-site symmetry; extraction checks both
            English and Korean markers regardless.
    """
    metadata = {
        'title': '',
        'opening_sentence': '',
        'protagonist': '',
        'conflict': '',
        'philosophical_question': '',
        'tags': []
    }

    lines = theme_text.split('\n')

    # Opening sentence: first span wrapped in straight, curly, or CJK quotes.
    quote_re = re.compile(r'["“”「](.*?)["“”」]')
    for line in lines:
        match = quote_re.search(line)
        if match:
            metadata['opening_sentence'] = match.group(1)
            break

    for i, line in enumerate(lines):
        line = line.strip()

        # Title: an unquoted first line, when present
        if i == 0 and not any(q in line for q in ('"', '“', '「')):
            metadata['title'] = line.replace('**', '').strip()

        # Protagonist: the text after the "is"/"은" marker. Split on the
        # whole word ' is ' (not bare 'is', which also matches inside
        # "protagonist") or on the Korean topic particle.
        if any(marker in line for marker in ('protagonist is', '주인공은', 'The protagonist')):
            if ' is ' in line:
                metadata['protagonist'] = line.split(' is ', 1)[1].strip().rstrip('.')
            elif '은' in line:
                metadata['protagonist'] = line.split('은', 1)[1].strip().rstrip('.')

        # Conflict / triggering event line
        if any(marker in line for marker in ('Through', '통해', '계기로', 'face')):
            metadata['conflict'] = line

        # Philosophical question line
        if any(marker in line for marker in ('answer', '답을', 'question', '질문')):
            metadata['philosophical_question'] = line

    # Keyword-derived tags for library search/filtering
    tag_keywords = {
        'family': ['family', '가족', 'father', '아버지', 'mother', '어머니'],
        'love': ['love', '사랑', 'relationship', '관계'],
        'death': ['death', '죽음', 'died', '돌아가신'],
        'memory': ['memory', '기억', 'remember', '추억'],
        'identity': ['identity', '정체성', 'who am I', '누구인지'],
        'work': ['work', '일', 'career', '직업', 'retirement', '은퇴'],
        'aging': ['aging', '노화', 'old', '늙은', 'elderly', '노인']
    }

    theme_lower = theme_text.lower()
    for tag, keywords in tag_keywords.items():
        if any(keyword in theme_lower for keyword in keywords):
            metadata['tags'].append(tag)

    return metadata
+
def save_random_theme_with_hf(theme_text: str, language: str, metadata: Dict[str, Any]) -> str:
    """Persist a generated theme to the local library, then mirror to HF.

    Delegates the SQLite insert to NovelDatabase.save_random_theme (the
    canonical implementation) instead of duplicating the SQL here, then
    makes a best-effort backup to the Hugging Face dataset when a manager
    with a token is configured.

    Returns:
        The generated theme_id.
    """
    theme_id = NovelDatabase.save_random_theme(theme_text, language, metadata)

    # Best-effort backup; failures are logged, never raised to the caller.
    if 'hf_manager' in globals() and hf_manager.token:
        try:
            hf_manager.backup_to_hf()
            logger.info(f"Theme {theme_id} backed up to HF dataset")
        except Exception as e:
            logger.error(f"Failed to backup theme to HF: {e}")

    return theme_id
+
def format_theme_card(theme_data: Dict, language: str) -> str:
    """Render one saved theme as an HTML card for the library grid.

    NOTE(review): the original card markup was lost in this revision
    (HTML stripped from the f-strings); it is reconstructed here against
    the .theme-card* classes defined in theme_library_css.
    """
    import html  # local import: escaping LLM-generated text before embedding

    theme_id = theme_data.get('theme_id', '')
    theme_text = theme_data.get('theme_text', '')
    generated_at = theme_data.get('generated_at', '')
    view_count = theme_data.get('view_count', 0)
    used_count = theme_data.get('used_count', 0)

    # Tags may arrive JSON-encoded (from SQLite) or as a list; tolerate None.
    raw_tags = theme_data.get('tags')
    tags = json.loads(raw_tags) if isinstance(raw_tags, str) else (raw_tags or [])

    # Format the DB timestamp ('YYYY-MM-DD HH:MM:SS') for display.
    if generated_at:
        try:
            dt = datetime.fromisoformat(generated_at.replace(' ', 'T'))
            time_str = dt.strftime('%Y-%m-%d %H:%M')
        except (ValueError, TypeError, AttributeError):
            time_str = generated_at
    else:
        time_str = ""

    tag_badges = ' '.join(f'<span class="theme-tag">{html.escape(str(tag))}</span>' for tag in tags)

    # Escape the theme text, then turn newlines into visual line breaks.
    formatted_text = html.escape(theme_text).replace('\n', '<br>')

    # NOTE(review): the original wired a JS click handler to the Use button
    # (script lost); the button is rendered for styling and carries the
    # theme id in a data attribute for the page script to hook up.
    use_label = '사용하기' if language == 'Korean' else 'Use'
    card_html = f"""
<div class="theme-card" data-theme-id="{theme_id}">
    <div class="theme-card-header">
        <span class="theme-id">#{theme_id}</span>
        <span class="theme-timestamp">{time_str}</span>
    </div>
    <div class="theme-card-content">
        <div class="theme-full-text">{formatted_text}</div>
        <div class="theme-tags">{tag_badges}</div>
    </div>
    <div class="theme-card-footer">
        <div class="theme-stats">
            <span class="theme-stat">👁 {view_count}</span>
            <span class="theme-stat">✍ {used_count}</span>
        </div>
        <button class="theme-action-btn use-btn" data-theme-id="{theme_id}">{use_label}</button>
    </div>
</div>
"""

    return card_html
+
def get_theme_library_display(language: str = None, search_query: str = "") -> str:
    """Render the saved-theme library as an HTML stats bar plus card grid.

    Args:
        language: Optional language filter passed to the DB query; also
            selects Korean vs English labels.
        search_query: Case-insensitive substring filter on theme text.

    NOTE(review): the original markup (and the js_script it appended) was
    lost in this revision; reconstructed against the .library-stats /
    .theme-cards-grid classes in theme_library_css.
    """
    themes = NovelDatabase.get_random_themes_library(language, limit=50)

    if not themes:
        empty_msg = {
            "Korean": "아직 생성된 테마가 없습니다. 랜덤 버튼을 눌러 첫 테마를 만들어보세요!",
            "English": "No themes generated yet. Click the Random button to create your first theme!"
        }
        return f'<div class="empty-library">{empty_msg.get(language, empty_msg["English"])}</div>'

    # Narrow by search query when one was given
    if search_query:
        search_lower = search_query.lower()
        themes = [t for t in themes if search_lower in t.get('theme_text', '').lower()]

    # Aggregate statistics over the (possibly filtered) themes
    total_themes = len(themes)
    total_views = sum(t.get('view_count', 0) for t in themes)
    total_uses = sum(t.get('used_count', 0) for t in themes)

    stats_html = f"""
<div class="library-stats">
    <div class="stat-item">
        <span class="stat-label">{'총 테마' if language == 'Korean' else 'Total Themes'}</span>
        <span class="stat-value">{total_themes}</span>
    </div>
    <div class="stat-item">
        <span class="stat-label">{'총 조회수' if language == 'Korean' else 'Total Views'}</span>
        <span class="stat-value">{total_views}</span>
    </div>
    <div class="stat-item">
        <span class="stat-label">{'총 사용수' if language == 'Korean' else 'Total Uses'}</span>
        <span class="stat-value">{total_uses}</span>
    </div>
</div>
"""

    # One card per theme inside the responsive grid container
    cards_html = '<div class="theme-cards-grid">'
    for theme in themes:
        cards_html += format_theme_card(theme, language)
    cards_html += '</div>'

    return stats_html + cards_html
# CSS styles for the Gradio UI — light "vintage manuscript" look (paper palette,
# serif typography); theme_library_css below styles the saved-theme card grid.
custom_css = """
+/* Global container - Light paper background */
.gradio-container {
- background: linear-gradient(135deg, #1a1a2e 0%, #16213e 50%, #0f3460 100%);
- min-height: 100vh;
+ background: linear-gradient(135deg, #faf8f3 0%, #f5f2e8 50%, #f0ebe0 100%);
+ min-height: 100vh;
+ font-family: 'Georgia', 'Times New Roman', serif;
}
+/* Main header - Classic book cover feel */
.main-header {
- background-color: rgba(255, 255, 255, 0.05);
- backdrop-filter: blur(20px);
- padding: 40px;
- border-radius: 20px;
- margin-bottom: 30px;
- text-align: center;
- color: white;
- border: 2px solid rgba(255, 255, 255, 0.1);
- box-shadow: 0 8px 32px rgba(0, 0, 0, 0.1);
+ background: linear-gradient(145deg, #ffffff 0%, #fdfcf8 100%);
+ backdrop-filter: blur(10px);
+ padding: 45px;
+ border-radius: 20px;
+ margin-bottom: 35px;
+ text-align: center;
+ color: #3d2914;
+ border: 1px solid #e8dcc6;
+ box-shadow: 0 10px 30px rgba(139, 69, 19, 0.08),
+ 0 5px 15px rgba(139, 69, 19, 0.05),
+ inset 0 1px 2px rgba(255, 255, 255, 0.9);
+ position: relative;
+ overflow: hidden;
+}
+
+/* Book spine decoration */
+.main-header::before {
+ content: '';
+ position: absolute;
+ left: 50px;
+ top: 0;
+ bottom: 0;
+ width: 3px;
+ background: linear-gradient(180deg, #d4a574 0%, #c19656 50%, #d4a574 100%);
+ box-shadow: 1px 0 2px rgba(0, 0, 0, 0.1);
}
.header-title {
- font-size: 2.8em;
- margin-bottom: 15px;
- font-weight: 700;
+ font-size: 3.2em;
+ margin-bottom: 20px;
+ font-weight: 700;
+ color: #2c1810;
+ text-shadow: 2px 2px 4px rgba(139, 69, 19, 0.1);
+ font-family: 'Playfair Display', 'Georgia', serif;
+ letter-spacing: -0.5px;
}
.header-description {
- font-size: 0.85em;
- color: #d0d0d0;
- line-height: 1.4;
- margin-top: 20px;
- text-align: left;
- max-width: 900px;
- margin-left: auto;
- margin-right: auto;
+ font-size: 0.95em;
+ color: #5a453a;
+ line-height: 1.7;
+ margin-top: 25px;
+ text-align: justify;
+ max-width: 920px;
+ margin-left: auto;
+ margin-right: auto;
+ font-family: 'Georgia', serif;
}
.badges-container {
- display: flex;
- justify-content: center;
- gap: 10px;
- margin-top: 20px;
- margin-bottom: 20px;
+ display: flex;
+ justify-content: center;
+ gap: 12px;
+ margin-top: 25px;
+ margin-bottom: 25px;
}
+/* Progress notes - Manuscript notes style */
.progress-note {
- background: linear-gradient(135deg, rgba(255, 107, 107, 0.1), rgba(255, 230, 109, 0.1));
- border-left: 4px solid #ff6b6b;
- padding: 20px;
- margin: 25px auto;
- border-radius: 10px;
- color: #fff;
- max-width: 800px;
- font-weight: 500;
+ background: linear-gradient(135deg, #fff9e6 0%, #fff5d6 100%);
+ border-left: 4px solid #d4a574;
+ padding: 22px 30px;
+ margin: 25px auto;
+ border-radius: 12px;
+ color: #5a453a;
+ max-width: 820px;
+ font-weight: 500;
+ box-shadow: 0 4px 12px rgba(212, 165, 116, 0.15);
+ position: relative;
+}
+
+/* Handwritten note effect */
+.progress-note::after {
+ content: '📌';
+ position: absolute;
+ top: -10px;
+ right: 20px;
+ font-size: 24px;
+ transform: rotate(15deg);
}
.warning-note {
- background: rgba(255, 193, 7, 0.1);
- border-left: 4px solid #ffc107;
- padding: 15px;
- margin: 20px auto;
- border-radius: 8px;
- color: #ffd700;
- max-width: 800px;
- font-size: 0.9em;
+ background: #fef3e2;
+ border-left: 4px solid #f6b73c;
+ padding: 18px 25px;
+ margin: 20px auto;
+ border-radius: 10px;
+ color: #7a5c00;
+ max-width: 820px;
+ font-size: 0.92em;
+ box-shadow: 0 3px 10px rgba(246, 183, 60, 0.15);
}
+/* Input section - Writing desk feel */
.input-section {
- background-color: rgba(255, 255, 255, 0.08);
- backdrop-filter: blur(15px);
- padding: 25px;
- border-radius: 15px;
- margin-bottom: 25px;
- border: 1px solid rgba(255, 255, 255, 0.1);
- box-shadow: 0 4px 16px rgba(0, 0, 0, 0.1);
+ background: linear-gradient(145deg, #ffffff 0%, #fcfaf7 100%);
+ backdrop-filter: blur(10px);
+ padding: 30px;
+ border-radius: 16px;
+ margin-bottom: 28px;
+ border: 1px solid #e8dcc6;
+ box-shadow: 0 6px 20px rgba(139, 69, 19, 0.06),
+ inset 0 1px 3px rgba(255, 255, 255, 0.8);
}
+/* Session section - File cabinet style */
.session-section {
- background-color: rgba(255, 255, 255, 0.06);
- backdrop-filter: blur(10px);
- padding: 20px;
- border-radius: 12px;
- margin-top: 25px;
- color: white;
- border: 1px solid rgba(255, 255, 255, 0.08);
+ background: linear-gradient(145deg, #f8f4ed 0%, #f3ede2 100%);
+ backdrop-filter: blur(8px);
+ padding: 22px;
+ border-radius: 14px;
+ margin-top: 28px;
+ color: #3d2914;
+ border: 1px solid #ddd0b8;
+ box-shadow: inset 0 2px 4px rgba(139, 69, 19, 0.08);
}
+/* Display areas - Clean manuscript pages */
#stages-display {
- background-color: rgba(255, 255, 255, 0.97);
- padding: 25px;
- border-radius: 15px;
- max-height: 650px;
- overflow-y: auto;
- box-shadow: 0 8px 24px rgba(0, 0, 0, 0.15);
- color: #2c3e50;
+ background: linear-gradient(to bottom, #ffffff 0%, #fdfcfa 100%);
+ padding: 35px 40px;
+ border-radius: 16px;
+ max-height: 680px;
+ overflow-y: auto;
+ box-shadow: 0 8px 25px rgba(139, 69, 19, 0.08),
+ inset 0 1px 3px rgba(255, 255, 255, 0.9);
+ color: #3d2914;
+ border: 1px solid #e8dcc6;
+ font-family: 'Georgia', serif;
+ line-height: 1.8;
}
#novel-output {
- background-color: rgba(255, 255, 255, 0.97);
- padding: 35px;
- border-radius: 15px;
- max-height: 750px;
- overflow-y: auto;
- box-shadow: 0 8px 24px rgba(0, 0, 0, 0.15);
- color: #2c3e50;
- line-height: 1.8;
+ background: linear-gradient(to bottom, #ffffff 0%, #fdfcfa 100%);
+ padding: 45px 50px;
+ border-radius: 16px;
+ max-height: 780px;
+ overflow-y: auto;
+ box-shadow: 0 10px 30px rgba(139, 69, 19, 0.1),
+ inset 0 1px 3px rgba(255, 255, 255, 0.9);
+ color: #2c1810;
+ line-height: 2.1;
+ font-size: 1.05em;
+ border: 1px solid #e8dcc6;
+ font-family: 'Georgia', serif;
+}
+
+/* Typography enhancements */
+#novel-output h1, #novel-output h2, #novel-output h3 {
+ color: #2c1810;
+ font-family: 'Playfair Display', 'Georgia', serif;
+ margin-top: 30px;
+ margin-bottom: 20px;
+}
+
+#novel-output blockquote {
+ border-left: 3px solid #d4a574;
+ padding-left: 20px;
+ margin: 20px 0;
+ font-style: italic;
+ color: #5a453a;
}
+/* Download section - Book binding style */
.download-section {
- background-color: rgba(255, 255, 255, 0.92);
- padding: 20px;
- border-radius: 12px;
- margin-top: 25px;
- box-shadow: 0 4px 12px rgba(0, 0, 0, 0.1);
+ background: linear-gradient(145deg, #faf6f0 0%, #f5efe6 100%);
+ padding: 24px;
+ border-radius: 14px;
+ margin-top: 28px;
+ box-shadow: 0 5px 15px rgba(139, 69, 19, 0.08);
+ border: 1px solid #e8dcc6;
}
-/* Progress indicator improvements */
+/* Progress bar - Vintage style */
.progress-bar {
- background-color: #e0e0e0;
- height: 25px;
- border-radius: 12px;
- overflow: hidden;
- margin: 15px 0;
- box-shadow: inset 0 2px 4px rgba(0, 0, 0, 0.1);
+ background-color: #f0e6d6;
+ height: 28px;
+ border-radius: 14px;
+ overflow: hidden;
+ margin: 18px 0;
+ box-shadow: inset 0 3px 6px rgba(139, 69, 19, 0.15);
+ border: 1px solid #e0d0b8;
}
.progress-fill {
- background: linear-gradient(90deg, #4CAF50, #8BC34A);
- height: 100%;
- transition: width 0.5s ease;
- box-shadow: 0 2px 8px rgba(76, 175, 80, 0.3);
+ background: linear-gradient(90deg, #d4a574 0%, #c8995d 50%, #d4a574 100%);
+ height: 100%;
+ transition: width 0.6s ease;
+ box-shadow: 0 2px 8px rgba(212, 165, 116, 0.4);
}
-/* Scrollbar styles */
+/* Custom scrollbar - Antique style */
::-webkit-scrollbar {
- width: 10px;
+ width: 12px;
}
::-webkit-scrollbar-track {
- background: rgba(0, 0, 0, 0.1);
- border-radius: 5px;
+ background: #f5f0e6;
+ border-radius: 6px;
+ box-shadow: inset 0 0 3px rgba(139, 69, 19, 0.1);
}
::-webkit-scrollbar-thumb {
- background: rgba(0, 0, 0, 0.3);
- border-radius: 5px;
+ background: linear-gradient(180deg, #d4a574, #c19656);
+ border-radius: 6px;
+ box-shadow: 0 2px 4px rgba(139, 69, 19, 0.2);
}
::-webkit-scrollbar-thumb:hover {
- background: rgba(0, 0, 0, 0.5);
+ background: linear-gradient(180deg, #c19656, #b08648);
+}
+
+/* Button styling - Vintage typewriter keys */
+.gr-button {
+ background: linear-gradient(145deg, #faf8f5 0%, #f0e8dc 100%);
+ border: 1px solid #d4a574;
+ color: #3d2914;
+ font-weight: 600;
+ box-shadow: 0 3px 8px rgba(139, 69, 19, 0.15),
+ inset 0 1px 2px rgba(255, 255, 255, 0.8);
+ transition: all 0.3s ease;
+ font-family: 'Georgia', serif;
}
-/* Button hover effects */
.gr-button:hover {
- transform: translateY(-2px);
- box-shadow: 0 4px 12px rgba(0, 0, 0, 0.2);
- transition: all 0.3s ease;
+ transform: translateY(-2px);
+ box-shadow: 0 5px 12px rgba(139, 69, 19, 0.2),
+ inset 0 1px 3px rgba(255, 255, 255, 0.9);
+ background: linear-gradient(145deg, #fdfbf8 0%, #f3ebe0 100%);
+}
+
+.gr-button:active {
+ transform: translateY(0);
+ box-shadow: 0 2px 5px rgba(139, 69, 19, 0.15),
+ inset 0 1px 2px rgba(139, 69, 19, 0.1);
+}
+
+/* Primary button - Gold accent */
+.gr-button.primary, button[variant="primary"] {
+ background: linear-gradient(145deg, #e4c896 0%, #d4a574 100%);
+ border: 1px solid #c19656;
+ color: #2c1810;
+ font-weight: 700;
+}
+
+.gr-button.primary:hover, button[variant="primary"]:hover {
+ background: linear-gradient(145deg, #e8d0a4 0%, #ddb280 100%);
+}
+
+/* Secondary button - Deep brown */
+.gr-button.secondary, button[variant="secondary"] {
+ background: linear-gradient(145deg, #8b6239 0%, #6d4e31 100%);
+ border: 1px solid #5a3e28;
+ color: #faf8f5;
+}
+
+.gr-button.secondary:hover, button[variant="secondary"]:hover {
+ background: linear-gradient(145deg, #96693f 0%, #785436 100%);
+}
+
+/* Input fields - Parchment style */
+input[type="text"], textarea, .gr-textbox textarea {
+ background: #fffefa;
+ border: 1px solid #d4c4b0;
+ color: #3d2914;
+ font-family: 'Georgia', serif;
+ box-shadow: inset 0 2px 4px rgba(139, 69, 19, 0.05);
+}
+
+input[type="text"]:focus, textarea:focus, .gr-textbox textarea:focus {
+ border-color: #c19656;
+ box-shadow: 0 0 0 2px rgba(212, 165, 116, 0.2),
+ inset 0 2px 4px rgba(139, 69, 19, 0.05);
+ outline: none;
+}
+
+/* Tab styling - Book chapters */
+.gr-tab-button {
+ background: #f5f0e6;
+ border: 1px solid #d4c4b0;
+ color: #5a453a;
+ font-weight: 600;
+}
+
+.gr-tab-button.selected {
+ background: linear-gradient(145deg, #ffffff 0%, #fdfcf8 100%);
+ border-bottom-color: transparent;
+ color: #2c1810;
+ box-shadow: 0 -2px 8px rgba(139, 69, 19, 0.1);
+}
+
+/* Dropdown styling */
+select, .gr-dropdown {
+ background: #fffefa;
+ border: 1px solid #d4c4b0;
+ color: #3d2914;
+}
+
+/* Radio button styling */
+.gr-radio-group {
+ background: transparent;
+}
+
+.gr-radio-group label {
+ color: #3d2914;
+}
+
+/* Examples section */
+.gr-examples {
+ background: #f8f4ed;
+ border: 1px solid #e8dcc6;
+ border-radius: 12px;
+ padding: 20px;
+ margin-top: 20px;
+}
+
+/* Loading animation - Typewriter effect */
+@keyframes typewriter {
+ from { width: 0; }
+ to { width: 100%; }
+}
+
+.typing-indicator {
+ overflow: hidden;
+ border-right: 3px solid #3d2914;
+ white-space: nowrap;
+ animation: typewriter 3s steps(40, end);
+}
+
+/* Markdown content styling */
+.markdown-text h1, .markdown-text h2, .markdown-text h3 {
+ color: #2c1810;
+ font-family: 'Playfair Display', 'Georgia', serif;
+}
+
+.markdown-text p {
+ color: #3d2914;
+ line-height: 1.8;
+}
+
+.markdown-text code {
+ background: #f5f0e6;
+ padding: 2px 6px;
+ border-radius: 4px;
+ font-family: 'Courier New', monospace;
+ color: #5a453a;
+}
+
+/* File component styling */
+.gr-file {
+ background: #faf8f5;
+ border: 1px solid #d4c4b0;
+ border-radius: 8px;
+}
+
+/* Status text special styling */
+#status_text textarea {
+ background: linear-gradient(145deg, #fff9e6 0%, #fff5d6 100%);
+ border: 2px solid #d4a574;
+ font-weight: 600;
+ text-align: center;
+}
+"""
+
+# Additional CSS for scrollable theme library
+theme_library_css = """
+/* Theme Library Styles - Simplified card design */
+.library-stats {
+ display: flex;
+ justify-content: space-around;
+ margin-bottom: 30px;
+ padding: 20px;
+ background: linear-gradient(145deg, #f8f4ed 0%, #f3ede2 100%);
+ border-radius: 12px;
+ box-shadow: 0 4px 12px rgba(139, 69, 19, 0.08);
+}
+
+.stat-item {
+ text-align: center;
+}
+
+.stat-label {
+ display: block;
+ font-size: 0.9em;
+ color: #5a453a;
+ margin-bottom: 5px;
+}
+
+.stat-value {
+ display: block;
+ font-size: 2em;
+ font-weight: bold;
+ color: #2c1810;
+ font-family: 'Playfair Display', 'Georgia', serif;
+}
+
+.theme-cards-grid {
+ display: grid;
+ grid-template-columns: repeat(auto-fill, minmax(400px, 1fr));
+ gap: 25px;
+ padding: 20px;
+}
+
+.theme-card {
+ background: linear-gradient(145deg, #ffffff 0%, #fdfcf8 100%);
+ border: 1px solid #e8dcc6;
+ border-radius: 12px;
+ padding: 0;
+ box-shadow: 0 4px 12px rgba(139, 69, 19, 0.06);
+ transition: all 0.3s ease;
+ position: relative;
+ overflow: hidden;
+ display: flex;
+ flex-direction: column;
+ height: 450px; /* Reduced height */
+}
+
+.theme-card:hover {
+ transform: translateY(-3px);
+ box-shadow: 0 6px 20px rgba(139, 69, 19, 0.12);
+}
+
+.theme-card-header {
+ display: flex;
+ justify-content: space-between;
+ align-items: center;
+ padding: 15px 20px;
+ border-bottom: 1px solid #e8dcc6;
+ background: linear-gradient(145deg, #faf6f0 0%, #f5efe6 100%);
+ flex-shrink: 0;
+}
+
+.theme-id {
+ font-family: 'Courier New', monospace;
+ color: #8b6239;
+ font-size: 0.85em;
+ font-weight: bold;
+}
+
+.theme-timestamp {
+ font-size: 0.8em;
+ color: #8a7968;
+}
+
+.theme-card-content {
+ flex: 1;
+ overflow-y: auto;
+ padding: 20px;
+ background: #fffefa;
+}
+
+/* Custom scrollbar for theme content */
+.theme-card-content::-webkit-scrollbar {
+ width: 6px;
+}
+
+.theme-card-content::-webkit-scrollbar-track {
+ background: #f5f0e6;
+ border-radius: 3px;
+}
+
+.theme-card-content::-webkit-scrollbar-thumb {
+ background: #d4a574;
+ border-radius: 3px;
+}
+
+.theme-card-content::-webkit-scrollbar-thumb:hover {
+ background: #c19656;
+}
+
+.theme-full-text {
+ font-family: 'Georgia', serif;
+ line-height: 1.8;
+ color: #3d2914;
+ margin-bottom: 15px;
+ font-size: 0.95em;
+ text-align: justify;
+}
+
+.theme-tags {
+ display: flex;
+ flex-wrap: wrap;
+ gap: 6px;
+ margin-top: 15px;
+ padding-top: 15px;
+ border-top: 1px solid #e8dcc6;
+}
+
+.theme-tag {
+ display: inline-block;
+ padding: 4px 12px;
+ background: #f0e6d6;
+ border-radius: 15px;
+ font-size: 0.75em;
+ color: #6d4e31;
+ border: 1px solid #d4c4b0;
+}
+
+.theme-card-footer {
+ display: flex;
+ justify-content: space-between;
+ align-items: center;
+ padding: 15px 20px;
+ border-top: 1px solid #e8dcc6;
+ background: linear-gradient(145deg, #faf6f0 0%, #f5efe6 100%);
+ flex-shrink: 0;
+}
+
+.theme-stats {
+ display: flex;
+ gap: 15px;
+}
+
+.theme-stat {
+ font-size: 0.85em;
+ color: #8a7968;
+}
+
+.theme-action-btn {
+ padding: 8px 20px;
+ font-size: 0.9em;
+ border-radius: 6px;
+ border: 1px solid #d4a574;
+ background: linear-gradient(145deg, #e4c896 0%, #d4a574 100%);
+ color: #2c1810;
+ cursor: pointer;
+ transition: all 0.2s ease;
+ font-family: 'Georgia', serif;
+ font-weight: bold;
+}
+
+.theme-action-btn:hover {
+ background: linear-gradient(145deg, #e8d0a4 0%, #ddb280 100%);
+ transform: translateY(-1px);
+ box-shadow: 0 3px 8px rgba(212, 165, 116, 0.3);
+}
+
+.use-btn {
+ background: linear-gradient(145deg, #e4c896 0%, #d4a574 100%);
+ font-weight: bold;
+}
+
+.empty-library {
+ text-align: center;
+ padding: 60px 20px;
+ color: #5a453a;
+ font-size: 1.1em;
+ font-style: italic;
+}
+
+/* Responsive design */
+@media (max-width: 768px) {
+ .theme-cards-grid {
+ grid-template-columns: 1fr;
+ }
+
+ .theme-card {
+ height: 400px;
+ }
}
"""
# Create Gradio interface
def create_interface():
- with gr.Blocks(theme=gr.themes.Soft,css=custom_css, title="AGI NOVEL Generator") as interface:
- gr.HTML("""
-
-
+ # Combine CSS
+ combined_css = custom_css + theme_library_css
+
+ # Using Soft theme with safe color options
+ with gr.Blocks(theme=gr.themes.Soft(), css=combined_css, title="AGI NOVEL Generator") as interface:
+ gr.HTML("""
+
+
+
+
-
+
-
+
+ 🎲 Novel Theme Random Generator: This system can generate up to approximately 170 quadrillion (1.7 × 10¹⁷) unique novel themes.
+ Even writing 100 novels per day, it would take about 4.7 trillion years to exhaust all combinations.
+ Click the "Random" button to explore infinite creative possibilities!
+
-
- ⏱️ Note: Creating a complete novel takes approximately 20 minutes. If your web session disconnects, you can restore your work using the "Session Recovery" feature.
-
+
+ ⏱️ Note: Creating a complete novel takes approximately 20 minutes. If your web session disconnects, you can restore your work using the "Session Recovery" feature.
+
-
- 🎯 Core Innovation: Not fragmented texts from multiple writers,
- but a genuine full-length novel written consistently by a single author from beginning to end.
-
-
- """)
-
- # State management
- current_session_id = gr.State(None)
+
+ 🎯 Core Innovation: Not fragmented texts from multiple writers,
+ but a genuine full-length novel written consistently by a single author from beginning to end.
+
+
+ """)
- with gr.Row():
- with gr.Column(scale=1):
- with gr.Group(elem_classes=["input-section"]):
- query_input = gr.Textbox(
- label="Novel Theme",
- placeholder="""Enter your novella theme.
-Examples: Character transformation, relationship evolution, social conflict and personal choice...""",
- lines=5
- )
-
- language_select = gr.Radio(
- choices=["English", "Korean"],
- value="English",
- label="Language"
- )
+ # State management
+ current_session_id = gr.State(None)
+ selected_theme_id = gr.State(None)
+ selected_theme_text = gr.State(None)
+
+ # Create tabs and store reference
+ with gr.Tabs() as main_tabs:
+ # Main Novel Writing Tab
+ with gr.Tab("📝 Novel Writing", elem_id="writing_main_tab"):
+ # Input section at the top with full width
+ with gr.Group(elem_classes=["input-section"]):
+ gr.Markdown("### ✍️ Writing Desk")
+
+ with gr.Row():
+ with gr.Column(scale=3):
+ query_input = gr.Textbox(
+ label="Novel Theme",
+ placeholder="""Enter your novella theme. Like a seed that grows into a tree, your theme will blossom into a full narrative...
+
+You can describe:
+- A specific situation or conflict
+- Character relationships and dynamics
+- Philosophical questions to explore
+- Social or personal transformations
+- Any combination of the above
+
+The more detailed your theme, the richer the resulting narrative will be.""",
+ lines=8,
+ elem_id="theme_input"
+ )
+
+ with gr.Column(scale=1):
+ language_select = gr.Radio(
+ choices=["English", "Korean"],
+ value="English",
+ label="Language",
+ elem_id="language_select"
+ )
+
+ with gr.Column():
+ random_btn = gr.Button("🎲 Random Theme", variant="primary", size="lg")
+ submit_btn = gr.Button("🖋️ Begin Writing", variant="secondary", size="lg")
+ clear_btn = gr.Button("🗑️ Clear All", size="lg")
+
+ status_text = gr.Textbox(
+ label="Writing Progress",
+ interactive=False,
+ value="✨ Ready to begin your literary journey",
+ elem_id="status_text"
+ )
+
+ # Session management section
+ with gr.Group(elem_classes=["session-section"]):
+ gr.Markdown("### 📚 Your Library")
+ with gr.Row():
+ session_dropdown = gr.Dropdown(
+ label="Saved Manuscripts",
+ choices=[],
+ interactive=True,
+ elem_id="session_dropdown",
+ scale=3
+ )
+ refresh_btn = gr.Button("🔄 Refresh", scale=1)
+ resume_btn = gr.Button("📖 Continue", variant="secondary", scale=1)
+ auto_recover_btn = gr.Button("🔮 Recover Last", scale=1)
+
+ # Output sections below input
+ with gr.Row():
+ with gr.Column():
+ with gr.Tab("🖋️ Writing Process", elem_id="writing_tab"):
+ stages_display = gr.Markdown(
+ value="*Your writing journey will unfold here, like pages turning in a book...*",
+ elem_id="stages-display"
+ )
- with gr.Row():
- submit_btn = gr.Button("🚀 Start Writing", variant="primary", scale=2)
- clear_btn = gr.Button("🗑️ Clear", scale=1)
+ with gr.Tab("📖 Completed Manuscript", elem_id="manuscript_tab"):
+ novel_output = gr.Markdown(
+ value="*Your completed novel will appear here, ready to be read and cherished...*",
+ elem_id="novel-output"
+ )
+
+ with gr.Group(elem_classes=["download-section"]):
+ gr.Markdown("### 📦 Bind Your Book")
+ with gr.Row():
+ format_select = gr.Radio(
+ choices=["DOCX", "TXT"],
+ value="DOCX" if DOCX_AVAILABLE else "TXT",
+ label="Format",
+ elem_id="format_select"
+ )
+ download_btn = gr.Button("📥 Download Manuscript", variant="secondary")
+
+ download_file = gr.File(
+ label="Your Manuscript",
+ visible=False,
+ elem_id="download_file"
+ )
+
+ # Hidden state
+ novel_text_state = gr.State("")
+
+ # Examples with literary flair
+ gr.Examples(
+ examples=[
+ ["A daughter discovering her mother's hidden past through old letters found in an attic trunk"],
+ ["An architect losing sight who learns to design through touch, sound, and the memories of light"],
+ ["A translator replaced by AI rediscovering the essence of language through handwritten poetry"],
+ ["A middle-aged man who lost his job finding new meaning in the rhythms of rural life"],
+ ["A doctor with war trauma finding healing through Doctors Without Borders missions"],
+ ["A neighborhood coming together to save their beloved bookstore from corporate development"],
+ ["A year in the life of a professor losing memory and his devoted last student"]
+ ],
+ inputs=query_input,
+ label="💡 Inspiring Themes",
+ examples_per_page=7,
+ elem_id="example_themes"
+ )
+
+ # Random Theme Library Tab
+ with gr.Tab("🎲 Random Theme Library", elem_id="library_tab"):
+ with gr.Column():
+ gr.Markdown("""
+ ### 📚 Random Theme Library
- status_text = gr.Textbox(
- label="Progress Status",
- interactive=False,
- value="🔄 Ready"
- )
+ Browse through all randomly generated themes. Each theme is unique and can be used to create a novel.
+ """)
- # Session management
- with gr.Group(elem_classes=["session-section"]):
- gr.Markdown("### 💾 Active Works")
- session_dropdown = gr.Dropdown(
- label="Saved Sessions",
- choices=[],
- interactive=True
- )
- with gr.Row():
- refresh_btn = gr.Button("🔄 Refresh", scale=1)
- resume_btn = gr.Button("▶️ Resume", variant="secondary", scale=1)
- auto_recover_btn = gr.Button("♻️ Recover Recent Work", scale=1)
-
- with gr.Column(scale=2):
- with gr.Tab("📝 Writing Process"):
- stages_display = gr.Markdown(
- value="Writing process will be displayed in real-time...",
- elem_id="stages-display"
- )
-
- with gr.Tab("📖 Completed Work"):
- novel_output = gr.Markdown(
- value="Completed novel will be displayed here...",
- elem_id="novel-output"
- )
-
- with gr.Group(elem_classes=["download-section"]):
- gr.Markdown("### 📥 Download Work")
- with gr.Row():
- format_select = gr.Radio(
- choices=["DOCX", "TXT"],
- value="DOCX" if DOCX_AVAILABLE else "TXT",
- label="File Format"
- )
- download_btn = gr.Button("⬇️ Download", variant="secondary")
+ with gr.Row():
+ library_search = gr.Textbox(
+ label="Search Themes",
+ placeholder="Search by keywords...",
+ elem_classes=["library-search"],
+ scale=2
+ )
+ library_language_filter = gr.Radio(
+ choices=["All", "English", "Korean"],
+ value="All",
+ label="Filter by Language",
+ scale=2
+ )
+ library_refresh_btn = gr.Button("🔄 Refresh", scale=1)
- download_file = gr.File(
- label="Download File",
- visible=False
- )
-
- # Hidden state
- novel_text_state = gr.State("")
-
- # Examples
- with gr.Row():
- gr.Examples(
- examples=[
- ["A daughter discovering her mother's hidden past through old letters"],
- ["An architect losing sight who learns to design through touch and sound"],
- ["A translator replaced by AI rediscovering the essence of language through classical literature transcription"],
- ["A middle-aged man who lost his job finding new meaning in rural life"],
- ["A doctor with war trauma healing through Doctors Without Borders"],
- ["Community solidarity to save a neighborhood bookstore from redevelopment"],
- ["A year with a professor losing memory and his last student"]
- ],
- inputs=query_input,
- label="💡 Theme Examples"
- )
-
- # Event handlers
- def refresh_sessions():
- try:
- sessions = get_active_sessions("English")
- return gr.update(choices=sessions)
- except Exception as e:
- logger.error(f"Session refresh error: {str(e)}")
- return gr.update(choices=[])
+ library_display = gr.HTML(
+ value=get_theme_library_display(),
+ elem_id="library-display"
+ )
+
+ # Hidden components for theme interaction
+ selected_theme_for_action = gr.Textbox(visible=False, elem_id="selected_theme_for_action")
+ action_type = gr.Textbox(visible=False, elem_id="action_type")
+ trigger_action = gr.Button("Trigger Action", visible=False, elem_id="trigger_action")
- def handle_auto_recover(language):
- session_id, message = auto_recover_session(language)
- return session_id, message
+ # Event handlers
+ def refresh_sessions():
+ try:
+ sessions = get_active_sessions("English")
+ return gr.update(choices=sessions)
+ except Exception as e:
+ logger.error(f"Session refresh error: {str(e)}")
+ return gr.update(choices=[])
- # Event connections
- submit_btn.click(
- fn=process_query,
- inputs=[query_input, language_select, current_session_id],
- outputs=[stages_display, novel_output, status_text, current_session_id]
- )
+ def handle_auto_recover(language):
+ session_id, message = auto_recover_session(language)
+ return session_id, message
+
+ def handle_random_theme(language):
+ """Handle random theme generation with library storage"""
+ theme = generate_random_theme(language)
+ return theme
- novel_output.change(
- fn=lambda x: x,
- inputs=[novel_output],
- outputs=[novel_text_state]
- )
+ def refresh_library(language_filter, search_query):
+ """Refresh theme library display"""
+ lang = None if language_filter == "All" else language_filter
+ return get_theme_library_display(lang, search_query)
+
+ def handle_library_action(theme_id, action):
+ """Handle theme library actions"""
+ if not theme_id:
+ return gr.update(), gr.update()
+
+ if action == "use":
+ # Handle use action
+ theme_data = NovelDatabase.get_theme_by_id(theme_id)
+ if theme_data:
+ NovelDatabase.update_theme_used_count(theme_id)
+ NovelDatabase.update_theme_view_count(theme_id) # Also update view count
+ return (
+ gr.update(value=theme_data.get('theme_text', '')), # query_input
+ f"Theme #{theme_id[:8]} loaded" # status_text
+ )
+
+ return gr.update(), gr.update()
- resume_btn.click(
- fn=lambda x: x.split("...")[0] if x and "..." in x else x,
- inputs=[session_dropdown],
- outputs=[current_session_id]
- ).then(
- fn=resume_session,
- inputs=[current_session_id, language_select],
- outputs=[stages_display, novel_output, status_text, current_session_id]
- )
+ # Event connections
+ submit_btn.click(
+ fn=process_query,
+ inputs=[query_input, language_select, current_session_id],
+ outputs=[stages_display, novel_output, status_text, current_session_id, novel_text_state] # added novel_text_state
+ )
+
+ resume_btn.click(
+ fn=lambda x: x.split("...")[0] if x and "..." in x else x,
+ inputs=[session_dropdown],
+ outputs=[current_session_id]
+ ).then(
+ fn=resume_session,
+ inputs=[current_session_id, language_select],
+ outputs=[stages_display, novel_output, status_text, current_session_id, novel_text_state] # added novel_text_state
+ )
+
+ auto_recover_btn.click(
+ fn=handle_auto_recover,
+ inputs=[language_select],
+ outputs=[current_session_id, status_text]
+ ).then(
+ fn=resume_session,
+ inputs=[current_session_id, language_select],
+ outputs=[stages_display, novel_output, status_text, current_session_id, novel_text_state] # added novel_text_state
+ )
+
+
+ refresh_btn.click(
+ fn=refresh_sessions,
+ outputs=[session_dropdown]
+ )
+
+ clear_btn.click(
+ fn=lambda: ("", "", "✨ Ready to begin your literary journey", "", None), # reset displays, novel text state, and session id
+ outputs=[stages_display, novel_output, status_text, novel_text_state, current_session_id]
+ )
+
+ random_btn.click(
+ fn=handle_random_theme,
+ inputs=[language_select],
+ outputs=[query_input],
+ queue=False
+ )
- auto_recover_btn.click(
- fn=handle_auto_recover,
- inputs=[language_select],
- outputs=[current_session_id, status_text]
- ).then(
- fn=resume_session,
- inputs=[current_session_id, language_select],
- outputs=[stages_display, novel_output, status_text, current_session_id]
- )
+ # Library event handlers
+ library_refresh_btn.click(
+ fn=refresh_library,
+ inputs=[library_language_filter, library_search],
+ outputs=[library_display]
+ )
- refresh_btn.click(
- fn=refresh_sessions,
- outputs=[session_dropdown]
- )
+ library_search.change(
+ fn=refresh_library,
+ inputs=[library_language_filter, library_search],
+ outputs=[library_display]
+ )
- clear_btn.click(
- fn=lambda: ("", "", "🔄 Ready", "", None),
- outputs=[stages_display, novel_output, status_text, novel_text_state, current_session_id]
- )
+ library_language_filter.change(
+ fn=refresh_library,
+ inputs=[library_language_filter, library_search],
+ outputs=[library_display]
+ )
+
+ # Handle clicks on library display - using trigger button
+ trigger_action.click(
+ fn=handle_library_action,
+ inputs=[selected_theme_for_action, action_type],
+ outputs=[query_input, status_text]
+ )
+ def handle_download(format_type, language, session_id, novel_text):
+ if not session_id or not novel_text:
+ return gr.update(visible=False)
+
+ file_path = download_novel(novel_text, format_type, language, session_id)
+ if file_path:
+ return gr.update(value=file_path, visible=True)
+ else:
+ return gr.update(visible=False)
- download_btn.click(
- fn=handle_download,
- inputs=[format_select, language_select, current_session_id, novel_text_state],
- outputs=[download_file]
- )
+ download_btn.click(
+ fn=handle_download,
+ inputs=[format_select, language_select, current_session_id, novel_text_state],
+ outputs=[download_file]
+ )
- # Load sessions on start
- interface.load(
- fn=refresh_sessions,
- outputs=[session_dropdown]
- )
+ # Load sessions and library on start
+ def initialize_interface():
+ # Sync with HF dataset on startup
+ if hf_manager is not None and hf_manager.token:
+ hf_manager.sync_with_local_db()
+
+ return refresh_sessions(), refresh_library("All", "")
+
+ interface.load(
+ fn=initialize_interface,
+ outputs=[session_dropdown, library_display]
+ )
- return interface
+ return interface
+# Initialize HF Dataset Manager as global variable
+hf_manager = None
-# Main execution
+# Main function
if __name__ == "__main__":
- logger.info("AGI NOVEL Generator v2.0 Starting...")
- logger.info("=" * 60)
-
- # Environment check
- logger.info(f"API Endpoint: {API_URL}")
- logger.info(f"Target Length: {TARGET_WORDS:,} words")
- logger.info(f"Minimum Words per Part: {MIN_WORDS_PER_PART:,} words")
- logger.info("System Features: Single writer + Immediate part-by-part critique")
-
- if BRAVE_SEARCH_API_KEY:
- logger.info("Web search enabled.")
- else:
- logger.warning("Web search disabled.")
-
- if DOCX_AVAILABLE:
- logger.info("DOCX export enabled.")
- else:
- logger.warning("DOCX export disabled.")
-
- logger.info("=" * 60)
-
- # Initialize database
- logger.info("Initializing database...")
- NovelDatabase.init_db()
- logger.info("Database initialization complete.")
-
- # Create and launch interface
- interface = create_interface()
-
- interface.launch(
- server_name="0.0.0.0",
- server_port=7860,
- share=False,
- debug=True
- )
\ No newline at end of file
+ logger.info("AGI NOVEL Generator v2.0 Starting...")
+ logger.info("=" * 60)
+
+ # Environment check
+ logger.info(f"API Endpoint: {API_URL}")
+ logger.info(f"Target Length: {TARGET_WORDS:,} words")
+ logger.info(f"Minimum Words per Part: {MIN_WORDS_PER_PART:,} words")
+ logger.info("System Features: Single writer + Immediate part-by-part critique")
+
+ if BRAVE_SEARCH_API_KEY:
+ logger.info("Web search enabled.")
+ else:
+ logger.warning("Web search disabled.")
+
+ if DOCX_AVAILABLE:
+ logger.info("DOCX export enabled.")
+ else:
+ logger.warning("DOCX export disabled.")
+
+ logger.info("=" * 60)
+
+ # Initialize database
+ logger.info("Initializing database...")
+ NovelDatabase.init_db()
+ logger.info("Database initialization complete.")
+
+ # Initialize HF Dataset Manager
+ logger.info("Initializing HuggingFace dataset manager...")
+ hf_manager = HFDatasetManager()
+
+ if hf_manager.token:
+ logger.info("HuggingFace authentication successful.")
+ # Sync with HF dataset on startup
+ hf_manager.sync_with_local_db()
+ else:
+ logger.warning("HuggingFace token not found. Theme persistence will be local only.")
+
+ # Create and launch interface
+ interface = create_interface()
+
+ interface.launch(
+ server_name="0.0.0.0",
+ server_port=7860,
+ share=False,
+ debug=True
+ )
\ No newline at end of file