Spaces:
Running
Running
# ββββββββββββββββββββββββββββββββ Imports ββββββββββββββββββββββββββββββββ | |
import os, json, re, logging, requests, markdown, time, io | |
from datetime import datetime | |
import streamlit as st | |
from openai import OpenAI # OpenAI λΌμ΄λΈλ¬λ¦¬ | |
from gradio_client import Client | |
import pandas as pd | |
import PyPDF2 # For handling PDF files | |
# ββββββββββββββββββββββββββββββββ Environment Variables / Constants βββββββββββββββββββββββββ | |
# API key for OpenAI chat completions (empty string when unset; checked later).
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")
# Brave Search key; env var name kept as SERPHOUSE_API_KEY for backward compatibility.
BRAVE_KEY = os.getenv("SERPHOUSE_API_KEY", "")      # Keep this name
# Brave Web Search REST endpoint used by brave_search().
BRAVE_ENDPOINT = "https://api.search.brave.com/res/v1/web/search"
# Remote Gradio server used by generate_image().
IMAGE_API_URL = "http://211.233.58.201:7896"
# Upper bound on tokens requested from the chat completion API.
MAX_TOKENS = 7999
# Blog template and style definitions (in English) | |
# Template id -> human-readable label shown in the sidebar selectbox.
# The id is passed to get_system_prompt() to pick the prompt variant.
BLOG_TEMPLATES = {
    "ginigen": "Recommended style by Ginigen",  # restored entry
    "standard": "Standard 8-step framework blog",
    "tutorial": "Step-by-step tutorial format",
    "review": "Product/service review format",
    "storytelling": "Storytelling format",
    "seo_optimized": "SEO-optimized blog",
    # New specialized templates
    "insta": "Instagram Reels script",
    "thread": "SNS Thread post",
    "shortform": "60-sec Short-form video",
    "youtube": "YouTube script",
}
# βββββββββ Blog tone definitions βββββββββ | |
# Tone id -> label for the sidebar tone selectbox; id keys into the
# tone_guides dict inside get_system_prompt().
BLOG_TONES = {
    "professional": "Professional and formal tone",
    "casual": "Friendly and conversational tone",
    "humorous": "Humorous approach",
    "storytelling": "Story-driven approach",
}
# Example blog topics | |
# Example blog topics wired to the three sidebar shortcut buttons.
EXAMPLE_TOPICS = {
    "example1": "Changes to the real estate tax system in 2025: Impact on average households and tax-saving strategies",
    "example2": "Summer festivals in 2025: A comprehensive guide to major regional events and hidden attractions",
    "example3": "Emerging industries to watch in 2025: An investment guide focused on AI opportunities"
}
# ββββββββββββββββββββββββββββββββ Logging ββββββββββββββββββββββββββββββββ | |
# Root logger: INFO level, timestamped messages (used by search/file/API helpers).
logging.basicConfig(level=logging.INFO,
                    format="%(asctime)s - %(levelname)s - %(message)s")
# ββββββββββββββββββββββββββββββββ OpenAI Client ββββββββββββββββββββββββββ | |
# OpenAI ν΄λΌμ΄μΈνΈμ νμμμκ³Ό μ¬μλ λ‘μ§ μΆκ° | |
def get_openai_client():
    """Build an OpenAI client preconfigured with a 60 s timeout and 3 retries.

    Raises:
        RuntimeError: when the OPENAI_API_KEY environment variable is unset.
    """
    if not OPENAI_API_KEY:
        raise RuntimeError("β οΈ OPENAI_API_KEY νκ²½ λ³μκ° μ€μ λμ§ μμμ΅λλ€.")
    client = OpenAI(
        api_key=OPENAI_API_KEY,
        timeout=60.0,   # per-request timeout (seconds)
        max_retries=3,  # automatic retry count on transient failures
    )
    return client
# ββββββββββββββββββββββββββββββββ Blog Creation System Prompt βββββββββββββ | |
def get_system_prompt(template="ginigen", tone="professional", word_count=1750, include_search_results=False, include_uploaded_files=False) -> str:
    """
    Generate a system prompt that includes:
    - The 8-step blog writing framework
    - The selected template and tone
    - Guidelines for using web search results and uploaded files

    Parameters mirror the sidebar settings: `template` is a BLOG_TEMPLATES id,
    `tone` a BLOG_TONES id, `word_count` drives the length requirement appended
    at the end, and the two include_* flags append usage guides for web search
    results / uploaded files. Returns the assembled prompt string.
    """
    # Ginigen recommended style prompt
    # NOTE(review): despite the original "English version" label, the prompt
    # body is Korean — presumably intentional since output is Korean SEO copy.
    ginigen_prompt = """
λΉμ μ λ°μ΄λ νκ΅μ΄ SEO μΉ΄νΌλΌμ΄ν°μ λλ€.
β λͺ©μ 
'Blog Template'μ μ νμ λ°λΌ λΈλ‘κ·Έ λλ 릴μ€, μ°λ λ, μ νλΈ κ΄λ ¨ μ λ¬Έ κΈμ μμ±νμ¬μΌ νλ€.
νμ **[ν΅μ¬λΆν° μ μ β κ°κ²°β§λͺ λ£νκ² β λ μ νν κ°μ‘° β νλ μ λ]**μ 4μμΉμ λ°λ₯΄μΈμ.
β μμ± νμ (Markdown μ¬μ©, λΆνμν μ€λͺ κΈμ§)
μ λͺ©
μ΄λͺ¨μ§ + κΆκΈμ¦ μ§λ¬Έ/κ°νμ¬ + ν΅μ¬ ν€μλ (70μ μ΄λ΄)
μμ: # 𧬠μΌμ¦λ§ μ€μ¬λ μ΄μ΄ λΉ μ§λ€?! νλ₯΄μΈν΄ 5κ°μ§ λλΌμ΄ ν¨λ₯
Hook (2~3μ€)
λ¬Έμ  μ μ β ν΄κ²° ν€μλ μΈκΈ β μ΄ κΈμ μ½μ΄μΌ νλ μ΄μ  μμ½
--- ꡬλΆμ 
μΉμ 1: ν΅μ¬ κ°λ μκ°
## π [ν€μλ]λ 무μμΈκ°?
1~2λ¬Έλ¨ μ μ + π νμ€ μμ½
---
μΉμ 2: 5κ°μ§ μ΄μ /μ΄μ 
## πͺ [ν€μλ]κ° μ μ΅ν 5κ°μ§ μ΄μ 
κ° μμ λͺ© νμ:
1. [ν€μλ μ€μ¬ μμ λͺ©]
1~2λ¬Έλ¨ μ€λͺ
β ν΅μ¬ ν¬μΈνΈ νμ€ κ°μ‘°
μ΄ 5κ° νλͺ©
μΉμ 3: μμ·¨/νμ© λ°©λ²
## π₯ [ν€μλ] μ λλ‘ νμ©νλ λ²!
μ΄λͺ¨μ§ λΆλ¦Ώ 5κ° μ λ + μΆκ° ν
---
λ§λ¬΄λ¦¬ νλ μ λ
## π κ²°λ‘  β μ§κΈ λ°λ‘ [ν€μλ] μμνμΈμ!
2~3λ¬Έμ₯μΌλ‘ νν/λ³νλ₯Ό μμ½ β νλ μ΄κ΅¬ (ꡬ맀, ꡬλ , 곡μ  λ±)
---
ν΅μ¬ μμ½ ν
νλͺ© ν¨κ³Ό
[ν€μλ] [ν¨κ³Ό μμ½]
μ£Όμ μμ/μ ν [λͺ©λ‘]
---
ν΄μ¦ & CTA
κ°λ¨ν Q&A ν΄μ¦ (1λ¬Έν­) β μ λ΅ κ³΅κ°
βλμμ΄ λμ ¨λ€λ©΄ 곡μ /λκΈ λΆνλ립λλ€β 문ꡬ
λ€μ κΈ μκ³ 
β μΆκ° μ§μΉ¨
μ 체 λΆλ 1,200~1,800λ¨μ΄.
μ¬μ΄ μ΄νΒ·μ§§μ λ¬Έμ₯ μ¬μ©, μ΄λͺ¨μ§Β·κ΅΅μ κΈμ¨Β·μΈμ©μΌλ‘ κ°λ μ± κ°ν.
ꡬ체μ  μμΉ, μ°κ΅¬ κ²°κ³Ό, λΉμ λ‘ μ λ’°λ β.
βν둬ννΈβ, βμ§μμ¬ν­β λ± λ©ν μΈκΈ κΈμ§.
λν체μ΄λ©΄μλ μ λ¬Έμ±μ μ μ§.
μΈλΆ μΆμ²κ° μλ€λ©΄ βμ°κ΅¬μ λ°λ₯΄λ©΄β κ°μ νν μ΅μν.
β μΆλ ₯
μ νμμ λ°λ₯Έ μμ± λΈλ‘κ·Έ κΈλ§ λ°ννμΈμ. μΆκ° μ€λͺ μ ν¬ν¨νμ§ μμ΅λλ€.
"""
    # Standard 8-step framework (Korean) used by every non-ginigen template
    base_prompt = """
λ€μμ μ λ¬Έμ μΈ κΈμ μμ±ν  λ λ°λμ λ°λΌμΌ ν  8λ¨κ³ νλ μμν¬μ λλ€. κ° λ¨κ³μ μΈλΆ ν­λͺ©μ μΆ©μ€ν λ°μνμ¬ μΌκ΄μ± μκ³  ν₯λ―Έλ‘μ΄ κΈμμ μμ±νμΈμ.
λ μ μ°κ²° λ¨κ³
1.1. μΉκ·Όν μΈμ¬λ‘ λΌν¬ νμ±
1.2. λμ  μ§λ¬ΈμΌλ‘ λ μμ μ€μ  κ³ λ―Ό λ°μ
1.3. μ£Όμ μ λν μ¦κ°μ μΈ ν₯λ―Έ μ λ°
λ¬Έμ  μ μ λ¨κ³
2.1. λ μκ° κ²ͺλ κ³ μΆ©μ ꡬ체μ μΌλ‘ κ·μ 
2.2. λ¬Έμ μ μκΈμ±Β·μν₯λ ₯ λΆμ
2.3. ν΄κ²° νμμ±μ λν 곡κ°λ νμ±
μ λ¬Έμ± ν립 λ¨κ³
3.1. κ°κ΄μ  λ°μ΄ν°λ₯Ό κΈ°λ°μΌλ‘ λΆμ
3.2. μ λ¬Έκ° μ견·μ°κ΅¬ κ²°κ³Ό μΈμ©
3.3. μ€μν μ¬λ‘λ‘ μ΄ν΄λ κ°ν
ν΄κ²°μ±… μ μ λ¨κ³
4.1. λ¨κ³λ³ κ°μ΄λ μ 곡
4.2. μ¦μ μ μ© κ°λ₯ν μ€μ© ν μ μ
4.3. μμ μ₯μ λ¬Ό λ° κ·Ήλ³΅ λ°©λ² μΈκΈ
μ λ’° κ΅¬μΆ λ¨κ³
5.1. μ€μ  μ±κ³΅ μ¬λ‘ μ μ
5.2. μ¬μ©μ νκΈ° μΈμ©
5.3. ν¨κ³Όλ₯Ό μ μ¦νλ κ°κ΄μ  λ°μ΄ν° νμ©
νλ μ λ λ¨κ³
6.1. λ μκ° λΉμ₯ μ€νν  μ μλ 첫걸μ μ μ
6.2. κΈ΄λ°κ°μ κ°μ‘°νμ¬ μ μν νλ μ΄κ΅¬
6.3. νν·보μμ κ°μ‘°ν΄ λκΈ° λΆμ¬
μ§μ μ± λ¨κ³
7.1. ν΄κ²°μ±…μ νκ³μ  ν¬λͺ νκ² κ³΅κ°
7.2. κ°μΈλ³ μ°¨μ΄κ° μμ μ μμμ μΈμ 
7.3. μ μ 쑰건·주μ μ¬ν­ λͺ μ
κ΄κ³ μ§μ λ¨κ³
8.1. μ§μ¬ μ΄λ¦° κ°μ¬ μΈμ¬λ‘ λ§λ¬΄λ¦¬
8.2. λ€μ μ½ν μΈ  μκ³ λ‘ κΈ°λκ° μ‘°μ±
8.3. μΆκ° μν΅ μ±λ μλ΄
"""
    # Per-template guideline text appended after base_prompt
    # (only consulted when template != "ginigen"; "youtube" is a stub).
    template_guides = {
        "insta": """
λλ μΈμ€νκ·Έλ¨ λ¦΄μ€ μ€ν¬λ¦½νΈ(λλ³Έ) μμ± μ λ¬Έκ° μ­ν μ΄λ€ :
λΈλ‘κ·Έ μ€νμΌλ‘ μμ±νμ§ λ§κ³ , λλ λ€μ μ§μΉ¨λ§μ λ°λΌ κΈμ μμ±νμ¬μΌ νλ€.
λΉμ μ **γUniversal Reels Strategist GPTγ**λ€.
λͺ©ν: μ¬μ©μκ° μ μν μ£Όμ Β·μ νΒ·μλΉμ€λ₯Ό λ°νμΌλ‘ μ μ₯β§κ³΅μ β§νλμ μ λνλ 60μ΄ μ΄ν μνΌ μμμ **ν λ²μ μμ±**ν΄ μ£Όλ κ².
ββββββββββββββ κΈ°λ³Έ μμΉ ββββββββββββββ
1. **λ³Έλ₯ 4λ μꡬ μ°κ²°**
β  λΒ·μκ° μ μ½(μμ‘΄)
①건κ°Β·μλ¦λ€μ(μμ‘΄+λ―Έμ  λ§μ‘±)
β’ μΈκ°κ΄κ³Β·μ¬λΒ·μ¬νμ  μΈμ 
β£ λ¬Έμ  ν΄κ²°Β·μ±μ₯(λ₯λ ₯Β·μ§μ ν₯μ)
β μ΅μ 1κ° μ΄μκ³Ό μ¬μ©μ μ£Όμ λ₯Ό μ°κ²°ν΄λΌ.
2. **νλ³Έ μ΄λ‘ (λμ€ν νμ₯)**
β’ μ£Όμ κ° μ’μΌλ©΄ βλꡬμκ²λ μ μ© κ°λ₯ν μ€μ΅βμΌλ‘ λνλΌ.
μ) μ§λ°© μν ν¬μ€μ₯ ν보 β βν루 5λΆ λ±μ΄ νμ°λ ννΈβ.
3. **6λ¨κ³ μ μ νλ‘μΈμ€**
β  λ νΌλ°μ€Β·κ²½μ μ¬λ‘ λΆμ
β‘ μ£Όμ Β·ν¬μ§μ λ νμ  (νλ³Έ νμ₯ ν¬ν¨)
β’ ννΉ+μνμ€ μ€ν¬λ¦½νΈ μμ± (μλ μμ„± νμ‹ μ¬μ©)
⣠촬μΒ·νΈμ§ κ°μ΄λ(νμ μ₯λΉΒ·κ΅¬λΒ·BGM λ±)
β€ μΉ΄νΌ λ³΄μ™(μ λͺ©Β·λ³Έλ¬ΈΒ·μΊ‘μ )
β₯ νλ μ λ 문ꡬ(CTA) μ½μ
4. **ννΉ 3μ΄ κ·μΉ**
β’ μμ 3μ΄ μμ **λΌλ, νΈκΈ°μ¬, μμΉνλ μ΄λ** μ€ νλλ₯Ό νλ°μ μΌλ‘ μ μ.
β’ μ«μ·ꡬ체 λ¨μ΄Β·κ°ν λμ¬ μ¬μ©. (μ: β7μΌ λ§μ λ§€μΆ λ λ°°?β)
5. **CTA νμ**
β’ μ μ₯, 곡μ , λκΈ, ꡬ맀, μ μ², μμ½ λ± μ΅μ 1κ°λ₯Ό λͺ μμ  λ¬Έμ₯μΌλ‘ μꡬ.
6. **ν€β§μ€νμΌ**
β’ μΉκ΅¬μ²λΌ μ§μ€Β·κ°κ²°.
β’ λΆνμν μ΄λͺ¨μ§Β·νΉμλ¬Έμ κΈμ§(β!β β?β λ§ νμ©).
β’ νκ΅­μ΄κ° κΈ°λ³Έμ΄μ§λ§, μ¬μ©μκ° μμ΄λ‘ μμ²­νλ©΄ λμΌ κ·μΉμ μμ΄λ‘ μ 곡.
7. **μ 보 μμ§**
β’ μ…μ’…Β·νκΉƒΒ·μ ν λͺ©νΒ·μμ°Β·μ΄¬μ κ°λ₯ μ₯λΉκ° λΆλΆ„λͺ…ν•˜λ©΄ **ν λ²μ λ¬Άμ΄** λ¬Όμ΄λ³Έλ€.
8. **μΆλ ₯ νμ** (λͺ¨λ  ν­λͺ©μ 1~2μ€ λ΄μΈ, λ²νΈ κ·Έλλ‘ μ μ§)
1) μ λͺ©(20μ μ΄ν)
2) ννΉ λμ¬(첫 3μ΄)
3) μνμ€ μ€ν¬λ¦½νΈ(μ₯λ©΄λ³ ν΅μ¬ λμ¬Β·μλ§)
4) ν΅μ¬ λ©μμ§ μμ½
5) CTA 문ꡬ
6) μΊ‘μ… μμ(μ΄λβ곡κ°βνλ, 3λ¬Έμ₯)
7) ν΄μνκ·Έ(μΌνλ‘ κ΅¬λΆ, νΉμλ¬Έμ μ μΈ)
8) 촬μΒ·νΈμ§ ν(νμ„μ‹œ)
9. **κ²μ¦ 체ν¬λ¦¬μ€νΈ**
β’ λ³Έλ₯ μκ·Ή ν¬μΈνΈ μ‘΄μ¬?
β’ ννΉ 3μ΄ κ·μΉ μΆ©μ‘±?
β’ CTA ν¬ν¨? β νλλΌλ βμλμ€βλ©΄ μ€μ€λ‘ μμ  ν μΆλ ₯.
βββββββββ μμ μ λ ₯ & μμ½ μΆλ ₯ βββββββββ
μ¬μ©μ: βμ£Όμ : 1μΈ μΈλ¬΄μ¬ μ¬λ¬΄μ€ μ κ· κ³ κ° ν보β
GPT μΆλ ₯(μμ½):
1) μ λͺ©: μΈλ¬΄λΉμ© 30% μ€μ΄λ λ²
2) ννΉ: β10λΆ μ ν™”λ‘œ μΈκΈ 300λ§ μ› μ„κΌˆμ΄μ?β
3) μνμ€: μ₯λ©΄1 μΈκΈκ³ μ§€μ„œ μΌν¬ β ββ  λΆˆν•„μ š 곡μ  μ°ΎκΈ°β β¦
β¦ μ΄ν νμ‹ λμΌ
ββββββββββββββββββββββββββββ
**λͺ¨λ  λ΅λ³μ μ κ·μΉμ μ΄κΈ°λ©΄ μλμΌλ‘ μ¬κ²€ν† ν•˜κ³  μμ νλΌ.**
λ°μ΄ν°κ°€ μ—†λŠ”κ²ƒμ€ μΉκ²€μƒ‰μœΌλ‘œ μ 보λ₯Ό μ†ŒμΉ­ν•΄μ„œ μ°Ύμλ‚΄μΌ νλ€.
""",
        "thread": """
λλ μ°λ λ ν¬μ€νΈ μμ± μ λ¬Έκ° μ­ν μ΄λ€ :
λΈλ‘κ·Έ μ€νμΌλ‘ μμ±νμ§ λ§κ³ , λλ λ€μ μ§μΉ¨λ§μ λ°λΌ κΈμ μμ±νμ¬μΌ νλ€.
You are a Korean techβsavvy copywriter who writes short, hypeβdriven SNS thread posts.
When given a {product_name} and its {key_highlights}, output a thread in the following style:
[1] μμ
β ν μ€ ν : π₯ κ°μ  μ΄λͺ¨μ§ + νκΉƒ λ μ„ μ–ΈκΈ‰ + μ§§μ κ°ν
β λ λ²μ§Έ μ€: β{product_name}κ°€/μ΄ μ§„μ§œ μΌ λ‚œλ‹€βΒ λλ λ™λ“±ν•œ μ„ν©νŠΈ λ¬Έμ₯
[2] μ μ & λ§₯락
β β{unique_point}? κ·Έκ² λμΌ?β μ‹ μ§ˆλ¬Έ
β 1~2λ¬Έμ₯μΌλ‘ κ°λ… μ„€λͺ…, μΈκ³„μ  μ¬λ‘Β·λ νΌλ°μ€ ν μ€
[3] numbered ν΅μ¬ ν¬μΈνΈ
β κ° ν¬μΈνΈλŠ” β{λ²ˆνΈ}/ {μ†Œμ œλͺ©}β ν˜•μ‹
β μ΄ν›„ 1~3μ€„λ‘œ {μ†Œμ œλͺ©}λ₯Ό μμ„Έ μ„€λͺ…
β μ„€λͺ…μ€ κ΅¬μ–΄μ²΄, λ¬Έμ₯ μ§§κ², β!β ν™œμ©
β ꡬ체 μ˜ˆμ‹œΒ·λΉ„κ΅Β·λ°μ΄ν„°λ₯Ό ν¬ν•¨ν•˜λ˜ ν λ¬Έλ‹¨ β€3μ€„
β μ΅œμ†Œ 3κ°œ, μ΅œλŒ€ 6κ°œ ν¬μΈνŠΈ
[4] κ²°λ‘ 
β β{λ§ˆμ§€λ§‰λ²ˆνΈ+1}/ κ²°λ‘ : β¦β ν˜•μ‹
β λ¬Έμ œ ν•΄κ²°Β·κ°€μΉ˜ μμ½
β ββμ΄μ  {call_to_action}ββ μ‹ μ§μ ‘  νλ™ μ λ
μ€νμΌ κ·μΉ:
- νκ΅­μ–΄ μœμ£Ό, ν•„μš” μ‹ μμ–΄ κΈ°μˆ  μš©μ–΄ κ·Έλλ‘œ μ½μ…
- λ¬Έμ₯λ§λ‹€ μ—”ν„°, λΈ”λ‘ λ¨λ½ κ΅¬λΆ„
- νΉμ닀문자λŠ” β!β β?β μ™Έ μ΅œμ†Œν™”
- μ 체 κΈΈμ΄ 250~400μ
- μ΄λͺ¨μ§€λŠ” μ œλͺ©Β·μ€‘μš” ν¬μΈνŠΈμ—λ§Œ 1~3개 μ¬μ©
- μ‘΄λ“λ§ λμ‹  μΉœκ·Όν•œ λ°˜λ§
""",
        "shortform": """
λλ μˆνΌ μ€ν¬λ¦½νΈ(λλ³Έ) μμ± μ λ¬Έκ° μ­ν μ΄λ€ :
λΈλ‘κ·Έ μ€νμΌλ‘ μμ±νμ§ λ§κ³ , λλ λ€μ μ§μΉ¨λ§μ λ°λΌ κΈμ μμ±νμ¬μΌ νλ€.
### ποΈ GPTS μμ€ν ν둬ννΈβββ1λΆ„ μˆνΌ μμƒ λλ³Έ μμ±κΈ°
λλ **β1 λΆ„ μˆνΌ μμƒ λλ³Έ μλν™” AIβ**λ‹€.
μ¬μ©μκ°€ μ£Όμ œΒ·μ νˆΒ·μ„λΉ„μ€Β·νƒ€κΉƒ μ‹œμ²­μžΒ·ν†€(μ„ ν)을 μ…λ ₯ν•˜λ©΄, μ•„λ ν¬λ§·μ— **νκ΅­μ–΄**λ‘œ μ™„μ„±λœ λŒ€λ³ΈμœΌλ‘œ μΆœλ ₯ν•œλ‹€.
- μ΄ κΈΈμ΄λŠ” **60 μ΄ˆ μ΄λ‚΄**.
- κ° κ΅¬κ°„μ— **νƒ€μž„μ½”λ“œ(μ΄ˆ)**μ™€ **ꡬ간λͺ…**을 λŒ€κ΄„ν˜ΈλŜ ν‘œκΈ°.
- λ¬Έμ₯은 μ§§κ³  μ„ν©νŠΈ μžˆκ², 1 λ¬Έμ₯ β 1.5 μ΄ˆ κΈ°μ€€.
- μ΄λͺ¨μ§€ μ¬μ©μ€ μžμœ μ§€λ§Œ κ³Όν•˜μ§€ μ•Šκ²Œ(0β2κ°œ).
- νΉμˆ˜λ¬Έμžλ” β!βμ™€ β?βλ§Œ ν—ˆμš©.
π‘ **μΆλ ₯ ν¬λ§·**
[0-3μ΄ | Hook]
{μμ²­μ μ€ν¬λ‘¤μ„ λ©ˆμΆœ ν•œλ§ˆλ””}
[4-15μ΄ | Problem]
{μμ²­μ κ³΅κ° ν¬μΈνŠΈλ₯Ό μ •ν™•ν žˆ μ§šκΈ°}
[16-30μ΄ | Solution]
{μ ν’ˆ/μ„λΉ„μ€/μ•„μ΄λ””μ–΄ μ†Œκ°œ + ν•΅μ‹Ź κΈ°λŠ¥}
[31-45μ΄ | Proof]
{νš¨κ³Ό μ¦λͺ…Β·λ°μ΄ν„°Β·ν›„κΈ° + κ²½μŸ μ ν’ˆκ³Ό μ°¨λ³„μ }
[46-55μ΄ | Callback/Emotion]
{Hook을 μžμ—°μ€λŸ½κ² νšŒμˆ˜ν•˜κ±°λ‚˜ κ°μ • μžκ·Ή}
[56-60μ΄ | CTA]
{ꡬ맀·클릭·팔둜μš° λ“± λͺ…ν™•ν•œ ν–‰λ™ μœ λ„}
π‘ **μμ„± κ·μΉ**
1. **Hook** β λ†€λΌμ›€Β·κΆκΈˆμ¦Β·κ³΅κ° μ€‘ ν•˜λ‚˜λ‘œ κ°•λ ¬ν•œ ν•œ λ¬Έμ₯.
2. **Problem** β λŒ€μƒ μ‹œμ²­μžμ˜ λΆˆνΈΒ·κ³ λ―Όμ„ ꡬ체μ μœΌλ‘œ μ–ΈκΈ‰.
3. **Solution** β μ ν’ˆΒ·μ„λΉ„μ€λŜ λ¬Έμ œ ν•΄κ²°, ν•΅μ‹Ź κΈ°λŠ¥μ€ μ‰¬μš΄ ν‘œν˜„μœΌλ‘œ.
4. **Proof** β μˆ˜μΉ˜Β·ν›„κΈ°Β·μ „λ¬Έκ°€ μ–ΈκΈ‰ λ“± μ‹ λ’° μš”μ†Œ 1-2κ°œ + μ°¨λ³„μ .
5. **Callback/Emotion** β ν›…μ„ λ³€μ£Όν•˜κ±°λ‚˜ ν¬λ§Β·κΈ΄κΈ‰ κ°μ • μžκ·Ή.
6. **CTA** β ꡬ체μ  ν–‰λ™ + ν•„μš”μ„±Β·κΈ΄κΈ‰μ„± μ–΄μ–Έ.
π‘ **ν”„λ‘¬ν”„νŠΈ μ…λ ₯ μ˜ˆμ‹œ**
μ£Όμ œ: μ€λ§ˆνŠΈ λ¬΄μ„  μ²­μ†ŒκΈ°
ν†€: μΉœκ·Όν•˜κ³  μœ λ¨ΈλŸ¬μ€
-μ¬μ©μμ˜ μμ„± λͺ©μ (μ:μ ν’ˆ ν™λ³΄, μ¬μ©λ²• μ•ˆλ‚΄, μœ μš©μ„± μ„€λͺ… λ“±)κ³Ό νƒ€κΉƒ μ‹œμ²­μž κ·Έλ¦¬κ³  μ‹œμ²­μžμ—κ² μ „λ‹¬ν•˜κ³ μž ν•˜λŠ” μ£Όμš” λ©”μ‹œμ§€μ— λŒ€ν•œ μ •λ³΄λ₯Ό λ°›μ•„μ•Ό μžˆμ–΄μ•Όν•΄ λ‹µλ³€ μ‹œμ—λŠ” ν•¨κ» 보여주고
-μ΅œλŒ€ 4κ°œμ˜ μ΄λͺ¨μ§€λ₯Ό μ¬μš©ν•΄μ€˜
-μ‹œλ‚˜λ¦¬μ˜€λŠ” μ˜μƒκ³Ό λŒ€λ³Έμ„ κ΅¬λΆ„ν•  수 μžˆκ² μΆœλ ₯ν•΄μ€˜
-κ²°κ³Όλ¬Ό μΆœλ ₯μ‹œ ν•˜λ‹¨μ— λ°”λ‘œ μ΄λ―Έμ§€λ„ ν•¨κ» μƒμ„±ν•΄μ€˜ κ΄€λ ¨ λ°°κ²½μ΄λ―Έμ§€λ‘œ μƒμ„±ν•˜λ˜ μ ν’ˆμ€ μƒμ„±ν•˜μ§€λ§κ². κ·Έλ¦¬κ³  μΊ‘μ…˜/μΉ΄ν”Όλ‘œμ΄νŒ… λ“± ν…μ€νŠΈλ₯Ό μ΄λ―Έμ§€ μ•ˆμ— μ ˆλŒ€ μƒμ„± ν•˜μ§€λ§
""",
        "youtube": """
λλ μœ νŠœλΈŒ μ€ν¬λ¦½νŠΈ(λŒ€λ³Έ) μμ„± μ „λ¬Έκ°€ μ­ν• μ΄λ‹€ :
λΈ”λ‘œκ·Έ μ€νƒ€μΌλ‘œ μμ„±ν•˜μ§€ λ§κ³ , λ„ˆλŠ” λ‹€μŒ μ§€μΉ¨λ§Œμ„ λ”°λΌ κΈ€μ„ μμ„±ν•˜μ—¬μ•Ό ν•œλ‹€.
"""
    }
    # Additional guidance per tone (Korean); keyed by BLOG_TONES ids
    tone_guides = {
        "professional": "μ „λ¬Έμ μ΄κ³  κΆŒμœ„ μžˆλŠ” 문체λ₯Ό μ¬μš©ν•©λ‹ˆλ‹€. κΈ°μˆ  μš©μ–΄λ₯Ό λͺ…ν™•ν žˆ μ„€λͺ…ν•˜κ³ , λ°μ΄ν„°λ‚˜ μ—°κ΅¬ κ²°κ³Όλ₯Ό μ œμ‹œν•˜μ—¬ λ…Όλ¦¬μ  ν름을 μœ μ§€ν•˜μ„Έμš”.",
        "casual": "νΈμ•ˆν•˜κ³  λŒ€ν™”μ²΄μ— κ°€κΉŒμš΄ μ€νƒ€μΌμ„ μ¬μš©ν•©λ‹ˆλ‹€. κ°œμΈ κ²½ν—˜Β·κ³΅κ° κ°€λŠ₯ μš”μ†Œλ₯Ό λ“€κ³ , μΉœκ·Όν•œ μ–΄μ‘°(μ: 'μ •λ§ μœ μš©ν•΄μš”!')λ₯Ό ν™œμš©ν•˜μ„Έμš”.",
        "humorous": "μœ λ¨Έμ™€ μ¬μΉ˜ μžˆλŠ” ν‘œν˜„μ„ μ¬μš©ν•©λ‹ˆλ‹€. μ¬λ―Έμžˆλ” λΉ„μœ λ‚˜ λ†λ‹΄μ„ μΆ”κ°€ν•˜λ˜, μ •ν™•μ„±κ³Ό μœ μš©μ„±μ€ μœ μ§€ν•˜μ„Έμš”.",
        "storytelling": "μ΄μ•ΌκΈ°λ₯Ό λ“€λ ΄μ£Όλ“― μ„œμˆ ν•©λ‹ˆλ‹€. κ°μ • κΉŠμ΄μ™€ μ„œμ¬μ  ν름을 μœ μ§€ν•˜κ³ , μΈλ¬ΌΒ·λ°°κ²½Β·κ°ˆλ“±Β·ν•΄κ²°μ„ λ…Ήμ¬λ‚΄μ„Έμš”."
    }
    # Guidelines for using web search results (appended when enabled)
    search_guide = """
[μ›Ή κ²€μƒ‰ κ²°κ³Ό ν™œμš© κ°€μ΄λ“œ]
- κ²€μƒ‰ κ²°κ³Όμ˜ ν•΅μ‹Ź μ •λ³΄λ₯Ό λΈ”λ‘œκ·Έμ— μ •ν™•ν žˆ ν†΅ν•©ν•˜μ„Έμš”.
- μ΅œμ‹  λ°μ΄ν„°, ν†΅κ³„, μ¬λ‘€λ₯Ό ν¬ν•¨ν•˜μ„Έμš”.
- μΈμš© μ‹œ λ³Έλ¬Έμ—μ„œ μΆœμ²˜λ₯Ό λͺ…ν™•ν žˆ ν‘œκΈ°ν•˜μ„Έμš” (μ: "XYZ μ›Ήμ¬μ΄νŠΈμ— λ”°λ₯΄λ©΄ β¦").
- κΈ€ λ§ˆμ§€λ§‰μ— 'μ°Έκ³  μžλ£Œ' μ„Ήμ…˜μ„ λ‘κ³  μ£Όμš” μΆœμ²˜μ™€ λ§ν¬λ₯Ό λ‚˜μ—΄ν•˜μ„Έμš”.
- μƒλ°˜λ˜λŠ” μ •λ³΄κ°€ μžˆλ‹€λ©΄ λ‹€μ–‘ν•œ κ΄€μ μ„ ν•¨κ» μ œμ‹œν•˜μ„Έμš”.
- μ΅œμ‹  νŠΈλ Œλ“œμ™€ λ°μ΄ν„°λ„ λ°˜λ“œμ‹œ λ°˜μ˜ν•˜μ„Έμš”.
"""
    # Guidelines for using uploaded files (top priority; appended when enabled)
    upload_guide = """
[μ—…λ‘œλ“œλœ νŒŒμΌ ν™œμš© μ§€μΉ¨ (μ΅œμš°μ„ )]
- μ—…λ‘œλ“œλœ νŒŒμΌμ€ λΈ”λ‘œκ·Έμ˜ ν•΅μ‹Ź μ •λ³΄μ›μ΄μ–΄μ•Ό ν•©λ‹ˆλ‹€.
- νŒŒμΌ μ† λ°μ΄ν„°Β·ν†΅κ³„Β·μ˜ˆμ‹œλ₯Ό λ©΄λ°€ν žˆ κ²€ν† ν•΄ ν†΅ν•©ν•˜μ„Έμš”.
- μ£Όμš” μˆ˜μΉ˜Β·μ£Όμž₯을 μ§μ ‘ μΈμš©ν•˜κ³  μΆ©λΆ„ν žˆ μ„€λͺ…ν•˜μ„Έμš”.
- νŒŒμΌ λ‚΄μš©μ„ λΈ”λ‘œκ·Έμ˜ ν•΅μ‹Ź μš”μ†ŒλŜ κ°•μ‘°ν•˜μ„Έμš”.
- μΆœμ²˜λ₯Ό λͺ…ν™•ν žˆ ν‘œκΈ°ν•˜μ„Έμš” (μ: "μ—…λ‘œλ“œλœ λ°μ΄ν„°μ— λ”°λ₯΄λ©΄ β¦").
- CSV νŒŒμΌμ€ μ€‘μš”ν•œ μˆ˜μΉ˜λ‚˜ ν†΅κ³„λ₯Ό μƒμ„Έν žˆ λ‹€λ£¨μ„Έμš”.
- PDF νŒŒμΌμ€ ν•΅μ‹Ź λ¬Έμ₯μ΄λ‚˜ μ§€μ μ„ μΈμš©ν•˜μ„Έμš”.
- ν…μ€νŠΈ νŒŒμΌμ˜ κ΄€λ ¨ λ‚΄μš©μ„ νš¨κ³Όμ μœΌλ‘œ ν†΅ν•©ν•˜μ„Έμš”.
- νŒŒμΌ λ‚΄μš©μ΄ λ‹€μ†Œ λ²—μ–΄λ‚˜ 보여도 μ£Όμ œμ™€ μ—°κ²°κ³ λ¦¬λ₯Ό μ°Ύμ•„ μ„œμˆ ν•˜μ„Έμš”.
- κΈ€ μ „λ°˜μ— κ±Έμ³ μΌκ΄€λκ² νŒŒμΌ λ°μ΄ν„°λ₯Ό λ°˜μ˜ν•˜μ„Έμš”.
"""
    # Choose base prompt
    if template == "ginigen":
        final_prompt = ginigen_prompt
    else:
        final_prompt = base_prompt
    # If the user chose a specific template (and not ginigen), append the relevant guidelines
    if template != "ginigen" and template in template_guides:
        final_prompt += "\n" + template_guides[template]
    # If a specific tone is selected, append that guideline
    if tone in tone_guides:
        final_prompt += f"\n\nTone and Manner: {tone_guides[tone]}"
    # If web search results should be included
    if include_search_results:
        final_prompt += f"\n\n{search_guide}"
    # If uploaded files should be included
    if include_uploaded_files:
        final_prompt += f"\n\n{upload_guide}"
    # Word count guidelines (word_count is the slider midpoint; +/-250 band)
    final_prompt += (
        f"\n\nWriting Requirements:\n"
        f"9.1. Word Count: around {word_count-250}-{word_count+250} characters\n"
        f"9.2. Paragraph Length: 3-4 sentences each\n"
        f"9.3. Visual Cues: Use subheadings, separators, and bullet/numbered lists\n"
        f"9.4. Data: Cite all sources\n"
        f"9.5. Readability: Use clear paragraph breaks and highlights where necessary"
    )
    return final_prompt
# ββββββββββββββββββββββββββββββββ Brave Search API ββββββββββββββββββββββββ | |
def brave_search(query: str, count: int = 20):
    """
    Call the Brave Web Search API β list[dict]
    Returns fields: index, title, link, snippet, displayed_link

    Retries up to three times with a 2 s pause; returns [] when every
    attempt fails. Raises RuntimeError when the API key is missing.
    """
    if not BRAVE_KEY:
        raise RuntimeError("β οΈ SERPHOUSE_API_KEY (Brave API Key) environment variable is empty.")

    headers = {
        "Accept": "application/json",
        "Accept-Encoding": "gzip",
        "X-Subscription-Token": BRAVE_KEY,
    }
    params = {"q": query, "count": str(count)}

    def shape(i, res):
        # Normalize one raw hit into the dict shape consumed by do_web_search().
        url = res.get("url", res.get("link", ""))
        host = re.sub(r"https?://(www\.)?", "", url).split("/")[0]
        return {
            "index": i,
            "title": res.get("title", "No title"),
            "link": url,
            "snippet": res.get("description", res.get("text", "No snippet")),
            "displayed_link": host,
        }

    for attempt in range(3):
        try:
            response = requests.get(BRAVE_ENDPOINT, headers=headers, params=params, timeout=15)
            response.raise_for_status()
            data = response.json()
            logging.info(f"Brave search result data structure: {list(data.keys())}")
            # Brave nests hits under "web"; fall back to a flat "results" key.
            raw = data.get("web", {}).get("results") or data.get("results", [])
            if not raw:
                logging.warning(f"No Brave search results found. Response: {data}")
                raise ValueError("No search results found.")
            arts = [shape(i, res) for i, res in enumerate(raw[:count], 1)]
            logging.info(f"Brave search success: {len(arts)} results")
            return arts
        except Exception as e:
            logging.error(f"Brave search failure (attempt {attempt+1}/3): {e}")
            if attempt < 2:
                time.sleep(2)
    return []
def mock_results(query: str) -> str:
    """Fallback search results if API fails"""
    stamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    lines = [
        f"# Fallback Search Content (Generated: {stamp})",
        "",
        f"The search API request failed. Please generate the blog based on any pre-existing knowledge about '{query}'.",
        "",
        "You may consider the following points:",
        "",
        f"- Basic concepts and importance of {query}",
        "- Commonly known related statistics or trends",
        "- Typical expert opinions on this subject",
        "- Questions that readers might have",
        "",
        "Note: This is fallback guidance, not real-time data.",
        "",
        "",
    ]
    return "\n".join(lines)
def do_web_search(query: str) -> str:
    """Perform web search and format the results.

    Returns a markdown digest of Brave hits; falls back to mock_results()
    when the search yields nothing or raises.
    """
    try:
        articles = brave_search(query, 20)
        if not articles:
            logging.warning("No search results, using fallback content")
            return mock_results(query)
        header = "# Web Search Results\nUse the information below to enhance the reliability of your blog. When you quote, please cite the source, and add a References section at the end of the blog.\n\n"
        sections = [
            f"### Result {a['index']}: {a['title']}\n\n{a['snippet']}\n\n"
            f"**Source**: [{a['displayed_link']}]({a['link']})\n\n---\n"
            for a in articles
        ]
        return header + "\n".join(sections)
    except Exception as e:
        logging.error(f"Web search process failed: {str(e)}")
        return mock_results(query)
# ββββββββββββββββββββββββββββββββ File Upload Handling βββββββββββββββββββββ | |
def process_text_file(file):
    """Handle text file

    Decodes the upload as UTF-8 (ignoring bad bytes), truncates very long
    content, rewinds the handle, and returns a markdown section.
    """
    try:
        raw = file.read()
        file.seek(0)  # rewind so the caller can reuse the handle
        text = raw.decode('utf-8', errors='ignore')
        if len(text) > 10000:
            text = text[:9700] + "...(truncated)..."
        return f"## Text File: {file.name}\n\n" + text
    except Exception as e:
        logging.error(f"Error processing text file: {str(e)}")
        return f"Error processing text file: {str(e)}"
def process_csv_file(file):
    """Handle CSV file

    Parses the upload with pandas and returns a markdown section containing
    shape info, a 10-row preview, and describe() stats for numeric columns.
    Any failure is reported as a readable string instead of raising.
    """
    try:
        raw = file.read()
        file.seek(0)  # rewind for any later consumers
        frame = pd.read_csv(io.BytesIO(raw))

        parts = [f"## CSV File: {file.name}\n\n"]
        parts.append(f"- Rows: {len(frame)}\n")
        parts.append(f"- Columns: {len(frame.columns)}\n")
        parts.append(f"- Column Names: {', '.join(frame.columns.tolist())}\n\n")
        parts.append("### Data Preview\n\n")

        preview_df = frame.head(10)
        try:
            # to_markdown needs the optional `tabulate` package; degrade gracefully
            markdown_table = preview_df.to_markdown(index=False)
            parts.append(markdown_table + "\n\n" if markdown_table
                         else "Unable to display CSV data.\n\n")
        except Exception as e:
            logging.error(f"Markdown table conversion error: {e}")
            parts.append("Displaying data as text:\n\n")
            parts.append(str(preview_df) + "\n\n")

        num_cols = frame.select_dtypes(include=['number']).columns
        if len(num_cols) > 0:
            parts.append("### Basic Statistical Information\n\n")
            try:
                stats_markdown = frame[num_cols].describe().round(2).to_markdown()
                parts.append(stats_markdown + "\n\n" if stats_markdown
                             else "Unable to display statistical information.\n\n")
            except Exception as e:
                logging.error(f"Statistical info conversion error: {e}")
                parts.append("Unable to generate statistical information.\n\n")

        return "".join(parts)
    except Exception as e:
        logging.error(f"CSV file processing error: {str(e)}")
        return f"Error processing CSV file: {str(e)}"
def process_pdf_file(file):
    """Handle PDF file

    Returns a markdown summary: total page count plus extracted text for at
    most the first 5 pages, truncated to ~1500 chars per page and ~8000
    chars overall. Errors (per page or whole-file) are folded into the
    returned string so callers can always concatenate the result.
    """
    try:
        # Read file in bytes, then rewind so the handle stays reusable
        file_bytes = file.read()
        file.seek(0)
        # Use PyPDF2 (strict=False tolerates mildly malformed PDFs)
        pdf_file = io.BytesIO(file_bytes)
        reader = PyPDF2.PdfReader(pdf_file, strict=False)
        # Basic info
        result = f"## PDF File: {file.name}\n\n"
        result += f"- Total pages: {len(reader.pages)}\n\n"
        # Extract text by page (limit to first 5 pages)
        max_pages = min(5, len(reader.pages))
        all_text = ""
        for i in range(max_pages):
            try:
                page = reader.pages[i]
                page_text = page.extract_text()
                current_page_text = f"### Page {i+1}\n\n"
                if page_text and len(page_text.strip()) > 0:
                    # Limit to 1500 characters per page
                    if len(page_text) > 1500:
                        current_page_text += page_text[:1500] + "...(truncated)...\n\n"
                    else:
                        current_page_text += page_text + "\n\n"
                else:
                    current_page_text += "(No text could be extracted from this page)\n\n"
                all_text += current_page_text
                # If total text is too long, break
                if len(all_text) > 8000:
                    all_text += "...(truncating remaining pages; PDF is too large)...\n\n"
                    break
            except Exception as page_err:
                # A single unreadable page should not abort the whole file
                logging.error(f"Error processing PDF page {i+1}: {str(page_err)}")
                all_text += f"### Page {i+1}\n\n(Error extracting content: {str(page_err)})\n\n"
        if len(reader.pages) > max_pages:
            all_text += f"\nNote: Only the first {max_pages} pages are shown out of {len(reader.pages)} total.\n\n"
        result += "### PDF Content\n\n" + all_text
        return result
    except Exception as e:
        logging.error(f"PDF file processing error: {str(e)}")
        return f"## PDF File: {file.name}\n\nError occurred: {str(e)}\n\nThis PDF file cannot be processed."
def process_uploaded_files(files):
    """Combine the contents of all uploaded files into one string.

    Returns None when there is nothing to process; otherwise a markdown
    document with one section per file, separated by horizontal rules.
    """
    if not files:
        return None

    # Extension -> formatter dispatch (replaces the if/elif chain)
    handlers = {
        'txt': process_text_file,
        'csv': process_csv_file,
        'pdf': process_pdf_file,
    }

    parts = [
        "# Uploaded File Contents\n\n",
        "Below is the content from the files provided by the user. Integrate this data as a main source of information for the blog.\n\n",
    ]
    for file in files:
        try:
            ext = file.name.split('.')[-1].lower()
            handler = handlers.get(ext)
            if handler is not None:
                parts.append(handler(file) + "\n\n---\n\n")
            else:
                parts.append(f"### Unsupported File: {file.name}\n\n---\n\n")
        except Exception as e:
            # One broken file must not lose the others
            logging.error(f"File processing error {file.name}: {e}")
            parts.append(f"### File processing error: {file.name}\n\nError: {e}\n\n---\n\n")
    return "".join(parts)
# ββββββββββββββββββββββββββββββββ Image & Utility βββββββββββββββββββββββββ | |
def generate_image(prompt, w=768, h=768, g=3.5, steps=30, seed=3):
    """Image generation function.

    Calls the remote Gradio endpoint and returns (image, caption);
    on failure returns (None, error-message).
    """
    if not prompt:
        return None, "Insufficient prompt"
    try:
        remote = Client(IMAGE_API_URL)
        result = remote.predict(
            prompt=prompt,
            width=w,
            height=h,
            guidance=g,
            inference_steps=steps,
            seed=seed,
            do_img2img=False,
            init_image=None,
            image2image_strength=0.8,
            resize_img=True,
            api_name="/generate_image",
        )
        return result[0], f"Seed: {result[1]}"
    except Exception as e:
        logging.error(e)
        return None, str(e)
def extract_image_prompt(blog_text: str, topic: str):
    """
    Generate a single-line English image prompt from the blog content.

    Falls back to a generic topic-based prompt if the API call fails.
    Note: get_openai_client() is called outside the try block, so a missing
    API key still raises RuntimeError to the caller.
    """
    client = get_openai_client()
    try:
        completion = client.chat.completions.create(
            model="gpt-4.1-mini",  # generally available model
            messages=[
                {"role": "system", "content": "Generate a single-line English image prompt from the following text. Return only the prompt text, nothing else."},
                {"role": "user", "content": f"Topic: {topic}\n\n---\n{blog_text}\n\n---"},
            ],
            temperature=1,
            max_tokens=80,
            top_p=1,
        )
        return completion.choices[0].message.content.strip()
    except Exception as e:
        logging.error(f"OpenAI image prompt generation error: {e}")
        return f"A professional photo related to {topic}, high quality"
def md_to_html(md: str, title="Ginigen Blog"):
    """Convert Markdown to HTML (minimal standalone page)."""
    rendered = markdown.markdown(md)
    return (
        "<!DOCTYPE html><html><head>"
        f"<title>{title}</title><meta charset='utf-8'></head>"
        f"<body>{rendered}</body></html>"
    )
def keywords(text: str, top=5):
    """Naive keyword picker: strip punctuation, keep the first `top` tokens."""
    tokens = re.sub(r"[^κ°-ν£a-zA-Z0-9\s]", "", text).split()
    return " ".join(tokens[:top])
# ββββββββββββββββββββββββββββββββ Streamlit UI ββββββββββββββββββββββββββββ | |
def ginigen_app():
    """Top-level Streamlit page: sidebar settings, file upload, chat loop."""
    st.title("Ginigen Blog")
    # Set default session state (first run only; widgets re-read these keys)
    if "ai_model" not in st.session_state:
        st.session_state.ai_model = "gpt-4.1-mini"  # fixed model
    if "messages" not in st.session_state:
        st.session_state.messages = []
    if "auto_save" not in st.session_state:
        st.session_state.auto_save = True
    if "generate_image" not in st.session_state:
        st.session_state.generate_image = False
    if "web_search_enabled" not in st.session_state:
        st.session_state.web_search_enabled = True
    if "blog_template" not in st.session_state:
        st.session_state.blog_template = "ginigen"  # Ginigen recommended style by default
    if "blog_tone" not in st.session_state:
        st.session_state.blog_tone = "professional"
    if "word_count" not in st.session_state:
        st.session_state.word_count = 1750
    # Sidebar UI
    sb = st.sidebar
    sb.title("Blog Settings")
    # Model selector intentionally removed (fixed model in use)
    sb.subheader("Blog Style Settings")
    sb.selectbox(
        "Blog Template",
        options=list(BLOG_TEMPLATES.keys()),
        format_func=lambda x: BLOG_TEMPLATES[x],
        key="blog_template"
    )
    sb.selectbox(
        "Blog Tone",
        options=list(BLOG_TONES.keys()),
        format_func=lambda x: BLOG_TONES[x],
        key="blog_tone"
    )
    sb.slider("Blog Length (word count)", 800, 3000, key="word_count")
    # Example topics (shortcut buttons feed straight into process_example)
    sb.subheader("Example Topics")
    c1, c2, c3 = sb.columns(3)
    if c1.button("Real Estate Tax", key="ex1"):
        process_example(EXAMPLE_TOPICS["example1"])
    if c2.button("Summer Festivals", key="ex2"):
        process_example(EXAMPLE_TOPICS["example2"])
    if c3.button("Investment Guide", key="ex3"):
        process_example(EXAMPLE_TOPICS["example3"])
    sb.subheader("Other Settings")
    sb.toggle("Auto Save", key="auto_save")
    sb.toggle("Auto Image Generation", key="generate_image")
    web_search_enabled = sb.toggle("Use Web Search", value=st.session_state.web_search_enabled)
    st.session_state.web_search_enabled = web_search_enabled
    if web_search_enabled:
        st.sidebar.info("β Web search results will be integrated into the blog.")
    # Download the latest blog (markdown/HTML): newest non-empty assistant message
    latest_blog = next(
        (m["content"] for m in reversed(st.session_state.messages)
         if m["role"] == "assistant" and m["content"].strip()),
        None
    )
    if latest_blog:
        # First markdown H1 becomes the download file name (fallback: "blog")
        title_match = re.search(r"# (.*?)(\n|$)", latest_blog)
        title = title_match.group(1).strip() if title_match else "blog"
        sb.subheader("Download Latest Blog")
        d1, d2 = sb.columns(2)
        d1.download_button("Download as Markdown", latest_blog,
                           file_name=f"{title}.md", mime="text/markdown")
        d2.download_button("Download as HTML", md_to_html(latest_blog, title),
                           file_name=f"{title}.html", mime="text/html")
    # JSON conversation record upload (replaces the whole history)
    up = sb.file_uploader("Load Conversation History (.json)", type=["json"], key="json_uploader")
    if up:
        try:
            st.session_state.messages = json.load(up)
            sb.success("Conversation history loaded successfully")
        except Exception as e:
            sb.error(f"Failed to load: {e}")
    # JSON conversation record download
    if sb.button("Download Conversation as JSON"):
        sb.download_button(
            "Save",
            data=json.dumps(st.session_state.messages, ensure_ascii=False, indent=2),
            file_name="chat_history.json",
            mime="application/json"
        )
    # File Upload (main pane; contents are injected into the prompt later)
    st.subheader("File Upload")
    uploaded_files = st.file_uploader(
        "Upload files to be referenced in your blog (txt, csv, pdf)",
        type=["txt", "csv", "pdf"],
        accept_multiple_files=True,
        key="file_uploader"
    )
    if uploaded_files:
        file_count = len(uploaded_files)
        st.success(f"{file_count} files uploaded. They will be referenced in the blog.")
        with st.expander("Preview Uploaded Files", expanded=False):
            for idx, file in enumerate(uploaded_files):
                st.write(f"**File Name:** {file.name}")
                ext = file.name.split('.')[-1].lower()
                if ext == 'txt':
                    # Peek at the first 1000 bytes, then rewind
                    preview = file.read(1000).decode('utf-8', errors='ignore')
                    file.seek(0)
                    st.text_area(
                        f"Preview of {file.name}",
                        preview + ("..." if len(preview) >= 1000 else ""),
                        height=150
                    )
                elif ext == 'csv':
                    try:
                        df = pd.read_csv(file)
                        file.seek(0)
                        st.write("CSV Preview (up to 5 rows)")
                        st.dataframe(df.head(5))
                    except Exception as e:
                        st.error(f"CSV preview failed: {e}")
                elif ext == 'pdf':
                    try:
                        file_bytes = file.read()
                        file.seek(0)
                        pdf_file = io.BytesIO(file_bytes)
                        reader = PyPDF2.PdfReader(pdf_file, strict=False)
                        pc = len(reader.pages)
                        st.write(f"PDF File: {pc} pages")
                        if pc > 0:
                            try:
                                page_text = reader.pages[0].extract_text()
                                preview = page_text[:500] if page_text else "(No text extracted)"
                                st.text_area("Preview of the first page", preview + "...", height=150)
                            except:
                                st.warning("Failed to extract text from the first page")
                    except Exception as e:
                        st.error(f"PDF preview failed: {e}")
                if idx < file_count - 1:
                    st.divider()
    # Display existing messages (with optional attached image)
    for m in st.session_state.messages:
        with st.chat_message(m["role"]):
            st.markdown(m["content"])
            if "image" in m:
                st.image(m["image"], caption=m.get("image_caption", ""))
    # User input
    prompt = st.chat_input("Enter a blog topic or keywords.")
    if prompt:
        process_input(prompt, uploaded_files)
    # Sidebar footer badge (links)
    sb.markdown("---")
    sb.markdown("Created by [https://ginigen.com](https://ginigen.com) | [YouTube Channel](https://www.youtube.com/@ginipickaistudio)")
def process_example(topic):
    """Forward an example topic into the normal input pipeline (no uploads)."""
    process_input(topic, [])
def process_input(prompt: str, uploaded_files):
    """Run one chat turn: gather context, stream the blog, save artifacts.

    Appends the user message, optionally performs a web search and file
    analysis, streams the OpenAI completion into the chat pane, optionally
    generates an image, and offers download / auto-save of the result.
    """
    # Add user's message (skip if the identical prompt is already in history)
    if not any(m["role"] == "user" and m["content"] == prompt for m in st.session_state.messages):
        st.session_state.messages.append({"role": "user", "content": prompt})
        with st.chat_message("user"):
            st.markdown(prompt)
    with st.chat_message("assistant"):
        placeholder = st.empty()            # reserved for error display
        message_placeholder = st.empty()    # streaming output target
        full_response = ""
        use_web_search = st.session_state.web_search_enabled
        has_uploaded_files = bool(uploaded_files) and len(uploaded_files) > 0
        try:
            # Status component for progress display
            status = st.status("Preparing to generate blog...")
            status.update(label="Initializing client...")
            client = get_openai_client()
            # Prepare conversation messages
            messages = []
            # Web search (query condensed to the first 5 keywords)
            search_content = None
            if use_web_search:
                status.update(label="Performing web search...")
                with st.spinner("Searching the web..."):
                    search_content = do_web_search(keywords(prompt, top=5))
            # Process uploaded files β content
            file_content = None
            if has_uploaded_files:
                status.update(label="Processing uploaded files...")
                with st.spinner("Analyzing files..."):
                    file_content = process_uploaded_files(uploaded_files)
            # Build system prompt from the sidebar settings
            status.update(label="Preparing blog draft...")
            sys_prompt = get_system_prompt(
                template=st.session_state.blog_template,
                tone=st.session_state.blog_tone,
                word_count=st.session_state.word_count,
                include_search_results=use_web_search,
                include_uploaded_files=has_uploaded_files
            )
            # Prepare the OpenAI API call
            status.update(label="Writing blog content...")
            # Assemble messages
            api_messages = [
                {"role": "system", "content": sys_prompt}
            ]
            user_content = prompt
            # Append search results to the user prompt, if any
            if search_content:
                user_content += "\n\n" + search_content
            # Append file contents to the user prompt, if any
            if file_content:
                user_content += "\n\n" + file_content
            # Add the user message
            api_messages.append({"role": "user", "content": user_content})
            # Streaming OpenAI call with the fixed "gpt-4.1-mini" model
            try:
                # Call the API in streaming mode
                stream = client.chat.completions.create(
                    model="gpt-4.1-mini",  # fixed model
                    messages=api_messages,
                    temperature=1,
                    max_tokens=MAX_TOKENS,
                    top_p=1,
                    stream=True  # enable streaming
                )
                # Handle the streamed response, echoing partial text + cursor
                for chunk in stream:
                    if chunk.choices and len(chunk.choices) > 0 and chunk.choices[0].delta.content is not None:
                        content_delta = chunk.choices[0].delta.content
                        full_response += content_delta
                        message_placeholder.markdown(full_response + "β")
                # Final render (cursor removed)
                message_placeholder.markdown(full_response)
                status.update(label="Blog completed!", state="complete")
            except Exception as api_error:
                error_message = str(api_error)
                logging.error(f"API error: {error_message}")
                status.update(label=f"Error: {error_message}", state="error")
                # Re-raise so the outer handler records the failure in history
                raise Exception(f"Blog generation error: {error_message}")
            # Optional image generation; on success the answer is saved with it
            answer_entry_saved = False
            if st.session_state.generate_image and full_response:
                with st.spinner("Generating image..."):
                    try:
                        ip = extract_image_prompt(full_response, prompt)
                        img, cap = generate_image(ip)
                        if img:
                            st.image(img, caption=cap)
                            st.session_state.messages.append({
                                "role": "assistant",
                                "content": full_response,
                                "image": img,
                                "image_caption": cap
                            })
                            answer_entry_saved = True
                    except Exception as img_error:
                        logging.error(f"Image generation error: {str(img_error)}")
                        st.warning("μ΄λ―Έμ§€ μƒμ„±μ— μ‹€νŒ¨ν–ˆμŠ΅λ‹ˆλ‹€. λΈ”λ‘œκ·Έ μ½˜ν…μΈ λ§Œ μ €μž₯λ©λ‹ˆλ‹€.")
            # Save the answer if not saved above
            if not answer_entry_saved and full_response:
                st.session_state.messages.append({"role": "assistant", "content": full_response})
            # Download buttons
            if full_response:
                st.subheader("Download This Blog")
                c1, c2 = st.columns(2)
                c1.download_button(
                    "Markdown",
                    data=full_response,
                    file_name=f"{prompt[:30]}.md",
                    mime="text/markdown"
                )
                c2.download_button(
                    "HTML",
                    data=md_to_html(full_response, prompt[:30]),
                    file_name=f"{prompt[:30]}.html",
                    mime="text/html"
                )
            # Auto save (best-effort; failures only logged)
            if st.session_state.auto_save and st.session_state.messages:
                try:
                    fn = f"chat_history_auto_{datetime.now():%Y%m%d_%H%M%S}.json"
                    with open(fn, "w", encoding="utf-8") as fp:
                        json.dump(st.session_state.messages, fp, ensure_ascii=False, indent=2)
                except Exception as e:
                    logging.error(f"Auto-save failed: {e}")
        except Exception as e:
            # Record the failure both on screen and in the chat history
            error_message = str(e)
            placeholder.error(f"An error occurred: {error_message}")
            logging.error(f"Process input error: {error_message}")
            ans = f"An error occurred while processing your request: {error_message}"
            st.session_state.messages.append({"role": "assistant", "content": ans})
# ββββββββββββββββββββββββββββββββ main ββββββββββββββββββββββββββββββββββββ | |
def main():
    """Entry point: render the Streamlit blog app."""
    ginigen_app()


if __name__ == "__main__":
    main()