"""
Square Theory Generator (Korean-friendly, LLM-powered)
-----------------------------------------------------
Changes
0) Auto-detect a Korean font for Matplotlib so Hangul text renders without broken glyphs
1) Keep a single input prompt; the LLM generates the remaining three words
2) Use the official OpenAI Python SDK (`openai`), following the provided `client.responses.create` example
3) The LLM returns the square words plus a two-line copy and a brand-name candidate as JSON, which is rendered automatically
⚠️ Set the **OPENAI_API_KEY** environment variable before running.
"""
import os
import json
import gradio as gr
import matplotlib.pyplot as plt
from matplotlib import patches, font_manager, rcParams
from openai import OpenAI
# -------------------------------------------------
# 0. Auto-detect & configure a Korean font (Malgun, Nanum, etc.)
# -------------------------------------------------
def _set_korean_font():
    candidates = [
        "Malgun Gothic",  # Windows
        "NanumGothic",    # Linux (apt install fonts-nanum)
        "AppleGothic",    # macOS
        "DejaVu Sans",    # fallback (default font; only partial Hangul support)
    ]
    available = {f.name for f in font_manager.fontManager.ttflist}
    for cand in candidates:
        if cand in available:
            rcParams["font.family"] = cand
            break
    rcParams["axes.unicode_minus"] = False  # keep the minus sign from rendering as a broken glyph
_set_korean_font()
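# To confirm which font family was actually picked up, you can inspect the active rcParam, e.g.:
#   print(rcParams["font.family"])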
# -------------------------------------------------
# 1. Initialize the OpenAI client
# -------------------------------------------------
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
if not OPENAI_API_KEY:
    raise EnvironmentError("Set the OPENAI_API_KEY environment variable.")
client = OpenAI()
# -------------------------------------------------
# 2. Draw the Square Theory diagram
# -------------------------------------------------
def draw_square(words):
"""words = dict with keys tl, tr, br, bl"""
fig, ax = plt.subplots(figsize=(4, 4))
# ์‚ฌ๊ฐํ˜• ํ”„๋ ˆ์ž„
square = patches.Rectangle((0, 0), 1, 1, fill=False, linewidth=2)
ax.add_patch(square)
ax.text(-0.05, 1.05, words["tl"], ha="right", va="bottom", fontsize=14, fontweight="bold")
ax.text(1.05, 1.05, words["tr"], ha="left", va="bottom", fontsize=14, fontweight="bold")
ax.text(1.05, -0.05, words["br"], ha="left", va="top", fontsize=14, fontweight="bold")
ax.text(-0.05, -0.05, words["bl"], ha="right", va="top", fontsize=14, fontweight="bold")
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim(-0.2, 1.2)
ax.set_ylim(-0.2, 1.2)
ax.set_aspect("equal")
return fig
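# A quick standalone check of the drawing helper, with purely hypothetical corner words:
#   fig = draw_square({"tl": "golden", "tr": "retriever", "br": "club", "bl": "member"})
#   fig.savefig("square.png")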
# -------------------------------------------------
# 3. LLM prompt & response parsing
# -------------------------------------------------
SYSTEM_PROMPT = """You are a Korean copywriting and brand-naming expert and a Square Theory assistant.
Based on the single word (TL) provided by the user, complete the square in the following JSON format.
- "tl": the input word, unchanged
- "tr": another word that corresponds to TL in meaning or sound
- "br": another word that corresponds to TR in meaning or sound
- "bl": another word that corresponds to BR in meaning or sound and also connects back to TL
In addition, provide "top_phrase" (tl+tr) and "bottom_phrase" (bl+br) as natural Korean idiomatic expressions,
and include "slogan" (a two-line copy) and "brand" (a brand-name candidate) fields as well.
Return only JSON encoded as Korean UTF-8 text."""
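# Illustrative shape of the JSON the prompt requests (field names come from the prompt;
# the values here are placeholders, not real model output):
# {
#   "tl": "...", "tr": "...", "br": "...", "bl": "...",
#   "top_phrase": "...", "bottom_phrase": "...",
#   "slogan": "...", "brand": "..."
# }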
def call_llm(seed):
"""Returns parsed dict from LLM JSON output."""
response = client.responses.create(
model="gpt-4.1-mini",
input=[
{"role": "system", "content": [{"type": "input_text", "text": SYSTEM_PROMPT}]},
{"role": "user", "content": [{"type": "input_text", "text": seed}]},
],
text={"format": {"type": "text"}},
temperature=0.9,
max_output_tokens=512,
)
raw = response.choices[0].message.content[0]["text"]
try:
data = json.loads(raw)
except json.JSONDecodeError as e:
raise ValueError(f"LLM ์‘๋‹ต์ด JSON ํ˜•์‹์ด ์•„๋‹™๋‹ˆ๋‹ค: {e}\n์›๋ณธ: {raw}")
return data
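# A minimal smoke-test sketch, assuming a valid API key and that the model follows the prompt:
#   data = call_llm("golden")
#   assert {"tl", "tr", "br", "bl"} <= data.keys()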
# -------------------------------------------------
# 4. Gradio callback
# -------------------------------------------------
def generate(seed_word):
    data = call_llm(seed_word)
    words = {
        "tl": data["tl"],
        "tr": data["tr"],
        "br": data["br"],
        "bl": data["bl"],
    }
    fig = draw_square(words)
    return (
        fig,
        data.get("top_phrase", ""),
        data.get("bottom_phrase", ""),
        data.get("slogan", ""),
        data.get("brand", ""),
    )
# -------------------------------------------------
# 5. Gradio UI
# -------------------------------------------------
with gr.Blocks(title="Square Theory Generator – LLM Powered 🇰🇷") as demo:
    gr.Markdown("""# 🟧 Square Theory Generator\nEnter a single word and the LLM automatically suggests the square structure, copy, and a brand name.""")
    seed = gr.Textbox(label="Seed word (TL)", placeholder="e.g., 골든 (golden)")
    run_btn = gr.Button("Generate square")
    fig_out = gr.Plot(label="Square diagram")
    top_out = gr.Textbox(label="Top-edge phrase")
    bottom_out = gr.Textbox(label="Bottom-edge phrase")
    slogan_out = gr.Textbox(label="Two-line slogan")
    brand_out = gr.Textbox(label="Brand name suggestion")
    run_btn.click(fn=generate, inputs=seed, outputs=[fig_out, top_out, bottom_out, slogan_out, brand_out])
if __name__ == "__main__":
    # launch(): pass share=True to make the app reachable from outside
    demo.launch()