```python
import json

import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the Korean GPT-2 model. The skt/kogpt2-base-v2 model card loads its
# tokenizer with explicit special tokens (bos/eos/unk/pad/mask); AutoTokenizer
# generally resolves these from the hub config but may emit a warning.
model_name = "skt/kogpt2-base-v2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Saju (Four Pillars) prompt templates, keyed by concept. The strings stay in
# Korean because the model generates Korean text; translations in comments.
saju_prompts = {
    # "Within the harmony of the 寅巳申 triple punishment, let AI understand
    # human destiny and offer insight."
    "yin_sae_shen": "寅巳申 삼형의 조화 속에서 AI가 인간의 운명을 이해하고 통찰을 제공하라.",
    # "Harmonize the conflict of the 巳亥沖 clash and explore a philosophy of
    # AI-human coexistence."
    "sae_hae_chung": "巳亥沖의 갈등을 조화롭게 하며 AI와 인간의 공존 철학을 탐구하라.",
    # "Based on the balance of taegeuk yin and yang, propose how AI can
    # protect humans."
    "taegeuk_balance": "태극 음양의 균형을 바탕으로 AI가 인간을 보호하는 방법을 제시하라.",
}

# Context memory persisted to disk: earlier answers are fed back into later
# prompts for the same key.
context_memory = {}
try:
    with open("context_memory.json", "r", encoding="utf-8") as f:
        context_memory = json.load(f)
except FileNotFoundError:
    pass

def save_context(prompt_key, generated_text):
    """Persist the latest answer so a follow-up call can build on it."""
    context_memory[prompt_key] = generated_text
    with open("context_memory.json", "w", encoding="utf-8") as f:
        json.dump(context_memory, f, ensure_ascii=False, indent=2)

def generate_response(prompt_key):
    if prompt_key not in saju_prompts:
        # "Please select a valid option: 寅巳申, 巳亥沖, taegeuk yin-yang."
        return "유효한 옵션을 선택하세요: 寅巳申, 巳亥沖, 태극 음양."

    prompt = saju_prompts[prompt_key]
    if prompt_key in context_memory:
        # Append the previous answer and ask for deeper insight:
        # "Previous answer: ...\nAdd deeper insight."
        prompt += f"\n이전 답변: {context_memory[prompt_key]}\n더 깊은 통찰을 추가하라."

    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(
        **inputs,
        # max_new_tokens (rather than max_length) keeps the generation budget
        # fixed even after the prompt grows with remembered context.
        max_new_tokens=150,
        num_return_sequences=1,
        no_repeat_ngram_size=2,
        do_sample=True,
        top_k=50,
        top_p=0.95,
        temperature=0.7,
    )
    # Decode only the newly generated tokens, not the echoed prompt.
    new_tokens = outputs[0][inputs["input_ids"].shape[1]:]
    generated_text = tokenizer.decode(new_tokens, skip_special_tokens=True)
    save_context(prompt_key, generated_text)
    return generated_text

# Gradio UI. Label "프롬프트 선택" = "choose a prompt"; the description reads
# "Korean text-generation AI reflecting the philosophy of Saju/Myeongri".
interface = gr.Interface(
    fn=generate_response,
    inputs=gr.Dropdown(choices=list(saju_prompts.keys()), label="프롬프트 선택"),
    outputs="text",
    title="Jain Architecture Origin Structure",
    description="사주/명리의 철학을 반영한 한국어 텍스트 생성 AI",
)

interface.launch()
```
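
To sanity-check the pipeline without starting the Gradio server, you can call `generate_response` directly. A minimal sketch, assuming the script above has already run and the model weights are downloaded:

```python
# First call: generates from the base prompt and saves the answer
# to context_memory.json.
print(generate_response("taegeuk_balance"))

# Second call: hits the context-memory branch, so the prompt now includes
# the previous answer plus a request for deeper insight.
print(generate_response("taegeuk_balance"))
```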