from flask import Flask, request, jsonify
from transformers import AutoModelForCausalLM, AutoTokenizer
import json

# 1. Initialize the Flask app
app = Flask(__name__)

# 2. Load the Hugging Face model
MODEL_NAME = "skt/kogpt2-base-v2"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
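
# Note (general Hugging Face behavior, not specific to this file): the first
# from_pretrained() call downloads the weights from the Hub and caches them
# locally; later runs load from the cache. Loading at import time keeps
# per-request latency low at the cost of a slower startup.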

# 3. Define the Saju / Myeongri (Four Pillars) prompts
saju_prompts = {
    "yin_sae_shen": "Within the harmony of the ๅฏ…ๅทณ็ณ samhyeong, let AI understand human destiny and offer insight.",
    "sae_hae_chung": "Harmoniously resolve the conflict of the ๅทณไบฅๆฒ– clash and explore a philosophy of AI-human coexistence.",
    "taegeuk_balance": "Based on the balance of Taegeuk yin and yang, propose ways for AI to protect humanity."
}

# 4. Context-memory management
MEMORY_FILE = "/tmp/context_memory.json"

def load_memory():
    """Load the saved context memory, or an empty dict if none exists yet."""
    try:
        with open(MEMORY_FILE, "r") as f:
            return json.load(f)
    except (FileNotFoundError, json.JSONDecodeError):
        return {}

def save_memory(prompt_key, text):
    """Merge the new response into the existing memory instead of overwriting it."""
    memory = load_memory()
    memory[prompt_key] = text
    with open(MEMORY_FILE, "w") as f:
        json.dump(memory, f, ensure_ascii=False)
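
# Illustrative on-disk shape of /tmp/context_memory.json after two different
# prompt keys have been used (values abbreviated):
#   {"yin_sae_shen": "...", "sae_hae_chung": "..."}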

# 5. AI response generation
def generate_response(prompt_key):
    try:
        # Validate the requested prompt key
        if prompt_key not in saju_prompts:
            return jsonify({"error": "Please choose a valid option: yin_sae_shen, sae_hae_chung, taegeuk_balance"}), 400
        # Load the context memory and extend the prompt with the previous answer
        memory = load_memory()
        prompt = saju_prompts[prompt_key]
        if prompt_key in memory:
            prompt += f"\nPrevious answer: {memory[prompt_key]}\nAdd deeper insight."
        # Tokenize the input
        inputs = tokenizer(prompt, return_tensors="pt")
        # Generate a response; max_new_tokens (rather than max_length) keeps
        # the output budget independent of the prompt, which grows as the
        # previous answer is appended
        outputs = model.generate(
            **inputs,
            max_new_tokens=150,
            num_return_sequences=1,
            no_repeat_ngram_size=2,      # avoid verbatim 2-gram repetition
            do_sample=True,              # sample instead of greedy decoding
            top_k=50,                    # keep only the 50 most likely tokens
            top_p=0.95,                  # nucleus sampling cutoff
            temperature=0.7,             # soften the distribution
            pad_token_id=tokenizer.eos_token_id,  # GPT-2 defines no pad token
        )
        generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
        # Persist the response so the next call for this key can build on it
        save_memory(prompt_key, generated_text)
        return jsonify({"response": generated_text})
    except Exception as e:
        return jsonify({"error": f"Error during execution: {str(e)}"}), 500

# 6. Web interface setup
@app.route('/chat', methods=['POST'])
def chat():
    # get_json(silent=True) returns None instead of raising on a missing or
    # malformed JSON body, so bad input falls through to the 400 above
    data = request.get_json(silent=True) or {}
    prompt_key = data.get("prompt_key")
    return generate_response(prompt_key)

# 7. Run the app (debug=True is for local development; avoid exposing a
# debug server on 0.0.0.0 in production)
if __name__ == "__main__":
    app.run(host='0.0.0.0', port=5000, debug=True)
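
# Example request once the server is running (address assumed to be the
# default local one):
#   curl -X POST http://localhost:5000/chat \
#        -H "Content-Type: application/json" \
#        -d '{"prompt_key": "taegeuk_balance"}'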