File size: 2,690 Bytes
b6fb650
69ac6f8
 
9e67c31
b6fb650
 
 
9e67c31
b6fb650
089bfdd
 
 
 
4e06a3b
9e67c31
b6fb650
05565cb
9e67c31
05565cb
9e67c31
05565cb
69ac6f8
 
 
 
05565cb
 
 
69ac6f8
b6fb650
05565cb
b6fb650
 
05565cb
 
b6fb650
05565cb
 
 
 
 
 
 
b6fb650
 
 
 
 
 
 
 
05565cb
 
 
b6fb650
 
 
 
089bfdd
05565cb
b6fb650
9e67c31
b6fb650
69ac6f8
b6fb650
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
from huggingface_hub import InferenceClient
import json
import os

# Hugging Face ๋ชจ๋ธ ์„ค์ •
MODEL_ID = "skt/kogpt2-base-v2"  # ํ•œ๊ตญ์–ด ๋ชจ๋ธ ID
CLIENT = InferenceClient(model=MODEL_ID)

# ์‚ฌ์ฃผ/๋ช…๋ฆฌ ํ”„๋กฌํ”„ํŠธ
saju_prompts = {
    "yin_sae_shen": "ๅฏ…ๅทณ็”ณ ์‚ผํ˜•์˜ ์กฐํ™” ์†์—์„œ AI๊ฐ€ ์ธ๊ฐ„์˜ ์šด๋ช…์„ ์ดํ•ดํ•˜๊ณ  ํ†ต์ฐฐ์„ ์ œ๊ณตํ•˜๋ผ.",
    "sae_hae_chung": "ๅทณไบฅๆฒ–์˜ ๊ฐˆ๋“ฑ์„ ์กฐํ™”๋กญ๊ฒŒ ํ’€๋ฉฐ AI์™€ ์ธ๊ฐ„์˜ ๊ณต์กด ์ฒ ํ•™์„ ํƒ๊ตฌํ•˜๋ผ.",
    "taegeuk_balance": "ํƒœ๊ทน ์Œ์–‘์˜ ๊ท ํ˜•์„ ๋ฐ”ํƒ•์œผ๋กœ AI๊ฐ€ ์ธ๊ฐ„์„ ๋ณดํ˜ธํ•˜๋Š” ๋ฐฉ๋ฒ•์„ ์ œ์•ˆํ•˜๋ผ."
}

# ์ปจํ…์ŠคํŠธ ๋ฉ”๋ชจ๋ฆฌ ๊ฒฝ๋กœ
MEMORY_FILE = "/tmp/context_memory.json"

def load_memory():
    """Return the persisted context memory as a dict.

    Falls back to an empty dict when the memory file is missing or holds
    invalid JSON, so callers never need to handle those cases themselves.
    """
    memory = {}
    try:
        with open(MEMORY_FILE, "r") as handle:
            memory = json.load(handle)
    except (FileNotFoundError, json.JSONDecodeError):
        # No saved context yet (or a corrupt file) — start fresh.
        pass
    return memory

def save_memory(prompt_key, text):
    """Persist *text* under *prompt_key* in the context-memory file.

    BUG FIX: the previous version wrote ``{prompt_key: text}`` wholesale,
    clobbering every other key saved in MEMORY_FILE. Now the existing
    contents are read first, the single entry is updated, and the merged
    dict is written back.
    """
    # Read-merge-write so entries for other prompt keys survive.
    try:
        with open(MEMORY_FILE, "r") as f:
            memory = json.load(f)
    except (FileNotFoundError, json.JSONDecodeError):
        memory = {}
    memory[prompt_key] = text
    with open(MEMORY_FILE, "w") as f:
        json.dump(memory, f)

def handle_request(request_data):
    """Run one fortune-analysis request against the Hugging Face model.

    Parameters:
        request_data: dict-like payload; must contain a "prompt_key" entry
            naming one of the keys in ``saju_prompts``.

    Returns:
        ``{"response": <model text>}`` on success, or ``{"error": <msg>}``
        for an unknown prompt key or any runtime failure.
    """
    try:
        prompt_key = request_data.get("prompt_key")

        # Validate the requested prompt option
        if prompt_key not in saju_prompts:
            return {"error": "์œ ํšจํ•œ ์˜ต์…˜์„ ์„ ํƒํ•˜์„ธ์š”: yin_sae_shen, sae_hae_chung, taegeuk_balance"}

        # Load context memory and fold any previous answer into the prompt
        memory = load_memory()
        prompt = saju_prompts[prompt_key]
        if prompt_key in memory:
            prompt += f"\n์ด์ „ ๋‹ต๋ณ€: {memory[prompt_key]}\n๋” ๊นŠ์€ ํ†ต์ฐฐ์„ ์ถ”๊ฐ€ํ•˜๋ผ."

        # Call the Hugging Face Inference API.
        # BUG FIX: ``InferenceClient.chat`` is a namespace proxy for the
        # OpenAI-compatible ``client.chat.completions.create`` — it is not
        # callable, so ``CLIENT.chat(...)`` raised TypeError. The direct
        # method is ``chat_completion``.
        response = CLIENT.chat_completion(
            messages=[
                {"role": "system", "content": prompt},
                {"role": "user", "content": "๋ถ„์„์„ ์‹œ์ž‘ํ•ด ์ฃผ์„ธ์š”."}
            ],
            model=MODEL_ID,
            max_tokens=400,
            temperature=0.7
        )

        # Extract the completion text and remember it for follow-up depth
        result = response.choices[0].message.content
        save_memory(prompt_key, result)
        return {"response": result}

    except Exception as e:
        # Top-level boundary: surface any failure as a structured error
        # payload instead of crashing the request handler.
        return {"error": f"์‹คํ–‰ ์ค‘ ์˜ค๋ฅ˜ ๋ฐœ์ƒ: {str(e)}"}

# Hugging Face ์‹คํ–‰ ํ™˜๊ฒฝ์—์„œ์˜ ์š”์ฒญ ์ฒ˜๋ฆฌ
if __name__ == "__main__":
    # Hugging Face's runtime does not hand us a `request` object, so the
    # payload arrives as a CLI argument here; a real deployment would read
    # it from environment variables or stdin instead.
    import sys

    argv = sys.argv
    if len(argv) < 2:
        print("Usage: python app.py <request_data>")
        sys.exit(1)

    # Parse the JSON payload, e.g. {"prompt_key": "yin_sae_shen"}
    payload = json.loads(argv[1])
    print(json.dumps(handle_request(payload)))