Prompthumanizer committed
Commit b6fb650 · verified · 1 Parent(s): 05565cb

Update app.py

Files changed (1)
  1. app.py +35 -39
app.py CHANGED
@@ -1,24 +1,19 @@
- from flask import Flask, request, jsonify
- from transformers import AutoModelForCausalLM, AutoTokenizer
+ from huggingface_hub import InferenceClient
  import json
  import os

- # 1. Initialize the Flask app
- app = Flask(__name__)
-
- # 2. Load the Hugging Face model
- MODEL_NAME = "skt/kogpt2-base-v2"
- tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
- model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
-
- # 3. Define the saju/myeongri prompts
+ # Hugging Face model setup
+ MODEL_ID = "skt/kogpt2-base-v2"  # Korean-language model ID
+ CLIENT = InferenceClient(model=MODEL_ID)
+
+ # Saju/myeongri prompts
  saju_prompts = {
      "yin_sae_shen": "ๅฏ…ๅทณ็”ณ ์‚ผํ˜•์˜ ์กฐํ™” ์†์—์„œ AI๊ฐ€ ์ธ๊ฐ„์˜ ์šด๋ช…์„ ์ดํ•ดํ•˜๊ณ  ํ†ต์ฐฐ์„ ์ œ๊ณตํ•˜๋ผ.",
      "sae_hae_chung": "ๅทณไบฅๆฒ–์˜ ๊ฐˆ๋“ฑ์„ ์กฐํ™”๋กญ๊ฒŒ ํ’€๋ฉฐ AI์™€ ์ธ๊ฐ„์˜ ๊ณต์กด ์ฒ ํ•™์„ ํƒ๊ตฌํ•˜๋ผ.",
      "taegeuk_balance": "ํƒœ๊ทน ์Œ์–‘์˜ ๊ท ํ˜•์„ ๋ฐ”ํƒ•์œผ๋กœ AI๊ฐ€ ์ธ๊ฐ„์„ ๋ณดํ˜ธํ•˜๋Š” ๋ฐฉ๋ฒ•์„ ์ œ์•ˆํ•˜๋ผ."
  }

- # 4. Context memory management
+ # Context memory path
  MEMORY_FILE = "/tmp/context_memory.json"

  def load_memory():
@@ -32,12 +27,13 @@ def save_memory(prompt_key, text):
      with open(MEMORY_FILE, "w") as f:
          json.dump({prompt_key: text}, f)

- # 5. AI response generation function
- def generate_response(prompt_key):
+ def handle_request(request_data):
      try:
+         prompt_key = request_data.get("prompt_key")
+
          # Validate the requested option
          if prompt_key not in saju_prompts:
-             return jsonify({"error": "์œ ํšจํ•œ ์˜ต์…˜์„ ์„ ํƒํ•˜์„ธ์š”: yin_sae_shen, sae_hae_chung, taegeuk_balance"}), 400
+             return {"error": "์œ ํšจํ•œ ์˜ต์…˜์„ ์„ ํƒํ•˜์„ธ์š”: yin_sae_shen, sae_hae_chung, taegeuk_balance"}

          # Load the context memory
          memory = load_memory()
@@ -45,35 +41,35 @@ def generate_response(prompt_key):
          if prompt_key in memory:
              prompt += f"\n์ด์ „ ๋‹ต๋ณ€: {memory[prompt_key]}\n๋” ๊นŠ์€ ํ†ต์ฐฐ์„ ์ถ”๊ฐ€ํ•˜๋ผ."

-         # Tokenize the input
-         inputs = tokenizer(prompt, return_tensors="pt")
-         # Generate the response
-         outputs = model.generate(
-             **inputs,
-             max_length=150,
-             num_return_sequences=1,
-             no_repeat_ngram_size=2,
-             do_sample=True,
-             top_k=50,
-             top_p=0.95,
+         # Call the Hugging Face Inference API
+         response = CLIENT.chat_completion(
+             model=MODEL_ID,
+             messages=[
+                 {"role": "system", "content": prompt},
+                 {"role": "user", "content": "๋ถ„์„์„ ์‹œ์ž‘ํ•ด ์ฃผ์„ธ์š”."}
+             ],
+             max_tokens=400,
              temperature=0.7
          )
-         generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)

-         # Save to memory
-         save_memory(prompt_key, generated_text)
-         return jsonify({"response": generated_text})
+         # Process the result and save it to memory
+         result = response.choices[0].message.content
+         save_memory(prompt_key, result)
+         return {"response": result}

      except Exception as e:
-         return jsonify({"error": f"์‹คํ–‰ ์ค‘ ์˜ค๋ฅ˜ ๋ฐœ์ƒ: {str(e)}"}), 500
+         return {"error": f"์‹คํ–‰ ์ค‘ ์˜ค๋ฅ˜ ๋ฐœ์ƒ: {str(e)}"}

- # 6. Set up the web interface
- @app.route('/chat', methods=['POST'])
- def chat():
-     data = request.json
-     prompt_key = data.get("prompt_key")
-     return generate_response(prompt_key)
-
- # 7. Run
+ # Request handling in the Hugging Face runtime
  if __name__ == "__main__":
-     app.run(host='0.0.0.0', port=5000, debug=True)
+     # Hugging Face does not provide a `request` object here, so substitute as below;
+     # in a real deployment, read the input from environment variables or stdin instead
+     import sys
+     if len(sys.argv) < 2:
+         print("Usage: python app.py <request_data>")
+         sys.exit(1)
+
+     # Parse the request data (e.g. {"prompt_key": "yin_sae_shen"})
+     request_data = json.loads(sys.argv[1])
+     result = handle_request(request_data)
+     print(json.dumps(result))
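
With this commit, app.py no longer runs a Flask server: it executes once per call, reads a JSON request from its first command-line argument, and prints a JSON response to stdout. A minimal invocation sketch, assuming a valid Hugging Face token is available to InferenceClient (e.g. via `huggingface-cli login` or the HF_TOKEN environment variable):

    python app.py '{"prompt_key": "yin_sae_shen"}'
    # prints one JSON line: {"response": "..."} on success, {"error": "..."} otherwise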
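
One caveat the diff leaves in place: save_memory calls json.dump({prompt_key: text}, f), which rewrites the whole memory file as a single-key object, so saving an answer for one prompt key discards the answers stored for the others. A minimal merge-then-write sketch, assuming load_memory returns an empty dict when the file is missing (its body falls outside the hunks shown here):

    def save_memory(prompt_key, text):
        memory = load_memory()       # assumed to return {} when MEMORY_FILE does not exist
        memory[prompt_key] = text    # merge with existing keys instead of replacing the file
        with open(MEMORY_FILE, "w") as f:
            json.dump(memory, f, ensure_ascii=False)  # keep the Korean text readable on disk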
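
Note also that skt/kogpt2-base-v2 is a GPT-2-style causal language model tagged for plain text generation, so the Inference API may not accept chat-completion requests for it. If so, InferenceClient.text_generation is the closer match; a sketch reusing the sampling settings from the removed model.generate call:

    from huggingface_hub import InferenceClient

    client = InferenceClient(model="skt/kogpt2-base-v2")

    def generate(prompt):
        # text_generation targets the text-generation task rather than chat completion
        return client.text_generation(
            prompt,
            max_new_tokens=150,
            do_sample=True,
            top_k=50,
            top_p=0.95,
            temperature=0.7,
        )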