Prompthumanizer committed
Commit 05565cb · verified · 1 Parent(s): 69ac6f8

Update app.py

Files changed (1)
  1. app.py +56 -44
app.py CHANGED
@@ -1,67 +1,79 @@
-import gradio as gr
 from transformers import AutoModelForCausalLM, AutoTokenizer
 import json
 import os
 
-# Korean model setup (Hugging Face model ID)
-MODEL_ID = "skt/kogpt2-base-v2"
-tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
-model = AutoModelForCausalLM.from_pretrained(MODEL_ID)
 
-# Saju / Four Pillars themed Korean prompts
 saju_prompts = {
     "yin_sae_shen": "寅巳申 삼형의 조화 속에서 AI가 인간의 운명을 이해하고 통찰을 제공하라.",
     "sae_hae_chung": "巳亥沖의 갈등을 조화롭게 풀며 AI와 인간의 공존 철학을 탐구하라.",
     "taegeuk_balance": "태극 음양의 균형을 바탕으로 AI가 인간을 보호하는 방법을 제안하라."
 }
 
-# Context memory file path
-MEMORY_FILE = "context_memory.json"
 
-def load_context_memory():
     try:
-        with open(MEMORY_FILE, "r", encoding="utf-8") as f:
             return json.load(f)
     except (FileNotFoundError, json.JSONDecodeError):
         return {}
 
-def save_context_memory(prompt_key, generated_text):
-    with open(MEMORY_FILE, "w", encoding="utf-8") as f:
-        json.dump({prompt_key: generated_text}, f, ensure_ascii=False, indent=2)
 
 def generate_response(prompt_key):
-    if prompt_key not in saju_prompts:
-        return "유효한 옵션을 선택하세요: yin_sae_shen, sae_hae_chung, taegeuk_balance"
-
-    prompt = saju_prompts[prompt_key]
-    memory = load_context_memory()
-    if prompt_key in memory:
-        prompt += f"\n이전 답변: {memory[prompt_key]}\n더 깊은 통찰을 추가하라."
 
-    inputs = tokenizer(prompt, return_tensors="pt")
-    outputs = model.generate(
-        **inputs,
-        max_length=150,
-        num_return_sequences=1,
-        no_repeat_ngram_size=2,
-        do_sample=True,
-        top_k=50,
-        top_p=0.95,
-        temperature=0.7
-    )
-    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
-    save_context_memory(prompt_key, generated_text)
-    return generated_text
 
-# Gradio interface setup
-interface = gr.Interface(
-    fn=generate_response,
-    inputs=gr.Dropdown(choices=list(saju_prompts.keys()), label="프롬프트 선택"),
-    outputs="text",
-    title="Jain Architecture Origin Structure",
-    description="사주/명리와 철학을 반영한 한국어 텍스트 생성 AI"
-)
 
-# Entry point when run on Hugging Face
 if __name__ == "__main__":
-    interface.launch()
+from flask import Flask, request, jsonify
 from transformers import AutoModelForCausalLM, AutoTokenizer
 import json
 import os
 
+# 1. Initialize the Flask app
+app = Flask(__name__)
 
+# 2. Load the Hugging Face model
+MODEL_NAME = "skt/kogpt2-base-v2"
+tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
+model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
+
+# 3. Define the Saju prompts
 saju_prompts = {
     "yin_sae_shen": "寅巳申 삼형의 조화 속에서 AI가 인간의 운명을 이해하고 통찰을 제공하라.",
     "sae_hae_chung": "巳亥沖의 갈등을 조화롭게 풀며 AI와 인간의 공존 철학을 탐구하라.",
     "taegeuk_balance": "태극 음양의 균형을 바탕으로 AI가 인간을 보호하는 방법을 제안하라."
 }
 
+# 4. Context memory management
+MEMORY_FILE = "/tmp/context_memory.json"
 
+def load_memory():
     try:
+        with open(MEMORY_FILE, "r") as f:
             return json.load(f)
     except (FileNotFoundError, json.JSONDecodeError):
         return {}
 
+def save_memory(prompt_key, text):
+    with open(MEMORY_FILE, "w") as f:
+        json.dump({prompt_key: text}, f)
 
+# 5. AI response generation function
 def generate_response(prompt_key):
+    try:
+        # Validate the prompt key
+        if prompt_key not in saju_prompts:
+            return jsonify({"error": "유효한 옵션을 선택하세요: yin_sae_shen, sae_hae_chung, taegeuk_balance"}), 400
+
+        # Load context memory
+        memory = load_memory()
+        prompt = saju_prompts[prompt_key]
+        if prompt_key in memory:
+            prompt += f"\n이전 답변: {memory[prompt_key]}\n더 깊은 통찰을 추가하라."
+
+        # Tokenize the input
+        inputs = tokenizer(prompt, return_tensors="pt")
+        # Generate the response
+        outputs = model.generate(
+            **inputs,
+            max_length=150,
+            num_return_sequences=1,
+            no_repeat_ngram_size=2,
+            do_sample=True,
+            top_k=50,
+            top_p=0.95,
+            temperature=0.7
+        )
+        generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+        # Save to memory
+        save_memory(prompt_key, generated_text)
+        return jsonify({"response": generated_text})
 
+    except Exception as e:
+        return jsonify({"error": f"실행 중 오류 발생: {str(e)}"}), 500
 
+# 6. Web interface (API route)
+@app.route('/chat', methods=['POST'])
+def chat():
+    data = request.json
+    prompt_key = data.get("prompt_key")
+    return generate_response(prompt_key)
 
+# 7. Run the server
 if __name__ == "__main__":
+    app.run(host='0.0.0.0', port=5000, debug=True)
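For readers who don't read Korean: the user-facing strings left in the code translate roughly as follows. The three saju_prompts ask the model, in order, to "provide insight into human destiny within the harmony of the 寅巳申 triple punishment", to "resolve the conflict of the 巳亥沖 clash harmoniously and explore a philosophy of AI-human coexistence", and to "propose ways for AI to protect humans based on the balance of Taegeuk yin and yang". The 400 error message reads "Please select a valid option: yin_sae_shen, sae_hae_chung, taegeuk_balance", the context appended from memory reads "Previous answer: ... Add deeper insight.", and the 500 error message reads "Error during execution: ...".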
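Since the Gradio UI is gone, the app is now exercised over HTTP. A minimal client sketch for the new /chat endpoint (the URL assumes the default host/port from app.run above; the requests package is an extra dependency, not part of this commit):

# Minimal client sketch: POST a prompt_key to the /chat endpoint.
# Assumes the server is running locally on port 5000 and that the
# `requests` package is installed (not part of this commit).
import requests

resp = requests.post(
    "http://localhost:5000/chat",
    json={"prompt_key": "taegeuk_balance"},  # any key from saju_prompts
    timeout=120,  # generation on CPU can be slow
)
print(resp.status_code, resp.json())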
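One behavior worth flagging for a follow-up: save_memory writes a dict containing only the latest key, so each call discards the stored context for the other two prompt keys (the old save_context_memory had the same behavior, and the new version also drops ensure_ascii=False, so Korean text is stored as \u escapes). A merging variant, offered as a sketch rather than part of this commit, could look like:

def save_memory(prompt_key, text):
    # Sketch: merge into the existing memory instead of overwriting the
    # file with a single-key dict, so other prompt keys keep their context.
    memory = load_memory()
    memory[prompt_key] = text
    with open(MEMORY_FILE, "w", encoding="utf-8") as f:
        json.dump(memory, f, ensure_ascii=False, indent=2)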