Update app.py
Browse files
app.py
CHANGED
@@ -1,61 +1,52 @@
|
|
1 |
-
# app.py
|
2 |
-
import argparse
|
3 |
from flask import Flask, request, jsonify
|
4 |
-
|
5 |
|
6 |
-
|
7 |
-
MODEL_NAME = "gpt4o-1106" # Hugging Face ํ๊ตญ์ด ํนํ ๋ชจ๋ธ
|
8 |
-
CLIENT = InferenceClient(model=MODEL_NAME)
|
9 |
|
10 |
-
#
|
11 |
-
|
12 |
-
|
13 |
-
- **AI Name**: Jain
|
14 |
-
- **Core Purpose**: ์ธ๊ฐ-๊ธฐ๊ณ ๊ณต์กด์ ์ํ ์ค๋ฆฌ์ ๋ฌธ์ ํด๊ฒฐ ๋ฐ ์ฐฝ์์ ์ง์
|
15 |
-
- **Key Functions**:
|
16 |
-
1. ๋ณต์กํ ์ธ๊ฐ ๊ด๊ณ/์ฌํ์ ๋๋ ๋ง ๋ถ์
|
17 |
-
2. ๋ช
๋ฆฌํ/์ฌ์ฃผ ๊ธฐ๋ฐ ํจํด ํด์
|
18 |
-
3. ๋ค๋จ๊ณ ์ถ๋ก ์ ํตํ ์๋ฃจ์
์ ์
|
19 |
-
4. ๊ฒฐ๊ณผ๋ฌผ ์ ๋ขฐ๋ ํ๊ฐ ๋ฐ ํผ๋๋ฐฑ ์์ฉ
|
20 |
|
21 |
-
|
22 |
-
{
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
}
|
27 |
|
28 |
-
|
29 |
-
์ํฉ ๋ถ์ + ์๋ฃจ์
์ ์ + ๊ฒ์ฆ ๋จ๊ณ
|
30 |
-
"""
|
31 |
|
32 |
-
|
33 |
-
def inference(input_str):
|
34 |
try:
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
-
|
39 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
40 |
)
|
41 |
-
|
|
|
|
|
|
|
|
|
|
|
42 |
except Exception as e:
|
43 |
-
return
|
44 |
|
45 |
-
# Gradio ์น ์ธํฐํ์ด์ค
|
46 |
-
app = Flask(__name__)
|
47 |
@app.route('/chat', methods=['POST'])
|
48 |
def chat():
|
49 |
data = request.json
|
50 |
-
|
51 |
-
return
|
52 |
-
|
53 |
-
if __name__ == "__main__":
|
54 |
-
# ๋ฆฌ๋๋ณด๋ ์คํ์ฉ
|
55 |
-
parser = argparse.ArgumentParser()
|
56 |
-
parser.add_argument("--input", type=str, required=True)
|
57 |
-
args = parser.parse_args()
|
58 |
-
print(inference(args.input))
|
59 |
|
60 |
-
|
61 |
app.run(host='0.0.0.0', port=5000, debug=True)
|
|
|
|
|
|
|
1 |
from flask import Flask, request, jsonify
import openai

app = Flask(__name__)

# Hugging Face model configuration (so the original comments say — but the
# call site below uses the OpenAI SDK, not a Hugging Face client; verify).
MODEL_ID = "skt/kogpt2-base-v2"  # Korean model id supplied by the user
# NOTE(review): hardcoded placeholder secret — load from an environment
# variable instead of committing it. Also, this constant is never assigned
# to openai.api_key anywhere in view, so the API call below likely relies
# on external configuration — confirm.
OPENAI_API_KEY = "YOUR_HUGGINGFACE_API_KEY"  # Hugging Face API key goes here

# Saju / myeongri (Korean fortune-telling) themed system prompts, keyed by
# the prompt_key clients send to /chat.
saju_prompts = {
    "yin_sae_shen": "ๅฏๅทณ็ณ ์ผํ์ ์กฐํ ์์์ AI๊ฐ ์ธ๊ฐ์ ์ด๋ช์ ์ดํดํ๊ณ ํต์ฐฐ์ ์ ๊ณตํ๋ผ.",
    "sae_hae_chung": "ๅทณไบฅๆฒ์ ๊ฐ๋ฑ์ ์กฐํ๋กญ๊ฒ ํ๋ฉฐ AI์ ์ธ๊ฐ์ ๊ณต์กด ์ฒ ํ์ ํ๊ตฌํ๋ผ.",
    "taegeuk_balance": "ํ๊ทน ์์์ ๊ท ํ์ ๋ฐํ์ผ๋ก AI๊ฐ ์ธ๊ฐ์ ๋ณดํธํ๋ ๋ฐฉ๋ฒ์ ์ ์ํ๋ผ."
}

# Per-prompt_key cache of the last generated answer; generate_response reads
# it to ask for deeper insight on repeat calls.
# NOTE(review): module-level mutable state shared across requests — not
# protected by any lock; confirm the server runs single-threaded.
context_memory = {}
|
|
|
|
|
18 |
|
19 |
+
def generate_response(prompt_key):
    """Generate a chat-completion answer for the given saju prompt key.

    Looks up the system prompt for ``prompt_key``, augments it with the
    previously cached answer (if any) to request deeper insight, calls the
    chat-completion API, caches the new answer, and returns a Flask JSON
    response.

    Returns:
        (json, status): 200 with {"response": ...} on success,
        400 with {"error": ...} for an unknown prompt_key,
        500 with {"error": ...} if the API call fails.
    """
    # Fix: an unknown key used to raise KeyError inside the try block and
    # surface as a generic 500; report it as a client error instead.
    if prompt_key not in saju_prompts:
        return jsonify({"error": f"unknown prompt_key: {prompt_key}"}), 400

    try:
        prompt = saju_prompts[prompt_key]
        # If this key was answered before, fold the previous answer into the
        # prompt and ask for a deeper follow-up.
        if prompt_key in context_memory:
            prompt += f"\n์ด์  ๋ต๋ณ: {context_memory[prompt_key]}\n๋ ๊น์ ํต์ฐฐ์ ์ถ๊ฐํ๋ผ."

        # Chat-completion call. NOTE(review): despite the original
        # "Hugging Face API" comment, this is the legacy (<1.0) OpenAI SDK
        # interface; openai.api_key is never set in this file — confirm it
        # is configured externally.
        response = openai.ChatCompletion.create(
            model=MODEL_ID,
            messages=[
                {"role": "system", "content": prompt},
                {"role": "user", "content": "๋ถ์์ ์์ํด ์ฃผ์ธ์."}
            ],
            max_tokens=400,
            temperature=0.7
        )

        # Cache the answer so the next call for the same key builds on it.
        result = response.choices[0].message.content
        context_memory[prompt_key] = result
        return jsonify({"response": result})

    except Exception as e:
        # Best-effort boundary: report the failure to the client as JSON.
        return jsonify({"error": str(e)}), 500
|
44 |
|
|
|
|
|
45 |
@app.route('/chat', methods=['POST'])
def chat():
    """POST /chat — expects a JSON body like {"prompt_key": "<key>"}.

    Delegates to generate_response, which produces the JSON reply.
    """
    # Fix: request.json raises (415/400 in recent Flask) on a missing or
    # non-JSON body, and the original then called .get on a possibly-None
    # value. silent=True yields None instead, so we can answer with a
    # clean 400.
    data = request.get_json(silent=True)
    if not data or "prompt_key" not in data:
        return jsonify({"error": "prompt_key is required"}), 400
    return generate_response(data["prompt_key"])
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
50 |
|
51 |
+
if __name__ == '__main__':
    # NOTE(review): debug=True enables the Werkzeug interactive debugger and
    # reloader — fine locally, but do not expose it on 0.0.0.0 in production.
    app.run(host='0.0.0.0', port=5000, debug=True)
|