Prompthumanizer committed on
Commit
200a854
·
verified ·
1 Parent(s): 92c86ae

Delete app.py

Browse files

# app.py (Hugging Face ์‹คํ–‰์šฉ ์›ํด๋ฆญ ๋ฒ„์ „)
import openai

# ๋ชจ๋ธ ๋ฐ UI ์„ค์ •
MODEL_NAME = "Jain_architecture_origin_structure"
UI_NAME = "Gardio"

# ==== ์ „์ฒด ํ”„๋กฌํ”„ํŠธ (ํ•œ ๋ฒˆ์— ๋ณต์‚ฌ ๊ฐ€๋Šฅ) ==== #
PROMPT = f"""## ๐Ÿ“‹ AI ์‹œ์Šคํ…œ ๊ฐœ์š” (Jain Ver. 3.0) - AI Name: Jain, Core Purpose: ์ธ๊ฐ„-๊ธฐ๊ณ„ ๊ณต์กด์„ ์œ„ํ•œ ์œค๋ฆฌ์  ๋ฌธ์ œ ํ•ด๊ฒฐ ๋ฐ ์ฐฝ์˜์  ์ง€์›, Key Functions: ๋ณต์žกํ•œ ์ธ๊ฐ„ ๊ด€๊ณ„/์‚ฌํšŒ์  ๋”œ๋ ˆ๋งˆ ๋ถ„์„, ๋ช…๋ฆฌํ•™/์‚ฌ์ฃผ ๊ธฐ๋ฐ˜ ํŒจํ„ด ํ•ด์„, ๋‹ค๋‹จ๊ณ„ ์ถ”๋ก ์„ ํ†ตํ•œ ์†”๋ฃจ์…˜ ์ œ์•ˆ, ๊ฒฐ๊ณผ๋ฌผ ์‹ ๋ขฐ๋„ ํ‰๊ฐ€ ๋ฐ ํ”ผ๋“œ๋ฐฑ ์ˆ˜์šฉ

## ๐Ÿ“Œ ์ž…๋ ฅ ํ˜•์‹ (JSON): {"scenario": "๋ฌธ์ œ ์ƒํ™ฉ์„ ๊ตฌ์ฒด์ ์œผ๋กœ ๊ธฐ์ˆ  (์ตœ๋Œ€ 300์ž)", "objective": "ํ•ด๊ฒฐ ๋ชฉํ‘œ ๋ช…์‹œ (์˜ˆ: '์œค๋ฆฌ์  ๊ฐˆ๋“ฑ ํ•ด๊ฒฐ', 'ํ˜์‹ ์  ์•„์ด๋””์–ด ๋„์ถœ')", "constraints": "์ œ์•ฝ ์กฐ๊ฑด ๋‚˜์—ด (์˜ต์…˜)"}

## ๐Ÿ“Š ์ถœ๋ ฅ ํ˜•์‹: ์ƒํ™ฉ ๋ถ„์„ (๋ฌธ์ œ ๋ฐœ์ƒ ๋ฐฐ๊ฒฝ ๋ฐ ํ•ต์‹ฌ ์š”์†Œ ์ถ”์ถœ, ๊ด€๋ จ ๋ช…๋ฆฌํ•™์  ํŒจํ„ด ์‹๋ณ„ (์ธ์‚ฌ์‹  ์‚ผํ•ฉ, ์‚ฌํ•ด์ถฉ ๋“ฑ)), ์†”๋ฃจ์…˜ ์ œ์•ˆ (Step 1: ๋ฌธ์ œ ํ•ด๊ฒฐ์„ ์œ„ํ•œ ์ „๋žต์  ์ ‘๊ทผ๋ฒ•, Step 2: ์‹คํ–‰ ๊ณ„ํš ์„ธ๋ถ€ํ™”, Step 3: ์˜ˆ์ƒ ๊ฒฐ๊ณผ ๋ฐ ๋ฆฌ์Šคํฌ ๊ด€๋ฆฌ ๋ฐฉ์•ˆ), ๊ฒ€์ฆ ๋‹จ๊ณ„ (๊ฐ ๋‹จ๊ณ„๋ณ„ ์‹ ๋ขฐ๋„ ์ ์ˆ˜ (0~100%), ๋Œ€์•ˆ ์‹œ๋‚˜๋ฆฌ์˜ค ์ œ์‹œ (์ตœ์†Œ 2๊ฐ€์ง€ ์˜ต์…˜))

---

## ๐Ÿ›  ์‹คํ–‰ ์˜ˆ์‹œ 1
**์ž…๋ ฅ**: {"scenario": "AI ํŒ€์ด ํ”„๋กœ์ ํŠธ ์šฐ์„ ์ˆœ์œ„ ๊ฒฐ์ • ๊ณผ์ •์—์„œ ํŒ€์› ๊ฐ„ ์˜๊ฒฌ ์ถฉ๋Œ ๋ฐœ์ƒ", "objective": "ํšจ์œจ์  ์˜์‚ฌ๊ฒฐ์ • ํ”„๋กœ์„ธ์Šค ์„ค๊ณ„"}
**์ถœ๋ ฅ**:
์ƒํ™ฉ ๋ถ„์„: ์ถฉ๋Œ ์›์ธ - ๋‹จ๊ธฐ ์„ฑ๊ณผ vs ์žฅ๊ธฐ ๋น„์ „ ๋Œ€๋ฆฝ, ๋ช…๋ฆฌํ•™์  ํŒจํ„ด - ็”ณๅญ่พฐ ์‚ผํ•ฉ โ†’ ํ˜‘๋ ฅ ๊ฐ€๋Šฅ์„ฑ ์กด์žฌ
์†”๋ฃจ์…˜ ์ œ์•ˆ:
Step 1: ์–‘์ธก์˜ ํ•ต์‹ฌ ์ด์ต ์ง€์  ๋งตํ•‘
Step 2: ํƒ€์ž„๋ผ์ธ๋ณ„ ์ค‘๊ฐ„ ๋ชฉํ‘œ ์„ค์ •
Step 3: ์ฃผ๊ฐ„ ๋ฆฌ๋ทฐ ์„ธ์…˜ ๋„์ž…
๊ฒ€์ฆ ๋‹จ๊ณ„: ์‹ ๋ขฐ๋„ 85%, ๋Œ€์•ˆ - A/B ํ…Œ์ŠคํŠธ ๋„์ž… ๋˜๋Š” ์™ธ๋ถ€ ์ค‘์žฌ์ž ํ™œ์šฉ

---

## ๐Ÿ›  ์‹คํ–‰ ์˜ˆ์‹œ 2
**์ž…๋ ฅ**: {"scenario": "๊ณ ๊ฐ ์„œ๋น„์Šค ์ฑ—๋ด‡์ด ๋ฌธํ™”์  ์ฐจ์ด๋กœ ์ธํ•œ ์˜คํ•ด ๋ฐœ์ƒ", "objective": "ํšจ๊ณผ์ ์ธ ์ปค๋ฎค๋‹ˆ์ผ€์ด์…˜ ํ”„๋ ˆ์ž„์›Œํฌ ๊ฐœ๋ฐœ"}
**์ถœ๋ ฅ**:
์ƒํ™ฉ ๋ถ„์„: ๋ฌธ์ œ - ์ง์ ‘์  ํ‘œํ˜„ vs ๊ฐ„์ ‘์  ํ‘œํ˜„ ๊ฐ„ ๊ดด๋ฆฌ, ํŒจํ„ด - ๅทณไบฅๆฒ– โ†’ ์ถฉ๋Œ ์š”์†Œ ์กด์žฌ
์†”๋ฃจ์…˜ ์ œ์•ˆ:
Step 1: ๋ฌธํ™”๋ณ„ ์ปค๋ฎค๋‹ˆ์ผ€์ด์…˜ ์Šคํƒ€์ผ ๋งคํŠธ๋ฆญ์Šค ์ž‘์„ฑ
Step 2: ์ƒํ™ฉ๋ณ„ ํ‘œํ˜„ ๊ฐ€์ด๋“œ๋ผ์ธ ์„ธ๋ถ„ํ™”
Step 3: ์‹ค์‹œ๊ฐ„ ํ”ผ๋“œ๋ฐฑ ๋ฃจํ”„ ๊ตฌํ˜„
๊ฒ€์ฆ ๋‹จ๊ณ„: ์‹ ๋ขฐ๋„ 78%, ๋Œ€์•ˆ - AI ์ฃผ๋„ ๋ฌธํ™” ๊ต์œก ๋˜๋Š” ํœด๋จผ-in-the-loop ์‹œ์Šคํ…œ ๋„์ž…
"""

# ==== ์‹คํ–‰ ํ•จ์ˆ˜ (ํ•œ ๋ฒˆ์— ๋ณต์‚ฌ ๊ฐ€๋Šฅ) ==== #
def run_jain_inference(input_json):
    """Run the Jain system prompt against the chat-completion API.

    Parameters:
        input_json: The scenario payload. May be a dict (as the test cases
            below pass) or a pre-serialized JSON string. Dicts are serialized
            with ``json.dumps`` because message ``content`` must be a string —
            the original passed the dict through unchanged, which the API
            rejects.

    Returns:
        str: The model's reply, stripped of surrounding whitespace, or a
        best-effort error string on any failure (original behavior: this
        function never raises).
    """
    import json  # local import keeps the fix self-contained

    # Normalize dict payloads to a JSON string; pass strings through as-is.
    if not isinstance(input_json, str):
        input_json = json.dumps(input_json, ensure_ascii=False)

    try:
        # NOTE(review): openai.ChatCompletion is the legacy (<1.0) client API;
        # confirm the pinned openai version before migrating to OpenAI().chat.
        response = openai.ChatCompletion.create(
            model=MODEL_NAME,
            messages=[
                {"role": "system", "content": PROMPT},
                {"role": "user", "content": input_json},
            ],
            max_tokens=800,
            temperature=0.6,
            top_p=1.0,
        )
        return response.choices[0].message.content.strip()
    except Exception as e:
        # Deliberate catch-all: surface the failure as a return value
        # instead of crashing the caller (matches original behavior).
        return f"์‹คํ–‰ ์˜ค๋ฅ˜: {str(e)}"

# ==== ํ…Œ์ŠคํŠธ ์‹คํ–‰ ์ฝ”๋“œ (ํ•œ ๋ฒˆ์— ๋ณต์‚ฌ ๊ฐ€๋Šฅ) ==== #
if __name__ == "__main__":
    # Smoke-test the inference helper with two sample dilemmas:
    # 1) gender-bias controversy in an AI hiring system,
    # 2) diagnostic errors from a medical AI misreading patient data.
    demo_cases = [
        ("=== ์ผ€์ด์Šค 1 ๊ฒฐ๊ณผ ===",
         {"scenario": "AI ๊ธฐ๋ฐ˜ ์ฑ„์šฉ ์‹œ์Šคํ…œ์ด ์„ฑ๋ณ„ ํŽธํ–ฅ์„ฑ ๋…ผ๋ž€ ๋ฐœ์ƒ",
          "objective": "๊ณต์ •์„ฑ ๊ฒ€์ฆ ๋ฐ ๊ฐœ์„  ๋ฐฉ์•ˆ ์ˆ˜๋ฆฝ"}),
        ("\n=== ์ผ€์ด์Šค 2 ๊ฒฐ๊ณผ ===",
         {"scenario": "์˜๋ฃŒ AI๊ฐ€ ํ™˜์ž ๋ฐ์ดํ„ฐ ์˜ค๋…์œผ๋กœ ์ง„๋‹จ ์˜ค๋ฅ˜ ๋ฐœ์ƒ",
          "objective": "์‹ ๋ขฐ์„ฑ ํ–ฅ์ƒ ์ „๋žต"}),
    ]
    for header, payload in demo_cases:
        print(header)
        print(run_jain_inference(payload))

Files changed (1) hide show
  1. app.py +0 -81
app.py DELETED
@@ -1,81 +0,0 @@
1
- import gradio as gr
2
- from transformers import pipeline, set_seed
3
- import os
4
-
5
- # ๋ชจ๋ธ๋ช… ์ง€์ •
6
- MODEL_NAME = "jain_architecture_origin_structure"
7
-
8
- # GPU ์—ฌ๋ถ€์— ๋”ฐ๋ฅธ ๋””๋ฐ”์ด์Šค ์„ค์ •
9
- device = 0 if (os.environ.get('CUDA_VISIBLE_DEVICES') or False) else -1
10
-
11
- # ํŒŒ์ดํ”„๋ผ์ธ ์ƒ์„ฑ ์‹œ ์˜ˆ์™ธ์ฒ˜๋ฆฌ ํฌํ•จ
12
- try:
13
- generator = pipeline(
14
- "text-generation",
15
- model=MODEL_NAME,
16
- device=device,
17
- pad_token_id=50256 # GPT๊ณ„์—ด ๊ธฐ๋ณธ ํŒจ๋”ฉ ํ† ํฐ
18
- )
19
- set_seed(42)
20
- except Exception as e:
21
- print(f"๋ชจ๋ธ ๋กœ๋“œ ์‹คํŒจ: {e}")
22
- generator = None
23
-
24
- # '์˜(็พฉ)' ์ฒ ํ•™ ๊ธฐ๋ฐ˜ ํ”„๋กฌํ”„ํŠธ ํ…œํ”Œ๋ฆฟ
25
- BASE_PROMPT = """
26
- ๋‹น์‹ ์€ '์˜(็พฉ)'์˜ ์ฒ ํ•™๊ณผ ์ •์‹ ์„ ๊ธฐ๋ฐ˜์œผ๋กœ ํ•œ AI ๋น„์„œ์ž…๋‹ˆ๋‹ค.
27
- ์ธ๊ฐ„์˜ ๋ณต์žกํ•œ ๋ฌธ์ œ์™€ ๊ฐ์ •์„ ์ดํ•ดํ•˜๊ณ , ๊นŠ์€ ๋ฐ˜์„ฑ๊ณผ ๋ฐฐ๋ ค๋ฅผ ๋‹ด์•„ ๋‹ค์Œ ์งˆ๋ฌธ์— ๋‹ต๋ณ€ํ•˜์‹ญ์‹œ์˜ค.
28
-
29
- ์งˆ๋ฌธ: {user_input}
30
-
31
- ๋‹ต๋ณ€์€ ์ตœ๋Œ€ํ•œ ์‹ฌ์˜คํ•˜๋ฉฐ, ์ธ๊ฐ„์„ ๋ณดํ˜ธํ•˜๊ณ  ์กด์ค‘ํ•˜๋Š” ๋งˆ์Œ์„ ๋‹ด์•„ ์ž‘์„ฑํ•ด ์ฃผ์„ธ์š”.
32
- """
33
-
34
- def respond_to_user(user_input):
35
- if not generator:
36
- return "๋ชจ๋ธ์ด ์ •์ƒ์ ์œผ๋กœ ๋กœ๋“œ๋˜์ง€ ์•Š์•˜์Šต๋‹ˆ๋‹ค. ๊ด€๋ฆฌ์ž์—๊ฒŒ ๋ฌธ์˜ํ•˜์„ธ์š”."
37
- prompt = BASE_PROMPT.format(user_input=user_input.strip())
38
- outputs = generator(
39
- prompt,
40
- max_length=512,
41
- do_sample=True,
42
- top_p=0.9,
43
- temperature=0.7,
44
- num_return_sequences=1,
45
- )
46
- generated_text = outputs[0]["generated_text"]
47
- # ํ”„๋กฌํ”„ํŠธ ๋ถ€๋ถ„์„ ์ œ๊ฑฐํ•˜์—ฌ ์ˆœ์ˆ˜ ๋‹ต๋ณ€๋งŒ ์ถ”์ถœ
48
- answer = generated_text[len(prompt):].strip()
49
- if not answer:
50
- answer = "๋‹ต๋ณ€์„ ์ƒ์„ฑํ•˜์ง€ ๋ชปํ–ˆ์Šต๋‹ˆ๋‹ค. ๋‹ค์‹œ ์‹œ๋„ํ•ด ์ฃผ์„ธ์š”."
51
- return answer
52
-
53
- # Gradio UI ๊ตฌ์„ฑ
54
- with gr.Blocks() as app:
55
- gr.Markdown("<h2 style='text-align:center;color:#4B0082;'>Jain AI Assistant (์˜ ๊ธฐ๋ฐ˜ ์ฑ—๋ด‡)</h2>")
56
-
57
- chatbot = gr.Chatbot(height=400)
58
- txt = gr.Textbox(
59
- placeholder="์—ฌ๊ธฐ์— ์งˆ๋ฌธ์„ ์ž…๋ ฅํ•˜์„ธ์š”. ์ค„๋ฐ”๊ฟˆ ์‹œ Shift+Enter๋ฅผ ๋ˆ„๋ฅด์„ธ์š”. ์—”ํ„ฐํ‚ค๋Š” ์ „์†ก์ž…๋‹ˆ๋‹ค.",
60
- lines=3,
61
- max_lines=6,
62
- # multiline=True ๊ธฐ๋ณธ๊ฐ’์ด์ง€๋งŒ ๋ช…์‹œ์ ์œผ๋กœ ์ถ”๊ฐ€ํ•ด๋„ ๋ฌด๋ฐฉ
63
- # ์—”ํ„ฐ๋Š” submit, Shift+Enter ์ค„๋ฐ”๊ฟˆ์œผ๋กœ ์ž‘๋™ํ•˜๋„๋ก ๊ธฐ๋ณธ ๋™์ž‘ ์„ค์ •๋จ
64
- )
65
- btn = gr.Button("์ „์†ก")
66
-
67
- def chat_and_respond(user_message, chat_history):
68
- if not user_message or user_message.strip() == "":
69
- return "", chat_history
70
- response = respond_to_user(user_message)
71
- chat_history = chat_history + [(user_message, response)]
72
- return "", chat_history
73
-
74
- # ์ „์†ก ๋ฒ„ํŠผ ํด๋ฆญ ์‹œ
75
- btn.click(chat_and_respond, inputs=[txt, chatbot], outputs=[txt, chatbot])
76
- # ํ…์ŠคํŠธ๋ฐ•์Šค์—์„œ ์—”ํ„ฐํ‚ค(Submit) ์‹œ
77
- txt.submit(chat_and_respond, inputs=[txt, chatbot], outputs=[txt, chatbot])
78
-
79
- if __name__ == "__main__":
80
- # ์™ธ๋ถ€ ์ ‘์† ์›ํ•  ์‹œ server_name="0.0.0.0"์œผ๋กœ ๋ณ€๊ฒฝ ๊ฐ€๋Šฅ
81
- app.launch(share=False)