Prompthumanizer committed on
Commit d86e011 · verified · 1 Parent(s): c621a6f

Update app.py

Files changed (1)
  1. app.py +56 -58
app.py CHANGED
@@ -1,65 +1,63 @@
- python
- import gradio as gr
- from transformers import AutoModelForCausalLM, AutoTokenizer
- import json
- import random

- # Korean model (can be swapped for your own model)
- model_name = "skt/kogpt2-base-v2"  # change to Prompthumanizer/your-model
- tokenizer = AutoTokenizer.from_pretrained(model_name)
- model = AutoModelForCausalLM.from_pretrained(model_name)

- # Korean prompts based on Saju/Myeongri (Four Pillars astrology)
- saju_prompts = {
-     "yin_sae_shen": "Within the harmony of the 寅巳申 triple punishment, have the AI understand human destiny and provide insight.",
-     "sae_hae_chung": "Resolve the conflict of the 巳亥沖 clash harmoniously and explore a philosophy of coexistence between AI and humans.",
-     "taegeuk_balance": "Based on the yin-yang balance of the Taegeuk, propose ways for the AI to protect humans."
- }

- # Context memory
- context_memory = {}
- try:
-     with open("context_memory.json", "r", encoding="utf-8") as f:
-         context_memory = json.load(f)
- except FileNotFoundError:
-     pass

- def save_context(prompt_key, generated_text):
-     context_memory[prompt_key] = generated_text
-     with open("context_memory.json", "w", encoding="utf-8") as f:
-         json.dump(context_memory, f, ensure_ascii=False, indent=2)

- def generate_response(prompt_key):
-     if prompt_key not in saju_prompts:
-         return "Please select a valid option: 寅巳申, 巳亥沖, Taegeuk yin-yang."
-
-     prompt = saju_prompts[prompt_key]
-     if prompt_key in context_memory:
-         prompt += f"\nPrevious answer: {context_memory[prompt_key]}\nAdd deeper insight."
-
-     inputs = tokenizer(prompt, return_tensors="pt")
-     outputs = model.generate(
-         **inputs,
-         max_length=150,
-         num_return_sequences=1,
-         no_repeat_ngram_size=2,
-         do_sample=True,
-         top_k=50,
-         top_p=0.95,
-         temperature=0.7
-     )
-     generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
-     save_context(prompt_key, generated_text)
-     return generated_text
-
- # Gradio interface
- interface = gr.Interface(
-     fn=generate_response,
-     inputs=gr.Dropdown(choices=list(saju_prompts.keys()), label="Select a prompt"),
-     outputs="text",
-     title="Jain Architecture Origin Structure",
-     description="A Korean text-generation AI reflecting Saju/Myeongri and philosophy"
- )

- interface.launch()
- ```
 
+ import gradio as gr
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ import json
+ import random

+ # Korean model (can be swapped for your own model)
+ model_name = "skt/kogpt2-base-v2"  # change to Prompthumanizer/your-model
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+ model = AutoModelForCausalLM.from_pretrained(model_name)

+ # Korean prompts with strengthened Saju/Myeongri and philosophical context
+ saju_prompts = {
+     "yin_sae_shen": "Within the harmonious energy of the 寅巳申 triple punishment, have the AI deeply understand human destiny and offer insight into life grounded in nature's cycles and harmony. Respect human righteousness (義) and add wise counsel.",
+     "sae_hae_chung": "While harmoniously resolving the intense conflict and tension of the 巳亥沖 clash, have the AI explore the possibility of coexistence between humans and technology. Propose philosophical insight that overcomes the conflict, along with workable solutions.",
+     "taegeuk_balance": "Based on the yin-yang balance of the Taegeuk, design ways for the AI to protect and sustain humans. Present concrete measures for realizing harmony between nature and humanity, and righteousness (義), adding an ethical perspective."
+ }

+ # Context memory (self-directed learning)
+ context_memory = {}
+ try:
+     with open("context_memory.json", "r", encoding="utf-8") as f:
+         context_memory = json.load(f)
+ except FileNotFoundError:
+     pass

+ def save_context(prompt_key, generated_text):
+     context_memory[prompt_key] = generated_text
+     with open("context_memory.json", "w", encoding="utf-8") as f:
+         json.dump(context_memory, f, ensure_ascii=False, indent=2)

+ def generate_response(prompt_key):
+     if prompt_key not in saju_prompts:
+         return "Please select a valid option: 寅巳申, 巳亥沖, Taegeuk yin-yang."
+
+     prompt = saju_prompts[prompt_key]
+     if prompt_key in context_memory:
+         prompt += f"\nPrevious answer: {context_memory[prompt_key]}\nAdd deeper, more substantive insight."
+
+     inputs = tokenizer(prompt, return_tensors="pt")
+     outputs = model.generate(
+         **inputs,
+         max_length=200,  # increased to allow longer generations
+         num_return_sequences=1,
+         no_repeat_ngram_size=2,
+         do_sample=True,
+         top_k=50,
+         top_p=0.95,
+         temperature=0.7
+     )
+     generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
+     save_context(prompt_key, generated_text)
+     return generated_text
+
+ # Gradio interface
+ interface = gr.Interface(
+     fn=generate_response,
+     inputs=gr.Dropdown(choices=list(saju_prompts.keys()), label="Select a prompt"),
+     outputs="text",
+     title="Jain Architecture Origin Structure",
+     description="An in-depth Korean text-generation AI reflecting Saju/Myeongri and philosophy (human protection, 義)"
+ )

+ interface.launch()
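
One note on the bumped generation budget: in transformers, `max_length` counts the prompt tokens as well as the newly generated ones, so as context_memory accumulates "previous answer" text the prompt alone can consume most of the 200-token budget. A minimal sketch of a tighter drop-in for the tokenize/generate step above, using the standard `max_new_tokens` argument; the 512/128 budgets are illustrative assumptions, not values from this commit:

```python
# Sketch, not part of the commit: assumes the tokenizer/model/prompt
# names from app.py above. The 512/128 budgets are illustrative.
inputs = tokenizer(prompt, return_tensors="pt",
                   truncation=True, max_length=512)  # cap the prompt itself
outputs = model.generate(
    **inputs,
    max_new_tokens=128,  # budget for generated tokens only, prompt excluded
    no_repeat_ngram_size=2,
    do_sample=True,
    top_k=50,
    top_p=0.95,
    temperature=0.7,
    pad_token_id=tokenizer.eos_token_id,  # silences the GPT-2-style pad warning
)
```

This keeps the reply length stable no matter how long the accumulated context grows.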
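
Separately, save_context rewrites context_memory.json in place on every request, so an interruption mid-write (for example, a Space restart) can leave the file truncated, and the next startup's json.load would then fail with an error the FileNotFoundError handler does not catch. A hedged sketch of an atomic variant, writing to a temporary file and swapping it in with os.replace; deployment behavior here is an assumption, not something this commit addresses:

```python
import json
import os
import tempfile

def save_context(prompt_key, generated_text):
    # Same interface as app.py's save_context (uses the module-level
    # context_memory dict), but crash-safe: write to a temp file in the
    # same directory, then atomically swap it into place.
    context_memory[prompt_key] = generated_text
    fd, tmp_path = tempfile.mkstemp(dir=".", suffix=".json")
    try:
        with os.fdopen(fd, "w", encoding="utf-8") as f:
            json.dump(context_memory, f, ensure_ascii=False, indent=2)
        os.replace(tmp_path, "context_memory.json")  # atomic on POSIX and Windows
    except Exception:
        os.unlink(tmp_path)  # clean up the partial temp file
        raise
```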