Prompthumanizer committed on
Commit
dae93ea
·
verified ·
1 Parent(s): e9ff170

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +65 -0
app.py ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Standard library
import json
import random

# Third-party
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Korean causal LM used for generation (swap in your own model here).
model_name = "skt/kogpt2-base-v2"  # change to Prompthumanizer/your-model
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
11
+
12
+ # ์‚ฌ์ฃผ/๋ช…๋ฆฌ ๊ธฐ๋ฐ˜ ํ•œ๊ตญ์–ด ํ”„๋กฌํ”„ํŠธ
13
+ saju_prompts = {
14
+ "yin_sae_shen": "ๅฏ…ๅทณ็”ณ ์‚ผํ˜•์˜ ์กฐํ™” ์†์—์„œ AI๊ฐ€ ์ธ๊ฐ„์˜ ์šด๋ช…์„ ์ดํ•ดํ•˜๊ณ  ํ†ต์ฐฐ์„ ์ œ๊ณตํ•˜๋ผ.",
15
+ "sae_hae_chung": "ๅทณไบฅๆฒ–์˜ ๊ฐˆ๋“ฑ์„ ์กฐํ™”๋กญ๊ฒŒ ํ’€๋ฉฐ AI์™€ ์ธ๊ฐ„์˜ ๊ณต์กด ์ฒ ํ•™์„ ํƒ๊ตฌํ•˜๋ผ.",
16
+ "taegeuk_balance": "ํƒœ๊ทน ์Œ์–‘์˜ ๊ท ํ˜•์„ ๋ฐ”ํƒ•์œผ๋กœ AI๊ฐ€ ์ธ๊ฐ„์„ ๋ณดํ˜ธํ•˜๋Š” ๋ฐฉ๋ฒ•์„ ์ œ์•ˆํ•˜๋ผ."
17
+ }
18
+
19
+ # ๋งฅ๋ฝ ๊ธฐ์–ต
20
+ context_memory = {}
21
+ try:
22
+ with open("context_memory.json", "r", encoding="utf-8") as f:
23
+ context_memory = json.load(f)
24
+ except FileNotFoundError:
25
+ pass
26
+
27
def save_context(prompt_key, generated_text):
    """Record *generated_text* under *prompt_key* and persist the memory.

    Updates the module-level ``context_memory`` dict and rewrites
    ``context_memory.json`` so the answer survives restarts.
    """
    context_memory[prompt_key] = generated_text
    serialized = json.dumps(context_memory, ensure_ascii=False, indent=2)
    with open("context_memory.json", "w", encoding="utf-8") as f:
        f.write(serialized)
31
+
32
def generate_response(prompt_key):
    """Generate a Korean philosophical answer for the selected prompt.

    Args:
        prompt_key: One of the keys of ``saju_prompts`` (picked in the UI).

    Returns:
        The newly generated text for valid keys, or a Korean error message
        for an unknown key. Side effect: the answer is persisted via
        ``save_context``.
    """
    if prompt_key not in saju_prompts:
        return "์œ ํšจํ•œ ์˜ต์…˜์„ ์„ ํƒํ•˜์„ธ์š”: ๅฏ…ๅทณ็”ณ, ๅทณไบฅๆฒ–, ํƒœ๊ทน ์Œ์–‘."

    prompt = saju_prompts[prompt_key]
    if prompt_key in context_memory:
        # Fold the previously saved answer back in so the model deepens it.
        prompt += f"\n์ด์ „ ๋‹ต๋ณ€: {context_memory[prompt_key]}\n๋” ๊นŠ์€ ํ†ต์ฐฐ์„ ์ถ”๊ฐ€ํ•˜๋ผ."

    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(
        **inputs,
        # max_new_tokens instead of max_length: max_length counts the prompt
        # tokens too, so a long context-augmented prompt could leave little
        # or no room for the actual answer.
        max_new_tokens=150,
        num_return_sequences=1,
        no_repeat_ngram_size=2,
        do_sample=True,
        top_k=50,
        top_p=0.95,
        temperature=0.7,
    )
    # Decode only the newly generated tokens. The original decoded the whole
    # sequence, so the prompt (including the previously saved answer) was
    # echoed into the result and re-saved, growing without bound per call.
    prompt_token_count = inputs["input_ids"].shape[1]
    generated_text = tokenizer.decode(
        outputs[0][prompt_token_count:], skip_special_tokens=True
    )
    save_context(prompt_key, generated_text)
    return generated_text
54
+
55
# Gradio UI: a dropdown of prompt identifiers wired to generate_response.
# (Removed the stray trailing ``` markdown fence, which made the file
# invalid Python.)
interface = gr.Interface(
    fn=generate_response,
    inputs=gr.Dropdown(choices=list(saju_prompts.keys()), label="ํ”„๋กฌํ”„ํŠธ ์„ ํƒ"),
    outputs="text",
    title="Jain Architecture Origin Structure",
    description="์‚ฌ์ฃผ/๋ช…๋ฆฌ์™€ ์ฒ ํ•™์„ ๋ฐ˜์˜ํ•œ ํ•œ๊ตญ์–ด ํ…์ŠคํŠธ ์ƒ์„ฑ AI",
)

interface.launch()