Update app.py
app.py CHANGED

@@ -1,7 +1,8 @@
 import gradio as gr
-from transformers import
+from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 
-
+# SKT 한국어 GPT 모델
+model_id = "skt/ko-gpt-trinity-1.2B-v0.5"
 
 tokenizer = AutoTokenizer.from_pretrained(model_id)
 model = AutoModelForCausalLM.from_pretrained(model_id)
@@ -10,13 +11,18 @@ pipe = pipeline(
     "text-generation",
     model=model,
     tokenizer=tokenizer,
-    max_new_tokens=
+    max_new_tokens=100,
     do_sample=True,
-    temperature=0.7
+    temperature=0.7,
 )
 
 def chat(prompt):
-
-    return
+    response = pipe(prompt)[0]["generated_text"]
+    return response
 
-gr.Interface(
+gr.Interface(
+    fn=chat,
+    inputs="text",
+    outputs="text",
+    title="한국어 GPT 챗봇 (SKT Trinity 1.2B)"
+).launch()
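Assembling the + side of the diff, the resulting app.py after this commit should look roughly like the sketch below. The Korean comment reads "SKT Korean GPT model", and the title string translates to "Korean GPT Chatbot (SKT Trinity 1.2B)"; the model skt/ko-gpt-trinity-1.2B-v0.5 and its tokenizer are loaded once at startup, and the generation settings (max_new_tokens=100, do_sample=True, temperature=0.7) become the pipeline's defaults for every call.

import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

# SKT Korean GPT model
model_id = "skt/ko-gpt-trinity-1.2B-v0.5"

# load the tokenizer and model weights once, at startup
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

# default generation settings for every request
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=100,
    do_sample=True,
    temperature=0.7,
)

def chat(prompt):
    # the pipeline returns a list of dicts; take the first candidate's text
    response = pipe(prompt)[0]["generated_text"]
    return response

gr.Interface(
    fn=chat,
    inputs="text",
    outputs="text",
    title="한국어 GPT 챗봇 (SKT Trinity 1.2B)",
).launch()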
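One caveat: a transformers text-generation pipeline puts the prompt plus the continuation into generated_text by default, so chat() as committed echoes the user's input back before the model's reply. A minimal tweak, assuming the same pipe as above, is to pass return_full_text=False at call time:

def chat(prompt):
    # return_full_text=False asks the pipeline for only the newly
    # generated continuation, not prompt + continuation
    response = pipe(prompt, return_full_text=False)[0]["generated_text"]
    return response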
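Once the app is running, the gr.Interface also exposes a programmatic endpoint. A sketch of calling it with gradio_client, assuming the app is reachable at Gradio's default local address (for the deployed Space, pass its "owner/space-name" instead):

from gradio_client import Client

# assumption: the app runs locally on Gradio's default port
client = Client("http://127.0.0.1:7860")

# gr.Interface registers its endpoint as /predict by default
result = client.predict("안녕하세요, 자기소개 해주세요.", api_name="/predict")
print(result)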