# projectBongBong / app.py
# Source: Hugging Face Space by devvtaco (commit 531d477, 883 bytes)
import gradio as gr
from transformers import pipeline
# Load a Korean text-generation model (e.g. KoAlpaca, LLaMA, Mistral, ...).
# NOTE(review): this 5.8B model is downloaded and loaded at import time — it
# needs a GPU-class host and makes startup slow; consider lazy loading if
# startup time matters. TODO confirm the hosting hardware can hold the model.
chat = pipeline("text-generation", model="beomi/KoAlpaca-Polyglot-5.8B")
# ๋ถ„์„ ํ•จ์ˆ˜ ์ •์˜
def analyze_korean_text(text):
prompt = f"๋‹ค์Œ ๋ฌธ์žฅ์„ ๋ถ„์„ํ•˜๊ณ  ๋ฌด๋ก€ํ•˜๊ฑฐ๋‚˜ ๊ณผ๊ฒฉํ•œ ํ‘œํ˜„์ด ์žˆ์œผ๋ฉด ์ง€์ ํ•˜๊ณ , ๋” ์นœ์ ˆํ•˜๊ฒŒ ๋ฐ”๊ฟ”์ค˜:\n\n{text}"
response = chat(prompt, max_new_tokens=200)[0]['generated_text']
return response
# Gradio ์ธํ„ฐํŽ˜์ด์Šค
iface = gr.Interface(
fn=analyze_korean_text,
inputs=gr.Textbox(label="๋ถ„์„ํ•  ๋ฌธ์žฅ ์ž…๋ ฅ"),
outputs=gr.Textbox(label="๋ถ„์„ ๊ฒฐ๊ณผ"),
title="๋ฌธ์žฅ ๋ถ„์„๊ธฐ (์นœ์ ˆํ•˜๊ฒŒ ๋ฐ”๊ฟ”์ฃผ๋Š” AI)",
description="๋ฌธ์žฅ์„ ์ž…๋ ฅํ•˜๋ฉด ๋ฌด๋ก€ํ•œ ํ‘œํ˜„์„ ์ง€์ ํ•˜๊ณ  ๋” ์˜ˆ์˜ ์žˆ๊ฒŒ ๋ฐ”๊ฟ”์ฃผ๋Š” ๋„์šฐ๋ฏธ์ž…๋‹ˆ๋‹ค."
)
# ์‹คํ–‰
iface.launch()