# Hugging Face Space: Gradio chatbot backed by the Google Gemini API.
# ํ์ํ ๋ผ์ด๋ธ๋ฌ๋ฆฌ๋ฅผ ๊ฐ์ ธ์ต๋๋ค.
import gradio as gr
import google.generativeai as genai
import os
# --- UI and chatbot description ---
# Use Gradio Blocks for a more flexible layout than gr.Interface.
with gr.Blocks(theme=gr.themes.Default(primary_hue="blue")) as demo:
    gr.Markdown(
        """
        # โ๏ธ Gemini API ์ฑ๋ด (Secrets ์ฌ์ฉ)
        Google Gemini API๋ฅผ ์ฌ์ฉํ๋ ์ฑ๋ด์๋๋ค.
        Hugging Face Spaces์ 'Settings' ํญ์ ์๋ 'Repository secrets'์ `GEMINI_API_KEY`๊ฐ ์ค์ ๋์ด ์์ด์ผ ํฉ๋๋ค.
        [API ํค ๋ฐ๊ธ๋ฐ๊ธฐ](https://aistudio.google.com/app/apikey)
        """
    )

    # Chat transcript display.
    chatbot = gr.Chatbot(label="Gemini ์ฑ๋ด", height=600)

    with gr.Row():
        # User message input box.
        msg = gr.Textbox(
            label="๋ฉ์์ง ์๋ ฅ",
            placeholder="๋ฌด์์ด๋ ๋ฌผ์ด๋ณด์ธ์...",
            scale=7,
        )
        # Send button.
        submit_button = gr.Button("์ ์ก", variant="primary", scale=1)

    with gr.Accordion("๊ณ ๊ธ ์ค์ ", open=False):
        # System prompt that defines the assistant's persona.
        system_message = gr.Textbox(
            value="You are a helpful and friendly chatbot.", label="์์คํ ๋ฉ์์ง"
        )
        # Sampling temperature: higher values give more creative output.
        temperature = gr.Slider(
            minimum=0.0, maximum=1.0, value=0.7, step=0.1, label="Temperature"
        )
        # Upper bound on the number of tokens generated per reply.
        max_tokens = gr.Slider(
            minimum=1, maximum=4096, value=1024, step=1, label="Max new tokens"
        )

    # --- Gemini API call ---
    def respond(message, chat_history, system_prompt, temp, max_output_tokens):
        """Stream a Gemini reply for `message`, yielding growing partial text.

        The API key is re-read from the environment on every call so that a
        secret added after the Space started is picked up without a restart.
        Errors are surfaced as chat messages rather than raised.
        """
        GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY")
        # Without a key we cannot call the API; explain how to configure it.
        if not GEMINI_API_KEY:
            yield "โ ๏ธ **์ค๋ฅ**: `GEMINI_API_KEY`๊ฐ ์ค์ ๋์ง ์์์ต๋๋ค.\n\nHugging Face Spaces์ **Settings > Repository secrets**์ API ํค๋ฅผ ์ถ๊ฐํ๋์ง ํ์ธํด์ฃผ์ธ์."
            return

        try:
            genai.configure(api_key=GEMINI_API_KEY)
        except Exception as e:
            yield f"API ํค ์ค์ ์ ์ค๋ฅ๊ฐ ๋ฐ์ํ์ต๋๋ค: {e}"
            return

        # Model + system prompt for this conversation.
        model = genai.GenerativeModel(
            model_name='gemini-2.0-flash',
            system_instruction=system_prompt,
        )

        # Convert Gradio's (user, bot) tuple history into the
        # role/parts dicts the Gemini SDK expects.
        gemini_history = []
        for user_msg, model_msg in chat_history:
            if user_msg:
                gemini_history.append({"role": "user", "parts": [user_msg]})
            if model_msg:
                gemini_history.append({"role": "model", "parts": [model_msg]})

        # Start a chat session seeded with the prior turns.
        chat = model.start_chat(history=gemini_history)

        # Generation parameters taken from the UI controls.
        generation_config = genai.types.GenerationConfig(
            temperature=temp,
            max_output_tokens=int(max_output_tokens),
        )

        try:
            # Stream the reply so the UI can update incrementally.
            response = chat.send_message(
                message,
                stream=True,
                generation_config=generation_config,
            )
            full_response = ""
            for chunk in response:
                # Some chunks (e.g. safety metadata) carry no text.
                if hasattr(chunk, 'text'):
                    full_response += chunk.text
                    yield full_response
        except Exception as e:
            yield f"์๋ต ์์ฑ ์ค ์ค๋ฅ๊ฐ ๋ฐ์ํ์ต๋๋ค: {e}"

    # --- Gradio event listeners ---
    def on_submit(message, chat_history, system_prompt, temp, max_output_tokens):
        """Append the user's turn, then stream the bot reply into history.

        Yields ("", updated_history) so the textbox is cleared immediately
        and the chatbot pane refreshes with every streamed chunk.
        """
        chat_history.append((message, None))
        stream = respond(message, chat_history, system_prompt, temp, max_output_tokens)
        for partial_response in stream:
            chat_history[-1] = (message, partial_response)
            yield "", chat_history

    # Enter key and button click share the same handler.
    msg.submit(
        on_submit,
        [msg, chatbot, system_message, temperature, max_tokens],
        [msg, chatbot],
    )
    submit_button.click(
        on_submit,
        [msg, chatbot, system_message, temperature, max_tokens],
        [msg, chatbot],
    )

if __name__ == "__main__":
    demo.launch(debug=True)