# Gradio chatbot app for the Google Gemini API (Hugging Face Spaces).
# Required libraries.
import gradio as gr
import google.generativeai as genai
import os
import logging

# Logging setup: module-level logger, INFO and above.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Startup check: is the Gemini API key present in the environment?
# (Read again on every request in respond(); this is just for the initial UI status.)
GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY")
if GEMINI_API_KEY:
    logger.info("API key detected in environment variables.")
    # Log only an 8-char prefix so the full key never lands in the logs.
    logger.info("API key preview: %s...", GEMINI_API_KEY[:8])
else:
    logger.warning("GEMINI_API_KEY was not found in environment variables.")
# --- UI and chatbot layout ---
# Gradio Blocks gives more layout flexibility than the plain ChatInterface.
with gr.Blocks(theme=gr.themes.Default(primary_hue="blue")) as demo:
    gr.Markdown(
        """
        # Gemini API Chatbot
        A chatbot powered by the Google Gemini API.
        **Important**: `GEMINI_API_KEY` must be set under **Settings → Repository secrets** of this Hugging Face Space.
        [Get an API key](https://aistudio.google.com/app/apikey)
        """
    )

    # Environment-variable status indicator.
    with gr.Row():
        env_status = gr.Textbox(
            label="Environment status",
            value=f"GEMINI_API_KEY: {'✅ set' if GEMINI_API_KEY else '❌ not set'}",
            interactive=False,
        )

    # Chat display. type="messages" selects the dict-based message format
    # ({"role": ..., "content": ...}) and silences the legacy tuple-format
    # deprecation warning.
    chatbot = gr.Chatbot(
        label="Gemini Chatbot",
        height=600,
        type="messages",
    )

    with gr.Row():
        # User message input.
        msg = gr.Textbox(
            label="Message",
            placeholder="Ask me anything...",
            scale=7,
            lines=1,
        )
        # Send button.
        submit_button = gr.Button("Send", variant="primary", scale=1)

    with gr.Accordion("Advanced settings", open=False):
        # System prompt that defines the assistant's role.
        system_message = gr.Textbox(
            value="You are a helpful and friendly chatbot.",
            label="System message",
            lines=2,
        )
        # Sampling temperature: higher values give more creative output.
        temperature = gr.Slider(
            minimum=0.0,
            maximum=1.0,
            value=0.7,
            step=0.1,
            label="Temperature",
        )
        # Upper bound on tokens generated per reply.
        max_tokens = gr.Slider(
            minimum=1,
            maximum=4096,
            value=1024,
            step=1,
            label="Max new tokens",
        )
        # Button to re-check the environment without restarting.
        refresh_button = gr.Button("🔄 Refresh environment", size="sm")

    def refresh_env_status():
        """Re-read GEMINI_API_KEY from the environment and return a status string."""
        global GEMINI_API_KEY
        GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY")
        status = f"GEMINI_API_KEY: {'✅ set' if GEMINI_API_KEY else '❌ not set'}"
        if GEMINI_API_KEY:
            status += f" (preview: {GEMINI_API_KEY[:8]}...)"
        return status

    refresh_button.click(refresh_env_status, outputs=[env_status])

    # --- Gemini API call ---
    def respond(message, chat_history, system_prompt, temp, max_output_tokens):
        """Stream a Gemini reply to `message`, given prior `chat_history`.

        Yields the accumulated response text so Gradio can render it
        incrementally. On any failure, yields a human-readable error
        message instead of raising.
        """
        # Re-read the key on every call so a newly added secret takes
        # effect without restarting the process.
        api_key = os.environ.get("GEMINI_API_KEY")
        logger.info("API key check: %s", "present" if api_key else "missing")

        # No key: show setup instructions and stop.
        if not api_key:
            yield (
                "⚠️ **Error**: `GEMINI_API_KEY` is not set.\n"
                "**How to fix**:\n"
                "1. Open the **Settings** tab of this Hugging Face Space\n"
                "2. Find the **Repository secrets** section\n"
                "3. Click **New secret**\n"
                "4. Name: `GEMINI_API_KEY`, Value: your actual API key\n"
                "5. Click **Save**\n"
                "6. **Restart** the Space (Settings → Factory reboot)\n"
                "**Note**: secrets remain protected even on non-private Spaces."
            )
            return

        try:
            genai.configure(api_key=api_key)
            logger.info("API key configured successfully")
        except Exception as e:
            yield f"An error occurred while configuring the API key: {e}"
            return

        # Build the model with the user-supplied system prompt.
        try:
            model = genai.GenerativeModel(
                model_name="gemini-2.0-flash",
                system_instruction=system_prompt,
            )
        except Exception as e:
            yield (
                f"Model initialization error: {e}\n"
                "Available models: gemini-2.0-flash, gemini-1.5-flash, gemini-1.5-pro"
            )
            return

        # Convert the Gradio history into the format the Gemini API expects
        # ("assistant" role becomes "model", content goes into "parts").
        gemini_history = []
        if isinstance(chat_history, list) and chat_history:
            if isinstance(chat_history[0], dict):
                # type="messages" format: list of role/content dicts.
                for entry in chat_history:
                    role = entry.get("role")
                    if role == "user":
                        gemini_history.append({"role": "user", "parts": [entry.get("content", "")]})
                    elif role == "assistant":
                        gemini_history.append({"role": "model", "parts": [entry.get("content", "")]})
            else:
                # Legacy tuple format: (user_msg, bot_msg) pairs.
                for user_msg, model_msg in chat_history:
                    if user_msg:
                        gemini_history.append({"role": "user", "parts": [user_msg]})
                    if model_msg:
                        gemini_history.append({"role": "model", "parts": [model_msg]})

        # Start a chat session seeded with the prior conversation.
        chat = model.start_chat(history=gemini_history)

        # Generation parameters from the UI sliders.
        generation_config = genai.types.GenerationConfig(
            temperature=temp,
            max_output_tokens=int(max_output_tokens),
        )

        try:
            # Stream the reply and surface partial text as it arrives.
            response = chat.send_message(
                message,
                stream=True,
                generation_config=generation_config,
            )
            full_response = ""
            for chunk in response:
                if hasattr(chunk, "text"):
                    full_response += chunk.text
                    yield full_response
        except Exception as e:
            # Map common API failures to friendlier messages.
            detail = str(e)
            if "API_KEY_INVALID" in detail:
                yield "❌ The API key is invalid. Please verify it is correct."
            elif "QUOTA_EXCEEDED" in detail:
                yield "❌ API usage quota exceeded."
            else:
                yield f"An error occurred while generating the response: {detail}"

    # --- Gradio event handlers ---
    def on_submit(message, chat_history, system_prompt, temp, max_output_tokens):
        """Append the user message to the history and stream the bot reply.

        Yields (cleared_textbox_value, updated_history) tuples.
        """
        # BUGFIX: this function is a generator (it yields below), so a bare
        # `return x` is swallowed by StopIteration and Gradio would receive
        # no output — yield the unchanged state instead, then stop.
        if not message.strip():
            yield "", chat_history
            return
        chat_history = chat_history or []
        # Record the user's turn in type="messages" format.
        chat_history.append({"role": "user", "content": message})
        # Stream the assistant's reply; history passed to respond() excludes
        # the message being answered (it is sent separately).
        for partial_response in respond(message, chat_history[:-1], system_prompt, temp, max_output_tokens):
            if chat_history and chat_history[-1]["role"] == "user":
                # First chunk: add the assistant turn.
                chat_history.append({"role": "assistant", "content": partial_response})
            else:
                # Subsequent chunks: update the assistant turn in place.
                chat_history[-1]["content"] = partial_response
            yield "", chat_history

    # Wire both Enter-in-textbox and the Send button to the same handler.
    msg.submit(
        on_submit,
        [msg, chatbot, system_message, temperature, max_tokens],
        [msg, chatbot],
    )
    submit_button.click(
        on_submit,
        [msg, chatbot, system_message, temperature, max_tokens],
        [msg, chatbot],
    )
# Entry point: launch the Gradio app.
if __name__ == "__main__":
    # debug=True surfaces tracebacks in the console while developing.
    demo.launch(debug=True)