🍌 Real Nano Banana
Google Gemini 2.5 Flash Image Preview로 구동되는 AI 이미지 생성기 (AI image generator powered by Google Gemini 2.5 Flash Image Preview)
# =============================================================================
# Real Nano Banana — AI image generator powered by Google Gemini 2.5 Flash
# Image Preview. Gradio Space restricted to Hugging Face PRO / Enterprise
# users.
#
# NOTE(review): this file was recovered from a whitespace-mangled source.
# The core logic below is faithful to the recovered code; the UI wiring at
# the bottom was partially lost and has been reconstructed minimally — the
# visible UI strings are reproduced byte-for-byte (they are mojibake-encoded
# in the recovered source as well).
# =============================================================================

import os
import tempfile
from io import BytesIO
from typing import List, Optional

import gradio as gr
from google import genai
from google.genai import types  # noqa: F401  (kept from the original import list)
from huggingface_hub import whoami
from PIL import Image

# --- Google Gemini API Configuration ---
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY", "")
if not GOOGLE_API_KEY:
    raise ValueError("GOOGLE_API_KEY environment variable not set.")

# BUG FIX: the original validated GOOGLE_API_KEY above but then constructed
# the client from a *different* variable (GEMINI_API_KEY), so the key that
# was checked was never the key that was used. The client now uses the
# validated key.
client = genai.Client(api_key=GOOGLE_API_KEY)

GEMINI_MODEL_NAME = "gemini-2.5-flash-image-preview"


def verify_pro_status(token: Optional[gr.OAuthToken]) -> bool:
    """Return True if the logged-in user is HF PRO or in an Enterprise org.

    Any verification failure (network error, invalid token) is logged and
    treated as "not PRO" rather than crashing the UI.
    """
    if not token:
        return False
    try:
        user_info = whoami(token=token.token)
        if user_info.get("isPro", False):
            return True
        orgs = user_info.get("orgs", [])
        return any(org.get("isEnterprise", False) for org in orgs)
    except Exception as e:
        print(f"Could not verify user's PRO/Enterprise status: {e}")
        return False


def _extract_image_data_from_response(response) -> Optional[bytes]:
    """Return the first inline image payload in a Gemini response, or None."""
    if hasattr(response, "candidates") and response.candidates:
        for candidate in response.candidates:
            if (
                hasattr(candidate, "content")
                and hasattr(candidate.content, "parts")
                and candidate.content.parts
            ):
                for part in candidate.content.parts:
                    if hasattr(part, "inline_data") and hasattr(part.inline_data, "data"):
                        return part.inline_data.data
    return None


def _save_image_bytes(image_data: bytes) -> str:
    """Persist raw image bytes to a temporary PNG file and return its path.

    Shared by both generation entry points (the original duplicated this
    logic inline in each). `delete=False` is deliberate: Gradio serves the
    file after the handler returns.
    """
    pil_image = Image.open(BytesIO(image_data))
    with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as tmpfile:
        pil_image.save(tmpfile.name)
        return tmpfile.name


def run_single_image_logic(
    prompt: str, image_path: Optional[str] = None, progress=gr.Progress()
) -> str:
    """Text-to-image, or single image-to-image when `image_path` is given.

    Returns the filesystem path of the generated PNG.
    Raises gr.Error (user-visible) on any failure.
    """
    try:
        progress(0.2, desc="๐จ ์ค๋น ์ค...")
        contents: list = [prompt]
        if image_path:
            contents.append(Image.open(image_path))
        progress(0.5, desc="โจ ์์ฑ ์ค...")
        response = client.models.generate_content(
            model=GEMINI_MODEL_NAME,
            contents=contents,
        )
        progress(0.8, desc="๐ผ๏ธ ๋ง๋ฌด๋ฆฌ ์ค...")
        image_data = _extract_image_data_from_response(response)
        if not image_data:
            raise ValueError("No image data found in the model response.")
        result_path = _save_image_bytes(image_data)
        progress(1.0, desc="โ ์๋ฃ!")
        return result_path
    except Exception as e:
        raise gr.Error(f"์ด๋ฏธ์ง ์์ฑ ์คํจ: {e}")


def _gallery_item_to_path(item) -> str:
    """Normalize a gr.Gallery item, which may be a (path, caption) pair or a bare path."""
    if isinstance(item, (tuple, list)):
        return item[0]
    return item


def run_multi_image_logic(prompt: str, images: List[str], progress=gr.Progress()) -> str:
    """Multi-image editing: send every uploaded image plus the prompt.

    Returns the filesystem path of the generated PNG.
    Raises gr.Error when no images were uploaded or generation fails.
    """
    if not images:
        raise gr.Error("'์ฌ๋ฌ ์ด๋ฏธ์ง' ํญ์์ ์ต์ ํ ๊ฐ์ ์ด๋ฏธ์ง๋ฅผ ์ ๋ก๋ํด์ฃผ์ธ์.")
    try:
        progress(0.2, desc="๐จ ์ด๋ฏธ์ง ์ค๋น ์ค...")
        # BUG FIX: the original unconditionally did `image_path[0]`, which is
        # only correct when gallery items are (path, caption) tuples; a bare
        # string path (as the List[str] annotation claims) would be sliced to
        # its first character. Both shapes are now handled.
        contents: list = [Image.open(_gallery_item_to_path(item)) for item in images]
        contents.append(prompt)
        progress(0.5, desc="โจ ์์ฑ ์ค...")
        response = client.models.generate_content(
            model=GEMINI_MODEL_NAME,
            contents=contents,
        )
        progress(0.8, desc="๐ผ๏ธ ๋ง๋ฌด๋ฆฌ ์ค...")
        image_data = _extract_image_data_from_response(response)
        if not image_data:
            raise ValueError("No image data found in the model response.")
        result_path = _save_image_bytes(image_data)
        progress(1.0, desc="โ ์๋ฃ!")
        return result_path
    except Exception as e:
        raise gr.Error(f"์ด๋ฏธ์ง ์์ฑ ์คํจ: {e}")


# --- Gradio App UI ---
css = """
/* Header Styling */
.main-header { background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); padding: 2rem; border-radius: 1rem; margin-bottom: 2rem; box-shadow: 0 10px 30px rgba(0,0,0,0.1); }
.header-title { font-size: 2.5rem !important; font-weight: bold; color: white; text-align: center; margin: 0 !important; text-shadow: 2px 2px 4px rgba(0,0,0,0.2); }
.header-subtitle { color: rgba(255,255,255,0.9); text-align: center; margin-top: 0.5rem !important; font-size: 1.1rem; }
/* Card Styling */
.card { background: white; border-radius: 1rem; padding: 1.5rem; box-shadow: 0 4px 6px rgba(0,0,0,0.1); border: 1px solid rgba(0,0,0,0.05); }
.dark .card { background: #1f2937; border: 1px solid #374151; }
/* Tab Styling */
.tabs { border-radius: 0.5rem; overflow: hidden; margin-bottom: 1rem; }
.tabitem { padding: 1rem !important; }
button.selected { background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important; color: white !important; }
/* Button Styling */
.generate-btn { background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important; border: none !important; color: white !important; font-size: 1.1rem !important; font-weight: 600 !important; padding: 0.8rem 2rem !important; border-radius: 0.5rem !important; cursor: pointer !important; transition: all 0.3s ease !important; width: 100% !important; margin-top: 1rem !important; }
.generate-btn:hover { transform: translateY(-2px) !important; box-shadow: 0 10px 20px rgba(102, 126, 234, 0.4) !important; }
.use-btn { background: linear-gradient(135deg, #10b981 0%, #059669 100%) !important; border: none !important; color: white !important; font-weight: 600 !important; padding: 0.6rem 1.5rem !important; border-radius: 0.5rem !important; cursor: pointer !important; transition: all 0.3s ease !important; width: 100% !important; }
.use-btn:hover { transform: translateY(-1px) !important; box-shadow: 0 5px 15px rgba(16, 185, 129, 0.4) !important; }
/* Input Styling */
.prompt-input textarea { border-radius: 0.5rem !important; border: 2px solid #e5e7eb !important; padding: 0.8rem !important; font-size: 1rem !important; transition: border-color 0.3s ease !important; }
.prompt-input textarea:focus { border-color: #667eea !important; outline: none !important; }
.dark .prompt-input textarea { border-color: #374151 !important; background: #1f2937 !important; }
/* Image Output Styling */
#output { border-radius: 0.5rem !important; overflow: hidden !important; box-shadow: 0 4px 6px rgba(0,0,0,0.1) !important; }
/* Progress Bar Styling */
.progress-bar { background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important; }
/* Examples Styling */
.examples-container { background: #f9fafb; border-radius: 0.5rem; padding: 1rem; margin-top: 1rem; }
.dark .examples-container { background: #1f2937; }
/* Pro Message Styling */
.pro-message { background: linear-gradient(135deg, #fef3c7 0%, #fde68a 100%); border-radius: 1rem; padding: 2rem; text-align: center; border: 2px solid #f59e0b; }
.dark .pro-message { background: linear-gradient(135deg, #7c2d12 0%, #92400e 100%); border-color: #f59e0b; }
/* Emoji Animations */
@keyframes bounce { 0%, 100% { transform: translateY(0); } 50% { transform: translateY(-10px); } }
.emoji-icon { display: inline-block; animation: bounce 2s infinite; }
/* Responsive Design */
@media (max-width: 768px) { .header-title { font-size: 2rem !important; } .main-container { padding: 1rem !important; } }
"""

# NOTE(review): the original UI definition was partially lost in recovery.
# The layout below is a minimal reconstruction from the surviving fragments:
# header HTML, a PRO-only gate, a multi-image tab (TabItem id="multiple",
# Gallery input, drag-and-drop hint), a prompt input, and a footer. Confirm
# against the deployed Space before relying on the exact widget arrangement.
with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
    # Header banner (visible text reproduced verbatim from the source).
    gr.HTML(
        '<div class="main-header">'
        '<h1 class="header-title">Real Nano Banana</h1>'
        '<p class="header-subtitle">Google Gemini 2.5 Flash Image Preview๋ก ๊ตฌ๋๋๋ AI ์ด๋ฏธ์ง ์์ฑ๊ธฐ</p>'
        "</div>"
    )

    login_button = gr.LoginButton()

    # Shown only to non-PRO users; `verify_pro_status` toggles visibility.
    pro_message = gr.HTML(
        '<div class="pro-message">๐ ์ด ์คํ์ด์ค๋ Hugging Face PRO ์ฌ์ฉ์ ์ ์ฉ์ ๋๋ค. '
        '<a href="https://huggingface.co/subscribe/pro">PRO ๊ตฌ๋ ํ๊ธฐ</a></div>',
        visible=False,
    )

    with gr.Column(visible=True) as main_ui:
        with gr.Tabs():
            with gr.TabItem("Single Image", id="single") as single_tab:
                gr.HTML("๐ก ํ ์คํธโ์ด๋ฏธ์ง ์์ฑ์ ๋น์๋์ธ์")
                single_image_input = gr.Image(
                    type="filepath", label="Input image (optional)"
                )
            with gr.TabItem("๐จ ๋ค์ค ์ด๋ฏธ์ง", id="multiple") as multi_tab:
                gallery_input = gr.Gallery(
                    label="์ ๋ ฅ ์ด๋ฏธ์ง๋ค",
                    file_types=["image"],
                    elem_classes="gallery-input",
                )
                gr.HTML("๐ก ์ฌ๋ฌ ์ด๋ฏธ์ง๋ฅผ ๋๋๊ทธ ์ค ๋๋กญํ์ธ์")

        # Prompt Input
        prompt_input = gr.Textbox(label="Prompt", lines=3, elem_classes="prompt-input")
        generate_btn = gr.Button("Generate", elem_classes="generate-btn")
        output_image = gr.Image(label="Result", elem_id="output")

        # Track which tab is active so one Generate button can dispatch to
        # the right backend function.
        active_tab = gr.State("single")
        single_tab.select(lambda: "single", None, active_tab)
        multi_tab.select(lambda: "multiple", None, active_tab)

        def _generate(prompt, single_image, gallery, tab, progress=gr.Progress()):
            """Dispatch to single- or multi-image generation by active tab."""
            if tab == "multiple":
                return run_multi_image_logic(prompt, gallery, progress)
            return run_single_image_logic(prompt, single_image, progress)

        generate_btn.click(
            _generate,
            inputs=[prompt_input, single_image_input, gallery_input, active_tab],
            outputs=output_image,
        )

    def _check_access(token: Optional[gr.OAuthToken] = None):
        """Show the app to PRO/Enterprise users, the upsell message otherwise."""
        is_pro = verify_pro_status(token)
        return gr.update(visible=is_pro), gr.update(visible=not is_pro)

    demo.load(_check_access, inputs=None, outputs=[main_ui, pro_message])

    # Footer (visible text reproduced verbatim from the source).
    gr.HTML(
        '<p style="text-align:center">Made with ๐ by Hugging Face PRO | '
        "Powered by Google Gemini 2.5 Flash</p>"
    )

if __name__ == "__main__":
    demo.launch()