File size: 6,798 Bytes
43bc49d
 
 
 
 
e60b597
43bc49d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e60b597
 
43bc49d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e60b597
43bc49d
 
 
e60b597
 
43bc49d
 
 
 
 
 
 
 
 
e60b597
 
 
e9e0da2
e60b597
 
43bc49d
e60b597
 
43bc49d
e60b597
43bc49d
43fd88f
 
 
 
 
e60b597
43fd88f
 
 
 
e60b597
 
 
 
 
 
e9e0da2
e60b597
 
43fd88f
e9e0da2
43bc49d
e9e0da2
 
43bc49d
 
bb65a12
 
e9e0da2
e60b597
 
e9e0da2
 
 
 
78bbf9c
e60b597
43bc49d
 
 
bb65a12
e9e0da2
bb65a12
e9e0da2
 
 
 
 
 
bb65a12
 
e9e0da2
bb65a12
e9e0da2
 
 
 
 
bb65a12
43bc49d
bb65a12
 
43bc49d
 
e9e0da2
bb65a12
 
 
43bc49d
 
 
e9e0da2
 
bb65a12
43bc49d
 
 
e9e0da2
bb65a12
 
 
e9e0da2
bb65a12
 
e9e0da2
bb65a12
e9e0da2
 
bb65a12
43bc49d
 
 
 
 
e60b597
43bc49d
e60b597
e9e0da2
bb65a12
e60b597
e9e0da2
e60b597
e9e0da2
e60b597
 
 
 
 
 
 
43bc49d
 
e9e0da2
bb65a12
43bc49d
 
e3e4474
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
import json
import os
import time
import uuid
import tempfile
from PIL import Image, ImageDraw, ImageFont
import gradio as gr
import base64
import mimetypes

from google import genai
from google.genai import types

def save_binary_file(file_name, data):
    """Write raw bytes *data* to *file_name*, overwriting any existing file."""
    with open(file_name, "wb") as out_file:
        out_file.write(data)

def generate(text, file_name, api_key, model="gemini-2.0-flash-exp"):
    """Upload an image and a text prompt to Gemini and stream back the result.

    Args:
        text: Prompt text sent alongside the uploaded image.
        file_name: Path of the image file to upload.
        api_key: Gemini API key; blank/None falls back to the GEMINI_API_KEY
            environment variable.
        model: Gemini model identifier.

    Returns:
        Tuple ``(image_path, text_response)`` — path of a generated PNG (or
        None when the model returned only text) and the accumulated text
        output (empty string when an image was produced).
    """
    client = genai.Client(api_key=(api_key.strip() if api_key and api_key.strip() != ""
                                     else os.environ.get("GEMINI_API_KEY")))

    files = [ client.files.upload(file=file_name) ]

    contents = [
        types.Content(
            role="user",
            parts=[
                types.Part.from_uri(
                    file_uri=files[0].uri,
                    mime_type=files[0].mime_type,
                ),
                types.Part.from_text(text=text),
            ],
        ),
    ]
    generate_content_config = types.GenerateContentConfig(
        temperature=1,
        top_p=0.95,
        top_k=40,
        max_output_tokens=8192,
        response_modalities=["image", "text"],
        response_mime_type="text/plain",
    )

    text_response = ""
    image_path = None
    # Reserve a temp path, then close the handle immediately so that
    # save_binary_file can reopen the same path (reopening an open
    # NamedTemporaryFile fails on Windows).
    with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp:
        temp_path = tmp.name
    try:
        for chunk in client.models.generate_content_stream(
            model=model,
            contents=contents,
            config=generate_content_config,
        ):
            if not chunk.candidates or not chunk.candidates[0].content or not chunk.candidates[0].content.parts:
                continue
            candidate = chunk.candidates[0].content.parts[0]
            if candidate.inline_data:
                save_binary_file(temp_path, candidate.inline_data.data)
                print(f"Arquivo de tipo {candidate.inline_data.mime_type} salvo em: {temp_path} com prompt: {text}")
                image_path = temp_path
                break
            elif chunk.text:
                # Guard: chunk.text can be None for non-text chunks; the
                # original unconditional concatenation raised TypeError.
                text_response += chunk.text + "\n"
    finally:
        # Don't leak the pre-created temp file when no image was generated.
        if image_path is None and os.path.exists(temp_path):
            os.remove(temp_path)

    return image_path, text_response

def process_image_and_prompt(composite_pil, prompt, gemini_api_key):
    """Gradio callback: persist the uploaded image, call Gemini, return results.

    Args:
        composite_pil: Uploaded image as a PIL.Image (None when nothing was uploaded).
        prompt: User prompt text.
        gemini_api_key: Optional API key forwarded to :func:`generate`.

    Returns:
        ``([PIL.Image], "")`` when an image was generated, otherwise
        ``(None, text_response)`` with the model's text output.

    Raises:
        gr.Error: with a user-facing message on any failure.
    """
    composite_path = None
    try:
        if composite_pil is None:
            # Fail early with a clear message instead of an AttributeError.
            raise ValueError("Nenhuma imagem foi carregada.")
        # Reserve the temp path first, then save after the handle is closed
        # (saving to a still-open NamedTemporaryFile path fails on Windows).
        with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp:
            composite_path = tmp.name
        composite_pil.save(composite_path)

        image_path, text_response = generate(
            text=prompt,
            file_name=composite_path,
            api_key=gemini_api_key,
            model="gemini-2.0-flash-exp",
        )

        if image_path:
            result_img = Image.open(image_path)
            if result_img.mode == "RGBA":
                result_img = result_img.convert("RGB")
            return [result_img], ""
        return None, text_response
    except Exception as e:
        raise gr.Error(f"Erro ao processar: {e}", duration=5)
    finally:
        # The uploaded-image temp file was created with delete=False and was
        # previously leaked on every call; always clean it up.
        if composite_path and os.path.exists(composite_path):
            os.remove(composite_path)

# Gradio interface: header, config/instructions accordions, input column
# (image + API key + prompt), output column (gallery + text), and examples.
with gr.Blocks(css="style.css") as demo:
    # Static header with logo and links.
    gr.HTML(
    """
    <div class="header-container">
      <div>
          <img src="https://www.gstatic.com/lamda/images/gemini_favicon_f069958c85030456e93de685481c559f160ea06b.png" alt="Logo Gemini">
      </div>
      <div>
          <h1>Gemini para Edição de Imagens</h1>
          <p>Desenvolvido com <a href="https://gradio.app/">Gradio</a> ⚡️ | 
          <a href="https://huggingface.co/spaces/ameerazam08/Gemini-Image-Edit?duplicate=true">Duplique este Repositório</a> |
          <a href="https://aistudio.google.com/apikey">Obtenha uma Chave API</a> | 
          Siga-me no Linkedin: <a href="https://www.linkedin.com/in/dheiver-santos/">dheiver-santos</a></p>
      </div>
    </div>
    """
    )

    with gr.Accordion("⚠️ Configuração da API ⚠️", open=False, elem_classes="config-accordion"):
        gr.Markdown("""
    - **Problema:** ❗ Às vezes, o modelo retorna texto em vez de uma imagem.  
    ### 🔧 Solução:
    1. **🛠️ Duplique o Repositório**  
       - Crie uma cópia separada para modificações.  
    2. **🔑 Use Sua Própria Chave API Gemini**  
       - É **obrigatório** configurar sua chave Gemini para geração!  
    """)

    with gr.Accordion("📌 Instruções de Uso", open=False, elem_classes="instructions-accordion"):
        gr.Markdown("""
    ### 📌 Como Usar  
    - Faça upload de uma imagem e insira um prompt para gerar resultados.
    - Se texto for retornado em vez de imagem, ele aparecerá na saída de texto.
    - Use apenas imagens PNG.
    - ❌ **Não use imagens NSFW!**
    """)

    with gr.Row(elem_classes="main-content"):
        # Left column: inputs.
        with gr.Column(elem_classes="input-column"):
            image_input = gr.Image(
                type="pil",
                label="Carregar Imagem",
                image_mode="RGBA",
                elem_id="image-input",
                elem_classes="upload-box"
            )
            gemini_api_key = gr.Textbox(
                lines=1,
                placeholder="Insira a Chave API Gemini (opcional)",
                label="Chave API Gemini (opcional)",
                elem_classes="api-key-input"
            )
            prompt_input = gr.Textbox(
                lines=2,
                placeholder="Digite seu prompt aqui...",
                label="Prompt",
                elem_classes="prompt-input"
            )
            submit_btn = gr.Button("Gerar", elem_classes="generate-btn")

        # Right column: outputs (gallery for images, textbox for text-only replies).
        with gr.Column(elem_classes="output-column"):
            output_gallery = gr.Gallery(label="Resultados Gerados", elem_classes="output-gallery")
            output_text = gr.Textbox(
                label="Saída do Gemini", 
                placeholder="A resposta em texto aparecerá aqui se nenhuma imagem for gerada.",
                elem_classes="output-text"
            )

    submit_btn.click(
        fn=process_image_and_prompt,
        inputs=[image_input, prompt_input, gemini_api_key],
        outputs=[output_gallery, output_text],
    )

    gr.Markdown("## Experimente estes exemplos", elem_classes="gr-examples-header")

    # Each example row carries three values: image path, prompt, API key ("").
    examples = [
        ["data/1.webp", "change text to 'AMEER'", ""],
        ["data/2.webp", "remove the spoon from hand only", ""],
        ["data/3.webp", "change text to 'Make it'", ""],
        ["data/1.jpg", "add joker style only on face", ""],
        ["data/1777043.jpg", "add joker style only on face", ""],
        ["data/2807615.jpg", "add lipstick on lip only", ""],
        ["data/76860.jpg", "add lipstick on lip only", ""],
        ["data/2807615.jpg", "make it happy looking face only", ""],
    ]

    gr.Examples(
        examples=examples,
        # BUG FIX: the example rows have three columns, but only two inputs
        # were wired, which Gradio rejects as a row/inputs length mismatch.
        inputs=[image_input, prompt_input, gemini_api_key],
        elem_id="examples-grid"
    )

demo.queue(max_size=50).launch()