Update app.py
app.py
CHANGED
@@ -7,23 +7,15 @@ import textract
 
 # List of available models (including experimental and recent ones)
 models = [
-    "gemini-2.5-flash
-    "gemini-2.5-pro
-    "gemini-2.0-flash",
-    "gemini-2.0-flash-lite",
-    "gemini-2.0-flash-thinking-exp-01-21",
-    "gemini-1.5-pro",
+    "gemini-2.5-flash",
+    "gemini-2.5-pro",
     "gemini-2.0-flash-exp-image-generation"
 ]
 
 # Model types for handling inputs
 model_types = {
-    "gemini-2.5-flash
-    "gemini-2.5-pro
-    "gemini-2.0-flash": "text",
-    "gemini-2.0-flash-lite": "text",
-    "gemini-2.0-flash-thinking-exp-01-21": "text",
-    "gemini-1.5-pro": "text",
+    "gemini-2.5-flash": "text",
+    "gemini-2.5-pro": "text",
     "gemini-2.0-flash-exp-image-generation": "multimodal"
 }
 
@@ -52,8 +44,7 @@ def process_files(files, model_type):
     return inputs
 
 # Chat submit function
-def chat_submit_func(message, files, chat_history, model, temperature, top_p, max_tokens, api_key):
-    print(model)
+def chat_submit_func(message, files, chat_history, model, temperature, top_p, max_tokens, api_key, response_type="text/plain"):
     client = genai.Client(api_key=api_key)
 
     # Prepare inputs
@@ -71,6 +62,8 @@ def chat_submit_func(message, files, chat_history, model, temperature, top_p, ma
         "top_p": top_p,
         "max_output_tokens": max_tokens,
     }
+    if response_type:
+        generation_config["response_mime_type"] = response_type
 
     try:
         response = client.models.generate_content(model=model, contents=inputs, config=generation_config)
@@ -104,8 +97,7 @@ def chat_submit_func(message, files, chat_history, model, temperature, top_p, ma
     return chat_history, ""
 
 # Single response submit function
-def single_submit_func(prompt, files, model, temperature, top_p, max_tokens, api_key):
-    print(model)
+def single_submit_func(prompt, files, model, temperature, top_p, max_tokens, api_key, response_type="text/plain"):
     client = genai.Client(api_key=api_key)
 
     # Prepare inputs
@@ -127,7 +119,7 @@ def single_submit_func(prompt, files, model, temperature, top_p, max_tokens, api
         ),
     ]
     generate_content_config = types.GenerateContentConfig(
-        response_mime_type=
+        response_mime_type=response_type,
     )
     try:
         response = client.models.generate_content(model=model, contents=contents, config=generate_content_config)
@@ -160,7 +152,8 @@ with gr.Blocks(title="Gemini API Interface") as app:
     model_selector = gr.Dropdown(choices=models, label="Select Model", value=models[0])
     temperature = gr.Slider(0, 1, value=0.7, label="Temperature", step=0.01)
     top_p = gr.Slider(0, 1, value=0.9, label="Top P", step=0.01)
-    max_tokens = gr.Number(value=
+    max_tokens = gr.Number(value=65536, label="Max Tokens", minimum=1)
+    response_type_selector = gr.Radio(choices=["text/plain", "application/json"], value="text/plain", label="Response MIME Type")
 
     # Tabs for Chat and Single Response (hidden until key is validated)
     with gr.Tabs(visible=False) as tabs:
@@ -194,15 +187,15 @@ with gr.Blocks(title="Gemini API Interface") as app:
     # Chat submission
     chat_submit_btn.click(
         chat_submit_func,
-        inputs=[chat_input, chat_files, chat_display, model_selector, temperature, top_p, max_tokens, api_key_input],
+        inputs=[chat_input, chat_files, chat_display, model_selector, temperature, top_p, max_tokens, api_key_input, response_type_selector],
        outputs=[chat_display, chat_status]
     )
 
     # Single response submission
     single_submit_btn.click(
         single_submit_func,
-        inputs=[single_input, single_files, model_selector, temperature, top_p, max_tokens, api_key_input],
+        inputs=[single_input, single_files, model_selector, temperature, top_p, max_tokens, api_key_input, response_type_selector],
         outputs=[single_text_output, single_image_output]
     )
 
-app.launch()
+app.launch()
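For reference, the new response_type value ends up as the response_mime_type of the request: the chat path sets it on a plain config dict, while the single-response path passes it to types.GenerateContentConfig. A minimal standalone sketch of the same call with the google-genai SDK, assuming a placeholder API key and a made-up prompt (neither comes from this commit):

from google import genai
from google.genai import types

# Placeholder key; in app.py the key comes from the api_key_input textbox.
client = genai.Client(api_key="YOUR_API_KEY")

config = types.GenerateContentConfig(
    temperature=0.7,
    top_p=0.9,
    max_output_tokens=1024,
    response_mime_type="application/json",  # or "text/plain", the two Radio choices
)

response = client.models.generate_content(
    model="gemini-2.5-flash",
    contents="List three colors as a JSON array of strings.",
    config=config,
)
print(response.text)  # e.g. ["red", "green", "blue"]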
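On the UI side the change follows the standard Gradio pattern: the new Radio component is appended to each handler's inputs list, and its current value is delivered as the trailing positional argument. A stripped-down sketch of that wiring, with a hypothetical echo handler standing in for chat_submit_func and single_submit_func (only the component names mirror app.py):

import gradio as gr

# Hypothetical handler: echoes its arguments to show where the Radio value lands.
def submit(prompt, response_type="text/plain"):
    return f"[{response_type}] {prompt}"

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    response_type_selector = gr.Radio(
        choices=["text/plain", "application/json"],
        value="text/plain",
        label="Response MIME Type",
    )
    output = gr.Textbox(label="Output")
    submit_btn = gr.Button("Submit")
    # The Radio comes last in inputs, matching the trailing response_type parameter.
    submit_btn.click(submit, inputs=[prompt, response_type_selector], outputs=output)

demo.launch()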