Update app.py
app.py
CHANGED
@@ -257,57 +257,54 @@ async def generate_image():
 
 genai.configure(api_key="AIzaSyBPIdkEyVTDZnmXrBi4ykf0sOfkbOvxAzo")
 
-
-generation_config = {
-    "temperature": 0.9,
-    "top_p": 1,
-    "top_k": 1,
-    "max_output_tokens": 2048,
-}
-
-safety_settings = [
-    {
-        "category": "HARM_CATEGORY_HARASSMENT",
-        "threshold": "BLOCK_MEDIUM_AND_ABOVE"
-    },
-    {
-        "category": "HARM_CATEGORY_HATE_SPEECH",
-        "threshold": "BLOCK_MEDIUM_AND_ABOVE"
-    },
-    {
-        "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
-        "threshold": "BLOCK_MEDIUM_AND_ABOVE"
-    },
-    {
-        "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
-        "threshold": "BLOCK_MEDIUM_AND_ABOVE"
-    },
-]
-
-model = genai.GenerativeModel(
-    model_name="gemini-1.0-pro-001",
-    generation_config=generation_config,
-    safety_settings=safety_settings
-)
+DEFAULT_MODELS = ["gemini-1.0-pro", "gemini-1.0-pro-001"]
 
 @app.route('/gemini', methods=['GET'])
 def gemini():
     prompt = request.args.get('prompt')
+    model_name = request.args.get('model')
 
     if not prompt:
         return jsonify({'error': 'Prompt parameter is required'}), 400
 
+    if model_name and model_name not in DEFAULT_MODELS:
+        return jsonify({'error': f'Model {model_name} not found'}), 400
+
     try:
+        # Use the requested model, or fall back to the default
+        selected_model = model_name if model_name in DEFAULT_MODELS else DEFAULT_MODELS[0]
+
+        # Set up the selected model
+        generation_config = {
+            "temperature": 1,
+            "top_p": 1,
+            "top_k": 1,
+            "max_output_tokens": 2048,
+        }
+        safety_settings = [
+            {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
+            {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
+            {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
+            {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
+        ]
+
+        model = genai.GenerativeModel(
+            model_name=selected_model,
+            generation_config=generation_config,
+            safety_settings=safety_settings
+        )
+
+        # Start the conversation and generate a response
         convo = model.start_chat(history=[])
         convo.send_message(prompt)
         response = convo.last.text
 
-        return jsonify({'response': response})
+        return jsonify({'status': 'true', 'response': response})
 
     except Exception as e:
         error_message = str(e)
         app.logger.error("Failed to generate content: %s", error_message)
-        return jsonify({'error': 'Failed to generate content.'}), 500
+        return jsonify({'status': 'false', 'error': 'Failed to generate content.'}), 500
 
 
 if __name__ == "__main__":
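For reference, a minimal usage sketch of the updated /gemini endpoint after this change. The base URL, port, prompt text, and timeout are assumptions for illustration (a local Flask run is assumed, not the actual Space URL); the model query parameter is optional and, when supplied, must be one of DEFAULT_MODELS.

import requests

# Hypothetical base URL for a local run of the Flask app; adjust to the actual deployment.
BASE_URL = "http://localhost:5000"

params = {
    "prompt": "Write a haiku about the sea.",  # required
    "model": "gemini-1.0-pro-001",             # optional; must appear in DEFAULT_MODELS
}
resp = requests.get(f"{BASE_URL}/gemini", params=params, timeout=60)
data = resp.json()

if resp.status_code == 200 and data.get("status") == "true":
    print(data["response"])
else:
    # 400 for a missing prompt or an unknown model, 500 when generation fails
    print(f"Error {resp.status_code}: {data.get('error')}")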