Athspi committed on
Commit
d2236a2
·
verified ·
1 Parent(s): ffc095f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +62 -62
app.py CHANGED
@@ -3,15 +3,16 @@ import google.generativeai as genai
3
  import os
4
  import tempfile
5
  import base64
 
6
  from dotenv import load_dotenv
7
 
 
 
 
8
  # Load environment variables
9
  load_dotenv()
10
 
11
- # Configure Flask app
12
  app = Flask(__name__)
13
-
14
- # Configure Gemini API
15
  genai.configure(api_key=os.getenv("GEMINI_API_KEY"))
16
 
17
  @app.route("/")
@@ -21,70 +22,69 @@ def home():
21
  @app.route("/process", methods=["POST"])
22
  def process_image():
23
  try:
24
- # Get data from request
25
  data = request.json
26
  image_data = data.get("image")
27
- object_type = data.get("objectType")
28
 
 
29
  if not image_data or not object_type:
30
- return jsonify({"success": False, "message": "Invalid input data"})
31
-
32
- # Decode base64 image data
33
- image_bytes = base64.b64decode(image_data.split(",")[1])
34
-
35
- # Create temporary directory
36
- temp_dir = tempfile.mkdtemp()
37
- input_path = os.path.join(temp_dir, "input.png")
38
- with open(input_path, "wb") as f:
39
- f.write(image_bytes)
40
-
41
- # Create the model
42
- model = genai.GenerativeModel('gemini-2.0-flash-exp-image-generation')
43
-
44
- # Build the prompt
45
- prompt = f"Remove the {object_type} from the image and fill the area naturally."
46
-
47
- # Disable all safety settings
48
- safety_settings = {
49
- "HARM_CATEGORY_HARASSMENT": "BLOCK_NONE",
50
- "HARM_CATEGORY_HATE_SPEECH": "BLOCK_NONE",
51
- "HARM_CATEGORY_SEXUALLY_EXPLICIT": "BLOCK_NONE",
52
- "HARM_CATEGORY_DANGEROUS_CONTENT": "BLOCK_NONE",
53
- "HARM_CATEGORY_CIVIC_INTEGRITY": "BLOCK_NONE"
54
- }
55
-
56
- # Generate content
57
- response = model.generate_content(
58
- [
59
- prompt,
60
- genai.upload_file(input_path)
61
- ],
62
- generation_config={
63
- "temperature": 1,
64
- "top_p": 0.95,
65
- "top_k": 40,
66
- "max_output_tokens": 8192,
67
- },
68
- safety_settings=safety_settings
69
- )
70
-
71
- # Process response
72
- output_path = os.path.join(temp_dir, "result.png")
73
- for chunk in response:
74
- if chunk.candidates:
75
- for part in chunk.candidates[0].content.parts:
76
- if hasattr(part, 'inline_data'):
77
- with open(output_path, "wb") as f:
78
- f.write(part.inline_data.data)
79
- return jsonify({
80
- "success": True,
81
- "resultPath": output_path
82
- })
83
-
84
- return jsonify({"success": False, "message": "No valid image data found in response"})
85
 
86
  except Exception as e:
87
- return jsonify({"success": False, "message": str(e)})
 
88
 
89
  if __name__ == "__main__":
90
- app.run(host="0.0.0.0", port=7860, debug=True)
 
3
  import os
4
  import tempfile
5
  import base64
6
+ import logging
7
  from dotenv import load_dotenv
8
 
9
# Emit INFO-and-above records via the root logger's default handler.
logging.basicConfig(level=logging.INFO)

# Pull configuration (e.g. GEMINI_API_KEY) from a local .env file, if present.
load_dotenv()

# Flask application instance used by the route decorators below.
app = Flask(__name__)

# Authenticate the Gemini client with the key from the environment.
# NOTE(review): if GEMINI_API_KEY is unset this passes None and fails only at
# request time — confirm that is the intended failure mode.
genai.configure(api_key=os.getenv("GEMINI_API_KEY"))
17
 
18
  @app.route("/")
 
22
@app.route("/process", methods=["POST"])
def process_image():
    """Remove a user-specified object from an uploaded image via the Gemini API.

    Expects a JSON body with:
      - "image": a base64 data URL ("data:image/png;base64,...")
      - "objectType": name of the object to remove

    Returns JSON {"success": bool, ...}. On success the edited image is
    returned inline under "resultImage" as a base64 data URL; the legacy
    "resultPath" key is kept for backward compatibility but the underlying
    file does not persist server-side.
    """
    try:
        data = request.json
        image_data = data.get("image") if data else None
        object_type = (data.get("objectType") or "").strip() if data else ""

        # Validate inputs
        if not image_data or not object_type:
            return jsonify({"success": False, "message": "Missing required parameters"})

        # Decode image (strip the "data:...;base64," header first)
        try:
            header, encoded = image_data.split(",", 1)
            image_bytes = base64.b64decode(encoded)
        except Exception as e:
            logging.error("Image decoding failed: %s", e)
            return jsonify({"success": False, "message": "Invalid image data"})

        # Temporary files — the directory (and everything in it) is removed
        # automatically when this block exits.
        with tempfile.TemporaryDirectory() as temp_dir:
            input_path = os.path.join(temp_dir, "input.png")
            with open(input_path, "wb") as f:
                f.write(image_bytes)

            # Configure model with safety settings
            model = genai.GenerativeModel('gemini-2.0-flash-exp-image-generation')
            prompt = f"Remove {object_type} from image naturally without text or artifacts"

            response = model.generate_content(
                [prompt, genai.upload_file(input_path)],
                generation_config={
                    "temperature": 0.9,
                    "top_p": 0.95,
                    "top_k": 32,
                    "max_output_tokens": 4096,
                },
                safety_settings={
                    "HARM_CATEGORY_CIVIC_INTEGRITY": "BLOCK_ONLY_HIGH",
                    "HARM_CATEGORY_HARASSMENT": "BLOCK_NONE",
                    "HARM_CATEGORY_HATE_SPEECH": "BLOCK_NONE",
                    "HARM_CATEGORY_SEXUALLY_EXPLICIT": "BLOCK_NONE",
                    "HARM_CATEGORY_DANGEROUS_CONTENT": "BLOCK_NONE"
                }
            )

            # Process response
            output_path = os.path.join(temp_dir, "result.png")
            for chunk in response:
                if chunk.candidates:
                    for part in chunk.candidates[0].content.parts:
                        # NOTE(review): SDK Part objects may expose an empty
                        # inline_data attribute, so check for actual bytes
                        # rather than mere attribute presence — confirm against
                        # the installed google-generativeai version.
                        inline = getattr(part, 'inline_data', None)
                        if inline and inline.data:
                            with open(output_path, "wb") as f:
                                f.write(inline.data)
                            # BUG FIX: the original returned only output_path,
                            # which lives inside the TemporaryDirectory and is
                            # deleted as soon as this block exits — the client
                            # could never read it. Return the image inline.
                            encoded_result = base64.b64encode(inline.data).decode("ascii")
                            return jsonify({
                                "success": True,
                                "resultImage": f"data:image/png;base64,{encoded_result}",
                                "resultPath": output_path
                            })
                        elif getattr(part, 'text', None):
                            logging.info("Text response: %s", part.text)

            return jsonify({"success": False, "message": "No valid output generated"})

    except Exception as e:
        logging.error("Processing error: %s", e)
        return jsonify({"success": False, "message": f"Processing error: {str(e)}"})
88
 
89
if __name__ == "__main__":
    # Bind to all interfaces so the app is reachable from outside a container;
    # debug mode stays off for this entry point.
    app.run(host="0.0.0.0", port=5000, debug=False)