skallewag committed · verified
Commit 35fadf1 · 1 Parent(s): ca69ff4

Update app.py

Files changed (1): app.py (+97 −452)
app.py CHANGED
@@ -6,474 +6,119 @@
  # Written by Xueyan Zou ([email protected]), Jianwei Yang ([email protected])
  # --------------------------------------------------------

- # This file is specifically adapted for Hugging Face Spaces deployment
-
  import os
  import sys
  import subprocess
- import warnings
- import traceback
- from pathlib import Path
-
- # Log all operations for debugging
- print("Starting SEEM HF Space setup...")
- print(f"Current directory: {os.getcwd()}")
- print(f"Python version: {sys.version}")
-
- # Create mock detectron2 structures to prevent import errors
- print("Setting up mock detectron2 module")
- class Boxes:
-     def __init__(self, *args, **kwargs):
-         pass
-
- class ImageList:
-     def __init__(self, *args, **kwargs):
-         pass
-
-     @staticmethod
-     def from_tensors(*args, **kwargs):
-         return ImageList()
-
- class Instances:
-     def __init__(self, *args, **kwargs):
-         pass
-
- class BitMasks:
-     def __init__(self, *args, **kwargs):
-         pass
-
-     @staticmethod
-     def from_polygon_masks(*args, **kwargs):
-         return BitMasks()
-
- class BoxMode:
-     XYXY_ABS = 0
-     XYWH_ABS = 1
-
- # Add mock detectron2 to sys.modules as a proper package
- if 'detectron2' not in sys.modules:
-     import types
-     detectron2_module = types.ModuleType('detectron2')
-     structures_module = types.ModuleType('detectron2.structures')
-     sys.modules['detectron2'] = detectron2_module
-     sys.modules['detectron2.structures'] = structures_module
-
-     # Add classes to structures module
-     structures_module.Boxes = Boxes
-     structures_module.ImageList = ImageList
-     structures_module.Instances = Instances
-     structures_module.BitMasks = BitMasks
-     structures_module.BoxMode = BoxMode
-
-     # Set structures as an attribute of detectron2
-     detectron2_module.structures = structures_module
-     print("Mock detectron2 module created")
-
- # Make sure utils directory exists
- os.makedirs('utils', exist_ok=True)
- print("Created utils directory if it didn't exist")
-
- # Create a custom distributed.py without mpi4py dependency
- with open('utils/distributed.py', 'w') as f:
-     f.write("""# Custom distributed.py without mpi4py dependency
- import os
- import torch
- import torch.distributed as dist
-
- class MPI:
-     class COMM_WORLD:
-         @staticmethod
-         def Get_rank():
-             return 0
-
-         @staticmethod
-         def Get_size():
-             return 1
-
-         @staticmethod
-         def bcast(data, root=0):
-             return data
-
-         @staticmethod
-         def barrier():
-             pass
-
- def apply_distributed(opt):
-     opt.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-     opt.rank = 0
-     opt.world_size = 1
-     opt.gpu = 0
-     return opt
-
- def init_distributed(opt=None):
-     if opt is not None:
-         opt.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-         opt.rank = 0
-         opt.world_size = 1
-         opt.gpu = 0
-         return opt
-
-     return None
-
- def get_rank():
-     return 0
-
- def get_world_size():
-     return 1
-
- def is_main_process():
-     return True
-
- def synchronize():
-     pass
-
- def all_gather(data):
-     return [data]
-
- def reduce_dict(input_dict, average=True):
-     return input_dict
  """)
- print("Created custom distributed.py")
-
- # Ensure examples directory exists
- os.makedirs('examples', exist_ok=True)
- print("Created examples directory if it didn't exist")
-
- # Create a minimal interactive.py in tasks directory
- os.makedirs('tasks', exist_ok=True)
- with open('tasks/interactive.py', 'w') as f:
-     f.write("""
- import numpy as np
- from PIL import Image, ImageDraw
-
- def interactive_infer_image(model, audio_model, image, tasks, refimg=None, reftxt=None, audio_pth=None, video_pth=None):
-     # Get image dimensions
-     img = image['image']
-     h, w = img.size[1], img.size[0]
-
-     # Display a message and create a simple mask for demonstration
-     print("Called interactive_infer_image with tasks:", tasks)
-     print("Image size:", img.size)
-     if refimg is not None:
-         print("Referring image size:", refimg['image'].size)
-     if reftxt:
-         print("Text:", reftxt)
-     if audio_pth:
-         print("Audio path:", audio_pth)
-
-     # Create a simple circle mask in the center
-     mask = np.zeros((h, w), dtype=np.uint8)
-     center_x, center_y = w//2, h//2
-     radius = min(w, h) // 4
-     for y in range(h):
-         for x in range(w):
-             if ((x - center_x)**2 + (y - center_y)**2) < radius**2:
-                 mask[y, x] = 255
-
-     return Image.fromarray(mask), None
-
- def interactive_infer_video(model, audio_model, image, tasks, refimg=None, reftxt=None, audio_pth=None, video_pth=None):
-     # Just return the input video for demonstration
-     print("Called interactive_infer_video with tasks:", tasks)
-     if video_pth:
-         print("Video path:", video_pth)
-     return None, video_pth
- """)
- print("Created simplified interactive.py")
-
- # Create some example placeholder files
- example_files = [
-     'corgi1.webp', 'corgi2.jpg', 'river1.png', 'river2.png',
-     'zebras1.jpg', 'zebras2.jpg', 'fries1.png', 'fries2.png',
-     'placeholder.png', 'ref_vase.JPG'
- ]
-
- placeholder_img = None
- try:
-     from PIL import Image, ImageDraw
-     placeholder_img = Image.new('RGB', (400, 300), color=(240, 240, 240))
-     d = ImageDraw.Draw(placeholder_img)
-     d.text((150, 150), "Placeholder", fill=(0, 0, 0))
- except Exception as e:
-     print(f"Error creating placeholder image: {e}")
-
- for file_name in example_files:
-     file_path = os.path.join('examples', file_name)
-     if not os.path.exists(file_path) and placeholder_img is not None:
-         try:
-             placeholder_img.save(file_path)
-             print(f"Created {file_path}")
-         except Exception as e:
-             print(f"Error creating {file_path}: {e}")
-
- # Create dummy audio/video files if needed
- if not os.path.exists('examples/river1.wav'):
-     try:
-         with open('examples/river1.wav', 'wb') as f:
-             f.write(b'RIFF$\x00\x00\x00WAVEfmt \x10\x00\x00\x00\x01\x00\x01\x00\x00\x04\x00\x00\x00\x04\x00\x00\x01\x00\x08\x00data\x00\x00\x00\x00')
-         print("Created dummy audio file")
-     except Exception as e:
-         print(f"Error creating dummy audio file: {e}")
-
- if not os.path.exists('examples/vasedeck.mp4'):
-     try:
-         with open('examples/vasedeck.mp4', 'wb') as f:
-             f.write(b'\x00\x00\x00\x18ftypmp42\x00\x00\x00\x00mp42mp41\x00\x00\x00\x00')
-         print("Created dummy video file")
-     except Exception as e:
-         print(f"Error creating dummy video file: {e}")
-
- # Continue with regular imports
- print("Importing required libraries...")
- try:
-     import PIL
-     from PIL import Image, ImageDraw
-     import gradio as gr
-     import torch
-     import argparse
-     import numpy as np
-     from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple
-     from gradio import processing_utils
-
-     print("Basic imports successful")
- except Exception as e:
-     print(f"Error importing basic libraries: {e}")
-     traceback.print_exc()
-     sys.exit(1)
-
- # Try to import specialized libraries but handle their absence gracefully
- try:
-     import whisper
-     audio_loaded = True
-     print("Whisper loaded successfully")
- except Exception as e:
-     print(f"Error loading whisper: {e}")
-     audio_loaded = False
-
- # Global flags for model status
- model_loaded = False
- audio_loaded = audio_loaded if 'audio_loaded' in locals() else False
- interactive_functions_imported = False
-
- # Dummy constants if not available
- try:
-     from utils.constants import COCO_PANOPTIC_CLASSES
-     print("Loaded COCO_PANOPTIC_CLASSES")
- except ImportError:
-     print("Creating dummy COCO_PANOPTIC_CLASSES")
-     COCO_PANOPTIC_CLASSES = ["person", "cat", "dog", "car", "bicycle", "umbrella", "tree", "sky", "building"]
-
- # Try to import the model but handle failures gracefully
- try:
-     # Attempt to import specialized modules but don't fail if they're not available
-     try:
-         from modeling.BaseModel import BaseModel
-         from modeling import build_model
-         from utils.distributed import init_distributed
-         from utils.arguments import load_opt_from_config_files
-         print("Model imports successful")
-
-         # Try to import interactive functions
-         try:
-             from tasks.interactive import interactive_infer_image, interactive_infer_video
-             print("Successfully imported interactive functions from tasks.interactive")
-             interactive_functions_imported = True
-         except ImportError as e:
-             print(f"Error importing interactive functions: {e}")
-             interactive_functions_imported = False
-
-         # Try to set up the model
-         try:
-             parser = argparse.ArgumentParser('SEEM Demo', add_help=False)
-             parser.add_argument('--conf_files', default="configs/seem/focall_unicl_lang_demo.yaml", metavar="FILE", help='path to config file')
-             cfg = parser.parse_args()
-
-             opt = load_opt_from_config_files([cfg.conf_files])
-             opt = init_distributed(opt)
-
-             # META DATA
-             cur_model = 'None'
-             pretrained_pth = None
-             if 'focalt' in cfg.conf_files:
-                 pretrained_pth = os.path.join("seem_focalt_v0.pt")
-                 if not os.path.exists(pretrained_pth):
-                     print(f"Downloading model file {pretrained_pth}...")
-                     os.system("wget {}".format("https://huggingface.co/xdecoder/SEEM/resolve/main/seem_focalt_v0.pt"))
-                 cur_model = 'Focal-T'
-             elif 'focal' in cfg.conf_files:
-                 pretrained_pth = os.path.join("seem_focall_v0.pt")
-                 if not os.path.exists(pretrained_pth):
-                     print(f"Downloading model file {pretrained_pth}...")
-                     os.system("wget {}".format("https://huggingface.co/xdecoder/SEEM/resolve/main/seem_focall_v0.pt"))
-                 cur_model = 'Focal-L'
-
-             if pretrained_pth and os.path.exists(pretrained_pth):
-                 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-                 print(f"Using device: {device}")
-
-                 model = BaseModel(opt, build_model(opt)).from_pretrained(pretrained_pth).eval().to(device)
-                 with torch.no_grad():
-                     model.model.sem_seg_head.predictor.lang_encoder.get_text_embeddings(COCO_PANOPTIC_CLASSES + ["background"], is_eval=True)
-                 print("Model loaded successfully")
-                 model_loaded = True
-             else:
-                 print(f"Model file not found: {pretrained_pth}")
-                 model = None
-                 model_loaded = False
-         except Exception as e:
-             print(f"Error setting up model: {e}")
-             traceback.print_exc()
-             model = None
-             model_loaded = False
-     except Exception as e:
-         print(f"Error during model import: {e}")
-         traceback.print_exc()
-         model = None
-         model_loaded = False
- except Exception as e:
-     print(f"Error during model setup: {e}")
-     traceback.print_exc()
-     model = None
-     model_loaded = False
-
- # If interactive functions weren't imported, define dummy versions
- if not interactive_functions_imported:
-     print("Creating dummy interactive functions")
-     def interactive_infer_image(model, audio_model, image, tasks, refimg=None, reftxt=None, audio_pth=None, video_pth=None):
-         # Create a simple circle mask in the center
-         img = image['image']
-         h, w = img.size[1], img.size[0]
-         mask = np.zeros((h, w), dtype=np.uint8)
-         center_x, center_y = w//2, h//2
-         radius = min(w, h) // 4
-         for y in range(h):
-             for x in range(w):
-                 if ((x - center_x)**2 + (y - center_y)**2) < radius**2:
-                     mask[y, x] = 255
-         return Image.fromarray(mask), None
-
-     def interactive_infer_video(model, audio_model, image, tasks, refimg=None, reftxt=None, audio_pth=None, video_pth=None):
-         return None, video_pth
-
- # Inference function
- @torch.no_grad()
- def inference(image, task, *args, **kwargs):
-     if not model_loaded:
-         # Return a placeholder image with an informative message
-         print("Model not loaded, returning placeholder image")
-
-         # Generate a simple mask based on the image size
-         if image is not None:
-             try:
-                 h, w = image.size[1], image.size[0]
-                 mask = np.zeros((h, w), dtype=np.uint8)
-
-                 # Add a simple shape to the mask for demonstration
-                 center_x, center_y = w//2, h//2
-                 radius = min(w, h) // 4
-                 for y in range(h):
-                     for x in range(w):
-                         if ((x - center_x)**2 + (y - center_y)**2) < radius**2:
-                             mask[y, x] = 255
-
-                 return Image.fromarray(mask), None
-             except Exception as e:
-                 print(f"Error creating demo mask: {e}")
-                 warning_img = Image.new('RGB', (600, 400), color=(240, 240, 240))
-                 d = ImageDraw.Draw(warning_img)
-                 d.text((50, 150), "Model could not be loaded.", fill=(255, 0, 0))
-                 d.text((50, 200), "Using simplified interface for demonstration.", fill=(255, 0, 0))
-                 return warning_img, None
-
-         warning_img = Image.new('RGB', (600, 400), color=(240, 240, 240))
-         d = ImageDraw.Draw(warning_img)
-         d.text((50, 150), "Model could not be loaded.", fill=(255, 0, 0))
-         d.text((50, 200), "Using simplified interface for demonstration.", fill=(255, 0, 0))
-         return warning_img, None
-
-     # Prepare input parameters for the interactive functions
-     image_input = {"image": image, "mask": kwargs.get("mask", None)}
-     referring_image = kwargs.get("referring_image", None)
-
-     # If referring image is provided, prepare it in the expected format
-     refimg = None
-     if referring_image is not None:
-         refimg = {"image": referring_image, "mask": kwargs.get("referring_mask", None)}
-
-     # Get text and audio parameters
-     reftxt = kwargs.get("referring_text", "")
-     audio_pth = kwargs.get("referring_audio", None)
-     video_pth = kwargs.get("video", None)
-
-     # Call the appropriate interactive function
      try:
-         if 'Video' in task:
-             return interactive_infer_video(model, audio, image_input, task, refimg, reftxt, audio_pth, video_pth)
-         else:
-             return interactive_infer_image(model, audio, image_input, task, refimg, reftxt, audio_pth, video_pth)
      except Exception as e:
-         print(f"Error during inference: {e}")
          traceback.print_exc()
-         warning_img = Image.new('RGB', (600, 400), color=(240, 240, 240))
-         d = ImageDraw.Draw(warning_img)
-         d.text((50, 150), f"Error: {str(e)}", fill=(255, 0, 0))
-         d.text((50, 200), "Please check logs for details.", fill=(255, 0, 0))
-         return warning_img, None
-
- '''
- launch app
- '''
- title = "SEEM: Segment Everything Everywhere All At Once"
-
- # Update description based on model loading status
- if model_loaded:
-     model_status = f"<span style=\"color:green;\">✓ Model loaded successfully</span> (SEEM {cur_model})"
- else:
-     model_status = "<span style=\"color:orange;\">⚠ Running in demonstration mode</span> (model not loaded)"
-
- description = f"""
- <div style="text-align: center; font-weight: bold;">
-     <span style="font-size: 18px" id="paper-info">
-         [<a href="https://github.com/UX-Decoder/Segment-Everything-Everywhere-All-At-Once" target="_blank">GitHub</a>]
-         [<a href="https://arxiv.org/pdf/2304.06718.pdf" target="_blank">arXiv</a>]
-     </span>
- </div>
- <div style="text-align: left; font-weight: bold;">
-     <br>
-     &#x1F32A Status: {model_status}
-     </p>
- </div>
- """
-
- article = "SEEM Demo" + (" (Simplified Interface)" if not model_loaded else "")
- inputs = [
-     gr.Image(label="[Stroke] Draw on Image", type="pil"),
-     gr.CheckboxGroup(choices=["Stroke", "Example", "Text", "Audio", "Video", "Panoptic"], label="Interactive Mode"),
-     gr.Image(label="[Example] Draw on Referring Image", type="pil"),
-     gr.Textbox(label="[Text] Referring Text"),
-     gr.Audio(label="[Audio] Referring Audio", source="microphone", type="filepath"),
-     gr.Video(label="[Video] Referring Video Segmentation", format="mp4")
- ]
-
- outputs = [
-     gr.outputs.Image(type="pil", label="Segmentation Results (COCO classes as label)"),
-     gr.outputs.Video(label="Video Segmentation Results (COCO classes as label)")
- ]
-
- gr.Interface(
-     fn=inference,
-     inputs=inputs,
-     outputs=outputs,
-     examples=[
-         ["examples/corgi1.webp", ["Text"], "examples/corgi2.jpg", "The corgi.", None, None],
-         ["examples/river1.png", ["Text", "Audio"], "examples/river2.png", "The green trees.", "examples/river1.wav", None],
-         ["examples/zebras1.jpg", ["Example"], "examples/zebras2.jpg", "", None, None],
-         ["examples/fries1.png", ["Example"], "examples/fries2.png", "", None, None],
-         ["examples/placeholder.png", ["Video"], "examples/ref_vase.JPG", "", None, "examples/vasedeck.mp4"],
-     ],
-     title=title,
-     description=description,
-     article=article,
-     allow_flagging='never',
-     cache_examples=False,
- ).launch()
  # Written by Xueyan Zou ([email protected]), Jianwei Yang ([email protected])
  # --------------------------------------------------------

+ # Hugging Face Spaces Launcher

  import os
  import sys
  import subprocess
+ import importlib.util
+ import logging
+ import time
+
+ # Configure logging
+ logging.basicConfig(level=logging.INFO,
+                     format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+ logger = logging.getLogger("SEEM-HF")
+
+ def run_command(cmd, description=None):
+     """Run a shell command and log its output"""
+     if description:
+         logger.info(f"Running: {description}")
+     logger.info(f"Command: {cmd}")
+
+     try:
+         process = subprocess.Popen(
+             cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
+             universal_newlines=True
+         )
+
+         # Stream and log output in real-time
+         for line in process.stdout:
+             line = line.rstrip()
+             logger.info(line)
+
+         process.wait()
+         return process.returncode == 0
+     except Exception as e:
+         logger.error(f"Error executing command: {e}")
+         return False
+
+ def install_dependencies():
+     """Install required dependencies"""
+     # Check if ffmpeg is installed
+     logger.info("Checking for ffmpeg...")
+     if not run_command("which ffmpeg", "Checking ffmpeg"):
+         logger.info("Installing ffmpeg...")
+         run_command("apt-get update && apt-get install -y ffmpeg", "Installing ffmpeg")
+
+     # Install Python dependencies
+     logger.info("Installing Python dependencies...")
+     if os.path.exists("assets/requirements/requirements.txt"):
+         run_command("pip install -r assets/requirements/requirements.txt", "Installing base requirements")
+     else:
+         logger.warning("Base requirements file not found, creating minimal requirements")
+         with open("requirements.txt", "w") as f:
+             f.write("""torch>=1.12.0
+ torchvision>=0.13.0
+ opencv-python-headless>=4.5.0
+ numpy>=1.23.5
+ gradio>=3.13.0
+ Pillow>=9.0.0
+ openai-whisper
  """)
+         run_command("pip install -r requirements.txt", "Installing minimal requirements")
+
+     if os.path.exists("assets/requirements/requirements_custom.txt"):
+         run_command("pip install -r assets/requirements/requirements_custom.txt", "Installing custom requirements")
+
+ def setup_environment():
+     """Set up the necessary directories and environment"""
+     # Create necessary directories
+     os.makedirs('utils', exist_ok=True)
+     os.makedirs('modeling', exist_ok=True)
+     os.makedirs('modeling/architectures', exist_ok=True)
+     os.makedirs('tasks', exist_ok=True)
+     os.makedirs('examples', exist_ok=True)
+     logger.info("Created required directories")
+
+     # Make sure demo/seem directory exists
+     if not os.path.exists("demo/seem"):
+         logger.error("demo/seem directory not found!")
+         return False
+
+     return True
+
+ def main():
+     """Main entry point"""
+     logger.info("Starting SEEM Hugging Face Space")
+
+     # Install dependencies
+     install_dependencies()
+
+     # Setup environment
+     if not setup_environment():
+         return
+
+     # Prepare to run the actual app
+     app_path = "demo/seem/app.py"
+     if not os.path.exists(app_path):
+         logger.error(f"Application file not found at {app_path}!")
+         return
+
+     logger.info(f"Loading application from {app_path}")
+
+     # Add the demo directory to Python path
+     sys.path.insert(0, os.path.abspath('demo'))
+
+     # Load and run the app module
      try:
+         spec = importlib.util.spec_from_file_location("seem_app", app_path)
+         seem_app = importlib.util.module_from_spec(spec)
+         spec.loader.exec_module(seem_app)
+         logger.info("SEEM application loaded successfully")
      except Exception as e:
+         logger.error(f"Error loading application: {e}")
+         import traceback
          traceback.print_exc()
+
+ if __name__ == "__main__":
+     main()
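
The key step in the new launcher is executing demo/seem/app.py through importlib.util rather than importing it by package path, so the demo's top-level code (presumably including its Gradio launch call) runs inside this process. Below is a minimal, self-contained sketch of that same mechanism; it is not part of the commit, and the throwaway stub script stands in for demo/seem/app.py.

# Standalone sketch (not part of the commit): demonstrates the same
# importlib-based "load a script as a module" step used in main() above,
# against a temporary stub instead of the real demo/seem/app.py.
import importlib.util
import os
import sys
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    # Stub standing in for demo/seem/app.py (hypothetical content).
    app_path = os.path.join(tmp, "app.py")
    with open(app_path, "w") as f:
        f.write("print('stub app executed as', __name__)\n")

    # Same three calls as the launcher: build a spec from the file path,
    # materialize a module object from it, then execute its top level.
    spec = importlib.util.spec_from_file_location("seem_app", app_path)
    module = importlib.util.module_from_spec(spec)
    sys.modules["seem_app"] = module  # optional; mirrors normal import bookkeeping
    spec.loader.exec_module(module)   # prints: stub app executed as seem_app

Registering the module in sys.modules before exec_module is optional for a one-shot launcher like this one, but it keeps later lookups of the module by name working the way a regular import would.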