xmrt committed on
Commit
7df5b0c
·
1 Parent(s): b18c135
Files changed (1) hide show
  1. main_noweb.py +2 -55
main_noweb.py CHANGED
@@ -2,8 +2,6 @@
2
  import mmpose
3
  from mmpose.apis import MMPoseInferencer
4
 
5
- # Ultralytics
6
- from ultralytics import YOLO
7
  import torch
8
 
9
  # Gradio
@@ -32,10 +30,9 @@ else:
32
  os.system("nvidia-smi")
33
 
34
  print("[INFO]: Imported modules!")
35
- human = MMPoseInferencer("dekr_hrnet-w32_8xb10-140e_coco-512x512")
36
  hand = MMPoseInferencer("hand")
37
- human3d = MMPoseInferencer(pose3d="human3d")
38
- track_model = YOLO('yolov8n.pt') # Load an official Detect model
39
 
40
  print("[INFO]: Downloaded models!")
41
 
@@ -62,56 +59,6 @@ def check_extension(video):
62
  return video
63
 
64
 
65
- def tracking(video, model, boxes=True):
66
- print("[INFO] Is cuda available? ", torch.cuda.is_available())
67
- print(device)
68
-
69
- print("[INFO] Loading model...")
70
- # Load an official or custom model
71
-
72
- # Perform tracking with the model
73
- print("[INFO] Starting tracking!")
74
- # https://docs.ultralytics.com/modes/predict/
75
- annotated_frame = model(video, boxes=boxes, device=device)
76
-
77
- return annotated_frame
78
-
79
- def show_tracking(video_content):
80
-
81
- # https://docs.ultralytics.com/datasets/detect/coco/
82
- video = cv2.VideoCapture(video_content)
83
-
84
- # Track
85
- video_track = tracking(video_content, track_model.track)
86
-
87
- # Prepare to save video
88
- #out_file = os.path.join(vis_out_dir, "track.mp4")
89
- out_file = "track.mp4"
90
- print("[INFO]: TRACK", out_file)
91
-
92
- fourcc = cv2.VideoWriter_fourcc(*"mp4v") # Codec for MP4 video
93
- fps = video.get(cv2.CAP_PROP_FPS)
94
- height, width, _ = video_track[0][0].orig_img.shape
95
- size = (width,height)
96
-
97
- out_track = cv2.VideoWriter(out_file, fourcc, fps, size)
98
-
99
- # Go through frames and write them
100
- for frame_track in video_track:
101
- result_track = frame_track[0].plot() # plot a BGR numpy array of predictions
102
- out_track.write(result_track)
103
-
104
- print("[INFO] Done with frames")
105
- #print(type(result_pose)) numpy ndarray
106
-
107
- out_track.release()
108
-
109
- video.release()
110
- cv2.destroyAllWindows() # Closing window
111
-
112
- return out_file
113
-
114
-
115
  def pose3d(video, kpt_threshold):
116
  video = check_extension(video)
117
  print(device)
 
2
  import mmpose
3
  from mmpose.apis import MMPoseInferencer
4
 
 
 
5
  import torch
6
 
7
  # Gradio
 
30
  os.system("nvidia-smi")
31
 
32
  print("[INFO]: Imported modules!")
33
+ human = MMPoseInferencer("dekr_hrnet-w32_8xb10-140e_coco-512x512") # ipr_res50_debias-8xb64-210e_coco-256x256-055a7699_20220913
34
  hand = MMPoseInferencer("hand")
35
+ human3d = MMPoseInferencer("simplebaseline3d_h36m") # pose3d="human3d"
 
36
 
37
  print("[INFO]: Downloaded models!")
38
 
 
59
  return video
60
 
61
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
62
  def pose3d(video, kpt_threshold):
63
  video = check_extension(video)
64
  print(device)