update app.py
app.py CHANGED
@@ -1,185 +1,45 @@
-
-# import cv2
-# import torch
-# import pandas as pd
-# from PIL import Image
-# from transformers import AutoImageProcessor, AutoModelForImageClassification
-# from tqdm import tqdm
-# import json
-# import shutil
-# from fastapi.middleware.cors import CORSMiddleware
-# from fastapi.responses import HTMLResponse
-
-# app = FastAPI()
-
-# # Add CORS middleware to allow requests from localhost:8080 (or any origin you specify)
-# app.add_middleware(
-#     CORSMiddleware,
-#     # allow_origins=["http://localhost:8080"],  # Replace with the URL of your Vue.js app
-#     allow_origins=["http://localhost:8080"],  # Replace with the URL of your Vue.js app
-#     allow_credentials=True,
-#     allow_methods=["*"],  # Allows all HTTP methods (GET, POST, etc.)
-#     allow_headers=["*"],  # Allows all headers (such as Content-Type, Authorization, etc.)
-# )
-
-# # Load the processor and the fine-tuned model from the local path
-# local_model_path = r'./vit-finetuned-ucf101'
-# processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
-# model = AutoModelForImageClassification.from_pretrained(local_model_path)
-# # model = AutoModelForImageClassification.from_pretrained("2nzi/vit-finetuned-ucf101")
-# model.eval()
-
-# # Function to classify an image
-# def classifier_image(image):
-#     image_pil = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
-#     inputs = processor(images=image_pil, return_tensors="pt")
-#     with torch.no_grad():
-#         outputs = model(**inputs)
-#     logits = outputs.logits
-#     predicted_class_idx = logits.argmax(-1).item()
-#     predicted_class = model.config.id2label[predicted_class_idx]
-#     return predicted_class
-
-# # Function to process the video and identify "Surfing" sequences
-# def identifier_sequences_surfing(video_path, intervalle=0.5):
-#     cap = cv2.VideoCapture(video_path)
-#     frame_rate = cap.get(cv2.CAP_PROP_FPS)
-#     total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
-#     frame_interval = int(frame_rate * intervalle)
-
-#     resultats = []
-#     sequences_surfing = []
-#     frame_index = 0
-#     in_surf_sequence = False
-#     start_timestamp = None
-
-#     with tqdm(total=total_frames, desc="Traitement des frames de la vidéo", unit="frame") as pbar:
-#         success, frame = cap.read()
-#         while success:
-#             if frame_index % frame_interval == 0:
-#                 timestamp = round(frame_index / frame_rate, 2)  # Maintain precision to the centisecond level
-#                 classe = classifier_image(frame)
-#                 resultats.append({"Timestamp": timestamp, "Classe": classe})
-
-#                 if classe == "Surfing" and not in_surf_sequence:
-#                     in_surf_sequence = True
-#                     start_timestamp = timestamp
-
-#                 elif classe != "Surfing" and in_surf_sequence:
-#                     # Check the next frame to confirm whether this was a one-off error
-#                     success_next, frame_next = cap.read()
-#                     next_timestamp = round((frame_index + frame_interval) / frame_rate, 2)
-#                     classe_next = None
-
-#                     if success_next:
-#                         classe_next = classifier_image(frame_next)
-#                         resultats.append({"Timestamp": next_timestamp, "Classe": classe_next})
-
-#                     # If the next frame is "Surfing", ignore the one-off error
-#                     if classe_next == "Surfing":
-#                         success = success_next
-#                         frame = frame_next
-#                         frame_index += frame_interval
-#                         pbar.update(frame_interval)
-#                         continue
-#                     else:
-#                         # Otherwise, end the "Surfing" sequence
-#                         in_surf_sequence = False
-#                         end_timestamp = timestamp
-#                         sequences_surfing.append((start_timestamp, end_timestamp))
-
-#             success, frame = cap.read()
-#             frame_index += 1
-#             pbar.update(1)
-
-#         # If we are still in a "Surfing" sequence at the end of the video
-#         if in_surf_sequence:
-#             sequences_surfing.append((start_timestamp, round(frame_index / frame_rate, 2)))
-
-#     cap.release()
-#     dataframe_sequences = pd.DataFrame(sequences_surfing, columns=["Début", "Fin"])
-#     return dataframe_sequences
-
-# # Function to convert the sequences to JSON format
-# def convertir_sequences_en_json(dataframe):
-#     events = []
-#     blocks = []
-#     for idx, row in dataframe.iterrows():
-#         block = {
-#             "id": f"Surfing{idx + 1}",
-#             "start": round(row["Début"], 2),
-#             "end": round(row["Fin"], 2)
-#         }
-#         blocks.append(block)
-#     event = {
-#         "event": "Surfing",
-#         "blocks": blocks
-#     }
-#     events.append(event)
-#     return events
-
-# @app.post("/analyze_video/")
-# async def analyze_video(file: UploadFile = File(...)):
-#     with open("uploaded_video.mp4", "wb") as buffer:
-#         shutil.copyfileobj(file.file, buffer)
-
-#     dataframe_sequences = identifier_sequences_surfing("uploaded_video.mp4", intervalle=1)
-#     json_result = convertir_sequences_en_json(dataframe_sequences)
-#     return json_result
-
-# @app.get("/", response_class=HTMLResponse)
-# async def index():
-#     return (
-#         """
-#         <html>
-#             <body>
-#                 <h1>Hello world!</h1>
-#                 <p>This `/` is the most simple and default endpoint.</p>
-#                 <p>If you want to learn more, check out the documentation of the API at
-#                 <a href='/docs'>/docs</a> or
-#                 <a href='https://2nzi-video-sequence-labeling.hf.space/docs' target='_blank'>external docs</a>.
-#                 </p>
-#             </body>
-#         </html>
-#         """
-#     )
-
-
-# # Launch the application with uvicorn (command line)
-# # uvicorn main:app --reload
-# # http://localhost:8000/docs#/
-# # (.venv) PS C:\Users\antoi\Documents\Work_Learn\Labeling-Deploy\FastAPI> uvicorn main:app --host 0.0.0.0 --port 8000 --workers 1
-
-
-from fastapi import FastAPI, UploadFile, File
+from fastapi import FastAPI, UploadFile, File, HTTPException
 import cv2
 import torch
 import pandas as pd
 from PIL import Image
 from transformers import AutoImageProcessor, AutoModelForImageClassification
 from tqdm import tqdm
-import json
 import shutil
 from fastapi.middleware.cors import CORSMiddleware
 from fastapi.responses import HTMLResponse
+from huggingface_hub import HfApi
+import os
+from dotenv import load_dotenv
+
+# Load environment variables, including the Hugging Face API key
+load_dotenv()
+
+api_key = os.getenv("HUGGINGFACE_API_KEY")
+if not api_key:
+    raise ValueError("La clé API Hugging Face n'est pas définie dans le fichier .env.")
+
+# Initialize the Hugging Face API client
+hf_api = HfApi()
 
 app = FastAPI()
 
-# Add CORS middleware to allow requests from localhost:8080 (or any origin you specify)
+# Add CORS middleware to allow requests from the Vue.js frontend
 app.add_middleware(
     CORSMiddleware,
-    # allow_origins=["http://localhost:8080"],  # Replace with the URL of your Vue.js app
-    allow_origins=["http://localhost:8080"],  # Replace with the URL of your Vue.js app
+    allow_origins=[
+        "http://localhost:8080",
+        "https://labeling2-163849140747.europe-west9.run.app/",
+    ],
    allow_credentials=True,
-    allow_methods=["*"],  # Allows all HTTP methods (GET, POST, etc.)
-    allow_headers=["*"],  # Allows all headers (such as Content-Type, Authorization, etc.)
+    allow_methods=["*"],  # Allow all HTTP methods (GET, POST, etc.)
+    allow_headers=["*"],  # Allow all headers (Content-Type, Authorization, etc.)
 )
 
-# Load the processor and the fine-tuned model from the local path
+# Load the image processor and the locally fine-tuned model
 local_model_path = r'./vit-finetuned-ucf101'
 processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
 model = AutoModelForImageClassification.from_pretrained(local_model_path)
-# model = AutoModelForImageClassification.from_pretrained("2nzi/vit-finetuned-ucf101")
 model.eval()
 
 # Function to classify an image
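Note on the new startup block: load_dotenv() plus the os.getenv check makes the app fail fast when the Hugging Face key is missing. A minimal sketch of the expected setup, assuming a .env file next to app.py (the key value is a placeholder; on a deployed Space the same variable would typically come from a repository secret):

# .env contents (one line, placeholder value):
# HUGGINGFACE_API_KEY=hf_xxxxxxxxxxxxxxxx
from dotenv import load_dotenv
import os

load_dotenv()  # reads .env from the working directory by default
print(bool(os.getenv("HUGGINGFACE_API_KEY")))  # True once the key is set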
@@ -200,7 +60,6 @@ def identifier_sequences_surfing(video_path, intervalle=0.5):
     total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
     frame_interval = int(frame_rate * intervalle)
 
-    resultats = []
     sequences_surfing = []
     frame_index = 0
     in_surf_sequence = False
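For intuition on the sampling setup kept above: frame_interval converts the sampling period in seconds (intervalle) into a frame stride. A small worked example:

# At 25 fps with intervalle=1, every 25th frame is classified,
# i.e. one frame per second of video.
frame_rate = 25.0
intervalle = 1
frame_interval = int(frame_rate * intervalle)
print(frame_interval)  # 25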
@@ -210,42 +69,21 @@ def identifier_sequences_surfing(video_path, intervalle=0.5):
         success, frame = cap.read()
         while success:
             if frame_index % frame_interval == 0:
-                timestamp = round(frame_index / frame_rate, 2)  # Maintain precision to the centisecond level
+                timestamp = round(frame_index / frame_rate, 2)
                 classe = classifier_image(frame)
-                resultats.append({"Timestamp": timestamp, "Classe": classe})
 
                 if classe == "Surfing" and not in_surf_sequence:
                     in_surf_sequence = True
                     start_timestamp = timestamp
-
                 elif classe != "Surfing" and in_surf_sequence:
-                    # Check the next frame to confirm whether this was a one-off error
-                    success_next, frame_next = cap.read()
-                    next_timestamp = round((frame_index + frame_interval) / frame_rate, 2)
-                    classe_next = None
-
-                    if success_next:
-                        classe_next = classifier_image(frame_next)
-                        resultats.append({"Timestamp": next_timestamp, "Classe": classe_next})
-
-                    # If the next frame is "Surfing", ignore the one-off error
-                    if classe_next == "Surfing":
-                        success = success_next
-                        frame = frame_next
-                        frame_index += frame_interval
-                        pbar.update(frame_interval)
-                        continue
-                    else:
-                        # Otherwise, end the "Surfing" sequence
-                        in_surf_sequence = False
-                        end_timestamp = timestamp
-                        sequences_surfing.append((start_timestamp, end_timestamp))
+                    in_surf_sequence = False
+                    end_timestamp = timestamp
+                    sequences_surfing.append((start_timestamp, end_timestamp))
 
             success, frame = cap.read()
             frame_index += 1
             pbar.update(1)
 
-        # If we are still in a "Surfing" sequence at the end of the video
         if in_surf_sequence:
             sequences_surfing.append((start_timestamp, round(frame_index / frame_rate, 2)))
 
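The rewritten loop above drops the old look-ahead correction: it now simply opens a sequence on the first "Surfing" frame and closes it on the first non-"Surfing" frame. A standalone sketch of that grouping logic over a pre-classified (timestamp, label) stream, for illustration only:

# Illustration only: the same start/end grouping the loop above performs,
# applied to a list of (timestamp, label) pairs.
def group_surf_sequences(samples):
    sequences, start = [], None
    for timestamp, label in samples:
        if label == "Surfing" and start is None:
            start = timestamp                     # a sequence opens
        elif label != "Surfing" and start is not None:
            sequences.append((start, timestamp))  # a sequence closes
            start = None
    if start is not None:                         # still "Surfing" at end of video
        sequences.append((start, samples[-1][0]))
    return sequences

print(group_surf_sequences([(0.0, "Surfing"), (1.0, "Surfing"), (2.0, "Walking"), (3.0, "Surfing")]))
# [(0.0, 2.0), (3.0, 3.0)]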
@@ -271,25 +109,61 @@ def convertir_sequences_en_json(dataframe):
     events.append(event)
     return events
 
-
-import os
-import tempfile
-
+# Endpoint to analyze the video and upload it to Hugging Face
 @app.post("/analyze_video/")
-async def analyze_video(file: UploadFile = File(...)):
-    [old request-handling body not recoverable from this diff view]
+async def analyze_video(user_name: str, file: UploadFile = File(...)):
+    try:
+        # Save the video to a temporary location
+        temp_file_path = f"/tmp/{file.filename}"
+        with open(temp_file_path, "wb") as buffer:
+            shutil.copyfileobj(file.file, buffer)
+
+        # Upload the video to the Hugging Face Hub
+        dataset_name = "2nzi/Video-Sequence-Labeling"
+        target_path_in_repo = f"{user_name}/raw/{file.filename}"
+
+        hf_api.upload_file(
+            path_or_fileobj=temp_file_path,
+            path_in_repo=target_path_in_repo,
+            repo_id=dataset_name,
+            repo_type="dataset",
+            token=api_key
+        )
+
+        # Analyze the video for "Surfing" sequences
+        dataframe_sequences = identifier_sequences_surfing(temp_file_path, intervalle=1)
+        json_result = convertir_sequences_en_json(dataframe_sequences)
+
+        # Delete the temporary file after the upload
+        os.remove(temp_file_path)
+
+        return {"message": "Video uploaded and analyzed successfully!",
+                "file_url": f"https://huggingface.co/datasets/{dataset_name}/resolve/main/{target_path_in_repo}",
+                "analysis": json_result}
+
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=f"Failed to upload or analyze video: {str(e)}")
+
+# Function to upload a video to a Hugging Face dataset
+def upload_to_hf_dataset(user_name: str, video_path: str):
+    dataset_name = "2nzi/Video-Sequence-Labeling"
+    repo_path = f"{user_name}/raw/{os.path.basename(video_path)}"
+
+    try:
+        hf_api.upload_file(
+            path_or_fileobj=video_path,
+            path_in_repo=repo_path,
+            repo_id=dataset_name,
+            repo_type="dataset",
+            token=api_key
+        )
+
+        # Return the video URL after the upload
+        url = f"https://huggingface.co/datasets/{dataset_name}/resolve/main/{repo_path}"
+        return {"status": "success", "url": url}
+    except Exception as e:
+        return {"status": "error", "message": str(e)}
 
-    # Delete the temporary file after use
-    os.remove(tmp_path)
-
-    return {"filename": file.filename, "result": json_result}
 
 @app.get("/", response_class=HTMLResponse)
 async def index():
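Since analyze_video now takes user_name as a plain function parameter, FastAPI treats it as a required query parameter alongside the multipart file. A hypothetical client call with requests (the host URL is inferred from the docs link earlier in the file and may differ):

import requests

url = "https://2nzi-video-sequence-labeling.hf.space/analyze_video/"
with open("session.mp4", "rb") as f:  # any local video file
    resp = requests.post(
        url,
        params={"user_name": "demo-user"},                # query parameter
        files={"file": ("session.mp4", f, "video/mp4")},  # multipart upload
    )
print(resp.status_code, resp.json().get("file_url"))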
@@ -308,8 +182,7 @@ async def index():
         """
     )
 
-
 # Launch the application with uvicorn (command line)
 # uvicorn main:app --reload
 # http://localhost:8000/docs#/
-# (.venv) PS C:\Users\antoi\Documents\Work_Learn\Labeling-Deploy\FastAPI> uvicorn main:app --host 0.0.0.0 --port 8000 --workers 1
+# (.venv) PS C:\Users\antoi\Documents\Work_Learn\Labeling-Deploy\FastAPI> uvicorn main:app --host 0.0.0.0 --port 8000 --workers 1
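The launch comments above still say main:app, but this file is app.py, so the import string would be app:app. A programmatic equivalent of the same command, as a sketch:

import uvicorn

if __name__ == "__main__":
    # Same as: uvicorn app:app --host 0.0.0.0 --port 8000 --workers 1
    uvicorn.run("app:app", host="0.0.0.0", port=8000, workers=1)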
|