HemaAM committed on
Commit aca01f2 (verified) · 1 parent: bc8fbb9

Upload app.py with huggingface_hub

Files changed (1): app.py (+293, -0)
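
The commit message indicates the file was pushed with the huggingface_hub client. As a point of reference, a minimal upload sketch (the repo id shown is a placeholder, not taken from this commit):

    from huggingface_hub import HfApi

    api = HfApi()  # uses the token from `huggingface-cli login` or the HF_TOKEN env var
    api.upload_file(
        path_or_fileobj="app.py",            # local file to push
        path_in_repo="app.py",               # destination path inside the repo
        repo_id="HemaAM/your-space-name",    # placeholder Space id
        repo_type="space",
        commit_message="Upload app.py with huggingface_hub",
    )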
app.py ADDED
@@ -0,0 +1,293 @@
import json
import os
import sys
import traceback
from timeit import default_timer as timer

import gradio as gr
import torch
from PIL import Image
from torchvision import transforms
from dotenv import load_dotenv
import boto3

# --- Setup ---
load_dotenv()
print("Starting application with debug info...")
print(f"Python version: {sys.version}")
print(f"Torch version: {torch.__version__}")
print(f"Device: {torch.device('cuda' if torch.cuda.is_available() else 'cpu')}")

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
try:
    torch.set_default_device(device)
    print(f"Default device set to: {device}")
except Exception as e:
    print(f"Error setting default device: {e}")
    # Fall back to older method if needed
    if torch.__version__ < '2.0.0':
        print("Using older torch version method for device handling")

# --- Download from S3 (CPU models only) ---
def download_from_s3():
    print("Attempting to download artifacts from S3...")
    BUCKET_NAME = 'mybucket-emlo-mumbai'
    ARTIFACTS = [
        'kserve-ig/vegfruits-classifier-prod/pths/vegfruits_cpu.pt',
        'kserve-ig/sports-classifier-prod/pths/sports_cpu.pt',
        'kserve-ig/vegfruits-classifier-prod/index_to_name.json',
        'kserve-ig/sports-classifier-prod/index_to_name.json',
    ]
    os.makedirs("vegfruits", exist_ok=True)
    os.makedirs("sports", exist_ok=True)

    try:
        aws_key = os.getenv("AWS_ACCESS_KEY_ID")
        aws_secret = os.getenv("AWS_SECRET_ACCESS_KEY")
        print(f"AWS credentials available: {bool(aws_key and aws_secret)}")

        s3 = boto3.client(
            "s3",
            aws_access_key_id=aws_key,
            aws_secret_access_key=aws_secret,
            region_name="ap-south-1"
        )

        for artifact in ARTIFACTS:
            artifact_extract = artifact.split("/")[-1]
            if "vegfruits" in artifact:
                local_name = "vegfruits"
            elif "sports" in artifact:
                local_name = "sports"
            local_path = os.path.join(local_name, artifact_extract)
            # Check the local download path (not the S3 key) so cached files are skipped
            if not os.path.exists(local_path):
                s3.download_file(BUCKET_NAME, artifact, local_path)
                print(f"Successfully downloaded {artifact} as {local_path}")
            else:
                print(f"{local_path} already exists, skipping download")
    except Exception as e:
        print(f"Error during S3 download: {e}")
        traceback.print_exc()

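# The AWS credentials above are read from the environment (populated from a local .env
# by load_dotenv()). If they are missing, the download attempt fails inside the
# try/except and the app relies on whatever model and label files already exist locally.
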
# --- Image Transform ---
transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])
])

# --- Load models ---
def load_model(name):
    print(f"Loading model: {name}")
    # Local copy of the TorchScript artifact downloaded from S3
    # (e.g. mybucket-emlo-mumbai/kserve-ig/vegfruits-classifier-prod/pths/)
    path = f"{name}/{name}_cpu.pt"
    try:
        if not os.path.exists(path):
            print(f"ERROR: Model file not found at {path}")
            return None

        model = torch.jit.load(path)
        print(f"Model loaded successfully from {path}")
        model.to(device)
        print(f"Model moved to {device}")
        model.eval()
        print("Model set to evaluation mode")
        return model
    except Exception as e:
        print(f"Error loading model {name}: {e}")
        traceback.print_exc()
        return None

# --- Load class mappings ---
def load_classnames(name):
    print(f"Loading class mappings for: {name}")
    file_path = f"{name}/index_to_name.json"
    try:
        if not os.path.exists(file_path):
            print(f"ERROR: Class mapping file not found at {file_path}")
            return {}

        with open(file_path) as f:
            mapping = json.load(f)
        print(f"Class mappings loaded successfully from {file_path}")
        return mapping
    except Exception as e:
        print(f"Error loading class mappings for {name}: {e}")
        traceback.print_exc()
        return {}

# --- Predict functions ---
@torch.no_grad()
def predict(img, model, idx2lbl):
    print(f"Prediction request received. Input type: {type(img)}")

    # Handle non-image inputs
    if img is None:
        print("Received None image")
        return {"No image provided": 1.0}, 0.0

    if isinstance(img, bool):
        print(f"Received boolean input: {img}")
        return {"Boolean input received, expected image": 1.0}, 0.0

    # Verify we have a valid image
    if not isinstance(img, Image.Image):
        print(f"WARNING: Input is not a PIL Image but {type(img)}")
        try:
            if hasattr(img, 'convert'):
                print("Object has convert method, attempting to use as image")
            else:
                print("Object cannot be used as an image")
                return {"Invalid image format": 1.0}, 0.0
        except Exception as e:
            print(f"Error checking image: {e}")
            return {"Error processing input": 1.0}, 0.0

    try:
        print("Starting prediction process")
        start = timer()

        # Debug image properties
        print(f"Image size: {img.size if hasattr(img, 'size') else 'unknown'}")
        print(f"Image mode: {img.mode if hasattr(img, 'mode') else 'unknown'}")

        # Transform image (convert to RGB so the 3-channel Normalize always applies)
        print("Transforming image")
        img = img.convert("RGB")
        img_tensor = transform(img).to(device)
        print(f"Image transformed to tensor of shape {img_tensor.shape}")

        # Run model
        print("Running model inference")
        logits = model(img_tensor.unsqueeze(0))
        print(f"Model output shape: {logits.shape}")

        # Process output
        print("Processing model output")
        probs = torch.softmax(logits, dim=-1)
        top5 = torch.topk(probs, min(5, probs.shape[1]))

        # Create predictions dictionary
        print("Creating predictions dictionary")
        preds = {}
        for i, (v, idx) in enumerate(zip(top5.values[0], top5.indices[0])):
            idx_item = idx.item()
            print(f"Processing top prediction {i+1}: idx={idx_item}, value={v.item():.4f}")

            if str(idx_item) in idx2lbl:
                label = idx2lbl[str(idx_item)]
                preds[label] = round(v.item(), 4)
                print(f"Mapped to label: {label}")
            else:
                print(f"WARNING: Index {idx_item} not found in class mapping")
                preds[f"Unknown-{idx_item}"] = round(v.item(), 4)

        elapsed = round(timer() - start, 4)
        print(f"Prediction completed in {elapsed}s")
        return preds, elapsed
    except Exception as e:
        print(f"Prediction error: {e}")
        traceback.print_exc()
        return {"Error": 0.0}, 0.0

# --- App logic ---
def main():
    print("Initializing application...")

    try:
        download_from_s3()
    except Exception as e:
        print(f"Error in S3 download: {e}")
        traceback.print_exc()

    print("Loading models and class mappings")
    smodel = load_model("sports")
    vfmodel = load_model("vegfruits")
    sports_map = load_classnames("sports")
    vegfruits_map = load_classnames("vegfruits")

    def sports_fn(img):
        print("\n--- Sports Classification Request ---")
        print(f"Input type: {type(img)}")
        if img is None:
            print("No image provided")
            return {"No image provided": 1.0}, 0.0
        if isinstance(img, bool):
            print(f"Received boolean: {img}")
            return {"Boolean received (expected image)": 1.0}, 0.0
        try:
            return predict(img, smodel, sports_map)
        except Exception as e:
            print(f"Error in sports_fn: {e}")
            traceback.print_exc()
            return {"Error in sports classifier": 1.0}, 0.0

    def veg_fn(img):
        print("\n--- VegFruits Classification Request ---")
        print(f"Input type: {type(img)}")
        if img is None:
            print("No image provided")
            return {"No image provided": 1.0}, 0.0
        if isinstance(img, bool):
            print(f"Received boolean: {img}")
            return {"Boolean received (expected image)": 1.0}, 0.0
        try:
            return predict(img, vfmodel, vegfruits_map)
        except Exception as e:
            print(f"Error in veg_fn: {e}")
            traceback.print_exc()
            return {"Error in vegfruits classifier": 1.0}, 0.0

    print("Creating Gradio interfaces")
    try:
        sports_interface = gr.Interface(
            fn=sports_fn,
            inputs=gr.Image(type="pil"),
            outputs=[
                gr.Label(num_top_classes=5),
                gr.Number(label="Prediction Time (s)")
            ],
            title="Sports Classifier",
            cache_examples=False
        )
        print("Sports interface created successfully")

        veg_interface = gr.Interface(
            fn=veg_fn,
            inputs=gr.Image(type="pil"),
            outputs=[
                gr.Label(num_top_classes=5),
                gr.Number(label="Prediction Time (s)")
            ],
            title="VegFruits Classifier",
            cache_examples=False
        )
        print("VegFruits interface created successfully")

        demo = gr.TabbedInterface(
            interface_list=[sports_interface, veg_interface],
            tab_names=["Sports", "VegFruits"]
        )
        print("TabbedInterface created successfully")

        print("Launching Gradio app...")
        demo.launch(share=True)
    except Exception as e:
        print(f"Error creating Gradio interface: {e}")
        traceback.print_exc()


if __name__ == "__main__":
    main()
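
predict() resolves labels with idx2lbl[str(idx_item)], so each index_to_name.json pulled from S3 is assumed to map stringified class indices to label names. An illustrative (made-up) example of that shape and lookup:

    example_mapping = {"0": "apple", "1": "banana", "2": "grapes"}  # hypothetical contents
    label = example_mapping[str(1)]  # -> "banana", mirroring idx2lbl[str(idx_item)] in predict()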