Hajorda committed
Commit a4e0d82 · verified
1 Parent(s): 975d88f

Create app.py

Files changed (1)
  1. app.py +161 -0
app.py ADDED
@@ -0,0 +1,161 @@
# app.py (for a Hugging Face Space using Gradio)
import gradio as gr
import torch
import pytorch_lightning as pl
from timm import create_model
import torch.nn as nn
from box import Box
import albumentations as A
from albumentations.pytorch.transforms import ToTensorV2
import pickle
from PIL import Image
import numpy as np
import os
import requests  # For fetching funny cat GIFs via the (optional) Giphy API
from huggingface_hub import hf_hub_download

# --- Re-use your model definition and loading functions ---
# (This part would be similar to your inference.py)

HF_USERNAME = "Hajorda"  # Or the username of the model owner
HF_MODEL_NAME = "keduClasifier"
REPO_ID = f"{HF_USERNAME}/{HF_MODEL_NAME}"

cfg_dict_for_inference = {
    'model_name': 'swin_tiny_patch4_window7_224',  # Match training
    'dropout_backbone': 0.1,  # Match training
    'dropout_fc': 0.2,  # Match training
    'img_size': (224, 224),
    'num_classes': 37,  # IMPORTANT: This must be correct for your trained model
}
cfg_inference = Box(cfg_dict_for_inference)


class PetBreedModel(pl.LightningModule):  # Paste your PetBreedModel class here
    def __init__(self, cfg: Box):
        super().__init__()
        self.cfg = cfg
        self.backbone = create_model(
            self.cfg.model_name, pretrained=False, num_classes=0,
            in_chans=3, drop_rate=self.cfg.dropout_backbone
        )
        # Run a dummy forward pass to discover the backbone's feature dimension
        h, w = self.cfg.img_size
        dummy_input = torch.randn(1, 3, h, w)
        with torch.no_grad():
            num_features = self.backbone(dummy_input).shape[-1]
        self.fc = nn.Sequential(
            nn.Linear(num_features, num_features // 2), nn.ReLU(),
            nn.Dropout(self.cfg.dropout_fc),
            nn.Linear(num_features // 2, self.cfg.num_classes)
        )

    def forward(self, x):
        features = self.backbone(x)
        return self.fc(features)


def load_model_from_hf_for_space(repo_id=REPO_ID, ckpt_filename="pytorch_model.ckpt"):
    model_path = hf_hub_download(repo_id=repo_id, filename=ckpt_filename)
    # Important: Ensure cfg_inference is correctly defined with num_classes
    if cfg_inference.num_classes is None:
        raise ValueError("num_classes must be set in cfg_inference to load the model for Gradio.")
    loaded_model = PetBreedModel.load_from_checkpoint(model_path, cfg=cfg_inference, strict=False)
    loaded_model.eval()
    return loaded_model


def load_label_encoder_from_hf_for_space(repo_id=REPO_ID, le_filename="label_encoder.pkl"):
    le_path = hf_hub_download(repo_id=repo_id, filename=le_filename)
    with open(le_path, 'rb') as f:
        label_encoder = pickle.load(f)
    return label_encoder


# Load model and encoder once when the app starts
model = load_model_from_hf_for_space()
label_encoder = load_label_encoder_from_hf_for_space()
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)

# --- Funny elements ---
funny_cat_keywords = ["funny cat", "silly cat", "cat meme", "derp cat"]
GIPHY_API_KEY = "YOUR_GIPHY_API_KEY"  # Optional: For more variety, get a Giphy API key


def get_funny_cat_gif(breed_name):
    # A simpler source than a public API: a predefined list of GIFs. Defined
    # outside the try block so the except clause can always fall back to it.
    predefined_gifs = {
        "abyssinian": "https://media.giphy.com/media/v1.Y2lkPTc5MGI3NjExaWN4bDNzNWVzM2VqNHE4Ym5zN2ZzZHF0Zzh0bGRqZzRjMnhsZW5pZCZlcD12MV9pbnRlcm5hbF9naWZfYnlfaWQmY3Q9Zw/3oriO0OEd9QIDdllqo/giphy.gif",
        "siamese": "https://media.giphy.com/media/v1.Y2lkPTc5MGI3NjExa3g0dHZtZmRncWN0cnZkNnVnMGRtYjN2ajZ2d3o1cHZtaW50ZHQ5ayZlcD12MV9pbnRlcm5hbF9naWZfYnlfaWQmY3Q9Zw/ICOgUNjpvO0PC/giphy.gif",
        "default": "https://media.giphy.com/media/v1.Y2lkPTc5MGI3NjExNWMwNnU4NW9nZTV5c3Z0eThsOHhsOWN0Nnh0a3VzbjFxeGU0bjFuNiZlcD12MV9pbnRlcm5hbF9naWZfYnlfaWQmY3Q9Zw/BzyTuYCmvSORqs1ABM/giphy.gif"
    }
    try:
        # If using the Giphy API instead (requires `import random`):
        # search_term = f"{breed_name} {random.choice(funny_cat_keywords)}"
        # params = {'api_key': GIPHY_API_KEY, 'q': search_term, 'limit': 1, 'rating': 'g'}
        # response = requests.get("http://api.giphy.com/v1/gifs/search", params=params)
        # response.raise_for_status()
        # return response.json()['data'][0]['images']['original']['url']
        return predefined_gifs.get(breed_name.lower().replace(" ", "_"), predefined_gifs["default"])
    except Exception as e:
        print(f"Error fetching GIF: {e}")
        return predefined_gifs["default"]  # Fallback


# --- Gradio Interface Function ---
def classify_cat_breed(image_input):
    # Gradio provides the image as an RGB NumPy array, which is what the
    # ImageNet normalization below expects, so no colour conversion is needed.
    img_rgb = image_input

    h, w = cfg_inference.img_size
    transforms_gradio = A.Compose([
        A.Resize(height=h, width=w),
        A.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ToTensorV2(),
    ])
    input_tensor = transforms_gradio(image=img_rgb)['image'].unsqueeze(0).to(device)

    with torch.no_grad():
        logits = model(input_tensor)
        probabilities = torch.softmax(logits, dim=1)
        # Get the top N predictions if you want:
        # top_probs, top_indices = torch.topk(probabilities, 3, dim=1)

    # For a single prediction:
    confidence, predicted_idx = torch.max(probabilities, dim=1)

    predicted_breed_id = predicted_idx.item()
    predicted_breed_name = label_encoder.inverse_transform([predicted_breed_id])[0]
    conf_score = confidence.item()

    # Funny message and GIF
    funny_message = f"I'm {conf_score*100:.1f}% sure this adorable furball is a {predicted_breed_name}! What a purrfect specimen!"
    if conf_score < 0.7:
        funny_message += " ...Or maybe it's a new, super-rare breed only I can see. 😉"

    gif_url = get_funny_cat_gif(predicted_breed_name)

    # gr.Interface maps the returned tuple onto the output components declared below, in order
    return (
        f"{predicted_breed_name} (Confidence: {conf_score*100:.2f}%)",
        funny_message,
        gif_url  # Gradio can display images/GIFs from URLs
    )


# --- Define the Gradio Interface ---
title = "😸 Purrfect Breed Guesser 3000 😼"
description = "Upload a picture of a cat, and I'll (hilariously) try to guess its breed! Powered by AI and a bit of cat-titude."
article = f"<p style='text-align: center'>Model based on Swin Transformer, fine-tuned on the Oxford-IIIT Pet Dataset. <a href='https://huggingface.co/{REPO_ID}' target='_blank'>Model Card</a></p>"

iface = gr.Interface(
    fn=classify_cat_breed,
    inputs=gr.Image(type="numpy", label="Upload Cat Pic! 📸"),
    outputs=[
        gr.Textbox(label="🧐 My Guess Is..."),
        gr.Textbox(label="💬 My Deep Thoughts..."),
        gr.Image(type="filepath", label="🎉 Celebration GIF! 🎉")  # a URL string works as the output value
    ],
    title=title,
    description=description,
    article=article,
    examples=[["example_cat1.jpg"], ["example_cat2.jpg"]],  # Add paths to example images in your Space repo
    theme=gr.themes.Soft()  # Or try other themes!
)

if __name__ == "__main__":
    iface.launch()
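
Before pushing this file to the Space, it can be handy to sanity-check the inference path locally. The sketch below is a hypothetical smoke test, not part of this commit: it assumes app.py sits in the current directory, that the Hajorda/keduClasifier repo actually serves pytorch_model.ckpt and label_encoder.pkl, and that a local test image named example_cat1.jpg exists (the same file referenced by the examples= argument above).

# smoke_test.py, a hypothetical local check for app.py (not part of this commit).
# Assumptions: app.py is importable from the current directory, the Hub repo
# Hajorda/keduClasifier serves pytorch_model.ckpt and label_encoder.pkl, and a
# local test image named "example_cat1.jpg" exists.
import numpy as np
from PIL import Image

import app  # importing app.py downloads the checkpoint and builds the model once

# Load a test image as an RGB NumPy array, the same format Gradio passes to the fn
img = np.array(Image.open("example_cat1.jpg").convert("RGB"))

guess, message, gif_url = app.classify_cat_breed(img)
print("Prediction:", guess)
print("Message:", message)
print("GIF URL:", gif_url)

Note that the Space itself will likely also need a requirements.txt next to app.py listing the packages imported above (for example torch, pytorch-lightning, timm, python-box, albumentations, huggingface_hub), since a bare Gradio Space generally does not ship them preinstalled.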