Ahmedhassan54 committed
Commit f52233f · verified · 1 Parent(s): 117fc44

Upload 4 files

Files changed (4)
  1. README.md +38 -12
  2. app.py +80 -0
  3. model.py +188 -0
  4. requirements.txt +5 -0
README.md CHANGED
@@ -1,12 +1,38 @@
- ---
- title: Image Classification
- emoji: 🐢
- colorFrom: yellow
- colorTo: pink
- sdk: gradio
- sdk_version: 5.34.2
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ ---
+ title: Cat vs Dog Classifier
+ emoji: 🐱🐶
+ colorFrom: blue
+ colorTo: purple
+ sdk: gradio
+ sdk_version: 4.21.0
+ app_file: app.py
+ pinned: false
+ license: mit
+ ---
+
+ # Cat vs Dog Image Classifier
+
+ A deep learning model, built with TensorFlow/Keras, that classifies images as cats or dogs.
+
+ ![Demo](https://example.com/demo.gif) <!-- Replace with actual demo GIF -->
+
+ ## 🚀 Try it out!
+
+ [![Open in Spaces](https://img.shields.io/badge/🤗-Open%20in%20Spaces-blue.svg)](https://huggingface.co/spaces/Ahmedhassan54/Image-Classification)
+
+ ## 🛠️ Technical Details
+
+ ### Model Architecture
+
+ ```python
+ Sequential([
+     Conv2D(32, (3,3), activation='relu', input_shape=(150, 150, 3)),
+     MaxPooling2D((2,2)),
+     Conv2D(64, (3,3), activation='relu'),
+     MaxPooling2D((2,2)),
+     Conv2D(128, (3,3), activation='relu'),
+     MaxPooling2D((2,2)),
+     Flatten(),
+     Dense(512, activation='relu'),
+     Dropout(0.5),
+     Dense(1, activation='sigmoid')
+ ])
+ ```
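+
+ ### Local Inference (sketch)
+
+ A minimal way to try the model outside the Space, assuming `best_model.h5` has been downloaded from this repo. The file name and the 150x150 preprocessing mirror `app.py`; the image path below is a placeholder:
+
+ ```python
+ import numpy as np
+ import tensorflow as tf
+ from PIL import Image
+
+ model = tf.keras.models.load_model("best_model.h5")
+ img = Image.open("some_image.jpg").convert("RGB").resize((150, 150))
+ x = np.expand_dims(np.array(img) / 255.0, axis=0)
+ prob_dog = float(model.predict(x)[0][0])  # sigmoid output: > 0.5 means "dog"
+ print(f"Dog: {prob_dog:.1%}  Cat: {1 - prob_dog:.1%}")
+ ```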
app.py ADDED
@@ -0,0 +1,80 @@
+ import gradio as gr
+ import tensorflow as tf
+ import numpy as np
+ from PIL import Image
+ from huggingface_hub import hf_hub_download
+ import os
+ import shutil
+
+ # Configuration
+ MODEL_REPO = "Ahmedhassan54/Image-Classification"  # Replace with your HF username and repo
+ MODEL_FILE = "best_model.h5"
+
+ # Download model from Hugging Face Hub
+ def load_model_from_hf():
+     try:
+         # Check if the model exists locally first
+         if not os.path.exists(MODEL_FILE):
+             print("Downloading model from Hugging Face Hub...")
+             model_path = hf_hub_download(
+                 repo_id=MODEL_REPO,
+                 filename=MODEL_FILE,
+                 cache_dir="."
+             )
+             # Copy to the current directory for easier access
+             shutil.copy(model_path, MODEL_FILE)
+
+         # Load the model
+         model = tf.keras.models.load_model(MODEL_FILE)
+         print("Model loaded successfully!")
+         return model
+     except Exception as e:
+         print(f"Error loading model: {str(e)}")
+         raise
+
+ # Load the model when the app starts
+ model = load_model_from_hf()
+
+ # Image classification function
+ def classify_image(image):
+     try:
+         # Preprocess the image
+         image = image.convert("RGB")                       # Ensure 3 channels (handles RGBA/grayscale uploads)
+         image = image.resize((150, 150))                   # Match the model's expected input size
+         image_array = np.array(image) / 255.0              # Normalize to [0, 1]
+         image_array = np.expand_dims(image_array, axis=0)  # Add batch dimension
+
+         # Make prediction (sigmoid output: probability of "dog")
+         prediction = model.predict(image_array)
+         confidence = float(prediction[0][0])
+
+         # gr.Label expects confidences in [0, 1] and sorts the classes itself
+         return {
+             "Dog": confidence,
+             "Cat": 1 - confidence
+         }
+     except Exception as e:
+         return f"Error processing image: {str(e)}"
+
+ # Gradio interface
+ demo = gr.Interface(
+     fn=classify_image,
+     inputs=gr.Image(type="pil", label="Upload Image"),
+     outputs=gr.Label(num_top_classes=2, label="Predictions"),
+     title="🐱 Cat vs Dog Classifier 🐶",
+     description="Upload an image to classify it as a cat or a dog",
+     examples=[
+         ["https://upload.wikimedia.org/wikipedia/commons/1/15/Cat_August_2010-4.jpg"],
+         ["https://upload.wikimedia.org/wikipedia/commons/d/d9/Collage_of_Nine_Dogs.jpg"]
+     ],
+     allow_flagging="never"
+ )
+
+ # Launch the app
+ if __name__ == "__main__":
+     demo.launch(debug=True, server_port=7860)
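+
+ # Optional smoke test (an illustrative sketch, not used by the Space): uncomment to run
+ # classify_image on a synthetic image and inspect the output dict. It assumes the model
+ # above loaded successfully; kept commented out so it never interferes with demo.launch().
+ # test_img = Image.fromarray((np.random.rand(150, 150, 3) * 255).astype("uint8"))
+ # print(classify_image(test_img))  # e.g. {'Dog': 0.42, 'Cat': 0.58}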
model.py ADDED
@@ -0,0 +1,188 @@
+ # Image Classification for Cat vs Dog Dataset (Fixed Version)
+ # Run in Google Colab with GPU
+
+ ## 1. Setup Environment
+ !nvidia-smi
+ !pip install tensorboard-plugin-profile
+
+ ## 2. Import Libraries
+ import tensorflow as tf
+ from tensorflow.keras import layers, models, callbacks
+ from tensorflow.keras.preprocessing.image import ImageDataGenerator
+ import numpy as np
+ import matplotlib.pyplot as plt
+ import datetime
+ from sklearn.metrics import classification_report, confusion_matrix
+ import seaborn as sns
+ import os
+ import zipfile
+ from google.colab import files
+ from shutil import move
+ from pathlib import Path
+
+ print("TensorFlow version:", tf.__version__)
+
+ ## 3. Upload and Reorganize Your Dataset
+ # Upload your zip file
+ uploaded = files.upload()
+ zip_filename = list(uploaded.keys())[0]
+
+ # Extract the zip file
+ with zipfile.ZipFile(zip_filename, 'r') as zip_ref:
+     zip_ref.extractall('extracted_dataset')
+
+ # Verify extraction
+ !ls extracted_dataset
+
+ # The dataset has images directly in custom_dataset/train/ (not in cat/dog subfolders),
+ # so they need to be reorganized into one folder per class.
+ def organize_dataset(input_dir, output_dir):
+     # Create class directories
+     os.makedirs(os.path.join(output_dir, 'cat'), exist_ok=True)
+     os.makedirs(os.path.join(output_dir, 'dog'), exist_ok=True)
+
+     # Move cat images
+     for file in Path(input_dir).glob('cat.*.jpg'):
+         move(str(file), os.path.join(output_dir, 'cat', file.name))
+
+     # Move dog images
+     for file in Path(input_dir).glob('dog.*.jpg'):
+         move(str(file), os.path.join(output_dir, 'dog', file.name))
+
+ # Reorganize the dataset
+ input_path = 'extracted_dataset/custom_dataset/train'
+ output_path = 'organized_dataset/train'
+ organize_dataset(input_path, output_path)
+
+ # Verify the new structure
+ !ls organized_dataset/train
+ !ls organized_dataset/train/cat | head -5
+ !ls organized_dataset/train/dog | head -5
+
+ ## 4. Create Data Generators
+ # Parameters
+ IMG_SIZE = (150, 150)
+ BATCH_SIZE = 32
+
+ # Data generators with augmentation
+ train_datagen = ImageDataGenerator(
+     rescale=1./255,
+     rotation_range=20,
+     width_shift_range=0.2,
+     height_shift_range=0.2,
+     shear_range=0.2,
+     zoom_range=0.2,
+     horizontal_flip=True,
+     validation_split=0.2  # 20% for validation
+ )
+
+ # Training generator
+ train_generator = train_datagen.flow_from_directory(
+     'organized_dataset/train',
+     target_size=IMG_SIZE,
+     batch_size=BATCH_SIZE,
+     class_mode='binary',
+     subset='training',
+     shuffle=True
+ )
+
+ # Validation generator
+ validation_generator = train_datagen.flow_from_directory(
+     'organized_dataset/train',
+     target_size=IMG_SIZE,
+     batch_size=BATCH_SIZE,
+     class_mode='binary',
+     subset='validation',
+     shuffle=True
+ )
+
+ # Get class names
+ class_names = list(train_generator.class_indices.keys())
+ print("\nDetected classes:", class_names)
+ print("Number of training samples:", train_generator.samples)
+ print("Number of validation samples:", validation_generator.samples)
+
+ # Visualize samples
+ plt.figure(figsize=(12, 9))
+ for i in range(9):
+     img, label = next(train_generator)
+     plt.subplot(3, 3, i+1)
+     plt.imshow(img[i])
+     plt.title(class_names[int(label[i])])
+     plt.axis('off')
+ plt.suptitle("Sample Training Images")
+ plt.show()
+
+ ## 5. Build Model
+ def build_model(input_shape):
+     model = models.Sequential([
+         layers.Conv2D(32, (3,3), activation='relu', input_shape=input_shape),
+         layers.MaxPooling2D((2,2)),
+
+         layers.Conv2D(64, (3,3), activation='relu'),
+         layers.MaxPooling2D((2,2)),
+
+         layers.Conv2D(128, (3,3), activation='relu'),
+         layers.MaxPooling2D((2,2)),
+
+         layers.Flatten(),
+         layers.Dense(512, activation='relu'),
+         layers.Dropout(0.5),
+         layers.Dense(1, activation='sigmoid')  # Binary output
+     ])
+
+     model.compile(
+         optimizer='adam',
+         loss='binary_crossentropy',
+         metrics=['accuracy']
+     )
+     return model
+
+ model = build_model(input_shape=(IMG_SIZE[0], IMG_SIZE[1], 3))
+ model.summary()
+
+ ## 6. Train Model
+ log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
+
+ # Named callback_list so it does not shadow the imported keras `callbacks` module
+ callback_list = [
+     callbacks.EarlyStopping(patience=5, restore_best_weights=True),
+     callbacks.ModelCheckpoint('best_model.h5', save_best_only=True),
+     callbacks.TensorBoard(log_dir=log_dir),
+     callbacks.ReduceLROnPlateau(factor=0.1, patience=3)
+ ]
+
+ history = model.fit(
+     train_generator,
+     steps_per_epoch=train_generator.samples // BATCH_SIZE,
+     epochs=30,
+     validation_data=validation_generator,
+     validation_steps=validation_generator.samples // BATCH_SIZE,
+     callbacks=callback_list
+ )
+
+ ## 7. Evaluate Model
+ # Plot training history
+ plt.figure(figsize=(12, 4))
+ plt.subplot(1, 2, 1)
+ plt.plot(history.history['accuracy'], label='Train')
+ plt.plot(history.history['val_accuracy'], label='Validation')
+ plt.title('Accuracy')
+ plt.legend()
+
+ plt.subplot(1, 2, 2)
+ plt.plot(history.history['loss'], label='Train')
+ plt.plot(history.history['val_loss'], label='Validation')
+ plt.title('Loss')
+ plt.legend()
+ plt.show()
+
+ ## 8. Save Model
+ model.save('cat_dog_classifier.h5')
+
+ # Convert to TFLite
+ converter = tf.lite.TFLiteConverter.from_keras_model(model)
+ tflite_model = converter.convert()
+ with open('cat_dog.tflite', 'wb') as f:
+     f.write(tflite_model)
+
+ print("\nModel saved in HDF5 and TFLite formats")
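+
+ ## 9. (Optional) Sanity-Check the TFLite Export
+ # A minimal sketch: push one random tensor through the exported TFLite model and confirm
+ # the output shape matches the Keras model's single sigmoid unit. The file name
+ # 'cat_dog.tflite' comes from the save step above; everything else here is illustrative.
+ interpreter = tf.lite.Interpreter(model_path='cat_dog.tflite')
+ interpreter.allocate_tensors()
+ input_details = interpreter.get_input_details()
+ output_details = interpreter.get_output_details()
+
+ dummy_input = np.random.rand(1, IMG_SIZE[0], IMG_SIZE[1], 3).astype(np.float32)
+ interpreter.set_tensor(input_details[0]['index'], dummy_input)
+ interpreter.invoke()
+ print("TFLite output:", interpreter.get_tensor(output_details[0]['index']))  # shape (1, 1)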
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ tensorflow
+ gradio
+ pillow
+ numpy
+ huggingface-hub