nisharg nargund committed on
Commit
c354f28
·
1 Parent(s): 03de125

Delete bone.ipynb

Browse files
Files changed (1) hide show
  1. bone.ipynb +0 -294
bone.ipynb DELETED
@@ -1,294 +0,0 @@
1
- {
2
- "cells": [
3
- {
4
- "cell_type": "code",
5
- "execution_count": 13,
6
- "metadata": {},
7
- "outputs": [],
8
- "source": [
9
- "import tensorflow as tf\n",
10
- "from tensorflow import keras\n",
11
- "from keras.layers import Conv2D,MaxPooling2D,Dense,Flatten,Dropout\n",
12
- "from keras import Sequential\n",
13
- "import numpy as np\n",
14
- "import pandas as pd\n",
15
- "from tensorflow.keras.preprocessing.image import ImageDataGenerator\n",
16
- "from tensorflow.keras.preprocessing import image"
17
- ]
18
- },
19
- {
20
- "cell_type": "code",
21
- "execution_count": 2,
22
- "metadata": {},
23
- "outputs": [
24
- {
25
- "name": "stdout",
26
- "output_type": "stream",
27
- "text": [
28
- "Zip file extracted successfully.\n"
29
- ]
30
- }
31
- ],
32
- "source": [
33
- "from zipfile import ZipFile\n",
34
- "\n",
35
- "zip_file_path = 'bone_frac.zip'\n",
36
- "\n",
37
- "with ZipFile(zip_file_path, 'r') as zip_ref:\n",
38
- " zip_ref.extractall()\n",
39
- "\n",
40
- "print(\"Zip file extracted successfully.\")\n"
41
- ]
42
- },
43
- {
44
- "cell_type": "code",
45
- "execution_count": 3,
46
- "metadata": {},
47
- "outputs": [],
48
- "source": [
49
- "Training = 'archive (6)/train'\n",
50
- "Validation = 'archive (6)/val'"
51
- ]
52
- },
53
- {
54
- "cell_type": "code",
55
- "execution_count": 4,
56
- "metadata": {},
57
- "outputs": [],
58
- "source": [
59
- "img_width, img_height = 224, 224\n",
60
- "batch_size = 32"
61
- ]
62
- },
63
- {
64
- "cell_type": "code",
65
- "execution_count": 7,
66
- "metadata": {},
67
- "outputs": [
68
- {
69
- "name": "stdout",
70
- "output_type": "stream",
71
- "text": [
72
- "Found 8863 images belonging to 2 classes.\n",
73
- "Found 600 images belonging to 2 classes.\n"
74
- ]
75
- }
76
- ],
77
- "source": [
78
- "#Data Augmentation\n",
79
- "\n",
80
- "train_datagen = ImageDataGenerator(\n",
81
- " rescale=1.0/255,\n",
82
- " rotation_range=20,\n",
83
- " width_shift_range=0.2,\n",
84
- " height_shift_range=0.2,\n",
85
- " shear_range=0.2,\n",
86
- " zoom_range=0.2,\n",
87
- " horizontal_flip=True,\n",
88
- " fill_mode='nearest'\n",
89
- ")\n",
90
- "\n",
91
- "#Rescale validation images \n",
92
- "validation_datagen = ImageDataGenerator(rescale=1.0/255)\n",
93
- "\n",
94
- "#loading train n val data:\n",
95
- "\n",
96
- "train_generator = train_datagen.flow_from_directory(\n",
97
- " Training,\n",
98
- " target_size=(img_width, img_height),\n",
99
- " batch_size=batch_size,\n",
100
- " class_mode='binary'\n",
101
- ")\n",
102
- "\n",
103
- "validation_generator = validation_datagen.flow_from_directory(\n",
104
- " Validation,\n",
105
- " target_size=(img_width, img_height),\n",
106
- " batch_size=batch_size,\n",
107
- " class_mode='binary'\n",
108
- ")"
109
- ]
110
- },
111
- {
112
- "cell_type": "code",
113
- "execution_count": 8,
114
- "metadata": {},
115
- "outputs": [],
116
- "source": [
117
- "#building CNN model\n",
118
- "\n",
119
- "model = Sequential()\n",
120
- "\n",
121
- "model.add(Conv2D(32, (3,3), activation='relu', input_shape=(img_width, img_height, 3)))\n",
122
- "model.add(MaxPooling2D((2,2)))\n",
123
- "\n",
124
- "model.add(Conv2D(64, (3,3), activation='relu'))\n",
125
- "model.add(MaxPooling2D((2,2)))\n",
126
- "\n",
127
- "model.add(Conv2D(128, (3,3), activation='relu'))\n",
128
- "model.add(MaxPooling2D((2,2)))\n",
129
- "\n",
130
- "model.add(Flatten())\n",
131
- "\n",
132
- "model.add(Dense(128, activation='relu'))\n",
133
- "model.add(Dropout(0.5))\n",
134
- "\n",
135
- "model.add(Dense(1, activation='sigmoid'))\n",
136
- "\n"
137
- ]
138
- },
139
- {
140
- "cell_type": "code",
141
- "execution_count": 9,
142
- "metadata": {},
143
- "outputs": [],
144
- "source": [
145
- "model.compile(optimizer='Adam', loss='binary_crossentropy', metrics=['accuracy'])"
146
- ]
147
- },
148
- {
149
- "cell_type": "code",
150
- "execution_count": 11,
151
- "metadata": {},
152
- "outputs": [
153
- {
154
- "name": "stdout",
155
- "output_type": "stream",
156
- "text": [
157
- "Epoch 1/5\n",
158
- "276/276 [==============================] - 450s 2s/step - loss: 0.6812 - accuracy: 0.5570 - val_loss: 0.6559 - val_accuracy: 0.5533\n",
159
- "Epoch 2/5\n",
160
- "276/276 [==============================] - 457s 2s/step - loss: 0.6691 - accuracy: 0.5919 - val_loss: 0.6212 - val_accuracy: 0.6000\n",
161
- "Epoch 3/5\n",
162
- "276/276 [==============================] - 301s 1s/step - loss: 0.6513 - accuracy: 0.5942 - val_loss: 0.5682 - val_accuracy: 0.6800\n",
163
- "Epoch 4/5\n",
164
- "276/276 [==============================] - 302s 1s/step - loss: 0.6283 - accuracy: 0.6159 - val_loss: 0.6609 - val_accuracy: 0.5000\n",
165
- "Epoch 5/5\n",
166
- "276/276 [==============================] - 303s 1s/step - loss: 0.6163 - accuracy: 0.6440 - val_loss: 0.5883 - val_accuracy: 0.6767\n"
167
- ]
168
- }
169
- ],
170
- "source": [
171
- "history = model.fit(\n",
172
- " train_generator,\n",
173
- " steps_per_epoch=train_generator.samples / batch_size,\n",
174
- " validation_data=validation_generator,\n",
175
- " validation_steps=(validation_generator.samples / batch_size),\n",
176
- " epochs=5)"
177
- ]
178
- },
179
- {
180
- "cell_type": "code",
181
- "execution_count": 12,
182
- "metadata": {},
183
- "outputs": [
184
- {
185
- "name": "stdout",
186
- "output_type": "stream",
187
- "text": [
188
- "19/19 [==============================] - 4s 203ms/step - loss: 0.5883 - accuracy: 0.6767\n",
189
- "Test accuracy: 67.67%\n"
190
- ]
191
- }
192
- ],
193
- "source": [
194
- "test_loss, test_acc = model.evaluate(validation_generator)\n",
195
- "print(f'Test accuracy: {test_acc * 100: .2f}%') #.2f means float no. upto 2 decimals"
196
- ]
197
- },
198
- {
199
- "cell_type": "code",
200
- "execution_count": 14,
201
- "metadata": {},
202
- "outputs": [],
203
- "source": [
204
- "model.save('bone_model.h5')"
205
- ]
206
- },
207
- {
208
- "cell_type": "code",
209
- "execution_count": 30,
210
- "metadata": {},
211
- "outputs": [],
212
- "source": [
213
- "model = tf.keras.models.load_model('bone_model.h5')\n",
214
- "\n",
215
- "img_path = 'archive (6)/val/fractured\\9.jpg'\n",
216
- "img = image.load_img(img_path, target_size=(224,224))\n",
217
- "img_array = image.img_to_array(img)\n",
218
- "img_array = np.expand_dims(img_array, axis=0)\n",
219
- "img_array /= 255.0"
220
- ]
221
- },
222
- {
223
- "cell_type": "code",
224
- "execution_count": 31,
225
- "metadata": {},
226
- "outputs": [
227
- {
228
- "name": "stdout",
229
- "output_type": "stream",
230
- "text": [
231
- "1/1 [==============================] - 0s 76ms/step\n"
232
- ]
233
- }
234
- ],
235
- "source": [
236
- "#making prediction\n",
237
- "\n",
238
- "prediction = model.predict(img_array)\n",
239
- "predicted_class=int(np.round(prediction)[0][0]) #[0][0]\n",
240
- "\n",
241
- "class_labels = ['Not Fractured', 'Fractured']\n",
242
- "\n",
243
- "\n",
244
- "\n"
245
- ]
246
- },
247
- {
248
- "cell_type": "code",
249
- "execution_count": 35,
250
- "metadata": {},
251
- "outputs": [
252
- {
253
- "name": "stdout",
254
- "output_type": "stream",
255
- "text": [
256
- "Predicted class: Fractured (Confidence: 57.78%)\n"
257
- ]
258
- }
259
- ],
260
- "source": [
261
- "print(f\"Predicted class: {class_labels[predicted_class]} (Confidence: {prediction[0][0] * 100:.2f}%)\")"
262
- ]
263
- },
264
- {
265
- "cell_type": "code",
266
- "execution_count": null,
267
- "metadata": {},
268
- "outputs": [],
269
- "source": []
270
- }
271
- ],
272
- "metadata": {
273
- "kernelspec": {
274
- "display_name": "Python 3",
275
- "language": "python",
276
- "name": "python3"
277
- },
278
- "language_info": {
279
- "codemirror_mode": {
280
- "name": "ipython",
281
- "version": 3
282
- },
283
- "file_extension": ".py",
284
- "mimetype": "text/x-python",
285
- "name": "python",
286
- "nbconvert_exporter": "python",
287
- "pygments_lexer": "ipython3",
288
- "version": "3.10.7"
289
- },
290
- "orig_nbformat": 4
291
- },
292
- "nbformat": 4,
293
- "nbformat_minor": 2
294
- }