timnirmal committed on
Commit 3b1a998 · verified · 1 Parent(s): ae30993

Upload folder using huggingface_hub
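This is the standard commit message produced when a local folder is pushed with the `huggingface_hub` client. A minimal sketch of the kind of call that creates such a commit (the Space id is inferred from the uploader and title, and the folder path is a placeholder, so treat both as assumptions):

```python
# Sketch: push a local working folder to the Space in one commit, which is what
# produces the "Upload folder using huggingface_hub" message seen above.
from huggingface_hub import HfApi

HfApi().upload_folder(
    folder_path=".",                   # placeholder: local folder with main.py, weights/, ...
    repo_id="timnirmal/mw_hf_upload",  # assumed Space id, not confirmed by this page
    repo_type="space",
    commit_message="Upload folder using huggingface_hub",
)
```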

Files changed (5)
  1. .github/workflows/update_space.yml +28 -0
  2. README.md +2 -8
  3. main.py +206 -0
  4. weights/best.pt +3 -0
  5. weights/last.pt +3 -0
.github/workflows/update_space.yml ADDED
@@ -0,0 +1,28 @@
+ name: Run Python script
+
+ on:
+   push:
+     branches:
+       - main
+
+ jobs:
+   build:
+     runs-on: ubuntu-latest
+
+     steps:
+       - name: Checkout
+         uses: actions/checkout@v2
+
+       - name: Set up Python
+         uses: actions/setup-python@v2
+         with:
+           python-version: '3.9'
+
+       - name: Install Gradio
+         run: python -m pip install gradio
+
+       - name: Log in to Hugging Face
+         run: python -c 'import huggingface_hub; huggingface_hub.login(token="${{ secrets.hf_token }}")'
+
+       - name: Deploy to Spaces
+         run: gradio deploy
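The `Log in to Hugging Face` step calls `huggingface_hub.login()` non-interactively with the `hf_token` repository secret, and `gradio deploy` then pushes the checked-out repo to the Space. A minimal local sketch for confirming a token is valid before saving it as that secret (the `HF_TOKEN` environment variable name is only an example here, not something the workflow defines):

```python
# Pre-flight check (sketch): verify the token authenticates before storing it
# as the `hf_token` secret that the workflow above passes to huggingface_hub.login().
import os
from huggingface_hub import login, whoami

login(token=os.environ["HF_TOKEN"])  # HF_TOKEN is an assumed local env var name
print(whoami()["name"])              # the account the `gradio deploy` step would publish under
```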
README.md CHANGED
@@ -1,12 +1,6 @@
  ---
- title: Mw Hf Upload
- emoji: 📊
- colorFrom: gray
- colorTo: yellow
+ title: mw_hf_upload
+ app_file: main.py
  sdk: gradio
  sdk_version: 5.42.0
- app_file: app.py
- pinned: false
  ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
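With this change the Space front matter drops the template fields and points `app_file` at `main.py`, so the Space serves the Gradio app defined there instead of a default `app.py`. A small sketch for checking that the front matter parses and the declared entry file exists (assumes PyYAML is installed; this parsing is illustrative, not how the Hub itself reads the card):

```python
# Sketch: read the YAML block between the two `---` markers of README.md
# and check that the declared app_file is present in the repo.
import pathlib
import yaml  # PyYAML, assumed available

front_matter = pathlib.Path("README.md").read_text(encoding="utf-8").split("---")[1]
cfg = yaml.safe_load(front_matter)

print(cfg)  # expected: {'title': 'mw_hf_upload', 'app_file': 'main.py', 'sdk': 'gradio', 'sdk_version': '5.42.0'}
assert pathlib.Path(cfg["app_file"]).is_file(), "app_file points at a missing script"
```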
 
 
main.py ADDED
@@ -0,0 +1,206 @@
+ # yolo11_gradio_single_textlist_enhance.py
+ from ultralytics import YOLO
+ import gradio as gr
+ import cv2, numpy as np, random
+
+ DEFAULT_MODEL = "weights/best.pt"
+ _MODEL_CACHE = {}
+
+ def get_model(path: str):
+     if path not in _MODEL_CACHE:
+         _MODEL_CACHE[path] = YOLO(path)
+     return _MODEL_CACHE[path]
+
+ # ---------- utilities ----------
+ def class_color(cid: int):
+     random.seed(int(cid) + 12345)
+     return tuple(int(x) for x in np.array([random.randrange(60, 255) for _ in range(3)]))
+
+ def ensure_odd(v: int, minv: int = 3):
+     v = int(v)
+     if v < minv: v = minv
+     if v % 2 == 0: v += 1
+     return v
+
+ def to_bgr(img):
+     if len(img.shape) == 2:  # gray -> BGR
+         return cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
+     return img
+
+ # ---------- enhancements (applied in this order) ----------
+ def enhance_image(img_bgr,
+                   use_gray: bool,
+                   use_clahe: bool, clahe_clip: float, clahe_grid: int,
+                   use_bc: bool, alpha_gain: float, beta_bias: float,
+                   use_unsharp: bool, us_amount: float, us_radius: float,
+                   use_thresh: bool, th_block: int, th_C: int,
+                   use_morph: bool, morph_k: int, morph_iters: int,
+                   use_invert: bool):
+     base = img_bgr.copy()
+     work_gray = None
+
+     if use_gray or use_clahe or use_thresh:
+         work_gray = cv2.cvtColor(base, cv2.COLOR_BGR2GRAY)
+
+     # CLAHE on gray
+     if use_clahe:
+         clahe = cv2.createCLAHE(clipLimit=float(clahe_clip),
+                                 tileGridSize=(int(clahe_grid), int(clahe_grid)))
+         work_gray = clahe.apply(work_gray)
+
+     # Brightness/Contrast (linear gain/bias)
+     if use_bc:
+         if work_gray is not None:
+             work_gray = cv2.convertScaleAbs(work_gray, alpha=float(alpha_gain), beta=float(beta_bias))
+         else:
+             base = cv2.convertScaleAbs(base, alpha=float(alpha_gain), beta=float(beta_bias))
+
+     # Unsharp mask (detail boost)
+     if use_unsharp:
+         # apply on gray if exists, else on color
+         if work_gray is not None:
+             blur = cv2.GaussianBlur(work_gray, (0, 0), sigmaX=float(us_radius), sigmaY=float(us_radius))
+             work_gray = cv2.addWeighted(work_gray, 1.0 + float(us_amount), blur, -float(us_amount), 0)
+         else:
+             blur = cv2.GaussianBlur(base, (0, 0), sigmaX=float(us_radius), sigmaY=float(us_radius))
+             base = cv2.addWeighted(base, 1.0 + float(us_amount), blur, -float(us_amount), 0)
+
+     # Adaptive threshold (great for floor plans)
+     if use_thresh:
+         bs = ensure_odd(int(th_block), 3)
+         work_gray = cv2.adaptiveThreshold(
+             work_gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, bs, int(th_C)
+         )
+
+     # Merge back to BGR if we are working in gray
+     if work_gray is not None:
+         base = to_bgr(work_gray)
+
+     # Morphological clean-up (OPEN to remove speckles)
+     if use_morph:
+         k = max(1, int(morph_k))
+         kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (k, k))
+         base = cv2.morphologyEx(base, cv2.MORPH_OPEN, kernel, iterations=int(morph_iters))
+
+     # Invert black/white (sometimes better for plans)
+     if use_invert:
+         base = cv2.bitwise_not(base)
+
+     return base
+
+ # ---------- drawing ----------
+ def draw_transparent_boxes(img_bgr, boxes, alpha=0.5):
+     overlay = img_bgr.copy()
+     if boxes is not None and len(boxes):
+         xyxy = boxes.xyxy.cpu().numpy()
+         cls = boxes.cls.cpu().numpy().astype(int)
+         for bb, cl in zip(xyxy, cls):
+             x1, y1, x2, y2 = [int(round(v)) for v in bb]
+             cv2.rectangle(overlay, (x1, y1), (x2, y2), class_color(cl), thickness=-1)
+     return cv2.addWeighted(overlay, float(alpha), img_bgr, 1 - float(alpha), 0.0)
+
+ def detections_to_text(result):
+     if result.boxes is None or len(result.boxes) == 0:
+         return "Detections: 0"
+     names = result.names
+     xyxy = result.boxes.xyxy.cpu().numpy()
+     conf = result.boxes.conf.cpu().numpy()
+     cls = result.boxes.cls.cpu().numpy().astype(int)
+     lines = [f"{i:02d}. {names.get(int(c), str(int(c)))}\t{cf:.2f}\t[{int(x1)},{int(y1)},{int(x2)},{int(y2)}]"
+              for i, ((x1, y1, x2, y2), cf, c) in enumerate(zip(xyxy, conf, cls), start=1)]
+     return "Detections: " + str(len(lines)) + "\n" + "\n".join(lines)
+
+ # ---------- main inference ----------
+ def infer(img_rgb, model_path, conf, iou, alpha,
+           # toggles
+           use_gray, use_clahe, use_bc, use_unsharp, use_thresh, use_morph, use_invert,
+           detect_on_enh,
+           # params
+           clahe_clip, clahe_grid,
+           alpha_gain, beta_bias,
+           us_amount, us_radius,
+           th_block, th_C,
+           morph_k, morph_iters):
+     if img_rgb is None:
+         return None, "Upload an image to run inference."
+
+     model = get_model(model_path)
+     img_bgr = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2BGR)
+
+     # Enhance image for visualization (and optionally for detection)
+     enh_bgr = enhance_image(
+         img_bgr,
+         use_gray, use_clahe, clahe_clip, int(clahe_grid),
+         use_bc, alpha_gain, beta_bias,
+         use_unsharp, us_amount, us_radius,
+         use_thresh, int(th_block), int(th_C),
+         use_morph, int(morph_k), int(morph_iters),
+         use_invert
+     )
+
+     det_input = enh_bgr if detect_on_enh else img_bgr
+     r = model.predict(det_input, conf=float(conf), iou=float(iou), verbose=False)[0]
+
+     # Draw only transparent boxes (no labels)
+     out_bgr = draw_transparent_boxes(enh_bgr, r.boxes, alpha=float(alpha))
+     out_rgb = cv2.cvtColor(out_bgr, cv2.COLOR_BGR2RGB)
+
+     # Detection list in textbox
+     det_text = detections_to_text(r)
+     return out_rgb, det_text
+
+ # ---------- UI ----------
+ with gr.Blocks(title="YOLOv11 — Split View with Floor-Plan Enhancers") as demo:
+     gr.Markdown("## MW Floor Plan")
+     with gr.Row():
+         with gr.Column(scale=1):
+             inp = gr.Image(type="numpy", label="Upload image", height=520)
+             model_path = gr.Textbox(value=DEFAULT_MODEL, label="Model (.pt)")
+             with gr.Row():
+                 conf = gr.Slider(0, 1, value=0.25, step=0.01, label="Confidence")
+                 iou = gr.Slider(0, 1, value=0.45, step=0.01, label="IoU")
+                 alpha = gr.Slider(0, 1, value=0.50, step=0.05, label="Box transparency")
+         with gr.Column(scale=1):
+             out_img = gr.Image(label="Result (enhanced + transparent boxes)", height=520)
+             det_box = gr.Textbox(label="Detections (class, conf, [x1,y1,x2,y2])",
+                                  lines=12, interactive=False)
+
+     gr.Markdown("### Enhancements (tick to enable) — ordered top→bottom for best floor-plan contrast")
+     with gr.Row():
+         use_gray = gr.Checkbox(value=True, label="Grayscale")
+         use_clahe = gr.Checkbox(value=False, label="Auto-contrast (CLAHE)")
+         use_bc = gr.Checkbox(value=False, label="Brightness/Contrast")
+         use_unsharp = gr.Checkbox(value=True, label="Unsharp mask (sharpen)")
+         use_thresh = gr.Checkbox(value=False, label="Adaptive threshold (binarize)")
+         use_morph = gr.Checkbox(value=False, label="Morphological clean-up (OPEN)")
+         use_invert = gr.Checkbox(value=False, label="Invert B/W")
+         detect_on_enh = gr.Checkbox(value=True, label="Run detection on enhanced image")
+
+     with gr.Row():
+         clahe_clip = gr.Slider(1.0, 6.0, value=2.0, step=0.1, label="CLAHE clipLimit")
+         clahe_grid = gr.Slider(4, 16, value=8, step=1, label="CLAHE tileGridSize")
+         alpha_gain = gr.Slider(0.5, 2.0, value=1.2, step=0.05, label="Contrast gain (alpha)")
+         beta_bias = gr.Slider(-64, 64, value=0, step=1, label="Brightness bias (beta)")
+     with gr.Row():
+         us_amount = gr.Slider(0.0, 2.0, value=0.8, step=0.05, label="Unsharp amount")
+         us_radius = gr.Slider(0.3, 5.0, value=1.2, step=0.1, label="Unsharp radius (sigma)")
+         th_block = gr.Slider(3, 51, value=25, step=2, label="Adaptive block size (odd)")
+         th_C = gr.Slider(-15, 15, value=5, step=1, label="Adaptive C")
+     with gr.Row():
+         morph_k = gr.Slider(1, 9, value=3, step=1, label="Morph kernel size")
+         morph_iters = gr.Slider(0, 5, value=1, step=1, label="Morph iterations")
+
+     run = gr.Button("Run")
+     # wire events
+     inputs = [inp, model_path, conf, iou, alpha,
+               use_gray, use_clahe, use_bc, use_unsharp, use_thresh, use_morph, use_invert,
+               detect_on_enh,
+               clahe_clip, clahe_grid, alpha_gain, beta_bias, us_amount, us_radius,
+               th_block, th_C, morph_k, morph_iters]
+     outputs = [out_img, det_box]
+
+     run.click(infer, inputs, outputs)
+     inp.change(infer, inputs, outputs)  # auto-run on upload
+
+ if __name__ == "__main__":
+     demo.launch(share=True)
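Because `demo.launch()` is guarded by `__name__ == "__main__"`, importing `main` builds the Blocks UI but does not start it, so the helpers can also be driven headlessly. A sketch of that path using the UI's default settings (the image filename is a placeholder; ultralytics, opencv-python and `weights/best.pt` are assumed to be available next to this script):

```python
# Headless sketch: enhance a floor-plan image, run the cached YOLO model on it,
# and save the transparent-box overlay, mirroring what infer() does for the UI.
import cv2
from main import get_model, enhance_image, draw_transparent_boxes, detections_to_text

img_bgr = cv2.imread("floor_plan.png")  # placeholder path; main.py works in BGR
enh_bgr = enhance_image(
    img_bgr,
    True,  False, 2.0, 8,   # use_gray, use_clahe, clahe_clip, clahe_grid
    False, 1.2, 0,          # use_bc, alpha_gain, beta_bias
    True,  0.8, 1.2,        # use_unsharp, us_amount, us_radius
    False, 25, 5,           # use_thresh, th_block, th_C
    False, 3, 1,            # use_morph, morph_k, morph_iters
    False,                  # use_invert
)
result = get_model("weights/best.pt").predict(enh_bgr, conf=0.25, iou=0.45, verbose=False)[0]
cv2.imwrite("result.png", draw_transparent_boxes(enh_bgr, result.boxes, alpha=0.5))
print(detections_to_text(result))
```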
weights/best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7229bc3b8772ffc3aa4b7c71a0685253add0acf6ffddf599a9d66894938f6924
+ size 51182738
weights/last.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fea629c8b95788e1fbabac528afb7219f6ff2ce019e5a5910fae65e5d000c241
+ size 51182738
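Both weight files are Git LFS pointers; the roughly 51 MB checkpoints themselves live on the Hub's LFS storage and are resolved on checkout or download. A sketch for fetching the resolved `best.pt` programmatically (the repo id is inferred from the Space title and uploader, so treat it as an assumption):

```python
# Sketch: resolve the LFS pointer to the real checkpoint and load it with YOLO.
from huggingface_hub import hf_hub_download
from ultralytics import YOLO

weights_path = hf_hub_download(
    repo_id="timnirmal/mw_hf_upload",  # assumed Space id, not confirmed by this page
    repo_type="space",
    filename="weights/best.pt",
)
model = YOLO(weights_path)
print(model.names)  # class-id -> name mapping baked into the checkpoint
```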