# ref: https://huggingface.co/spaces/Prgckwb/dicom-viewer/blob/main/app.py
# ref: https://huggingface.co/spaces/basilshaji/Lung_Nodule_Segmentation
import cv2
import gradio as gr
import numpy as np
import polars as pl
import pydicom
import torch
from PIL import Image
from pydicom.errors import InvalidDicomError
from yolov5.models.experimental import attempt_load
from yolov5.utils.general import non_max_suppression
from yolov5.utils.augmentations import letterbox
# Load the trained YOLOv5 detection model
model_path = "best.pt"  # path to the trained YOLOv5 weights
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # use GPU if available
model = attempt_load(model_path, device=device)  # load the weights onto the selected device
model.eval()  # set the model to evaluation mode
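# Illustrative alternative (not what this app uses): the same custom weights could be loaded
# through torch.hub, which fetches the ultralytics/yolov5 repo at runtime.
#   model = torch.hub.load('ultralytics/yolov5', 'custom', path=model_path, device=device)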
def preprocess_image(image):
    img = letterbox(image, 640, stride=32, auto=True)[0]  # resize and pad to the 640-pixel input size
    img = img.transpose(2, 0, 1)[::-1]  # HWC to CHW, BGR to RGB
    img = np.ascontiguousarray(img)
    img = torch.from_numpy(img).to(device)
    img = img.float()  # uint8 to fp32
    img /= 255.0  # 0-255 to 0.0-1.0
    if img.ndimension() == 3:
        img = img.unsqueeze(0)  # add batch dimension
    return img, image
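# Illustrative usage of preprocess_image (the dummy array and shapes below are assumptions,
# not taken from a real scan): a square BGR slice is letterboxed to 640x640 and returned as a
# normalized 1x3x640x640 float tensor alongside the untouched input image.
#   dummy = np.zeros((512, 512, 3), dtype=np.uint8)
#   tensor, original = preprocess_image(dummy)   # tensor.shape == torch.Size([1, 3, 640, 640])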
def infer(model, img):
    with torch.no_grad():
        pred = model(img)[0]  # raw predictions before NMS
    return pred
def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
    # Rescale xyxy boxes from the letterboxed shape (img1_shape) back to the original image shape (img0_shape)
    if ratio_pad is None:  # calculate from img0_shape
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = resized / original
        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
    else:
        gain = ratio_pad[0]
        pad = ratio_pad[1]
    coords[:, [0, 2]] -= pad[0]  # x padding
    coords[:, [1, 3]] -= pad[1]  # y padding
    coords[:, :4] /= gain
    coords[:, 0].clamp_(0, img0_shape[1])  # clip x1 to image width
    coords[:, 1].clamp_(0, img0_shape[0])  # clip y1 to image height
    coords[:, 2].clamp_(0, img0_shape[1])  # clip x2 to image width
    coords[:, 3].clamp_(0, img0_shape[0])  # clip y2 to image height
    return coords
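# Worked example of the rescaling arithmetic (numbers are illustrative): for a 512x512 slice
# letterboxed to 640x640, gain = min(640/512, 640/512) = 1.25 and pad = (0, 0), so a corner at
# (100, 200) in letterbox space maps back to (100/1.25, 200/1.25) = (80, 160) in the original slice.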
def postprocess(pred, img0, img):
    pred = non_max_suppression(pred, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False)
    results = []
    for det in pred:  # detections per image
        if len(det):
            det[:, :4] = scale_coords(img.shape[2:], det[:, :4], img0.shape).round()
            for *xyxy, conf, cls in reversed(det):
                results.append((xyxy, conf, cls))
    return results
def detect_objects(image_path):
    dicom_image, dicom_meta = read_and_preprocess_dicom(image_path)
    img, img0 = preprocess_image(dicom_image)
    pred = infer(model, img)
    results = postprocess(pred, dicom_image, img)
    return results, dicom_image, dicom_meta
def draw_bounding_boxes(img, results):
    dets = []
    for (x1, y1, x2, y2), conf, cls in results:
        x1, y1, x2, y2, cls = map(int, [x1, y1, x2, y2, cls])
        conf = round(conf.detach().item(), 4)
        dets.append([(x1, y1, x2, y2), conf, cls])
        cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 0), 2)
        cv2.putText(img, f'{model.names[int(cls)]} {conf:.2f}', (x1, y1 - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.9, (36, 255, 12), 2)
    return img, dets
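# Illustrative shape of the returned dets list (the values below are made up):
#   [[(212, 180, 251, 223), 0.8731, 0]]
# i.e. integer box corners in original-image pixels, confidence rounded to 4 decimals, class index.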
def show_preds_image(filepath):
    results, img0, dicom_meta = detect_objects(filepath)
    img_with_boxes, dets = draw_bounding_boxes(img0, results)
    print("In show_preds_image: Instance Number =",
          dicom_meta.loc[dicom_meta.Key == 'Instance Number', 'Value'].iloc[0])
    return cv2.cvtColor(img_with_boxes, cv2.COLOR_BGR2RGB), dets, dicom_meta
def read_and_preprocess_dicom(file_path: str):
    """
    Read and preprocess a DICOM file.

    :param file_path: Path to the DICOM file
    :return: Image data (as a BGR numpy array for OpenCV) and metadata (as a pandas DataFrame)
    """
    try:
        # Read the DICOM file
        dicom_data = pydicom.dcmread(file_path)
    except InvalidDicomError:
        raise gr.Error("The uploaded file is not a valid DICOM file.")

    # Get the pixel data
    try:
        pixel_array = dicom_data.pixel_array
    except AttributeError:
        raise gr.Error("The uploaded DICOM file has no pixel data.")

    # Normalize the pixel data to 8-bit and convert to a PIL image
    if pixel_array.dtype != np.uint8:
        pixel_array = ((pixel_array - np.min(pixel_array)) /
                       (np.max(pixel_array) - np.min(pixel_array)) * 255).astype(np.uint8)
    image_pil = Image.fromarray(pixel_array)
    image = image_pil.convert('RGB')
    image = np.array(image)[:, :, ::-1].copy()  # RGB to BGR for OpenCV

    # Collect metadata in dictionary format and convert to a DataFrame
    metadata_dict = {elem.name: str(elem.value) for elem in dicom_data.iterall() if elem.name != 'Pixel Data'}
    df_metadata = pl.DataFrame({
        "Key": list(metadata_dict.keys()),
        "Value": list(metadata_dict.values())
    })
    return image, df_metadata.to_pandas()  # convert to pandas DataFrame for Gradio compatibility
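# Illustrative usage (the path is one of the bundled sample files used as examples below):
#   slice_bgr, meta = read_and_preprocess_dicom('samples/81_80.dcm')
#   slice_bgr is an HxWx3 BGR uint8 array; meta is a two-column Key/Value pandas DataFrame.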
# Define Gradio components
input_component = gr.File(label="Input DICOM Data")
dicom_image = gr.Image(type="numpy", label="Output Image")
dicom_meta = gr.Dataframe(headers=None, label="Metadata")
dets_res = gr.Text(label="Detections")
output_component = [dicom_image, dets_res, dicom_meta]
# Create the Gradio interface
interface = gr.Interface(
    fn=show_preds_image,
    inputs=input_component,
    outputs=output_component,
    title="Lung Nodule Detection",
    examples=['samples/81_80.dcm', 'samples/110_109.dcm', 'samples/189_188.dcm'],
    description="Online demo of a YOLOv5-based model for detecting lung cancer nodules in DICOM slices.",
    live=False,
)
interface.launch()
'''
def build_interface():
    """
    Build the Gradio Blocks interface (alternative layout, currently disabled).
    """
    theme = gr.themes.Soft(
        primary_hue=gr.themes.colors.emerald,
        secondary_hue=gr.themes.colors.emerald
    )
    with gr.Blocks(title='DICOM Viewer', theme=theme) as demo:
        gr.Markdown(
            """
            # DICOM Viewer
            This app reads a DICOM file and displays the image and metadata.
            """
        )
        with gr.Column():
            file_path = gr.File(label="Input DICOM Data")
            with gr.Row():
                dicom_image = gr.Image(type="numpy", label="Output Image")
                dicom_meta = gr.Dataframe(headers=None, label="Metadata")
            inputs = [file_path]
            outputs = [dicom_image, dicom_meta]
            file_path.upload(fn=read_and_preprocess_dicom, inputs=inputs, outputs=outputs)
            clear_button = gr.ClearButton(components=inputs + outputs)
            example = gr.Examples(
                ['samples/81_80.dcm', 'samples/110_109.dcm', 'samples/189_188.dcm'],
                inputs=inputs,
                outputs=outputs,
                fn=read_and_preprocess_dicom,
                cache_examples=True
            )
    return demo


if __name__ == '__main__':
    demo = build_interface()
    demo.launch()
'''