kgupta21 committed
Commit ecd47e6 · 1 Parent(s): 2e68325
Add radiology teaching application

Files changed:
- .gitattributes +1 -3
- README.md +35 -13
- app.py +121 -0
- requirements.txt +6 -0
.gitattributes
CHANGED
@@ -23,13 +23,11 @@
 *.pth filter=lfs diff=lfs merge=lfs -text
 *.rar filter=lfs diff=lfs merge=lfs -text
 *.safetensors filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
 *.tflite filter=lfs diff=lfs merge=lfs -text
 *.tgz filter=lfs diff=lfs merge=lfs -text
 *.wasm filter=lfs diff=lfs merge=lfs -text
 *.xz filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
README.md
CHANGED
@@ -1,13 +1,35 @@
[13 removed lines of the previous README are not shown in this view]
+# Radiology Report Training System
+
+A Gradio app for teaching radiology residents and medical students how to read and report chest X-rays. This app uses the MIMIC-CXR dataset and DeepSeek's LLM for report analysis and feedback.
+
+## Features
+
+- View chest X-ray images with zoom functionality
+- Input findings through typing or dictation
+- Compare against ground truth findings and impressions
+- Get AI-powered feedback on your report quality
+- Hide ground truth for self-testing
+- Support for both dataset images and custom uploads
+
+## How to Use
+
+1. Enter your DeepSeek API key in the provided input box
+2. Click "Load Random Case" to get a new chest X-ray
+3. Study the image and write your findings in the input box
+4. Click "Submit Report" to get feedback
+5. Use the "Hide Ground Truth" checkbox to test yourself
+
+## Dataset
+
+This app uses the [MIMIC-CXR dataset](https://huggingface.co/datasets/itsanmolgupta/mimic-cxr-dataset) (limited to 10 cases for this MVP version).
+
+## Technical Details
+
+- Built with Gradio
+- Uses DeepSeek's LLM for report analysis
+- Supports image upload and clipboard paste
+- Provides zoom functionality for detailed image examination
+
+## Requirements
+
+See requirements.txt for full dependencies.
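For orientation, the 10-case MVP subset described above is pulled straight from the Hub. A minimal sketch of that data access (mirroring the load_dataset call in app.py below, and not part of this commit), assuming the dataset rows expose image, findings, and impression columns as app.py does:

from datasets import load_dataset

# Load the same 10-row slice the app uses (see app.py below).
subset = load_dataset("itsanmolgupta/mimic-cxr-dataset", split="train").select(range(10))

# Peek at one case; the column names follow the app's usage of the dataset.
case = subset[0]
print(type(case["image"]))   # typically a PIL.Image when the column is an Image feature
print(case["findings"])      # ground-truth findings text
print(case["impression"])    # ground-truth impression text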
app.py
ADDED
@@ -0,0 +1,121 @@
+import gradio as gr
+import pandas as pd
+from datasets import load_dataset
+from openai import OpenAI
+from PIL import Image
+import io
+import base64
+
+# Load only 10 rows from the dataset
+dataset = load_dataset("itsanmolgupta/mimic-cxr-dataset", split="train").select(range(10))
+df = pd.DataFrame(dataset)
+
+def encode_image_to_base64(image_bytes):
+    return base64.b64encode(image_bytes).decode('utf-8')
+
+def analyze_report(user_findings, ground_truth_findings, ground_truth_impression, api_key):
+    if not api_key:
+        return "Please provide a DeepSeek API key to analyze the report."
+
+    try:
+        client = OpenAI(api_key=api_key, base_url="https://api.deepseek.com")
+
+        prompt = f"""You are an expert radiologist reviewing a trainee's chest X-ray report.
+
+Trainee's Findings:
+{user_findings}
+
+Ground Truth Findings:
+{ground_truth_findings}
+
+Ground Truth Impression:
+{ground_truth_impression}
+
+Please provide:
+1. Number of important findings missed by the trainee (list them)
+2. Quality assessment of the trainee's report (structure, completeness, accuracy)
+3. Constructive feedback for improvement
+
+Format your response in clear sections."""
+
+        response = client.chat.completions.create(
+            model="deepseek-chat",
+            messages=[
+                {"role": "system", "content": "You are an expert radiologist providing constructive feedback."},
+                {"role": "user", "content": prompt}
+            ],
+            stream=False
+        )
+
+        return response.choices[0].message.content
+    except Exception as e:
+        return f"Error analyzing report: {str(e)}"
+
+def load_random_case(hide_ground_truth):
+    # Randomly select a case from our dataset
+    random_case = df.sample(n=1).iloc[0]
+
+    # Get the image, findings, and impression
+    image = random_case['image']
+    findings = "" if hide_ground_truth else random_case['findings']
+    impression = "" if hide_ground_truth else random_case['impression']
+
+    return image, findings, impression
+
+def process_case(image, user_findings, hide_ground_truth, api_key, current_findings="", current_impression=""):
+    if hide_ground_truth:
+        return "", "", ""
+    else:
+        analysis = analyze_report(user_findings, current_findings, current_impression, api_key)
+        return current_findings, current_impression, analysis
+
+# Create the Gradio interface
+with gr.Blocks() as demo:
+    gr.Markdown("# Radiology Report Training System")
+    gr.Markdown("### Practice your chest X-ray reading and reporting skills")
+
+    with gr.Row():
+        with gr.Column():
+            image_display = gr.Image(label="Chest X-ray Image", type="pil")
+            api_key_input = gr.Textbox(label="DeepSeek API Key", type="password")
+            hide_truth = gr.Checkbox(label="Hide Ground Truth", value=False)
+            load_btn = gr.Button("Load Random Case")
+
+        with gr.Column():
+            user_findings_input = gr.Textbox(label="Your Findings", lines=10, placeholder="Type or dictate your findings here...")
+            ground_truth_findings = gr.Textbox(label="Ground Truth Findings", lines=5, interactive=False)
+            ground_truth_impression = gr.Textbox(label="Ground Truth Impression", lines=5, interactive=False)
+            analysis_output = gr.Textbox(label="Analysis and Feedback", lines=10, interactive=False)
+            submit_btn = gr.Button("Submit Report")
+
+    # Event handlers
+    load_btn.click(
+        fn=load_random_case,
+        inputs=[hide_truth],
+        outputs=[image_display, ground_truth_findings, ground_truth_impression]
+    )
+
+    submit_btn.click(
+        fn=process_case,
+        inputs=[
+            image_display,
+            user_findings_input,
+            hide_truth,
+            api_key_input,
+            ground_truth_findings,
+            ground_truth_impression
+        ],
+        outputs=[
+            ground_truth_findings,
+            ground_truth_impression,
+            analysis_output
+        ]
+    )
+
+    hide_truth.change(
+        fn=lambda x: ("", "", "") if x else (ground_truth_findings.value, ground_truth_impression.value, ""),
+        inputs=[hide_truth],
+        outputs=[ground_truth_findings, ground_truth_impression, analysis_output]
+    )
+
+demo.launch()
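A note on the hide_truth.change handler above: in Gradio, a component's .value attribute holds the value it was constructed with, not the current UI contents, so the lambda's "unhide" branch cannot restore ground truth that has already been cleared. A minimal sketch of an alternative wiring (not part of this commit) that caches the loaded case in a gr.State so hiding is reversible:

import gradio as gr

with gr.Blocks() as demo:
    # Cache the ground truth of the currently loaded case so hiding can be undone.
    case_state = gr.State({"findings": "", "impression": ""})

    hide_truth = gr.Checkbox(label="Hide Ground Truth", value=False)
    gt_findings = gr.Textbox(label="Ground Truth Findings", interactive=False)
    gt_impression = gr.Textbox(label="Ground Truth Impression", interactive=False)

    def toggle_truth(hide, case):
        # Blank the display when hiding; restore from the cached case when unhiding.
        if hide:
            return "", ""
        return case["findings"], case["impression"]

    hide_truth.change(
        fn=toggle_truth,
        inputs=[hide_truth, case_state],
        outputs=[gt_findings, gt_impression],
    )

demo.launch()

In this sketch, the load handler (omitted) would also write the selected case's findings and impression into case_state, so the toggle always has something to restore.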
requirements.txt
ADDED
@@ -0,0 +1,6 @@
+gradio>=4.16.0
+pandas>=2.0.0
+datasets>=2.15.0
+openai>=1.0.0
+Pillow>=10.0.0
+huggingface-hub>=0.20.0
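With the pinned openai client above, a DeepSeek key can be sanity-checked before launching the app. The base URL and model name below are the same ones app.py uses; the snippet is only an illustrative check, not part of this commit:

from openai import OpenAI

# Replace the placeholder with a real DeepSeek API key.
client = OpenAI(api_key="sk-...", base_url="https://api.deepseek.com")
resp = client.chat.completions.create(
    model="deepseek-chat",
    messages=[{"role": "user", "content": "Reply with OK if you can read this."}],
    stream=False,
)
print(resp.choices[0].message.content)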