Nateware committed on
Commit
ad45b6e
·
1 Parent(s): 78c8cdc
Files changed (2) hide show
  1. app.py +80 -25
  2. requirements.txt +7 -2
app.py CHANGED
@@ -4,6 +4,7 @@ import time
4
  import gradio as gr
5
  import requests
6
  from transformers import AutoProcessor, PaliGemmaForConditionalGeneration
 
7
  from PIL import Image
8
  from io import BytesIO
9
  import logging
@@ -19,30 +20,46 @@ device = None
19
  model_loaded = False
20
 
21
  def load_model():
22
- """Load the AI model exactly like in Colab"""
23
  global model, processor, device, model_loaded
24
 
25
- logger.info("Loading AI model (Colab style)...")
26
 
27
- # === Load AI Model === (exactly like Colab)
28
- model_id = "mychen76/paligemma-3b-mix-448-med_30k-ct-brain"
 
29
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
30
  dtype = torch.float16 if torch.cuda.is_available() else torch.float32
31
 
32
  logger.info(f"Using device: {device}")
33
  logger.info(f"Using dtype: {dtype}")
34
  logger.info(f"CUDA available: {torch.cuda.is_available()}")
 
 
35
 
36
  try:
37
- # Load exactly like Colab (no token, no trust_remote_code)
38
  logger.info("Loading processor...")
39
- processor = AutoProcessor.from_pretrained(model_id)
40
 
41
- logger.info("Loading model...")
 
42
  model = PaliGemmaForConditionalGeneration.from_pretrained(
43
- model_id,
44
- torch_dtype=dtype
45
- ).to(device).eval()
 
 
 
 
 
 
 
 
 
 
 
 
46
 
47
  logger.info("Model loaded successfully!")
48
  model_loaded = True
@@ -51,6 +68,15 @@ def load_model():
51
  except Exception as e:
52
  logger.error(f"Error loading model: {e}")
53
  logger.error(f"Error type: {type(e)}")
 
 
 
 
 
 
 
 
 
54
  model_loaded = False
55
  return False
56
 
@@ -70,11 +96,17 @@ def analyze_brain_scan(image, patient_name="", patient_age="", symptoms=""):
70
  ## ⚠️ Model Loading Error
71
 
72
  The AI model is not available. This could be due to:
73
- - Model loading issues
74
- - Memory limitations
75
- - Network connectivity
 
 
 
 
 
 
76
 
77
- Please check the logs or try refreshing.
78
  """
79
 
80
  if image is None:
@@ -102,6 +134,11 @@ Please check the logs or try refreshing.
102
  **AI Findings:**
103
  {result}
104
 
 
 
 
 
 
105
  **Note:** This is an AI-generated analysis for educational purposes only.
106
  Always consult with qualified medical professionals for actual diagnosis.
107
  """
@@ -116,7 +153,7 @@ def create_api_response(image, patient_name="", patient_age="", symptoms=""):
116
  """Create API-compatible response for integration"""
117
  try:
118
  if not model_loaded or model is None:
119
- return {"error": "Model not loaded"}
120
 
121
  if image is None:
122
  return {"error": "No image provided"}
@@ -138,7 +175,8 @@ def create_api_response(image, patient_name="", patient_age="", symptoms=""):
138
  "symptoms": symptoms
139
  },
140
  "model_info": {
141
- "model_id": "mychen76/paligemma-3b-mix-448-med_30k-ct-brain",
 
142
  "device": str(device),
143
  "model_loaded": model_loaded
144
  }
@@ -161,10 +199,22 @@ def get_model_status():
161
  - **Model Object**: {type(model).__name__ if model else 'None'}
162
  - **Processor Object**: {type(processor).__name__ if processor else 'None'}
163
  - **PyTorch Version**: {torch.__version__}
 
 
 
 
 
 
 
 
 
 
 
 
164
  """
165
 
166
  # Load model at startup
167
- logger.info("Initializing Brain CT Analyzer (Colab Style)...")
168
  load_success = load_model()
169
  if load_success:
170
  logger.info("Model loaded successfully!")
@@ -177,9 +227,11 @@ with gr.Blocks(title="Brain CT Analyzer", theme=gr.themes.Soft()) as demo:
177
  # 🧠 Brain CT Analyzer
178
 
179
  Upload a brain CT scan image for AI-powered analysis. This tool uses the PaliGemma medical model
180
- to provide preliminary findings.
181
 
182
  **⚠️ Important:** This is for educational/research purposes only. Always consult qualified medical professionals.
 
 
183
  """)
184
 
185
  # Model status section
@@ -221,7 +273,7 @@ with gr.Blocks(title="Brain CT Analyzer", theme=gr.themes.Soft()) as demo:
221
  with gr.Column(scale=1):
222
  result_output = gr.Markdown(
223
  label="Analysis Results",
224
- value="Upload an image and click 'Analyze Brain Scan' to see results." if model_loaded else "⚠️ Model not loaded. Check status above."
225
  )
226
 
227
  # API endpoint simulation
@@ -245,16 +297,19 @@ with gr.Blocks(title="Brain CT Analyzer", theme=gr.themes.Soft()) as demo:
245
  # Instructions
246
  gr.Markdown("""
247
  ## 📋 Usage Instructions:
248
- 1. Upload a brain CT scan image (JPEG or PNG)
249
- 2. Optionally fill in patient information
250
- 3. Click "Analyze Brain Scan" to get AI findings
251
- 4. Review the results in the output panel
 
 
252
 
253
  ## 🔗 Integration:
254
  This interface can be integrated with your medical app using the Gradio API.
255
 
256
  ## ✅ Based on Working Colab Code:
257
- This version uses the exact same model loading and inference code as your working Google Colab notebook.
 
258
  """)
259
 
260
  if __name__ == "__main__":
@@ -262,4 +317,4 @@ if __name__ == "__main__":
262
  server_name="0.0.0.0",
263
  server_port=7860,
264
  share=True
265
- )
 
4
  import gradio as gr
5
  import requests
6
  from transformers import AutoProcessor, PaliGemmaForConditionalGeneration
7
+ from peft import PeftModel
8
  from PIL import Image
9
  from io import BytesIO
10
  import logging
 
20
  model_loaded = False
21
 
22
  def load_model():
23
+ """Load the AI model with PEFT adapter (Colab style)"""
24
  global model, processor, device, model_loaded
25
 
26
+ logger.info("Loading AI model with PEFT adapter (Colab style)...")
27
 
28
+ # === Load AI Model === (base model + adapter)
29
+ base_model_id = "google/paligemma-3b-mix-448"
30
+ adapter_model_id = "mychen76/paligemma-3b-mix-448-med_30k-ct-brain"
31
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
32
  dtype = torch.float16 if torch.cuda.is_available() else torch.float32
33
 
34
  logger.info(f"Using device: {device}")
35
  logger.info(f"Using dtype: {dtype}")
36
  logger.info(f"CUDA available: {torch.cuda.is_available()}")
37
+ logger.info(f"Base model: {base_model_id}")
38
+ logger.info(f"Adapter model: {adapter_model_id}")
39
 
40
  try:
41
+ # Load processor from base model
42
  logger.info("Loading processor...")
43
+ processor = AutoProcessor.from_pretrained(base_model_id)
44
 
45
+ # Load base model
46
+ logger.info("Loading base model...")
47
  model = PaliGemmaForConditionalGeneration.from_pretrained(
48
+ base_model_id,
49
+ torch_dtype=dtype,
50
+ device_map="auto" if torch.cuda.is_available() else None
51
+ )
52
+
53
+ # Load PEFT adapter
54
+ logger.info("Loading PEFT adapter...")
55
+ model = PeftModel.from_pretrained(model, adapter_model_id)
56
+
57
+ # Set to eval mode
58
+ model.eval()
59
+
60
+ # Move to device if not using device_map
61
+ if not torch.cuda.is_available():
62
+ model = model.to(device)
63
 
64
  logger.info("Model loaded successfully!")
65
  model_loaded = True
 
68
  except Exception as e:
69
  logger.error(f"Error loading model: {e}")
70
  logger.error(f"Error type: {type(e)}")
71
+
72
+ # If license error, provide helpful message
73
+ if "license" in str(e).lower() or "access" in str(e).lower():
74
+ logger.error("This appears to be a license/access issue with the base model.")
75
+ logger.error("You may need to:")
76
+ logger.error("1. Accept the license for google/paligemma-3b-mix-448 on HuggingFace")
77
+ logger.error("2. Login with: huggingface-cli login")
78
+ logger.error("3. Use your HuggingFace token")
79
+
80
  model_loaded = False
81
  return False
82
 
 
96
  ## ⚠️ Model Loading Error
97
 
98
  The AI model is not available. This could be due to:
99
+ - **License Issue**: The base model requires accepting Google's license
100
+ - **PEFT Loading Issue**: Problem loading the medical adapter
101
+ - **Memory limitations**: Insufficient resources
102
+ - **Network connectivity**: Download issues
103
+
104
+ **To fix this:**
105
+ 1. Accept the license for `google/paligemma-3b-mix-448` on HuggingFace
106
+ 2. Login with your HuggingFace token: `huggingface-cli login`
107
+ 3. Restart the application
108
 
109
+ Please check the logs for more details.
110
  """
111
 
112
  if image is None:
 
134
  **AI Findings:**
135
  {result}
136
 
137
+ **Model Info:**
138
+ - Base Model: google/paligemma-3b-mix-448
139
+ - Medical Adapter: mychen76/paligemma-3b-mix-448-med_30k-ct-brain
140
+ - Device: {device}
141
+
142
  **Note:** This is an AI-generated analysis for educational purposes only.
143
  Always consult with qualified medical professionals for actual diagnosis.
144
  """
 
153
  """Create API-compatible response for integration"""
154
  try:
155
  if not model_loaded or model is None:
156
+ return {"error": "Model not loaded - check license and authentication"}
157
 
158
  if image is None:
159
  return {"error": "No image provided"}
 
175
  "symptoms": symptoms
176
  },
177
  "model_info": {
178
+ "base_model": "google/paligemma-3b-mix-448",
179
+ "adapter_model": "mychen76/paligemma-3b-mix-448-med_30k-ct-brain",
180
  "device": str(device),
181
  "model_loaded": model_loaded
182
  }
 
199
  - **Model Object**: {type(model).__name__ if model else 'None'}
200
  - **Processor Object**: {type(processor).__name__ if processor else 'None'}
201
  - **PyTorch Version**: {torch.__version__}
202
+
203
+ ## 📋 Model Configuration
204
+
205
+ - **Base Model**: google/paligemma-3b-mix-448
206
+ - **Medical Adapter**: mychen76/paligemma-3b-mix-448-med_30k-ct-brain
207
+ - **Model Type**: PEFT/LoRA Fine-tuned
208
+
209
+ ## ⚠️ Requirements
210
+
211
+ - HuggingFace account with accepted license for PaliGemma
212
+ - HuggingFace token authentication
213
+ - PEFT library for adapter loading
214
  """
215
 
216
  # Load model at startup
217
+ logger.info("Initializing Brain CT Analyzer with PEFT (Colab Style)...")
218
  load_success = load_model()
219
  if load_success:
220
  logger.info("Model loaded successfully!")
 
227
  # 🧠 Brain CT Analyzer
228
 
229
  Upload a brain CT scan image for AI-powered analysis. This tool uses the PaliGemma medical model
230
+ with specialized medical fine-tuning to provide preliminary findings.
231
 
232
  **⚠️ Important:** This is for educational/research purposes only. Always consult qualified medical professionals.
233
+
234
+ **🔑 Requirements:** This model requires accepting Google's PaliGemma license and HuggingFace authentication.
235
  """)
236
 
237
  # Model status section
 
273
  with gr.Column(scale=1):
274
  result_output = gr.Markdown(
275
  label="Analysis Results",
276
+ value="Upload an image and click 'Analyze Brain Scan' to see results." if model_loaded else "⚠️ Model not loaded. Check status above and ensure license acceptance."
277
  )
278
 
279
  # API endpoint simulation
 
297
  # Instructions
298
  gr.Markdown("""
299
  ## 📋 Usage Instructions:
300
+ 1. **Accept License**: Go to [google/paligemma-3b-mix-448](https://huggingface.co/google/paligemma-3b-mix-448) and accept the license
301
+ 2. **Authenticate**: Login with `huggingface-cli login` using your token
302
+ 3. Upload a brain CT scan image (JPEG or PNG)
303
+ 4. Optionally fill in patient information
304
+ 5. Click "Analyze Brain Scan" to get AI findings
305
+ 6. Review the results in the output panel
306
 
307
  ## 🔗 Integration:
308
  This interface can be integrated with your medical app using the Gradio API.
309
 
310
  ## ✅ Based on Working Colab Code:
311
+ This version uses PEFT to load the medical fine-tuned adapter on top of the base PaliGemma model,
312
+ exactly matching your working Google Colab setup.
313
  """)
314
 
315
  if __name__ == "__main__":
 
317
  server_name="0.0.0.0",
318
  server_port=7860,
319
  share=True
320
+ )
requirements.txt CHANGED
@@ -1,5 +1,10 @@
1
  torch>=2.0.0
 
2
  transformers>=4.40.0
 
 
3
  gradio>=4.0.0
4
- pillow>=9.0.0
5
- accelerate>=0.20.0
 
 
 
1
  torch>=2.0.0
2
+ torchvision>=0.15.0
3
  transformers>=4.40.0
4
+ peft>=0.10.0
5
+ accelerate>=0.20.0
6
  gradio>=4.0.0
7
+ Pillow>=9.0.0
8
+ requests>=2.25.0
9
+ numpy>=1.21.0
10
+ huggingface_hub>=0.16.0