Spaces:
Running
Running
Commit
·
17e1736
1
Parent(s):
fd6669d
Update HF token usage:
Browse files- requirements.txt +4 -1
- vlm.py +1 -1
requirements.txt
CHANGED
@@ -1,5 +1,5 @@
|
|
1 |
# requirements.txt
|
2 |
-
# **
|
3 |
google-genai
|
4 |
huggingface_hub
|
5 |
# **RAG**
|
@@ -12,6 +12,9 @@ sentencepiece
|
|
12 |
# **Environment**
|
13 |
python-dotenv # Not used in Streamlit deployment
|
14 |
pymongo
|
|
|
|
|
|
|
15 |
# **Deployment**
|
16 |
uvicorn
|
17 |
fastapi
|
|
|
1 |
# requirements.txt
|
2 |
+
# **LLMs**
|
3 |
google-genai
|
4 |
huggingface_hub
|
5 |
# **RAG**
|
|
|
12 |
# **Environment**
|
13 |
python-dotenv # Not used in Streamlit deployment
|
14 |
pymongo
|
15 |
+
# **VLMs**
|
16 |
+
transformers
|
17 |
+
pillow
|
18 |
# **Deployment**
|
19 |
uvicorn
|
20 |
fastapi
|
vlm.py
CHANGED
@@ -19,7 +19,7 @@ def load_vlm():
|
|
19 |
global vlm_pipe
|
20 |
if vlm_pipe is None:
|
21 |
logger.info("⏳ Loading MedGEMMA model via Transformers pipeline...")
|
22 |
-
vlm_pipe = pipeline("image-to-text", model="google/medgemma-4b", device_map="auto")
|
23 |
logger.info("✅ MedGEMMA model ready.")
|
24 |
return vlm_pipe
|
25 |
|
|
|
19 |
global vlm_pipe
|
20 |
if vlm_pipe is None:
|
21 |
logger.info("⏳ Loading MedGEMMA model via Transformers pipeline...")
|
22 |
+
vlm_pipe = pipeline("image-to-text", model="google/medgemma-4b", use_auth_token=HF_TOKEN, device_map="auto")
|
23 |
logger.info("✅ MedGEMMA model ready.")
|
24 |
return vlm_pipe
|
25 |
|