yongyeol committed on
Commit faca888 · verified · 1 Parent(s): 07cf72c

Update app.py

Files changed (1)
app.py +5 -2
app.py CHANGED
@@ -62,10 +62,13 @@ except ModuleNotFoundError:
 # ─────────────────────────────────────────────────────────────
 # 4. Image-captioning model
 # ─────────────────────────────────────────────────────────────
+# 4. Image-captioning model ------------------------------------
 caption_model = VisionEncoderDecoderModel.from_pretrained(
     "nlpconnect/vit-gpt2-image-captioning",
-    use_safetensors=True              # OK
-).eval()                              # evaluation mode
+    use_safetensors=True,             # unchanged
+    low_cpu_mem_usage=False,          # ← disable meta-device loading
+    device_map=None                   # ← turn off Accelerate's automatic device splitting
+).eval()                              # evaluation mode
 
 feature_extractor = ViTImageProcessor.from_pretrained(
     "nlpconnect/vit-gpt2-image-captioning"