from transformers import AutoModel, CLIPImageProcessor, CLIPTokenizer
import torch

model_name_or_path = "BAAI/EVA-CLIP-8B"
image_size = 224  # EVA-CLIP-8B expects 224x224 input images

# The EVA-CLIP-8B reference usage reuses the CLIP image processor from
# openai/clip-vit-large-patch14 for image preprocessing.
processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-large-patch14")

# trust_remote_code is required: the model architecture is defined by custom
# code hosted in the model repository, not in the transformers library itself.
model = AutoModel.from_pretrained(
    model_name_or_path,
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
).to("cuda").eval()

tokenizer = CLIPTokenizer.from_pretrained(model_name_or_path)
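
# Usage sketch: zero-shot image/text matching with the loaded model. This
# assumes the repo's remote code exposes encode_image / encode_text methods,
# as in EVA-CLIP's reference example; the image path and captions below are
# hypothetical placeholders.
from PIL import Image

image = Image.open("CLIP.png")  # placeholder path to a local image
captions = ["a diagram", "a dog", "a cat"]

input_ids = tokenizer(captions, return_tensors="pt", padding=True).input_ids.to("cuda")
input_pixels = processor(images=image, return_tensors="pt").pixel_values.to("cuda")

with torch.no_grad(), torch.cuda.amp.autocast():
    image_features = model.encode_image(input_pixels)
    text_features = model.encode_text(input_ids)
    # L2-normalize so the dot product below is a cosine similarity.
    image_features /= image_features.norm(dim=-1, keepdim=True)
    text_features /= text_features.norm(dim=-1, keepdim=True)

# Scaled cosine similarities, softmaxed into a distribution over captions.
label_probs = (100.0 * image_features @ text_features.T).softmax(dim=-1)
print("Label probs:", label_probs)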