Spaces:
Sleeping
Sleeping
Update clip_model.py
Browse files- clip_model.py +14 -15
clip_model.py
CHANGED
@@ -3,6 +3,20 @@ import torch
|
|
3 |
from PIL import Image
|
4 |
from transformers import ChineseCLIPProcessor, ChineseCLIPModel
|
5 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
6 |
class ClipModel:
|
7 |
def __init__(self, model_name="OFA-Sys/chinese-clip-vit-base-patch16", model_path=None, vocab_path='./chiikawa/word_list.txt'):
|
8 |
# Set device
|
@@ -26,21 +40,6 @@ class ClipModel:
|
|
26 |
with open(vocab_path, 'r', encoding='utf-8') as f:
|
27 |
self.vocab = [line.strip() for line in f.readlines()]
|
28 |
|
29 |
-
def check_memory_usage():
|
30 |
-
# Get memory details
|
31 |
-
memory_info = psutil.virtual_memory()
|
32 |
-
|
33 |
-
total_memory = memory_info.total / (1024 * 1024) # Convert bytes to MB
|
34 |
-
available_memory = memory_info.available / (1024 * 1024)
|
35 |
-
used_memory = memory_info.used / (1024 * 1024)
|
36 |
-
memory_usage_percent = memory_info.percent
|
37 |
-
|
38 |
-
print(f"^^^^^^ Total Memory: {total_memory:.2f} MB ^^^^^^")
|
39 |
-
print(f"^^^^^^ Available Memory: {available_memory:.2f} MB ^^^^^^")
|
40 |
-
print(f"^^^^^^ Used Memory: {used_memory:.2f} MB ^^^^^^")
|
41 |
-
print(f"^^^^^^ Memory Usage (%): {memory_usage_percent}% ^^^^^^")
|
42 |
-
|
43 |
-
|
44 |
def clip_result(self, image_path, top_k=3):
|
45 |
"""
|
46 |
給定圖片路徑,返回最接近的 top_k 詞彙
|
|
|
3 |
from PIL import Image
|
4 |
from transformers import ChineseCLIPProcessor, ChineseCLIPModel
|
5 |
|
6 |
+
def check_memory_usage():
    """Print a snapshot of current system memory usage.

    Reports total, available, and used memory in MB, plus the usage
    percentage, using ``psutil.virtual_memory()``.

    NOTE(review): depends on ``psutil``; the ``import psutil`` line is not
    visible in this fragment of clip_model.py — confirm it exists at the
    top of the file, otherwise this raises ``NameError`` at call time.

    Returns:
        None. Output goes to stdout only.
    """
    # Get memory details
    memory_info = psutil.virtual_memory()

    # Hoist the bytes->MB divisor instead of repeating (1024 * 1024).
    bytes_per_mb = 1024 * 1024
    total_memory = memory_info.total / bytes_per_mb
    available_memory = memory_info.available / bytes_per_mb
    used_memory = memory_info.used / bytes_per_mb
    memory_usage_percent = memory_info.percent

    print(f"^^^^^^ Total Memory: {total_memory:.2f} MB ^^^^^^")
    print(f"^^^^^^ Available Memory: {available_memory:.2f} MB ^^^^^^")
    print(f"^^^^^^ Used Memory: {used_memory:.2f} MB ^^^^^^")
    print(f"^^^^^^ Memory Usage (%): {memory_usage_percent}% ^^^^^^")
20 |
class ClipModel:
|
21 |
def __init__(self, model_name="OFA-Sys/chinese-clip-vit-base-patch16", model_path=None, vocab_path='./chiikawa/word_list.txt'):
|
22 |
# Set device
|
|
|
40 |
with open(vocab_path, 'r', encoding='utf-8') as f:
|
41 |
self.vocab = [line.strip() for line in f.readlines()]
|
42 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
43 |
def clip_result(self, image_path, top_k=3):
|
44 |
"""
|
45 |
給定圖片路徑,返回最接近的 top_k 詞彙
|