kovacsvi committed on
Commit
2180861
·
1 Parent(s): c9b32c5

use cuda:0 as torch device

Browse files
interfaces/cap.py CHANGED
@@ -85,14 +85,15 @@ def build_huggingface_path(language: str, domain: str):
85
 
86
  @spaces.GPU(duration=5)
87
  def predict(text, model_id, tokenizer_id):
88
- model = AutoModelForSequenceClassification.from_pretrained(model_id, low_cpu_mem_usage=True, device_map="auto", offload_folder="offload", token=HF_TOKEN)
 
89
  tokenizer = AutoTokenizer.from_pretrained(tokenizer_id)
90
 
91
  inputs = tokenizer(text,
92
  max_length=256,
93
  truncation=True,
94
  padding="do_not_pad",
95
- return_tensors="pt")
96
  model.eval()
97
 
98
  with torch.no_grad():
 
85
 
86
  @spaces.GPU(duration=5)
87
  def predict(text, model_id, tokenizer_id):
88
+ device = torch.device("cuda:0")
89
+ model = AutoModelForSequenceClassification.from_pretrained(model_id, low_cpu_mem_usage=True, device_map="auto", offload_folder="offload", token=HF_TOKEN).to(device)
90
  tokenizer = AutoTokenizer.from_pretrained(tokenizer_id)
91
 
92
  inputs = tokenizer(text,
93
  max_length=256,
94
  truncation=True,
95
  padding="do_not_pad",
96
+ return_tensors="pt").to(device)
97
  model.eval()
98
 
99
  with torch.no_grad():
interfaces/cap_minor_media.py CHANGED
@@ -59,6 +59,8 @@ def build_huggingface_path(language: str, domain: str):
59
 
60
  @spaces.GPU(duration=5)
61
  def predict(text, major_model_id, minor_model_id, tokenizer_id, HF_TOKEN=None):
 
 
62
  # Load major and minor models + tokenizer
63
  major_model = AutoModelForSequenceClassification.from_pretrained(
64
  major_model_id,
@@ -66,7 +68,7 @@ def predict(text, major_model_id, minor_model_id, tokenizer_id, HF_TOKEN=None):
66
  device_map="auto",
67
  offload_folder="offload",
68
  token=HF_TOKEN
69
- )
70
 
71
  minor_model = AutoModelForSequenceClassification.from_pretrained(
72
  minor_model_id,
@@ -74,12 +76,12 @@ def predict(text, major_model_id, minor_model_id, tokenizer_id, HF_TOKEN=None):
74
  device_map="auto",
75
  offload_folder="offload",
76
  token=HF_TOKEN
77
- )
78
 
79
  tokenizer = AutoTokenizer.from_pretrained(tokenizer_id)
80
 
81
  # Tokenize input
82
- inputs = tokenizer(text, max_length=256, truncation=True, padding="do_not_pad", return_tensors="pt")
83
 
84
  # Predict major topic
85
  major_model.eval()
 
59
 
60
  @spaces.GPU(duration=5)
61
  def predict(text, major_model_id, minor_model_id, tokenizer_id, HF_TOKEN=None):
62
+ device = torch.device("cuda:0")
63
+
64
  # Load major and minor models + tokenizer
65
  major_model = AutoModelForSequenceClassification.from_pretrained(
66
  major_model_id,
 
68
  device_map="auto",
69
  offload_folder="offload",
70
  token=HF_TOKEN
71
+ ).to(device)
72
 
73
  minor_model = AutoModelForSequenceClassification.from_pretrained(
74
  minor_model_id,
 
76
  device_map="auto",
77
  offload_folder="offload",
78
  token=HF_TOKEN
79
+ ).to(device)
80
 
81
  tokenizer = AutoTokenizer.from_pretrained(tokenizer_id)
82
 
83
  # Tokenize input
84
+ inputs = tokenizer(text, max_length=256, truncation=True, padding="do_not_pad", return_tensors="pt").to(device)
85
 
86
  # Predict major topic
87
  major_model.eval()