kovacsvi committed on
Commit
c9b32c5
·
1 Parent(s): d54a8e4

removed to(device)

Browse files
interfaces/cap.py CHANGED
@@ -85,7 +85,6 @@ def build_huggingface_path(language: str, domain: str):
85
 
86
  @spaces.GPU(duration=5)
87
  def predict(text, model_id, tokenizer_id):
88
- device = torch.device("cpu")
89
  model = AutoModelForSequenceClassification.from_pretrained(model_id, low_cpu_mem_usage=True, device_map="auto", offload_folder="offload", token=HF_TOKEN)
90
  tokenizer = AutoTokenizer.from_pretrained(tokenizer_id)
91
 
@@ -93,7 +92,7 @@ def predict(text, model_id, tokenizer_id):
93
  max_length=256,
94
  truncation=True,
95
  padding="do_not_pad",
96
- return_tensors="pt").to(device)
97
  model.eval()
98
 
99
  with torch.no_grad():
 
85
 
86
  @spaces.GPU(duration=5)
87
  def predict(text, model_id, tokenizer_id):
 
88
  model = AutoModelForSequenceClassification.from_pretrained(model_id, low_cpu_mem_usage=True, device_map="auto", offload_folder="offload", token=HF_TOKEN)
89
  tokenizer = AutoTokenizer.from_pretrained(tokenizer_id)
90
 
 
92
  max_length=256,
93
  truncation=True,
94
  padding="do_not_pad",
95
+ return_tensors="pt")
96
  model.eval()
97
 
98
  with torch.no_grad():
interfaces/cap_minor_media.py CHANGED
@@ -59,8 +59,6 @@ def build_huggingface_path(language: str, domain: str):
59
 
60
  @spaces.GPU(duration=5)
61
  def predict(text, major_model_id, minor_model_id, tokenizer_id, HF_TOKEN=None):
62
- device = torch.device("cpu")
63
-
64
  # Load major and minor models + tokenizer
65
  major_model = AutoModelForSequenceClassification.from_pretrained(
66
  major_model_id,
@@ -68,7 +66,7 @@ def predict(text, major_model_id, minor_model_id, tokenizer_id, HF_TOKEN=None):
68
  device_map="auto",
69
  offload_folder="offload",
70
  token=HF_TOKEN
71
- ).to(device)
72
 
73
  minor_model = AutoModelForSequenceClassification.from_pretrained(
74
  minor_model_id,
@@ -76,12 +74,12 @@ def predict(text, major_model_id, minor_model_id, tokenizer_id, HF_TOKEN=None):
76
  device_map="auto",
77
  offload_folder="offload",
78
  token=HF_TOKEN
79
- ).to(device)
80
 
81
  tokenizer = AutoTokenizer.from_pretrained(tokenizer_id)
82
 
83
  # Tokenize input
84
- inputs = tokenizer(text, max_length=256, truncation=True, padding="do_not_pad", return_tensors="pt").to(device)
85
 
86
  # Predict major topic
87
  major_model.eval()
 
59
 
60
  @spaces.GPU(duration=5)
61
  def predict(text, major_model_id, minor_model_id, tokenizer_id, HF_TOKEN=None):
 
 
62
  # Load major and minor models + tokenizer
63
  major_model = AutoModelForSequenceClassification.from_pretrained(
64
  major_model_id,
 
66
  device_map="auto",
67
  offload_folder="offload",
68
  token=HF_TOKEN
69
+ )
70
 
71
  minor_model = AutoModelForSequenceClassification.from_pretrained(
72
  minor_model_id,
 
74
  device_map="auto",
75
  offload_folder="offload",
76
  token=HF_TOKEN
77
+ )
78
 
79
  tokenizer = AutoTokenizer.from_pretrained(tokenizer_id)
80
 
81
  # Tokenize input
82
+ inputs = tokenizer(text, max_length=256, truncation=True, padding="do_not_pad", return_tensors="pt")
83
 
84
  # Predict major topic
85
  major_model.eval()