kovacsvi committed on
Commit
ecdbfcf
·
1 Parent(s): 79bbcba

revert back to cpu : (

Browse files
interfaces/cap.py CHANGED
@@ -83,9 +83,9 @@ def build_huggingface_path(language: str, domain: str):
83
  else:
84
  return "poltextlab/xlm-roberta-large-pooled-cap"
85
 
86
- @spaces.GPU
87
  def predict(text, model_id, tokenizer_id):
88
- device = torch.device("cuda:0")
89
  model = AutoModelForSequenceClassification.from_pretrained(model_id, low_cpu_mem_usage=True, device_map="auto", offload_folder="offload", token=HF_TOKEN).to(device)
90
  tokenizer = AutoTokenizer.from_pretrained(tokenizer_id)
91
 
 
83
  else:
84
  return "poltextlab/xlm-roberta-large-pooled-cap"
85
 
86
+ #@spaces.GPU
87
  def predict(text, model_id, tokenizer_id):
88
+ device = torch.device("cpu")
89
  model = AutoModelForSequenceClassification.from_pretrained(model_id, low_cpu_mem_usage=True, device_map="auto", offload_folder="offload", token=HF_TOKEN).to(device)
90
  tokenizer = AutoTokenizer.from_pretrained(tokenizer_id)
91
 
interfaces/cap_minor_media.py CHANGED
@@ -57,9 +57,9 @@ def check_huggingface_path(checkpoint_path: str):
57
  def build_huggingface_path(language: str, domain: str):
58
  return ("poltextlab/xlm-roberta-large-pooled-cap-media", "poltextlab/xlm-roberta-large-pooled-cap-minor-v3")
59
 
60
- @spaces.GPU(duration=30)
61
  def predict(text, major_model_id, minor_model_id, tokenizer_id, HF_TOKEN=None):
62
- device = torch.device("cuda:0")
63
 
64
  # Load major and minor models + tokenizer
65
  major_model = AutoModelForSequenceClassification.from_pretrained(
 
57
  def build_huggingface_path(language: str, domain: str):
58
  return ("poltextlab/xlm-roberta-large-pooled-cap-media", "poltextlab/xlm-roberta-large-pooled-cap-minor-v3")
59
 
60
+ #@spaces.GPU(duration=30)
61
  def predict(text, major_model_id, minor_model_id, tokenizer_id, HF_TOKEN=None):
62
+ device = torch.device("cpu")
63
 
64
  # Load major and minor models + tokenizer
65
  major_model = AutoModelForSequenceClassification.from_pretrained(