pbordesinstadeep committed on
Commit
9378c69
·
verified ·
1 Parent(s): 75f8034

Update isoformer_tokenizer.py

Browse files
Files changed (1) hide show
  1. isoformer_tokenizer.py +11 -7
isoformer_tokenizer.py CHANGED
@@ -39,18 +39,22 @@ class IsoformerTokenizer(PreTrainedTokenizer):
39
  **kwargs
40
  ):
41
 
42
- dna_hf_tokenizer = EsmTokenizer("dna_vocab_list.txt", model_max_length=196608)
 
 
 
 
 
43
  dna_hf_tokenizer.eos_token = None # Stops the tokenizer adding an EOS/SEP token at the end
44
  dna_hf_tokenizer.init_kwargs["eos_token"] = None # Ensures it doesn't come back when reloading
45
  dna_hf_tokenizer.bos_token = None # Stops the tokenizer adding an BOS/SEP token at the end
46
  dna_hf_tokenizer.init_kwargs["bos_token"] = None # Ensures it doesn't come back when reloading
47
 
48
-
49
- rna_hf_tokenizer = EsmTokenizer("rna_vocab_list.txt", model_max_length=1024)
50
  rna_hf_tokenizer.eos_token = None # Stops the tokenizer adding an EOS/SEP token at the end
51
  rna_hf_tokenizer.init_kwargs["eos_token"] = None # Ensures it doesn't come back when reloading
52
 
53
- protein_hf_tokenizer = EsmTokenizer("protein_vocab_list.txt", model_max_length=1024)
54
  # protein_hf_tokenizer.eos_token = None # Stops the tokenizer adding an EOS/SEP token at the end
55
  # protein_hf_tokenizer.init_kwargs["eos_token"] = None # Ensures it doesn't come back when reloading
56
 
@@ -58,9 +62,9 @@ class IsoformerTokenizer(PreTrainedTokenizer):
58
  self.rna_tokenizer = rna_hf_tokenizer
59
  self.protein_tokenizer = protein_hf_tokenizer
60
 
61
- self.dna_tokens = open("dna_vocab_list.txt", "r").read() .split("\n")
62
- self.rna_tokens = open("rna_vocab_list.txt", "r").read() .split("\n")
63
- self.protein_tokens = open("protein_vocab_list.txt", "r").read() .split("\n")
64
 
65
  super().__init__(**kwargs)
66
 
 
39
  **kwargs
40
  ):
41
 
42
+ pretrained_model_path = kwargs.get("pretrained_model_name_or_path", "")
43
+ dna_vocab_path = os.path.join(pretrained_model_path, "dna_vocab_list.txt")
44
+ rna_vocab_path = os.path.join(pretrained_model_path, "rna_vocab_list.txt")
45
+ protein_vocab_path = os.path.join(pretrained_model_path, "protein_vocab_list.txt")
46
+
47
+ dna_hf_tokenizer = EsmTokenizer(dna_vocab_path, model_max_length=196608)
48
  dna_hf_tokenizer.eos_token = None # Stops the tokenizer adding an EOS/SEP token at the end
49
  dna_hf_tokenizer.init_kwargs["eos_token"] = None # Ensures it doesn't come back when reloading
50
  dna_hf_tokenizer.bos_token = None # Stops the tokenizer adding an BOS/SEP token at the end
51
  dna_hf_tokenizer.init_kwargs["bos_token"] = None # Ensures it doesn't come back when reloading
52
 
53
+ rna_hf_tokenizer = EsmTokenizer(rna_vocab_path, model_max_length=1024)
 
54
  rna_hf_tokenizer.eos_token = None # Stops the tokenizer adding an EOS/SEP token at the end
55
  rna_hf_tokenizer.init_kwargs["eos_token"] = None # Ensures it doesn't come back when reloading
56
 
57
+ protein_hf_tokenizer = EsmTokenizer(protein_vocab_path, model_max_length=1024)
58
  # protein_hf_tokenizer.eos_token = None # Stops the tokenizer adding an EOS/SEP token at the end
59
  # protein_hf_tokenizer.init_kwargs["eos_token"] = None # Ensures it doesn't come back when reloading
60
 
 
62
  self.rna_tokenizer = rna_hf_tokenizer
63
  self.protein_tokenizer = protein_hf_tokenizer
64
 
65
+ self.dna_tokens = open(dna_vocab_path, "r").read() .split("\n")
66
+ self.rna_tokens = open(rna_vocab_path, "r").read() .split("\n")
67
+ self.protein_tokens = open(protein_vocab_path, "r").read() .split("\n")
68
 
69
  super().__init__(**kwargs)
70