MrPotato committed
Commit df20b7b · 1 Parent(s): 3398a76

changed chunk generation

Files changed (1): ref_seg_ger.py +3 -3
ref_seg_ger.py CHANGED
@@ -138,7 +138,7 @@ class RefSeg(datasets.GeneratorBasedBuilder):
     # ]
 
     # DEFAULT_CONFIG_NAME = "small"  # It's not mandatory to have a default configuration. Just use one if it make sense.
-    TOKENIZER = AutoTokenizer.from_pretrained("xlm-roberta-base")
+    TOKENIZER = Whitespace()
 
     def _info(self):
         # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
@@ -217,8 +217,8 @@ class RefSeg(datasets.GeneratorBasedBuilder):
         for i, row in df.iterrows():
 
             #tokenized_input = row['token'].split(' ')
-            print(Whitespace(row['token']))
-            tokenized_input, offsets = zip(*Whitespace(row['token']))
+            print(self.TOKENIZER.pre_tokenize(row['token']))
+            tokenized_input, offsets = zip(*self.TOKENIZER.pre_tokenize(row['token']))
             print(tokenized_input)
             if f.endswith('Cermaine_0.xml.csv'):
                 print(tokenized_input)
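Note on the new tokenizer call: whether Whitespace().pre_tokenize(...) accepts a plain string depends on the installed tokenizers version; in recent releases the string-based method is pre_tokenize_str, while pre_tokenize expects a PreTokenizedString. A minimal standalone sketch of the same chunking step under that assumption (the sample sentence is made up, not from the dataset):

from tokenizers.pre_tokenizers import Whitespace

# Hypothetical reproduction of the pre-tokenization step above, not the
# committed code. Assumes a recent `tokenizers` release, where plain strings
# go through pre_tokenize_str(); pre_tokenize() takes a PreTokenizedString.
TOKENIZER = Whitespace()

# pre_tokenize_str returns (token, (start, end)) pairs; Whitespace splits on
# the pattern \w+|[^\w\s]+, so punctuation is separated from words.
pairs = TOKENIZER.pre_tokenize_str("Hello world, 2023")
tokenized_input, offsets = zip(*pairs)
print(tokenized_input)  # ('Hello', 'world', ',', '2023')
print(offsets)          # ((0, 5), (6, 11), (11, 12), (13, 17))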