MrPotato committed
Commit bbb2d92 · 1 Parent(s): 1824930

changed chunk generation

Files changed (1):
  1. ref_seg_ger.py +5 -2
ref_seg_ger.py CHANGED
@@ -19,6 +19,7 @@ import os
 import numpy as np
 from PIL import Image
 from transformers import AutoTokenizer
+from tokenizers.pre_tokenizers import Whitespace
 import datasets
 from itertools import chain
 import pandas as pd
@@ -206,7 +207,7 @@ class RefSeg(datasets.GeneratorBasedBuilder):
 #print(filepath)
 #print(split)
 paths = glob(filepath + '/' + split + '/*.csv')
-#print(paths)
+print(paths)
 key = 0
 for f in paths:
 print(f)
@@ -215,7 +216,9 @@ class RefSeg(datasets.GeneratorBasedBuilder):
 labels = []
 for i, row in df.iterrows():

-tokenized_input = row['token'].split(' ')
+#tokenized_input = row['token'].split(' ')
+tokenized_input, offsets = zip(*Whitespace(row['token']))
+print(tokenized_input)
 if f.endswith('Cermaine_0.xml.csv'):
 print(tokenized_input)
 if len(tokenized_input) > 1:
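
Note on the new splitting line: as committed, Whitespace(row['token']) passes the row text to the pre-tokenizer's constructor, but in the Hugging Face tokenizers library Whitespace takes no arguments; the text is given to pre_tokenize_str, which returns (token, (start, end)) pairs that can then be unzipped as in the diff above. A minimal sketch of the presumed intent (the sample string is illustrative, not taken from the dataset):

from tokenizers.pre_tokenizers import Whitespace

pre_tokenizer = Whitespace()  # splits on whitespace and separates punctuation

row_token = "Smith, J. (1999). Example title."  # hypothetical cell value
pairs = pre_tokenizer.pre_tokenize_str(row_token)
# e.g. [('Smith', (0, 5)), (',', (5, 6)), ('J', (7, 8)), ...]

tokenized_input, offsets = zip(*pairs)  # raises ValueError if pairs is empty
print(tokenized_input)  # surface tokens
print(offsets)          # (start, end) character offsets into row_token

Unlike the old row['token'].split(' '), this also splits punctuation into separate tokens, which is presumably the chunk-generation change the commit message refers to; empty token cells would need a guard before the unpack.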