add cycle
Changed file: extractor.py (+2 -1)
@@ -1,6 +1,7 @@
 from transformers import RobertaTokenizerFast, AutoModelForTokenClassification
 import re
 import torch
+from itertools import cycle
 
 tokenizer = RobertaTokenizerFast.from_pretrained("mrfirdauss/robert-base-finetuned-cv")
 model = AutoModelForTokenClassification.from_pretrained("mrfirdauss/robert-base-finetuned-cv")
@@ -138,7 +139,7 @@ def predict(text):
     profile['links'].append(links['text'])
     # Process experiences and education
     print(process_tokens(data, 'EXPERIENCES DESC'))
-    for designation, company, experience_desc in zip(process_tokens(data, 'DESIGNATION'),process_tokens(data, 'COMPANY'),process_tokens(data, 'EXPERIENCES DESC')):
+    for designation, company, experience_desc in zip(cycle(process_tokens(data, 'DESIGNATION')),cycle(process_tokens(data, 'COMPANY')),cycle(process_tokens(data, 'EXPERIENCES DESC'))):
         profile['experiences'].append({
             "start": None,
             "end": None,