Hugging Face Space (status: Running) — commit e8429a7, parent 1c18be2: inference.py changed (+5 −0).
def predict_absa(text):
    """Run aspect-based sentiment analysis (ABSA) over *text*.

    Tokenizes the input, runs the token-classification model, maps each
    token's predicted label id to its BIO tag, and merges the tags into
    aspect spans with polarities.

    Args:
        text: Raw input sentence to analyze.

    Returns:
        A list of dicts, one per detected aspect span:
        ``[{"aspect": <aspect text>, "sentiment": <polarity>}, ...]``.

    NOTE(review): relies on module-level ``tokenizer``, ``model``,
    ``id2label`` and ``bio_to_spans`` defined elsewhere in this file.
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
    with torch.no_grad():
        output = model(input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"])

    # BUG FIX: the original did ``output['logits'][0].tolist()``, which yields
    # a list of per-token logit VECTORS (lists of floats).  Every
    # ``id2label.get(i, 'O')`` lookup then misses (ids are int keys) and all
    # tokens silently degrade to 'O'.  Take the argmax over the label
    # dimension first to obtain integer label ids per token.
    pred_ids = output["logits"][0].argmax(dim=-1).tolist()

    tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0])
    labels = [id2label.get(i, "O") for i in pred_ids]

    # The debug ``print("output: ", ...)`` / ``print("pred_ids: ", ...)`` /
    # ``print("tokens: ", ...)`` / ``print("labels: ", ...)`` calls added in
    # this commit are removed: they pollute stdout on every inference call.
    result = bio_to_spans(tokens, labels)
    return [{"aspect": asp, "sentiment": pol} for asp, pol in result]