Update app.py
app.py CHANGED
@@ -10,7 +10,7 @@ class MultiTaskModel(nn.Module):
         self.encoder = AutoModel.from_pretrained(base_model_name)
         hidden_size = self.encoder.config.hidden_size
         self.topik_classifier = nn.Linear(hidden_size, num_topic_classes)
-        self.
+        self.sentiment_classifier = nn.Linear(hidden_size, num_sentiment_classes)
 
     def forward(self, input_ids, attention_mask):
         outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask)
@@ -21,7 +21,7 @@ class MultiTaskModel(nn.Module):
 
 # Load tokenizer & model
 tokenizer = AutoTokenizer.from_pretrained("tokenizer")
-model = MultiTaskModel("indobenchmark/indobert-base-p1", num_topic_classes=
+model = MultiTaskModel("indobenchmark/indobert-base-p1", num_topic_classes=5, num_sentiment_classes=3)
 model.load_state_dict(torch.load("model.pt", map_location=torch.device("cpu")))
 model.eval()
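For reference, below is a minimal sketch of what app.py's model definition and loading code look like after this commit. Only the lines visible in the diff come from the source; the pooling strategy in forward (taking the [CLS] token hidden state) and the pair of returned logits are assumptions, since the diff omits the body of forward past line 16.

import torch
import torch.nn as nn
from transformers import AutoModel, AutoTokenizer

class MultiTaskModel(nn.Module):
    def __init__(self, base_model_name, num_topic_classes, num_sentiment_classes):
        super().__init__()
        # Shared IndoBERT encoder with two task-specific linear heads.
        self.encoder = AutoModel.from_pretrained(base_model_name)
        hidden_size = self.encoder.config.hidden_size
        self.topik_classifier = nn.Linear(hidden_size, num_topic_classes)
        self.sentiment_classifier = nn.Linear(hidden_size, num_sentiment_classes)

    def forward(self, input_ids, attention_mask):
        outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask)
        # Assumption: both heads read the [CLS] token representation.
        pooled = outputs.last_hidden_state[:, 0]
        return self.topik_classifier(pooled), self.sentiment_classifier(pooled)

# Loading as in the diff: local tokenizer directory, weights from model.pt, CPU inference.
tokenizer = AutoTokenizer.from_pretrained("tokenizer")
model = MultiTaskModel("indobenchmark/indobert-base-p1", num_topic_classes=5, num_sentiment_classes=3)
model.load_state_dict(torch.load("model.pt", map_location=torch.device("cpu")))
model.eval()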