Commit 19da3a0
Parent(s): b62cda2
Update README.md
README.md CHANGED
@@ -32,7 +32,7 @@ Here is how to use this model to get the features of a given text in PyTorch:
 import random
 from transformers import AutoTokenizer, AutoModelWithLMHead
 
-tokenizer = AutoTokenizer.from_pretrained('macedonizer/mk-gpt2')
+tokenizer = AutoTokenizer.from_pretrained('macedonizer/mk-gpt2') \
 model = AutoModelWithLMHead.from_pretrained('macedonizer/mk-gpt2')
 
 input_text = 'Скопје е '
@@ -57,10 +57,10 @@ else: \
 max_length=1024, \
 top_p=0.95, \
 num_return_sequences=1, \
-)
+)
 
 decoded_output = [] \
 for sample in output: \
-decoded_output.append(tokenizer.decode(sample, skip_special_tokens=True))
+decoded_output.append(tokenizer.decode(sample, skip_special_tokens=True))
 
 print(decoded_output)
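The hunks above show only fragments of the README's example: the trailing backslashes are the README's own line-break escapes, and the `else:` branch referenced in the second hunk header is not visible here. A minimal runnable sketch of the visible usage, assuming the prompt is encoded with the tokenizer and passed to model.generate with the sampling arguments shown in the diff, might look like the following; the encoded_input name and the do_sample=True flag are illustrative additions, not taken from the diff.

import random  # imported by the README's example; unused in this visible fragment

from transformers import AutoTokenizer, AutoModelWithLMHead

# Load the Macedonian GPT-2 model and its tokenizer from the Hugging Face Hub.
tokenizer = AutoTokenizer.from_pretrained('macedonizer/mk-gpt2')
model = AutoModelWithLMHead.from_pretrained('macedonizer/mk-gpt2')

input_text = 'Скопје е '

# Encode the prompt and sample a continuation. max_length, top_p and
# num_return_sequences mirror the diff; do_sample=True is an assumption.
encoded_input = tokenizer.encode(input_text, return_tensors='pt')
output = model.generate(
    encoded_input,
    do_sample=True,
    max_length=1024,
    top_p=0.95,
    num_return_sequences=1,
)

# Decode each generated sequence back to text and print the results.
decoded_output = []
for sample in output:
    decoded_output.append(tokenizer.decode(sample, skip_special_tokens=True))

print(decoded_output)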