torchmoji init model
app.py CHANGED
@@ -14,7 +14,7 @@ from transformers import AutoModel, AutoTokenizer
 model_name = "Pendrokar/TorchMoji"
 model = AutoModel.from_pretrained(model_name)
 tokenizer = AutoTokenizer.from_pretrained(model_name)
-model_path =
+model_path = './' + model_name + "/pytorch_model.bin"
 vocab_path = './' + model_name + "/vocabulary.json"
 
 def top_elements(array, k):
@@ -23,22 +23,12 @@ def top_elements(array, k):
 
 maxlen = 30
 
-# print('Tokenizing using dictionary from {}'.format(vocab_path))
-# with open(vocab_path, 'r') as f:
-#     vocabulary = json.load(f)
-
 st = SentenceTokenizer(tokenizer.get_added_vocab(), maxlen)
 
-print('Loading model from {}.'.format(model_path))
 model = torchmoji_emojis(model_path)
-print(model)
-
-def doImportableFunction():
-    return
 
 def predict(deepmoji_analysis):
     output_text = "\n"
-    print('Running predictions.')
     tokenized, _, _ = st.tokenize_sentences(TEST_SENTENCES)
     prob = model(tokenized)
 
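For reference, below is a minimal sketch of how the pieces this diff touches are typically wired together in the upstream torchMoji examples: load the vocabulary from vocabulary.json, build a SentenceTokenizer, load the emoji classifier from the pytorch_model.bin weights, and pick the top-k emoji indices per sentence. The local paths, the sample sentence, and k=5 are illustrative assumptions, not part of this commit, and the sketch presumes the torchmoji package (huggingface/torchMoji) is installed and the Pendrokar/TorchMoji repo files have been downloaded locally. Note that app.py passes tokenizer.get_added_vocab() to SentenceTokenizer; the sketch instead loads vocabulary.json (the file vocab_path points at), as the upstream examples do.

import json
import numpy as np
from torchmoji.sentence_tokenizer import SentenceTokenizer
from torchmoji.model_def import torchmoji_emojis

# Illustrative local paths; assumes the Pendrokar/TorchMoji repo files were downloaded here.
model_path = './Pendrokar/TorchMoji/pytorch_model.bin'
vocab_path = './Pendrokar/TorchMoji/vocabulary.json'
maxlen = 30

def top_elements(array, k):
    # Indices of the k largest probabilities, highest probability first.
    ind = np.argpartition(array, -k)[-k:]
    return ind[np.argsort(array[ind])][::-1]

# Vocabulary-backed tokenizer and the torchMoji emoji classification head.
with open(vocab_path, 'r') as f:
    vocabulary = json.load(f)
st = SentenceTokenizer(vocabulary, maxlen)
model = torchmoji_emojis(model_path)

sentences = ['I love how you never reply back..']  # illustrative input
tokenized, _, _ = st.tokenize_sentences(sentences)
prob = model(tokenized)  # per-sentence probabilities over the emoji classes

for i, sentence in enumerate(sentences):
    print(sentence, top_elements(prob[i], 5))  # indices into torchMoji's emoji list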