Spaces:
Sleeping
Sleeping
VishwaTechnologiesPvtLtd
committed on
Commit
·
f89a774
1
Parent(s):
a2ff264
T5Tokenizer
Browse files
backend/services/QuestionGenerator.py
CHANGED
|
@@ -1,4 +1,4 @@
|
|
| 1 |
-
from transformers import pipeline,
|
| 2 |
from .IQuestionGenerator import IQuestionGenerator
|
| 3 |
from backend.services.SentenceCheck import SentenceCheck
|
| 4 |
from backend.models.AIParamModel import AIParam
|
|
@@ -8,7 +8,7 @@ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
|
| 8 |
print(f"[QuestionGenerator] Using device: {device}")
|
| 9 |
|
| 10 |
# valhalla model with slow tokenizer
|
| 11 |
-
tokenizer_qg_simple =
|
| 12 |
model_qg_simple = AutoModelForSeq2SeqLM.from_pretrained("valhalla/t5-small-qg-hl")
|
| 13 |
|
| 14 |
qg_simple = pipeline(
|
|
@@ -19,7 +19,7 @@ qg_simple = pipeline(
|
|
| 19 |
)
|
| 20 |
|
| 21 |
# iarfmoose model with slow tokenizer
|
| 22 |
-
tokenizer_qg_advanced =
|
| 23 |
model_qg_advanced = AutoModelForSeq2SeqLM.from_pretrained("iarfmoose/t5-base-question-generator")
|
| 24 |
|
| 25 |
qg_advanced = pipeline(
|
|
|
|
| 1 |
+
from transformers import pipeline, T5Tokenizer, AutoModelForSeq2SeqLM
|
| 2 |
from .IQuestionGenerator import IQuestionGenerator
|
| 3 |
from backend.services.SentenceCheck import SentenceCheck
|
| 4 |
from backend.models.AIParamModel import AIParam
|
|
|
|
| 8 |
print(f"[QuestionGenerator] Using device: {device}")
|
| 9 |
|
| 10 |
# valhalla model with slow tokenizer
|
| 11 |
+
tokenizer_qg_simple = T5Tokenizer.from_pretrained("valhalla/t5-small-qg-hl")
|
| 12 |
model_qg_simple = AutoModelForSeq2SeqLM.from_pretrained("valhalla/t5-small-qg-hl")
|
| 13 |
|
| 14 |
qg_simple = pipeline(
|
|
|
|
| 19 |
)
|
| 20 |
|
| 21 |
# iarfmoose model with slow tokenizer
|
| 22 |
+
tokenizer_qg_advanced = T5Tokenizer.from_pretrained("iarfmoose/t5-base-question-generator")
|
| 23 |
model_qg_advanced = AutoModelForSeq2SeqLM.from_pretrained("iarfmoose/t5-base-question-generator")
|
| 24 |
|
| 25 |
qg_advanced = pipeline(
|