import os
import re

import torch
import spaces
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from huggingface_hub import login

max_length = 512

# Authenticate with the Hugging Face Hub so the (possibly private) model repos can be loaded.
auth_token = os.getenv('HF_SPACE_TOKEN')
login(token=auth_token)


def get_tokenizer(src_lang, tgt_lang):
    """Initialise and return the tokenizer for the given language pair."""
    if src_lang == "mos_Latn" and tgt_lang == "fra_Latn":
        model_id = "ArissBandoss/3b-new-400"
    else:
        model_id = "ArissBandoss/nllb-200-distilled-600M-finetuned-fr-to-mos-V4"

    return AutoTokenizer.from_pretrained(model_id, token=auth_token)


def split_text_by_tokens(text, src_lang, tgt_lang, max_tokens_per_chunk=200):
    """Split the text into chunks, respecting sentence boundaries and counting tokens."""
    tokenizer = get_tokenizer(src_lang, tgt_lang)
    tokenizer.src_lang = src_lang

    # Split on sentence-ending punctuation while keeping the delimiters,
    # so each sentence can be rejoined with its final punctuation mark.
    sentences = re.split(r'([.!?])', text)
    chunks = []
    current_chunk = ""
    current_tokens = 0

    for i in range(0, len(sentences), 2):
        # Reattach the punctuation captured by the regex to its sentence.
        if i + 1 < len(sentences):
            sentence = sentences[i] + sentences[i + 1]
        else:
            sentence = sentences[i]

        sentence_tokens = len(tokenizer.encode(sentence))

        # Start a new chunk when adding this sentence would exceed the token budget.
        if current_tokens + sentence_tokens > max_tokens_per_chunk and current_chunk:
            chunks.append(current_chunk.strip())
            current_chunk = sentence
            current_tokens = sentence_tokens
        else:
            current_chunk += sentence
            current_tokens += sentence_tokens

    if current_chunk:
        chunks.append(current_chunk.strip())

    return chunks


@spaces.GPU
def goai_traduction(text, src_lang, tgt_lang, max_tokens_per_chunk=200):
    tokenizer = get_tokenizer(src_lang, tgt_lang)
    tokenizer.src_lang = src_lang

    text_tokens = len(tokenizer.encode(text))

    # Long inputs are split into sentence-aligned chunks and translated one by one;
    # short inputs are translated in a single pass.
    if text_tokens > max_tokens_per_chunk:
        chunks = split_text_by_tokens(text, src_lang, tgt_lang, max_tokens_per_chunk)
        translations = []

        for chunk in chunks:
            translated_chunk = translate_chunk(chunk, src_lang, tgt_lang)
            translations.append(translated_chunk)

        return " ".join(translations)
    else:
        return translate_chunk(text, src_lang, tgt_lang)


def translate_chunk(text, src_lang, tgt_lang):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    if src_lang == "mos_Latn" and tgt_lang == "fra_Latn":
        model_id = "ArissBandoss/3b-new-400"
    else:
        model_id = "ArissBandoss/nllb-200-distilled-600M-finetuned-fr-to-mos-V4"

    tokenizer = AutoTokenizer.from_pretrained(model_id, token=auth_token)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_id, token=auth_token).to(device)

    tokenizer.src_lang = src_lang

    inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=512).to(device)

    # NLLB expects the target language code as the forced first token of the output.
    tgt_lang_id = tokenizer.convert_tokens_to_ids(tgt_lang)

    outputs = model.generate(
        **inputs,
        forced_bos_token_id=tgt_lang_id,
        max_new_tokens=512,
        num_beams=5,
        no_repeat_ngram_size=3,
        repetition_penalty=1.5,
        length_penalty=1.0,
        early_stopping=True
    )

    translation = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]

    return translation


def real_time_traduction(input_text, src_lang, tgt_lang):
    return goai_traduction(input_text, src_lang, tgt_lang, max_tokens_per_chunk=200)
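

# Example usage: a minimal sketch, not part of the original module.
# The sample text and the fra_Latn -> mos_Latn direction are assumptions;
# the language codes follow the NLLB convention already used above.
if __name__ == "__main__":
    sample = "Bonjour, comment allez-vous ?"
    print(goai_traduction(sample, src_lang="fra_Latn", tgt_lang="mos_Latn"))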