import os

import torch
import spaces
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from huggingface_hub import login

auth_token = os.getenv("HF_SPACE_TOKEN")
login(token=auth_token)


@spaces.GPU
def goai_traduction(text, src_lang, tgt_lang):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Select the model for the requested direction: a dedicated model for
    # Mooré -> French, the fine-tuned NLLB model otherwise (French -> Mooré)
    if src_lang == "mos_Latn" and tgt_lang == "fra_Latn":
        model_id = "ArissBandoss/3b-new-400"
    else:
        model_id = "ArissBandoss/nllb-200-distilled-600M-finetuned-fr-to-mos-V4"

    # Note: the tokenizer and model are reloaded on every call
    tokenizer = AutoTokenizer.from_pretrained(model_id, token=auth_token)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_id, token=auth_token).to(device)

    # Tokenizer configuration: set the source language so the NLLB tokenizer
    # prepends the matching language token
    tokenizer.src_lang = src_lang

    # Tokenization (no truncation: the full input is passed to the model)
    inputs = tokenizer(text, return_tensors="pt", truncation=False).to(device)

    # ID of the target-language token, forced as the first generated token
    tgt_lang_id = tokenizer.convert_tokens_to_ids(tgt_lang)

    outputs = model.generate(
        **inputs,
        forced_bos_token_id=tgt_lang_id,
        max_new_tokens=1024,
        num_beams=5,
        repetition_penalty=2.0,
        length_penalty=2.0,
    )

    # Decoding
    translation = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]
    return translation


def real_time_traduction(input_text, src_lang, tgt_lang):
    return goai_traduction(input_text, src_lang, tgt_lang)
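
# --- Usage sketch (an assumption, not code from the original file) ---
# The `spaces.GPU` decorator is typically used in Gradio-based Hugging Face
# Spaces, so the guarded block below shows one plausible way to wire
# real_time_traduction into a Gradio UI. The widget labels and the language
# choices are illustrative only; the original UI code is not shown here.
if __name__ == "__main__":
    import gradio as gr

    demo = gr.Interface(
        fn=real_time_traduction,
        inputs=[
            gr.Textbox(label="Text to translate"),
            gr.Dropdown(choices=["fra_Latn", "mos_Latn"], value="fra_Latn", label="Source language"),
            gr.Dropdown(choices=["fra_Latn", "mos_Latn"], value="mos_Latn", label="Target language"),
        ],
        outputs=gr.Textbox(label="Translation"),
    )

    # Direct call example (downloads the model on first use):
    # goai_traduction("Bonjour, comment allez-vous ?", "fra_Latn", "mos_Latn")

    demo.launch()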