import torch
import spaces
from transformers import pipeline, AutoModelForSeq2SeqLM, AutoTokenizer
from peft import PeftModel, PeftConfig
import os
import unicodedata
from huggingface_hub import login

# Maximum sequence length used for both input truncation and generation.
max_length = 512

auth_token = os.getenv('HF_SPACE_TOKEN')
# Only log in when a token is actually configured; login(token=None) falls
# back to interactive/cached credentials and can fail inside a Space.
if auth_token:
    login(token=auth_token)


@spaces.GPU
def goai_traduction(text, src_lang, tgt_lang):
    """Translate ``text`` from ``src_lang`` to ``tgt_lang``.

    Language codes follow the NLLB convention (e.g. ``fra_Latn``,
    ``mos_Latn``). The fine-tuned checkpoint is chosen per language pair,
    then the text is translated with deterministic beam search.

    Args:
        text: Input text to translate.
        src_lang: NLLB code of the source language.
        tgt_lang: NLLB code of the target language.

    Returns:
        The decoded translation as a plain string.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Pick the checkpoint for the requested direction; the fr->mos model is
    # also the fallback for any other pair.
    if src_lang == "fra_Latn" and tgt_lang == "mos_Latn":
        model_id = "ArissBandoss/nllb-200-distilled-600M-finetuned-fr-to-mos-V4"
    elif src_lang == "mos_Latn" and tgt_lang == "fra_Latn":
        model_id = "ArissBandoss/mos2fr-3B-1200"
    else:
        model_id = "ArissBandoss/nllb-200-distilled-600M-finetuned-fr-to-mos-V4"

    tokenizer = AutoTokenizer.from_pretrained(model_id, token=auth_token)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_id, token=auth_token).to(device)

    # Set the source-language code so the tokenizer prepends the right tag.
    tokenizer.src_lang = src_lang

    # Tokenize the input text. Truncation must be requested here, at encode
    # time — truncation kwargs passed to from_pretrained are ignored.
    inputs = tokenizer(
        text, return_tensors="pt", truncation=True, max_length=max_length
    ).to(device)
    print(inputs)

    # Use convert_tokens_to_ids instead of lang_code_to_id (the latter was
    # removed in newer transformers releases).
    tgt_lang_id = tokenizer.convert_tokens_to_ids(tgt_lang)

    # Deterministic beam-search generation, forcing the target-language tag
    # as the first decoder token.
    outputs = model.generate(
        **inputs,
        forced_bos_token_id=tgt_lang_id,
        eos_token_id=tokenizer.eos_token_id,  # comma was missing here (SyntaxError)
        max_length=max_length,
        num_beams=4,
        do_sample=False,
        no_repeat_ngram_size=3,
    )

    print("Token IDs:", outputs)
    print("Tokens:", [tokenizer.decode([tok]) for tok in outputs[0]])

    # Decode the generated ids back to text.
    translation = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]
    print("ici translation")
    print(translation)

    return translation


def real_time_traduction(input_text, src_lang, tgt_lang):
    """Thin wrapper so UI callbacks can invoke the translator directly."""
    return goai_traduction(input_text, src_lang, tgt_lang)