import logging

import sentencepiece as spm
from datasets import load_dataset

sp = spm.SentencePieceProcessor()
sp.load('ta_te_kan_ml_50kspm_tokenizer.model')

# Sample tokenizations recorded for this model:
#   Kannada (truncated note): ಮಾ', 'ಡುವುದು', '▁ಮಾಡಲು', '▁']
#   Kannada, 9 tokens:  ['ಕುಳ', 'ಿತು', 'ಕೊಳ್ಳಿ', '▁▁', 'ಕುಳ', 'ಿತು', 'ಕೊ', 'ಳ್', 'ಲು']
#   Malayalam:          'ഇ', 'ര', 'ിക്കും', '▁ഇരിക്ക', 'ുക', '▁'
#   Hindi:              ['विश्व', '▁कप', '▁में', '▁काल', '▁अंश', '▁काला', 'ंश', '▁ऑस्ट्रेलिया', '▁के', '▁खिलाफ', '▁भारत', '▁की', '▁हार', '▁के', '▁बाद', '▁भारतीय', '▁क्रिकेट', '▁प्रशंसा', '▁काफी', '▁निराश', '▁थे', '।']
#   Hindi, 19 tokens:   ['विश्व', '▁कप', '▁में', '▁ऑस्ट्रेलिया', '▁के', '▁खिलाफ', '▁भारत', '▁की', '▁हार', '▁के', '▁बाद', '▁भारतीय', '▁क्रिकेट', '▁प्रश', 'ंसक', '▁काफी', '▁निराश', '▁थे', '।']
#   Note: 'कालांश' splits as 'काला', 'ंश'
#   Malayalam, 19 tokens: ['ലോക', 'ക', 'പ്പിൽ', '▁ഓസ്', '▁ട്ര', 'േലിയ', 'യ്', '▁', 'ക്കെതിരെ', '▁ഇന്ത്യ', '▁തോറ്റ', 'തോടെ', '▁ഇന്ത്യൻ', '▁ക്രിക്കറ്റ്', '▁ആരാധകർ', '▁ഏറെ', '▁നിരാശ', 'യിലാണ്', '.']

# Quick sanity checks: tokenize a mixed Kannada/Devanagari string and a
# full Hindi sentence, and confirm the encode/decode round trip.
sentence = "ಮಾಡು ಮಾಡುವುದು ಮಾಡಲು विद्यालय आलय"
tokens = sp.encode(sentence, out_type=str)
print(len(tokens), tokens)

sentence = "विश्व कप में ऑस्ट्रेलिया के खिलाफ भारत की हार के बाद भारतीय क्रिकेट प्रशंसक काफी निराश थे।"
tokens = sp.encode(sentence, out_type=str)
print(len(tokens), tokens)
decoded_sentence = sp.decode(tokens)
print("Decoded sentence:", decoded_sentence)


def initialize_logger(log_file):
    logging.basicConfig(filename=log_file, level=logging.INFO,
                        format='%(asctime)s: %(message)s')


def log_parameters(vocab_size, pretrained_model, en_fertility_score,
                   hi_fertility_score, bn_fertility_score,
                   log_file='parameters.log'):
    initialize_logger(log_file)
    logging.info(
        f"Vocabulary Size: {vocab_size}, Tokenizer type: {pretrained_model}, "
        f"English Fertility Score: {en_fertility_score}, "
        f"Hindi Fertility Score: {hi_fertility_score}, "
        f"Bengali Fertility Score: {bn_fertility_score}"
    )


# Samanantar is an English-to-Indic parallel corpus: 'src' is English,
# 'tgt' is the Indic side of the chosen config.
dataset_hi = load_dataset('ai4bharat/samanantar', 'hi', split='train',
                          cache_dir='/sml1/atul/CENTRAL_CACHE')
dataset_bn = load_dataset('ai4bharat/samanantar', 'bn', split='train',
                          cache_dir='/sml1/atul/CENTRAL_CACHE')

test_en = dataset_hi['src'][:10000]
test_hi = dataset_hi['tgt'][:10000]
test_bn = dataset_bn['tgt'][:10000]

# Fertility = average number of subword tokens per whitespace-separated word;
# lower is better (closer to one token per word).
en_fertility_score = 0
hi_fertility_score = 0
bn_fertility_score = 0

for data in test_en:
    tok = sp.encode(data, out_type=str)
    en_fertility_score += len(tok) / len(data.split())
en_fertility_score /= len(test_en)

for data in test_hi:
    tok = sp.encode(data, out_type=str)
    hi_fertility_score += len(tok) / len(data.split())
hi_fertility_score /= len(test_hi)

for data in test_bn:
    tok = sp.encode(data, out_type=str)
    bn_fertility_score += len(tok) / len(data.split())
bn_fertility_score /= len(test_bn)

# Log the actual vocabulary size of the loaded model rather than a
# hard-coded constant, so the log stays correct if the model file changes.
log_parameters(sp.get_piece_size(), "en-only-spm", en_fertility_score,
               hi_fertility_score, bn_fertility_score)
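
# --- Optional refactor (a sketch, not part of the original run) ---
# The three averaging loops above are identical except for the input list.
# `fertility` below is a hypothetical helper that computes the same
# tokens-per-word average; it also skips blank lines, which would otherwise
# raise ZeroDivisionError in the loops above.
def fertility(sp_model, sentences):
    """Average number of subword tokens per whitespace-separated word."""
    total, count = 0.0, 0
    for text in sentences:
        words = text.split()
        if not words:  # skip blank lines instead of dividing by zero
            continue
        total += len(sp_model.encode(text, out_type=str)) / len(words)
        count += 1
    return total / count if count else 0.0

# Usage, equivalent to the loops above:
#   en_fertility_score = fertility(sp, test_en)
#   hi_fertility_score = fertility(sp, test_hi)
#   bn_fertility_score = fertility(sp, test_bn)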