# Usage: python <this script> FOLDER
# FOLDER names the directory under base_path whose tokens should be counted;
# per-language, per-snapshot token totals are written to FOLDER_token_counts.csv.

import gzip
import json
import os
import sys
from multiprocessing import Pool

import pandas as pd
from tqdm import tqdm
from transformers import AutoTokenizer

# Variables to set
HF_TOKEN = ""  # Hugging Face access token (google/gemma-7b is a gated model)
base_path = "root"  # directory containing the per-dataset folders


# Export the token so transformers can authenticate the tokenizer download
os.environ["HF_TOKEN"] = HF_TOKEN

tokenizer = AutoTokenizer.from_pretrained('google/gemma-7b')
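# Note: Pool workers inherit this tokenizer via fork on Linux; on spawn-based
# platforms each worker re-imports the module and loads its own copy.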

def count_tokens_in_file(file_path):
    """Return (language_id, token_count) for one .json.gz shard.

    Shards are named "<lang>_....json.gz" and each line is a JSON object
    with a "raw_content" text field.
    """
    language_id = os.path.basename(file_path).split('_')[0]
    token_count = 0
    try:
        with gzip.open(file_path, 'rt') as f:
            for line in f:
                data = json.loads(line)
                token_count += len(tokenizer.encode(data["raw_content"]))
    except (OSError, EOFError, json.JSONDecodeError, UnicodeDecodeError, KeyError) as e:
        # Corrupt or truncated gzip/JSON: report the file and count it as 0.
        print(f"Skipping bad file {file_path}: {e}")
        return language_id, 0
    return language_id, token_count

def process_folder(folder_path):
    """Tokenize every .json.gz shard in folder_path across a worker pool."""
    files = [os.path.join(folder_path, f) for f in os.listdir(folder_path) if f.endswith('.json.gz')]
    folder_name = os.path.basename(folder_path)
    with Pool() as pool:
        results = list(tqdm(pool.imap(count_tokens_in_file, files),
                            total=len(files), desc=f'Processing {folder_name}'))
    return results

def main(input_folder, output_file):

    snaps = ["2018-17", "2018-22", "2018-26", "2018-30", "2018-34", "2018-39", "2018-43", "2018-47", "2018-51", "2019-04", "2019-09", "2019-13", "2019-18", "2019-22", "2019-26", "2019-30", "2019-35", "2019-39", "2019-43", "2019-47", "2019-51", "2020-05", "2020-10", "2020-16", "2020-24", "2020-29", "2020-34", "2020-40", "2020-45", "2020-50", "2021-04", "2021-10", "2021-17", "2021-21", "2021-25", "2021-31", "2021-39", "2021-43", "2021-49", "2022-05", "2022-21", "2022-27", "2022-33", "2022-40", "2022-49", "2023-06", "2023-14", "2023-23", "2023-40", "2023-50", "2024-10"]

    langs = ["as", "bn", "gu", "kn", "hi", "ml", "mr", "ne", "or", "sa", "sd", "ta", "ur", "te", "mai"]

    # One output column per language, plus a snapshot-id column; each
    # column holds one value per snapshot.
    counts = {"snapshot-id": sorted(snaps)}
    for lang in sorted(langs):
        counts[lang] = [0] * len(snaps)

    for idx, folder in enumerate(sorted(snaps)):
        results = process_folder(os.path.join(input_folder, folder))
        for language_id, number in results:
            if language_id in counts:  # ignore file prefixes not in `langs`
                counts[language_id][idx] += number
        # Progress line: per-language totals for the snapshot just processed.
        print([counts[lang][idx] for lang in sorted(langs)])

    df = pd.DataFrame(counts)
    df.to_csv(output_file, index=False)

if __name__ == "__main__":
    folder = sys.argv[1]
    input_folder = os.path.join(base_path, folder)
    output_file = folder + "_token_counts.csv"
    main(input_folder, output_file)
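
# Example invocation (the folder name here is illustrative):
#   python count_tokens.py my_dataset
# -> reads root/my_dataset/<snapshot>/<lang>_*.json.gz and writes
#    my_dataset_token_counts.csv with one row per snapshot and one
#    token-count column per language.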