import logging

import numpy as np
from datasets import Dataset, concatenate_datasets
from rich.logging import RichHandler
import tqdm

# Register progress_apply on pandas objects so apply calls show a progress bar.
tqdm.tqdm.pandas()

logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.addHandler(RichHandler(rich_tracebacks=True))

# Turn off logging for datasets
logging.getLogger("datasets").setLevel(logging.ERROR)

class Pipeline:
    def __init__(self, datasources):
        self.datasources = datasources

    def run(self, dry_run=False):
        for datasource in self.datasources:
            # Row-wise cleaning and filtering is easier in pandas, so round-trip
            # through a DataFrame and convert back at the end.
            datasource["dataset"] = datasource["dataset"].to_pandas()
            column_name = datasource["columns"][0]
            logger.info(f"Running datasource: {datasource['name']}")
            for cleaner_func in datasource["cleaners"]:
                datasource["dataset"] = apply_cleaner(
                    datasource["dataset"], column_name, cleaner_func
                )
            for filter_func in datasource["filters"]:
                datasource["dataset"] = apply_filter(
                    datasource["dataset"], column_name, filter_func, dry_run
                )
            datasource["dataset"] = smart_from_pandas(datasource["dataset"])

def apply_cleaner(dataframe, column_name, cleaner_func):
    """Rewrite column_name by passing each value through cleaner_func."""
    logger.info(f"Running cleaner: {cleaner_func.__name__} on {column_name}")
    func = lambda x: cleaner_func(x[column_name])
    dataframe[column_name] = dataframe.progress_apply(func, axis=1)
    return dataframe

def apply_filter(dataframe, column_name, filter_func, dry_run):
    """Score each row with filter_func and, unless dry_run, drop failing rows."""
    logger.info(f"Running filter: {filter_func.__name__} on {column_name}")
    criteria_column_name = f"{column_name}_{filter_func.__name__}_criteria"
    func = lambda x: filter_func(x[column_name], dry_run=dry_run)
    dataframe[criteria_column_name] = dataframe.progress_apply(func, axis=1)
    logger.info(f"Criteria statistics:\n{dataframe[criteria_column_name].describe()}")
    if not dry_run:
        # Keep rows whose criteria value is truthy, then drop the bookkeeping
        # columns (the pandas round-trip can leave an __index_level_0__ column).
        dataframe = dataframe[dataframe[criteria_column_name].astype(bool)]
        dataframe = dataframe.drop(
            [criteria_column_name, "__index_level_0__"],
            axis=1,
            errors="ignore",
        )
    return dataframe

def smart_from_pandas(df, chunk_size=200_000):
    """Convert a large DataFrame to a Dataset in chunks to bound peak memory."""
    datasets = []
    for _, batch in df.groupby(np.arange(len(df)) // chunk_size):
        datasets.append(Dataset.from_pandas(batch, preserve_index=False))
    return concatenate_datasets(datasets)
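

# --- Usage sketch ---
# A minimal, hypothetical example of driving the pipeline. The datasource
# schema (name / dataset / columns / cleaners / filters) is what run() expects
# above; lowercase_text and min_length are made-up cleaner and filter functions.
if __name__ == "__main__":

    def lowercase_text(text):
        # Hypothetical cleaner: normalize casing.
        return text.lower()

    def min_length(text, dry_run=False):
        # Hypothetical filter: keep rows with at least 5 characters.
        # It must accept dry_run, since apply_filter passes it through.
        return len(text) >= 5

    datasources = [
        {
            "name": "demo",
            "dataset": Dataset.from_dict({"text": ["Hello WORLD", "hi"]}),
            "columns": ["text"],
            "cleaners": [lowercase_text],
            "filters": [min_length],
        }
    ]
    pipeline = Pipeline(datasources)
    pipeline.run(dry_run=False)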