# mumospee_librispeech/script/process_from_parquet.py
import dask.dataframe as dd
from functools import partial


def read_parquet_file(parquet_file_path, npartitions=50, top=None):
    print(f"Processing parquet file from {parquet_file_path}")
    file_name = parquet_file_path.split("/")[-1]
    parquet_df = dd.read_parquet(parquet_file_path, engine="pyarrow")
    parquet_df = parquet_df.repartition(npartitions=npartitions)  # smaller partitions
    if top:
        # compute=False keeps the result as a lazy Dask DataFrame.
        # Note: head() only draws rows from the first partition(s).
        parquet_df = parquet_df.head(top, compute=False)
    return parquet_df, file_name


def process_parquet_df(parquet_df, file_name, process_row_func, process_partition):
    # Bind the parquet file name so process_row_func can be called with just a row.
    process_row_with_params = partial(process_row_func, parquet_file_name=file_name)
    result_df = parquet_df.map_partitions(process_partition, process_row_with_params)
    return result_df
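

# --- Example helpers (hypothetical; not part of the original script) ---
# The contracts of process_row_func and process_partition are only implied by
# process_parquet_df above: map_partitions calls
# process_partition(partition, process_row_with_params), and the bound row
# function receives a pandas row plus the parquet_file_name keyword.
# A minimal sketch, assuming the parquet rows have a "text" column; if your
# row function changes the schema, pass an explicit meta= to map_partitions.
def example_process_row(row, parquet_file_name):
    # Hypothetical per-row transform: prefix the text with its source file.
    row["text"] = f"[{parquet_file_name}] {row['text']}"
    return row


def example_process_partition(partition, process_row_with_params):
    # Apply the bound row function to every row of this in-memory pandas partition.
    return partition.apply(process_row_with_params, axis=1)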


def save_to_csv(df, final_path):
    # Save the processed Dask DataFrame to a single CSV file.
    df.to_csv(final_path, index=False, single_file=True)
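

# --- Usage sketch (hypothetical paths; not part of the original script) ---
# Chains the three steps: read the parquet lazily, transform each row, and
# write a single CSV.
if __name__ == "__main__":
    parquet_df, file_name = read_parquet_file(
        "data/librispeech.parquet",  # assumed input path
        npartitions=50,
        top=1000,  # optional: keep only the first rows for a quick test
    )
    result_df = process_parquet_df(
        parquet_df, file_name, example_process_row, example_process_partition
    )
    save_to_csv(result_df, "processed.csv")  # assumed output path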