File size: 1,978 Bytes
61f6e08 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 |
import pandas as pd
from process_from_parquet import read_parquet_file, process_parquet_df, save_to_csv
from process_audio import process_audio_column
def process_partition(partition, process_row_with_params):
    """Run the row processor over one partition and rebuild a DataFrame.

    ``partition.apply(..., axis=1)`` yields a Series of per-row dicts;
    converting via ``tolist()`` lets the DataFrame constructor align the
    dict keys against the fixed column order below (keys a row lacks
    become NaN in that column).
    """
    columns = ["path", "url", "type", "duration", "language",
               "transcript", "tag", "split", "license", "audio"]
    row_dicts = partition.apply(process_row_with_params, axis=1).tolist()
    return pd.DataFrame(row_dicts, columns=columns)
def _get_split(parquet_file):
if "train" in parquet_file:
return "train"
elif "test" in parquet_file:
return "test"
elif "dev" in parquet_file:
return "validation"
else:
return "train"
def process_row(row, parquet_file_name):
    """Build the metadata record for a single dataframe row.

    Copies the audio payload and transcript from the row, derives the
    split from the parquet file name, and fills in the constant fields
    (type, language, tag, license) plus the Hugging Face download URL.
    Returns the record as a dict.
    """
    return {
        "audio": row["audio"],
        "url": f"https://huggingface.co/datasets/meetween/mumospee_librispeech/resolve/main/librispeech-parquet/{parquet_file_name}",
        "transcript": row["text"],
        "type": "audio",
        "language": "en",
        "tag": "Librispeech",
        "split": _get_split(parquet_file_name),
        "license": "CC-BY-4.0",
    }
def main(config):
    """Run the parquet -> CSV conversion pipeline.

    Reads the parquet file named by ``config["parquet_file_path"]``
    (limited to ``config["top"]`` rows), processes every row into
    metadata records, augments the audio column, and writes the result
    to ``config["final_path"]`` as CSV.
    """
    df, name = read_parquet_file(config["parquet_file_path"], top=config["top"])
    processed = process_parquet_df(
        parquet_df=df,
        file_name=name,
        process_row_func=process_row,
        process_partition=process_partition,
    )
    save_to_csv(process_audio_column(processed), final_path=config["final_path"])
|