# PopVQA/scripts/build_dataset.py
from dotenv import load_dotenv

load_dotenv()

import argparse
import os

import pandas as pd

from dataset_utils import *

'''
This script builds (or expands) the PopVQA dataset.

Before running it, make sure your directory contains the following:
1. A CSV file with the base dataframe, containing the columns 's_uri' (the
   subject's Wikidata URI) and 'type' (matched against the relation templates).
2. A directory named 'relation_templates' containing a CSV file of relation
   templates for each type. The templates are triplets 'uri' - 'relation' -
   'template'; see the existing files for reference.

Run the script with the following command:
    python build_dataset.py --base-df <path_to_base_df> --start <start_step> --end <end_step>
'''
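
# Illustrative layout (the file name 'entities.csv' and the type 'person' are
# hypothetical examples, not requirements):
#
#   <dir>/
#       entities.csv                          base dataframe with 's_uri', 'type'
#       relation_templates/
#           relation_templates_person.csv     columns: 'uri', 'relation', 'template'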


def main(args):
    dir_name, file_name = os.path.split(args.base_df)
    base_name, _ = os.path.splitext(file_name)
    base_df = pd.read_csv(args.base_df).drop_duplicates('s_uri')
    assert 'type' in base_df.columns, "The base dataframe must contain a 'type' column."
    types = base_df['type'].unique()
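
    # Fail fast: verify that every type in the base dataframe has a matching
    # relation-template file before doing any of the expensive steps below.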
    for entity_type in types:
        template_path = os.path.join(dir_name, "relation_templates", f"relation_templates_{entity_type}.csv")
        assert os.path.isfile(template_path), f"Missing relation template for type '{entity_type}' at: {template_path}"

    all_question_dfs = []
    for entity_type in types:
        type_df = base_df[base_df['type'] == entity_type].copy()
        type_dir = os.path.join(dir_name, entity_type)
        os.makedirs(type_dir, exist_ok=True)
        template_path = os.path.join(dir_name, "relation_templates", f"relation_templates_{entity_type}.csv")
        templates = pd.read_csv(template_path)
        print(f"Processing type: {entity_type}")
        if args.start <= 0 and args.end >= 0:
            subject_to_relation = get_all_properties(type_df)
            subject_to_relation = subject_to_relation[subject_to_relation['r_uri'].isin(templates['uri'])]
            subject_to_relation.to_csv(os.path.join(type_dir, f"{base_name}_subject_to_relation.csv"), index=False)
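
        # Step 1: collect aliases (alternative names) for the entities in the
        # subject-to-relation table; when resuming here, step 0's output is
        # reloaded from disk.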
        if args.start <= 1 and args.end >= 1:
            if args.start == 1:
                subject_to_relation = pd.read_csv(os.path.join(type_dir, f"{base_name}_subject_to_relation.csv"))
            aliases = get_aliases(subject_to_relation)
            aliases.to_csv(os.path.join(type_dir, f"{base_name}_all_aliases.csv"), index=False)
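
        # Step 2: determine the type of each attribute (answer) value; when
        # resuming here, the aliases are reloaded as well, since step 3 needs them.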
        if args.start <= 2 and args.end >= 2:
            if args.start == 2:
                subject_to_relation = pd.read_csv(os.path.join(type_dir, f"{base_name}_subject_to_relation.csv"))
                aliases = pd.read_csv(os.path.join(type_dir, f"{base_name}_all_aliases.csv"))
            a_types = attribute_type(subject_to_relation)
            a_types.to_csv(os.path.join(type_dir, f"{base_name}_complete_attribute_types.csv"), index=False)
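
        # Step 3: aggregate the base entities, aliases, relations, and attribute
        # types into question triplets.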
        if args.start <= 3 and args.end >= 3:
            if args.start == 3:
                subject_to_relation = pd.read_csv(os.path.join(type_dir, f"{base_name}_subject_to_relation.csv"))
                aliases = pd.read_csv(os.path.join(type_dir, f"{base_name}_all_aliases.csv"))
                a_types = pd.read_csv(os.path.join(type_dir, f"{base_name}_complete_attribute_types.csv"))
            triplets = aggregate_triplets(type_df, aliases, subject_to_relation, a_types, add_unesco=False)
            triplets.to_csv(os.path.join(type_dir, f"{base_name}_question_triplets.csv"), index=False)
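
        # Step 4: fill the relation templates to build the final question prompts,
        # tag them with the entity type, and keep them for the combined file.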
        if args.start <= 4 and args.end >= 4:
            if args.start == 4:
                triplets = pd.read_csv(os.path.join(type_dir, f"{base_name}_question_triplets.csv"))
            triplets = build_prompts(type_df, triplets, templates)
            triplets['type'] = entity_type
            triplets.to_csv(os.path.join(type_dir, f"{base_name}_questions.csv"), index=False)
            all_question_dfs.append(triplets)

    # Combine all question files and write to the top-level directory.
    if all_question_dfs:
        combined_df = pd.concat(all_question_dfs, ignore_index=True)
        combined_path = os.path.join(dir_name, f"{base_name}_all_questions.csv")
        combined_df.to_csv(combined_path, index=False)
        print(f"Combined questions file saved to {combined_path}")


def get_exp_parser():
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument('--base-df', type=str, required=True, help="Path to the base dataframe CSV.")
    parser.add_argument('--start', type=int, default=0, help="Start step for building the dataset.")
    parser.add_argument('--end', type=int, default=4, help="End step for building the dataset.")
    return parser


if __name__ == "__main__":
    parser = get_exp_parser()
    args = parser.parse_args()
    main(args)