File size: 3,175 Bytes
dfc2480
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
import json
import argparse


def create_entailment_data(train_data):
    """Attach an entailment label to every example that carries evidence.

    Keeps only the entries whose ``evidence`` list is non-empty, adds an
    ``entailment_answer`` field ('yes', 'no', or 'maybe') to each of them,
    writes the result to ``train_entailment_sharc.json``, and returns it.
    """
    entailment_data = [sample for sample in train_data if sample['evidence']]

    for sample in entailment_data:
        lowered = sample['answer'].lower()
        # Anything other than a plain yes/no answer is undetermined.
        label = lowered if lowered in ('yes', 'no') else 'maybe'
        # A non-empty history means part of the information needed to reach
        # the answer came from follow-up turns rather than the scenario, so
        # the scenario alone can only entail 'maybe'.
        if sample['history']:
            label = 'maybe'
        sample['entailment_answer'] = label

    entailment_path = 'train_entailment_sharc.json'
    with open(entailment_path, 'w') as f:
        f.write(json.dumps(entailment_data, indent=True))
    print('Wrote ShARC entailment data to ' + entailment_path)

    return entailment_data


def filter_train_data(sharc_train_path, sharc_dev_path):
    """Load the ShARC train and dev splits and keep only training utterances.

    Args:
        sharc_train_path: path to the ShARC train JSON file.
        sharc_dev_path: path to the ShARC dev JSON file.

    Returns:
        The combined examples whose ``utterance_id`` appears in
        ``train_utterance_ids.txt`` (one id per line, read from the cwd).
    """
    # Context managers close the handles deterministically instead of
    # leaking them until garbage collection (original used json.load(open(...))).
    with open(sharc_train_path) as f:
        sharc_train_data = json.load(f)
    with open(sharc_dev_path) as f:
        sharc_dev_data = json.load(f)
    sharc_data = sharc_train_data + sharc_dev_data

    with open('train_utterance_ids.txt') as f:
        # A set makes each membership test O(1) instead of scanning a list
        # once per example.
        train_utterance_ids = set(f.read().splitlines())
    return [d for d in sharc_data if d['utterance_id'] in train_utterance_ids]


def create_qa_data(entailment_data):
    """Build QA training examples from the entailment data.

    One example is produced per evidence follow-up (keeping its gold answer,
    lower-cased), per history follow-up (labelled 'maybe', since the scenario
    alone cannot answer it), and per non-yes/no final answer (also 'maybe').
    The examples are written to ``train_qa_sharc.json`` and returned.
    """
    qa_data = []
    for d in entailment_data:
        for e in d['evidence']:
            # Some evidence entries are not follow-up QA pairs; skip those.
            if 'follow_up_question' in e:
                qa_data.append({
                    'utterance_id': d['utterance_id'],
                    'context': d['scenario'],
                    'question': e['follow_up_question'],
                    'answer': e['follow_up_answer'].lower(),
                })

        # History questions were answered outside the scenario, so the
        # scenario alone can only support 'maybe'.
        for h in d['history']:
            qa_data.append({
                'utterance_id': d['utterance_id'],
                'context': d['scenario'],
                'question': h['follow_up_question'],
                'answer': 'maybe',
            })

        # A non-yes/no final answer is itself a follow-up question whose
        # answer cannot be derived from the scenario.
        if d['answer'].lower() not in ['yes', 'no']:
            qa_data.append({
                'utterance_id': d['utterance_id'],
                'context': d['scenario'],
                'question': d['answer'],
                'answer': 'maybe',
            })

    qa_path = 'train_qa_sharc.json'
    with open(qa_path, 'w') as f:
        # BUG FIX: this previously dumped ``entailment_data`` instead of the
        # QA examples built above, so the QA file never matched the return
        # value.
        f.write(json.dumps(qa_data, indent=True))
    print('Wrote ShARC QA data to ' + qa_path)
    return qa_data


if __name__ == '__main__':
    # CLI entry point: load and filter the ShARC splits, then emit the
    # entailment and QA training files.
    parser = argparse.ArgumentParser('Script for generating entailment and QA data from ShARC for training')
    parser.add_argument('-sharc_train_path', type=str, default='sharc_train.json', help='path to ShARC train file')
    parser.add_argument('-sharc_dev_path', type=str, default='sharc_dev.json', help='path to ShARC dev file')
    args = parser.parse_args()

    train_data = filter_train_data(args.sharc_train_path, args.sharc_dev_path)
    entailment_data = create_entailment_data(train_data)
    qa_data = create_qa_data(entailment_data)