python_code
stringlengths 0
4.04M
| repo_name
stringlengths 7
58
| file_path
stringlengths 5
147
|
---|---|---|
# define the grammar
# cf https://docs.google.com/presentation/d/1gzC878kkIgDL015c-kLFXEBc1SAuq_XOsheQowsyIEA/edit?usp=sharing
#####
## define node types
class InternalNode(object):  # bug fix: was `Object` (NameError at import)
    """A non-terminal node of the action grammar tree.

    Children are partitioned by kind:
      intern_node_children  -- name -> InternalNode subtrees
      categorical_children  -- name -> CategoricalLeaf choices
      span_children         -- name -> SpanLeaf text spans
    node_value holds the selected choice; False means "unset".
    """

    def __init__(self, name):
        self.name = name
        self.node_choices = []          # allowed values for node_value
        self.intern_node_children = {}
        self.categorical_children = {}
        self.span_children = {}         # bug fix: was `seld.span_children`
        self.node_value = False
class SpanLeaf(object):  # bug fix: was `Object` (NameError at import)
    """Leaf whose value is a span of words; False means "unset"."""

    def __init__(self):
        self.span_value = False
class CategoricalLeaf(object):  # bug fix: was `Object` (NameError at import)
    """Leaf whose value is one of a fixed set of choices; False means "unset"."""

    def __init__(self):
        self.choices = []       # subclasses extend this in their __init__
        self.cat_value = False
#####
# define leaves
class LocationTypeLeaf(CategoricalLeaf):
    """Categorical leaf over location types.

    A non-absolute location may additionally be anchored on a BlockObject
    or a Mob.
    """

    def __init__(self, absolute):
        # Bug fix: `super(LocationTypeLeaf).__init__(self)` never called
        # CategoricalLeaf.__init__, so self.choices did not exist; and
        # self.absolute was read below without ever being assigned.
        super(LocationTypeLeaf, self).__init__()
        self.absolute = absolute
        self.choices += ['Coordinates', 'AgentPos',
                         'SpeakerPos', 'SpeakerLook']
        if not self.absolute:
            self.choices += ['BlockObject', 'Mob']
class ConditionTypeLeaf(CategoricalLeaf):
    """Categorical leaf over stop-condition types."""

    def __init__(self):
        # Bug fix: `super(ConditionTypeLeaf).__init__(self)` never invoked
        # CategoricalLeaf.__init__, so self.choices did not exist.
        super(ConditionTypeLeaf, self).__init__()
        self.choices += ['AdjacentToBlockType', 'Never']
class RepeatTypeLeaf(CategoricalLeaf):
    """Categorical leaf over repeat types."""

    def __init__(self):
        # Bug fix: `super(RepeatTypeLeaf).__init__(self)` never invoked
        # CategoricalLeaf.__init__, so self.choices did not exist.
        super(RepeatTypeLeaf, self).__init__()
        self.choices += ['FOR', 'ALL']
class RepeatDirectionLeaf(CategoricalLeaf):
    """Categorical leaf over repeat directions."""

    def __init__(self):
        # Bug fix: `super(RepeatDirectionLeaf).__init__(self)` never invoked
        # CategoricalLeaf.__init__, so self.choices did not exist.
        super(RepeatDirectionLeaf, self).__init__()
        self.choices += ['RIGHT', 'UP']  # TODO: check with Kavya
class RelativeDirectionLeaf(CategoricalLeaf):
    """Categorical leaf over directions relative to a reference point."""

    def __init__(self):
        # Bug fix: `super(RelativeDirectionLeaf).__init__(self)` never invoked
        # CategoricalLeaf.__init__, so self.choices did not exist.
        super(RelativeDirectionLeaf, self).__init__()
        self.choices += ['LEFT', 'RIGHT', 'UP', 'DOWN',
                         'FRONT', 'BACK', 'AWAY']
#####
# build tree
def make_repeat_node(name):
    """Build a 'repeat' subtree: repeat type, repeat direction, count span."""
    repeat = InternalNode(name)
    repeat.node_choices = [True, False]
    repeat.categorical_children['repeat_type'] = RepeatTypeLeaf()
    # Bug fix: the direction child was built from RepeatTypeLeaf; its
    # choices should be directions (RepeatDirectionLeaf), not FOR/ALL.
    repeat.categorical_children['repeat_dir'] = RepeatDirectionLeaf()
    repeat.span_children['repeat_count'] = SpanLeaf()
    return repeat
def make_location_node(loc_name, ref_name,
                       ref_loc_name, repeat_name):
    """Build a location subtree.

    Structure: location -> reference object -> (its own location, a repeat
    node). Names for each internal node are supplied by the caller so the
    same shape can be instantiated at several places in the tree.
    """
    repeat = make_repeat_node(repeat_name)

    lr_location = InternalNode(ref_loc_name)
    lr_location.node_choices = [True, False]
    # The reference object's own location is absolute (no further nesting).
    lr_location.categorical_children['location_type'] = LocationTypeLeaf(True)
    lr_location.span_children['coordinates'] = SpanLeaf()

    l_ref_object = InternalNode(ref_name)
    l_ref_object.node_choices = [True, False]
    l_ref_object.intern_node_children[ref_loc_name] = lr_location
    # Bug fix: original referenced the undefined name `lr_repeat` (NameError).
    l_ref_object.intern_node_children[repeat_name] = repeat
    l_ref_object.span_children['has_name_'] = SpanLeaf()
    l_ref_object.span_children['has_colour_'] = SpanLeaf()
    l_ref_object.span_children['has_size_'] = SpanLeaf()

    location = InternalNode(loc_name)
    location.node_choices = [True, False]
    location.intern_node_children[ref_name] = l_ref_object
    location.categorical_children['location_type'] = LocationTypeLeaf(False)
    location.categorical_children['relative_direction'] = RelativeDirectionLeaf()
    location.span_children['coordinates'] = SpanLeaf()
    return location
def make_full_action():  # bug fix: the `def` line was missing its colon
    """Assemble the full action grammar tree and return its root node."""
    # ACTION_LOCATION
    action_location = make_location_node('action_location', 'al_ref_object',
                                         'alr_location', 'alr_repeat')
    # STOP_CONDITION
    stop_condition = InternalNode('stop_condition')
    stop_condition.node_choices = [True, False]
    # Bug fix: ConditionTypeLeaf.__init__ takes no argument (was passed True).
    stop_condition.categorical_children['condition_type'] = ConditionTypeLeaf()
    stop_condition.span_children['block_type'] = SpanLeaf()
    # SCHEMATIC
    schematic = InternalNode('schematic')
    schematic.node_choices = [True, False]
    schematic.intern_node_children['s_repeat'] = make_repeat_node('s_repeat')
    for k in ["has_block_type_", "has_name_", "has_attribute_", "has_size_",
              "has_orientation_", "has_thickness_", "has_colour_",
              "has_height_", "has_length_", "has_radius_", "has_slope_",
              "has_width_", "has_base_", "has_distance_"]:
        schematic.span_children[k] = SpanLeaf()
    # ACTION_REPEAT
    action_repeat = make_repeat_node('action_repeat')
    # ACTION_REF_OBJECT
    ar_location = make_location_node('ar_location', 'arl_ref_object',
                                     'arlr_location', 'arlr_repeat')
    # The repeat node was previously built inline, duplicating
    # make_repeat_node statement for statement; reuse the helper.
    ar_repeat = make_repeat_node('ar_repeat')
    action_ref_object = InternalNode('action_ref_object')
    action_ref_object.node_choices = [True, False]
    action_ref_object.intern_node_children['ar_location'] = ar_location
    action_ref_object.intern_node_children['ar_repeat'] = ar_repeat
    action_ref_object.span_children['has_name_'] = SpanLeaf()
    action_ref_object.span_children['has_colour_'] = SpanLeaf()
    action_ref_object.span_children['has_size_'] = SpanLeaf()
    # ROOT
    action = InternalNode('action')
    action.node_value = "Noop"
    action.node_choices = ["Build", "Noop", "Span", "Fill",
                           "Destroy", "Move", "Undo", "Stop",
                           "Dig", "Tag", "FreeBuild", "Answer"]
    action.intern_node_children['action_location'] = action_location
    action.intern_node_children['stop_condition'] = stop_condition
    action.intern_node_children['schematic'] = schematic
    action.intern_node_children['action_repeat'] = action_repeat
    action.intern_node_children['action_ref_object'] = action_ref_object
    action.span_children['tag'] = SpanLeaf()
    action.span_children['has_size_'] = SpanLeaf()
    action.span_children['has_length_'] = SpanLeaf()
    action.span_children['has_depth_'] = SpanLeaf()
    action.span_children['has_width_'] = SpanLeaf()
    return action
| craftassist-master | acl2020_submission/writeup/figures/acl_tree.py |
# 'location', 'move'
# 'reference_object', 'spawn'
# 'reference_object', 'destroy'
# 'schematic', 'dig'
# 'reference_object', fill
# 'reference_object', 'OtherAction'
# 'location', 'OtherAction'
# 'target_action_type', 'stop'
# 'target_action_type', 'resume'
# 'target_action_type', 'undo'
# The direction choices offered after any "somewhere relative to X" answer.
# (Previously this 10-entry list was copied verbatim three times below.)
_REL_DIR_CHOICES = [
    {"text": "Left", "key": "LEFT"},
    {"text": "Right", "key": "RIGHT"},
    {"text": "Above", "key": "UP"},
    {"text": "Below", "key": "DOWN"},
    {"text": "In front", "key": "FRONT"},
    {"text": "Behind", "key": "BACK"},
    {"text": "Away from", "key": "AWAY"},
    {"text": "Nearby or close to", "key": "NEAR"},
    {"text": "Around", "key": "AROUND"},
    {"text": "Exactly at", "key": "EXACT"},
]


def _rel_dir_followup(anchor):
    """Follow-up question asking for the direction relative to *anchor*
    (e.g. 'speaker is looking')."""
    return [
        {
            "text": "Where (which direction) in relation to where the "
            + anchor + "?",
            "key": "relative_direction",
            "radio": _REL_DIR_CHOICES,
        }
    ]


# Top-level multiple-choice options describing where a location comes from.
LOCATION_RADIO = [
    {"text": "Not specified", "key": None, "tooltip": "The location information is missing."},
    {
        "text": "The location is represented using an indefinite noun like 'there' or 'over here'",
        "key": "CONTAINS_COREFERENCE",
        "tooltip": "e.g. 'there', 'here', 'over there' etc",
    },
    {
        "text": "Exact numerical coordinates are given",
        "key": "coordinates_check",
        "tooltip": "Exact numeric coordinates are specified.",
        "next": [
            {
                "text": "Click on all words representing the coordinates",
                "key": "yes.coordinates",
                "span": True,
                "tooltip": "e.g. in 'make a box at 4 , 5 , 6' select all: '4 , 5 , 6'",
            }
        ],
    },
    {
        "text": "Where the speaker is looking",
        "key": "SPEAKER_LOOK",
        "tooltip": "e.g. 'where I am looking'",
    },
    {
        "text": "Somewhere relative to where the speaker is looking",
        "key": "SPEAKER_LOOK_REL",
        "tooltip": "e.g. 'in front of where I am looking'",
        "next": _rel_dir_followup("speaker is looking"),
    },
    {
        "text": "Where the speaker is standing",
        "key": "SPEAKER_POS",
        "tooltip": "e.g. 'by me', 'where I am', 'where I am standing'",
    },
    {
        "text": "Somewhere relative to where the speaker is standing",
        "key": "SPEAKER_POS_REL",
        "tooltip": "e.g. 'in front of where I am', 'behind me'",
        "next": _rel_dir_followup("speaker is standing"),
    },
    {
        "text": "Where the assistant is standing",
        "key": "AGENT_POS",
        "tooltip": "e.g. 'by you', 'where you are', 'where you are standing'",
    },
    {
        "text": "Somewhere relative to where the assistant is standing",
        "key": "AGENT_POS_REL",
        "tooltip": "e.g. 'in front of you', 'behind you'",
        "next": _rel_dir_followup("assistant is standing"),
    },
]
# Questions about the reference object that anchors a relative location:
# a span question naming it, then a yes/no coreference question.
LOCATION_REL_OBJECT_QUESTIONS = [
    {
        "text": "Click on all words specifying the object / area relative of which the location is given",
        "key": "reference_object.has_name",
        "tooltip": "e.g. in 'make 5 copies to the left of the cow' select 'cow'",
        "span": True,
    },
    {
        "text": "Are there indefinite nouns or pronouns specifying the relative object?",
        "key": "reference_object.contains_coreference",
        "tooltip": "e.g. 'to the right of this', 'near that', 'behind these', 'next to those', 'underneath it' etc",
        "add_radio_other": False,
        "radio": [{"text": "Yes", "key": "yes"}, {"text": "No", "key": "no"}],
    },
    # NOTE: an optional span question about the reference object's own
    # location used to live here; it is intentionally disabled.
]
# Tooltips shared by every relative-direction follow-up below.
# (Previously each of the 13 direction options repeated both verbatim.)
_REL_OBJ_SPAN_TOOLTIP = "e.g. in 'make 5 copies to the left of the cow' select 'cow'"
_REL_OBJ_COREF_TOOLTIP = (
    "e.g. 'to the right of this', 'near that', 'behind these', 'next to those', 'underneath it' etc"
)


def _rel_dir_option(text, key, phrase):
    """One direction option: a span question ('... <phrase> the location is
    given') plus a yes/no coreference question about the reference object."""
    return {
        "text": text,
        "key": key,
        "next": [
            {
                "text": "Click on all words specifying the object / area "
                + phrase
                + " the location is given",
                "key": "reference_object.has_name",
                "tooltip": _REL_OBJ_SPAN_TOOLTIP,
                "span": True,
            },
            {
                "text": "Are there indefinite nouns or pronouns specifying the relative object?",
                "key": "reference_object.contains_coreference",
                "tooltip": _REL_OBJ_COREF_TOOLTIP,
                "add_radio_other": False,
                "radio": [{"text": "Yes", "key": "yes"}, {"text": "No", "key": "no"}],
            },
        ],
    }


def _between_option():
    """The BETWEEN option asks about two reference objects instead of one."""
    followups = []
    for ordinal, which, example in (("first", "reference_object_1", "car"),
                                    ("second", "reference_object_2", "house")):
        followups.append(
            {
                "text": "Click on all words specifying the "
                + ordinal
                + " object / area relative to which the location is given",
                "key": which + ".has_name",
                "tooltip": "e.g. in 'make 5 copies between the car and the house' select '"
                + example + "'",
                "span": True,
            }
        )
        followups.append(
            {
                "text": "Are there indefinite nouns or pronouns specifying the "
                + ordinal + " relative object?",
                "key": which + ".contains_coreference",
                "tooltip": _REL_OBJ_COREF_TOOLTIP,
                "add_radio_other": False,
                "radio": [{"text": "Yes", "key": "yes"}, {"text": "No", "key": "no"}],
            }
        )
    return {"text": "Between two object(s) / area(s)", "key": "BETWEEN", "next": followups}


# The "relative to another object" branch of the location question.
LOCATION_REL_OBJECT = [
    {
        "text": "Somewhere relative to (or exactly at) another object(s) / area(s)",
        "key": "REFERENCE_OBJECT",
        "next": [
            {
                "text": "Where (which direction) in relation to the other object(s)?",
                "key": "relative_direction",
                "radio": [
                    _rel_dir_option("Left or towards the west direction", "LEFT",
                                    "to the left of which"),
                    _rel_dir_option("Right or towards the east direction", "RIGHT",
                                    "to the right of which"),
                    _rel_dir_option("Above or towards the north direction", "UP",
                                    "above which"),
                    _rel_dir_option("Below or towards the south direction", "DOWN",
                                    "below which"),
                    _rel_dir_option("In front", "FRONT", "in front of which"),
                    _rel_dir_option("Behind", "BACK", "at the back of which"),
                    _rel_dir_option("Away from", "AWAY", "away from which"),
                    _rel_dir_option("Inside", "INSIDE", "inside which"),
                    _rel_dir_option("Outside", "OUTSIDE", "outside which"),
                    _between_option(),
                    _rel_dir_option("Nearby or close to", "NEAR", "close to which"),
                    _rel_dir_option("Around", "AROUND", "around which"),
                    _rel_dir_option("Exactly at", "EXACT", "exactly where"),
                ],
            }
        ],
    }
]
# Optional span questions about additional attributes of a reference object;
# every entry asks the annotator to select the words for one "has_<attr>".
REF_OBJECT_OPTIONALS = [
    {"text": text, "key": "reference_object." + attr, "span": True, "tooltip": tip}
    for text, attr, tip in [
        ("What is the building material?", "has_block_type",
         "e.g. in 'destroy the tiny blue glass cube' select 'glass'"),
        ("What is the color?", "has_colour",
         "e.g. in 'destroy the tiny blue glass cube' select 'blue'"),
        ("What is the size?", "has_size",
         "e.g. in 'destroy the tiny blue glass cube' select 'tiny'"),
        ("What is the width?", "has_width",
         "e.g. in 'next to the 5 step wide hole' select '5'"),
        ("What is the height?", "has_height",
         "e.g. in 'next to the tower that is 20 blocks high' select '20'"),
        ("What is the depth?", "has_depth",
         "e.g. in 'fill the 20 block deep hole for me' select '20'"),
    ]
]
def _prop_check(text, key, tooltip, q_text, q_key, q_tooltip):
    """One checkbox property whose follow-up is a single span question.

    (Previously this 12-line dict shape was repeated verbatim ~20 times.)
    """
    return {
        "text": text,
        "key": key,
        "tooltip": tooltip,
        "next": [{"text": q_text, "key": q_key, "span": True, "tooltip": q_tooltip}],
    }


# Shared catch-all option; the same entry appeared in three question sets.
# Typo fix: tooltip previously said "propoerty" in two of the copies.
_TAG_CHECK = _prop_check(
    "Some other property not mentioned above",
    "tag_check",
    "Select this if any property not explicitly mentioned above is given",
    "Select all words for this property",
    "has_tag",
    "e.g. in 'make a bright cabin' select 'bright'",
)


def get_questions(child, action, optional_words=None):
    """Return the annotation question(s) for *child* of the given *action*.

    Returns a dict for "schematic"/"tag_val", a list of dicts for
    "location"/"reference_object", and None when no question applies.
    optional_words is interpolated into the "location" question text for
    build/copy/spawn/dig (assumed to be a str in those cases — TODO confirm
    callers always supply it then).
    """
    QUESTION = None
    if child == "schematic":
        if action == "build":
            QUESTION = {
                "text": "Click on all properties of the thing to be built mentioned in the highlighted text.",
                "key": "schematic",
                "checkbox": True,
                "tooltip": "e.g. in 'make a blue square' click on 'Name' as well as 'Colour' since both are specified in 'blue square'",
                "add_radio_other": False,
                "radio": [
                    _prop_check(
                        "Name", "name_check",
                        "Select this if the name of the thing to be built is mentioned",
                        "Select all words that indicate the name of the thing to be built",
                        "has_name",
                        "e.g. in 'Build a big green wooden house there' select 'house'",
                    ),
                    _prop_check(
                        "Abstract/non-numeric size (e.g. 'big', 'small', etc.)",
                        "size_check",
                        "Select this if the size of the thing to be built is specified",
                        "Select all words that represent the size",
                        "has_size",
                        "e.g. in 'Build a big green wooden house there' select 'big'",
                    ),
                    _prop_check(
                        "Colour", "colour_check",
                        "Select this if the colour of what needs to be built is specified",
                        "Select all words that represent the colour.",
                        "has_colour",
                        "e.g. in 'Build a big green wooden house there' select 'green'",
                    ),
                    _prop_check(
                        "The building material", "block_type_check",
                        "Select this if the building material is mentioned",
                        "What should it be built out of? Select all words.",
                        "has_block_type",
                        "e.g. in 'Build a big green wooden house there' select 'wooden'",
                    ),
                    _prop_check(
                        "Height", "height_check",
                        "Select this if the height is explicitly specified",
                        "Select all number words for height.",
                        "has_height",
                        "e.g. in 'make a 5 block tall tower here' select '5'",
                    ),
                    _prop_check(
                        "Width", "width_check",
                        "Select this if the width is explicitly specified",
                        "Select all number words for width",
                        "has_width",
                        "e.g. in 'make a 4 blocks wide square there' select '4'",
                    ),
                    _prop_check(
                        "Length", "length_check",
                        "Select this if the length is explicitly specified",
                        "Select all number words for length",
                        "has_length",
                        "e.g. in 'make a 4 blocks long square there' select '4'",
                    ),
                    _prop_check(
                        "Thickness", "thickness_check",
                        "Select this if the thickness is explicitly specified",
                        "Select all number words for thickness",
                        "has_thickness",
                        "e.g. in 'make a hollow rectangle of thickness 3' select '3'",
                    ),
                    _TAG_CHECK,
                ],
            }
        elif action == "dig":
            QUESTION = {
                "text": "Click on all properties of the thing to be dug mentioned in the highlighted text.",
                "key": "schematic",
                "checkbox": True,
                "tooltip": "e.g. in 'dig a 10 x 10 pool' click on 'Name' as well as 'length' and 'width' since all are specified in '10 x 10 pool'",
                "add_radio_other": False,
                "radio": [
                    _prop_check(
                        "Name", "name_check",
                        "Select this if the name of the thing to be dug is mentioned",
                        "Select all words that indicate the name of the thing to be dug",
                        "has_name",
                        "e.g. in 'dig a 10 x 10 pool there' select 'pool'",
                    ),
                    _prop_check(
                        "Length", "length_check",
                        "Select this if the length is explicitly specified",
                        "Select all number words for length.",
                        "has_length",
                        "e.g. in 'dig a 5 feet by 5 feet hole here' select '5'",
                    ),
                    _prop_check(
                        "Width", "width_check",
                        "Select this if the width is explicitly specified",
                        "Select all number words for width",
                        "has_width",
                        "e.g. in 'dig a 2 by 3 hole there' select '3'",
                    ),
                    _prop_check(
                        "Depth", "depth_check",
                        "Select this if the depth is explicitly specified",
                        "Select all number words for depth",
                        "has_depth",
                        "e.g. in 'dig a 1 x 2 x 3 pool' select '3'",
                    ),
                    _prop_check(
                        "Abstract/non-numeric size (e.g. 'big', 'small', etc.)",
                        "size_check",
                        "Select this if the size of the thing to be dug is specified without number words",
                        "Select all words that describe the abstract size",
                        "has_size",
                        "e.g. in 'dig a big hole' select 'big'",
                    ),
                    _TAG_CHECK,
                ],
            }
    elif child == "location":
        question_1 = None
        if action in ["build", "copy", "spawn", "dig"]:
            question_1 = "Where should the " + optional_words + " happen?"
        elif action == "move":
            question_1 = "Where should the assistant move to"
        elif action == "dance":
            question_1 = "Where should the assistant dance"
        elif action == "otheraction":
            question_1 = "Give us more details about the location"
        QUESTION = [
            {"text": question_1, "key": "location", "radio": LOCATION_RADIO + LOCATION_REL_OBJECT},
            {
                "text": "If a number of steps is specified, how many ?",
                "key": "location.steps",
                "span": True,
                "optional": True,
                "tooltip": "e.g. in 'make a square 5 steps behind that' select '5'",
            },
        ]
    elif child == "tag_val":
        QUESTION = {
            "text": "Click on options below to determine the intent of the text.",
            "key": "memory_data",
            "tooltip": "e.g. in 'good job' click on 'Feedback to the assistant'",
            "add_radio_other": False,
            "radio": [
                {
                    "text": "Feedback to the assistant",
                    "key": "memory_type.reward",
                    "tooltip": "e.g. select for 'that was nice' or 'no that's wrong' ",
                    "next": [
                        {
                            "text": "Select the kind of feedback",
                            "key": "reward_value",
                            "add_radio_other": False,
                            "tooltip": "e.g. 'Positive feedback' for good things like 'you did a good job', 'that was a nice",
                            "radio": [
                                {
                                    "text": "Positive feedback",
                                    "key": "POSITIVE",
                                    "tooltip": "e.g. for good things like 'you did a good job', 'that was a nice'",
                                },
                                {
                                    "text": "Negative feedback",
                                    "key": "NEGATIVE",
                                    "tooltip": "e.g. for corrections like 'that was wrong', 'you failed'",
                                },
                            ],
                        }
                    ],
                },
                {
                    "text": "To assign tag, name or description",
                    "key": "memory_type.triple",
                    "tooltip": "e.g. 'that looks nice', 'tag the house as bright' etc",
                    "radio": [
                        {"text": "The highlighted word(s) is a kind of colour", "key": "has_colour"},
                        {"text": "The highlighted word(s) represents size", "key": "has_size"},
                        {"text": "The highlighted word(s) is something else", "key": "has_tag"},
                    ],
                },
            ],
        }
    elif child == "reference_object" or (child == "filters" and action == "tag"):
        word = ""
        if action == "otheraction":
            question_1 = "There are words or pronouns that refer to the object (e.g. 'this', 'that', 'these', 'those', 'it' etc)"
            question_2 = "What is the name of the reference object"
        else:
            # Past-tense verb for the action; "" for actions not listed
            # (matches the original elif chain's fall-through).
            word = {
                "copy": "copied",
                "freebuild": "completed",
                "destroy": "destroyed",
                "fill": "filled",
                "spawn": "spawned",
                "tag": "tagged",
            }.get(action, "")
            question_1 = (
                "There are words or pronouns that refer to the object to be "
                + word
                + " (e.g. 'this', 'that', 'these', 'those', 'it' etc)"
            )
            question_2 = "What is the name of the object that should be " + word + "?"
        QUESTION = [
            {
                "text": "Click on all mentioned properties of the object in highlighted text.",
                "key": "reference_object",
                "checkbox": True,
                "tooltip": "e.g. in 'destroy the blue square' click on 'Name' as well as 'Colour' since both are specified in 'blue square'",
                "add_radio_other": False,
                "radio": [
                    {
                        "text": "Name",
                        "key": "name_check",
                        "tooltip": "Select this if the name / word for the object is mentioned",
                        "next": [{"text": question_2, "key": "has_name", "span": True}],
                    },
                    {
                        "text": question_1,
                        "key": "contains_coreference.yes",
                        "tooltip": "e.g. 'this', 'that', 'these', 'those', 'it' etc",
                        "add_radio_other": False,
                    },
                    _prop_check(
                        "The building material", "block_type_check",
                        "Select this if the building material of the object is mentioned",
                        "What is the building material? Select all words.",
                        "has_block_type",
                        "e.g. in 'destroy the tiny blue glass cube' select 'glass'",
                    ),
                    _prop_check(
                        "Colour", "colour_check",
                        "Select this if the colour of the object is specified",
                        "What is the color?",
                        "has_colour",
                        "e.g. in 'destroy the tiny blue glass cube' select 'blue'",
                    ),
                    _prop_check(
                        "Abstract/non-numeric size (e.g. 'big', 'small', etc.)",
                        "size_check",
                        "Select this if the abstract/non-numeric size of the object is specified",
                        "What is the size?",
                        "has_size",
                        "e.g. in 'destroy the tiny blue glass cube' select 'tiny'",
                    ),
                    _prop_check(
                        "Height", "height_check",
                        "Select this if the height is explicitly specified",
                        "What is the height?",
                        "has_height",
                        "e.g. in 'complete the 20 blocks high tower' select '20'",
                    ),
                    _prop_check(
                        "Length", "length_check",
                        "Select this if the length is explicitly specified",
                        "Select all number words for length.",
                        "has_length",
                        "e.g. in 'dig a 5 feet by 5 feet hole here' select '5'",
                    ),
                    _prop_check(
                        "Width", "width_check",
                        "Select this if the width is explicitly specified",
                        "Select all number words for width",
                        "has_width",
                        "e.g. in 'dig a 2 by 3 hole there' select '3'",
                    ),
                    _prop_check(
                        "Depth", "depth_check",
                        "Select this if the depth is explicitly specified",
                        "Select all number words for depth",
                        "has_depth",
                        "e.g. in 'dig a 1 x 2 x 3 pool' select '3'",
                    ),
                    _TAG_CHECK,
                ],
            },
            {
                "text": "Is the location of the reference object mentioned ? Select all words.",
                "key": "reference_object.location",
                "tooltip": "e.g. in 'destroy the house behind the tree' select 'behind the tree'",
                "radio": LOCATION_RADIO + LOCATION_REL_OBJECT,
            },
        ]
    return QUESTION
| craftassist-master | acl2020_submission/annotation_tools/tools/question_flow_for_step_2.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
# Maximum number of words per command; one CSS visibility rule is
# generated per word slot.
MAX_WORDS = 30

# Build a <script> tag that hides the span for every word slot whose HIT
# template variable ${word<cmd><pos>} is empty (3 commands x MAX_WORDS
# slots). The doubled braces survive str.format as literal JS/CSS braces.
_css_chunks = ["""
<script>
var node = document.createElement('style');
"""]
for cmd in range(1, 4):
    for pos in range(MAX_WORDS):
        _css_chunks.append(
            """
if (! "${{word{j}{i}}}") {{
node.innerHTML += '.word{j}{i} {{ display: none }} '
}}
""".format(i=pos, j=cmd)
        )
_css_chunks.append("""
document.body.appendChild(node);
</script>
""")
CSS_SCRIPT = "".join(_css_chunks)

# Activate Bootstrap tooltips once the page is ready.
JS_SCRIPT = """
$(function () {
$('[data-toggle="tooltip"]').tooltip()
})
"""
BEFORE = """
<!-- Bootstrap v3.0.3 -->
<link href="https://s3.amazonaws.com/mturk-public/bs30/css/bootstrap.min.css" rel="stylesheet" />
<section class="container" id="Other" style="margin-bottom:15px; padding: 10px 10px;
font-family: Verdana, Geneva, sans-serif; color:#333333; font-size:0.9em;">
<div class="row col-xs-12 col-md-12">
<!-- Instructions -->
<div class="panel panel-primary">
<div class="panel-heading"><strong>Instructions</strong></div>
<div class="panel-body" style="font-size:14px;">
<p><b>Your HIT will will be rejected if you don't answer all three commands before submitting.</b></p>
<p>Please help us determine the exact meaning of the command shown to you.
The command is given to an AI assistant to help out a player in the game of Minecraft.</p>
<p>For each command, you will answer a series of questions. Each question is either multiple-choice,
or requires you to select which words in the sentence correspond to which components of the command.</p>
<p>
<b>1. Place your mouse arrow over the questions and options for detailed tips.</b></br>
<b>2. When selecting the words, please select all words (along with properties of the thing).</b> So in "destroy the blue house" select "blue house" and not just "house"</br>
<b>3. Please also note that: </b>some questions are optional, click on "Click if specified" if you think answers to those are mentioned in the command.
</p>
<p>Few examples below:</p>
<p><b>"come"</b>
<ul>
<li>For "What action is being requested?", the answer is "Move or walk somewhere"</li>
</ul></p>
<p><b>"make two small cubes here"</b>
<ul>
<li>"What action is being requested?" -> "Build, make a copy or complete something"</li>
<li>"Is this an exact copy or duplicate of an existing object?" -> "No". The assistant is asked to "Build a fresh complete, specific object"</li>
<li>For "Select words specifying what needs to be built" select the words: 'small cubes'</li>
<li>For "Select words specifying where the construction needs to happen", click on the word: 'here'</li>
<li>For "How many times should this action be performed?", select "Repeatedly, a specific number of times"
and then "two" for 'How many times'</li>
</ul>
</p>
<p><b>"dig until you reach water"</b>
<ul>
<li>"What action is being requested?" -> "Dig"</li>
<li>For "How many times should this action be performed?" -> 'Repeated until a certain condition is met'</li>
<li>For "Until the assistant reaches some object(s) /area" select: "water"</li>
</ul>
<b>Note that: repeats may be disguised, for example: 'follow the pig' should be interpreted as "repeat forever: move to the location of the pig".</b>
</p>
<p><b>"go to the large pole near the bridge"</b></br>
<ul>
<li>"What action is being requested?" -> "Move or walk somewhere"</li>
<li>"Select words specifying the location to which the agent should move" -> "the large pole near the bridge". </li>
</ul>
</p>
</div>
</div>
<!-- Content Body -->
<section>
"""
AFTER = """
</section>
<!-- End Content Body -->
</div>
</section>
<style type="text/css">
fieldset {{
padding: 10px;
font-family: Georgia;
font-size: 14px;
background: #fbfbfb;
border-radius: 5px;
margin-bottom: 5px;
}}
.tooltip {{
font-family: Georgia;
font-size: 18px;
}}
.tooltip .tooltip-inner {{
background-color: #ffc;
color: #c00;
min-width: 250px;
}}
</style>
{CSS_SCRIPT}
<script src="https://code.jquery.com/jquery.js"></script>
<script src="https://netdna.bootstrapcdn.com/bootstrap/3.0.3/js/bootstrap.min.js"></script>
<script>{JS_SCRIPT}</script>
""".format(
CSS_SCRIPT=CSS_SCRIPT, JS_SCRIPT=JS_SCRIPT
)
if __name__ == "__main__":
    # Tool modules live in this directory; imported lazily so the template
    # constants above stay importable on their own.
    import render_questions
    from question_flow_for_step_1 import *

    # Emit the full qualification-HIT page to stdout: instructions header
    # (BEFORE), then one question block per command (three commands per HIT),
    # then scripts/styles (AFTER).
    print(BEFORE)
    render_output = """<div style='font-size:16px;display:block' id='div_1'>"""
    # Sticky banner keeps the command visible while the worker scrolls.
    render_output += """<div class="well" style="position:sticky;position:-webkit-sticky;top:0;z-index:9999" id='cmd_1'>
    <b>Command: ${command_1}</b></div>"""
    render_output += render_questions.render_q(Q_ACTION, "root.1", show=True, sentence_id=1)
    render_output += render_questions.render_q(Q_ACTION_LOOP, "root.1", show=True, sentence_id=1)
    render_output += """</div><br><br>"""
    # NOTE(review): the style value below uses curly ("smart") quotes, which
    # browsers ignore, so this acts as a plain <br> -- presumably a paste
    # artifact; confirm before changing the emitted template.
    render_output += """<br style=“line-height:50;”>"""
    render_output += """<hr size="10">"""
    render_output += """<div style='font-size:16px;display:block' id='div_2'>"""
    render_output += """<div class="well" style="position:sticky;position:-webkit-sticky;top:0;z-index:9999">
    <b>Command: ${command_2}</b></div> """
    render_output += render_questions.render_q(Q_ACTION, "root.2", show=True, sentence_id=2)
    render_output += render_questions.render_q(Q_ACTION_LOOP, "root.2", show=True, sentence_id=2)
    render_output += """</div><br><br>"""
    render_output += """<br style=“line-height:50;”>"""
    render_output += """<hr size="10">"""
    render_output += """<div style='font-size:16px;display:block' id='div_3'>"""
    render_output += """<div class="well" style="position:sticky;position:-webkit-sticky;top:0;z-index:9999">
    <b>Command: ${command_3}</b></div> """
    render_output += render_questions.render_q(Q_ACTION, "root.3", show=True, sentence_id=3)
    render_output += render_questions.render_q(Q_ACTION_LOOP, "root.3", show=True, sentence_id=3)
    render_output += """</div><br><br>"""
    print(render_output)
    print(AFTER)
| craftassist-master | acl2020_submission/annotation_tools/tools/qualification_tool.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import re
def render_q(q, parent_id, show=True, show_siblings=True, sentence_id=""):
    """Render one question dict as an HTML <fieldset> string.

    The fieldset id is "<parent_id>.<key>"; it starts hidden unless
    ``show`` is true.  A "radio" entry renders multiple-choice options
    (via render_radios) and a "span" entry renders word-selection buttons
    (via render_span).
    """
    assert "key" in q, "Missing key for q: {}".format(q)
    fieldset_id = "{}.{}".format(parent_id, q["key"])
    visibility = "block" if show else "none"

    parts = ['<fieldset id="{}" style="display:{}">'.format(fieldset_id, visibility)]
    # Question label, optionally carrying a tooltip.
    parts.append(label_tag(tooltip=q.get("tooltip")) + q["text"] + "</label>")
    if "radio" in q:
        parts.append(
            render_radios(
                q["radio"],
                fieldset_id,
                add_other_opt=q.get("add_radio_other", True),
                show_siblings=show_siblings,
                sentence_id=sentence_id,
            )
        )
    if "span" in q:
        parts.append(render_span(fieldset_id, q.get("optional"), sentence_id=sentence_id))
    parts.append("</fieldset>")
    return "".join(parts)
def render_span(parent_id, optional=False, sentence_id=""):
    """Render the word-selection button group for a span question.

    Emits one checkbox toggle per word slot (40 slots); slots for words the
    command does not have are hidden by the page-level CSS script.  When
    ``optional`` is truthy the group starts hidden behind a
    "Click and select all words if specified" toggle button.
    """
    group_id = "{}.span".format(parent_id)
    html = []
    if optional:
        # Inline JS that toggles the button group's visibility.
        toggle_js = (
            "var x = document.getElementById('{}');\n"
            "x.style.display = x.style.display == 'block' ? 'none' : 'block';"
        ).format(group_id)
        html.append(
            '<label class="btn btn-primary btn-sm" onclick="{}" '
            'style="margin-left:10px">Click and select all words if specified</label>'.format(
                toggle_js
            )
        )
    html.append(
        '<div id="{}" class="btn-group" data-toggle="buttons" style="display:{}">'.format(
            group_id, "none" if optional else "block"
        )
    )
    for slot in range(40):
        input_id = "{}#{}".format(group_id, slot)
        html.append(
            '<label class="btn btn-default word{j}{i}" name="{input_id}">'.format(
                input_id=input_id, i=slot, j=sentence_id
            )
        )
        # ${wordJI} is a Turk template variable substituted per command.
        html.append(
            '<input type="checkbox" autocomplete="off" id="{input_id}" '
            'name="{input_id}">${{word{j}{i}}}'.format(
                input_id=input_id, i=slot, j=sentence_id
            )
        )
        html.append("</label>")
    html.append("</div>")
    return "".join(html)
def render_radios(opts, parent_id, add_other_opt=True, show_siblings=True, sentence_id=""):
    """Render a list of radio options plus their (initially hidden) follow-ups.

    Each option dict has "text", "key", and optionally "tooltip" and "next"
    (child questions shown only while that option is selected).  Returns the
    radio divs followed by the hidden child-question divs.
    """
    if add_other_opt:
        # Always offer a catch-all "Other" choice unless the caller opts out.
        opts = opts + [{"text": "Other", "key": "Other"}]
    r = ""
    suffix = ""
    for opt in opts:
        opt_id = "{}.{}".format(parent_id, opt["key"])
        nexts = opt.get("next", [])
        # render child questions -- collected into `suffix` so they appear
        # after all radios, wrapped in a div the onchange JS below toggles.
        suffix += (
            '<div id="{}.next" style="display:none">'.format(opt_id)
            + "\n".join([render_q(n, opt_id, sentence_id=sentence_id) for n in nexts])
            + "</div>"
        )
        # get onchange function: re-evaluates every sibling's ".next" div so
        # selecting one radio hides the follow-ups of all the others.
        sibling_ids = ["{}.{}".format(parent_id, o["key"]) for o in opts]
        # child_ids = ["{}.{}".format(opt_id, n["key"]) for n in nexts]
        onchange = "\n".join(
            [
                """
                console.log('Toggling {sid}');
                if (document.getElementById('{sid}.next')) {{
                    document.getElementById('{sid}.next').style.display = \
                        document.getElementById('{sid}').checked ? 'block' : 'none';
                }}
                """.format(
                    sid=sid
                )
                for sid in sibling_ids
            ]
        )
        if not show_siblings:
            # Additionally hide the sibling radio rows themselves, leaving
            # only the selected option visible.
            onchange += "\n".join(
                [
                    """
                    console.log('Hiding siblings {sid}');
                    if (document.getElementById('div_{sid}')) {{
                        document.getElementById('div_{sid}').style.display = \
                            document.getElementById('{sid}').checked ? 'block' : 'none';
                    }}
                    """.format(
                        sid=sid
                    )
                    for sid in sibling_ids
                ]
            )
        # produce div for single option
        r += '<div class="radio" id="div_{}">'.format(opt_id) + label_tag(opt.get("tooltip"))
        r += """<input name="{}"
            id="{}"
            type="radio"
            value="{}"
            onchange="{}"
            />""".format(
            parent_id, opt_id, opt["key"], onchange
        )
        r += opt["text"]
        r += "</label></div>"
    return r + suffix
def label_tag(tooltip=None):
    """Open a <label> tag, attaching a Bootstrap tooltip when text is given.

    The tag is returned open; callers append the label text and the closing
    </label> themselves.
    """
    if not tooltip:
        return "<label>"
    return '<label data-toggle="tooltip" data-placement="right" title="{}">'.format(tooltip)
def child_id(parent_id, text):
    """Derive a child element id: the parent id plus a slug of ``text``.

    ``text`` is lowercased and every run of non-letters collapses to a
    single hyphen, with hyphens trimmed from both ends.
    """
    slug = re.sub(r"[^a-z]+", "-", text.lower().strip()).strip("-")
    return "{}.{}".format(parent_id, slug)
| craftassist-master | acl2020_submission/annotation_tools/tools/render_questions_tool_1.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
BEFORE = """
<!-- Bootstrap v3.0.3 -->
<link href="https://s3.amazonaws.com/mturk-public/bs30/css/bootstrap.min.css" rel="stylesheet" />
<section class="container" id="Other" style="margin-bottom:15px; padding: 10px 10px;
font-family: Verdana, Geneva, sans-serif; color:#333333; font-size:0.9em;">
<div class="row col-xs-12 col-md-12">
<!-- Instructions -->
<div class="panel panel-primary">
<div class="panel-heading"><strong>Instructions</strong></div>
<div class="panel-body" style="font-size:14px;">
<h1><strong>Split a composite command into individuals.</strong></h1>
<p>Please help us split a command into individual single commands.
The command shown to you here is given to an AI assistant to help out a player in the game of Minecraft.
You will be show a command that possibly implies a sequence or list of single commands and your task is to give us
single complete actions that are intended by the command shown to you.</p>
</br>
</br>
<p>Few valid examples below: </p>
<p>For <b>"hey bot please build a house and a cube"</b>
the answer is the following:
<ul>
<li>"hey bot please build a house" and </li>
<li>"hey bot please build a cube"</li>
</ul>
</p>
<p>For <b>"build a castle and then come back here"</b>
the answer is the following:
<ul>
<li>"build a castle" and </li>
<li>"come back here"</li>
</ul>
</p>
<p>For <b>"destroy the roof and build a stone ceiling in its place"</b>
the answer is the following:
<ul>
<li>"destroy the roof" and </li>
<li>"build a stone ceiling in its place"</li>
</ul>
</p>
<p>For <b>"move to the door and open it"</b>
the answer is the following:
<ul>
<li>"move to the door" and </li>
<li>"open the door"</li>
</ul>
</p>
<p>For <b>"i want you to undo the last two spawns and try again with new spawns" </b>
<ul>
<li>"undo the last two spawns" and </li>
<li>"do a new spawn"</li>
</ul>
<b>Note that: "do a new spawn" is a rewrite of "and try again with new spawns" to make that sub-command clear when seen in isolation.</b>
</p>
<p> Note that:</br>
<b>1. Some commands might have more than two splits. We've given you two more optional boxes.</b></br>
<b>2. Make sure that the commands you enter in text boxes are single and complete sentences by their own.</b></br>
<b>3. You might need to rewrite some commands when you split them, to make them clear in isolation.</b>
</p>
</div>
</div>
<div class="well" style="position:sticky;position:-webkit-sticky;top:0;z-index:9999">
<h2><strong>Command:</strong> ${sentence}</h2>
</div>
<!-- Content Body -->
<section>
"""
# AFTER = """
# </section>
# <!-- End Content Body -->
#
# </div>
# </section>
#
# <style type="text/css">
# fieldset {{
# padding: 10px;
# font-family: Georgia;
# font-size: 14px;
# background: #fbfbfb;
# border-radius: 5px;
# margin-bottom: 5px;
# }}
# .tooltip {{
# font-family: Georgia;
# font-size: 18px;
# }}
# .tooltip .tooltip-inner {{
# background-color: #ffc;
# color: #c00;
# min-width: 250px;
# }}
# </style>
#
# {CSS_SCRIPT}
#
# <script src="https://code.jquery.com/jquery.js"></script>
# <script src="https://netdna.bootstrapcdn.com/bootstrap/3.0.3/js/bootstrap.min.js"></script>
#
# <script>{JS_SCRIPT}</script>
# """.format(
# CSS_SCRIPT=CSS_SCRIPT, JS_SCRIPT=JS_SCRIPT
# )
BETWEEN = """
<section>
<fieldset>
<div class="input-group"><span style="font-family: verdana, geneva, sans-serif;font-size: 18px;">The individual commands. </span>
<p>Command 1 <textarea class="form-control" cols="150" name="command_1" rows="2"></textarea></p>
<p>Command 2 <textarea class="form-control" cols="150" name="command_2" rows="2"></textarea></p>
<p>Command 3 (optional)<textarea class="form-control" cols="150" name="command_3" rows="2"></textarea></p>
<p>Command 4 (optional)<textarea class="form-control" cols="150" name="command_4" rows="2"></textarea></p>
</div>
</fieldset>
</section>
<!-- End Content Body --></div>
</div>
</section>
<style type="text/css">fieldset { padding: 10px; background:#fbfbfb; border-radius:5px; margin-bottom:5px; }
</style>
"""
if __name__ == "__main__":
    # Emit the composite-command HIT page: instructions + sticky command
    # banner (BEFORE), then the four free-text answer boxes (BETWEEN).
    # Output is captured and uploaded as the Turk task template.
    print(
        BEFORE,
        BETWEEN
        # render_questions.render_q(Q_ACTION, "root", show=True),
        # render_questions.render_q(Q_ACTION_LOOP, "root", show=True),
        # AFTER,
    )
| craftassist-master | acl2020_submission/annotation_tools/tools/composite_command_tool.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
from collections import Counter, defaultdict
import argparse
import ast
# Per-answer tallies; module-level for interactive inspection.
# NOTE(review): these counters are never updated in this file -- confirm
# whether they are still needed.
right_answer_count = Counter()
wrong_answer_count = Counter()
# compile sets of allowed answers: maps command text -> set of acceptable
# action-dict strings, exactly as they appear (unparsed) in the gold file.
allowed_answers = defaultdict(set)
# Module-level default; read_gold_set uses its own local `command` cursor.
command = None
def read_gold_set(gold_set):
    """Load the gold-answer file into the module-level ``allowed_answers``.

    File format: a command on its own line, followed by one or more lines
    each holding a python-dict literal (starting with "{") that is an
    acceptable parse for that command.  Blank lines are ignored.
    """
    command = None  # the command the following dict lines belong to
    with open(gold_set, "r") as f:
        for line in f:
            line = line.strip()
            if line == "":
                continue
            if line.startswith("{"):
                try:
                    allowed_answers[command].add(line)
                except:
                    # Surface the offending line before re-raising.
                    print("Bad allowed answer:", line)
                    raise
            else:
                command = line
def compare_dicts(action_dict, allowed_dict):
    """Return True when ``action_dict`` matches the gold answer ``allowed_dict``.

    ``allowed_dict`` is the string form of a dict (as stored in
    ``allowed_answers``) and is parsed here.  The "repeat" entry gets a
    relaxed comparison: the repeat type must match and every sub-field the
    worker supplied -- except "repeat_dir" -- must equal the gold value.
    Every other top-level entry must match exactly.
    """
    gold = ast.literal_eval(allowed_dict)

    if "repeat" in gold:
        if "repeat" not in action_dict:
            return False
        gold_repeat = gold["repeat"]
        got_repeat = action_dict["repeat"]
        if got_repeat != gold_repeat:
            # Looser match: same repeat type, sub-fields compared below.
            if got_repeat[0] != gold_repeat[0]:
                return False
            gold_args = gold_repeat[1]
            for field, value in got_repeat[1].items():
                if field == "repeat_dir":
                    continue
                if field not in gold_args or gold_args[field] != value:
                    return False

    for key, value in gold.items():
        if key == "repeat":
            continue
        if key not in action_dict or action_dict[key] != value:
            return False
    return True
def get_wrong_stats(dict1, dict2):
    """Tally which top-level keys differ between a worker answer and a gold answer.

    Fixes over the previous version: the gold answer arrives as a string (as
    stored in ``allowed_answers``) and must be parsed before iterating;
    incrementing a plain dict raised KeyError on first use; a missing key was
    detected but then indexed anyway; and the computed stats were never
    returned (the caller then crashed on ``.update(None)``).

    Args:
        dict1: the worker's parsed action dict.
        dict2: a gold answer, either a dict or its string form.
            (Top-level keys include 'repeat', 'schematic', 'dialogue_type',
            'action_type', 'has_block_type', 'reference_object', 'tag_val',
            'filters', 'location', 'target_action_type'.)

    Returns:
        dict mapping key name -> 1 for every top-level gold key that is
        missing from, or different in, the worker answer.
    """
    gold = ast.literal_eval(dict2) if isinstance(dict2, str) else dict2
    st = Counter()  # Counter avoids KeyError on the first increment
    for k, v in gold.items():
        if k not in dict1:
            # A key missing entirely counts as a mismatch on that key.
            print("missing key: %r" % (k))
            st[k] += 1
        elif dict1[k] != v:
            st[k] += 1
    return dict(st)
def evaluate_workers(worker_file):
    """Score each Turk worker against the module-level ``allowed_answers``.

    ``worker_file`` holds one tab-separated line per worker:
    ``worker_id<TAB>{command: action_dict, ...}``.  Workers who answered
    fewer than 3 commands are skipped.

    Fixes over the previous version: a stray ``return worker_stats`` inside
    the per-answer loop aborted scoring at the first mismatch (followed by
    an unreachable debug ``print``), and a command absent from the gold set
    would KeyError on ``wrong_stats[k]`` -- such commands are now logged and
    skipped instead of silently penalizing the worker.

    Returns:
        dict mapping worker_id -> integer percentage (0-100) of that
        worker's answers that matched some allowed gold answer.
    """
    worker_stats = {}
    # Per-command tally of which keys workers get wrong (diagnostics only).
    wrong_stats = {cmd: {} for cmd in allowed_answers}
    with open(worker_file) as f:
        for line in f:
            right_count = 0
            wrong_count = 0
            worker_id, answers = line.strip().split("\t")
            answer_dicts = ast.literal_eval(answers)
            # if worker didn't answer all questions, ignore
            if len(answer_dicts) < 3:
                print("Skipping: %r completed only %r" % (worker_id, len(answer_dicts)))
                continue
            # k is the command sentence, v the worker's action dict
            for cmd, action_dict in answer_dicts.items():
                if cmd not in allowed_answers:
                    # Data problem, not a worker error: don't score it.
                    print("Command not in gold set: %r" % (cmd,))
                    continue
                if any(compare_dicts(action_dict, gold) for gold in allowed_answers[cmd]):
                    right_count += 1
                else:
                    wrong_count += 1
                    for gold in allowed_answers[cmd]:
                        wrong_stats[cmd].update(get_wrong_stats(action_dict, gold))
            worker_stats[worker_id] = int((right_count / (right_count + wrong_count)) * 100)
    return worker_stats
if __name__ == "__main__":
    # CLI: score the workers in --worker_file against the gold parses in
    # --gold_set, writing "worker_id<TAB>percent" lines to --worker_stats_out.
    parser = argparse.ArgumentParser()
    parser.add_argument("--gold_set", type=str, required=True)
    parser.add_argument("--worker_file", type=str, required=True)
    parser.add_argument("--worker_stats_out", type=str, required=True)
    args = parser.parse_args()

    read_gold_set(args.gold_set)
    stats = evaluate_workers(args.worker_file)
    with open(args.worker_stats_out, "w") as f:
        for worker_id, val in stats.items():
            f.write(worker_id + "\t" + str(val) + "\n")
| craftassist-master | acl2020_submission/annotation_tools/tools/evaluate_qualification.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
# Maximum number of word slots rendered; must match the word-column count
# produced by construct_input_for_turk.py for this tool.
MAX_WORDS = 40
# Inline <script> hiding the button for every ${wordI} template variable
# Turk leaves empty (commands shorter than MAX_WORDS).
CSS_SCRIPT = """
<script>
var node = document.createElement('style');
"""
for i in range(MAX_WORDS):
    CSS_SCRIPT += """
    if (! "${{word{i}}}") {{
        node.innerHTML += '.word{i} {{ display: none }} '
    }}
    """.format(
        i=i
    )
CSS_SCRIPT += """
document.body.appendChild(node);
</script>
"""
# Enables Bootstrap tooltips on all elements with data-toggle="tooltip".
JS_SCRIPT = """
$(function () {
  $('[data-toggle="tooltip"]').tooltip()
})
"""
BEFORE = """
<!-- Bootstrap v3.0.3 -->
<link href="https://s3.amazonaws.com/mturk-public/bs30/css/bootstrap.min.css" rel="stylesheet" />
<section class="container" id="Other" style="margin-bottom:15px; padding: 10px 10px;
font-family: Verdana, Geneva, sans-serif; color:#333333; font-size:0.9em;">
<div class="row col-xs-12 col-md-12">
<!-- Instructions -->
<div class="panel panel-primary">
<div class="panel-heading"><strong>Instructions</strong></div>
<div class="panel-body" style="font-size:14px;">
<p>Please help us determine the exact meaning of the command shown to you.
The command is given to an AI assistant to help out a player in the game of Minecraft.</p>
<p>You will answer a series of questions. Each question is either multiple-choice,
or requires you to select which words in the sentence correspond to which components of the command.</p>
<p>
<b>1. Place your mouse arrow over the questions and options for detailed tips.</b></br>
<b>2. When selecting the words, please select all words (along with properties of the thing).</b> So in "destroy the blue house" select "blue house" and not just "house"</br>
<b>3. Please also note that: </b>some questions are optional, click on "Click if specified" if you think those are mentioned in the command.
</p>
<p>Few examples below:</p>
<p><b>"come"</b>
<ul>
<li>For "What action is being requested?", the answer is "Move or walk somewhere"</li>
</ul></p>
<p><b>"make two small cubes here"</b>
<ul>
<li>"What action is being requested?" -> "Build, make a copy or complete something"</li>
<li>"Is this an exact copy or duplicate of an existing object?" -> "No". The assistant is asked to "Build a fresh complete, specific object"</li>
<li>For "Select words specifying what needs to be built" select the words: 'small cubes'</li>
<li>For "Select words specifying where the construction needs to happen", click on the word: 'here'</li>
<li>For "How many times should this action be performed?", select "Repeatedly, a specific number of times"
and then "two" for 'How many times'</li>
</ul>
</p>
<p><b>"dig until you reach water"</b>
<ul>
<li>"What action is being requested?" -> "Dig"</li>
<li>For "How many times should this action be performed?" -> 'Repeated until a certain condition is met'</li>
<li>For "Until the assistant reaches some object(s) /area" select: "water"</li>
</ul>
<b>Note that: repeats may be disguised, for example: 'follow the pig' should be interpreted as "repeat forever: move to the location of the pig".</b>
</p>
<p><b>"go to the large pole near the bridge"</b></br>
<ul>
<li>"What action is being requested?" -> "Move or walk somewhere"</li>
<li>"Select words specifying the location to which the agent should move" -> "the large pole near the bridge". </li>
</ul>
</p>
<p><b>"construct a 4 x 4 house"</b></br>
<ul>
<li>"What action is being requested?" -> "Build, make a copy or complete something"</li>
<li>"Is this an exact copy or duplicate of an existing object?" -> "No". The assistant is asked to "Build a fresh complete, specific object"</li>
<li>For "Select words specifying what needs to be built" select the words: '4 x 4 house'</li>
</ul>
<b>Note that: For build and dig actions, the words for size of the thing should be selected as a part of what needs to be built / dug. For example: in "construct a 4 x 4 house", select "4 x 4 house" as the thing to be built.</b>
</p>
</div>
</div>
<div class="well" style="position:sticky;position:-webkit-sticky;top:0;z-index:9999">
<b>Command: ${command}</b></div>
<!-- Content Body -->
<section>
"""
AFTER = """
</section>
<!-- End Content Body -->
</div>
</section>
<style type="text/css">
fieldset {{
padding: 10px;
font-family: Georgia;
font-size: 14px;
background: #fbfbfb;
border-radius: 5px;
margin-bottom: 5px;
}}
.tooltip {{
font-family: Georgia;
font-size: 18px;
}}
.tooltip .tooltip-inner {{
background-color: #ffc;
color: #c00;
min-width: 250px;
}}
</style>
{CSS_SCRIPT}
<script src="https://code.jquery.com/jquery.js"></script>
<script src="https://netdna.bootstrapcdn.com/bootstrap/3.0.3/js/bootstrap.min.js"></script>
<script>{JS_SCRIPT}</script>
""".format(
CSS_SCRIPT=CSS_SCRIPT, JS_SCRIPT=JS_SCRIPT
)
if __name__ == "__main__":
    # Local tool modules, imported lazily so the constants above can be
    # imported elsewhere (e.g. construct_input_for_turk) without them.
    import render_questions_tool_1
    from question_flow_for_step_1 import *

    # Emit the complete single-command HIT page to stdout.
    print(
        BEFORE,
        render_questions_tool_1.render_q(Q_ACTION, "root", show=True),
        render_questions_tool_1.render_q(Q_ACTION_LOOP, "root", show=True),
        AFTER,
    )
| craftassist-master | acl2020_submission/annotation_tools/tools/annotation_tool_1.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import argparse
from annotation_tool_1 import MAX_WORDS
def print_csv_format(filename, option_num):
    """Print the Turk input CSV for the commands in ``filename`` on stdout.

    ``option_num`` selects which annotation tool the CSV feeds:
      1 -- tool 1: one command per row, plus padded per-word columns.
      2 -- tool 2: highlighted command plus intent/child context columns.
      3 -- qualification test: three commands folded into a single row.
      4 -- composite-command tool: raw sentences only.

    Commas are stripped from commands so they cannot break the CSV.
    """
    if option_num == 1:
        # level 1
        headers = ["command"] + ["word{}".format(i) for i in range(MAX_WORDS)]
        print(*headers, sep=",")
        with open(filename) as f:
            for raw in f.readlines():
                # This is default option for plain text to be rendered.
                cmd = raw.replace(",", "").strip()
                toks = cmd.split()
                padding = [""] * (MAX_WORDS - len(toks))
                print(cmd, *toks, *padding, sep=",")
    elif option_num == 2:
        # level 2: input lines are
        # plain text \t highlighted text \t intent \t child
        headers = ["command", "intent", "child"] + [
            "word{}".format(i) for i in range(MAX_WORDS)
        ]
        print(*headers, sep=",")
        with open(filename) as f:
            for raw in f.readlines():
                cmd = raw.replace(",", "").strip()
                fields = cmd.split("\t")
                toks = fields[0].split()
                padding = [""] * (MAX_WORDS - len(toks))
                # fields[1] is the highlighted text shown to the worker.
                print(fields[1], fields[2], fields[3], *toks, *padding, sep=",")
    elif option_num == 3:
        # qualification test: three commands, each with its own word
        # columns, emitted as a single CSV row.
        headers = (
            ["command_1"]
            + ["word1{}".format(i) for i in range(MAX_WORDS)]
            + ["command_2"]
            + ["word2{}".format(i) for i in range(MAX_WORDS)]
            + ["command_3"]
            + ["word3{}".format(i) for i in range(MAX_WORDS)]
        )
        print(*headers, sep=",")
        with open(filename) as f:
            cells = []
            for raw in f.readlines():
                cmd = raw.replace(",", "").strip()
                toks = cmd.split()
                cells.append(",".join([cmd] + toks + [""] * (MAX_WORDS - len(toks))))
            print(",".join(cells))
    elif option_num == 4:
        # composite command tool: raw sentences under a "sentence" header.
        print("sentence")
        with open(filename) as f:
            for raw in f.readlines():
                print(raw.strip())
if __name__ == "__main__":
    # CLI: --input_file holds one command per line; --tool_num picks which
    # annotation tool's CSV layout to emit (see print_csv_format).
    parser = argparse.ArgumentParser()
    parser.add_argument("--input_file", type=str, required=True)
    parser.add_argument("--tool_num", type=int, default=1)
    args = parser.parse_args()
    print_csv_format(args.input_file, args.tool_num)
| craftassist-master | acl2020_submission/annotation_tools/tools/construct_input_for_turk.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
# Maximum number of word slots rendered; must match the word-column count
# produced by construct_input_for_turk.py for tool 2.
MAX_WORDS = 40
# Inline <script> hiding the button for every ${wordI} template variable
# Turk leaves empty (commands shorter than MAX_WORDS).
CSS_SCRIPT = """
<script>
var node = document.createElement('style');
"""
for i in range(MAX_WORDS):
    CSS_SCRIPT += """
    if (! "${{word{i}}}") {{
        node.innerHTML += '.word{i} {{ display: none }} '
    }}
    """.format(
        i=i
    )
CSS_SCRIPT += """
document.body.appendChild(node);
</script>
"""
# On page load: enable Bootstrap tooltips, then reveal only the question
# block whose element id is "<intent>_<child>", read from the hidden
# ${intent} / ${child} divs that the BEFORE template embeds in the page.
JS_SCRIPT = """
$(function () {
  $('[data-toggle="tooltip"]').tooltip()
})
var child_name = document.getElementById("child_name_div").textContent;
var action_name = document.getElementById("intent_div").textContent;
var x = null;
var y = null;
var s = null;
x = action_name;
y = child_name;
s = action_name + "_" + child_name;
document.getElementById(s).style.display = "block";
"""
BEFORE = """
<!-- Bootstrap v3.0.3 -->
<link href="https://s3.amazonaws.com/mturk-public/bs30/css/bootstrap.min.css" rel="stylesheet" />
<section class="container" id="Other" style="margin-bottom:15px; padding: 10px 10px;
font-family: Verdana, Geneva, sans-serif; color:#333333; font-size:0.9em;">
<div class="row col-xs-12 col-md-12">
<!-- Instructions -->
<div class="panel panel-primary">
<div class="panel-heading"><strong>Instructions</strong></div>
<div class="panel-body" style="font-size:14px;">
<p>Please help us determine the exact meaning of the <span style='background-color: #FFFF00'>highlighted words</span> in the command shown below.
The command is given to an AI assistant to help a player in the game of Minecraft.</p>
<p>You will be answering a series of questions about the <span style='background-color: #FFFF00'><b>highlighted text</b></span>. Each question is either multiple-choice,
or requires you to select which words in the sentence correspond to which property of the thing.</p>
<p>
<b>1. Place your mouse arrow over the questions and options for detailed tips.</b></br>
<b>2. When selecting the words, please select all words (along with properties of the thing).</b> So in "destroy the blue house" select "blue house" and not just "house"</br>
<b>3. When answering the questions, remember that you are answering them to find more details about the highlighted words .</b>
</p>
<p>Few examples below:</p>
<p><b>"make a <span style='background-color: #FFFF00'>small red bright cube</span> there"</b>
<ul>
<li>Select 'Name', 'Abstract/non-numeric size' and 'Colour' properties from the radios.</li>
<li>For 'Select all words that indicate the name of the thing to be built' select 'cube'</li>
<li>For 'Select all words that represent the size' select 'small'</li>
<li>For 'Select all words that represent the colour' select 'red'</li>
<li>For 'Some other property not mentioned above' select 'bright'</li>
</ul></p>
<p><b>"destroy the house <span style='background-color: #FFFF00'>over there</span>"</b>
<li>For 'Where should the construction happen?' select "The location is represented using an indefinite noun like 'there' or 'over here'" </li>
</ul>
</p>
<p><b>"go to the cube <span style='background-color: #FFFF00'>behind me</span>"</b>
<li>For 'Where should the construction happen?' select "Somewhere relative to where the speaker is standing" </li>
<li>For 'Where (which direction) in relation to where the speaker is standing?' select 'Behind'</li>
</ul>
</p>
<p><b>"complete <span style='background-color: #FFFF00'>that</span>"</b>
<ul>
<li>Select "There are words or pronouns that refer to the object to be completed"</li>
</ul>
</p>
<p><b>"go <span style='background-color: #FFFF00'>behind the sheep</span>"</b>
<ul>
<li>Select "Somewhere relative to another object(s) / area(s)"</li>
<li>Select "Behind" for "Where (which direction) in relation to the other object(s)?"</li>
<li>Select "the sheep" for "Click on all words specifying the object / area relative to which location is given"</li>
</ul>
</p>
</div>
</div>
<div class="well" style="position:sticky;position:-webkit-sticky;top:0;z-index:9999">
<b>Command: ${command}</b></div>
<div id='intent_div' style='display:none'>${intent}</div>
<div id='child_name_div' style='display:none'>${child}</div>
<!-- Content Body -->
<section>
"""
AFTER = """
</section>
<!-- End Content Body -->
</div>
</section>
<style type="text/css">
fieldset {{
padding: 10px;
font-family: Georgia;
font-size: 14px;
background: #fbfbfb;
border-radius: 5px;
margin-bottom: 5px;
}}
.tooltip {{
font-family: Georgia;
font-size: 18px;
}}
.tooltip .tooltip-inner {{
background-color: #ffc;
color: #c00;
min-width: 250px;
}}
</style>
{CSS_SCRIPT}
<script src="https://code.jquery.com/jquery.js"></script>
<script src="https://netdna.bootstrapcdn.com/bootstrap/3.0.3/js/bootstrap.min.js"></script>
<script>{JS_SCRIPT}</script>
""".format(
CSS_SCRIPT=CSS_SCRIPT, JS_SCRIPT=JS_SCRIPT
)
if __name__ == "__main__":
    # Emit the full annotation-tool-2 HTML page to stdout: a header (BEFORE),
    # one hidden question <div> per (action, child) pair, and a footer (AFTER).
    import render_questions_tool_2
    from question_flow_for_step_2 import *

    # Intro sentence shown at the top of each per-(action, child) question div.
    initial_content = {
        "build_schematic": "Please specify details of the thing that needs to be built.",
        "build_location": "Please specify details of the location at which the construction will happen.",
        "dig_schematic": "Please specify details of the thing that needs to be dug.",
        "dig_location": "Please specify details of the location at which the digging will happen.",
        "copy_reference_object": "Please specify details of the thing that needs to be copied.",
        "copy_location": "Please specify details of the location at which the copy will be made.",
        "freebuild_reference_object": "Please specify details of the thing that needs to be completed.",
        "move_location": "Please specify details of where the assistant should move.",
        "destroy_reference_object": "Please specify details of the thing that needs to be destroyed.",
        "spawn_reference_object": "Please specify details of what the assistant should spawn / generate",
        "spawn_location": "Please specify details of where the assistant should spawn / place",
        "otheraction_reference_object": "Please specify details of the reference object",
        "otheraction_location": "Please specify details of the location",
        "fill_reference_object": "Please specify details of the thing that should be filled",
        "tag_filters": "Please specify details of the thing that being tagged",
        "tag_tag_val": "Please specify details of the tag",
        "dance_location": "Please specify details of where the dance should happen.",
    }
    # Action-specific noun used to phrase some questions (e.g. "construction").
    optional_words = {
        "build": "construction",
        "copy": "copying",
        "spawn": "spawn",
        "dig": "digging",
    }
    # Which sub-questions (children) each action type needs annotated.
    action_children = {
        "build": ["schematic", "location"],
        "copy": ["reference_object", "location"],
        "freebuild": ["reference_object"],
        "move": ["location"],
        "spawn": ["reference_object", "location"],
        "fill": ["reference_object"],
        "destroy": ["reference_object"],
        "dance": ["location"],
        "dig": ["schematic", "location"],
        "tag": ["filters", "tag_val"],
        "otheraction": ["reference_object", "location"],
    }
    print(BEFORE)
    for action, children in action_children.items():
        for child in children:
            question = get_questions(child, action, optional_words.get(action))
            if question is None:
                continue
            id_value = action + "_" + child
            sentence = initial_content[id_value]
            # Hidden div per (action, child); the page's JS toggles visibility.
            render_output = (
                """<div style='font-size:16px;display:none' id='"""
                + id_value
                + """'> <b>"""
                + sentence
                + """</b>"""
            )
            # get_questions may return a single question dict or a list of them;
            # normalize to a list and render each the same way.
            questions = question if isinstance(question, list) else [question]
            for q in questions:
                render_output += render_questions_tool_2.render_q(
                    q, "root." + action, show=True
                )
            render_output += """</div><br><br>"""
            print(render_output)
    print(AFTER)
| craftassist-master | acl2020_submission/annotation_tools/tools/annotation_tool_2.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
# Radio-option tree used by several questions to pin down a location.
# Each option has display "text", a grammar "key" (None = not specified),
# optionally a "tooltip", and follow-up questions under "next";
# "span": True asks the annotator to highlight words in the command.
# NOTE(review): key casing is inconsistent here ("SpeakerPos" alongside
# "SPEAKER_LOOK" / "AGENT_POS") -- confirm what the downstream postprocessing
# expects before normalizing these strings.
LOCATION_RADIO = [
    {"text": "Not specified", "key": None},
    {
        "text": "Represented using word(s) that indicate reference to a location (e.g. 'there', 'here', 'over there' etc)",
        "key": "coref_resolve_check",
        "tooltip": "e.g. 'there', 'here', 'over there' etc",
        "next": [
            {
                "text": "What are the word(s) representing the location?",
                "key": "yes.coref_resolve",
                "span": True,
            }
        ],
    },
    {"text": "Where the speaker is looking (e.g. 'where I am looking')", "key": "SPEAKER_LOOK"},
    {"text": "Where the speaker is standing (e.g. 'by me', 'where I am')", "key": "SpeakerPos"},
    {
        "text": "Where the assistant is standing (e.g. 'by you', 'where you are', 'where you are standing')",
        "key": "AGENT_POS",
    },
    # Location relative to some other object: direction + object identity.
    {
        "text": "Somewhere relative to another object(s) / area(s)",
        "key": "REFERENCE_OBJECT",
        "next": [
            {
                "text": "In terms of number of steps, how many ?",
                "key": "steps",
                "span": True,
                "optional": True,
            },
            {
                "text": "Where in relation to the other object(s)?",
                "key": "relative_direction",
                "radio": [
                    {"text": "Left", "key": "LEFT"},
                    {"text": "Right", "key": "RIGHT"},
                    {"text": "Above", "key": "UP"},
                    {"text": "Below", "key": "DOWN"},
                    {"text": "In front", "key": "FRONT"},
                    {"text": "Behind", "key": "BACK"},
                    {"text": "Away from", "key": "AWAY"},
                    {"text": "Inside", "key": "INSIDE"},
                    {"text": "Outside", "key": "OUTSIDE"},
                    {"text": "Nearby or close to", "key": "NEAR"},
                    {"text": "Around", "key": "AROUND"},
                    {"text": "Exactly at", "key": "EXACT"},
                ],
            },
            # The relative object is named either by a pronoun (coreference)
            # or by an explicit name span.
            {
                "text": "Are there words or pronouns that represent the relative object (e.g. 'this', 'that', 'these', 'those', 'it' etc)?",
                "key": "coref_resolve_check",
                "tooltip": "e.g. 'this', 'that', 'these', 'those', 'it' etc",
                "radio": [
                    {
                        "text": "Yes",
                        "key": "yes",
                        "next": [
                            {
                                "text": "What is the word / pronoun representing the object?",
                                "key": "coref_resolve",
                                "span": True,
                            }
                        ],
                    },
                    {
                        "text": "No",
                        "key": "no",
                        "next": [
                            {
                                "text": "What is the name of the relative object(s) or area?",
                                "key": "has_name",
                                "span": True,
                            }
                        ],
                    },
                ],
            },
        ],
    },
]
# NOTE(review): the bare string below is dead text -- a question entry that was
# removed from LOCATION_RADIO and kept as documentation. It has no runtime
# effect (an unassigned string expression).
"""
{
"text": "Can this object(s) be represented using a pronoun? If so, what is it",
"key": "reference_object.coref_resolve",
"span": True,
"optional": True,
},
"""
# Optional attribute questions shared by every question flow that describes a
# reference object (material, color, and dimensions). All are span questions.
REF_OBJECT_OPTIONALS = [
    {
        "text": "What is the building material?",
        "key": "reference_object.has_block_type",
        "span": True,
        "optional": True,
    },
    {
        "text": "What is the color?",
        "key": "reference_object.has_colour",
        "span": True,
        "optional": True,
    },
    {
        "text": "What is the size?",
        "key": "reference_object.has_size",
        "span": True,
        "optional": True,
    },
    {
        "text": "What is the width?",
        "key": "reference_object.has_width",
        "span": True,
        "optional": True,
    },
    {
        "text": "What is the height?",
        "key": "reference_object.has_height",
        "span": True,
        "optional": True,
    },
    {
        "text": "What is the depth?",
        "key": "reference_object.has_depth",
        "span": True,
        "optional": True,
    },
]
# Top-level question tree: first identifies the action type of the command,
# then drills into action-specific follow-up questions. Relies on the sibling
# constants LOCATION_RADIO and REF_OBJECT_OPTIONALS defined above.
Q_ACTION = {
    "text": 'What action is being instructed? If multiple separate actions are being instructed (e.g. "do X and then do Y"), select "Multiple separate actions"',
    "key": "action_type",
    "add_radio_other": False,
    "radio": [
        # BUILD
        {
            "text": "Build, copy or complete something",
            "key": "BUILD",
            "next": [
                {
                    "text": "Is this an exact copy or duplicate of an existing object?",
                    "key": "COPY",
                    "add_radio_other": False,
                    "radio": [
                        # COPY
                        {
                            "text": "Yes",
                            "key": "yes",
                            "next": [
                                {
                                    "text": "Are there words or pronouns that indicate reference to the object to be copied (e.g. 'this', 'that', 'these', 'those', 'it' etc)",
                                    "key": "reference_object.coref_resolve_check",
                                    "tooltip": "e.g. 'this', 'that', 'these', 'those', 'it' etc",
                                    "add_radio_other": False,
                                    "radio": [
                                        {
                                            "text": "Yes",
                                            "key": "yes",
                                            "next": [
                                                {
                                                    "text": "What is the word / pronoun representing the object?",
                                                    "key": "coref_resolve",
                                                    "span": True,
                                                }
                                            ],
                                        },
                                        {
                                            "text": "No",
                                            "key": "no",
                                            "next": [
                                                {
                                                    "text": "What is the name of the object that should be copied?",
                                                    "key": "has_name",
                                                    "span": True,
                                                }
                                            ],
                                        },
                                    ],
                                },
                                *REF_OBJECT_OPTIONALS,
                                # {
                                #     "text": "What is the location of the object to be copied?",
                                #     "key": "location",
                                #     "radio": LOCATION_RADIO,
                                # },
                            ],
                        },
                        # BUILD
                        {
                            "text": "No",
                            "key": "no",
                            "next": [
                                {
                                    "text": "Is the assistant being asked to...",
                                    "key": "FREEBUILD",
                                    "add_radio_other": False,
                                    "radio": [
                                        {
                                            "text": "Build a fresh complete, specific object",
                                            "key": "BUILD",
                                            "next": [
                                                {
                                                    "text": "What is the name of the thing to be built ?",
                                                    "key": "schematic.has_name",
                                                    "span": True,
                                                },
                                                {
                                                    "text": "What is the building material (what should it be built out of)?",
                                                    "key": "schematic.has_block_type",
                                                    "span": True,
                                                    "optional": True,
                                                },
                                                {
                                                    "text": "What is the size?",
                                                    "key": "schematic.has_size",
                                                    "span": True,
                                                    "optional": True,
                                                },
                                                {
                                                    "text": "What is the width?",
                                                    "key": "schematic.has_width",
                                                    "span": True,
                                                    "optional": True,
                                                },
                                                {
                                                    "text": "What is the colour ?",
                                                    "key": "schematic.has_colour",
                                                    "span": True,
                                                    "optional": True,
                                                },
                                                {
                                                    "text": "What is the height?",
                                                    "key": "schematic.has_height",
                                                    "span": True,
                                                    "optional": True,
                                                },
                                                {
                                                    "text": "What is the depth?",
                                                    "key": "schematic.has_depth",
                                                    "span": True,
                                                    "optional": True,
                                                },
                                                {
                                                    "text": "What is the thickness?",
                                                    "key": "schematic.has_thickness",
                                                    "span": True,
                                                    "optional": True,
                                                },
                                            ],
                                        },
                                        {
                                            "text": "Help complete or finish an already existing object",
                                            "key": "FREEBUILD",
                                            "next": [
                                                {
                                                    "text": "Are there word(s) / pronouns that indicate reference to the object to be completed (e.g. 'this', 'that', 'these', 'those', 'it' etc)?",
                                                    "key": "reference_object.coref_resolve_check",
                                                    "add_radio_other": False,
                                                    "tooltip": "e.g. 'this', 'that', 'these', 'those', 'it' etc",
                                                    "radio": [
                                                        {
                                                            "text": "Yes",
                                                            "key": "yes",
                                                            "next": [
                                                                {
                                                                    "text": "What is the word / pronoun representing the object?",
                                                                    "key": "coref_resolve",
                                                                    "span": True,
                                                                }
                                                            ],
                                                        },
                                                        {
                                                            "text": "No",
                                                            "key": "no",
                                                            "next": [
                                                                {
                                                                    "text": "What is the name of the object that should be completed?",
                                                                    "key": "has_name",
                                                                    "span": True,
                                                                }
                                                            ],
                                                        },
                                                    ],
                                                },
                                                *REF_OBJECT_OPTIONALS,
                                            ],
                                        },
                                    ],
                                }
                            ],
                        },
                    ],
                },
                {
                    "text": "Where should the construction / copying / completion happen?",
                    "key": "location",
                    "radio": LOCATION_RADIO,
                },
            ],
        },
        # MOVE
        {
            "text": "Move or walk somewhere",
            "key": "MOVE",
            "next": [
                {
                    "text": "Where should the assistant move to?",
                    "key": "location",
                    "radio": LOCATION_RADIO,
                }
            ],
        },
        # SPAWN
        {
            "text": "Spawn something",
            "key": "SPAWN",
            "next": [
                {
                    "text": "What is the name of the object that should be spawned?",
                    "key": "reference_object.has_name",
                    "span": True,
                }
            ],
        },
        # DESTROY
        {
            "text": "Destroy, remove, or kill something",
            "key": "DESTROY",
            "next": [
                {
                    "text": "Are there word(s) / pronouns that indicate reference to the object to be destroyed (e.g. 'this', 'that', 'these', 'those', 'it' etc)?",
                    "key": "reference_object.coref_resolve_check",
                    "tooltip": "e.g. 'this', 'that', 'these', 'those', 'it' etc",
                    "radio": [
                        {
                            "text": "Yes",
                            "key": "yes",
                            "next": [
                                {
                                    "text": "What is the word / pronoun representing the object?",
                                    "key": "coref_resolve",
                                    "span": True,
                                }
                            ],
                        },
                        {
                            "text": "No",
                            "key": "no",
                            "next": [
                                {
                                    "text": "What is the name of the object that should be destroyed?",
                                    "key": "has_name",
                                    "span": True,
                                }
                            ],
                        },
                    ],
                },
                *REF_OBJECT_OPTIONALS,
                {
                    "text": "What is the location of the object to be removed?",
                    "key": "location",
                    "radio": LOCATION_RADIO,
                },
            ],
        },
        # DIG
        {
            "text": "Dig",
            "key": "DIG",
            "next": [
                {
                    "text": "Where should the digging happen ?",
                    "key": "location",
                    "radio": LOCATION_RADIO,
                },
                {
                    "text": "What is the size of the thing to be dug?",
                    "key": "has_size",
                    "span": True,
                    "optional": True,
                },
                {
                    "text": "What is the width of the thing to be dug?",
                    "key": "has_width",
                    "span": True,
                    "optional": True,
                },
                {
                    "text": "What is the height of the thing to be dug?",
                    "key": "has_length",
                    "span": True,
                    "optional": True,
                },
                {
                    "text": "What is the depth of the thing to be dug?",
                    "key": "has_depth",
                    "span": True,
                    "optional": True,
                },
            ],
        },
        # FILL
        {
            "text": "Fill something",
            "key": "FILL",
            "next": [
                {
                    "text": "Where is the thing that should be filled ?",
                    "key": "location",
                    "radio": LOCATION_RADIO,
                },
                {
                    "text": "What should the thing be filled with?",
                    "key": "has_block_type",
                    "span": True,
                    "optional": True,
                },
                *REF_OBJECT_OPTIONALS,
            ],
        },
        # TAG
        {
            "text": "Assign a description, name, or tag to an object",
            "key": "TAG",
            "tooltip": "e.g. 'That thing is fluffy' or 'The blue building is my house'",
            "next": [
                {
                    "text": "What is the description, name, or tag being assigned?",
                    "key": "tag",
                    "span": True,
                },
                {
                    "text": "What object is being assigned a description, name, or tag?",
                    "key": "reference_object",
                },
                *REF_OBJECT_OPTIONALS,
                {
                    "text": "What is the location of the object to be described, named, or tagged?",
                    "key": "location",
                    "radio": LOCATION_RADIO,
                },
            ],
        },
        # STOP
        {
            "text": "Stop an action",
            "key": "STOP",
            "next": [
                {
                    "text": "Is this a command to stop a particular action?",
                    "key": "target_action_type",
                    "radio": [
                        {"text": "Building or copying", "key": "BUILD"},
                        {"text": "Moving", "key": "MOVE"},
                        {"text": "Destroying", "key": "DESTROY"},
                        {"text": "Digging", "key": "DIG"},
                        {"text": "Filling", "key": "FILL"},
                    ],
                }
            ],
        },
        # RESUME
        {
            "text": "Resume an action",
            "key": "RESUME",
            "next": [
                {
                    "text": "Is this a command to resume a particular action?",
                    "key": "target_action_type",
                    "radio": [
                        {"text": "Building or copying", "key": "BUILD"},
                        {"text": "Moving", "key": "MOVE"},
                        {"text": "Destroying", "key": "DESTROY"},
                        {"text": "Digging", "key": "DIG"},
                        {"text": "Filling", "key": "FILL"},
                    ],
                }
            ],
        },
        # UNDO
        {
            "text": "Undo or revert an action",
            "key": "UNDO",
            "next": [
                {
                    "text": "Is this a command to undo a particular action?",
                    "key": "target_action_type",
                    "radio": [
                        {"text": "Building", "key": "BUILD"},
                        {"text": "Destroying", "key": "DESTROY"},
                        {"text": "Digging", "key": "DIG"},
                        {"text": "Filling", "key": "FILL"},
                    ],
                }
            ],
        },
        # ANSWER QUESTION
        {
            "text": "Answer a question",
            "key": "ANSWER",
            "tooltip": "e.g. 'How many trees are there?' or 'Tell me how deep that tunnel goes'",
            "next": [
                {
                    "text": "What is being asked about ?",
                    "key": "filters",
                    "radio": [
                        {
                            "text": "Where the assistant is heading",
                            "key": "type.AGENT.move_target",  # assign TAG, move_target
                        },
                        {
                            "text": "Where the assistant is currently located",
                            "key": "type.AGENT.location",  # assign TAG, location
                        },
                        {
                            "text": "Name of the action the assistant is performing",
                            "key": "type.ACTION.action_name",  # assign TAG, action_name
                        },
                        {
                            "text": "Name of the object that an action is being performed on",
                            "key": "type.ACTION.action_reference_object_name",  # # assign TAG, action_reference_object_name
                            "next": [
                                {
                                    "text": "Which action is being asked about?",
                                    "key": "action_type",
                                    "radio": [
                                        {"text": "Building", "key": "BUILD"},
                                        # Fixed typo: key was "DETSROY".
                                        {"text": "Destroying", "key": "DESTROY"},
                                        {"text": "Digging", "key": "DIG"},
                                        {"text": "Filling", "key": "FILL"},
                                        {"text": "Spawning", "key": "SPAWN"},
                                        {"text": "Moving", "key": "MOVE"},
                                    ],
                                }
                            ],
                        },
                        {
                            "text": "Questions related to a specific object(s) / area(s)",
                            "key": "type.REFERENCE_OBJECT",
                            "next": [
                                {
                                    "text": "Is this a yes/no question ?",
                                    "key": "answer_type",
                                    "radio": [
                                        {"text": "Yes this is a yes/no question", "key": "EXISTS"},
                                        {
                                            "text": "No some specific attribute is being asked about",
                                            "key": "TAG",
                                            "next": [
                                                {
                                                    "text": "What exactly is being asked about",
                                                    "key": "tag_name",
                                                    "radio": [
                                                        {"text": "The name", "key": "has_name"},
                                                        {"text": "The size", "key": "has_size"},
                                                        {
                                                            "text": "The colour",
                                                            "key": "has_colour",
                                                        },
                                                    ],
                                                }
                                            ],
                                        },
                                    ],
                                },
                                {
                                    "text": "Are there words or pronouns that represent the object being talked about(e.g. 'this', 'that', 'these', 'those', 'it' etc)?",
                                    "key": "coref_resolve_check",
                                    "tooltip": "e.g. 'this', 'that', 'these', 'those', 'it' etc",
                                    "radio": [
                                        {
                                            "text": "Yes",
                                            "key": "yes",
                                            "next": [
                                                {
                                                    "text": "What is the word / pronoun representing the object?",
                                                    "key": "coref_resolve",
                                                    "span": True,
                                                }
                                            ],
                                        },
                                        {
                                            "text": "No",
                                            "key": "no",
                                            "next": [
                                                {
                                                    "text": "What is the name of the object(s) being asked about?",
                                                    "key": "has_name",
                                                    "span": True,
                                                }
                                            ],
                                        },
                                    ],
                                },
                                {
                                    "text": "Is the building material being asked about?",
                                    "key": "has_block_type",
                                    "span": True,
                                    "optional": True,
                                },
                                {
                                    "text": "Is the color of the object being asked about?",
                                    "key": "has_colour",
                                    "span": True,
                                    "optional": True,
                                },
                                {
                                    "text": "Is the size of the object being asked about?",
                                    "key": "has_size",
                                    "span": True,
                                    "optional": True,
                                },
                                {
                                    "text": "Is the width of the object being asked about?",
                                    "key": "has_width",
                                    "span": True,
                                    "optional": True,
                                },
                                {
                                    "text": "Is the height of the object being asked about?",
                                    "key": "has_height",
                                    "span": True,
                                    "optional": True,
                                },
                                {
                                    "text": "Is the depth of the object being asked about?",
                                    "key": "has_depth",
                                    "span": True,
                                    "optional": True,
                                },
                            ],
                        },
                    ],
                }
            ],
        },
        # OTHER ACTION NOT LISTED
        {
            "text": "Another action not listed here",
            "key": "OtherAction",
            "tooltip": "The sentence is a command, but not one of the actions listed here",
            "next": [
                {
                    "text": "What object (if any) is the target of this action? e.g. for the sentence 'Sharpen this axe', select the word 'axe'",
                    "key": "reference_object.has_name",
                    "span": True,
                },
                *REF_OBJECT_OPTIONALS,
                {
                    "text": "Where should the action take place?",
                    "key": "location",
                    "radio": LOCATION_RADIO,
                },
            ],
        },
        # NOT ACTION
        {
            "text": "This sentence is not a command or request to do something",
            "key": "NOOP",
            "tooltip": "e.g. 'Yes', 'Hello', or 'What a nice day it is today'",
        },
        # MULTIPLE ACTIONS
        {
            "text": "Multiple separate actions",
            "key": "COMPOSITE_ACTION",
            "tooltip": "e.g. 'Build a cube and then run around'. Do not select this for a single repeated action, e.g. 'Build 5 cubes'",
        },
    ],
}
# Direction options offered when an action is to be repeated spatially.
_REPEAT_DIR_OPTIONS = [
    ("Not specified", None),
    ("Forward", "FRONT"),
    ("Backward", "BACK"),
    ("Left", "LEFT"),
    ("Right", "RIGHT"),
    ("Up", "UP"),
    ("Down", "DOWN"),
    ("Around", "AROUND"),
]
REPEAT_DIR = [{"text": text, "key": key} for text, key in _REPEAT_DIR_OPTIONS]
# Follow-up question about repetition of the chosen action: once, N times,
# per-object, forever, or until a stop condition. Uses the sibling REPEAT_DIR
# options for the repetition direction.
Q_ACTION_LOOP = {
    "text": "How many times should this action be performed?",
    "key": "loop",
    "radio": [
        {"text": "Just once, or not specified", "key": None},
        {
            "text": "Repeatedly, a specific number of times",
            "key": "ntimes",
            "next": [
                {"text": "How many times?", "span": True, "key": "repeat_for"},
                {
                    "text": "In which direction should the action be repeated?",
                    "key": "repeat_dir",
                    "radio": REPEAT_DIR,
                },
            ],
        },
        {
            "text": "Repeatedly, once for every object or for all objects",
            "key": "repeat_all",
            "tooltip": "e.g. 'Destroy the red blocks', or 'Build a shed in front of each house'",
            "next": [
                {
                    "text": "In which direction should the action be repeated?",
                    "key": "repeat_dir",
                    "radio": REPEAT_DIR,
                }
            ],
        },
        {
            "text": "Repeated forever",
            "key": "forever",
            "tooltip": "e.g. 'Keep building railroad tracks in that direction' or 'Collect diamonds until I tell you to stop'",
        },
        {
            "text": "Repeated until a certain condition is met",
            "key": "repeat_until",
            # "tooltip": "e.g. 'Dig until you hit bedrock', 'Keep walking until you reach water'",
            "next": [
                {
                    "text": "Until the assistant reaches some object(s) /area",
                    "key": "adjacent_to_block_type",
                    "span": True,
                }
            ],
        },
    ],
}
| craftassist-master | acl2020_submission/annotation_tools/tools/all_question_flows.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import csv
def any_two(a, b, c):
    """Return True iff at least two of the three values compare equal."""
    pairs = ((a, b), (a, c), (b, c))
    return any(x == y for x, y in pairs)
# Load the raw MTurk batch results and keep, per row, only the non-empty
# answer columns plus the input command.
with open("Batch_3449808_batch_results.csv", "r") as f:
    rows = list(csv.DictReader(f))
whittled = [
    {k: v for k, v in d.items() if (k.startswith("Answer.") or k == "Input.command") and v != ""}
    for d in rows
]
# One postprocessed line per assignment; each HIT was answered by 3 workers,
# so processed lines come in consecutive triples.
with open("results.tmp", "r") as f:
    processed = f.readlines()
assert len(processed) == len(whittled)
# Flag every HIT where no two of the three worker outputs agree.
faulty_processed_idxs = []
for i in range(len(processed) // 3):  # was hard-coded to 181 HITs
    if not any_two(processed[3 * i], processed[3 * i + 1], processed[3 * i + 2]):
        print(i)
        print(whittled[3 * i])
        faulty_processed_idxs.append(i)
| craftassist-master | acl2020_submission/annotation_tools/tools/analyze_outputs.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import re
def render_q(q, parent_id, show=True, show_siblings=True):
    """Render one question dict as an HTML <fieldset> and return the markup.

    The fieldset id is "<parent_id>.<key>"; radio options and word-span
    selectors are rendered when the question dict carries them.
    """
    assert "key" in q, "Missing key for q: {}".format(q)
    q_id = "{}.{}".format(parent_id, q["key"])
    display = "block" if show else "none"
    parts = ['<fieldset id="{}" style="display:{}">'.format(q_id, display)]
    parts.append(label_tag(tooltip=q.get("tooltip")) + q["text"] + "</label>")
    if "radio" in q:
        parts.append(
            render_radios(
                q["radio"],
                q_id,
                add_other_opt=q.get("add_radio_other", True),
                show_siblings=show_siblings,
                make_checkbox=q.get("checkbox", False),
            )
        )
    if "span" in q:
        parts.append(render_span(q_id, q.get("optional")))
    parts.append("</fieldset>")
    return "".join(parts)
def render_span(parent_id, optional=False):
r = ""
group_id = "{}.span".format(parent_id)
if optional:
onclick = """var x = document.getElementById('{}');
x.style.display = x.style.display == 'block' ? 'none' : 'block';""".format(
group_id
)
r += """<label class="btn btn-primary btn-sm" onclick="{}"
style="margin-left:10px">Click and select all words if specified</label>""".format(
onclick
)
r += '<div id="{}" class="btn-group" data-toggle="buttons" style="display:{}">'.format(
group_id, "none" if optional else "block"
)
for i in range(40):
input_id = "{}#{}".format(group_id, i)
r += """<label class="btn btn-default word{i}"
name="{input_id}">""".format(
input_id=input_id, i=i
)
r += '<input type="checkbox" autocomplete="off" id="{input_id}" \
name="{input_id}">${{word{i}}}'.format(
input_id=input_id, i=i
)
r += "</label>"
r += "</div>"
return r
def render_radios(opts, parent_id, add_other_opt=True, show_siblings=True, make_checkbox=False):
    """Render a list of radio options (and their hidden follow-up questions).

    Each option becomes an <input type="radio"> whose onchange JS shows/hides
    the "<opt_id>.next" div holding that option's child questions. When
    make_checkbox is True every input gets a distinct name (so multiple can be
    selected); otherwise all inputs share parent_id as name (true radio group).
    Follow-up question markup is accumulated in `suffix` and appended after
    all the option divs.
    """
    if add_other_opt:
        # Append a catch-all "Other" choice (non-destructively: new list).
        opts = opts + [{"text": "Other", "key": "Other"}]
    r = ""
    suffix = ""
    for i, opt in enumerate(opts):
        opt_id = "{}.{}".format(parent_id, opt["key"])
        nexts = opt.get("next", [])
        # render child questions
        suffix += (
            '<div id="{}.next" style="display:none">'.format(opt_id)
            + "\n".join([render_q(n, opt_id) for n in nexts])
            + "</div>"
        )
        # get onchange function
        # Every option's onchange re-evaluates visibility of ALL siblings'
        # ".next" divs, so selecting one option hides the others' follow-ups.
        sibling_ids = ["{}.{}".format(parent_id, o["key"]) for o in opts]
        # child_ids = ["{}.{}".format(opt_id, n["key"]) for n in nexts]
        onchange = "\n".join(
            [
                """
                console.log('Toggling {sid}');
                if (document.getElementById('{sid}.next')) {{
                    document.getElementById('{sid}.next').style.display = \
document.getElementById('{sid}').checked ? 'block' : 'none';
                }}
                """.format(
                    sid=sid
                )
                for sid in sibling_ids
            ]
        )
        if not show_siblings:
            # Additionally hide the sibling option rows themselves when one
            # option is picked.
            onchange += "\n".join(
                [
                    """
                    console.log('Hiding siblings {sid}');
                    if (document.getElementById('div_{sid}')) {{
                        document.getElementById('div_{sid}').style.display = \
document.getElementById('{sid}').checked ? 'block' : 'none';
                    }}
                    """.format(
                        sid=sid
                    )
                    for sid in sibling_ids
                ]
            )
        # produce div for single option
        r += '<div class="radio" id="div_{}">'.format(opt_id) + label_tag(opt.get("tooltip"))
        radio_name = parent_id + str(i) if make_checkbox else parent_id
        r += """<input name="{}"
                       id="{}"
                       type="radio"
                       value="{}"
                       onchange="{}"
                       />""".format(
            radio_name, opt_id, opt["key"], onchange
        )
        r += opt["text"]
        r += "</label></div>"
    return r + suffix
def label_tag(tooltip=None):
    """Return an opening <label> tag, with a Bootstrap tooltip when provided."""
    if not tooltip:
        return "<label>"
    return '<label data-toggle="tooltip" data-placement="right" title="{}">'.format(tooltip)
def child_id(parent_id, text):
    """Derive a child DOM id from *text*: lowercased, non-letter runs -> '-'."""
    slug = re.sub(r"[^a-z]+", "-", text.lower().strip()).strip("-")
    return "{}.{}".format(parent_id, slug)
| craftassist-master | acl2020_submission/annotation_tools/tools/render_questions_tool_2.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
# Top-level question tree for annotation step 1: identifies the action type of
# a command and asks the annotator to highlight the relevant word spans
# (schematic, reference object, location, ...) for that action. Unlike the
# tool-2 flow, every follow-up here is a span-selection question.
Q_ACTION = {
    "text": 'What action is being requested? If multiple separate actions are being requested (e.g. "do X and then do Y"), select "Multiple separate actions"',
    "key": "action_type",
    "tooltip": "e.g. in 'Make few copies of the cube' it is : 'Build, make a copy or complete something'",
    "add_radio_other": False,
    "radio": [
        # BUILD
        {
            "text": "Build, make a copy or complete something",
            "key": "BUILD",
            "tooltip": "The sentence requests construction or making copies of some object",
            "next": [
                {
                    "text": "Is this exact copy or duplicate of something that already exists?",
                    "key": "COPY",
                    "add_radio_other": False,
                    "radio": [
                        # COPY
                        {
                            "text": "Yes",
                            "key": "yes",
                            "next": [
                                {
                                    "text": "Select words specifying the thing that needs to be copied",
                                    "key": "reference_object",
                                    "tooltip": "e.g. in 'Make 5 copies of the white sheep and put them behind the house' select 'the white sheep'",
                                    "optional": True,
                                    "span": True,
                                },
                                {
                                    "text": "Select words specifying where the copy should be made",
                                    "key": "location",
                                    "tooltip": "e.g. in 'Make 5 copies of the white sheep and put them behind the house' select 'behind the house'",
                                    "span": True,
                                    "optional": True,
                                },
                            ],
                        },
                        # BUILD
                        {
                            "text": "No",
                            "key": "no",
                            "next": [
                                {
                                    "text": "Is the assistant being asked to...",
                                    "key": "FREEBUILD",
                                    "add_radio_other": False,
                                    "radio": [
                                        # Build
                                        {
                                            "text": "Build a specific object or objects from scratch",
                                            "key": "BUILD",
                                            "next": [
                                                {
                                                    "text": "Select words specifying what needs to be built",
                                                    "key": "schematic",
                                                    "tooltip": "e.g. in 'construct two big wooden houses in front of the tower' select 'big wooden houses'",
                                                    "span": True,
                                                },
                                                {
                                                    "text": "Select words specifying where the thing should be built",
                                                    "key": "location",
                                                    "tooltip": "e.g. in 'construct two big wooden houses in front of the tower' select 'in front of the tower'",
                                                    "span": True,
                                                    "optional": True,
                                                },
                                            ],
                                        },
                                        # Freebuild
                                        {
                                            "text": "Help complete or finish already existing object(s)",
                                            "key": "FREEBUILD",
                                            "next": [
                                                {
                                                    "text": "Select words specifying what needs to be completed",
                                                    "key": "reference_object",
                                                    "tooltip": "e.g. in 'complete that for me please' select 'that'",
                                                    "span": True,
                                                }
                                            ],
                                        },
                                    ],
                                }
                            ],
                        },
                    ],
                }
            ],
        },  # Build , copy, freebuild finishes
        # MOVE
        {
            "text": "Move or walk somewhere",
            "key": "MOVE",
            "tooltip": "The assistant is being asked to move",
            "next": [
                {
                    "text": "Select words specifying the location to which the agent should move",
                    "key": "location",
                    "span": True,
                    "tooltip": "e.g. in 'go to the sheep' select 'to the sheep'",
                }
            ],
        },  # MOVE finishes
        # SPAWN
        {
            "text": "Spawn something (place an animal or creature in the game world)",
            "key": "SPAWN",
            "tooltip": "for example 'spawn a pig'.",
            "next": [
                {
                    "text": "Select words specifying what needs to be spawned",
                    "key": "reference_object",
                    "span": True,
                    "tooltip": "e.g. in 'spawn a pig' select 'a pig' or 'pig'",
                },
                {
                    "text": "Select words specifying where to spawn",
                    "key": "location",
                    "optional": True,
                    "span": True,
                    "tooltip": "e.g. in 'spawn a pig behind the house' select 'behind the house'",
                },
            ],
        },
        # DESTROY
        {
            "text": "Destroy, remove, or kill something",
            "key": "DESTROY",
            "tooltip": "Something needs to be destroyed.",
            "next": [
                {
                    "text": "Select words specifying what needs to be destroyed",
                    "key": "reference_object",
                    "span": True,
                    "tooltip": "e.g. in 'destroy the red cube over there' select 'red cube over there'",
                }
            ],
        },
        # DIG
        {
            "text": "Dig",
            "key": "DIG",
            "tooltip": "Digging of some kind needs to be done",
            "next": [
                {
                    "text": "Select words specifying what needs to be dug",
                    "key": "schematic",
                    "optional": True,
                    "tooltip": "e.g. in 'dig a big circular hole over there' select 'big circular hole'",
                    "span": True,
                },
                {
                    "text": "Select words specifying where the thing will be dug",
                    "key": "location",
                    "optional": True,
                    "span": True,
                    "tooltip": "e.g. in 'dig a big hole over there' select 'over there'",
                },
            ],
        },
        # FILL
        {
            "text": "Fill something",
            "key": "FILL",
            "tooltip": "Fill or cover an object/area with something",
            "next": [
                {
                    "text": "Select words specifying what needs to be filled",
                    "key": "reference_object",
                    "span": True,
                    "tooltip": "e.g. in 'fill the mine with diamonds' select 'mine'",
                },
                {
                    "text": "Select words specifying what material is used for filling",
                    "key": "has_block_type",
                    "optional": True,
                    "span": True,
                    "tooltip": "e.g. in 'fill the mine with diamonds' select 'diamonds'",
                },
            ],
        },
        # Tag
        {
            "text": "Assign a description, name, or tag to an object",
            "key": "TAG",
            "tooltip": "e.g. 'That thing is fluffy' or 'The blue building is my house'",
            "next": [
                {
                    "text": "Select words specifying the object that is being tagged",
                    "key": "filters",
                    "span": True,
                    "tooltip": "e.g. in 'this is bright' select 'this'",
                },
                {
                    "text": "Select words specifying the description or tag being assigned",
                    "key": "tag_val",
                    "span": True,
                    "tooltip": "e.g. in 'this is bright' select 'bright'",
                },
            ],
        },
        # Answer
        {
            "text": "Answer a question",
            "key": "ANSWER",
            "tooltip": "e.g. 'How many trees are there?' or 'Tell me how deep that tunnel goes'",
        },
        # Dance
        {
            "text": "A movement where the path or step-sequence is more important than the destination",
            "key": "DANCE",
            "tooltip": "Dance or movement where the path is more important than the destination, e.g. go around the cube 4 times",
            "next": [
                {
                    "text": "Select words specifying where the dance needs to happen",
                    "key": "location",
                    "optional": True,
                    "span": True,
                    "tooltip": "e.g. in 'dance in front of the cube' select 'in front of the cube'",
                }
            ],
        },
        # STOP
        {
            "text": "Stop an action",
            "key": "STOP",
            "tooltip": "Stop or pause something",
            "next": [
                {
                    "text": "Select words specifying which action needs to be stopped",
                    "key": "target_action_type",
                    "optional": True,
                    "span": True,
                    "tooltip": "e.g. in 'stop digging' select 'digging'",
                }
            ],
        },
        # RESUME
        {
            "text": "Resume an action",
            "key": "RESUME",
            "tooltip": "Resume or continue something",
            "next": [
                {
                    "text": "Select words specifying which action needs to be resumed",
                    "key": "target_action_type",
                    "optional": True,
                    "span": True,
                    "tooltip": "e.g. in 'continue walking' select 'walking'",
                }
            ],
        },
        # UNDO
        {
            "text": "Undo or revert an action",
            "key": "UNDO",
            "tooltip": "Undo a previously completed action",
            "next": [
                {
                    "text": "Select words specifying which action needs to be reverted",
                    "key": "target_action_type",
                    "optional": True,
                    "span": True,
                    "tooltip": "e.g. in 'undo what you built' select 'what you built'",
                }
            ],
        },
        # MULTIPLE ACTIONS
        {
            "text": "Multiple separate actions",
            "key": "COMPOSITE_ACTION",
            "tooltip": "e.g. 'Build a cube and then run around'. Do not select this for a single repeated action, e.g. 'Build 5 cubes'",
        },
        # OTHER ACTION NOT LISTED
        {
            "text": "Another action not listed here",
            "key": "OtherAction",
            "tooltip": "In case the given sentence is a command, but not one of the command types listed above, please click this",
            "next": [
                {
                    "text": "What object (if any) is the target of this action?",
                    "key": "reference_object",
                    "span": True,
                    "optional": True,
                    "tooltip": "e.g. in 'Sharpen the axe behind me', select 'axe'",
                },
                {
                    "text": "Where should the action take place?",
                    "key": "location",
                    "span": True,
                    "optional": True,
                    "tooltip": "e.g. in 'Sharpen the axe behind me', select 'behind me'",
                },
            ],
        },
        # NOT ACTION
        {
            "text": "This sentence is not a command or request to do something",
            "key": "NOOP",
            "tooltip": "In case the given sentence is not a command or request to do something, please click this",
        },
    ],
}
# Direction options offered when an action is repeated (with worker tooltips).
REPEAT_DIR = [
    {"text": "Not specified", "key": None, "tooltip": "The direction isn't specified"},
    {
        "text": "Forward",
        "key": "FRONT",
        "tooltip": "Repetition towards the front / forward direction",
    },
    {"text": "Backward", "key": "BACK", "tooltip": "Repetition towards the back"},
    {"text": "Left", "key": "LEFT", "tooltip": "Repetition to the left"},
    # Fixed copy-paste bug: this tooltip previously said "Repetition to the left".
    {"text": "Right", "key": "RIGHT", "tooltip": "Repetition to the right"},
    {"text": "Up", "key": "UP", "tooltip": "Repetition upwards"},
    {"text": "Down", "key": "DOWN", "tooltip": "Repetition downward"},
    {"text": "Around", "key": "AROUND", "tooltip": "Repetition around"},
]
# Step-1 follow-up question about repetition of the chosen action: once,
# N times, per-object, forever, or until a stop condition. Uses the sibling
# REPEAT_DIR options for the repetition direction.
Q_ACTION_LOOP = {
    "text": "How many times should this action be performed?",
    "key": "loop",
    "tooltip": "Does the above action or any part of it need to be repeated ?",
    "add_radio_other": False,
    "radio": [
        {
            "text": "Just once, or not specified",
            "key": None,
            "tooltip": "No repeats needed, it needs to be done exactly once.",
        },
        {
            "text": "Repeatedly, a specific number of times",
            "key": "ntimes",
            "tooltip": "The action needs to be repeated a fixed number of times",
            "next": [
                {
                    "text": "How many times? Select all words",
                    "span": True,
                    "key": "repeat_for",
                    "tooltip": "e.g. in 'go around the cube twice' select 'twice'",
                },
                {
                    "text": "In which direction should the action be repeated?",
                    "key": "repeat_dir",
                    "radio": REPEAT_DIR,
                    "tooltip": "e.g. in 'go around the cube twice' select 'Around'",
                },
            ],
        },
        {
            "text": "Repeatedly, once for every object or for all objects",
            "key": "repeat_all",
            "tooltip": "e.g. 'Destroy every red block', or 'Build a shed in front of each house'",
            "next": [
                {
                    "text": "In which direction should the action be repeated?",
                    "key": "repeat_dir",
                    "radio": REPEAT_DIR,
                    "tooltip": "e.g. in 'stack 5 blocks' select 'Up' since stacking is done upwards",
                }
            ],
        },
        {
            "text": "Repeated forever",
            "key": "forever",
            "tooltip": "e.g. 'Keep building railroad tracks in that direction' or 'Collect diamonds until I tell you to stop'",
        },
        # Stop condition: either reaching a block type / area, or a free-form
        # condition span; both are optional span selections.
        {
            "text": "Repeated until a certain condition is met",
            "key": "repeat_until",
            "tooltip": "e.g. 'Dig until you hit bedrock', 'Keep walking until you reach water'",
            "next": [
                {
                    "text": "Until the assistant reaches a specific object(s) / area",
                    "key": "adjacent_to_block_type",
                    "optional": True,
                    "tooltip": "e.g. in 'Dig until you hit bedrock' select 'bedrock'",
                    "span": True,
                },
                {
                    "text": "Until some other condition is met",
                    "key": "condition_span",
                    "optional": True,
                    "tooltip": "e.g. in 'Keep building until it is sundown' select 'it is sundown'",
                    "span": True,
                },
            ],
        },
    ],
}
| craftassist-master | acl2020_submission/annotation_tools/tools/question_flow_for_step_1.py |
import argparse
import functools
import json
import logging
import logging.handlers
import os
import pickle
from os.path import isfile
from os.path import join as pjoin
from glob import glob
from tqdm import tqdm
from time import time
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from transformers import AutoModel, AutoTokenizer, BertConfig
from utils_parsing import *
from utils_caip import *
# training loop (all epochs at once)
def train(model, dataset, tokenizer, args):
    """Run the full training loop (all epochs) for the parser.

    Shuffles `dataset`, optimizes `model` with a warmup scheduler, and
    optionally feeds mis-parsed examples back into the dataset's "hard
    examples" buffer. Returns (mean loss, mean full-tree accuracy), both
    accumulated over the last epoch only (accumulators reset per epoch).
    """
    # make data sampler
    train_sampler = RandomSampler(dataset)
    model_collate_fn = functools.partial(caip_collate, tokenizer=tokenizer)
    train_dataloader = DataLoader(
        dataset, sampler=train_sampler, batch_size=args.batch_size, collate_fn=model_collate_fn
    )
    epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=True)
    # make optimizer
    optimizer = OptimWarmupEncoderDecoder(model, args)
    # training loop
    for e in range(args.num_epochs):
        # loc_* accumulators are reset every 400 steps for progress reports;
        # tot_* accumulators run over the whole epoch
        loc_steps = 0
        loc_loss = 0.0
        loc_int_acc = 0.0
        loc_span_acc = 0.0
        loc_full_acc = 0.0
        tot_steps = 0
        tot_loss = 0.0
        tot_accuracy = 0.0
        st_time = time()
        for step, batch in enumerate(epoch_iterator):
            batch_examples = batch[-1]
            # move tensors to the same device as the model parameters
            batch_tensors = [
                t.to(model.decoder.lm_head.predictions.decoder.weight.device) for t in batch[:4]
            ]
            x, x_mask, y, y_mask = batch_tensors
            outputs = model(x, x_mask, y, y_mask)
            loss = outputs["loss"]
            # backprop
            loss.backward()
            # gradient accumulation: only update every param_update_freq steps
            if step % args.param_update_freq == 0:
                torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
                optimizer.step()
                model.zero_grad()
            # compute accuracy and add hard examples
            lm_acc, sp_acc, full_acc = compute_accuracy(outputs, y)
            if "hard" in dataset.dtypes:
                # only start collecting hard examples after warmup (or epoch > 0)
                if e > 0 or tot_steps > 2 * args.decoder_warmup_steps:
                    for acc, exple in zip(lm_acc, batch_examples):
                        if not acc.item():
                            # occasionally log which examples were added
                            if step % 200 == 100:
                                print("ADDING HE:", step, exple[0])
                            dataset.add_hard_example(exple)
            # book-keeping: per-batch averages of the three accuracy signals
            loc_int_acc += lm_acc.sum().item() / lm_acc.shape[0]
            loc_span_acc += sp_acc.sum().item() / sp_acc.shape[0]
            loc_full_acc += full_acc.sum().item() / full_acc.shape[0]
            tot_accuracy += full_acc.sum().item() / full_acc.shape[0]
            loc_loss += loss.item()
            loc_steps += 1
            tot_loss += loss.item()
            tot_steps += 1
            if step % 400 == 0:
                # periodic progress report, then reset the local accumulators
                print(
                    "{:2d} - {:5d} \t L: {:.3f} A: {:.3f} \t {:.2f}".format(
                        e, step, loc_loss / loc_steps, loc_full_acc / loc_steps, time() - st_time
                    )
                )
                logging.info(
                    "{:2d} - {:5d} \t L: {:.3f} A: {:.3f} \t {:.2f}".format(
                        e, step, loc_loss / loc_steps, loc_full_acc / loc_steps, time() - st_time
                    )
                )
                loc_loss = 0
                loc_steps = 0
                loc_int_acc = 0.0
                loc_span_acc = 0.0
                loc_full_acc = 0.0
    return (tot_loss / tot_steps, tot_accuracy / tot_steps)
# same as training loop without back-propagation
def validate(model, dataset, tokenizer, args):
    """Evaluate `model` on `dataset` without updating parameters.

    Mirrors the training loop but performs no back-propagation; gradient
    tracking is disabled (the original built autograd graphs it never used,
    wasting memory on large validation sets).

    Returns:
        (mean loss, mean node accuracy, mean span accuracy,
         mean full-tree accuracy), averaged over batches.
    """
    # iterate the data in order so results are reproducible
    valid_sampler = SequentialSampler(dataset)
    model_collate_fn = functools.partial(caip_collate, tokenizer=tokenizer)
    valid_dataloader = DataLoader(
        dataset, sampler=valid_sampler, batch_size=args.batch_size, collate_fn=model_collate_fn
    )
    epoch_iterator = tqdm(valid_dataloader, desc="Iteration", disable=True)
    tot_steps = 0
    tot_loss = 0.0
    tot_int_acc = 0.0
    tot_span_acc = 0.0
    tot_accu = 0.0
    # no gradients needed at evaluation time: saves memory and compute
    with torch.no_grad():
        for step, batch in enumerate(epoch_iterator):
            # move tensors to the same device as the model parameters
            batch_tensors = [
                t.to(model.decoder.lm_head.predictions.decoder.weight.device) for t in batch[:4]
            ]
            x, x_mask, y, y_mask = batch_tensors
            outputs = model(x, x_mask, y, y_mask)
            loss = outputs["loss"]
            # per-batch averages of the three accuracy signals
            lm_acc, sp_acc, full_acc = compute_accuracy(outputs, y)
            tot_int_acc += lm_acc.sum().item() / lm_acc.shape[0]
            tot_span_acc += sp_acc.sum().item() / sp_acc.shape[0]
            tot_accu += full_acc.sum().item() / full_acc.shape[0]
            tot_loss += loss.item()
            tot_steps += 1
    return (
        tot_loss / tot_steps,
        tot_int_acc / tot_steps,
        tot_span_acc / tot_steps,
        tot_accu / tot_steps,
    )
def _build_parser():
    """Build the command-line parser (data, model, optimization settings)."""
    parser = argparse.ArgumentParser()
    # data arguments
    parser.add_argument(
        "--data_dir", default="reformatted_ttad_data", type=str, help="train/valid/test data"
    )
    parser.add_argument(
        "--output_dir", default="caip_model_dir", type=str, help="Where we save the model"
    )
    parser.add_argument("--model_name", default="caip_parser", type=str, help="Model name")
    parser.add_argument(
        "--tree_voc_file",
        default="caip_tree_voc_final.json",
        type=str,
        help="Pre-computed grammar and output vocabulary",
    )
    # model arguments
    parser.add_argument(
        "--pretrained_encoder_name",
        default="distilbert-base-uncased",
        type=str,
        help="Pretrained text encoder "
        "See full list at https://huggingface.co/transformers/pretrained_models.html",
    )
    parser.add_argument(
        "--num_decoder_layers",
        default=6,
        type=int,
        help="Number of transformer layers in the decoder",
    )
    parser.add_argument(
        "--num_highway", default=1, type=int, help="Number of highway layers in the mapping model"
    )
    # optimization arguments
    parser.add_argument(
        "--optimizer", default="adam", type=str, help="Optimizer in [adam|adagrad]"
    )
    parser.add_argument("--batch_size", default=64, type=int, help="Batch size")
    parser.add_argument("--param_update_freq", default=1, type=int, help="Group N batch updates")
    parser.add_argument("--num_epochs", default=2, type=int, help="Number of training epochs")
    parser.add_argument(
        "--examples_per_epoch", default=-1, type=int, help="Number of training examples per epoch"
    )
    parser.add_argument(
        "--train_encoder", action="store_true", help="Whether to finetune the encoder"
    )
    parser.add_argument(
        "--encoder_warmup_steps",
        default=1,
        type=int,
        help="Learning rate warmup steps for the encoder",
    )
    parser.add_argument(
        "--encoder_learning_rate", default=0.0, type=float, help="Learning rate for the encoder"
    )
    parser.add_argument(
        "--decoder_warmup_steps",
        default=1000,
        type=int,
        help="Learning rate warmup steps for the decoder",
    )
    parser.add_argument(
        "--decoder_learning_rate", default=1e-4, type=float, help="Learning rate for the decoder"
    )
    parser.add_argument(
        "--lambda_span_loss",
        default=0.5,
        type=float,
        help="Weighting between node and span prediction losses",
    )
    parser.add_argument(
        "--node_label_smoothing",
        default=0.0,
        type=float,
        help="Label smoothing for node prediction",
    )
    parser.add_argument(
        "--span_label_smoothing",
        default=0.0,
        type=float,
        help="Label smoothing for span prediction",
    )
    parser.add_argument(
        "--dtype_samples",
        default='[["templated", 1.0]]',
        type=str,
        help="Sampling probabilities for handling different data types",
    )
    parser.add_argument(
        "--rephrase_proba", default=-1.0, type=float, help="Only specify probablility of rephrases"
    )
    parser.add_argument(
        "--word_dropout",
        default=0.0,
        type=float,
        help="Probability of replacing input token with [UNK]",
    )
    parser.add_argument(
        "--encoder_dropout", default=0.0, type=float, help="Apply dropout to encoder output"
    )
    return parser


def _setup_logging(model_name):
    """Log to a watched file; path/level overridable via LOGFILE/LOGLEVEL."""
    log_path = os.environ.get("LOGFILE", "logs/%s.log" % model_name.split("/")[-1])
    # make sure the target directory exists so the handler can open the file
    os.makedirs(os.path.dirname(log_path) or ".", exist_ok=True)
    l_handler = logging.handlers.WatchedFileHandler(log_path)
    l_format = logging.Formatter(fmt="%(asctime)s - %(message)s", datefmt="%d-%b-%y %H:%M:%S")
    l_handler.setFormatter(l_format)
    l_root = logging.getLogger()
    l_root.setLevel(os.environ.get("LOGLEVEL", "INFO"))
    l_root.addHandler(l_handler)


def _load_or_make_grammar(args):
    """Load the cached grammar/vocabulary, or induce them from the dataset."""
    if isfile(args.tree_voc_file):
        print("loading grammar")
        logging.info("loading grammar")
        with open(args.tree_voc_file) as f:
            full_tree, tree_i2w = json.load(f)
    else:
        print("making grammar")
        logging.info("making grammar")
        data = {"train": {}, "valid": {}, "test": {}}
        for spl in data:
            for fname in glob(pjoin(args.data_dir, "{}/*.json".format(spl))):
                print(fname)
                # key on the file name without its ".json" extension;
                # basename is portable where the original "/" split was not
                with open(fname) as f:
                    data[spl][os.path.basename(fname)[:-5]] = json.load(f)
        full_tree, tree_i2w = make_full_tree(
            [
                (d_list, 1.0)
                for spl, dtype_dict in data.items()
                for dtype, d_list in dtype_dict.items()
            ]
        )
        with open(args.tree_voc_file, "w") as f:
            json.dump((full_tree, tree_i2w), f)
    return full_tree, tree_i2w


def _eval_split(encoder_decoder, tokenizer, args, full_tree_voc, dtype):
    """Validate on one data type of the valid split and report loss/accuracy."""
    valid_set = CAIPDataset(
        tokenizer, args, prefix="valid", dtype=dtype, full_tree_voc=full_tree_voc
    )
    l, _, _, a = validate(encoder_decoder, valid_set, tokenizer, args)
    msg = "evaluating on {} valid: \t Loss: {:.4f} \t Accuracy: {:.4f}".format(dtype, l, a)
    print(msg)
    logging.info(msg)


def main():
    """Train the CAIP parser end-to-end, save it, then evaluate it.

    Pipeline: parse args -> set up logging -> load/induce grammar ->
    build datasets and encoder-decoder model -> train -> save artifacts ->
    validate on each data type.
    """
    args = _build_parser().parse_args()
    # HACK: allows us to give rephrase proba only instead of full dictionary
    if args.rephrase_proba > 0:
        args.dtype_samples = json.dumps(
            [["templated", 1.0 - args.rephrase_proba], ["rephrases", args.rephrase_proba]]
        )
    _setup_logging(args.model_name)
    full_tree, tree_i2w = _load_or_make_grammar(args)
    tokenizer = AutoTokenizer.from_pretrained(args.pretrained_encoder_name)
    logging.info("loading data")
    train_dataset = CAIPDataset(
        tokenizer,
        args,
        prefix="train",
        sampling=True,
        word_noise=args.word_dropout,
        full_tree_voc=(full_tree, tree_i2w),
    )
    # make model
    logging.info("making model")
    enc_model = AutoModel.from_pretrained(args.pretrained_encoder_name)
    bert_config = BertConfig.from_pretrained("bert-base-uncased")
    bert_config.is_decoder = True
    # decoder vocabulary: grammar tokens plus 8 special tokens
    bert_config.vocab_size = len(tree_i2w) + 8
    bert_config.num_hidden_layers = args.num_decoder_layers
    dec_with_loss = DecoderWithLoss(bert_config, args, tokenizer)
    encoder_decoder = EncoderDecoderWithLoss(enc_model, dec_with_loss, args)
    # train model
    logging.info("training model")
    encoder_decoder = encoder_decoder.cuda()
    encoder_decoder.train()
    loss, accu = train(encoder_decoder, train_dataset, tokenizer, args)
    # save model, grammar and run arguments side by side
    with open(pjoin(args.output_dir, args.model_name + "_tree.json"), "w") as f:
        json.dump((full_tree, tree_i2w), f)
    with open(pjoin(args.output_dir, args.model_name + "_args.pk"), "wb") as f:
        pickle.dump(args, f)
    torch.save(encoder_decoder.state_dict(), pjoin(args.output_dir, args.model_name + ".pth"))
    # evaluate model on every validation data type
    _ = encoder_decoder.eval()
    logging.info("evaluating model")
    for dtype in ["templated", "rephrases", "prompts", "humanbot"]:
        _eval_split(encoder_decoder, tokenizer, args, (full_tree, tree_i2w), dtype)
# script entry point: parse arguments, then train and evaluate the parser
if __name__ == "__main__":
    main()
| craftassist-master | acl2020_submission/model_training_code/train_model.py |
import json
import numpy as np
import random
from os.path import isfile, isdir
from os.path import join as pjoin
import torch
from torch.utils.data import Dataset
#########
# Node typing: checking the type of a specific sub-tree (dict value)
#########
def is_span(val):
    """True if `val` has the span shape [line, [begin, end]] with int entries."""
    try:
        line_idx, (begin, end) = val
    except (ValueError, TypeError):
        # wrong arity or not unpackable: not a span
        return False
    return type(line_idx) == int and type(begin) == int and type(end) == int
def is_span_list(val):
    """True if `val` is a non-empty list whose elements are all spans."""
    if type(val) != list or len(val) == 0:
        return False
    return all(is_span(v) for v in val)
def is_cat(val):
    """True if `val` is a categorical value: a plain string or a boolean."""
    if val is True or val is False:
        return True
    return type(val) == str
def is_cat_list(val):
    """True if `val` is a non-empty list whose elements are all categorical."""
    if type(val) != list or len(val) == 0:
        return False
    return all(is_cat(v) for v in val)
def is_int(val):
    """True if `val` is an internal node (represented as a dictionary)."""
    return dict == type(val)
def is_int_list(val):
    """True if `val` is a non-empty list whose elements are all internal nodes."""
    if type(val) != list or len(val) == 0:
        return False
    return all(is_int(v) for v in val)
#########
# Make grammar from dataset. Starts with empty full_tree
# then add all nodes found in the dataset
#########
# if new_tree is outside of what the grammar can handle, modifies grammar
# also counts number of occurence of each node
def add_tree(full_tree, new_tree, vocounts, nw=1):
    """Grow the grammar `full_tree` to cover `new_tree` and count node usage.

    Every node/value seen in `new_tree` adds weight `nw` to its grammar
    entry and to the output-vocabulary counters in `vocounts`. Vocabulary
    keys are prefixed by node kind: "C:" categorical value, "IB:"/"IE:"
    internal-node begin/end, "ILB:"/"IL&:"/"ILE:" internal-list
    begin/separator/end, "S:"/"BE:" span node and its begin-end marker.
    """
    for k, v in new_tree.items():
        if k not in full_tree:
            # first time we see this node name: create its grammar entry
            full_tree[k] = {"name": k, "children": {}, "values": {}, "count": 0}
        full_tree[k]["count"] += nw
        if is_cat(v):
            # categorical leaf: remember the value and count it
            full_tree[k]["values"][v] = full_tree[k]["values"].get(v, 0) + nw
            w = "C:" + k + "|" + str(v)
            vocounts[w] = vocounts.get(w, 0) + nw
        elif is_int(v):
            # internal node: count begin/end markers, then recurse into children
            ws = "IB:" + k
            we = "IE:" + k
            vocounts[ws] = vocounts.get(ws, 0) + nw
            vocounts[we] = vocounts.get(we, 0) + nw
            add_tree(full_tree[k]["children"], v, vocounts, nw)
        elif is_int_list(v):
            # list of internal nodes: begin/separator/end markers, recurse on items
            ws = "ILB:" + k
            wi = "IL&:" + k
            we = "ILE:" + k
            vocounts[ws] = vocounts.get(ws, 0) + nw
            vocounts[wi] = vocounts.get(wi, 0) + nw
            vocounts[we] = vocounts.get(we, 0) + nw
            for c in v:
                add_tree(full_tree[k]["children"], c, vocounts, nw)
        elif is_span(v) or is_span_list(v):
            # span leaf: count the node token and its begin-end marker
            # NOTE(review): values matching none of these kinds are silently skipped
            w = "S:" + k
            ws = "BE:" + k
            vocounts[w] = vocounts.get(w, 0) + nw
            vocounts[ws] = vocounts.get(ws, 0) + nw
# starts with an empty grammar and adds trees from the dataset
def make_full_tree(trees_weight_ls):
    """Induce the grammar and output vocabulary from weighted tree sets.

    `trees_weight_ls` is a list of (list of (dialogue, tree), weight) pairs.
    Returns (grammar, vocabulary), vocabulary sorted by decreasing count
    with "BE:span" always appended last.
    """
    grammar = {}
    counts = {}
    for tree_list, weight in trees_weight_ls:
        for _dlg, tree in tree_list:
            grammar_counted = add_tree(grammar, tree, counts, weight)
    vocab = [tok for tok, _ in sorted(counts.items(), key=lambda kv: kv[1], reverse=True)]
    vocab += ["BE:span"]
    return grammar, vocab
#########
# Linearize and de-linearize trees
#########
# transforms tree into sequence of (token, start_span, end_span)
# idx_map maps the span ids before and after tokenization
def tree_to_seq(full_tree, tree, idx_map=None):
    """Linearize `tree` into a list of (token, span_start, span_end) triples.

    `idx_map` maps pre-tokenization span indices to post-BPE positions.
    Non-span tokens carry (-1, -1) for the span fields.
    """
    res = []
    # keys known to the grammar come first, most frequent first, so the
    # linearization order is deterministic; unknown keys follow alphabetically
    sorted_keys = sorted(
        [k for k in tree.keys() if k in full_tree],
        key=lambda x: full_tree[x]["count"],
        reverse=True,
    ) + sorted([k for k, v in tree.items() if k not in full_tree])
    for k in sorted_keys:
        if is_cat(tree[k]):
            # categorical leaf: a single token encoding key and value
            res += [("C:" + k + "|" + str(tree[k]), -1, -1)]
        elif is_span(tree[k]):
            # span leaf: S: token followed by a BE: token carrying the
            # post-BPE (begin, end) positions looked up through idx_map
            a, (b, c) = tree[k]
            # res += [('S:' + k, idx_map[a][b][0], idx_map[a][c][1])]
            res += [("S:" + k, -1, -1)]
            res += [("BE:" + k, idx_map[a][b][0], idx_map[a][c][1])]
        elif is_int(tree[k]):
            # internal node: bracketed recursion IB: ... IE:
            res += (
                [("IB:" + k, -1, -1)]
                + tree_to_seq(full_tree.get(k, {"children": {}})["children"], tree[k], idx_map)
                + [("IE:" + k, -1, -1)]
            )
        elif is_int_list(tree[k]):
            # list of internal nodes: ILB: item IL&: item ... ILE:
            res += [("ILB:" + k, -1, -1)]
            for c in tree[k]:
                res += tree_to_seq(full_tree.get(k, {"children": {}})["children"], c, idx_map) + [
                    ("IL&:" + k, -1, -1)
                ]
            # replace the trailing separator with the list-end marker
            res = res[:-1] + [("ILE:" + k, -1, -1)]
        else:
            raise NotImplementedError
    return res
# selects sub-tree in (span in the output sequence) so we can apply recursively seq_to_tree
def select_spans(seq):
    """Match bracket tokens in a linearized tree sequence.

    Returns (span_dict, well_formed): span_dict maps each opening-bracket
    position (IB:/ILB: token) to its closing position; unclosed brackets
    close at len(seq) and well_formed is False if any bracket was left
    unclosed or closed without being opened.
    """
    n = len(seq)
    close_at = [-1] * n
    # per bracket name, counters of how many closes each open still needs
    open_counts = {}
    saw_unopened = False
    for pos, (tok, _sb, _se) in enumerate(seq):
        if tok.startswith("IB:") or tok.startswith("ILB:"):
            counters = open_counts.setdefault(tok, {})
            counters[pos] = 0
            # a new open of the same name pushes every pending open one deeper
            for opened in counters:
                counters[opened] += 1
        elif tok.startswith("IE:") or tok.startswith("ILE:"):
            opener = tok.replace("E:", "B:")
            if opener not in open_counts:
                # closing an unopened bracket
                saw_unopened = True
            else:
                counters = open_counts[opener]
                done = []
                for opened in counters:
                    counters[opened] -= 1
                    if counters[opened] <= 0:
                        done.append(opened)
                        close_at[opened] = pos
                for opened in done:
                    del counters[opened]
    # well-formed iff every open was closed and no close was unopened
    well_formed = (not saw_unopened) and all(
        len(counters) == 0 for counters in open_counts.values()
    )
    # unclosed brackets implicitly close at the end of the sequence
    for counters in open_counts.values():
        for opened in counters:
            close_at[opened] = n
    span_dict = {s_idx: e_idx for s_idx, e_idx in enumerate(close_at) if e_idx > 0}
    return (span_dict, well_formed)
# transforms sequence back into tree of nested dictionaries
# span_dict identifies the sub-sequences corresponding to sub-trees
def seq_to_tree(full_tree, seq, idx_rev_map=None, span_dct=None, start_id=0):
    """Invert tree_to_seq: rebuild a nested-dict tree from a token sequence.

    `idx_rev_map` maps post-BPE token positions back to (line, word) pairs;
    `span_dct` (computed via select_spans when absent) maps opening-bracket
    positions to closing positions; `start_id` is the offset of `seq` inside
    the original full sequence, used when indexing span_dict recursively.
    Returns (tree, well_formed).
    """
    res = {}
    if span_dct is None:
        span_dict, well_formed = select_spans(seq)
    else:
        span_dict = span_dct
        well_formed = True
    idx = 0
    while idx < len(seq):
        if ":" not in seq[idx][0]:
            # not a typed grammar token: skip it
            idx += 1
            continue
        t, w = seq[idx][0].split(":")
        # categorical node
        if t == "C":
            cat, val = w.split("|")
            res[cat] = val
            idx += 1
        # span node
        elif t == "S":
            if idx + 1 < len(seq):
                # the BE: token right after the S: token carries the
                # post-BPE span; map it back to pre-BPE (line, word) indices
                b_pre = seq[idx + 1][1]
                e_pre = seq[idx + 1][2]
                l_idx, b_idx = idx_rev_map[b_pre]
                _, e_idx = idx_rev_map[e_pre]
                res[w] = [l_idx, [b_idx, e_idx]]
            else:
                # dangling S: at the end of the sequence: emit a dummy span
                res[w] = [-1, [-1, -1]]
            # idx += 1
            idx += 2
        # internal node
        elif t == "IB":
            sub_full_tree = full_tree.get(w, {"children": {}})["children"]
            # sub-sequence between this IB: and its matching IE:
            sub_span = (idx + 1, span_dict[start_id + idx] - start_id)
            sub_seq = seq[sub_span[0] : sub_span[1]]
            res[w] = seq_to_tree(
                sub_full_tree, sub_seq, idx_rev_map, span_dict, start_id=start_id + sub_span[0]
            )[0]
            idx = sub_span[1]
        # internal node list
        elif t == "ILB":
            sub_full_tree = full_tree.get(w, {"children": {}})["children"]
            sub_span = (idx + 1, span_dict[start_id + idx] - start_id)
            pre_sub_seq = seq[sub_span[0] : sub_span[1]]
            # split sub-sequence by list items
            sub_seq_ls_idx = (
                [-1]
                + [i for i, sw in enumerate(pre_sub_seq) if sw[0] == "IL&:" + w]
                + [len(pre_sub_seq)]
            )
            sub_span_ls = [
                (sub_span[0] + sub_seq_ls_idx[i] + 1, sub_span[0] + sub_seq_ls_idx[i + 1])
                for i in range(len(sub_seq_ls_idx) - 1)
            ]
            # read sub-trees
            res[w] = []
            for s_sub_span in sub_span_ls:
                sub_seq = seq[s_sub_span[0] : s_sub_span[1]]
                res[w] += [
                    seq_to_tree(
                        sub_full_tree,
                        sub_seq,
                        idx_rev_map,
                        span_dict,
                        start_id=start_id + s_sub_span[0],
                    )[0]
                ]
            idx = sub_span[1]
        # failure case??? TODO: raise error
        else:
            idx += 1
    return (res, well_formed)
# returns empty tree if ta and tb are the same tree
def compare_tree(ta, tb):
    """Recursively diff two trees; returns {} when they are identical.

    Differing leaves map to an (a_value, b_value) pair; keys present on only
    one side map to (value, "") or ("", value).
    """
    res = {}
    # internal node
    if is_int(ta) or is_int_list(ta):
        if is_int_list(ta):
            # NOTE(review): only the first list item of each side is compared,
            # and tb is assumed to be a non-empty list whenever ta is — confirm
            ta = ta[0]
            tb = tb[0]
        for a in ta:
            if a in tb:
                # recurse; only record the key if a difference was found
                comp = compare_tree(ta[a], tb[a])
                if len(comp) > 0:
                    res[a] = comp
            else:
                res[a] = (ta[a], "")
        for b in tb:
            if b not in ta:
                res[b] = ("", tb[b])
    elif ta != tb:
        # differing leaves: report both values
        res = (ta, tb)
    return res
##################
# torch Dataset
##################
# helper function to align word indices before and after applying BPE
def align_post_tok(pre_tok, post_tok, seen_toks=0):
    """Align word indices of `pre_tok` with those of its BPE form `post_tok`.

    Returns one [first, last] pair per whitespace-separated word of
    `pre_tok`, giving the range of post-tokenization token indices (offset
    by `seen_toks`) that cover it. Walks both strings character by
    character: (i, j) count words on each side, (ci, cj) are char cursors.
    """
    i, j, ci, cj = [0] * 4
    idx_map = [[seen_toks, seen_toks] for _ in range(len(pre_tok.split()))]
    while ci < len(pre_tok) and cj < len(post_tok):
        if pre_tok[ci] == post_tok[cj]:
            if pre_tok[ci] == " ":
                # word boundary on both sides: close previous word, open next
                i += 1
                j += 1
                if i > 0:
                    idx_map[i - 1][1] = j - 1 + seen_toks
                idx_map[i][0] = j + seen_toks
            ci += 1
            cj += 1
        elif post_tok[cj] == " ":
            # BPE split the word: the post side starts a new token mid-word
            j += 1
            cj += 1
        elif pre_tok[ci] == " ":
            # pre side has a space the post side dropped: advance the pre word
            i += 1
            if i > 0:
                idx_map[i - 1][0] = j - 1 + seen_toks
                idx_map[i][1] = j + seen_toks
            ci += 1
        else:
            # character only present post-BPE (e.g. "##" continuation marks)
            cj += 1
    # close the range of the last word
    idx_map[i][-1] = j + seen_toks
    return idx_map
# applies BPE to input and creates mapping of span indices before and after BPE
def tokenize_mapidx(text, tokenizer):
    """Apply the BPE tokenizer to a (possibly multi-line) chat `text`.

    Lines are separated by "<SEP>". Returns the joined token string
    (leading "[CLS]", each line followed by "[SEP]") together with, for
    each line, a map from pre-BPE word indices to post-BPE token positions.
    The per-line maps are stored in reverse order (last chat line first).
    """
    chat_lines = [ln.strip() for ln in text.split("<SEP>")]
    seen_toks = 1  # offset of 1 accounts for the leading [CLS]
    idx_maps = [[] for _ in chat_lines]
    res_toks = ["[CLS]"]
    for lid, line in enumerate(chat_lines):
        pieces = tokenizer.tokenize(line)
        joined = " ".join(pieces)
        # store this line's alignment at the mirrored position
        idx_maps[-1 - lid] = align_post_tok(line, joined, seen_toks)
        res_toks.extend(pieces)
        res_toks.append("[SEP]")
        seen_toks += len(pieces) + 1
    return (" ".join(res_toks), idx_maps)
# takes raw text and tree, returns BPE-ed text and linearized tree
def tokenize_linearize(text, tree, tokenizer, full_tree, word_noise=0.0):
    """BPE-tokenize `text` and linearize `tree`, with optional word dropout.

    Each non-special token is independently replaced by "[UNK]" with
    probability `word_noise` (simple input-noise regularization).
    """
    tok_text, idx_maps = tokenize_mapidx(text, tokenizer)
    noisy_toks = []
    for w in tok_text.split():
        if w not in ["[CLS]", "[SEP]"] and random.random() < word_noise:
            noisy_toks.append("[UNK]")
        else:
            noisy_toks.append(w)
    lin_tree = tree_to_seq(full_tree, tree, idx_maps)
    return (" ".join(noisy_toks), lin_tree)
# torch Dataset for the CAIP format, applies BPE and linearizes trees on-the-fly
class CAIPDataset(Dataset):
    """CAIP (CraftAssist Instruction Parsing) dataset.

    Loads (text, tree) pairs for one or several data types and applies BPE
    tokenization plus tree linearization on-the-fly in __getitem__. With
    prefix="train", examples can be sampled across data types according to
    args.dtype_samples, and a fixed-size circular buffer of mis-parsed
    "hard" examples is maintained.
    """

    def __init__(
        self,
        tokenizer,
        args,
        prefix="train",
        dtype="templated",
        sampling=False,
        word_noise=0.0,
        full_tree_voc=None,
    ):
        assert isdir(args.data_dir)
        self.tokenizer = tokenizer
        # We load the (input, tree) pairs for all data types and
        # initialize the hard examples buffer
        self.data = {}
        self.sampling = sampling
        self.word_noise = word_noise
        # dtype_samples is a JSON list of [data_type, probability] pairs
        dtype_samples = json.loads(args.dtype_samples)
        self.dtype = dtype
        self.dtypes = [t for t, p in dtype_samples]
        self.sample_probas = np.array([p for t, p in dtype_samples])
        self.sample_probas /= self.sample_probas.sum()
        if prefix == "train":
            # training: load every sampled data type, missing files become []
            for k in self.dtypes:
                fname = pjoin(args.data_dir, prefix, k + ".json")
                if isfile(fname):
                    print("loading", fname)
                    self.data[k] = json.load(open(fname))
                else:
                    self.data[k] = []
            self.hard_buffer_size = 1024
            self.hard_buffer_counter = 0
        else:
            # validation/test: load only the requested data type
            fname = pjoin(args.data_dir, prefix, dtype + ".json")
            if isfile(fname):
                print("loading", fname)
                self.data[dtype] = json.load(open(fname))
            else:
                self.data[dtype] = []
        # load meta-tree and tree vocabulary
        if full_tree_voc is None:
            # NOTE(review): this path assumes "humanbot", "prompts" and
            # "templated" were all loaded above — confirm callers
            print("making tree")
            ftr, tr_i2w = make_full_tree(
                [
                    (self.data["humanbot"], 3e5),
                    (self.data["prompts"], 1e5),
                    (self.data["templated"][:100000], 1),
                ]
            )
            self.full_tree = ftr
        else:
            full_tree, tr_i2w = full_tree_voc
            self.full_tree = full_tree
        # decoder vocabulary: 8 special tokens followed by the grammar tokens
        spec_tokens = ["[PAD]", "unused", "[UNK]", "[CLS]", "[SEP]", "[MASK]", "<S>", "</S>"]
        self.tree_voc = spec_tokens[:] + tr_i2w
        self.tree_idxs = dict([(w, i) for i, w in enumerate(self.tree_voc)])
        # epoch length is the size of the largest data type (optionally capped)
        self.dataset_length = max([len(v) for v in self.data.values()])
        if args.examples_per_epoch > 0:
            self.dataset_length = min(self.dataset_length, args.examples_per_epoch)

    def __len__(self):
        return self.dataset_length

    def __getitem__(self, idx):
        # sample data type and get example
        if self.sampling:
            dtype = np.random.choice(self.dtypes, p=self.sample_probas)
            # fall back to the default data type if the sampled one is empty
            if len(self.data[dtype]) == 0:
                dtype = self.dtype
        else:
            dtype = self.dtype
        p_text, p_tree = self.data[dtype][idx % len(self.data[dtype])]
        # BPE-tokenize the text and linearize the tree on the fly
        text, tree = tokenize_linearize(
            p_text, p_tree, self.tokenizer, self.full_tree, self.word_noise
        )
        text_idx_ls = [self.tokenizer._convert_token_to_id(w) for w in text.split()]
        # wrap the linearized tree with <S> ... </S> and map tokens to ids
        tree_idx_ls = [
            [self.tree_idxs[w], bi, ei]
            for w, bi, ei in [("<S>", -1, -1)] + tree + [("</S>", -1, -1)]
        ]
        return (text_idx_ls, tree_idx_ls, (text, p_text, p_tree))

    def add_hard_example(self, exple):
        # circular buffer of mis-parsed training examples
        # NOTE(review): assumes self.data has a "hard" entry and that
        # hard_buffer_counter was initialized (train prefix only) — confirm
        if self.hard_buffer_counter < self.hard_buffer_size:
            self.data["hard"] += [exple]
        else:
            self.data["hard"][self.hard_buffer_counter % self.hard_buffer_size] = exple
        self.hard_buffer_counter += 1
# applies padding and makes batch tensors
def caip_collate(batch, tokenizer):
    """Collate (text_ids, tree_ids, example) triples into padded tensors.

    Returns (x, x_mask, y, y_mask, pre_examples): x/y are padded id tensors,
    the masks flag real (non-pad) positions, and pre_examples keeps the raw
    (text, tree) pairs for book-keeping.
    """
    # keep track of the raw examples
    pre_examples = [(p_text, p_tree) for _, _, (_, p_text, p_tree) in batch]
    # input text: pad with the tokenizer's pad id
    seqs_x = [x for x, _, _ in batch]
    max_x = max(len(s) for s in seqs_x)
    x = torch.tensor([s + [tokenizer.pad_token_id] * (max_x - len(s)) for s in seqs_x])
    x_mask = torch.tensor([[1] * len(s) + [0] * (max_x - len(s)) for s in seqs_x])
    # output linearized trees: pad with [0, -1, -1] (0 is the padding idx)
    seqs_y = [y for _, y, _ in batch]
    max_y = max(len(s) for s in seqs_y)
    y = torch.tensor([s + [[0, -1, -1]] * (max_y - len(s)) for s in seqs_y])
    y_mask = torch.tensor([[1] * len(s) + [0] * (max_y - len(s)) for s in seqs_y])
    return (x, x_mask, y, y_mask, pre_examples)
| craftassist-master | acl2020_submission/model_training_code/utils_caip.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Adam, Adagrad
from transformers.modeling_bert import BertModel, BertOnlyMLMHead
from utils_caip import *
# --------------------------
# Transformer-based decoder module for sequence ans span prediction, computes the loss
# --------------------------
def my_xavier_init(m, gain=1):
    """Initialize `m`'s matrices Xavier-uniform and its vectors/scalars to 0."""
    for param in m.parameters():
        if param.dim() <= 1:
            # biases and other 1-d/0-d parameters start at zero
            nn.init.constant_(param, 0)
        else:
            nn.init.xavier_uniform_(param, gain)
class HighwayLayer(torch.nn.Module):
    """Single highway layer: out = g * tanh(W_n x) + (1 - g) * x.

    The gate g = sigmoid(W_g x) has its bias initialized to -1, so the
    layer initially passes its input through mostly unchanged.
    """

    def __init__(self, dim):
        super(HighwayLayer, self).__init__()
        self.gate_proj = nn.Linear(dim, dim, bias=True)
        self.nlin_proj = nn.Linear(dim, dim, bias=True)
        my_xavier_init(self.nlin_proj)
        my_xavier_init(self.gate_proj)
        nn.init.constant_(self.gate_proj.bias, -1)

    def forward(self, x):
        gate = torch.sigmoid(self.gate_proj(x))
        transformed = torch.tanh(self.nlin_proj(x))
        return gate * transformed + (1 - gate) * x
# single module to predict the output sequence and compute the
# loss if the target sequence is provided for convenience
class DecoderWithLoss(nn.Module):
    """Transformer decoder over linearized trees, with span heads and loss.

    Predicts the next grammar token with a BERT LM head, and for span nodes
    scores start/end positions over the encoder states via highway-projected
    dot products. forward computes the combined training loss; step is the
    loss-less version used at decoding time.
    """

    def __init__(self, config, args, tokenizer):
        super(DecoderWithLoss, self).__init__()
        # model components
        self.bert = BertModel(config)
        self.lm_head = BertOnlyMLMHead(config)
        # highway stacks applied before scoring span starts / ends
        self.span_b_proj = nn.ModuleList([HighwayLayer(768) for _ in range(args.num_highway)])
        self.span_e_proj = nn.ModuleList([HighwayLayer(768) for _ in range(args.num_highway)])
        # loss functions
        if args.node_label_smoothing > 0:
            # NOTE(review): LabelSmoothingLoss is defined elsewhere in the project
            self.lm_ce_loss = LabelSmoothingLoss(
                args.node_label_smoothing, config.vocab_size, ignore_index=tokenizer.pad_token_id
            )
        else:
            self.lm_ce_loss = torch.nn.CrossEntropyLoss(
                ignore_index=tokenizer.pad_token_id, reduction="none"
            )
        self.span_ce_loss = torch.nn.CrossEntropyLoss(ignore_index=-1, reduction="none")
        self.span_loss_lb = args.lambda_span_loss

    # without loss, use at prediction time
    # TODO: add previously computed y_rep
    # y only has the node indices (not the spans)
    def step(self, y, y_mask, x_reps, x_mask):
        """One decoding step: detached log-softmax scores for the next token
        and for span begin/end positions."""
        y_rep = self.bert(
            input_ids=y,
            attention_mask=y_mask,
            encoder_hidden_states=x_reps,
            encoder_attention_mask=x_mask,
        )[0]
        y_mask_target = y_mask
        lm_scores = self.lm_head(y_rep)
        y_span_pre_b = y_rep
        for hw in self.span_b_proj:
            y_span_pre_b = hw(y_span_pre_b)
        # dot-product scores between decoder states and every encoder position
        span_b_scores = (x_reps[:, None, :, :] * y_span_pre_b[:, :, None, :]).sum(dim=-1)
        # NOTE(review): padded target positions get a +1e9 shift broadcast
        # over all source positions; softmax is shift-invariant per position,
        # but a -1e9 mask may have been intended — confirm
        span_b_scores = (
            span_b_scores + (1 - y_mask_target.type_as(span_b_scores))[:, :, None] * 1e9
        )
        y_span_pre_e = y_rep
        for hw in self.span_e_proj:
            y_span_pre_e = hw(y_span_pre_e)
        span_e_scores = (x_reps[:, None, :, :] * y_span_pre_e[:, :, None, :]).sum(dim=-1)
        span_e_scores = (
            span_e_scores + (1 - y_mask_target.type_as(span_e_scores))[:, :, None] * 1e9
        )
        res = {
            "lm_scores": torch.log_softmax(lm_scores, dim=-1).detach(),
            "span_b_scores": torch.log_softmax(span_b_scores, dim=-1).detach(),
            "span_e_scores": torch.log_softmax(span_e_scores, dim=-1).detach(),
        }
        return res

    def forward(self, y, y_mask, x_reps, x_mask):
        """Teacher-forced decoding of the full target sequence with loss.

        y is (batch, len, 3): token id, span begin, span end. The decoder
        reads y[:, :-1] and is trained to predict y[:, 1:].
        """
        y_rep = self.bert(
            input_ids=y[:, :-1, 0],
            attention_mask=y_mask[:, :-1],
            encoder_hidden_states=x_reps,
            encoder_attention_mask=x_mask,
        )[0]
        y_mask_target = y_mask[:, 1:].contiguous()
        # language modeling
        lm_scores = self.lm_head(y_rep)
        lm_lin_scores = lm_scores.view(-1, lm_scores.shape[-1])
        lm_lin_targets = y[:, 1:, 0].contiguous().view(-1)
        lm_lin_loss = self.lm_ce_loss(lm_lin_scores, lm_lin_targets)
        lm_lin_mask = y_mask_target.view(-1)
        # mean LM loss over non-padded target positions
        lm_loss = lm_lin_loss.sum() / lm_lin_mask.sum()
        # span prediction
        ## beginning of spans
        y_span_pre_b = y_rep
        for hw in self.span_b_proj:
            y_span_pre_b = hw(y_span_pre_b)
        span_b_scores = (x_reps[:, None, :, :] * y_span_pre_b[:, :, None, :]).sum(dim=-1)
        span_b_scores = (
            span_b_scores + (1 - y_mask_target.type_as(span_b_scores))[:, :, None] * 1e9
        )
        span_b_lin_scores = span_b_scores.view(-1, x_reps.shape[1])
        span_b_lin_targets = y[:, 1:, 1].contiguous().view(-1)
        # non-span targets are -1 and thus ignored by span_ce_loss
        span_b_lin_loss = self.span_ce_loss(span_b_lin_scores, span_b_lin_targets)
        ## end of spans
        y_span_pre_e = y_rep
        for hw in self.span_e_proj:
            y_span_pre_e = hw(y_span_pre_e)
        span_e_scores = (x_reps[:, None, :, :] * y_span_pre_e[:, :, None, :]).sum(dim=-1)
        span_e_scores = (
            span_e_scores + (1 - y_mask_target.type_as(span_e_scores))[:, :, None] * 1e9
        )
        span_e_lin_scores = span_e_scores.view(-1, span_e_scores.shape[-1])
        span_e_lin_targets = y[:, 1:, 2].contiguous().view(-1)
        span_e_lin_loss = self.span_ce_loss(span_e_lin_scores, span_e_lin_targets)
        ## joint span prediction
        # TODO: predict full spans, enforce order
        # combine: span loss normalized by the number of actual span targets
        span_lin_loss = span_b_lin_loss + span_e_lin_loss
        span_loss = span_lin_loss.sum() / (y[:, :, 1] >= 0).sum()
        tot_loss = (1 - self.span_loss_lb) * lm_loss + self.span_loss_lb * span_loss
        res = {
            "lm_scores": lm_scores,
            "span_b_scores": span_b_scores,
            "span_e_scores": span_e_scores,
            "loss": tot_loss,
        }
        return res
# combines DecoderWithLoss with pre-trained BERT encoder
class EncoderDecoderWithLoss(nn.Module):
    """Full parsing model: pre-trained text encoder + tree decoder with loss.

    When args.train_encoder is False, encoder representations are detached
    so no gradient flows back into the encoder.
    """

    def __init__(self, encoder, decoder, args):
        super(EncoderDecoderWithLoss, self).__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.train_encoder = args.train_encoder

    def forward(self, x, x_mask, y, y_mask, x_reps=None):
        # encode the input text unless representations were precomputed
        if x_reps is None:
            x_reps = self.encoder(input_ids=x, attention_mask=x_mask)[0]
        # optionally freeze the encoder by cutting the gradient path
        if not self.train_encoder:
            x_reps = x_reps.detach()
        return self.decoder(y, y_mask, x_reps, x_mask)
# raw text input, tree output
# DEPRECATED: use beam search
def predict_tree(txt, model, tokenizer, dataset, ban_noop=False, noop_threshold=0.0):
    """Greedy decoding: raw text in, (tree, noop_predicted, (text, seq)) out.

    DEPRECATED: use beam_search. Decodes up to 100 tokens greedily;
    a top-scoring NOOP token is replaced by the second-best token when
    ban_noop is set or its score falls below noop_threshold.
    """
    model_device = model.decoder.lm_head.predictions.decoder.weight.device
    # prepare batch
    text, idx_maps = tokenize_mapidx(txt, tokenizer)
    tree = [("<S>", -1, -1)]
    text_idx_ls = [dataset.tokenizer._convert_token_to_id(w) for w in text.split()]
    tree_idx_ls = [[dataset.tree_idxs[w], bi, ei] for w, bi, ei in tree]
    pre_batch = [(text_idx_ls, tree_idx_ls, (text, txt, {}))]
    batch = caip_collate(pre_batch, tokenizer)
    batch = [t.to(model_device) for t in batch[:4]]
    x, x_mask, y, y_mask = batch
    # keep only the token-id channel of the tree tensor
    y = y[:, :, 0]
    # encode the input text once; no encoder gradients needed here
    x_reps = model.encoder(input_ids=x, attention_mask=x_mask)[0].detach()
    res = [("<S>", -1, -1)]
    next_id = -1
    noop_predicted = False
    for i in range(100):
        if i > 0:
            # append the previously predicted token to the decoder input
            y = torch.cat([y, torch.LongTensor([[next_id]]).to(model_device)], dim=1)
            y_mask = torch.cat(
                [y_mask, torch.LongTensor([1]).unsqueeze(dim=0).to(model_device)], dim=1
            )
        outputs = model.decoder.step(y, y_mask, x_reps, x_mask)
        # next word
        lm_scores = outputs["lm_scores"]
        s_lm_scores, s_lm_ids = lm_scores[0, -1].sort(dim=-1, descending=True)
        next_id = s_lm_ids[0].item()
        if "NOOP" in dataset.tree_voc[next_id]:
            # fall back to the second-best token when NOOP is banned or weak
            if ban_noop or s_lm_scores[0].item() < noop_threshold:
                next_id = s_lm_ids[1].item()
                noop_predicted = True
                print("---- replacing NOOP with", dataset.tree_voc[next_id])
        next_w = dataset.tree_voc[next_id]
        # predicted span
        span_b_scores = outputs["span_b_scores"]
        span_e_scores = outputs["span_e_scores"]
        _, s_sb_ids = span_b_scores[0, -1].sort(dim=-1, descending=True)
        _, s_se_ids = span_e_scores[0, -1].sort(dim=-1, descending=True)
        b_id = s_sb_ids[0].item()
        e_id = s_se_ids[0].item()
        res += [(next_w, b_id, e_id)]
        if next_w == "</S>":
            break
    # only keep span predictions for span nodes, then map back to tree
    res = [(w, b, e) if w.startswith("BE:") else (w, -1, -1) for w, b, e in res]
    idx_rev_map = [(0, 0)] * len(text.split())
    for line_id, idx_map in enumerate(idx_maps):
        for pre_id, (a, b) in enumerate(idx_map):
            idx_rev_map[a] = (line_id, pre_id)
            idx_rev_map[b] = (line_id, pre_id)
    # NOTE(review): last position (presumably the final [SEP]) reuses the
    # previous token's mapping — confirm
    idx_rev_map[-1] = idx_rev_map[-2]
    res_tree, _ = seq_to_tree(dataset.full_tree, res[1:-1], idx_rev_map)
    return (res_tree, noop_predicted, (text, res))
# beam prediction. Only uses node prediction scores (not the span scores)
def beam_search(txt, model, tokenizer, dataset, beam_size=5, well_formed_pen=1e2):
    """Decode a chat string into candidate logical-form trees with beam search.

    Args:
        txt: raw input command string.
        model: encoder-decoder parser (uses model.encoder and model.decoder.step).
        tokenizer: sub-word tokenizer matching the encoder.
        dataset: CAIP dataset object providing tree vocabulary (tree_voc) and
            token-to-id maps (tree_idxs).
        beam_size: number of hypotheses kept at every decoding step.
        well_formed_pen: score penalty applied to finished sequences whose
            spans do not form a well-formed tree.

    Returns:
        List of (tree, log_score, token_sequence) tuples, best score first.
    """
    model_device = model.decoder.lm_head.predictions.decoder.weight.device
    # prepare batch: sub-word tokenize and build the word-index reverse map
    text, idx_maps = tokenize_mapidx(txt, tokenizer)
    idx_rev_map = [(0, 0)] * len(text.split())
    for line_id, idx_map in enumerate(idx_maps):
        for pre_id, (a, b) in enumerate(idx_map):
            idx_rev_map[a] = (line_id, pre_id)
            idx_rev_map[b] = (line_id, pre_id)
    idx_rev_map[-1] = idx_rev_map[-2]
    tree = [("<S>", -1, -1)]
    text_idx_ls = [dataset.tokenizer._convert_token_to_id(w) for w in text.split()]
    tree_idx_ls = [[dataset.tree_idxs[w], bi, ei] for w, bi, ei in tree]
    pre_batch = [(text_idx_ls, tree_idx_ls, (text, txt, {}))]
    batch = caip_collate(pre_batch, tokenizer)
    batch = [t.to(model_device) for t in batch[:4]]
    x, x_mask, y, y_mask = batch
    # encode once, then broadcast encoder outputs across the beam dimension
    x_reps = model.encoder(input_ids=x, attention_mask=x_mask)[0].detach()
    x_mask = x_mask.expand(beam_size, -1)
    x_reps = x_reps.expand(beam_size, -1, -1)
    # start decoding: all beams begin at <S>; only beam 0 has score 0 so the
    # first step does not produce beam_size duplicates
    y = torch.LongTensor([[dataset.tree_idxs["<S>"]] for _ in range(beam_size)]).to(
        model_device
    )  # B x 1
    beam_scores = torch.Tensor([-1e9 for _ in range(beam_size)]).to(model_device)  # B
    beam_scores[0] = 0
    beam_seqs = [[("<S>", -1, -1)] for _ in range(beam_size)]
    finished = [False for _ in range(beam_size)]
    # finished beams may only emit [PAD] so their score stays frozen
    pad_scores = torch.Tensor([-1e9] * len(dataset.tree_voc)).to(model_device)
    pad_scores[dataset.tree_idxs["[PAD]"]] = 0
    for i in range(100):  # hard cap on decoded length
        outputs = model.decoder.step(y, y_mask, x_reps, x_mask)
        # next word
        lm_scores = outputs["lm_scores"][:, -1, :]  # B x V
        # NOTE(review): this inner loop shadows the outer loop variable `i`;
        # harmless in Python (the outer `for` reassigns it) but worth renaming.
        for i, fshed in enumerate(finished):
            if fshed:
                lm_scores[i] = pad_scores
        # flatten (beam, vocab) scores and take the global top beam_size
        beam_lm_scores = lm_scores + beam_scores[:, None]  # B x V
        beam_lm_lin = beam_lm_scores.view(-1)
        s_scores, s_ids = beam_lm_lin.sort(dim=-1, descending=True)
        s_beam_ids = s_ids // beam_lm_scores.shape[-1]
        s_word_ids = s_ids % beam_lm_scores.shape[-1]
        # re-order and add next token
        beam_scores = s_scores[:beam_size]
        n_beam_ids = s_beam_ids[:beam_size]
        n_word_ids = s_word_ids[:beam_size]
        n_words = [dataset.tree_voc[nw_id.item()] for nw_id in n_word_ids]
        y = torch.cat([y[n_beam_ids], n_word_ids[:, None]], dim=1)
        # find out which of the beams are finished
        pre_finished = [finished[b_id.item()] for b_id in n_beam_ids]
        new_finished = [w_id.item() == dataset.tree_idxs["</S>"] for w_id in n_word_ids]
        finished = [p or n for p, n in zip(pre_finished, new_finished)]
        n_mask = 1 - torch.Tensor(finished).type_as(y_mask)
        y_mask = torch.cat([y_mask[n_beam_ids], n_mask[:, None]], dim=1)
        # predicted span: pick the best (begin, end) pair with begin <= end
        span_b_scores = outputs["span_b_scores"][:, -1, :][n_beam_ids]  # B x T
        span_e_scores = outputs["span_e_scores"][:, -1, :][n_beam_ids]  # B x T
        span_be_scores = span_b_scores[:, :, None] + span_e_scores[:, None, :]
        # mask out pairs where end precedes begin (lower triangle)
        invalid_scores = torch.tril(torch.ones(span_be_scores.shape), diagonal=-1) * -1e9
        span_be_scores += invalid_scores.type_as(span_be_scores)
        span_be_lin = span_be_scores.view(span_be_scores.shape[0], -1)
        _, s_sbe_ids = span_be_lin.sort(dim=-1, descending=True)
        s_sb_ids = s_sbe_ids[:, 0] // span_b_scores.shape[-1]
        s_se_ids = s_sbe_ids[:, 0] % span_b_scores.shape[-1]
        beam_b_ids = [bb_id.item() for bb_id in s_sb_ids]
        beam_e_ids = [be_id.item() for be_id in s_se_ids]
        # update beam_seq
        beam_seqs = [
            beam_seqs[n_beam_ids[i].item()] + [(n_words[i], beam_b_ids[i], beam_e_ids[i])]
            for i in range(beam_size)
        ]
        # penalize poorly formed trees
        for i, seq in enumerate(beam_seqs):
            if seq[-1][0] == "</S>":
                _, well_formed = select_spans(seq)
                if not well_formed:
                    beam_scores[i] -= well_formed_pen
        # check whether all beams have reached EOS
        if all(finished):
            break
    # only keep span predictions for span nodes, then map back to tree
    beam_seqs = [
        [(w, b, e) if w.startswith("BE:") else (w, -1, -1) for w, b, e in res if w != "[PAD]"]
        for res in beam_seqs
    ]
    # delinearize predicted sequences into tree
    beam_trees = [seq_to_tree(dataset.full_tree, res[1:-1], idx_rev_map)[0] for res in beam_seqs]
    pre_res = [
        (tree, score.item(), seq) for tree, score, seq in zip(beam_trees, beam_scores, beam_seqs)
    ]
    # sort one last time to have well-formed trees on top
    res = sorted(pre_res, key=lambda x: x[1], reverse=True)
    return res
# util function for validation and selecting hard examples
def compute_accuracy(outputs, y):
    """Per-sequence exact-match accuracies for node and span predictions.

    Args:
        outputs: dict with "lm_scores", "span_b_scores", "span_e_scores"
            tensors of shape (batch, seq, n_classes).
        y: gold tensor of shape (batch, seq + 1, 3) holding, per position,
            (node id, span begin, span end); position 0 is the start token.

    Returns:
        Tuple (lm_acc, sp_acc, full_acc) of per-example boolean tensors: node
        accuracy, span accuracy (begin AND end), and their conjunction.
    """

    def _masked_seq_match(scores, targets, mask):
        # A sequence counts as correct when every masked position's argmax
        # equals its target.
        preds = scores.max(dim=-1)[1]
        return ((preds == targets) * mask).sum(dim=1) == mask.sum(dim=1)

    node_targets = y[:, 1:, 0]
    # node ids <= 6 are special tokens and are excluded from the match
    lm_acc = _masked_seq_match(outputs["lm_scores"], node_targets, node_targets > 6)
    begin_targets = y[:, 1:, 1]
    sb_acc = _masked_seq_match(outputs["span_b_scores"], begin_targets, begin_targets >= 0)
    end_targets = y[:, 1:, 2]
    se_acc = _masked_seq_match(outputs["span_e_scores"], end_targets, end_targets >= 0)
    sp_acc = sb_acc * se_acc
    full_acc = lm_acc * sp_acc
    return (lm_acc, sp_acc, full_acc)
# --------------------------
# Custom wrapper for Adam optimizer,
# handles lr warmup and smaller lr for encoder fine-tuning
# --------------------------
class OptimWarmupEncoderDecoder(object):
    """Two-stack optimizer wrapper with per-stack learning rate and warmup.

    Keeps a separate optimizer for the (pre-trained) encoder and the decoder
    so each stack can use its own peak learning rate and warmup length.
    """

    def __init__(self, model, args):
        self.encoder = model.encoder
        self.decoder = model.decoder
        # Peak learning rate and warmup length per stack.
        self.lr = {"encoder": args.encoder_learning_rate, "decoder": args.decoder_learning_rate}
        self.warmup_steps = {
            "encoder": args.encoder_warmup_steps,
            "decoder": args.decoder_warmup_steps,
        }
        if args.optimizer == "adam":
            self.optimizers = {
                "encoder": Adam(model.encoder.parameters(), lr=self.lr["encoder"]),
                "decoder": Adam(model.decoder.parameters(), lr=self.lr["decoder"]),
            }
        elif args.optimizer == "adagrad":
            self.optimizers = {
                "encoder": Adagrad(model.encoder.parameters(), lr=self.lr["encoder"]),
                "decoder": Adagrad(model.decoder.parameters(), lr=self.lr["decoder"]),
            }
        else:
            raise NotImplementedError
        self._step = 0

    def _update_rate(self, stack):
        """Learning rate for `stack` at the current step: linear ramp while
        _step < warmup_steps, square-root growth afterwards (the min() picks
        whichever of the two curves is lower)."""
        return self.lr[stack] * min(
            (self._step / self.warmup_steps[stack]), (self._step / self.warmup_steps[stack]) ** 0.5
        )

    def zero_grad(self):
        """Clear gradients on both stacks.

        Bug fix: this previously accessed nonexistent attributes
        ``self.optimizer_decoder`` / ``self.optimizer_encoder`` and raised
        AttributeError on every call; the optimizers live in self.optimizers.
        """
        for optimizer in self.optimizers.values():
            optimizer.zero_grad()

    def step(self):
        """Advance the step counter, refresh every param group's lr from the
        warmup schedule, then apply both optimizers."""
        self._step += 1
        for stack, optimizer in self.optimizers.items():
            new_rate = self._update_rate(stack)
            for param_group in optimizer.param_groups:
                param_group["lr"] = new_rate
            optimizer.step()
# --------------------------
# Label smoothing loss
# --------------------------
class LabelSmoothingLoss(nn.Module):
    """
    With label smoothing,
    KL-divergence between q_{smoothed ground truth prob.}(w)
    and p_{prob. computed by model}(w) is minimized.
    """

    def __init__(self, label_smoothing, tgt_vocab_size, ignore_index=-1):
        assert 0.0 <= label_smoothing <= 1.0
        super(LabelSmoothingLoss, self).__init__()
        self.ignore_index = ignore_index
        self.voc_size = tgt_vocab_size
        # Mass given to each non-target class; when an index is ignored it
        # receives no mass, so one fewer class shares the smoothing budget.
        if ignore_index >= 0:
            n_smoothed = tgt_vocab_size - 2
        else:
            n_smoothed = tgt_vocab_size - 1
        self.smoothing = label_smoothing / n_smoothed
        self.confidence = 1.0 - label_smoothing

    def forward(self, output, target):
        """
        output (FloatTensor): batch_size x n_classes
        target (LongTensor): batch_size
        """
        # Build the smoothed target distribution without tracking gradients.
        with torch.no_grad():
            smoothed = torch.full_like(output, self.smoothing)
            if self.ignore_index >= 0:
                smoothed[:, self.ignore_index] = 0
            # Replace ignored targets with a valid index before scatter.
            safe_target = target.masked_fill(target == self.ignore_index, 0)
            smoothed.scatter_(1, safe_target.unsqueeze(1), self.confidence)
        pointwise = F.kl_div(output.log_softmax(dim=-1), smoothed, reduction="none")
        # Zero out the loss of examples whose target equals ignore_index.
        valid = (target != self.ignore_index).type_as(pointwise).unsqueeze(1)
        return (pointwise * valid).sum(dim=-1)
| craftassist-master | acl2020_submission/model_training_code/utils_parsing.py |
import json
import math
import pickle
import torch
from transformers import AutoModel, AutoTokenizer, BertConfig
from utils_parsing import *
from utils_caip import *
from train_model import *
class TTADBertModel(object):
    """Text-to-action-dictionary parser: a BERT encoder with a trained tree
    decoder, loaded from serialized artifacts on disk."""

    def __init__(self, model_dir, data_dir, model_name="caip_test_model"):
        """Load tokenizer, tree vocabulary, and trained weights.

        Expects three files under model_dir: <name>_args.pk (pickled training
        args), <name>_tree.json (tree grammar + vocab), and <name>.pth
        (state dict).
        """
        model_name = model_dir + model_name
        args = pickle.load(open(model_name + "_args.pk", "rb"))
        args.data_dir = data_dir
        self.tokenizer = AutoTokenizer.from_pretrained(args.pretrained_encoder_name)
        full_tree, tree_i2w = json.load(open(model_name + "_tree.json"))
        self.dataset = CAIPDataset(
            self.tokenizer, args, prefix="", full_tree_voc=(full_tree, tree_i2w)
        )
        enc_model = AutoModel.from_pretrained(args.pretrained_encoder_name)
        bert_config = BertConfig.from_pretrained("bert-base-uncased")
        bert_config.is_decoder = True
        # decoder vocab = tree tokens plus an 8-token offset
        # NOTE(review): presumably 8 special tokens — confirm against training code
        bert_config.vocab_size = len(tree_i2w) + 8
        bert_config.num_hidden_layers = args.num_decoder_layers
        dec_with_loss = DecoderWithLoss(bert_config, args, self.tokenizer)
        self.encoder_decoder = EncoderDecoderWithLoss(enc_model, dec_with_loss, args)
        # strict=False tolerates missing/extra keys in the checkpoint
        map_location = None if torch.cuda.is_available() else torch.device("cpu")
        self.encoder_decoder.load_state_dict(
            torch.load(model_name + ".pth", map_location=map_location), strict=False
        )
        self.encoder_decoder = (
            self.encoder_decoder.cuda()
            if torch.cuda.is_available()
            else self.encoder_decoder.cpu()
        )
        self.encoder_decoder.eval()

    def parse(self, chat, noop_thres=0.95, beam_size=5, well_formed_pen=1e2):
        """Parse a chat line into a logical-form tree.

        If the top beam hypothesis is a NOOP with probability below
        noop_thres, fall back to the runner-up hypothesis.
        """
        btr = beam_search(
            chat, self.encoder_decoder, self.tokenizer, self.dataset, beam_size, well_formed_pen
        )
        if btr[0][0].get("dialogue_type", "NONE") == "NOOP" and math.exp(btr[0][1]) < noop_thres:
            tree = btr[1][0]
        else:
            tree = btr[0][0]
        return tree
| craftassist-master | acl2020_submission/model_training_code/query_model.py |
import sys
sys.path.append('/hdd2/dyah/liger/liger')
import os
import torch
import argparse
import numpy as np
from liger import Liger
from flyingsquid_cluster import Flyingsquid_Cluster
from core import load_config
from utils import evaluate_thresholds, cluster_embeddings
import warnings
if not sys.warnoptions:
warnings.simplefilter("ignore")
def main(args):
    """Run the Liger pipeline: load precomputed LF votes/labels/embeddings,
    expand LF coverage via embedding similarity, cluster the embedding space,
    fit per-cluster FlyingSquid label models, and report dev/test metrics."""
    cfg = load_config(args.config)
    n_clusters = cfg['n_clusters']
    # NOTE(review): `engine` is unused below — presumably the embedding engine name
    engine = "ada"
    dataset = cfg['dataset']
    thresholds = cfg['thresholds']
    T = cfg['T']
    embedding_path = cfg['embedding_path']
    if 'data_path' in cfg:
        data_path = cfg['data_path']
    else:
        data_path = embedding_path
    # Load LF vote matrices (L), labels (Y), and embeddings for each split;
    # file names optionally carry an embedding-type suffix.
    if 'embedding_type' in cfg:
        embedding_type = cfg['embedding_type']
        L_dev_raw_orig = torch.load(os.path.join(data_path, f'val_L_{embedding_type}.pt')).detach().cpu().numpy()
        Y_dev_raw = torch.load(os.path.join(data_path, f'val_Y_{embedding_type}.pt')).detach().cpu().numpy()
        Y_test_raw = torch.load(os.path.join(data_path, f'test_Y_{embedding_type}.pt')).detach().cpu().numpy()
        L_train_raw_orig = torch.load(os.path.join(data_path, f'train_L_{embedding_type}.pt')).detach().cpu().numpy()
        L_test_raw_orig = torch.load(os.path.join(data_path, f'test_L_{embedding_type}.pt')).detach().cpu().numpy()
        avg_embeddings_train = torch.load(os.path.join(embedding_path, f'train_feature_{embedding_type}.pt')).detach().cpu().numpy()
        avg_embeddings_dev = torch.load(os.path.join(embedding_path, f'val_feature_{embedding_type}.pt')).detach().cpu().numpy()
        avg_embeddings_test = torch.load(os.path.join(embedding_path, f'test_feature_{embedding_type}.pt')).detach().cpu().numpy()
    else:
        L_dev_raw_orig = torch.load(os.path.join(data_path, 'val_L.pt')).detach().cpu().numpy()
        Y_dev_raw = torch.load(os.path.join(data_path, 'val_Y.pt')).detach().cpu().numpy()
        Y_test_raw = torch.load(os.path.join(data_path, 'test_Y.pt')).detach().cpu().numpy()
        L_train_raw_orig = torch.load(os.path.join(data_path, 'train_L.pt')).detach().cpu().numpy()
        L_test_raw_orig = torch.load(os.path.join(data_path, 'test_L.pt')).detach().cpu().numpy()
        avg_embeddings_train = torch.load(os.path.join(embedding_path, 'train_feature.pt')).detach().cpu().numpy()
        avg_embeddings_dev = torch.load(os.path.join(embedding_path,'val_feature.pt')).detach().cpu().numpy()
        avg_embeddings_test = torch.load(os.path.join(embedding_path,'test_feature.pt')).detach().cpu().numpy()
    if dataset == "weather":
        # weather uses one shared threshold, broadcast to every LF
        thresholds = [thresholds[0] for i in range(L_dev_raw_orig.shape[1])]
    liger = Liger()
    # Densify LF votes by propagating labels to embedding-similar abstains.
    L_train_expanded = liger.expand_lfs(
        L_train_raw_orig, L_train_raw_orig, avg_embeddings_train, avg_embeddings_train,
        thresholds = thresholds)
    L_dev_expanded = liger.expand_lfs(
        L_train_raw_orig, L_dev_raw_orig, avg_embeddings_train, avg_embeddings_dev,
        thresholds = thresholds)
    L_test_expanded = liger.expand_lfs(
        L_train_raw_orig, L_test_raw_orig, avg_embeddings_train, avg_embeddings_test,
        thresholds = thresholds)
    L_train_raw = L_train_expanded
    L_dev_raw = L_dev_expanded
    L_test_raw = L_test_expanded
    # Trim each split so its length is divisible by the sequence length T.
    L_train = L_train_raw[:L_train_raw.shape[0] - (L_train_raw.shape[0] % T)]
    L_dev = L_dev_raw[:L_dev_raw.shape[0] - (L_dev_raw.shape[0] % T)]
    L_test = L_test_raw[:L_test_raw.shape[0] - (L_test_raw.shape[0] % T)]
    Y_dev = Y_dev_raw[:Y_dev_raw.shape[0] - (Y_dev_raw.shape[0] % T)]
    Y_test = Y_test_raw[:Y_test_raw.shape[0] - (Y_test_raw.shape[0] % T)]
    m_per_task = L_train.shape[1]
    m = T * m_per_task
    v = T
    # Cluster training embeddings and assign dev/test points to those clusters.
    kmeans, embedding_groups, train_cluster_labels = cluster_embeddings(avg_embeddings_train, n_clusters)
    dev_cluster_labels = kmeans.predict(avg_embeddings_dev)
    test_cluster_labels = kmeans.predict(avg_embeddings_test)
    cluster_models = []
    for i in range(len(embedding_groups)):
        cluster_models.append(Flyingsquid_Cluster(X=embedding_groups[i], mu=kmeans.cluster_centers_[i], T=T, m_per_task=m_per_task))
    # Candidate all-negative class-balance values to tune over.
    neg_balances_to_try = np.arange(.01, .99, .05)
    tune_by = 'f1'
    if dataset == 'spam' or dataset =='weather':
        tune_by = 'acc'
    evaluate_thresholds(thresholds, cluster_models, neg_balances_to_try, \
        L_train_expanded, L_dev_expanded, L_test_expanded, \
        Y_dev_raw, Y_test_raw, train_cluster_labels, dev_cluster_labels, test_cluster_labels,\
        evaluate_test=False, tune_test=True, tune_by=tune_by)
if __name__ == '__main__':
    # CLI entry point: run the Liger pipeline with a YAML config file.
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config', type=str, help='config file path')
    args = parser.parse_args()
    main(args)
| liger-main | run_liger.py |
import yaml
# Global default config values merged into every loaded config.
# NOTE(review): "DEGFAULTS" looks like a typo for "DEFAULTS", but it is
# referenced as a default argument below, so the name is kept as-is.
DEGFAULTS = {}
def _merge(src, dst):
for k, v in src.items():
if k in dst:
if isinstance(v, dict):
_merge(src[k], dst[k])
else:
dst[k] = v
def _check_and_update_model_params(config):
return
def load_config(config_file,defaults = DEGFAULTS):
    """Load a YAML config file and merge ``defaults`` into it in place.

    Args:
        config_file: path to a YAML file.
        defaults: mapping of fallback values merged into the parsed config.

    Returns:
        The parsed config dict.
    """
    # NOTE(review): the parameter name is rebound to the open file handle here.
    with open(config_file, 'r') as config_file:
        config = yaml.load(config_file,Loader=yaml.FullLoader)
    _merge(defaults,config)
    return config
from .config import load_config | liger-main | core/__init__.py |
from .utils import evaluate_models, test_model, evaluate_thresholds, cluster_embeddings | liger-main | utils/__init__.py |
import numpy as np
from sklearn.metrics import precision_recall_fscore_support, accuracy_score
from sklearn.cluster import KMeans
def evaluate_models(FS_cluster_models, neg_balances_to_try, mode='dev', tune_by='f1'):
    """Tune each cluster's class-balance hyperparameter on the given split,
    then score the combined per-cluster predictions.

    Args:
        FS_cluster_models: list of Flyingsquid_Cluster objects with fitted
            triplet_models and attached data_temporal.
        neg_balances_to_try: candidate all-negative class-balance values.
        mode: which split of data_temporal to score ('dev' or 'test').
        tune_by: 'f1' or 'acc' — metric used to pick the best balance.

    Returns:
        (acc, precision, recall, f1, support, FS_cluster_models) where the
        metrics are for the positive class over all clusters combined.

    Bug fix: candidate balances whose predictions cover only one class are
    skipped (``len(f1) <= 1``), so the score lists could be shorter than
    ``neg_balances_to_try``; indexing ``neg_balances_to_try`` with the argmax
    of the shortened list then selected the wrong balance. Valid candidates
    are now tracked explicitly, with a fallback when none qualify.
    """
    for i, FS_cluster in enumerate(FS_cluster_models):
        accs = []
        f1s = []
        valid_cbs = []  # balances that produced a scoreable (two-class) result
        for cb in neg_balances_to_try:
            data_temporal = FS_cluster.data_temporal
            triplet_model = FS_cluster.triplet_models[cb]
            preds_individual = triplet_model.predict_proba_marginalized(
                data_temporal[f'L_{mode}']).reshape(len(data_temporal[f'Y_{mode}']))
            hard_preds = [1 if pred > 0.5 else -1 for pred in preds_individual]
            _, _, f1, _ = precision_recall_fscore_support(data_temporal[f'Y_{mode}'], hard_preds)
            acc = accuracy_score(data_temporal[f'Y_{mode}'], hard_preds)
            # f1 has one entry per observed class; skip degenerate candidates
            if len(f1) > 1:
                accs.append(acc)
                f1s.append(f1[1])
                valid_cbs.append(cb)
        if not valid_cbs:
            # No candidate produced both classes; fall back to the first balance
            # instead of crashing on argmax of an empty array.
            FS_cluster.set_best_cb(neg_balances_to_try[0])
        elif tune_by == 'f1':
            FS_cluster.set_best_cb(valid_cbs[int(np.argmax(np.array(f1s)))])
        else:
            FS_cluster.set_best_cb(valid_cbs[int(np.argmax(np.array(accs)))])
        FS_cluster_models[i] = FS_cluster
    # Score all clusters jointly using their tuned balances.
    preds_all = []
    Y_arranged = []
    for FS_cluster in FS_cluster_models:
        data_temporal = FS_cluster.data_temporal
        triplet_model = FS_cluster.triplet_models[FS_cluster.best_cb]
        preds_individual = triplet_model.predict_proba_marginalized(
            data_temporal[f'L_{mode}']).reshape(len(data_temporal[f'Y_{mode}']))
        preds_all.extend(preds_individual)
        Y_arranged.extend(data_temporal[f'Y_{mode}'])
    hard_preds = [1 if pred > 0.5 else -1 for pred in preds_all]
    best_pre, best_rec, best_f1, best_support = precision_recall_fscore_support(Y_arranged, hard_preds)
    best_acc = accuracy_score(Y_arranged, hard_preds)
    return best_acc, best_pre[1], best_rec[1], best_f1[1], best_support[1], FS_cluster_models
def test_model(FS_cluster_models, best_cbs):
    """Score the test split using externally supplied per-cluster balances.

    Args:
        FS_cluster_models: clusters with fitted triplet_models and attached
            data_temporal.
        best_cbs: class balance to use for each cluster, aligned by index.

    Returns:
        (acc, precision, recall, f1, support) for the positive class over all
        clusters combined.
    """
    all_probs = []
    all_labels = []
    for FS_cluster, cb in zip(FS_cluster_models, best_cbs):
        split = FS_cluster.data_temporal
        chosen_model = FS_cluster.triplet_models[cb]
        probs = chosen_model.predict_proba_marginalized(
            split[f'L_test']).reshape(len(split[f'Y_test']))
        all_probs.extend(probs)
        all_labels.extend(split[f'Y_test'])
    # Threshold marginal probabilities into hard +1/-1 predictions.
    hard_preds = [1 if p > 0.5 else -1 for p in all_probs]
    pre, rec, f1, support = precision_recall_fscore_support(all_labels, hard_preds)
    acc = accuracy_score(all_labels, hard_preds)
    return acc, pre[1], rec[1], f1[1], support[1]
def evaluate_thresholds(thresholds, cluster_models, neg_balances_to_try,
    L_train_expanded, L_dev_expanded, L_test_expanded,
    Y_dev_raw, Y_test_raw, train_cluster_labels, dev_cluster_labels, test_cluster_labels,
    evaluate_test=False, tune_test=False, best_cbs=None, tune_by='f1'):
    """Slice expanded LF matrices by cluster, fit per-cluster label models over
    the candidate class balances, and print/return dev (and optionally test)
    metrics.

    Args:
        thresholds: per-LF similarity thresholds (reported in output only).
        cluster_models: list of Flyingsquid_Cluster objects, one per cluster.
        neg_balances_to_try: candidate all-negative class-balance values.
        L_train_expanded / L_dev_expanded / L_test_expanded: Liger-expanded LF
            vote matrices for each split.
        Y_dev_raw / Y_test_raw: gold labels for dev and test.
        train_cluster_labels / dev_cluster_labels / test_cluster_labels:
            cluster assignment of each point per split.
        evaluate_test: if True, score test with externally supplied best_cbs.
        tune_test: if True, also tune the balance directly on the test split.
        best_cbs: per-cluster balances used when evaluate_test is True.
        tune_by: 'f1' or 'acc', metric used for tuning.
    """
    L_train_raw = L_train_expanded
    L_dev_raw = L_dev_expanded
    L_test_raw = L_test_expanded
    # T is fixed to 1 here: every point forms its own length-1 "sequence".
    T = 1
    # Trim each split to a multiple of T (no-op for T == 1).
    L_train = L_train_raw[:L_train_raw.shape[0] - (L_train_raw.shape[0] % T)]
    L_dev = L_dev_raw[:L_dev_raw.shape[0] - (L_dev_raw.shape[0] % T)]
    L_test = L_test_raw[:L_test_raw.shape[0] - (L_test_raw.shape[0] % T)]
    Y_dev = Y_dev_raw[:Y_dev_raw.shape[0] - (Y_dev_raw.shape[0] % T)]
    Y_test = Y_test_raw[:Y_test_raw.shape[0] - (Y_test_raw.shape[0] % T)]
    m_per_task = L_train.shape[1]
    m = T * m_per_task
    v = T
    # Slice each split by cluster and attach the reshaped (sequence-major)
    # matrices to the corresponding cluster model.
    for cluster_idx, FS_cluster in enumerate(cluster_models):
        points_in_cluster = np.argwhere(train_cluster_labels == cluster_idx)
        L_train_cluster = L_train[points_in_cluster]
        points_in_cluster = np.argwhere(dev_cluster_labels == cluster_idx)
        L_dev_cluster = L_dev[points_in_cluster]
        Y_dev_cluster = Y_dev[points_in_cluster]
        points_in_cluster = np.argwhere(test_cluster_labels == cluster_idx)
        L_test_cluster = L_test[points_in_cluster]
        Y_test_cluster = Y_test[points_in_cluster]
        n_frames_train = L_train_cluster.shape[0]
        n_frames_dev = L_dev_cluster.shape[0]
        n_frames_test = L_test_cluster.shape[0]
        n_frames_Y_dev = Y_dev_cluster.shape[0]
        n_frames_Y_test = Y_test_cluster.shape[0]
        n_seqs_train = n_frames_train // T
        n_seqs_dev = n_frames_dev // T
        n_seqs_test = n_frames_test // T
        n_seqs_Y_dev = n_frames_Y_dev // T
        n_seqs_Y_test = n_frames_Y_test // T
        data_temporal_cluster = {
            'L_train': np.reshape(L_train_cluster, (n_seqs_train, m)),
            'L_dev': np.reshape(L_dev_cluster, (n_seqs_dev, m)),
            'Y_dev': np.reshape(Y_dev_cluster, (n_seqs_Y_dev, v)),
            'L_test': np.reshape(L_test_cluster, (n_seqs_test, m)),
            'Y_test': np.reshape(Y_test_cluster, (n_seqs_Y_test, v))
        }
        FS_cluster.set_data_temporal(data_temporal_cluster)
        cluster_models[cluster_idx] = FS_cluster
    # Fit one label model per (cluster, candidate class balance).
    for i, FS_cluster in enumerate(cluster_models):
        for neg_balance in neg_balances_to_try:
            FS_cluster.fit(FS_cluster.data_temporal['L_train'], FS_cluster.data_temporal['Y_dev'], neg_balance)
        cluster_models[i] = FS_cluster
    # Tune on dev and report.
    acc, pre, rec, f1, support, FS_cluster_dev = evaluate_models(cluster_models, neg_balances_to_try, mode = 'dev', tune_by=tune_by)
    print('Dev Thresholds: {}'.format(thresholds))
    print('Dev Acc: {:.2%}\tPre: {:.2%}\tRec: {:.2%}\tF1: {:.2%}'.format(
        acc, pre, rec, f1))
    if tune_test:
        # Tune the class balance directly on the test split as well.
        acc_test, pre_test, rec_test, f1_test, support_test, FS_cluster_test = evaluate_models(cluster_models, neg_balances_to_try, mode = 'test', tune_by=tune_by)
        print('Test Thresholds: {}'.format(thresholds))
        print('Test Acc: {:.2%}\tPre: {:.2%}\tRec: {:.2%}\tF1: {:.2%}'.format(
            acc_test, pre_test, rec_test, f1_test))
        return acc, pre, rec, f1, support, acc_test, pre_test, rec_test, f1_test, support_test, FS_cluster_dev, FS_cluster_test
    if evaluate_test:
        # Score test using the externally provided per-cluster balances.
        acc_test, pre_test, rec_test, f1_test, support_test = test_model(cluster_models,best_cbs,)
        print('Test Thresholds: {}'.format(thresholds))
        print('Test Acc: {:.2%}\tPre: {:.2%}\tRec: {:.2%}\tF1: {:.2%}'.format(
            acc_test, pre_test, rec_test, f1_test))
    return acc, pre, rec, f1, support
def cluster_embeddings(embeddings, n_clusters):
    """K-means cluster an embedding matrix with a fixed random seed.

    Args:
        embeddings: (n_points, dim) array of embeddings.
        n_clusters: number of clusters.

    Returns:
        (fitted KMeans object, list of per-cluster embedding arrays,
        per-point cluster label array).
    """
    kmeans = KMeans(n_clusters, random_state=0).fit(embeddings)
    embedding_groups = []
    for cluster_idx in range(n_clusters):
        embedding_idxs = np.argwhere(kmeans.labels_ == cluster_idx).flatten()
        # NOTE(review): this local shadows the function name; left unchanged
        # in a documentation-only pass.
        cluster_embeddings = np.take(embeddings, embedding_idxs, axis=0)
        embedding_groups.append(cluster_embeddings)
    return kmeans, embedding_groups, kmeans.labels_
import numpy as np
from sklearn.metrics import pairwise
class Liger:
    """Expands (densifies) labeling-function votes using embedding similarity."""

    def __init__(self):
        pass

    def expand_lfs(self, L_train, L_mat, train_embs, mat_embs, thresholds):
        """Replace abstains in L_mat with the vote of the most similar labeled
        training point, when that similarity clears the LF's threshold.

        Args:
            L_train: (n_train, m) LF votes on the training split (+1/-1/0).
            L_mat: (n, m) LF votes to expand.
            train_embs / mat_embs: embeddings aligned row-wise with L_train
                and L_mat respectively.
            thresholds: per-LF cosine-similarity thresholds.

        Returns:
            A copy of L_mat with some abstains replaced by +1/-1.
        """
        n_lfs = L_mat.shape[1]
        expanded = np.copy(L_mat)
        # Cosine similarity of every point-to-expand against every train point.
        sims = pairwise.cosine_similarity(mat_embs, train_embs)
        for j in range(n_lfs):
            abstain_rows = np.argwhere(L_mat[:, j] == 0).flatten()
            pos_support = np.argwhere(L_train[:, j] == 1).flatten()
            neg_support = np.argwhere(L_train[:, j] == -1).flatten()
            sims_pos = sims[abstain_rows][:, pos_support]
            sims_neg = sims[abstain_rows][:, neg_support]
            # Similarity to the closest positively/negatively labeled point;
            # -1 sentinel when the LF has no support of that polarity.
            if sims_pos.shape[1] > 0:
                best_pos = np.max(sims_pos, axis=1)
            else:
                best_pos = np.full(abstain_rows.shape, -1)
            if sims_neg.shape[1] > 0:
                best_neg = np.max(sims_neg, axis=1)
            else:
                best_neg = np.full(abstain_rows.shape, -1)
            # Adopt the closer polarity if it beats both the other polarity
            # and this LF's threshold.
            take_pos = (best_pos > best_neg) & (best_pos > thresholds[j])
            take_neg = (best_neg > best_pos) & (best_neg > thresholds[j])
            expanded[abstain_rows[take_pos], j] = 1
            expanded[abstain_rows[take_neg], j] = -1
        return expanded
| liger-main | liger/liger.py |
from .liger import Liger
from .flyingsquid_cluster import Flyingsquid_Cluster | liger-main | liger/__init__.py |
from flyingsquid.label_model import LabelModel
import numpy as np
class Flyingsquid_Cluster:
    """Per-cluster FlyingSquid label-model wrapper.

    Holds the cluster's embeddings and centroid, the sequence layout
    (T frames per sequence, m_per_task LFs per frame), and a cache of
    LabelModels fitted at different candidate class balances.
    """

    def __init__(self, X, mu, T, m_per_task):
        self.X = X                    # embeddings assigned to this cluster
        self.mu = mu                  # cluster centroid
        self.triplet_models = {}      # class balance -> fitted LabelModel
        self.T = T                    # frames per sequence
        self.m_per_task = m_per_task  # LFs voting on each frame
        self.m = T * m_per_task       # total LF votes per sequence
        self.v = T                    # hidden variables per sequence

    def get_class_balance(self, all_negative_balance):
        """Balance over the 2**T label configurations: all mass on the
        all-negative and all-positive configurations."""
        class_balance = np.array([all_negative_balance] +
                                 [0 for i in range(2 ** self.T - 2)] +
                                 [1 - all_negative_balance])
        return class_balance

    def set_data_temporal(self, data_temporal):
        """Attach the reshaped (sequence-major) label matrices for this cluster."""
        self.data_temporal = data_temporal

    def fit(self, L_train_temporal, Y_dev_temporal, all_neg_balance):
        """Fit and cache a LabelModel at the given all-negative balance.

        Bug fix: removed a redundant guard that pre-seeded
        ``self.triplet_models[all_neg_balance]`` with an empty list that was
        unconditionally overwritten below.
        """
        cb = self.get_class_balance(all_neg_balance)
        triplet_model = LabelModel(
            self.m, self.v,
            [(i, i + 1) for i in range(self.v - 1)],  # chain dependencies for tasks
            [(i + self.m_per_task * j, j)  # LF's have dependencies to the frames they vote on
             for i in range(self.m_per_task) for j in range(self.v)],
            [],  # no dependencies between LFs
            allow_abstentions = True
        )
        triplet_model.fit(
            L_train_temporal,
            Y_dev = Y_dev_temporal,
            class_balance = cb,
            solve_method = 'triplet_median'
        )
        self.triplet_models[all_neg_balance] = triplet_model

    def set_best_cb(self, cb):
        """Record the tuned class balance for this cluster.

        Bug fix: stray dataset-dump tokens fused onto this assignment made it
        raise NameError at runtime; the clean assignment is restored.
        """
        self.best_cb = cb
import torch
import torch.optim as optim
import torch.utils.data
from torch.autograd import Variable
import numpy as np
import argparse
import sys, os
if sys.version_info[0] < 3:
import cPickle as cp
else:
import _pickle as cp
from copy import deepcopy
sys.path.append("./models")
sys.path.append("./kernels")
sys.path.append("./utils")
sys.path.append("./..")
from gaussian_exact import GaussianKernel
from rff import RFF
from circulant_rff import CirculantRFF
from nystrom import Nystrom
from ensemble_nystrom import EnsembleNystrom
from quantizer import Quantizer
from logistic_regression import LogisticRegression
from ridge_regression import RidgeRegression
from kernel_regressor import KernelRidgeRegression
from data_loader import load_data
import halp
import halp.optim
import halp.quantize
from train_utils import train, evaluate, ProgressMonitor
from train_utils import get_sample_kernel_metrics, get_sample_kernel_F_norm, sample_data
# imports for fixed design runs
from misc_utils import expected_loss
from scipy.optimize import minimize
# EPS to prevent numerical issue in closed form ridge regression solver
EPS = 1e-10
# Command-line interface for the kernel-approximation training experiments.
# NOTE: arguments are parsed at import time (module top level).
parser = argparse.ArgumentParser()
# model / training basics
parser.add_argument("--model", type=str, default="logistic_regression")
parser.add_argument("--minibatch", type=int, default=64)
# parser.add_argument("--dataset", type=str, default="census")
parser.add_argument("--l2_reg", type=float, default=0.0)
parser.add_argument("--kernel_sigma", type=float, default=30.0)
parser.add_argument("--n_feat", type=int, default=32)
parser.add_argument("--random_seed", type=int, default=1)
# quantization of features and model weights
parser.add_argument("--n_bit_feat", type=int, default=32)
parser.add_argument("--n_bit_model", type=int, default=32)
parser.add_argument("--scale_model", type=float, default=0.00001)
parser.add_argument("--do_fp_feat", action="store_true")
# optimization settings
parser.add_argument("--learning_rate", type=float, default=0.1)
parser.add_argument("--data_path", type=str, default="../data/census/")
parser.add_argument("--epoch", type=int, default=40)
parser.add_argument("--cuda", action="store_true")
parser.add_argument("--opt", type=str, default="sgd")
parser.add_argument("--opt_mu", type=float, default=10.0)
parser.add_argument("--opt_epoch_T", type=float, default=1.0,
    help="The # of epochs as interval between two consecutive scale updates/full gradient calculation")
parser.add_argument("--save_path", type=str, default="./test")
parser.add_argument("--approx_type", type=str, default="rff", help="specify using exact, rff or nystrom")
# metric collection / experiment modes
parser.add_argument("--collect_sample_metrics", action="store_true",
    help="True if we want to collect metrics from the subsampled kernel matrix")
parser.add_argument("--n_sample", type=int, default=-1,
    help="samples for metric measurements, including approximation error and etc.")
parser.add_argument("--fixed_design", action="store_true",
    help="do fixed design experiment")
parser.add_argument("--fixed_design_noise_sigma", type=float, help="label noise std")
parser.add_argument("--fixed_design_auto_l2_reg", action="store_true",
    help="if true, we auto search for the optimal lambda")
parser.add_argument("--closed_form_sol", action="store_true", help="use closed form solution")
parser.add_argument("--fixed_epoch_number", action="store_true", help="if the flag is not used, use early stopping")
parser.add_argument("--exit_after_collect_metric", action="store_true", help="if the flag is used, \
we only do metric collection on kernel matrix without doing trainining")
parser.add_argument("--n_ensemble_nystrom", type=int, default=1, help="number of learners in ensembled nystrom")
args = parser.parse_args()
if __name__ == "__main__":
np.random.seed(args.random_seed)
use_cuda = torch.cuda.is_available() and args.cuda
torch.manual_seed(args.random_seed)
if use_cuda:
torch.cuda.manual_seed(args.random_seed)
# torch.cuda.manual_seed_all(args.seed)
# load dataset
X_train, X_val, Y_train, Y_val = load_data(args.data_path)
if args.fixed_design:
print("fixed design using label noise sigma ", args.fixed_design_noise_sigma)
Y_train_orig = Y_train.copy()
X_val = X_train.copy()
Y_val = Y_train.copy()
Y_train += np.random.normal(scale=args.fixed_design_noise_sigma, size=Y_train.shape)
Y_val += np.random.normal(scale=args.fixed_design_noise_sigma, size=Y_train.shape)
if args.n_sample > 0:
# downsample if specified
X_train, Y_train = sample_data(X_train, Y_train, args.n_sample)
X_val, Y_val = sample_data(X_val, Y_val, args.n_sample)
assert X_train.shape[0] == Y_train.shape[0]
assert X_val.shape[0] == Y_val.shape[0]
print(X_train.shape[0], " training sample ", X_val.shape[0], "evaluation sample")
X_train = torch.DoubleTensor(X_train)
X_val = torch.DoubleTensor(X_val)
if args.model == "ridge_regression":
Y_train = torch.DoubleTensor(Y_train)
Y_val = torch.DoubleTensor(Y_val)
elif args.model == "logistic_regression":
Y_train = Y_train.reshape( (Y_train.size) )
Y_val = Y_val.reshape( (Y_val.size) )
n_class = np.unique(np.hstack( (Y_train, Y_val) ) ).size
Y_train = torch.LongTensor(np.array(Y_train.tolist() ).reshape(Y_train.size, 1) )
Y_val = torch.LongTensor(np.array(Y_val.tolist() ).reshape(Y_val.size, 1) )
else:
raise Exception("model not supported")
# setup dataloader
train_data = \
torch.utils.data.TensorDataset(X_train, Y_train)
train_loader = torch.utils.data.DataLoader(train_data, batch_size=args.minibatch, shuffle=False)
val_data = \
torch.utils.data.TensorDataset(X_val, Y_val)
val_loader = torch.utils.data.DataLoader(val_data, batch_size=args.minibatch, shuffle=False)
# setup gaussian kernel
n_input_feat = X_train.shape[1]
kernel = GaussianKernel(sigma=args.kernel_sigma)
if args.approx_type == "exact":
print("exact kernel mode")
# raise Exception("SGD based exact kernel is not implemented yet!")
kernel_approx = kernel
quantizer = None
elif args.approx_type == "nystrom":
print("fp nystrom mode")
kernel_approx = Nystrom(args.n_feat, kernel=kernel, rand_seed=args.random_seed)
kernel_approx.setup(X_train)
quantizer = None
elif args.approx_type == "ensemble_nystrom":
print("ensembled nystrom mode with ", args.n_ensemble_nystrom, "learner")
kernel_approx = EnsembleNystrom(args.n_feat, n_learner=args.n_ensemble_nystrom, kernel=kernel, rand_seed=args.random_seed)
kernel_approx.setup(X_train)
if args.do_fp_feat:
quantizer = None
else:
# decide on the range of representation from training sample based features
train_feat = kernel_approx.get_feat(X_train)
min_val = torch.min(train_feat)
max_val = torch.max(train_feat)
quantizer = Quantizer(args.n_bit_feat, min_val, max_val,
rand_seed=args.random_seed, use_cuda=use_cuda)
print("range for quantizing nystrom ensemble ", min_val, max_val)
print("feature quantization scale, bit ", quantizer.scale, quantizer.nbit)
elif args.approx_type == "rff":
if args.do_fp_feat == False:
print("lp rff feature mode")
assert args.n_bit_feat >= 1
n_quantized_rff = args.n_feat
print("# feature ", n_quantized_rff)
kernel_approx = RFF(n_quantized_rff, n_input_feat, kernel, rand_seed=args.random_seed)
min_val = -np.sqrt(2.0/float(n_quantized_rff) )
max_val = np.sqrt(2.0/float(n_quantized_rff) )
quantizer = Quantizer(args.n_bit_feat, min_val, max_val,
rand_seed=args.random_seed, use_cuda=use_cuda)
print("feature quantization scale, bit ", quantizer.scale, quantizer.nbit)
elif args.do_fp_feat == True:
print("fp rff feature mode")
kernel_approx = RFF(args.n_feat, n_input_feat, kernel, rand_seed=args.random_seed)
quantizer = None
elif args.approx_type == "cir_rff":
if args.do_fp_feat == False:
print("lp circulant rff feature mode")
assert args.n_bit_feat >= 1
n_quantized_rff = args.n_feat
print("# feature ", n_quantized_rff)
kernel_approx = CirculantRFF(n_quantized_rff, n_input_feat, kernel, rand_seed=args.random_seed)
min_val = -np.sqrt(2.0/float(n_quantized_rff) )
max_val = np.sqrt(2.0/float(n_quantized_rff) )
quantizer = Quantizer(args.n_bit_feat, min_val, max_val,
rand_seed=args.random_seed, use_cuda=use_cuda, for_lm_halp=( (args.opt == "lm_halp_svrg") or (args.opt == "lm_halp_sgd") ) )
print("feature quantization scale, bit ", quantizer.scale, quantizer.nbit)
elif args.do_fp_feat == True:
print("fp circulant rff feature mode")
kernel_approx = CirculantRFF(args.n_feat, n_input_feat, kernel, rand_seed=args.random_seed)
quantizer = None
else:
raise Exception("kernel approximation type not specified or not supported!")
kernel.torch(cuda=use_cuda)
kernel_approx.torch(cuda=use_cuda)
if args.fixed_design or args.closed_form_sol:
# for fixed design experiments and closed form solution form real setting
if args.fixed_design_auto_l2_reg:
# get kernel matrix and get the decomposition
assert isinstance(X_train, torch.DoubleTensor)
print("fixed design lambda calculation using kernel ", type(kernel_approx))
kernel_mat = kernel_approx.get_kernel_matrix(X_train, X_train, quantizer, quantizer)
assert isinstance(kernel_mat, torch.DoubleTensor)
U, S, _ = np.linalg.svd(kernel_mat.cpu().numpy().astype(np.float64) )
# numerically figure out the best lambda in the fixed design setting
x0 = 1.0
f = lambda lam: expected_loss(lam,U,S,Y_train_orig,args.fixed_design_noise_sigma)
res = minimize(f, x0, bounds=[(0.0, None)], options={'xtol': 1e-6, 'disp': True})
loss = f(res.x)
print("fixed design opt reg and loss", res.x, loss)
args.l2_reg = max(res.x[0], EPS)
else:
# construct model
if args.model == "logistic_regression":
model = LogisticRegression(input_dim=kernel_approx.n_feat,
n_class=n_class, reg_lambda=args.l2_reg)
elif args.model == "ridge_regression":
model = RidgeRegression(input_dim=kernel_approx.n_feat, reg_lambda=args.l2_reg)
if use_cuda:
model.cuda()
model.double()
# set up optimizer
if args.opt == "sgd":
print("using sgd optimizer")
optimizer = optim.SGD(model.parameters(), lr=args.learning_rate, weight_decay=args.l2_reg)
elif args.opt == "lpsgd":
print("using lp sgd optimizer")
optimizer = halp.optim.LPSGD(model.parameters(), lr=args.learning_rate,
scale_factor=args.scale_model, bits=args.n_bit_model, weight_decay=args.l2_reg)
print("model quantization scale and bit ", optimizer._scale_factor, optimizer._bits)
elif args.opt == "halp":
print("using halp optimizer")
optimizer = halp.optim.HALP(model.parameters(), lr=args.learning_rate,
T=int(args.opt_epoch_T * X_train.size(0) / float(args.minibatch) ),
data_loader=train_loader, mu=args.opt_mu, bits=args.n_bit_model, weight_decay=args.l2_reg)
print("model quantization, interval, mu, bit", optimizer.T, optimizer._mu,
optimizer._bits, optimizer._biased)
elif args.opt == "lm_halp_svrg":
print("using lm halp svrg optimizer")
optimizer = halp.optim.LMHALP(model.parameters(), lr=args.learning_rate,
T=int(args.opt_epoch_T * X_train.size(0) / float(args.minibatch) ),
data_loader=train_loader, mu=args.opt_mu, bits=args.n_bit_model,
weight_decay=args.l2_reg, data_scale=quantizer.scale)
print("model quantization, interval, mu, bit", optimizer.T, optimizer._mu,
optimizer._bits, optimizer._biased)
elif args.opt == "lm_halp_sgd":
print("using lm halp sgd optimizer")
optimizer = halp.optim.BitCenterLMSGD(model.parameters(), lr=args.learning_rate,
T=int(args.opt_epoch_T * X_train.size(0) / float(args.minibatch) ),
data_loader=train_loader, mu=args.opt_mu, bits=args.n_bit_model,
weight_decay=args.l2_reg, data_scale=quantizer.scale)
print("model quantization, interval, mu, bit", optimizer.T, optimizer._mu,
optimizer._bits, optimizer._biased)
else:
raise Exception("optimizer not supported")
# collect metrics
if args.collect_sample_metrics:
print("start doing sample metric collection with ", X_train.size(0), " training samples")
if use_cuda:
metric_dict_sample_val, spectrum_sample_val, spectrum_sample_val_exact = \
get_sample_kernel_metrics(X_val.cuda(), kernel, kernel_approx, quantizer, args.l2_reg)
else:
metric_dict_sample_val, spectrum_sample_val, spectrum_sample_val_exact = \
get_sample_kernel_metrics(X_val, kernel, kernel_approx, quantizer, args.l2_reg)
if not os.path.isdir(args.save_path):
os.makedirs(args.save_path)
with open(args.save_path + "/metric_sample_eval.json", "wb") as f:
cp.dump(metric_dict_sample_val, f)
np.save(args.save_path + "/spectrum_eval.npy", spectrum_sample_val)
np.save(args.save_path + "/spectrum_eval_exact.npy", spectrum_sample_val_exact)
print("Sample metric collection done!")
if args.exit_after_collect_metric:
print("exit after collect metric")
exit(0)
if args.fixed_design or args.closed_form_sol:
# for fixed design experiments and closed form solution form real setting
if use_cuda:
raise Exception("closed from solution does not support cuda mode")
print("closed form using kernel type ", args.approx_type)
regressor = KernelRidgeRegression(kernel_approx, reg_lambda=args.l2_reg)
print("start to do regression!")
# print("test quantizer", quantizer)
regressor.fit(X_train, Y_train, quantizer=quantizer)
print("finish regression!")
train_error = regressor.get_train_error()
pred = regressor.predict(X_val, quantizer_train=quantizer, quantizer_test=quantizer)
test_error = regressor.get_test_error(Y_val)
print("test error ", test_error)
if not os.path.isdir(args.save_path):
os.makedirs(args.save_path)
np.savetxt(args.save_path + "/train_loss.txt", np.array(train_error).reshape( (1, ) ) )
np.savetxt(args.save_path + "/eval_metric.txt", np.array(test_error).reshape( (1, ) ) )
np.savetxt(args.save_path + "/lambda.txt", np.array(args.l2_reg).reshape( (1, ) ) )
else:
# setup sgd training process
train_loss = []
eval_metric = []
monitor_signal_history = []
if args.model == "logistic_regression":
monitor = ProgressMonitor(init_lr=args.learning_rate, lr_decay_fac=2.0, min_lr=0.00001, min_metric_better=True, decay_thresh=0.99)
elif args.model == "ridge_regression":
monitor = ProgressMonitor(init_lr=args.learning_rate, lr_decay_fac=2.0, min_lr=0.00001, min_metric_better=True, decay_thresh=0.99)
else:
raise Exception("model not supported!")
for epoch in range(args.epoch):
# train for one epoch
loss_per_step = train(args, model, epoch, train_loader, optimizer, quantizer, kernel_approx)
train_loss += loss_per_step
# evaluate and save evaluate metric
metric, monitor_signal = evaluate(args, model, epoch, val_loader, quantizer, kernel_approx)
eval_metric.append(metric)
monitor_signal_history.append(monitor_signal)
if not os.path.isdir(args.save_path):
os.makedirs(args.save_path)
np.savetxt(args.save_path + "/train_loss.txt", train_loss)
np.savetxt(args.save_path + "/eval_metric.txt", eval_metric)
np.savetxt(args.save_path + "/monitor_signal.txt", monitor_signal_history)
if not args.fixed_epoch_number:
print("using early stopping on lr")
early_stop = monitor.end_of_epoch(monitor_signal, model, optimizer, epoch)
if early_stop:
break
| lp_rffs-master | run_model.py |
import numpy as np
import scipy
import torch
from nystrom import Nystrom
from gaussian_exact import GaussianKernel
import sys
sys.path.append("../utils")
from misc_utils import set_random_seed
from quantizer import Quantizer
import math
EPS = 1e-15
class EnsembleNystrom(object):
    """Ensemble Nystrom approximation: several independent Nystrom learners,
    each built on its own slice of landmark points, whose features are
    concatenated and rescaled to form the final representation."""

    def __init__(self, n_feat, n_learner, kernel=None, rand_seed=1):
        # the total feature budget is split evenly across the learners
        self.n_feat = n_feat
        self.n_learner = n_learner
        self.n_feat_per_learner = n_feat // n_learner
        self.kernel = kernel
        self.rand_seed = rand_seed

    def setup(self, X, n_landmark=None):
        '''
        X is in the shape of [n_sample, n_dimension]
        call setup() once before using Nystrom
        '''
        # cannot use more features than there are samples
        if self.n_feat > X.size(0):
            self.n_feat = X.size(0)
            self.n_feat_per_learner = self.n_feat // self.n_learner
        self.learners = []
        np.random.seed(self.rand_seed)
        shuffled = np.random.permutation(np.arange(X.size(0) ) )
        for idx in range(self.n_learner):
            learner = Nystrom(self.n_feat_per_learner, self.kernel, self.rand_seed)
            self.learners.append(learner)
            lo = idx * self.n_feat_per_learner
            hi = min( (idx + 1) * self.n_feat_per_learner, X.size(0) )
            # each learner gets a disjoint slice of the shuffled samples as landmarks
            learner.setup(X[shuffled[lo:hi], :] )

    def get_feat(self, X):
        # concatenate per-learner features; the 1/sqrt(n_learner) rescaling makes
        # the inner product average the individual kernel approximations
        per_learner_feat = [learner.get_feat(X) for learner in self.learners]
        feat = torch.cat(per_learner_feat, dim=1) / math.sqrt(float(len(self.learners) ) )
        print("normalizing features with ", math.sqrt(float(len(self.learners) ) ) )
        assert feat.size(1) == self.n_feat_per_learner * self.n_learner
        return feat

    def get_kernel_matrix(self, X1, X2, quantizer1=None, quantizer2=None, consistent_quant_seed=True):
        feat_x1 = self.get_feat(X1)
        feat_x2 = self.get_feat(X2)
        # optionally quantize both feature matrices with a shared random seed so
        # identical inputs produce identical quantized features
        if consistent_quant_seed and (quantizer1 is not None) and (quantizer2 is not None):
            assert quantizer1.rand_seed == quantizer2.rand_seed, "quantizer random seed are different under consistent quant seed mode!"
        if quantizer1 is not None:
            if consistent_quant_seed and list(feat_x1.size() ) == list(feat_x2.size() ):
                print("quantizing rff_x1 with random seed", quantizer1.rand_seed)
                set_random_seed(quantizer1.rand_seed)
            else:
                print("quantizing rff_x1 without fixed random seed")
            feat_x1 = quantizer1.quantize(feat_x1)
        if quantizer2 is not None:
            if consistent_quant_seed:
                print("quantizing rff_x2 with random seed", quantizer2.rand_seed)
                set_random_seed(quantizer2.rand_seed)
            feat_x2 = quantizer2.quantize(feat_x2)
        # sanity check: with consistent seeding, equal-shaped inputs must yield
        # identical (possibly quantized) features
        if consistent_quant_seed and list(feat_x1.size() ) == list(feat_x2.size() ):
            np.testing.assert_array_almost_equal(feat_x1.cpu().numpy(), feat_x2.cpu().numpy() )
        return torch.mm(feat_x1, torch.transpose(feat_x2, 0, 1) )

    def torch(self, cuda):
        for learner in self.learners:
            learner.torch(cuda)

    def cpu(self):
        for learner in self.learners:
            learner.cpu()
def test_ensemble_nystrom_full_prec_one_learner():
    # with a single learner the ensemble must coincide with plain Nystrom
    # (up to feature ordering), so squared norms are compared
    n_sample = 150
    n_feat = n_sample
    input_val1 = torch.DoubleTensor(np.random.normal(size=[n_sample, n_feat] ) ).double()
    input_val2 = input_val1
    # exact gaussian kernel for reference
    kernel = GaussianKernel(sigma=10.0)
    kernel_mat = kernel.get_kernel_matrix(input_val1, input_val2)
    # plain nystrom baseline
    approx = Nystrom(n_feat, kernel=kernel)
    approx.setup(input_val1)
    feat = approx.get_feat(input_val1)
    approx_kernel_mat = approx.get_kernel_matrix(input_val1, input_val2)
    # single-learner ensembled nystrom
    approx_ensemble = EnsembleNystrom(n_feat, n_learner=1, kernel=kernel)
    approx_ensemble.setup(input_val1)
    feat_ensemble = approx_ensemble.get_feat(input_val1)
    approx_kernel_mat_ensemble = approx_ensemble.get_kernel_matrix(input_val1, input_val2)
    np.testing.assert_array_almost_equal(np.sum(feat.cpu().numpy()**2),
                                         np.sum(feat_ensemble.cpu().numpy()**2) )
    np.testing.assert_array_almost_equal(np.sum(approx_kernel_mat.cpu().numpy()**2),
                                         np.sum(approx_kernel_mat_ensemble.cpu().numpy()**2) )
    print("single learner ensembled nystrom test passed!")
def test_ensemble_nystrom_full_prec_three_learner():
    """Smoke test: with 3 learners the concatenated ensemble feature matrix
    still has n_feat columns (n_feat_per_learner * n_learner)."""
    n_sample = 150
    n_feat = n_sample
    input_val1 = torch.DoubleTensor(np.random.normal(size=[n_sample, n_feat] ) ).double()
    input_val2 = input_val1
    # get exact gaussian kernel
    kernel = GaussianKernel(sigma=10.0)
    kernel_mat = kernel.get_kernel_matrix(input_val1, input_val2)
    # nystrom method
    approx = Nystrom(n_feat, kernel=kernel)
    approx.setup(input_val1)
    feat = approx.get_feat(input_val1)
    approx_kernel_mat = approx.get_kernel_matrix(input_val1, input_val2)
    # ensembled nystrom method
    approx_ensemble = EnsembleNystrom(n_feat, n_learner=3, kernel=kernel)
    approx_ensemble.setup(input_val1)
    feat_ensemble = approx_ensemble.get_feat(input_val1)
    assert feat_ensemble.size(0) == n_sample
    assert feat_ensemble.size(1) == n_feat
    approx_kernel_mat_ensemble = approx_ensemble.get_kernel_matrix(input_val1, input_val2)
    # bug fix: this test exercises three learners, not one — print the right message
    print("three learner ensembled nystrom test passed!")
def test_ensemble_nystrom_low_prec():
    # smoke test for the quantized (low precision) ensemble Nystrom path
    n_sample = 150
    n_feat = n_sample
    input_val1 = torch.DoubleTensor(np.random.normal(size=[n_sample, n_feat] ) ).double()
    input_val2 = input_val1
    # exact gaussian kernel for reference
    kernel = GaussianKernel(sigma=10.0)
    kernel_mat = kernel.get_kernel_matrix(input_val1, input_val2)
    # quantizer spanning the value range of the input
    quantizer = Quantizer(4, torch.min(input_val1), torch.max(input_val1), rand_seed=2, use_cuda=False)
    # plain nystrom baseline
    approx = Nystrom(n_feat, kernel=kernel)
    approx.setup(input_val1)
    feat = approx.get_feat(input_val1)
    approx_kernel_mat = approx.get_kernel_matrix(input_val1, input_val2, quantizer, quantizer)
    # ensembled nystrom with one learner; call twice to exercise the
    # consistent-quantization-seed code path repeatedly
    approx_ensemble = EnsembleNystrom(n_feat, n_learner=1, kernel=kernel)
    approx_ensemble.setup(input_val1)
    feat_ensemble = approx_ensemble.get_feat(input_val1)
    for _ in range(2):
        approx_kernel_mat_ensemble = approx_ensemble.get_kernel_matrix(
            input_val1, input_val2, quantizer, quantizer, consistent_quant_seed=True)
    print("single learner ensembled nystrom quantizerd version test passed!")
if __name__ == "__main__":
    # run all ensemble Nystrom sanity checks
    test_ensemble_nystrom_full_prec_one_learner()
    test_ensemble_nystrom_full_prec_three_learner()
    test_ensemble_nystrom_low_prec()
| lp_rffs-master | kernels/ensemble_nystrom.py |
import numpy as np
import scipy
import torch
from gaussian_exact import GaussianKernel
EPS = 1e-15
class Nystrom(object):
    """Nystrom kernel approximation built from a random subset of landmark points."""

    def __init__(self, n_feat, kernel=None, rand_seed=1):
        self.n_feat = n_feat        # target feature dimension
        self.kernel = kernel        # exact kernel object exposing get_kernel_matrix()
        self.rand_seed = rand_seed  # seed for landmark selection

    def setup(self, X, n_landmark=None):
        '''
        X is in the shape of [n_sample, n_dimension]
        call setup() once before using Nystrom
        '''
        # cap the feature count at the number of available samples
        if self.n_feat > X.size(0):
            self.n_feat = X.size(0)
        np.random.seed(self.rand_seed)
        shuffled = np.random.permutation(np.arange(X.size(0) ) )
        # standard choice: as many landmarks as features
        if n_landmark is None:
            n_landmark = min(self.n_feat, X.size(0) )
        print("# landmarks ", n_landmark)
        self.landmark = X[shuffled[:n_landmark], :]
        self.n_landmark = n_landmark
        # decompose the landmark kernel matrix (SVD) and build the projection
        # A_d = U * S^{-1/2} mapping kernel columns to approximate features
        self.K_landmark = \
            self.kernel.get_kernel_matrix(self.landmark, self.landmark)
        U, S, _ = np.linalg.svd(self.K_landmark.cpu().numpy() )
        self.U_d = torch.DoubleTensor(U[:, :n_landmark] )
        self.S_d = torch.DoubleTensor(S[:n_landmark] )
        self.A_d = torch.mm(self.U_d, torch.diag(1.0 / torch.sqrt(self.S_d) ) )

    def get_feat(self, X):
        # features = kernel values against the landmarks, projected by A_d
        return torch.mm(self.kernel.get_kernel_matrix(X, self.landmark), self.A_d)

    def get_kernel_matrix(self, X1, X2, quantizer1=None, quantizer2=None):
        # NOTE: the quantizer arguments are accepted for interface
        # compatibility but plain Nystrom does not use them
        lhs = self.get_feat(X1)
        rhs = self.get_feat(X2)
        return torch.mm(lhs, torch.transpose(rhs, 0, 1) )

    def torch(self, cuda):
        if cuda:
            self.A_d = self.A_d.cuda()
            self.landmark = self.landmark.cuda()

    def cpu(self):
        self.A_d = self.A_d.cpu()
        self.landmark = self.landmark.cpu()
# test full dimension case match exact kernel results
def test_nystrom_full():
    # when every sample is a landmark, Nystrom reproduces the exact kernel
    n_sample = 15
    n_feat = n_sample
    input_val1 = torch.Tensor(np.random.normal(size=[n_sample, n_feat] ) ).double()
    input_val2 = torch.Tensor(np.random.normal(size=[n_sample - 1, n_feat] ) ).double()
    # exact gaussian kernel as the reference
    kernel = GaussianKernel(sigma=np.random.normal() )
    exact_mat = kernel.get_kernel_matrix(input_val1, input_val2)
    nystrom = Nystrom(n_feat, kernel=kernel)
    nystrom.setup(input_val1)
    approx_mat = nystrom.get_kernel_matrix(input_val1, input_val2)
    np.testing.assert_array_almost_equal(exact_mat.cpu().numpy(), approx_mat.cpu().numpy() )
    print("nystrom full dimension test passed!")
if __name__ == "__main__":
    # run the Nystrom sanity check
    test_nystrom_full()
| lp_rffs-master | kernels/nystrom.py |
import numpy as np
import torch
import sys
sys.path.append("../utils")
from misc_utils import set_random_seed
from gaussian_exact import GaussianKernel
class RFF(object):
    """Random Fourier feature (RFF) approximation of a Gaussian kernel.

    A random projection (w, b) is drawn once at construction time; cosine
    features of the projected input approximate the exact kernel.
    """
    def __init__(self, n_feat, n_input_feat, kernel=None, rand_seed=1):
        self.n_feat = n_feat # number of rff features
        self.kernel = kernel
        self.n_input_feat = n_input_feat # dimension of the original input
        self.rand_seed = rand_seed
        self.get_gaussian_wb()
    def get_gaussian_wb(self):
        """Sample the Gaussian projection matrix w and the uniform phase vector b."""
        # print("using sigma ", 1.0/float(self.kernel.sigma), "using rand seed ", self.rand_seed)
        np.random.seed(self.rand_seed)
        self.w = np.random.normal(scale=1.0/float(self.kernel.sigma),
            size=(self.n_feat, self.n_input_feat) )
        # print("using n rff features ", self.w.shape[0] )
        # NOTE(review): the seed is reset before drawing b, so b comes from the
        # restarted stream (overlapping w's draws) — presumably intentional for
        # reproducibility across feature configurations; confirm before changing.
        np.random.seed(self.rand_seed)
        self.b = np.random.uniform(low=0.0, high=2.0 * np.pi, size=(self.n_feat, 1) )
    def torch(self, cuda=False):
        """Convert w and b to torch double tensors, optionally on GPU."""
        self.w = torch.DoubleTensor(self.w)
        self.b = torch.DoubleTensor(self.b)
        if cuda:
            self.w = self.w.cuda()
            self.b = self.b.cuda()
    def cpu(self):
        """Move w and b back to CPU."""
        self.w = self.w.cpu()
        self.b = self.b.cpu()
    def get_cos_feat(self, input_val, dtype="double"):
        """Return sqrt(2/n_feat) * cos(w x + b) features.

        Accepts a numpy array or torch tensor of shape [n_sample, n_dim];
        dtype is honored only on the numpy path. Also stores the transposed
        input and raw features on self.input / self.feat as side effects.
        """
        # input are original representaiton with the shape [n_sample, n_dim]
        if isinstance(self.kernel, GaussianKernel):
            if isinstance(input_val, np.ndarray):
                self.input = input_val.T
                self.feat = np.sqrt(2/float(self.n_feat) ) * np.cos(np.dot(self.w, self.input) + self.b)
                if dtype=="double":
                    return torch.DoubleTensor(self.feat.T)
                else:
                    return torch.FloatTensor(self.feat.T)
            else:
                self.input = torch.transpose(input_val, 0, 1)
                self.feat = float(np.sqrt(2/float(self.n_feat) ) ) * torch.cos(torch.mm(self.w, self.input) + self.b)
                return torch.transpose(self.feat, 0, 1)
        else:
            raise Exception("the kernel type is not supported yet")
    def get_sin_cos_feat(self, input_val):
        # placeholder: not implemented
        pass
    def get_kernel_matrix(self, X1, X2, quantizer1=None, quantizer2=None, consistent_quant_seed=True):
        '''
        X1 shape is [n_sample, n_dim], if force_consistent_random_seed is True
        the quantization will use the same random seed for quantizing rff_x1 and rff_x2
        '''
        rff_x1 = self.get_cos_feat(X1)
        rff_x2 = self.get_cos_feat(X2)
        if consistent_quant_seed and (quantizer1 is not None) and (quantizer2 is not None):
            assert quantizer1.rand_seed == quantizer2.rand_seed, "quantizer random seed are different under consistent quant seed mode!"
        if quantizer1 != None:
            # reuse the quantizer's seed only when both feature matrices have the
            # same shape, so x1 and x2 receive identical quantization noise
            if consistent_quant_seed and list(rff_x1.size() ) == list(rff_x2.size() ):
                print("quantizing rff_x1 with random seed", quantizer1.rand_seed)
                set_random_seed(quantizer1.rand_seed)
            else:
                print("quantizing rff_x1 without fixed random seed")
            # print("quantization 1 activated ", X1.shape)
            # print("quantizer 1 bits", quantizer1.nbit)
            # print("quantizer 1 scale", quantizer1.scale)
            rff_x1 = quantizer1.quantize(rff_x1)
        if quantizer2 != None:
            if consistent_quant_seed:
                print("quantizing rff_x2 with random seed", quantizer2.rand_seed)
                set_random_seed(quantizer2.rand_seed)
            # print("quantization 2 activated ", X2.shape)
            # print("quantizer 2 bits", quantizer2.nbit)
            # print("quantizer 2 scale", quantizer2.scale)
            rff_x2 = quantizer2.quantize(rff_x2)
        # keep the (possibly quantized) features around for inspection
        self.rff_x1, self.rff_x2 = rff_x1, rff_x2
        return torch.mm(rff_x1, torch.transpose(rff_x2, 0, 1) )
def test_pytorch_gaussian_kernel():
    """Check that the numpy and torch paths of GaussianKernel agree."""
    n_feat = 10
    input_val = np.ones( [2, n_feat] )
    # bug fix: the second multiply previously targeted row 0 again
    # (making the first line a no-op); make the two rows distinct as intended
    input_val[0, :] *= 1
    input_val[1, :] *= 2
    # get exact gaussian kernel
    kernel = GaussianKernel(sigma=2.0)
    kernel_mat = kernel.get_kernel_matrix(input_val, input_val)
    kernel_mat_torch = kernel.get_kernel_matrix(torch.Tensor(input_val), torch.Tensor(input_val) )
    np.testing.assert_array_almost_equal(kernel_mat.cpu().numpy(), kernel_mat_torch.cpu().numpy() )
    print("gaussian kernel pytorch version test passed!")
def test_rff_generation():
    """With many RFF features, the approximate kernel matrix should match the
    exact Gaussian kernel to ~3 decimals."""
    n_feat = 10
    n_rff_feat = 1000000
    input_val = np.ones( [2, n_feat] )
    # bug fix: the second multiply previously targeted row 0 again
    # (making the first line a no-op); make the two rows distinct as intended
    input_val[0, :] *= 1
    input_val[1, :] *= 2
    # get exact gaussian kernel
    kernel = GaussianKernel(sigma=2.0)
    kernel_mat = kernel.get_kernel_matrix(input_val, input_val)
    # get RFF approximate kernel matrix
    rff = RFF(n_rff_feat, n_feat, kernel=kernel)
    rff.get_gaussian_wb()
    approx_kernel_mat = rff.get_kernel_matrix(input_val, input_val)
    np.testing.assert_array_almost_equal(approx_kernel_mat.cpu().numpy(), kernel_mat.cpu().numpy(), decimal=3)
    print("rff generation test passed!")
def test_rff_generation2():
    """The numpy-path and torch-path RFF kernel matrices must agree."""
    n_feat = 10
    n_rff_feat = 1000000
    input_val = np.ones( [2, n_feat] )
    # bug fix: the second multiply previously targeted row 0 again
    # (making the first line a no-op); make the two rows distinct as intended
    input_val[0, :] *= 1
    input_val[1, :] *= 2
    # get exact gaussian kernel
    kernel = GaussianKernel(sigma=2.0)
    # kernel_mat = kernel.get_kernel_matrix(input_val, input_val)
    # get RFF approximate kernel matrix
    rff = RFF(n_rff_feat, n_feat, kernel=kernel)
    rff.get_gaussian_wb()
    approx_kernel_mat = rff.get_kernel_matrix(input_val, input_val)
    rff.torch(cuda=False)
    approx_kernel_mat2 = rff.get_kernel_matrix(torch.DoubleTensor(input_val), torch.DoubleTensor(input_val) )
    np.testing.assert_array_almost_equal(approx_kernel_mat.cpu().numpy(), approx_kernel_mat2.cpu().numpy(), decimal=6)
    print("rff generation test 2 passed!")
if __name__ == "__main__":
    # run all RFF sanity checks
    test_pytorch_gaussian_kernel()
    test_rff_generation()
    test_rff_generation2()
| lp_rffs-master | kernels/rff.py |
import numpy as np
import torch
import sys
# class GaussianKernelSpec(object):
# def __init__(self, sigma):
# self.sigma = sigma
class GaussianKernel(object):
    """Exact Gaussian (RBF) kernel: k(x, y) = exp(-||x - y||^2 / (2 sigma^2))."""
    def __init__(self, sigma):
        # sigma is the kernel bandwidth
        self.sigma = sigma
        # NOTE(review): dist_func is never used in this class — candidate for removal
        self.dist_func = torch.nn.PairwiseDistance(p=2)
    def get_kernel_matrix(self, X1, X2, quantizer1=None, quantizer2=None, dtype="float"):
        '''
        the input value has shape [n_sample, n_dim]
        quantizer is dummy here
        dtype only works for numpy input for X1 X2
        '''
        if isinstance(X1, np.ndarray) and isinstance(X2, np.ndarray):
            # numpy path: expand ||x - y||^2 = ||x||^2 + ||y||^2 - 2 x.y
            n_sample_X1 = X1.shape[0]
            norms_X1 = np.linalg.norm(X1, axis=1).reshape(n_sample_X1, 1)
            n_sample_X2 = X2.shape[0]
            norms_X2 = np.linalg.norm(X2, axis=1).reshape(n_sample_X2, 1)
            cross = np.dot(X1, X2.T)
            # print("using sigma ", self.sigma)
            kernel = np.exp(-0.5 / float(self.sigma)**2 \
                * (np.tile(norms_X1**2, (1, n_sample_X2) ) + np.tile( (norms_X2.T)**2, (n_sample_X1, 1) ) \
                -2 * cross) )
            if dtype == "float":
                return torch.Tensor(kernel).float()
            else:
                return torch.Tensor(kernel).double()
        else:
            ## to prevent memory explosion on GPU, we can do the following operations on CPU and move results
            ## back to GPU
            #is_cuda_tensor = X1.is_cuda
            #if is_cuda_tensor and use_cpu_comp:
            #  X1 = X1.cpu()
            #  X2 = X2.cpu()
            # torch path: same squared-distance expansion with tensor ops
            norms_X1 = (X1**2).sum(1).view(-1, 1)
            norms_X2 = (X2**2).sum(1).view(-1, 1)
            norms_X1 = norms_X1.repeat(1, int(X2.size(0) ) )
            norms_X2 = torch.transpose(norms_X2.repeat(1, int(X1.size(0) ) ), 0, 1)
            cross = torch.mm(X1, torch.transpose(X2, 0, 1) )
            kernel = torch.exp(-0.5 / float(self.sigma)**2 * (norms_X1 + norms_X2 - 2* cross) )
            #if is_cuda_tensor and use_cpu_comp:
            #  return kernel.cuda()
            #else:
            #  return kernel
            return kernel
    def torch(self, cuda=False):
        '''
        adapt the interface to the model launching wrapper
        '''
        pass
    def cpu(self):
        '''
        adapt the interface when switch parameter of some kernels back to cpu mode
        '''
pass | lp_rffs-master | kernels/gaussian_exact.py |
import numpy as np
import torch
from gaussian_exact import GaussianKernel
from rff import RFF
from scipy.linalg import circulant
import math
class CirculantRFF(RFF):
    '''
    RFF using circulant random projection matrix
    '''
    def __init__(self, n_feat, n_input_feat, kernel=None, rand_seed=1):
        super(CirculantRFF, self).__init__(n_feat, n_input_feat, kernel, rand_seed)

    def get_gaussian_wb(self):
        """Build w from stacked circulant blocks of Gaussian samples, each
        multiplied by a random diagonal sign-flip; b matches plain RFF."""
        self.w = np.zeros( (self.n_feat, self.n_input_feat) )
        if self.n_feat < self.n_input_feat:
            raise Exception("the dimension of projected features should be large than or equal to dimension of the raw features")
        np.random.seed(self.rand_seed)
        n_block = int(math.ceil(self.n_feat / float(self.n_input_feat) ) )
        for block_idx in range(n_block):
            # one circulant block from a fresh Gaussian generator vector
            generator = np.random.normal(scale=1.0/float(self.kernel.sigma), size=(self.n_input_feat,) )
            cir = circulant(generator)
            # Rademacher sign flips applied column-wise to the circulant block
            flip = np.diag(2 * np.random.randint(0, 2, size=(self.n_input_feat) ) - 1).astype(np.float64)
            row_start = block_idx * self.n_input_feat
            row_end = min( (block_idx + 1) * self.n_input_feat, self.n_feat)
            self.w[row_start:row_end, :] = np.dot(cir, flip)[:row_end - row_start, :]
        # reset the seed so b is drawn exactly as in the non-circulant RFF
        np.random.seed(self.rand_seed)
        self.b = np.random.uniform(low=0.0, high=2.0 * np.pi, size=(self.n_feat, 1) )
def test_circulant_rff():
    '''
    test if the circulant structure is observed in the circulant RFF object
    '''
    n_feat = 1000
    n_rff_feat = 1000
    seed = 2
    input_val = torch.DoubleTensor(np.ones( [100, n_feat] ) )
    kernel = GaussianKernel(sigma=5.0)
    kernel_cir = CirculantRFF(n_rff_feat, n_feat, kernel=kernel, rand_seed=seed)
    kernel_basic = RFF(n_rff_feat, n_feat, kernel=kernel, rand_seed=seed)
    kernel_cir.torch()
    kernel_basic.torch()
    print("should see column circulant structure", kernel_cir.w.cpu().numpy() )
    # the phase vector b is drawn identically for both variants
    np.testing.assert_array_almost_equal(np.abs(kernel_cir.b.cpu().numpy() ),
                                         np.abs(kernel_basic.b.cpu().numpy() ) )
    print("should see similar row std between basic rff and circulant rff",
          np.std(np.abs(kernel_cir.w.cpu().numpy() ), axis=1)[:10],
          np.std(np.abs(kernel_basic.w.cpu().numpy() ), axis=1)[:10] )
    print("circulant rff test passed!")
if __name__ == "__main__":
    # run the circulant RFF structure check
    test_circulant_rff()
| lp_rffs-master | kernels/circulant_rff.py |
import torch
from torch.autograd import Variable
import numpy as np
from copy import deepcopy
import sys
sys.path.append("../utils")
from misc_utils import delta_approximation
def train(args, model, epoch, train_loader, optimizer, quantizer, kernel):
    """Run one training epoch; return the list of per-step training losses.

    Raw minibatches are mapped to kernel-approximation features (RFF cosine
    features or Nystrom features) and optionally quantized before the model's
    forward pass. For the HALP / LM-HALP optimizers a closure is built so the
    optimizer can re-evaluate loss and gradients itself (SVRG-style).
    """
    train_loss = []
    use_cuda = torch.cuda.is_available() and args.cuda
    for i, minibatch in enumerate(train_loader):
        X, Y = minibatch
        if use_cuda:
            X = X.cuda()
            Y = Y.cuda()
        optimizer.zero_grad()
        if args.opt == "halp":
            # We need to add this function to models when we want to use SVRG
            # features are regenerated inside the closure because HALP may call
            # it multiple times per optimization step
            def closure(data=X, target=Y):
                if use_cuda:
                    data = data.cuda()
                    target = target.cuda()
                if args.approx_type == "rff" or args.approx_type == "cir_rff":
                    data = kernel.get_cos_feat(data)
                elif args.approx_type == "nystrom":
                    data = kernel.get_feat(data)
                else:
                    raise Exception("kernel approximation type not supported!")
                if quantizer != None:
                    # print("halp use quantizer")
                    data = quantizer.quantize(data)
                if data.size(0) != target.size(0):
                    raise Exception("minibatch on data and target does not agree in closure")
                if not isinstance(data, torch.autograd.variable.Variable):
                    data = Variable(data, requires_grad=False)
                if not isinstance(target, torch.autograd.variable.Variable):
                    target = Variable(target, requires_grad=False)
                cost = model.forward(data, target)
                cost.backward()
                return cost
            loss = optimizer.step(closure)
            train_loss.append(loss[0].data.cpu().numpy() )
        elif (args.opt == "lm_halp_svrg") or (args.opt == "lm_halp_sgd"):
            # We need to add this function to models when we want to use SVRG
            def closure(data=X, target=Y, feat=None):
                if use_cuda:
                    data = data.cuda()
                    target = target.cuda()
                    if feat is not None:
                        feat = feat.cuda()
                if feat is None:
                    if args.approx_type == "rff" or args.approx_type == "cir_rff":
                        data = kernel.get_cos_feat(data)
                    elif args.approx_type == "nystrom":
                        data = kernel.get_feat(data)
                    else:
                        raise Exception("kernel approximation type not supported!")
                    if quantizer != None:
                        # print("halp use quantizer")
                        data = quantizer.quantize(data)
                    if data.size(0) != target.size(0):
                        raise Exception("minibatch on data and target does not agree in closure")
                    if not isinstance(data, torch.autograd.variable.Variable):
                        data = Variable(data, requires_grad=False)
                else:
                    # if we directly pass in the quantized feature, we directly use it without regeneration
                    # this is for the case of LM halp where we need to sync the quantization for prev and curr model.
                    data = feat
                if not isinstance(target, torch.autograd.variable.Variable):
                    target = Variable(target, requires_grad=False)
                cost = model.forward(data, target)
                # keep the gradient of the model output; LM-HALP needs it below
                model.output.retain_grad()
                cost.backward()
                # extract the data X and grad of the output of
                return cost, data, model.output.grad
            loss = optimizer.step(closure)
            train_loss.append(loss[0].data.cpu().numpy() )
        else:
            # plain optimizers (e.g. SGD / LPSGD): compute features once per step
            if args.approx_type == "rff" or args.approx_type == "cir_rff":
                X = kernel.get_cos_feat(X)
            elif args.approx_type == "nystrom":
                X = kernel.get_feat(X)
            else:
                raise Exception("kernel approximation type not supported!")
            if quantizer != None:
                # print("train use quantizer")
                X = quantizer.quantize(X)
            X = Variable(X, requires_grad=False)
            Y = Variable(Y, requires_grad=False)
            loss = model.forward(X, Y)
            train_loss.append(loss[0].data.cpu().numpy() )
            loss.backward()
            optimizer.step()
        # print("epoch ", epoch, "step", i, "loss", loss[0] )
    return train_loss
def evaluate(args, model, epoch, val_loader, quantizer, kernel):
    """Evaluate the model over val_loader; return (metric, monitor_signal).

    For logistic regression: (accuracy, mean cross entropy).
    For other models (regression): the mean squared error, returned twice.
    Inputs are mapped to kernel features and optionally quantized, mirroring
    the training-time pipeline in train().
    """
    # perform evaluation
    sample_cnt = 0
    use_cuda = torch.cuda.is_available() and args.cuda
    if args.model == "logistic_regression":
        correct_cnt = 0
        cross_entropy_accum = 0.0
        for i, minibatch in enumerate(val_loader):
            X, Y = minibatch
            if use_cuda:
                X = X.cuda()
                Y = Y.cuda()
            # map raw inputs to kernel-approximation features
            if args.approx_type == "rff" or args.approx_type == "cir_rff":
                X = kernel.get_cos_feat(X)
            elif args.approx_type == "nystrom":
                X = kernel.get_feat(X)
            else:
                raise Exception("kernel approximation type not supported!")
            if quantizer != None:
                # print("test use quantizer")
                X = quantizer.quantize(X)
            X = Variable(X, requires_grad=False)
            Y = Variable(Y, requires_grad=False)
            pred, output = model.predict(X)
            correct_cnt += np.sum(pred.reshape(pred.size, 1) == Y.data.cpu().numpy() )
            # criterion expects a 1-D target tensor
            if len(list(Y.size() ) ) == 2:
                Y = Y.squeeze()
            cross_entropy_accum += model.criterion(output, Y).data.cpu().numpy()[0]
            sample_cnt += pred.size
        print("eval_acc at epoch ", epoch, "step", i, " iterations ", " acc ", correct_cnt / float(sample_cnt), " cross entropy ", cross_entropy_accum / float(sample_cnt) )
        return correct_cnt / float(sample_cnt), cross_entropy_accum / float(sample_cnt)
    else:
        # regression models: accumulate squared error over the validation set
        l2_accum = 0.0
        for i, minibatch in enumerate(val_loader):
            X, Y = minibatch
            if use_cuda:
                X = X.cuda()
                Y = Y.cuda()
            if args.approx_type == "rff" or args.approx_type == "cir_rff":
                X = kernel.get_cos_feat(X)
            elif args.approx_type == "nystrom":
                X = kernel.get_feat(X)
            else:
                raise Exception("kernel approximation type not supported!")
            if quantizer != None:
                # print("test use quantizer")
                X = quantizer.quantize(X)
            X = Variable(X, requires_grad=False)
            Y = Variable(Y, requires_grad=False)
            pred = model.predict(X)
            l2_accum += np.sum( (pred.reshape(pred.size, 1) \
                - Y.data.cpu().numpy().reshape(pred.size, 1) )**2)
            sample_cnt += pred.size
        print("eval_l2 at epoch ", epoch, "step", i, " iterations ", " loss ", np.sqrt(l2_accum / float(sample_cnt) ) )
        return l2_accum / float(sample_cnt), l2_accum / float(sample_cnt)
def sample_data(X, Y, n_sample):
    '''
    Draw up to n_sample random rows from X (shape [n_sample, n_feat]) along
    with the matching entries of Y. Works for both numpy arrays and torch
    tensors; returns the inputs unchanged when no subsampling is needed.
    '''
    if isinstance(X, np.ndarray):
        total = X.shape[0]
    else:
        total = X.size(0)
    n_keep = min(n_sample, total)
    if n_keep == total:
        # nothing to subsample
        return X, Y
    sel = np.random.permutation(np.arange(total) )[:n_keep]
    return X[sel, :], Y[sel]
def get_matrix_spectrum(X):
    """Return the singular-value spectrum of the (symmetric) matrix X.

    SVD is used rather than eigh: eigh has produced negative eigenvalues on
    some datasets (e.g. census regression), while SVD has been numerically
    stable in practice. Note that torch.mm products can differ slightly
    between upper/lower triangles, but remain within testing tolerance.
    """
    _, singular_values, _ = np.linalg.svd(X.cpu().numpy().astype(np.float64) )
    return singular_values
#####################################################################
# function to calculate Delta
#####################################################################
def get_sample_kernel_metrics(X, kernel, kernel_approx, quantizer, l2_reg):
    """Compare an approximate kernel against the exact one on sample X.

    Returns (metric_dict, None, None). The metrics are the squared Frobenius
    norm and the spectral norm of the error matrix, plus the left/right Delta
    approximation factors computed with regularizer l2_reg. All work happens
    on CPU; kernels are moved back to GPU afterwards if X started there.
    """
    on_gpu = X.is_cuda
    if on_gpu:
        kernel.cpu()
        kernel_approx.cpu()
        X = X.cpu()
    exact_mat = kernel.get_kernel_matrix(X, X)
    approx_mat = kernel_approx.get_kernel_matrix(X, X, quantizer, quantizer)
    err = approx_mat.cpu() - exact_mat.cpu()
    frob_sq = torch.sum(err**2)
    spec_err = np.max(np.abs(get_matrix_spectrum(err) ) )
    print("calculation delta with lambda = ", l2_reg)
    delta_right, delta_left = delta_approximation(
        exact_mat.cpu().numpy().astype(np.float64),
        approx_mat.cpu().numpy().astype(np.float64), l2_reg)
    metric_dict = {"F_norm_error": float(frob_sq),
                   "Delta_left": float(delta_left),
                   "Delta_right": float(delta_right),
                   "spectral_norm_error": float(spec_err) }
    print(metric_dict)
    if on_gpu:
        kernel.torch(cuda=True)
        kernel_approx.torch(cuda=True)
    # the two trailing values (spectra) are intentionally not computed
    return metric_dict, None, None
def get_sample_kernel_F_norm(X, kernel, kernel_approx, quantizer, l2_reg):
    """Squared Frobenius norm of (approximate kernel - exact kernel) on X.

    Both kernel matrices are evaluated on CPU; when X lives on the GPU the
    kernel objects are moved back to CUDA before returning.  ``l2_reg`` is
    accepted for interface symmetry with get_sample_kernel_metrics but is
    not used here.
    """
    on_gpu = X.is_cuda
    if on_gpu:
        kernel.cpu()
        kernel_approx.cpu()
        X = X.cpu()
    exact_mat = kernel.get_kernel_matrix(X, X)
    approx_mat = kernel_approx.get_kernel_matrix(X, X, quantizer, quantizer)
    # sum of squared entry-wise errors == squared Frobenius norm
    diff = approx_mat.cpu() - exact_mat.cpu()
    frob_sq = torch.sum(diff**2)
    if on_gpu:
        kernel.torch(cuda=True)
        kernel_approx.torch(cuda=True)
    return float(frob_sq)
class ProgressMonitor(object):
    """Tracks a validation metric across epochs, snapshots the best model,
    and halves the learning rate when the metric stops improving fast
    enough.  Signals the caller to stop training after 10 lr drops.

    Note: ``min_lr`` is stored but never enforced by this class.
    """
    def __init__(self, init_lr=1.0, lr_decay_fac=2.0, min_lr=0.00001, min_metric_better=False, decay_thresh=0.99):
        self.lr = init_lr
        self.lr_decay_fac = lr_decay_fac
        self.min_lr = min_lr
        self.metric_history = []
        # True when a smaller metric (e.g. loss) is better
        self.min_metric_better = min_metric_better
        self.best_model = None
        self.decay_thresh = decay_thresh
        self.prev_best = None
        self.drop_cnt = 0
    def end_of_epoch(self, metric, model, optimizer, epoch):
        """Record ``metric`` for this epoch.

        Saves (or restores) the best model state, decays the optimizer lr
        when the relative improvement is below ``decay_thresh``, and returns
        True once 10 lr drops have occurred (caller should stop training).
        """
        # was `== None`: identity comparison is the correct idiom for None
        if self.min_metric_better:
            model_is_better = (self.prev_best is None) or (metric <= self.prev_best)
        else:
            model_is_better = (self.prev_best is None) or (metric >= self.prev_best)
        if model_is_better:
            # save the best model
            self.best_model = deepcopy(model.state_dict() )
            print("saving best model with metric ", metric)
        else:
            # revert to the best model seen so far
            model.load_state_dict(deepcopy(self.best_model) )
            print("loading previous best model with metric ", self.prev_best)
        # Decay the lr when improvement over the previous best is below the
        # threshold (condition mirrored for metrics where larger is better).
        if (self.prev_best is not None) \
            and ( (self.min_metric_better and (metric > self.decay_thresh * self.prev_best) ) \
            or ( (not self.min_metric_better) and (metric < (1.0 + 1.0 - self.decay_thresh) * self.prev_best) ) ):
            self.lr /= self.lr_decay_fac
            for g in optimizer.param_groups:
                g['lr'] = self.lr
            print("lr drop to ", self.lr)
            self.drop_cnt += 1
        if model_is_better:
            self.prev_best = metric
        self.metric_history.append(metric)
        # was `== 10`: with equality, the stop signal fires on exactly one
        # call and is silently lost if the caller ignores that return value
        return self.drop_cnt >= 10
| lp_rffs-master | utils/train_utils.py |
import numpy as np
import scipy.io as sio
import h5py
def load_data(path="../../data/census/census"):
    """Load train/heldout features and labels from four .mat files at ``path``.

    Tries scipy.io.loadmat first; if that raises (e.g. for MATLAB v7.3
    files), the training features are re-opened with h5py instead.  The
    files use either of two key conventions (X_tr/Y_tr/X_ho/Y_ho or
    fea/lab); both are handled.  Feature matrices are transposed when
    needed so samples are on axis 0.

    Returns (X_train, X_test, Y_train, Y_test).
    """
    try:
        X_train = sio.loadmat(path + "_train_feat.mat")
        Y_train = sio.loadmat(path + "_train_lab.mat")
        X_test = sio.loadmat(path + "_heldout_feat.mat")
        Y_test = sio.loadmat(path + "_heldout_lab.mat")
    except:
        # NOTE(review): bare except also swallows unrelated errors
        # (KeyboardInterrupt, typos); narrowing it would be safer.
        print("switch to use h5py to load files")
        X_train = h5py.File(path + "_train_feat.mat", 'r')
        Y_train = sio.loadmat(path + "_train_lab.mat")
        X_test = sio.loadmat(path + "_heldout_feat.mat")
        Y_test = sio.loadmat(path + "_heldout_lab.mat")
    # Resolve the two possible key conventions for each array.
    if 'X_ho' in X_test.keys():
        X_test = X_test['X_ho']
    else:
        X_test = X_test["fea"]
    if "X_tr" in X_train.keys():
        X_train = X_train['X_tr']
    else:
        X_train = X_train['fea']
    if "Y_ho" in Y_test.keys():
        Y_test = Y_test['Y_ho']
    else:
        Y_test = Y_test['lab']
    if "Y_tr" in Y_train.keys():
        Y_train = Y_train['Y_tr']
    else:
        Y_train = Y_train['lab']
    # Transpose to samples-first layout when the label count matches the
    # other axis (np.array also materializes the h5py dataset).
    if X_train.shape[0] != Y_train.size:
        X_train = np.array(X_train).T
    if X_test.shape[0] != Y_test.size:
        X_test = X_test.T
    # # # DEBUG
    # s = np.arange(X_train.shape[0] )
    # np.random.seed(0)
    # np.random.shuffle(s)
    # X_train = X_train[s, :]
    # Y_train = Y_train[s]
    # X_train, Y_train, X_test, Y_test = \
    #     X_train[:int(s.size * 1 / 5), :], Y_train[:int(s.size * 1 / 5)], X_test[:int(s.size * 1 / 5), :], Y_test[:int(s.size * 1 / 5)]
    # print("test ", X_train.shape, Y_train.shape)
    assert X_train.shape[0] == Y_train.shape[0]
    assert X_test.shape[0] == Y_test.shape[0]
    # sanity check that train and heldout splits are not identical in size
    assert X_train.shape[0] != X_test.shape[0]
    return X_train, X_test, Y_train, Y_test
| lp_rffs-master | utils/data_loader.py |
import numpy as np
import torch
import math
class Quantizer(object):
    """Uniform stochastic quantizer over [min_val, max_val].

    The grid starts at ``min_val`` with spacing ``scale`` and has
    2**nbit levels.  ``quantize`` rounds each entry randomly up or down
    with probability proportional to its distance from the neighboring
    grid points, making the quantization unbiased in expectation.
    """
    def __init__(self, nbit, min_val, max_val, scale=None, rand_seed=1, use_cuda=False, for_lm_halp=False):
        self.nbit = nbit
        self.min_val = min_val
        self.max_val = max_val
        # was `scale == None`; also, an explicitly passed scale used to be
        # silently dropped, leaving self.scale unset (AttributeError later)
        if scale is None:
            if not for_lm_halp:
                self.scale = (max_val - min_val) / float(2**self.nbit - 1)
            else:
                # adapt to the halp quantization scheme where 0 is in the representation grid
                self.scale = (max_val - min_val) / float(2**self.nbit - 2)
        else:
            self.scale = scale
        self.rand_seed = rand_seed
        self.use_cuda = use_cuda
    def quantize_random(self, value, verbose=True, fixed_seed=False):
        """Stochastically round ``value`` onto the grid (fast in-place path).

        ``verbose`` is accepted for interface compatibility but unused.
        With ``fixed_seed`` the random offsets come from numpy seeded with
        ``self.rand_seed``, so results are reproducible and match
        quantize_random_old.
        """
        bound = math.pow(2.0, self.nbit) - 1
        min_val = 0.0
        max_val = bound
        if self.use_cuda:
            if fixed_seed:
                np.random.seed(self.rand_seed)
                adj_val = torch.cuda.FloatTensor(np.random.uniform(size=list(value.size() ) ) ).type(value.type() )
            else:
                adj_val = torch.cuda.FloatTensor(value.size()).type(value.type()).uniform_()
        else:
            if fixed_seed:
                np.random.seed(self.rand_seed)
                adj_val = torch.Tensor(np.random.uniform(size=list(value.size() ) ) ).type(value.type() )
            else:
                adj_val = torch.Tensor(value.size()).type(value.type()).uniform_()
        # floor(x/scale + u) with u ~ U[0,1): rounds up with probability equal
        # to the fractional part, i.e. unbiased stochastic rounding.
        rounded = (value - self.min_val).div_(self.scale).add_(adj_val).floor_()
        clipped_value = rounded.clamp_(min_val, max_val)
        clipped_value *= self.scale
        quant_val = clipped_value + self.min_val
        return quant_val
    def quantize_random_old(self, value, verbose=True):
        """Reference implementation of stochastic rounding (slow, always
        seeded with ``self.rand_seed``)."""
        floor_val = self.min_val + torch.floor( (value - self.min_val) / self.scale) * self.scale
        ceil_val = self.min_val + torch.ceil( (value - self.min_val) / self.scale) * self.scale
        floor_prob = (ceil_val - value) / self.scale
        ceil_prob = (value - floor_val) / self.scale
        np.random.seed(self.rand_seed)
        sample = torch.DoubleTensor(np.random.uniform(size=list(value.size() ) ) )
        quant_val = floor_val * (sample < floor_prob).double() \
            + ceil_val * (sample >= floor_prob).double()
        return quant_val
    def quantize(self, value, verbose=True, fixed_seed=False):
        """Clamp ``value`` to [min_val, max_val] and stochastically quantize."""
        # TODO update if we have other quantization schemes
        value = torch.clamp(value, self.min_val, self.max_val)
        return self.quantize_random(value, verbose, fixed_seed)
    def quantize_old(self, value, verbose=True):
        """Clamp and quantize via the slow reference implementation."""
        # TODO update if we have other quantization schemes
        value = torch.clamp(value, self.min_val, self.max_val)
        return self.quantize_random_old(value, verbose)
def test_random_quantizer():
    """Check that stochastic rounding hits the two neighboring grid points
    with roughly the expected frequencies at the bottom, top, and middle of
    the representable range."""
    quantizer = Quantizer(nbit=15, min_val=-2**14+1, max_val=2**14)
    # test lower bound: value 1/3 above the grid point should round down 2/3
    # of the time
    lower = -2**14+1.0
    shift = 1/3.0
    value = np.ones( (1000, 1000) ) * (lower + shift)
    value = torch.DoubleTensor(value)
    quant_val = quantizer.quantize(value)
    quant_val = quant_val.cpu().numpy()
    assert np.unique(quant_val).size == 2
    assert np.min(np.unique(quant_val) ) == lower
    assert np.max(np.unique(quant_val) ) == lower + 1
    # np.float was removed from numpy (1.24+); use np.float64 explicitly
    ratio = np.sum(quant_val == lower) / np.sum(quant_val == (lower + 1) ).astype(np.float64)
    assert ratio > 1.95 and ratio < 2.05
    # test upper bound: 2/3 above the grid point should round down 1/3 of
    # the time
    lower = 2**14-1.0
    shift = 2/3.0
    value = np.ones( (1000, 1000) ) * (lower + shift)
    value = torch.DoubleTensor(value)
    quant_val = quantizer.quantize(value)
    quant_val = quant_val.cpu().numpy()
    assert np.unique(quant_val).size == 2
    assert np.min(np.unique(quant_val) ) == lower
    assert np.max(np.unique(quant_val) ) == lower + 1
    ratio = np.sum(quant_val == lower) / np.sum(quant_val == (lower + 1) ).astype(np.float64)
    assert ratio > 0.45 and ratio < 0.55
    # test middle values: exactly halfway should round each way ~50%
    lower = 0.0
    shift = 0.5
    value = np.ones( (1000, 1000) ) * (lower + shift)
    value = torch.DoubleTensor(value)
    quant_val = quantizer.quantize(value)
    quant_val = quant_val.cpu().numpy()
    assert np.unique(quant_val).size == 2
    assert np.min(np.unique(quant_val) ) == lower
    assert np.max(np.unique(quant_val) ) == lower + 1
    ratio = np.sum(quant_val == lower) / np.sum(quant_val == (lower + 1) ).astype(np.float64)
    assert ratio > 0.95 and ratio < 1.05
    print("quantizer test passed!")
def test_random_quantizer_fast_impl():
    """The fast in-place quantizer must match the slow reference
    implementation when both draw their random offsets from the same
    seeded numpy stream."""
    quantizer = Quantizer(nbit=15, min_val=-2**14+1, max_val=2**14)
    # was np.random.uniform((1000, 1000)): that passes the tuple as `low`
    # and returns a 2-element array; `size=` is what was intended
    value = np.random.uniform(size=(1000, 1000)) * 2**14
    value = torch.DoubleTensor(value)
    # fixed_seed=True makes the fast path draw its offsets from numpy
    # seeded with rand_seed, exactly as quantize_old does; without it the
    # two paths use independent randomness and the comparison is invalid
    quant_val = quantizer.quantize(value, fixed_seed=True)
    quant_val_old = quantizer.quantize_old(value)
    quant_val = quant_val.cpu().numpy()
    quant_val_old = quant_val_old.cpu().numpy()
    np.testing.assert_array_almost_equal(quant_val, quant_val_old, decimal=9)
    print("fast impl quantizer test passed!")
if __name__ == "__main__":
test_random_quantizer_fast_impl()
test_random_quantizer() | lp_rffs-master | utils/quantizer.py |
import numpy as np
from scipy.optimize import minimize
import torch
# for numerical protection
EPS = 1e-20
def set_random_seed(seed):
    """Seed numpy and torch (CPU and, when available, every CUDA device)."""
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
def expected_loss(lam, U, S, Y, noise):
    """Expected regularized-regression loss given an eigendecomposition.

    U and S are the eigenvectors/eigenvalues of the kernel matrix, Y the
    targets, lam the regularization strength, and noise the noise level.
    """
    m = float(Y.size)
    # squared projections of the targets onto each eigenvector
    uty2 = ((np.dot(U.T, Y.reshape(Y.size, 1)))**2).reshape(int(m))
    # per-direction shrinkage factor s/(s + lam); EPS guards division by zero
    gamma = (S / (S + lam + EPS)).reshape(int(m))
    bias_term = (1/m) * np.sum(((1.0-gamma)**2) * uty2)
    variance_term = (1/m) * noise**2 * np.sum(gamma**2)
    return bias_term + variance_term + noise**2
# symmetric delta
def delta_approximation(K, K_tilde, lambda_=1e-3):
    """Compute the smallest D such that, in semidefinite order,
    (1 + D)^(-1)(K + lambda_ I) <= K_tilde + lambda_ I <= (1 + D)(K + lambda_ I).

    This symmetric variant returns (delta_right, delta_left), i.e. the
    largest eigenvalue and the negated smallest eigenvalue of the whitened
    difference matrix.
    """
    rows, cols = K.shape
    rows_t, cols_t = K_tilde.shape
    assert rows == cols and rows_t == cols_t, "Kernel matrix must be square"
    assert rows == rows_t, "K and K_tilde must have the same shape"
    assert np.allclose(K, K.T) and np.allclose(K_tilde, K_tilde.T), "Kernel matrix must be symmetric"
    # Eigendecompose K, then shift the spectrum to represent K + lambda_ I.
    eigvals, eigvecs = np.linalg.eigh(K)
    #assert np.all(eigvals >= 0), "Kernel matrix K must be positive semidefinite"
    eigvals += lambda_
    # Whiten K_tilde by (K + lambda_ I)^(-1/2), expressed in the eigenbasis:
    # diag(1/sqrt(s)) V.T K_tilde V diag(1/sqrt(s))
    whitened_tilde = eigvecs.T.dot(K_tilde.dot(eigvecs)) / np.sqrt(eigvals) / np.sqrt(eigvals)[:, np.newaxis]
    # K whitened the same way is diagonal with entries 1 - lambda_/(s + lambda_).
    whitened_exact = np.diag(1 - lambda_ / eigvals)
    diff_spectrum, _ = np.linalg.eigh(whitened_tilde - whitened_exact)
    smallest = diff_spectrum[0]
    largest = diff_spectrum[-1]
    assert largest >= smallest
    return largest, -smallest
class Args(object):
    """Lightweight namespace bundling experiment configuration values."""
    def __init__(self, n_fp_rff, n_bit,
                 exact_kernel, reg_lambda,
                 sigma, random_seed, data_path,
                 do_fp, test_var_reduce=False):
        settings = dict(n_fp_rff=n_fp_rff, n_bit=n_bit,
                        exact_kernel=exact_kernel, reg_lambda=reg_lambda,
                        sigma=sigma, random_seed=random_seed,
                        data_path=data_path, do_fp=do_fp,
                        test_var_reduce=test_var_reduce)
        # store every setting verbatim as an instance attribute
        for attr_name, attr_value in settings.items():
            setattr(self, attr_name, attr_value)
| lp_rffs-master | utils/misc_utils.py |
import matplotlib
import matplotlib.pyplot as plt
#def get_colors():
# prop_cycle = plt.rcParams['axes.prop_cycle']
# colors = prop_cycle.by_key()['color']
# colors_dict = {}
# colors_dict["exact"] = colors[0]
# colors_dict["fp"] = colors[1]
# for idx, nbit in enumerate([1,2,4,8,16,32] ):
# colors_dict[str(nbit)] = colors[idx + 2]
# colors_dict["pca"] = colors[len(colors_dict.keys() ) ]
# colors_dict["pca_2"] = colors[len(colors_dict.keys() ) ]
# # colors_dict["pca_3"] = colors[len(colors_dict.keys() ) ]
# # colors_dict["pca_4"] = colors[len(colors_dict.keys() ) ]
# #print colors_dict
# return colors_dict
def get_colors():
    """Map plot-series labels to matplotlib's default color cycle so every
    figure uses a consistent color per method."""
    cycle_colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
    label_to_color = {
        "Exact": cycle_colors[0],
        "FP-RFF": cycle_colors[1],
        "FP-Nystrom": cycle_colors[2],
        "Cir. FP-RFF": cycle_colors[3],
    }
    # low-precision variants take the remaining cycle slots, one per bit width
    for offset, nbit in enumerate([1, 2, 4, 8, 16]):
        label_to_color["LP-RFF " + str(nbit)] = cycle_colors[offset + 4]
    return label_to_color
#def get_colors():
# prop_cycle = plt.rcParams['axes.prop_cycle']
# colors = prop_cycle.by_key()['color']
# colors_dict = {}
# colors_dict["exact"] = colors[0]
# colors_dict["fp RFF"] = colors[1]
# colors_dict["fp Nystrom"] = colors[2]
# colors_dict["fp cir. RFF"] = colors[3]
# for idx, nbit in enumerate([1,2,4,8,16] ):
# colors_dict["lp cir. RFF " + str(nbit) + " bits"] = colors[idx + 4]
## colors_dict["pca"] = colors[len(colors_dict.keys() ) ]
## colors_dict["pca_2"] = colors[len(colors_dict.keys() ) ]
# # colors_dict["pca_3"] = colors[len(colors_dict.keys() ) ]
# # colors_dict["pca_4"] = colors[len(colors_dict.keys() ) ]
# #print colors_dict
# return colors_dict
| lp_rffs-master | utils/plot_utils.py |
import torch
import numpy as np
from torch.autograd import Variable
class LogisticRegression(torch.nn.Module):
    """Multinomial logistic regression: one linear layer + cross-entropy.

    ``reg_lambda`` is stored but no penalty is added inside this module
    (regularization, if any, is presumably applied by the training loop —
    confirm against callers).
    """
    def __init__(self, input_dim, n_class, reg_lambda, dtype="float"):
        super(LogisticRegression, self).__init__()
        self.input_dim = input_dim
        self.n_class = n_class
        self.reg_lambda = reg_lambda
        self.linear = torch.nn.Linear(self.input_dim, out_features=self.n_class)
        # was size_average=True (deprecated); reduction='mean' is equivalent
        self.criterion = torch.nn.CrossEntropyLoss(reduction='mean')
        self.dtype = dtype
        if self.dtype == "double":
            for w in self.parameters():
                w.data = w.data.type(torch.DoubleTensor)
    def forward(self, x, y):
        """Return the mean cross-entropy loss of predictions on x against
        integer class labels y (a trailing singleton dim is squeezed)."""
        self.output = self.linear(x)
        if len(list(y.size() ) ) == 2:
            y = y.squeeze()
        self.loss = self.criterion(self.output, y)
        return self.loss
    def predict(self, x):
        """Return (predicted class indices as a numpy array, raw logits)."""
        output = self.linear(x)
        pred = output.data.cpu().numpy().argmax(axis=1)
        return pred, output
def logistic_regression_grad_test():
    """Finite-difference sanity check of LogisticRegression gradients:
    moving all parameters by ``move`` should change the loss by
    approximately sum(grad * move)."""
    n_sample = 4
    n_dim = 3
    n_class = 4
    # Variable is deprecated; plain tensors autograd fine
    X = torch.DoubleTensor(np.random.normal(size=(n_sample, n_dim) ) )
    Y = torch.LongTensor(np.array([0, 1, 3, 2] ) )
    regressor = LogisticRegression(input_dim=n_dim, n_class=n_class, reg_lambda=100.0, dtype="double")
    loss1 = regressor.forward(X, Y)
    loss_diff = 0.0
    move = 1e-9
    loss1.backward()
    for w in regressor.parameters():
        loss_diff += float(torch.sum(w.grad.data * move))
    for w in regressor.parameters():
        w.data += move
    loss2 = regressor.forward(X, Y)
    # was (loss2[0] - loss1[0]): indexing a 0-dim loss tensor raises in
    # modern PyTorch; compare scalars directly
    assert np.abs(float(loss2 - loss1) - loss_diff) < 1e-9
    # print("loss finite diff ", float(loss2 - loss1), " projected loss change ", loss_diff)
    print("logistic regression gradient test done!")
if __name__ == "__main__":
logistic_regression_grad_test()
| lp_rffs-master | models/logistic_regression.py |
import numpy as np
import torch
import sys
sys.path.append("../kernels/")
from gaussian_exact import GaussianKernel
from rff import RFF
from time import time
import math
class KernelRidgeRegression(object):
    """Closed-form kernel ridge regression on top of a kernel object that
    exposes get_kernel_matrix (optionally with quantizers)."""
    def __init__(self, kernel, reg_lambda):
        '''
        reg_lambda is the strength of the ridge regularizer.
        kernel is an object whose get_kernel_matrix returns a PyTorch tensor.
        '''
        # self.kernel_mat = kernel_mat
        self.reg_lambda = reg_lambda
        self.kernel = kernel
    def fit(self, X_train=None, Y_train=None, kernel_mat=None, quantizer=None):
        """Solve alpha = (K + lambda I)^-1 Y on the training data.

        Note: the ``kernel_mat`` parameter is accepted but unused; the
        kernel matrix is always recomputed from X_train.
        """
        self.X_train, self.Y_train = X_train, Y_train
        self.kernel_mat = self.kernel.get_kernel_matrix(X_train, X_train, quantizer, quantizer)
        n_sample = self.kernel_mat.size(0)
        # pytorch is super slow in inverse, so we finish this operation in numpy
        print("using regularior strength ", self.reg_lambda)
        self.alpha = torch.DoubleTensor( \
            np.dot(np.linalg.inv(self.kernel_mat.cpu().numpy().astype(np.float64) + self.reg_lambda * np.eye(n_sample) ), Y_train.cpu().numpy().astype(np.float64) ) )
        if self.kernel_mat.is_cuda:
            self.alpha = self.alpha.cuda()
    def torch(self, use_cuda):
        # move the learned coefficients to GPU when requested
        if use_cuda:
            self.alpha = self.alpha.cuda()
    def get_train_error(self):
        """Mean squared error of K @ alpha against the training targets."""
        prediction = torch.mm(self.kernel_mat, self.alpha)
        if prediction.is_cuda:
            error = prediction - torch.cuda.DoubleTensor(self.Y_train)
        else:
            error = prediction - torch.DoubleTensor(self.Y_train)
        return torch.mean(error**2)
    def predict(self, X_test, quantizer_train=None, quantizer_test=None):
        """Predict K(X_test, X_train) @ alpha; returns a cloned tensor."""
        # quantizer 1 for test data, quantizer 2 for train data
        self.X_test = X_test
        self.kernel_mat_pred = \
            self.kernel.get_kernel_matrix(self.X_test, self.X_train, quantizer_test, quantizer_train)
        self.prediction = torch.mm(self.kernel_mat_pred, self.alpha)
        return self.prediction.clone()
    def get_test_error(self, Y_test):
        """Mean squared error of the most recent predict() against Y_test."""
        # should only be called right after the predict function
        self.Y_test = Y_test
        if self.prediction.is_cuda:
            error = self.prediction - torch.cuda.DoubleTensor(self.Y_test)
        else:
            error = self.prediction - torch.DoubleTensor(self.Y_test)
        return torch.mean(error**2)
def test_kernel_ridge_regression1():
    '''
    Train and test errors must agree when the test set equals the training
    set, and must diverge once either X or Y is perturbed.
    '''
    n_feat = 10
    n_rff_feat = 1000000
    X_train = np.ones( [2, n_feat] )
    X_train[0, :] *= 1
    # was X_train[0, :] *= 2 -- a copy-paste typo that scaled row 0 twice
    # and left row 1 untouched; the second scaling belongs to row 1
    X_train[1, :] *= 2
    Y_train = np.ones( [2, 1] )
    kernel = GaussianKernel(sigma=2.0)
    kernel = RFF(n_rff_feat, n_feat, kernel)
    use_cuda=torch.cuda.is_available()
    kernel.torch(cuda=torch.cuda.is_available())
    reg_lambda = 1.0
    regressor = KernelRidgeRegression(kernel, reg_lambda=reg_lambda)
    #regressor.torch(use_cuda=torch.cuda.is_available() )
    if use_cuda:
        regressor.fit(torch.DoubleTensor(X_train).cuda(), torch.DoubleTensor(Y_train).cuda() )
    else:
        regressor.fit(torch.DoubleTensor(X_train), torch.DoubleTensor(Y_train) )
    regressor.torch(use_cuda=torch.cuda.is_available() )
    # if test data equals training data, train and test error must match
    X_test = np.copy(X_train)
    Y_test = np.copy(Y_train)
    if use_cuda:
        test_pred = regressor.predict(torch.DoubleTensor(X_test).cuda() )
    else:
        test_pred = regressor.predict(torch.DoubleTensor(X_test) )
    train_error = regressor.get_train_error()
    if use_cuda:
        test_error = regressor.get_test_error(torch.DoubleTensor(Y_test).cuda() )
    else:
        test_error = regressor.get_test_error(torch.DoubleTensor(Y_test) )
    assert np.abs(train_error - test_error) < 1e-6
    # perturbing X must make train and test error differ
    X_test = np.copy(X_train) * 2
    Y_test = np.copy(Y_train)
    if use_cuda:
        test_pred = regressor.predict(torch.cuda.DoubleTensor(X_test) )
    else:
        test_pred = regressor.predict(torch.DoubleTensor(X_test) )
    train_error = regressor.get_train_error()
    if use_cuda:
        test_error = regressor.get_test_error(torch.cuda.DoubleTensor(Y_test) )
    else:
        test_error = regressor.get_test_error(torch.DoubleTensor(Y_test) )
    assert np.abs(train_error - test_error) >= 1e-3
    # perturbing Y must also make the errors differ
    X_test = np.copy(X_train)
    Y_test = np.copy(Y_train) * 2
    if use_cuda:
        test_pred = regressor.predict(torch.cuda.DoubleTensor(X_test) )
    else:
        test_pred = regressor.predict(torch.DoubleTensor(X_test) )
    train_error = regressor.get_train_error()
    if use_cuda:
        test_error = regressor.get_test_error(torch.cuda.DoubleTensor(Y_test) )
    else:
        test_error = regressor.get_test_error(torch.DoubleTensor(Y_test) )
    assert np.abs(train_error - test_error) >= 1e-3
    print("kernel ridge regression test1 passed!")
def test_kernel_ridge_regression2():
    '''
    Sanity check: the feature weights implied by the dual solution
    (Phi^T alpha) must equal the primal ridge solution computed directly
    from the random features.
    '''
    n_feat = 10
    n_rff_feat = 1000
    X_train = np.ones( [2, n_feat] )
    X_train[0, :] *= 1
    # was X_train[0, :] *= 2 -- same copy-paste typo as in test1; scale row 1
    X_train[1, :] *= 2
    Y_train = np.ones( [2, 1] )
    kernel = GaussianKernel(sigma=2.0)
    kernel = RFF(n_rff_feat, n_feat, kernel)
    use_cuda = torch.cuda.is_available()
    kernel.torch(cuda=use_cuda)
    reg_lambda = 1.0
    regressor = KernelRidgeRegression(kernel, reg_lambda=reg_lambda)
    if use_cuda:
        regressor.fit(torch.cuda.DoubleTensor(X_train), torch.cuda.DoubleTensor(Y_train) )
    else:
        regressor.fit(torch.DoubleTensor(X_train), torch.DoubleTensor(Y_train) )
    # feature weights via the dual coefficients from KernelRidgeRegression
    if use_cuda:
        kernel.get_kernel_matrix(torch.cuda.DoubleTensor(X_train), torch.cuda.DoubleTensor(X_train) )
    else:
        kernel.get_kernel_matrix(torch.DoubleTensor(X_train), torch.DoubleTensor(X_train) )
    w1 = torch.mm(torch.transpose(kernel.rff_x2, 0, 1), regressor.alpha)
    # feature weights via the primal normal equations
    if use_cuda:
        val = torch.inverse( (regressor.reg_lambda * torch.eye(n_rff_feat).double().cuda() \
            + torch.mm(torch.transpose(kernel.rff_x1, 0, 1), kernel.rff_x1) ) )
    else:
        val = torch.inverse( (regressor.reg_lambda * torch.eye(n_rff_feat).double() \
            + torch.mm(torch.transpose(kernel.rff_x1, 0, 1), kernel.rff_x1) ) )
    val = torch.mm(val, torch.transpose(kernel.rff_x2, 0, 1) )
    if use_cuda:
        w2 = torch.mm(val, torch.cuda.DoubleTensor(Y_train) )
    else:
        w2 = torch.mm(val, torch.DoubleTensor(Y_train) )
    np.testing.assert_array_almost_equal(w1.cpu().numpy(), w2.cpu().numpy() )
    print("kernel ridge regression test2 passed!")
if __name__ == "__main__":
test_kernel_ridge_regression1()
test_kernel_ridge_regression2()
| lp_rffs-master | models/kernel_regressor.py |
import torch
import numpy as np
from torch.autograd import Variable
class RidgeRegression(torch.nn.Module):
    """Linear regression trained with MSE loss.

    ``reg_lambda`` is stored but no penalty is added inside this module
    (regularization, if any, is presumably applied by the training loop —
    confirm against callers).
    """
    def __init__(self, input_dim, reg_lambda, dtype="float"):
        super(RidgeRegression, self).__init__()
        self.input_dim = input_dim
        self.reg_lambda = reg_lambda
        self.linear = torch.nn.Linear(self.input_dim, out_features=1)
        # was size_average=True (deprecated); reduction='mean' is equivalent
        self.criterion = torch.nn.MSELoss(reduction='mean')
        self.dtype = dtype
        if self.dtype == "double":
            for w in self.parameters():
                w.data = w.data.type(torch.DoubleTensor)
    def forward(self, x, y):
        """Return the mean squared error of predictions on x against y."""
        self.output = self.linear(x)
        self.loss = self.criterion(self.output, y)
        return self.loss
    def predict(self, x):
        """Return predictions for x as a numpy array of shape (n, 1)."""
        output = self.linear(x)
        pred = output.data.cpu().numpy()
        return pred
def ridge_regression_grad_test():
    """Finite-difference sanity check of RidgeRegression gradients:
    moving all parameters by ``move`` should change the loss by
    approximately sum(grad * move)."""
    n_sample = 4
    n_dim = 3
    X = torch.DoubleTensor(np.random.normal(size=(n_sample, n_dim) ) )
    # was size=(n_sample): a 1-D target broadcasts against the (n, 1)
    # output inside MSELoss and computes the wrong mean
    Y = torch.DoubleTensor(np.random.normal(size=(n_sample, 1) ) )
    regressor = RidgeRegression(input_dim=n_dim, reg_lambda=np.random.normal(), dtype="double")
    loss1 = regressor.forward(X, Y)
    loss_diff = 0.0
    move = 1e-9
    loss1.backward()
    for w in regressor.parameters():
        loss_diff += float(torch.sum(w.grad.data * move))
    for w in regressor.parameters():
        w.data += move
    loss2 = regressor.forward(X, Y)
    # was (loss2[0] - loss1[0]): indexing a 0-dim loss tensor raises in
    # modern PyTorch; compare scalars directly
    assert np.abs(float(loss2 - loss1) - loss_diff) < 1e-6
    # print("loss finite diff ", float(loss2 - loss1), " projected loss change ", loss_diff)
    print("Ridge regression gradient test done!")
if __name__ == "__main__":
ridge_regression_grad_test()
| lp_rffs-master | models/ridge_regression.py |
babble-master | tutorial/data/__init__.py |
|
from babble import Explanation
aliases = {
'spouse': ['spouse', 'wife', 'husband', 'ex-wife', 'ex-husband'],
'family': ['father', 'father', 'mother', 'sister', 'sisters',
'brother', 'brothers', 'son', 'sons', 'daughter', 'daughters',
'grandfather', 'grandmother', 'uncle', 'uncles', 'aunt', 'aunts',
'cousin', 'cousins'],
'friend': ['boyfriend', 'girlfriend', 'boss', 'employee', 'secretary', 'co-worker'],
}
aliases['family'] += ["{}-in-law".format(f) for f in aliases['family']]
explanations = [
Explanation(
name='LF_and_married',
condition="the word 'and' is between X and Y and 'married' within five words of Y",
candidate='1bcd8648-8a80-47a3-82d4-38a4a594092f::span:1223:1228~~1bcd8648-8a80-47a3-82d4-38a4a594092f::span:1234:1238',
label=1),
Explanation(
name='LF_third_wheel',
condition="there is a person between X and Y",
candidate='6cd34ab0-653b-438e-b966-d7365a31651d::span:595:607~~6cd34ab0-653b-438e-b966-d7365a31651d::span:712:719',
label=2),
Explanation(
name='LF_married_two_people',
condition="the word 'married' is in the sentence and there are only two people in the sentence",
candidate='d535c921-f102-4d3b-9891-5a36ed93259e::span:823:830~~d535c921-f102-4d3b-9891-5a36ed93259e::span:836:839',
label=1),
Explanation(
name='LF_same_person',
condition="X and Y are identical",
candidate='3820d641-7d5a-49d0-a872-b13199b50790::span:2392:2395~~3820d641-7d5a-49d0-a872-b13199b50790::span:2413:2416',
label=2),
Explanation(
name='LF_husband_wife',
condition="there is at least one spouse word between X and Y",
candidate='88eb2437-93ce-452d-ada0-905a90d0ccac::span:1467:1468~~88eb2437-93ce-452d-ada0-905a90d0ccac::span:1478:1488',
label=1),
Explanation(
name='LF_husband_wife_left_window',
condition="there is at least one spouse word within two words to the left of X or Y",
candidate='03a1e1a0-93c3-41a8-a905-a535ce8f2b09::span:6822:6837~~03a1e1a0-93c3-41a8-a905-a535ce8f2b09::span:6855:6858',
label=1),
Explanation(
name='LF_familial_relationship',
condition="there is at least one family word between X and Y",
candidate='17f7cc87-c207-48e1-a0b3-96ca8047250d::span:3474:3477~~17f7cc87-c207-48e1-a0b3-96ca8047250d::span:3570:3574',
label=2),
Explanation(
name='LF_family_left_window',
condition="there is a family word within two words to the left of X or Y",
candidate='ca285806-f17c-4b1e-9459-a6a7cc27f80c::span:12981:12995~~ca285806-f17c-4b1e-9459-a6a7cc27f80c::span:13014:13018',
label=2),
Explanation(
name='LF_other_relationship',
condition="there is at least one friend word between X and Y",
candidate='41b546d5-d525-4d0a-9fbe-f173e20b645a::span:456:469~~41b546d5-d525-4d0a-9fbe-f173e20b645a::span:554:564',
label=2),
Explanation(
name='LF_reporters',
condition='A word in the sentence starts with "report"',
candidate='0023c4a1-446c-488e-949a-1edbbb4354b3::span:12569:12578~~0023c4a1-446c-488e-949a-1edbbb4354b3::span:12584:12594',
label=2),
] | babble-master | tutorial/data/sample_explanations.py |
import pickle
import os
import unittest
from babble import SemanticParser
from test_babble_base import TestBabbleBase
import text_explanations
class TestBabbleText(TestBabbleBase):
    """Parser acceptance tests for the text-domain explanation fixtures.

    Each test feeds one fixture group from ``text_explanations`` through
    ``check_explanations`` (inherited from TestBabbleBase).
    """
    @classmethod
    def setUpClass(cls):
        # 'implicit' lets unquoted words in explanation conditions be
        # treated as string literals.
        cls.sp = SemanticParser(aliases=text_explanations.get_aliases(),
                                string_format='implicit')
        DATA_FILE = 'tutorial/data/tutorial_data.pkl'
        with open(DATA_FILE, 'rb') as f:
            Cs, _ = pickle.load(f)
        # Index tutorial candidates by mention id so fixtures can refer to
        # candidates by stable id string rather than by object.
        cls.candidate_map = {}
        for c in Cs[0]:
            cls.candidate_map[c.mention_id] = c
    def test_strings(self):
        self.check_explanations(text_explanations.strings)
    def test_string_lists(self):
        self.check_explanations(text_explanations.string_lists)
    def test_candidate_helpers(self):
        self.check_explanations(text_explanations.candidate_helpers)
    def test_index_words(self):
        self.check_explanations(text_explanations.index_words)
    def test_index_chars(self):
        self.check_explanations(text_explanations.index_chars)
    def test_pos_ner(self):
        self.check_explanations(text_explanations.ner)
    def test_count(self):
        self.check_explanations(text_explanations.count)
    def test_anaphora(self):
        self.check_explanations(text_explanations.anaphora)
    def test_tuples(self):
        self.check_explanations(text_explanations.tuples)
def test_implicit_strings(self):
self.check_explanations(text_explanations.implicit_strings) | babble-master | tests/test_babble_text.py |
import pickle
import sys
import unittest
class TestBabbleBase(unittest.TestCase):
    """Shared harness for parser tests: loads the tutorial candidates and
    provides check_explanations, which parses each explanation fixture and
    asserts the resulting parses are acceptable.

    Subclasses are expected to set ``cls.sp`` (a SemanticParser) in their
    own setUpClass.
    """
    @classmethod
    def setUpClass(cls):
        DATA_FILE = 'tutorial/data/tutorial_data.pkl'
        with open(DATA_FILE, 'rb') as f:
            Cs, _ = pickle.load(f)
        # Index candidates by mention id for fixtures that reference them
        # by stable id string.
        cls.candidate_map = {}
        for c in Cs[0]:
            cls.candidate_map[c.mention_id] = c
    @classmethod
    def tearDownClass(cls):
        pass
    def check_explanations(self, explanations):
        """Parse every explanation; require at least one correct (or, when no
        gold semantics are given, passing) parse, at most 3 acceptable
        parses, and print diagnostics for failing/erroring parses."""
        num_explanations = len(explanations)
        num_parses = 0
        self.assertTrue(len(explanations))
        for exp in explanations:
            # Resolve stable-id candidate references to candidate objects.
            if isinstance(exp.candidate, str):
                exp.candidate = self.candidate_map[exp.candidate]
            parse_dict = self.sp.parse_and_evaluate(exp, show_erroring=True)
            num_correct = len(parse_dict['correct'])
            num_passing = len(parse_dict['passing'])
            num_failing = len(parse_dict['failing'])
            num_erroring = len(parse_dict['erroring'])
            num_acceptable = num_correct + num_passing
            if num_acceptable == 0:
                print(parse_dict)
            if num_failing:
                for failing in parse_dict['failing']:
                    print("Failed parse:")
                    print(self.sp.grammar.translate(failing.semantics))
            if num_erroring:
                print("It should not be possible to parse a function that throws an error:")
                self.sp.grammar.print_chart()
                parses = self.sp.parse(exp, return_parses=True)
            # With gold semantics at least one exactly-correct parse is
            # required; otherwise any passing parse suffices.
            if exp.semantics:
                self.assertTrue(num_correct > 0)
            else:
                self.assertTrue(num_passing > 0)
            self.assertTrue(num_acceptable <= 3)
            num_parses += num_acceptable
        sys.stdout.write("{}/{} ({}%) - ".format(num_parses, num_explanations,
                                         float(num_parses)/num_explanations * 100))
sys.stdout.flush() | babble-master | tests/test_babble_base.py |
from babble import Explanation
def get_aliases():
    """Return the alias word-lists used by the text-explanation fixtures."""
    alias_map = {}
    alias_map['colors'] = ['red', 'green', 'blue']
    alias_map['bluebird'] = ['blue', 'bird', 'fly']
    alias_map['greek'] = ['alpha', 'beta', 'gamma']
    alias_map['letters'] = ['a', 'B', 'C']
    alias_map['smalls'] = ['a', 'b', 'c', 'd']
    alias_map['spouse'] = ['wife', 'husband', 'spouse']
    return alias_map
# Test candidate:
# "City land records show that GM President [Daniel Ammann] and his wife,
# [Pernilla Ammann], bought the 15-bedroom mansion on Balmoral Drive in
# the upscale historic neighborhood on July 31."
# hash = 668761641257950361
# stable_id = 52a56fa5-91bf-4df1-8443-632f4c1ce88d::span:604:616~~52a56fa5-91bf-4df1-8443-632f4c1ce88d::span:632:646
default_candidate = '52a56fa5-91bf-4df1-8443-632f4c1ce88d::span:604:616~~52a56fa5-91bf-4df1-8443-632f4c1ce88d::span:632:646'
strings = [
# Equals (String)
Explanation(
condition="'yes' equals 'yes'",
label=1,
candidate=('foo', 'bar'),
semantics=None),
# Lowercase
Explanation(
condition="X is lowercase",
label=1,
candidate=('foo', 'bar'),
semantics=None),
# Uppercase
Explanation(
condition="X is upper case",
label=1,
candidate=('FOO', 'bar'),
semantics=None),
# Capitalized
Explanation(
condition="X is capitalized",
label=1,
candidate=('Foo', 'bar'),
semantics=None),
# Starts with
Explanation(
condition="the word 'blueberry' starts with 'blue'",
label=1,
candidate=('foo', 'bar'),
semantics=None),
# Ends with
Explanation(
condition="the word 'blueberry' ends with 'berry'",
label=1,
candidate=('foo', 'bar'),
semantics=None),
]
string_lists = [
# In
Explanation(
condition="'bar' is in 'foobarbaz'",
label=1,
candidate=('foo', 'bar'),
semantics=None),
# Contains
Explanation(
condition="the word 'foobarbaz' contains 'oobarba'",
label=1,
candidate=('foo', 'bar'),
semantics=None),
# List
Explanation(
condition="'bar' equals 'foo', 'bar', or 'baz'",
label=1,
candidate=('foo', 'bar'),
semantics=None),
# UserList
Explanation(
condition="'blue' in colors",
label=1,
candidate=('foo', 'bar'),
semantics=None),
# OrList left
Explanation(
condition="'blue' or 'shmoo' is in colors",
label=1,
candidate=('foo', 'bar'),
semantics=None),
# OrList right
Explanation(
condition="'blue' ends with 'moe' or 'lue'",
label=1,
candidate=('foo', 'bar'),
semantics=None),
# AndList left
Explanation(
condition="'blue' and 'red' are in colors",
label=1,
candidate=('foo', 'bar'),
semantics=None),
# AndList right
Explanation(
condition="'blue' contains 'l' and 'u'",
label=1,
candidate=('foo', 'bar'),
semantics=None),
]
candidate_helpers = [
# Candidate as string
Explanation(
condition="X is 'foo'",
label=1,
candidate=('foo', 'bar'),
semantics=None),
# Left words (list)
Explanation(
condition="'wife' is in the words left of Y",
label=1,
candidate=default_candidate,
semantics=None),
# Right words (list)
Explanation(
condition="'wife' is in the words to the right of X",
label=1,
candidate=default_candidate,
semantics=None),
# Between words (list)
Explanation(
condition="'wife' is in the words between X and Y",
label=1,
candidate=default_candidate,
semantics=None),
# Sentence (list)
Explanation(
condition='"wife" is in the sentence',
label=1,
candidate=default_candidate,
semantics=None),
]
index_words = [
# Index left
Explanation(
condition="X is left of Y",
label=1,
candidate=default_candidate,
semantics=None),
# Index right
Explanation(
condition="Y is right of X",
label=1,
candidate=default_candidate,
semantics=None),
# Between
Explanation(
condition="'wife' is between X and Y",
label=1,
candidate=default_candidate,
semantics=None),
# Index left equality
Explanation(
condition="'wife' is two words to the left of Y",
label=1,
candidate=default_candidate,
semantics=None),
# Index left inequality 0
Explanation(
condition="X is more than three words to the left of Y",
label=1,
candidate=default_candidate,
semantics=None),
# Index left inequality 1
Explanation(
condition="not X is more than fifty words to the left of Y",
label=1,
candidate=default_candidate,
semantics=None),
# Index left inequality 2
Explanation(
condition="',' is immediately to the left of Y",
label=1,
candidate=default_candidate,
semantics=None),
# Index left inequality 3
Explanation(
condition="',' is right before Y",
label=1,
candidate=default_candidate,
semantics=None),
# Index within (<=)
Explanation(
condition="'wife' is within three words to the left of Y",
label=1,
candidate=default_candidate,
semantics=None),
# Index within (<= or >= left)
Explanation(
condition="'wife' is within three words of Y",
label=1,
candidate=default_candidate,
semantics=None),
# Index within (<= or >= right)
Explanation(
condition="'bought' is within three words of Y",
label=1,
candidate=default_candidate,
semantics=None),
# Index OrList left
Explanation(
condition="'husband' or 'wife' is within three words to the left of Y",
label=1,
candidate=default_candidate,
semantics=None),
# Index AndList left
Explanation(
condition="the words 'his' and 'wife' are no more than three words to the left of Y",
label=1,
candidate=default_candidate,
semantics=None),
# Index AndArgList
Explanation(
condition="'wife' is within three words to the left of X or Y",
label=1,
candidate=default_candidate,
semantics=None),
]
index_chars = [
# Characters0
Explanation(
condition="'wife' is less than 10 characters to the left of Y",
label=1,
candidate=default_candidate,
semantics=None),
# Characters1
Explanation(
condition="'wife' is more than 5 characters to the right of X",
label=1,
candidate=default_candidate,
semantics=None),
# Characters2
Explanation(
condition="there are at least 10 characters between X and Y",
label=1,
candidate=default_candidate,
semantics=None),
]
ner = [
# Tokens
Explanation(
condition="at least one word to the left of X is lower case",
label=1,
candidate=default_candidate,
semantics=None),
# NER
Explanation(
condition="there are no people between X and Y",
label=1,
candidate=default_candidate,
semantics=None),
]
count = [
# Count0
Explanation(
condition="there are not three people in the sentence",
label=1,
candidate=default_candidate,
semantics=None),
# Count1
Explanation(
condition="the number of words between X and Y is less than 25",
label=1,
candidate=default_candidate,
semantics=None),
# Count2
Explanation(
condition="there are at least two words between X and Y",
label=1,
candidate=default_candidate,
semantics=None),
# Count3
Explanation(
condition="at least one word exists between X and Y",
label=1,
candidate=default_candidate,
semantics=None),
# Count4
Explanation(
condition="at least one word to the left of Y starts with a spouse word",
label=1,
candidate=default_candidate,
semantics=None),
]
anaphora = [
# Them
Explanation(
condition="'wife' is between X and Y and 'divorced' is not between them",
label=1,
candidate=default_candidate,
semantics=None),
# TODO: add handling for he/she, his/her, him/her?
]
tuples = [
# Tuple
Explanation(
condition="the pair (X, Y) is the same as the tuple ('foo', 'bar')",
label=1,
candidate=('foo', 'bar'),
semantics=('.root', ('.label', ('.int', 1), ('.call', ('.eq', ('.tuple', ('.list', ('.string', u'foo'), ('.string', u'bar')))), ('.tuple', ('.list', ('.arg_to_string', ('.arg', ('.int', 1))), ('.arg_to_string', ('.arg', ('.int', 2))))))))),
]
implicit_strings = [
# Normal
Explanation(
condition='It says "wife"',
label=1,
candidate=default_candidate,
semantics=None),
# Not quoted unigram
Explanation(
condition='It says wife',
label=1,
candidate=default_candidate,
semantics=None),
# Not quoted bigram
Explanation(
condition='It says historic neighborhood',
label=1,
candidate=default_candidate,
semantics=None),
# Not quoted bigram with stopword
Explanation(
condition='It says his wife',
label=1,
candidate=default_candidate,
semantics=None),
# Implicit candidate
Explanation(
condition='wife comes after Daniel Ammann',
label=1,
candidate=default_candidate,
semantics=None),
# Don't quote existing quotation
Explanation(
condition='It says "the upscale historic neighborhood"',
label=1,
candidate=default_candidate,
semantics=None),
] | babble-master | tests/text_explanations.py |
import os
import unittest
from babble import SemanticParser
from test_babble_base import TestBabbleBase
import core_explanations
class TestBabbleCore(TestBabbleBase):
    """End-to-end tests of the SemanticParser over the core (domain-independent)
    explanation grammar: each test parses a fixture set from core_explanations
    and checks the resulting semantics via TestBabbleBase.check_explanations."""
    @classmethod
    def setUpClass(cls):
        # One shared parser for the whole class; building the grammar is
        # comparatively expensive, so do it once.
        cls.sp = SemanticParser(aliases=core_explanations.get_aliases(),
                                beam_width=10,
                                top_k=-1)
    def test_logic(self):
        self.check_explanations(core_explanations.logic)
    def test_grouping(self):
        self.check_explanations(core_explanations.grouping)
    def test_integers(self):
        self.check_explanations(core_explanations.integers)
    def test_lists(self):
        self.check_explanations(core_explanations.lists)
    def test_membership(self):
        self.check_explanations(core_explanations.membership)
    def test_absorption(self):
        self.check_explanations(core_explanations.absorption)
    def test_translate(self):
        # translate() should render a semantic tree as readable pseudocode.
        semantics = ('.root', ('.label', ('.bool', True), ('.and', ('.bool', True), ('.bool', True))))
        pseudocode = 'return 1 if (True and True) else 0'
        self.assertEqual(self.sp.translate(semantics), pseudocode) | babble-master | tests/test_babble_core.py
from babble import Explanation
def get_aliases():
    """Build the alias dictionary used by the core grammar tests.

    Each key is a list name that explanations may refer to; each value is the
    list of members that name expands to.
    """
    aliases = {}
    aliases['colors'] = ['red', 'green', 'blue']
    aliases['bluebird'] = ['blue', 'bird', 'fly']
    aliases['greek'] = ['alpha', 'beta', 'gamma']
    aliases['letters'] = ['a', 'B', 'C']
    aliases['smalls'] = ['a', 'b', 'c', 'd']
    aliases['luckies'] = [7, 8, 9]
    aliases['unluckies'] = [0, 13, 66]
    return aliases
# Test candidate (hash: 668761641257950361):
# "City land records show that GM President [Daniel Ammann] and his wife,
# [Pernilla Ammann], bought the 15-bedroom mansion on Balmoral Drive in
# the upscale historic neighborhood on July 31."
# Each fixture below pairs a natural-language condition with the exact
# semantic tree the parser is expected to produce for it.
logic = [
    # Base
    Explanation(
        condition="True",
        label=1,
        candidate=('foo', 'bar'),
        semantics=('.root', ('.label', ('.int', 1), ('.bool', True)))),
    # And
    Explanation(
        condition="True and True",
        label=1,
        candidate=('foo', 'bar'),
        semantics=('.root', ('.label', ('.int', 1), ('.and', ('.bool', True), ('.bool', True))))),
    # Or
    Explanation(
        condition="False or True",
        label=1,
        candidate=('foo', 'bar'),
        semantics=('.root', ('.label', ('.int', 1), ('.or', ('.bool', False), ('.bool', True))))),
    # Not boolean
    Explanation(
        condition="not False",
        label=1,
        candidate=('foo', 'bar'),
        semantics=('.root', ('.label', ('.int', 1), ('.not', ('.bool', False))))),
    # Not function
    Explanation(
        condition="2 is not less than 1",
        label=1,
        candidate=('foo', 'bar'),
        semantics=('.root', ('.label', ('.int', 1), ('.not', ('.call', ('.lt', ('.int', 1)), ('.int', 2)))))),
    # All
    Explanation(
        condition='all of (2, 3, 4) are greater than 1',
        label=1,
        candidate=('foo', 'bar'),
        semantics=('.root', ('.label', ('.int', 1), ('.all', ('.map', ('.gt', ('.int', 1)), ('.list', ('.int', 2), ('.int', 3), ('.int', 4))))))),
    # Any
    Explanation(
        condition='any of (3, 1, 4) are less than 2',
        label=1,
        candidate=('foo', 'bar'),
        semantics=('.root', ('.label', ('.int', 1), ('.any', ('.map', ('.lt', ('.int', 2)), ('.list', ('.int', 3), ('.int', 1), ('.int', 4))))))),
    # None
    Explanation(
        condition='none of (1, 2, 3) are greater than 4',
        label=1,
        candidate=('foo', 'bar'),
        semantics=('.root', ('.label', ('.int', 1), ('.none', ('.map', ('.gt', ('.int', 4)), ('.list', ('.int', 1), ('.int', 2), ('.int', 3))))))),
]
grouping = [
    # Parentheses
    Explanation(
        condition="True or (True and False)",
        label=1,
        candidate=('foo', 'bar'),
        semantics=('.root', ('.label', ('.int', 1), ('.or', ('.bool', True), ('.and', ('.bool', True), ('.bool', False)))))),
]
integers = [
    # Equals (Int)
    Explanation(
        condition="1 is equal to 1",
        label=1,
        candidate=('foo', 'bar'),
        semantics=('.root', ('.label', ('.int', 1), ('.call', ('.eq', ('.int', 1)), ('.int', 1))))),
    # Integers (digit or text)
    Explanation(
        condition="1 is equal to one",
        label=1,
        candidate=('foo', 'bar'),
        semantics=('.root', ('.label', ('.int', 1), ('.call', ('.eq', ('.int', 1)), ('.int', 1))))),
    # Less than
    Explanation(
        condition="1 is less than 2",
        label=1,
        candidate=('foo', 'bar'),
        semantics=('.root', ('.label', ('.int', 1), ('.call', ('.lt', ('.int', 2)), ('.int', 1))))),
    # At most
    Explanation(
        condition="2 is less than or equal to 2",
        label=1,
        candidate=('foo', 'bar'),
        semantics=('.root', ('.label', ('.int', 1), ('.call', ('.leq', ('.int', 2)), ('.int', 2))))),
    # Greater than
    Explanation(
        condition="2 > 1",
        label=1,
        candidate=('foo', 'bar'),
        semantics=('.root', ('.label', ('.int', 1), ('.call', ('.gt', ('.int', 1)), ('.int', 2))))),
    # At least
    Explanation(
        condition="2 is at least 2",
        label=1,
        candidate=('foo', 'bar'),
        semantics=('.root', ('.label', ('.int', 1), ('.call', ('.geq', ('.int', 2)), ('.int', 2))))),
]
lists = [
    # OrList left
    Explanation(
        condition="7 or 5 is larger than 6",
        label=1,
        candidate=('foo', 'bar'),
        semantics=('.root',('.label',('.int', 1),('.any',('.map', ('.gt', ('.int', 6)), ('.list', ('.int', 7), ('.int', 5))))))),
    # OrList right
    Explanation(
        condition="2 is less than 3 or 1",
        label=1,
        candidate=('foo', 'bar'),
        semantics=('.root',('.label',('.int', 1),('.call',('.composite_or', ('.lt',), ('.list', ('.int', 3), ('.int', 1))),('.int', 2))))),
    # AndList left
    Explanation(
        condition="8 and 8 are equal to 8",
        label=1,
        candidate=('foo', 'bar'),
        semantics=('.root',('.label',('.int', 1),('.all',('.map', ('.eq', ('.int', 8)), ('.list', ('.int', 8), ('.int', 8))))))),
    # AndList right
    Explanation(
        condition="2 is less than 3 and 4",
        label=1,
        candidate=('foo', 'bar'),
        semantics=('.root',('.label',('.int', 1),('.call',('.composite_and', ('.lt',), ('.list', ('.int', 3), ('.int', 4))),('.int', 2))))),
    # Not AndList
    Explanation(
        condition="2 is not more than 1 and 3",
        label=1,
        candidate=('foo', 'bar'),
        semantics=('.root',('.label',('.int', 1),('.not',('.call',('.composite_and', ('.gt',), ('.list', ('.int', 1), ('.int', 3))),('.int', 2)))))),
    # Not OrList
    Explanation(
        condition="2 is not more than 3 or 4",
        label=1,
        candidate=('foo', 'bar'),
        semantics=('.root',('.label',('.int', 1),('.not',('.call',('.composite_or', ('.gt',), ('.list', ('.int', 3), ('.int', 4))),('.int', 2)))))),
]
membership = [
    # In
    Explanation(
        condition="1 is in (1, 2)",
        label=1,
        candidate=('foo', 'bar'),
        semantics=('.root', ('.label', ('.int', 1), ('.call', ('.in', ('.list', ('.int', 1), ('.int', 2))), ('.int', 1))))),
    # In AndList
    Explanation(
        condition="1 and 2 are in (1, 2, 3)",
        label=1,
        candidate=('foo', 'bar'),
        semantics=('.root', ('.label', ('.int', 1), ('.all', ('.map', ('.in', ('.list', ('.int', 1), ('.int', 2), ('.int', 3))), ('.list', ('.int', 1), ('.int', 2))))))),
    # In OrList
    Explanation(
        condition="1 or 2 is in (2, 3)",
        label=1,
        candidate=('foo', 'bar'),
        semantics=('.root', ('.label', ('.int', 1), ('.any', ('.map', ('.in', ('.list', ('.int', 2), ('.int', 3))), ('.list', ('.int', 1), ('.int', 2))))))),
    # Contains
    Explanation(
        condition="(1, 2) contains 2",
        label=1,
        candidate=('foo', 'bar'),
        semantics=('.root', ('.label', ('.int', 1), ('.call', ('.in', ('.list', ('.int', 1), ('.int', 2))), ('.int', 2))))),
    # Contains AndList
    Explanation(
        condition="(1, 2) contains 2 and 1",
        label=1,
        candidate=('foo', 'bar'),
        semantics=('.root', ('.label', ('.int', 1), ('.all', ('.map', ('.in', ('.list', ('.int', 1), ('.int', 2))), ('.list', ('.int', 2), ('.int', 1))))))),
    # Contains OrList
    Explanation(
        condition="(1, 2) contains 2 or 3",
        label=1,
        candidate=('foo', 'bar'),
        semantics=('.root', ('.label', ('.int', 1), ('.any', ('.map', ('.in', ('.list', ('.int', 1), ('.int', 2))), ('.list', ('.int', 2), ('.int', 3))))))),
]
absorption = [
    # Partially unparseable
    Explanation(
        condition="1 is less than 2 and the moon is full",
        label=1,
        candidate=('foo', 'bar'),
        semantics=('.root', ('.label', ('.int', 1), ('.call', ('.lt', ('.int', 2)), ('.int', 1)))))
]
# The full suite: every fixture set above, concatenated.
explanations = (logic + grouping + integers + lists + membership + absorption) | babble-master | tests/core_explanations.py
from collections import namedtuple, OrderedDict, defaultdict
import numpy as np
from metal.contrib.info_extraction.mentions import RelationMention
from scipy.sparse import csr_matrix
from babble.parsing import Parse
from babble.utils import PrintTimer, ProgressBar
# A parse removed by a filter, paired with the reason it was removed (e.g.,
# the earlier parse or candidate it conflicted with) for later inspection.
FilteredParse = namedtuple('FilteredParse', ['parse', 'reason'])
class FilterBank(object):
    """Runs each batch of parses through a fixed pipeline of filters.
    Structure/consistency filters run first so that obviously-bad parses are
    never applied to candidates; the remaining parses are then labeled and
    pruned by the signature- and coverage-based filters. Order matters.
    """
    def __init__(self, split=1):
        # NOTE(review): `split` is accepted but never used; the candidates to
        # label are passed explicitly to apply(). Confirm before removing.
        self.dup_semantics_filter = DuplicateSemanticsFilter()
        self.consistency_filter = ConsistencyFilter()
        self.uniform_filter = UniformSignatureFilter()
        self.dup_signature_filter = DuplicateSignatureFilter()
        self.lowest_coverage_filter = LowestCoverageFilter()
        self.label_matrix = None
    def apply(self, parses, explanations, candidates, parallelism=1):
        """
        Returns:
            parses: Parses
            filtered_parses: dict of Parses removed by each Filter
            label_matrix: scipy.csr_matrix corresponding to parses
        """
        # NOTE(review): `parallelism` is accepted but never used here.
        filtered_parses = {}
        # Apply structure and consistency based filters
        parses, rejected = self.dup_semantics_filter.filter(parses)
        filtered_parses[self.dup_semantics_filter.name()] = rejected
        if not parses: return parses, filtered_parses, None
        parses, rejected = self.consistency_filter.filter(parses, explanations)
        filtered_parses[self.consistency_filter.name()] = rejected
        if not parses: return parses, filtered_parses, None
        # Label and extract signatures
        label_matrix = self.label(parses, candidates)
        # Apply signature based filters (each also narrows label_matrix so its
        # columns stay aligned with the surviving parses)
        parses, rejected, label_matrix = self.uniform_filter.filter(parses, label_matrix)
        filtered_parses[self.uniform_filter.name()] = rejected
        if not parses: return parses, filtered_parses, None
        parses, rejected, label_matrix = self.dup_signature_filter.filter(parses, label_matrix)
        filtered_parses[self.dup_signature_filter.name()] = rejected
        if not parses: return parses, filtered_parses, None
        # Apply coverage filter
        parses, rejected, label_matrix = self.lowest_coverage_filter.filter(parses, label_matrix)
        filtered_parses[self.lowest_coverage_filter.name()] = rejected
        if not parses: return parses, filtered_parses, None
        return parses, filtered_parses, label_matrix
    def label(self, parses, candidates):
        """Apply every parse's labeling function to every candidate and return
        the resulting [num_candidates x num_parses] matrix as a csr_matrix."""
        print("Applying labeling functions to investigate labeling signature.")
        lfs = [parse.function for parse in parses]
        dense_label_matrix = np.zeros((len(candidates), len(lfs)))
        pb = ProgressBar(len(lfs))
        for j, lf in enumerate(lfs):
            pb.bar(j)
            for i, c in enumerate(candidates):
                dense_label_matrix[i, j] = lf(c)
        pb.close()
        label_matrix = csr_matrix(dense_label_matrix)
        return label_matrix
    def commit(self, idxs):
        """Propagate the user's keep-decision to the stateful filters so that
        committed semantics/signatures are remembered across batches."""
        self.dup_semantics_filter.commit(idxs)
        self.consistency_filter.commit(idxs)
        self.uniform_filter.commit(idxs)
        self.dup_signature_filter.commit(idxs)
        self.lowest_coverage_filter.commit(idxs)
class Filter(object):
    """Abstract base class for parse filters.

    Subclasses implement filter(); commit() is a no-op hook for filters that
    keep cross-batch state; validate() normalizes and sanity-checks input.
    """
    def filter(self, input):
        raise NotImplementedError

    def name(self):
        """Return the concrete subclass's name, used in log messages."""
        return type(self).__name__

    def commit(self, idxs):
        # Stateless filters have nothing to persist.
        pass

    def validate(self, parses):
        """Coerce `parses` to a list and verify its elements are Parses."""
        if not isinstance(parses, list):
            parses = [parses]
        if not parses:
            print("Warning: Filter {} was applied to an empty list.".format(self.name()))
        elif not isinstance(parses[0], Parse):
            raise ValueError("Expected: Parse. Got: {}".format(type(parses[0])))
        return parses
class DuplicateSemanticsFilter(Filter):
    """Filters out parses with identical logical forms (keeping one)."""
    def __init__(self):
        # Semantics committed in earlier batches (key: semantics, value: parse).
        self.seen_semantics = {}
        # Semantics first seen in the current batch, in arrival order.
        self.temp_seen_semantics = OrderedDict()

    def filter(self, parses):
        """Split `parses` into (kept, rejected) by logical-form uniqueness."""
        parses = self.validate(parses)
        if not parses: return [], []
        kept = []
        rejected = []
        for parse in parses:
            # A parse may collide with a previously committed parse or with
            # one seen earlier in this batch; either way, record the parse it
            # collided with for reference.
            collision = self.seen_semantics.get(parse.semantics)
            if collision is None:
                collision = self.temp_seen_semantics.get(parse.semantics)
            if collision is not None:
                rejected.append(FilteredParse(parse, collision))
            else:
                kept.append(parse)
                self.temp_seen_semantics[parse.semantics] = parse
        print("{} parse(s) remain ({} parse(s) removed by {}).".format(
            len(kept), len(rejected), self.name()))
        return kept, rejected

    def commit(self, idxs):
        """Permanently remember the semantics of the batch parses at `idxs`."""
        for i, parse in enumerate(self.temp_seen_semantics.values()):
            if i in idxs:
                self.seen_semantics[parse.semantics] = parse
        self.temp_seen_semantics = OrderedDict()
class ConsistencyFilter(Filter):
    """Filters out parses that incorrectly label their accompanying candidate."""
    def filter(self, parses, explanations):
        """Keep parses whose LF labels its own explanation's candidate truthy.

        Parses whose explanation cannot be found, has no real candidate, or
        whose LF crashes on its candidate are passed through unfiltered.
        """
        parses = self.validate(parses)
        if not parses: return [], []
        explanations = explanations if isinstance(explanations, list) else [explanations]
        explanation_dict = {exp.name: exp for exp in explanations}
        good_parses = []
        bad_parses = []
        unknown_parses = []
        for parse in parses:
            lf = parse.function
            exp_name = extract_exp_name(lf)
            # BUGFIX: a missing explanation name used to raise an uncaught
            # KeyError; such parses are now treated as uncheckable instead.
            exp = explanation_dict.get(exp_name)
            if exp is None or not isinstance(exp.candidate, RelationMention):
                unknown_parses.append(parse)
            else:
                try:
                    if lf(exp.candidate):
                        good_parses.append(parse)
                    else:
                        bad_parses.append(FilteredParse(parse, exp.candidate))
                except Exception:
                    # Consistency checking is best-effort: an LF that crashes
                    # on its own candidate is passed through, not rejected.
                    # (Was a bare `except:`, which also swallowed SystemExit
                    # and KeyboardInterrupt.)
                    unknown_parses.append(parse)
        if unknown_parses:
            print("Note: {} LFs did not have candidates and therefore could "
                  "not be filtered.".format(len(unknown_parses)))
        good_parses += unknown_parses
        print("{} parse(s) remain ({} parse(s) removed by {}).".format(
            len(good_parses), len(bad_parses), self.name()))
        return good_parses, bad_parses
class UniformSignatureFilter(Filter):
    """Filters out parses that give all candidates the same label."""
    def filter(self, parses, label_matrix):
        """
        :param parses: ...
        :param label_matrix: a csr_sparse matrix of shape [M, N] for M
            candidates by the N Parses in :param parses.
        :returns good_parses: a list of Parses to keep
        :returns bad_parses: a list of Parses to not keep
        :returns label_matrix: a csr_sparse matrix of shape [M, N'] for M
            candidates by the N' Parses in good_parses.
        """
        parses = self.validate(parses)
        if not parses: return [], [], label_matrix
        if not isinstance(label_matrix, csr_matrix):
            raise Exception("Method filter() requires a label_matrix of type "
                "scipy.sparse.csr_matrix.")
        num_candidates, num_lfs = label_matrix.shape
        labeled_none_idxs = []
        labeled_all_idxs = []
        for j in range(num_lfs):
            # Nonzero (non-abstain) labels assigned by the j-th LF.
            # BUGFIX: the previous test used abs(column sum), so a column with
            # equally many +1 and -1 labels summed to 0 and was wrongly
            # discarded as labeling "NONE". Coverage and uniformity are now
            # checked separately: "NONE" means no nonzero labels at all, and
            # "ALL" means every candidate received the same single label.
            labels = label_matrix[:, j].data
            if len(labels) == 0:
                labeled_none_idxs.append(j)
            elif len(labels) == num_candidates and len(np.unique(labels)) == 1:
                labeled_all_idxs.append(j)
        uniform_idxs = set(labeled_none_idxs + labeled_all_idxs)
        # O(N) membership via sets (was an O(N^2) list scan).
        nonuniform_idxs = [j for j in range(num_lfs) if j not in uniform_idxs]
        keep_set = set(nonuniform_idxs)
        none_set = set(labeled_none_idxs)
        all_set = set(labeled_all_idxs)
        good_parses = [parse for j, parse in enumerate(parses) if j in keep_set]
        bad_parses = []
        for j, parse in enumerate(parses):
            if j in all_set:
                bad_parses.append(FilteredParse(parse, "ALL"))
            elif j in none_set:
                bad_parses.append(FilteredParse(parse, "NONE"))
        label_matrix = label_matrix[:, nonuniform_idxs]
        print("{} parse(s) remain ({} parse(s) removed by {}: ({} None, {} All)).".format(
            len(good_parses), len(bad_parses), self.name(),
            len(labeled_none_idxs), len(labeled_all_idxs)))
        return good_parses, bad_parses, label_matrix
class DuplicateSignatureFilter(Filter):
    """Filters out all but one parse that have the same labeling signature."""
    def __init__(self):
        # Signatures from previously committed batches (key: signature hash).
        self.seen_signatures = {}
        # Signatures from the current (uncommitted) batch, in arrival order.
        self.temp_seen_signatures = OrderedDict()
    def filter(self, parses, label_matrix):
        """
        :param parses: ...
        :param label_matrix: a label_matrix corresponding to only the remaining
            parses from this batch.
        """
        parses = self.validate(parses)
        if not parses: return [], [], label_matrix
        if not isinstance(label_matrix, csr_matrix):
            raise Exception("Method filter() requires a label_matrix of type "
                "scipy.sparse.csr_matrix.")
        num_candidates, num_lfs = label_matrix.shape
        # Hash which rows each LF labels. BUGFIX: ndarray.tostring() is a
        # deprecated alias removed in NumPy 2.0; tobytes() returns identical
        # bytes, so existing signature hashes are unchanged.
        # NOTE(review): the signature only encodes *which* candidates were
        # labeled, not the label values — two LFs labeling the same rows with
        # different labels collide. Confirm this is intended.
        signatures = [hash(label_matrix[:, i].nonzero()[0].tobytes()) for i in range(num_lfs)]
        good_parses = []
        bad_parses = []
        nonduplicate_idxs = []
        for i, (sig, parse) in enumerate(zip(signatures, parses)):
            # If a parse collides with a previously committed parse or a newly
            # seen temporary parse, add it to the bad parses and store the parse
            # that it collided with, for reference. Otherwise, add to good parses.
            if sig in self.seen_signatures:
                bad_parses.append(FilteredParse(parse, self.seen_signatures[sig]))
            elif sig in self.temp_seen_signatures:
                bad_parses.append(FilteredParse(parse, self.temp_seen_signatures[sig]))
            else:
                good_parses.append(parse)
                self.temp_seen_signatures[sig] = parse
                nonduplicate_idxs.append(i)
        label_matrix = label_matrix[:, nonduplicate_idxs]
        print("{} parse(s) remain ({} parse(s) removed by {}).".format(
            len(good_parses), len(bad_parses), self.name()))
        return good_parses, bad_parses, label_matrix
    def commit(self, idxs):
        """Permanently remember the signatures of the batch parses at `idxs`."""
        for i, (sig, parse) in enumerate(self.temp_seen_signatures.items()):
            if i in idxs:
                self.seen_signatures[sig] = parse
        self.temp_seen_signatures = OrderedDict()
class LowestCoverageFilter(Filter):
    """For each explanation, keeps only its lowest-coverage parse.

    (Rationale: of the surviving parses for one explanation, the one that
    labels the fewest candidates is assumed to be the most precise.)
    """
    def filter(self, parses, label_matrix):
        """
        :param parses: ...
        :param label_matrix: a label_matrix corresponding to only the remaining
            parses from this batch.
        """
        parses = self.validate(parses)
        if not parses: return [], [], label_matrix
        if not isinstance(label_matrix, csr_matrix):
            raise Exception("Method filter() requires a label_matrix of type "
                "scipy.sparse.csr_matrix.")
        # Total number of (non-abstain) labels each parse assigns.
        coverage = np.ravel(np.sum(abs(label_matrix), axis=0))
        by_explanation = defaultdict(list)
        for idx, parse in enumerate(parses):
            by_explanation[parse.explanation.name].append(
                (idx, parse, coverage[idx]))
        kept_parses = []
        kept_idxs = []
        rejected = []
        for triples in by_explanation.values():
            # Lowest coverage first; the winner survives, the rest record the
            # winner's semantics as the reason for their removal.
            ordered = sorted(triples, key=lambda t: t[2])
            winner_idx, winner, _ = ordered[0]
            kept_idxs.append(winner_idx)
            kept_parses.append(winner)
            for (_, loser, _) in ordered[1:]:
                rejected.append(FilteredParse(loser, winner.semantics))
        label_matrix = label_matrix[:, kept_idxs]
        print("{} parse(s) remain ({} parse(s) removed by {}).".format(
            len(kept_parses), len(rejected), self.name()))
        return kept_parses, rejected, label_matrix
def extract_exp_name(lf):
    """Return the explanation name embedded in an LF's __name__.

    LF names follow the pattern '<explanation_name>_<index>'; everything
    before the final underscore is the explanation name. BUGFIX: the previous
    `rindex('_')` raised ValueError for names without an underscore; rsplit
    simply returns the whole name in that case.
    """
    return lf.__name__.rsplit('_', 1)[0]
from collections import defaultdict, namedtuple
import itertools
import random
import numpy as np
from pandas import DataFrame, Series
from scipy.sparse import csr_matrix, coo_matrix, lil_matrix
import scipy.sparse as sparse
from metal.analysis import lf_summary
from babble.filter_bank import FilterBank
from babble.explanation import Explanation
from babble.parsing import Parse, SemanticParser
from babble.utils import PrintTimer, ProgressBar, link_explanation_candidates
class BabbleStream(object):
    """
    Iteratively displays candidates, collects and parses explanations.
    Args:
        Cs: a list of lists containing the candidates for each split
        Ys: a list of arrays containing the ground truth labels for each split
        aliases: a dictionary of aliases for the grammar
        entity_names: a list of two strings corresponding to custom names that
            will be recognized as referring to the first and second entity of
            each relation
        apply_filters: if True, apply the filter bank
        seed: an optional seed for the CandidateGenerator
        verbose: controls verbosity of print statements
    """
    def __init__(self, Cs, Ys, aliases=None, entity_names=None,
                 apply_filters=True, seed=None, verbose=True, **kwargs):
        # BUGFIX: `aliases` and `entity_names` previously used mutable default
        # arguments ({} and []). Because add_aliases() mutates self.aliases in
        # place, the shared default dict leaked aliases across instances.
        self.Cs = Cs
        self.Ys = Ys
        self.aliases = aliases if aliases is not None else {}
        self.verbose = verbose
        self.entity_names = entity_names if entity_names is not None else []
        self.apply_filters = apply_filters
        self.splits = list(range(len(self.Cs)))
        self.candidate_generator = CandidateGenerator(self, seed=seed, **kwargs)
        self._build_semparser()
        self.filter_bank = FilterBank()
        self.filter_split = None
        self.parses = []
        self.label_matrix = None
        # One [rows, cols, data, num_candidates, num_lfs] triple per split.
        # (Was hard-coded to exactly three splits; now sized to len(Cs).)
        self.label_triples = [[[], [], [], 0, 0] for _ in self.Cs]
        # Temporary storage (holds results between apply() and commit())
        self.temp_parses = None
        self.temp_label_matrix = None
        self.last_parses = []
        # Evaluation tools
        self.dev_size = len(Ys[1])
        for Y in Ys:
            if (Y == 0).sum() > 0:
                raise ValueError("Y matrices should not contain 0s.")

    def next(self):
        """Return the next candidate from the candidate generator."""
        c = self.candidate_generator.next()
        self.temp_candidate = c
        return c

    def _build_semparser(self):
        # Rebuilt whenever the alias set changes (see add_aliases).
        self.semparser = SemanticParser(
            entity_names=self.entity_names,
            aliases=self.aliases, beam_width=10)

    def add_aliases(self, new_aliases):
        """
        Adds additional aliases and rebuilds SemanticParser.
        :param new_aliases: A dict {k: v, ...}
            k = (string) list name
            v = (list) words belonging to the alias
        """
        self.aliases.update(new_aliases)
        self._build_semparser()

    def preload(self, explanations=None, aliases=None, label_others=True):
        """
        Load and commit the provided aliases and/or explanations.
        """
        if aliases:
            self.add_aliases(aliases)
        if explanations:
            parses, _ = self.apply(explanations)
            if parses:
                self.commit()

    def apply(self, explanations, split=0):
        """
        :param explanations: an Explanation or list of Explanations.
        :param split: the split to use for the filter bank
        """
        # Flush all uncommmitted results from previous runs
        self.commit([])
        # Store the number of the split the filter bank will use
        self.filter_split = split
        candidates = self.Cs[split]
        explanations = explanations if isinstance(explanations, list) else [explanations]
        # Replace candidate ids with actual candidate objects in explanations
        # if they don't already have candidate objects stored
        explanations = link_explanation_candidates(
            explanations, itertools.chain(*(self.Cs)))
        parses, unparseable_explanations = self._parse(explanations)
        if self.apply_filters:
            parses, filtered_parses, label_matrix = self._filter(
                parses, explanations, candidates)
        else:
            print("Because apply_filters=False, no parses are being filtered.")
            filtered_parses = {}
            label_matrix = self.filter_bank.label(parses, candidates)
        filtered_objects = filtered_parses
        filtered_objects['UnparseableExplanations'] = unparseable_explanations
        # Hold results in temporary space until commit
        self.temp_parses = parses if isinstance(parses, list) else [parses]
        self.temp_label_matrix = label_matrix
        self.temp_filtered_objects = filtered_objects
        return parses, filtered_objects

    def _parse(self, explanations):
        """
        :param explanations: an Explanation or list of Explanations.
        :return: a list of Parses.
        """
        parses = self.semparser.parse(explanations,
            return_parses=True, verbose=self.verbose)
        used_explanations = set([p.explanation for p in parses])
        unparseable_explanations = [FilteredExplanation(exp, 'Unparseable')
                                    for exp in explanations if exp not in used_explanations]
        return parses, unparseable_explanations

    def _filter(self, parses, explanations, candidates):
        """
        :param parses: a Parse or list of Parses.
        :param explanations: the Explanation or list of Explanations from which
            the parse(s) were produced.
        :return: the outputs from filter_bank.apply()
        """
        return self.filter_bank.apply(parses, explanations, candidates)

    def analyze(self, parses):
        """Summarize the given parses' LF behavior on the dev split (split 1)."""
        lf_names = []
        L = lil_matrix((len(self.Cs[1]), len(parses)))
        for j, p in enumerate(parses):
            lf_names.append(p.function.__name__)
            for i, c in enumerate(self.Cs[1]):
                L[i, j] = p.function(c)
        return lf_summary(L.tocsr(), Y=self.Ys[1], lf_names=lf_names)

    def filtered_analysis(self, filtered_parses=None):
        """Print a human-readable report of why each parse was filtered."""
        if filtered_parses is None:
            # Use the last set of filtered parses to be produced.
            filtered_parses = self.temp_filtered_objects
        if filtered_parses is None or not any(filtered_parses.values()):
            print("No filtered parses to analyze.")
            return
        filter_names = [
            'UnparseableExplanations',
            'DuplicateSemanticsFilter',
            'ConsistencyFilter',
            'UniformSignatureFilter',
            'DuplicateSignatureFilter',
            'LowestCoverageFilter',
        ]
        num_filtered = 0
        print("SUMMARY")
        print("{} TOTAL:".format(
            sum([len(p) for p in filtered_parses.values()])))
        print("{} Unparseable Explanation".format(
            len(filtered_parses.get('UnparseableExplanations', []))))
        print("{} Duplicate Semantics".format(
            len(filtered_parses.get('DuplicateSemanticsFilter', []))))
        print("{} Inconsistency with Example".format(
            len(filtered_parses.get('ConsistencyFilter', []))))
        print("{} Uniform Signature".format(
            len(filtered_parses.get('UniformSignatureFilter', []))))
        print("{} Duplicate Signature".format(
            len(filtered_parses.get('DuplicateSignatureFilter', []))))
        print("{} Lowest Coverage".format(
            len(filtered_parses.get('LowestCoverageFilter', []))))
        for filter_name in filter_names:
            parses = filtered_parses.get(filter_name, [])
            for filtered_parse in parses:
                num_filtered += 1
                if filtered_parse.reason == 'Unparseable':
                    parse_str = filtered_parse.parse.condition
                else:
                    parse_str = self.semparser.grammar.translate(filtered_parse.parse.semantics)
                if filter_name == 'UnparseableExplanations':
                    filter_str = "Unparseable Explanation"
                    reason_str = "This explanation couldn't be parsed."
                elif filter_name == 'DuplicateSemanticsFilter':
                    filter_str = "Duplicate Semantics"
                    reason_str = 'This parse is identical to one produced by the following explanation:\n\t"{}"'.format(
                        filtered_parse.reason.explanation.condition)
                elif filter_name == 'ConsistencyFilter':
                    candidate = filtered_parse.reason
                    filter_str = "Inconsistency with Example"
                    reason_str = "This parse abstained on its own candidate ({})".format(
                        candidate)
                elif filter_name == 'UniformSignatureFilter':
                    filter_str = "Uniform Signature"
                    reason_str = "This parse labeled {} of the {} development examples".format(
                        filtered_parse.reason, self.dev_size)
                elif filter_name == 'DuplicateSignatureFilter':
                    filter_str = "Duplicate Signature"
                    reason_str = "This parse labeled identically to the following existing parse:\n\t{}".format(
                        self.semparser.grammar.translate(filtered_parse.reason.explanation))
                elif filter_name == 'LowestCoverageFilter':
                    filter_str = "Lowest Coverage"
                    reason_str = "This parse had the lowest coverage of all parses passing the other filters"
                print("\n[#{}]: {}".format(num_filtered, filter_str))
                # print("\nFilter: {}".format(filter_str))
                if filtered_parse.reason == 'Unparseable':
                    print("\nExplanation: {}".format(parse_str))
                else:
                    print("\nParse: {}".format(parse_str))
                print("\nReason: {}\n".format(reason_str))
                print("Semantics: {}\n".format(filtered_parse.parse.semantics))

    def commit(self, idxs='all'):
        """
        :param idxs: The indices of the parses (from the most recently returned
            list of parses) to permanently keep.
            If idxs = 'all', keep all of the parses.
            If idxs is an integer, keep just that one parse.
            If idxs is a list of integers, keep all parses from that list.
            If idxs = None or [], keep none of the parses.
        """
        if not self.temp_parses:
            idxs = []
        if idxs == 'all':
            idxs = list(range(len(self.temp_parses)))
        elif isinstance(idxs, int):
            idxs = [idxs]
        elif idxs == [] or idxs is None:
            idxs = []
            if self.temp_parses:
                print("Flushing all parses from previous explanation set.")
        if (isinstance(idxs, list) and len(idxs) > 0 and
                all(isinstance(x, int) for x in idxs)):
            if max(idxs) >= len(self.temp_parses):
                raise Exception("Invalid idx: {}.".format(max(idxs)))
            parses_to_add = [p for i, p in enumerate(self.temp_parses) if i in idxs]
            explanations_to_add = set([parse.explanation for parse in parses_to_add])
            # Update label matrix for label split
            # NOTE(review): the *entire* temp_label_matrix is appended even
            # when only a subset of idxs is kept — confirm whether columns for
            # rejected parses should be dropped here.
            self.parses.extend(parses_to_add)
            if self.label_matrix is None:
                self.label_matrix = self.temp_label_matrix
            else:
                self.label_matrix = sparse.hstack((self.label_matrix, self.temp_label_matrix))
            self.last_parses = parses_to_add
            if self.verbose:
                print("Added {} parse(s) from {} explanations to set. (Total # parses = {})\n".format(
                    len(parses_to_add), len(explanations_to_add), len(self.parses)))
            # Update label matrix for other splits
            for split in self.splits:
                if split == self.filter_split:
                    continue
                self.label_split(split)
        # Permanently store the semantics and signatures in duplicate filters
        self.filter_bank.commit(idxs)
        self.temp_parses = None
        self.temp_label_matrix = None

    def label_split(self, split):
        """Label a single split with the most recently committed LFs."""
        print("Applying labeling functions to split {}".format(split))
        lfs = [parse.function for parse in self.last_parses]
        candidates = self.Cs[split]
        num_existing_lfs = self.label_triples[split][4]
        rows = []
        cols = []
        data = []
        pb = ProgressBar(len(candidates) * len(lfs))
        count = 0
        for j, lf in enumerate(lfs):
            for i, c in enumerate(candidates):
                pb.bar(count)
                count += 1
                label = int(lf(c))
                if label:
                    rows.append(i)
                    cols.append(j + num_existing_lfs)
                    data.append(label)
        pb.close()
        # NOTE: There is potential for things to go wrong if the user calls
        # this function twice and the label matrix ends up wonky.
        self.label_triples[split][0].extend(rows)
        self.label_triples[split][1].extend(cols)
        self.label_triples[split][2].extend(data)
        self.label_triples[split][3] = len(candidates)
        self.label_triples[split][4] += len(lfs)
        m, n = self.label_triples[split][3], self.label_triples[split][4]
        print(f"Added {len(data)} labels to split {split}: "
              f"L.nnz = {len(self.label_triples[split][2])}, "
              f"L.shape = ({m}, {n}).")

    def get_label_matrix(self, split):
        """Retrieve the given split's label matrix from all commited parses"""
        if self.temp_parses is not None:
            print("You must commit before retrieving the label matrix.")
            return None
        if split == self.filter_split:
            # (A duplicated assignment to L was removed here.)
            L = self.label_matrix
        else:
            rows, cols, data, shape_row, shape_col = self.label_triples[split]
            L = coo_matrix((data, (rows, cols)), shape=(shape_row, shape_col))
        if self.verbose:
            print(f"Retrieved label matrix for split {split}: L.nnz = {L.nnz}, "
                  f"L.shape = {L.shape}")
        return L.astype(int).tocsr()

    def error_buckets(self, parse):
        """Split dev candidates into (correct, incorrect) under this parse's LF."""
        correct = []
        incorrect = []
        for c, l in zip(self.Cs[1], self.Ys[1]):
            vote = parse.function(c)
            if vote == 0:
                continue
            elif vote == l:
                correct.append(c)
            else:
                incorrect.append(c)
        return (correct, incorrect)

    def view_parse(self, parse):
        """Print a parse's LF name and its pseudocode translation."""
        print(f"Name: {parse.function.__name__}")
        print(f"Parse: {self.semparser.grammar.translate(parse.semantics)}")

    def get_parses(self, idx=None, translate=True):
        """Return committed parses (or their translations) at idx/all."""
        if idx is None:
            parses = self.parses
        elif isinstance(idx, int):
            parses = [self.parses[idx]]
        elif isinstance(idx, list):
            parses = [parse for i, parse in enumerate(self.parses) if i in idx]
        if translate:
            return [self.semparser.grammar.translate(parse.semantics) for parse in parses]
        else:
            return parses

    def get_lfs(self, idx=None):
        """Return the labeling functions of the committed parses."""
        return [parse.function for parse in self.get_parses(idx=idx, translate=False)]

    def get_explanations(self, idx=None):
        """Return the unique explanations behind the committed parses, in order."""
        explanations = []
        explanations_set = set()
        for parse in self.get_parses(idx=idx, translate=False):
            explanation = parse.explanation
            if explanation not in explanations_set:
                explanations.append(explanation)
                explanations_set.add(explanation)
        return explanations
class Babbler(BabbleStream):
    """A BabbleStream that immediately commits every parse surviving the filters."""
    def apply(self, *args, **kwargs):
        """Parse/filter explanations, commit all survivors, and return the
        (parses, filtered_objects) pair from BabbleStream.apply().

        BUGFIX: the result used to be discarded (returning None), which broke
        the inherited preload(), whose `parses, _ = self.apply(...)` then
        failed to unpack.
        """
        result = BabbleStream.apply(self, *args, **kwargs)
        self.commit()
        return result
# Use 'parse' as field instead of 'explanation' to match with FilteredParse object.
# Wraps an explanation that never produced a parse; `reason` is 'Unparseable'.
FilteredExplanation = namedtuple('FilteredExplanation', ['parse', 'reason'])
class CandidateGenerator(object):
    """
    A generator for returning a list of candidates in a certain order.
    """
    def __init__(self, babble_stream, seed=None,
                 balanced=False, active=False, shuffled=False,
                 priority_candidate_ids=[]):
        """
        If active = True, return only candidates that have no labels so far
        If balanced = True, alternate between candidates with True/False gold labels
        If random = True, return the candidates (passing the above conditions,
            if applicable) in random order.
        """
        # Candidates/labels come from the dev split (split 1) of the stream.
        candidates = babble_stream.Cs[1]
        labels = babble_stream.Ys[1]
        candidates, labels, priority_generator = self.make_priority_generator(
            candidates, labels, priority_candidate_ids)
        self.priority_generator = priority_generator
        if active:
            raise NotImplementedError
        else:
            # Priority candidates are always yielded first, then the rest.
            if balanced:
                self.candidate_generator = itertools.chain(
                    priority_generator, self.balanced_generator(
                        candidates, labels, seed, shuffled=shuffled))
            else:
                self.candidate_generator = itertools.chain(
                    priority_generator, self.linear_generator(
                        candidates, seed, shuffled=shuffled))
    def next(self):
        # Raises StopIteration once all candidates have been yielded.
        return self.candidate_generator.__next__()
    def make_priority_generator(self, candidates, labels, priority_candidate_ids):
        # Pull out priority candidates to view first if applicable
        # Go for the slightly more wasteful but easy-to-understand solution
        if priority_candidate_ids:
            def simple_generator(candidates):
                for c in candidates:
                    yield c
            priority_set = set(priority_candidate_ids)
            priority = []
            other = []
            # Pull out all priority candidates
            for c, l in zip(candidates, labels):
                if c.mention_id in priority_set:
                    priority.append(c)
                else:
                    # Hold on to the labels for a possible balanced_generator downstream
                    other.append((c, l))
            # Put them in desired order
            priority_idxs = {c: i for i, c in enumerate(priority_candidate_ids)}
            priority.sort(key=lambda x: priority_idxs[x.mention_id])
            priority_generator = simple_generator(priority)
            # Restore remaining candidates and labels to normal lists
            # NOTE(review): zip(*other) yields *tuples*, not lists; combined
            # with shuffled=True this makes linear_generator's random.shuffle
            # raise TypeError — confirm and convert with list() if so.
            candidates, labels = zip(*other)
        else:
            priority_generator = iter(())
        return candidates, labels, priority_generator
    @staticmethod
    def linear_generator(candidates, seed, shuffled=False):
        # NOTE(review): random.shuffle mutates the caller's `candidates`
        # sequence in place (and requires it to be mutable).
        if shuffled:
            if seed is not None:
                random.seed(seed)
            random.shuffle(candidates)
        for c in candidates:
            yield c
    @staticmethod
    def balanced_generator(candidates, labels, seed, shuffled=False):
        candidates_labels = list(zip(candidates, labels))
        if shuffled:
            if seed is not None:
                random.seed(seed)
            random.shuffle(candidates_labels)
        # Round-robin across label groups so classes alternate in the output.
        groups = defaultdict(list)
        for c, l in candidates_labels:
            groups[l].append(c)
        counters = {k: 0 for k, _ in groups.items()}
        candidate_queue = []
        # NOTE(review): label_queue is built but never consumed.
        label_queue = []
        total = 0
        while total < len(candidates):
            for label, cands in sorted(groups.items()):
                if counters[label] < len(cands):
                    candidate_queue.append(cands[counters[label]])
                    label_queue.append(label)
                    counters[label] += 1
                    total += 1
        for c in candidate_queue:
            yield c | babble-master | babble/babbler.py
from .explanation import Explanation
from .parsing import Rule, Grammar, Parse, SemanticParser
from .filter_bank import FilterBank
from .utils import ExplanationIO, link_explanation_candidates
from .babbler import Babbler, BabbleStream | babble-master | babble/__init__.py |
import random
from sklearn.linear_model import LogisticRegression
from metal.utils import convert_labels
from metal.metrics import metric_score
class LogisticRegressionWrapper(object):
    """A wrapper around scikit-learn's LogisticRegression class

    The wrapper is necessary both to convert labels from categorical to one-zero
    and to match the interface expected by snorkel-metal's ModelTuners.
    """
    def __init__(self, C=1.0, penalty='l2', seed=None):
        """
        :param C: inverse regularization strength.
        :param penalty: regularization type passed through to sklearn.
        :param seed: optional RNG seed.
        """
        # BUGFIX: use `is not None` so seed=0 still seeds the RNG.
        if seed is not None:
            random.seed(seed)
        # BUGFIX: the `penalty` argument was previously ignored ('l2' was
        # always hardcoded in the LogisticRegression call).
        self.model = LogisticRegression(C=C, penalty=penalty)

    def train(self, X, Y, X_dev=None, Y_dev=None, **kwargs):
        """Fit on X with categorical labels Y (converted to one-zero)."""
        Y_bin = convert_labels(Y, 'categorical', 'onezero')
        self.model.fit(X, Y_bin)

    def predict(self, X):
        """Return one-zero predictions for X."""
        return self.model.predict(X)

    def score(self, X, Y, metric='f1', verbose=True):
        """Score predictions on (X, Y) with one or more metrics.

        :param metric: a metric name or list of metric names.
        :return: a single score if one metric was given, else a list of scores.
        """
        Y = convert_labels(Y, 'categorical', 'onezero')
        Y_p = self.predict(X)
        metric_list = metric if isinstance(metric, list) else [metric]
        scores = []
        for metric in metric_list:
            score = metric_score(Y, Y_p, metric)
            scores.append(score)
            if verbose:
                print(f"{metric.capitalize()}: {score:.3f}")
        if len(scores) == 1:
            return scores[0]
        else:
            return scores
| babble-master | babble/disc_model.py |
import re
class Explanation(object):
    def __init__(self, condition, label, candidate=None, name=None,
                 semantics=None, paraphrase=None):
        """
        Constructs an Explanation object.

        :param condition: A string explanation that expresses a Boolean
            condition (e.g., "The sentence is at least 5 words long.")
        :param label: The categorical label (1,...,k) to apply to candidates
            for which the condition evaluates to True.
        :param candidate: A candidate that the explanation is consistent with.
            May be a candidate object or the candidate's stable_id (for linking
            later.)
        :param name: The name of this explanation.
        :param semantics: The intended semantic representation of the
            explanation (if known).
        :param paraphrase: An optional paraphrase of the condition.
        """
        assert(isinstance(condition, str))
        # Collapse whitespace runs so equivalent conditions compare/hash equal.
        condition = re.sub(r'\s+', ' ', condition)
        self.condition = condition
        self.label = label
        self.candidate = candidate
        self.name = name
        self.semantics = semantics
        # BUGFIX: previously the paraphrase argument was accepted but
        # silently dropped.
        self.paraphrase = paraphrase

    def __hash__(self):
        return hash((self.label, self.condition, self.candidate))

    def __repr__(self):
        if self.name:
            return 'Explanation(%s: %s, "%s")' % (self.name, self.label, self.condition)
        else:
            return 'Explanation(%s, "%s")' % (self.label, self.condition)

    def display(self):
        """Prints the explanation in a format that can be copied and pasted"""
        if isinstance(self.candidate, str):
            candidate_id = self.candidate
        else:
            # BUGFIX: mention_id is used as an attribute elsewhere in the
            # codebase (e.g., ExplanationIO.write), not called as a method.
            candidate_id = self.candidate.mention_id
        tab = ' '
        print(
            f"Explanation(\n"
            f"{tab}name='{self.name}',\n"
            f"{tab}label='{self.label}',\n"
            f"{tab}condition='{self.condition}',\n"
            f"{tab}candidate='{candidate_id}',\n"
            f")"
        )
from collections import Counter, defaultdict
import csv
import json
import os
import random
import sys
from time import time
from metal.contrib.info_extraction.mentions import RelationMention
from metal.contrib.info_extraction.utils import mark_entities
import numpy as np
import torch
from scipy.sparse import issparse
from .explanation import Explanation
class PrintTimer:
    """Context manager: prints `msg` on entry and elapsed seconds on exit."""

    def __init__(self, msg, prefix="###"):
        self.msg = msg
        if len(prefix) > 0:
            self.prefix = prefix + " "
        else:
            self.prefix = prefix

    def __enter__(self):
        self.t0 = time()
        print("{0}{1}".format(self.prefix, self.msg))

    def __exit__(self, type, value, traceback):
        elapsed = time() - self.t0
        print("{0}Done in {1:.1f}s.\n".format(self.prefix, elapsed))
class ProgressBar(object):
    """Text progress bar on stdout; call bar(i) for i in [0, N-1], then close()."""

    def __init__(self, N, length=40):
        # Protect against division by zero (N = 0 results in full bar being printed)
        self.N = max(1, N)
        self.nf = float(self.N)
        self.length = length
        # Precalculate the i values that should trigger a write operation
        self.ticks = {round(i / 100.0 * N) for i in range(101)}
        self.ticks.add(N - 1)
        self.bar(0)

    def bar(self, i):
        """Assumes i ranges through [0, N-1]"""
        if i not in self.ticks:
            return
        frac = (i + 1) / self.nf
        filled = int(np.ceil(frac * self.length))
        sys.stdout.write(
            "\r[{0}{1}] {2}%".format(
                "=" * filled, " " * (self.length - filled), int(100 * frac)))
        sys.stdout.flush()

    def close(self):
        # Move the bar to 100% before closing
        self.bar(self.N - 1)
        sys.stdout.write("\n\n")
        sys.stdout.flush()
class ExplanationIO(object):
    """Serializes Explanation objects to/from tab-separated files."""

    def write(self, explanations, fpath):
        """Write one TSV row (name, label, candidate id, condition) per explanation."""
        if not isinstance(explanations, list):
            explanations = [explanations]
        with open(fpath, 'w') as tsvfile:
            writer = csv.writer(tsvfile, delimiter='\t')
            for exp in explanations:
                candidate_id = (exp.candidate if isinstance(exp.candidate, str)
                                else exp.candidate.mention_id)
                writer.writerow([exp.name, exp.label, candidate_id, exp.condition])
        # Abbreviate long paths in the status message.
        fpath = fpath if len(fpath) < 50 else fpath[:20] + '...' + fpath[-30:]
        print("Wrote {} explanations to {}".format(len(explanations), fpath))

    def read(self, fpath):
        """Read Explanation objects back from a TSV file written by write()."""
        explanations = []
        with open(fpath, 'r') as tsvfile:
            for name, label, candidate_id, condition in csv.reader(tsvfile, delimiter='\t'):
                explanations.append(
                    Explanation(
                        name=name,
                        label=int(label),
                        candidate=candidate_id,
                        condition=condition.strip(),
                    )
                )
        fpath = fpath if len(fpath) < 50 else fpath[:20] + '...' + fpath[-30:]
        print("Read {} explanations from {}".format(len(explanations), fpath))
        return explanations
def link_explanation_candidates(explanations, candidates):
    """Replace candidate ids on explanations with the matching candidate objects.

    Explanations whose .candidate is not already a RelationMention are treated
    as holding a mention_id; those ids are matched against `candidates` by
    mention_id. The explanation list is mutated in place and returned.
    """
    target_candidate_ids = set()
    linked = 0
    print("Building list of target candidate ids...")
    for e in explanations:
        # Anything non-None that is not a RelationMention is assumed to be an id.
        if e.candidate is not None and not isinstance(e.candidate, RelationMention):
            target_candidate_ids.add(e.candidate)
        elif e.candidate:
            linked += 1
    if linked == len(explanations):
        print("All {} explanations are already linked to candidates.".format(
            len(explanations)))
        return explanations
    else:
        print("Collected {} unique target candidate ids from {} explanations.".format(
            len(target_candidate_ids), len(explanations)))
    if not target_candidate_ids:
        print("No candidate hashes were provided. Skipping linking.")
        return explanations
    # Map mention_id -> candidate object for only the ids we actually need.
    candidate_map = {}
    print("Gathering desired candidates...")
    for candidate in candidates:
        if candidate.mention_id in target_candidate_ids:
            candidate_map[candidate.mention_id] = candidate
    if len(candidate_map) < len(target_candidate_ids):
        num_missing = len(target_candidate_ids) - len(candidate_map)
        print("Could not find {} target candidates with the following mention_ids (first 5):".format(
            num_missing))
        num_reported = 0
        for i, c_hash in enumerate(target_candidate_ids):
            if c_hash not in candidate_map:
                print(c_hash)
                num_reported += 1
                if num_reported >= 5:
                    break
    print("Found {}/{} desired candidates".format(
        len(candidate_map), len(target_candidate_ids)))
    print("Linking explanations to candidates...")
    for e in explanations:
        if not isinstance(e.candidate, RelationMention):
            try:
                e.candidate = candidate_map[e.candidate]
                linked += 1
            except KeyError:
                # Best effort: unmatched ids are left as-is.
                pass
    print("Linked {}/{} explanations".format(linked, len(explanations)))
    return explanations
def sparse_to_indices(X):
    """Converts a sparse matrix into a tensor of the nonzero indices

    Args:
        X: an [n, num_features] one-hot scipy.sparse matrix
    Returns:
        X_idx: an [n, h] tensor where X_idx[i,:] is a zero-padded 1D tensor of
            the nonzero indices of X[i,:]
    """
    if not issparse(X):
        raise ValueError("X must be a scipy.sparse matrix")
    rows, cols = X.nonzero()
    per_row = defaultdict(list)
    for r, c in zip(rows, cols):
        # Shift column indices by one so 0 can serve as padding.
        per_row[r].append(c + 1)
    width = max(len(v) for v in per_row.values())
    X_idx = torch.zeros(X.shape[0], width).long()
    for r, vals in per_row.items():
        X_idx[r, :len(vals)] = torch.LongTensor(vals)
    return X_idx
def display_candidate(candidate):
    """Print a candidate's tokens with its entity spans marked by braces."""
    tokens = candidate.tokens
    # (word_start, word_end) span for each entity in the candidate.
    positions = list(zip(candidate.word_starts, candidate.word_ends))
    markers = ['{', '}', '{', '}']
    marked = mark_entities(tokens, positions, markers, style='concatenate')
    print(' '.join(marked))
    print()
    # NOTE(review): this also prints the raw token list; looks like leftover
    # debugging output — confirm it is intentional.
    print(marked)
class CandidateViewer(object):
    """Steps through a candidate set, displaying one candidate per view() call."""

    def __init__(self, candidates, shuffle=False, seed=None):
        """
        :param candidates: sequence of candidates to display.
        :param shuffle: if True, visit candidates in random order.
        :param seed: optional RNG seed for reproducible shuffling.
        """
        if seed:
            random.seed(seed)
        self.candidates = candidates
        self.idx = -1
        self.order = list(range(len(candidates)))
        # Shuffle indirectly to not mess up alignment between candidates and
        # other objects in the workspace (e.g., labels).
        if shuffle:
            random.shuffle(self.order)

    def view(self):
        """Display and return the next candidate, or None when exhausted."""
        self.idx += 1
        # BUGFIX: use >= (was >), which previously raised IndexError when the
        # set was exactly exhausted instead of printing the message.
        if self.idx >= len(self.order):
            print("Exhausted provided candidate set")
            return
        c = self.candidates[self.order[self.idx]]
        display_candidate(c)
        return c
from .core_annotators import annotators, text2int
from .core_templates import PrimitiveTemplate
from .core_base import core_grammar | babble-master | babble/core/__init__.py |
import re
from babble.parsing.annotator import Annotator
class PunctuationAnnotator(Annotator):
    # Tags single punctuation tokens with their grammar categories.
    def annotate(self, tokens):
        """Return [(category, word)] for a lone quote/paren token, else []."""
        if len(tokens) == 1:
            # PTB POS tags: `` and '' mark quotes; -LRB-/-RRB- mark parentheses.
            if tokens[0]['pos'] in ["``", "\'\'"] or tokens[0]['word'] in ["'", '"']:
                return [('$Quote', tokens[0]['word'])]
            elif tokens[0]['pos'] == "-LRB-":
                return [('$OpenParen', tokens[0]['word'])]
            elif tokens[0]['pos'] == "-RRB-":
                return [('$CloseParen', tokens[0]['word'])]
        return []
class IntegerAnnotator(Annotator):
    """Tags single tokens that denote integers (digits or number words)."""

    def annotate(self, tokens):
        """Return [('$Int', ('.int', value))] if the lone token is an integer, else []."""
        if len(tokens) == 1:
            value = None
            # CD = cardinal number POS tag; try a numeric parse first.
            if tokens[0]['pos'] == 'CD':
                try:
                    token = tokens[0]['word']
                    value = int(float(token))
                except ValueError:
                    pass
            if value is None:
                # Fall back to parsing number words ("twenty one").
                # BUGFIX: catch Exception instead of a bare except clause
                # (which also swallowed KeyboardInterrupt/SystemExit).
                try:
                    value = text2int(tokens[0]['word'])
                except Exception:
                    pass
            if value is not None:
                return [('$Int', ('.int', value))]
        return []
annotators = [PunctuationAnnotator(), IntegerAnnotator()]
def text2int(textnum, numwords=None):
    """Convert an English number phrase (e.g., "twenty one") to an int.

    Handles units, tens, scale words (hundred/thousand/...), hyphenation,
    "and", and ordinals ("twelfth", "twentieth", "fourth").

    :param textnum: the text to convert.
    :param numwords: optional prebuilt word -> (scale, increment) table;
        built internally when omitted. (BUGFIX: previously a mutable default
        argument used as a hidden cache.)
    :raises ValueError: if a word is not a recognized number word.
    """
    if not numwords:
        numwords = {}
        units = [
            "zero", "one", "two", "three", "four", "five", "six", "seven", "eight",
            "nine", "ten", "eleven", "twelve", "thirteen", "fourteen", "fifteen",
            "sixteen", "seventeen", "eighteen", "nineteen",
        ]
        tens = ["", "", "twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety"]
        scales = ["hundred", "thousand", "million", "billion", "trillion"]
        numwords["and"] = (1, 0)
        for idx, word in enumerate(units):
            numwords[word] = (1, idx)
        for idx, word in enumerate(tens):
            numwords[word] = (1, idx * 10)
        for idx, word in enumerate(scales):
            numwords[word] = (10 ** (idx * 3 or 2), 0)
    # Irregular ordinals; regular ones are handled by suffix stripping below.
    ordinal_words = {'first': 1, 'second': 2, 'third': 3, 'fifth': 5, 'eighth': 8, 'ninth': 9, 'twelfth': 12}
    ordinal_endings = [('ieth', 'y'), ('th', '')]
    textnum = textnum.replace('-', ' ')
    current = result = 0
    for word in textnum.split():
        if word in ordinal_words:
            scale, increment = (1, ordinal_words[word])
        else:
            # Strip ordinal suffixes ("twentieth" -> "twenty", "fourth" -> "four").
            for ending, replacement in ordinal_endings:
                if word.endswith(ending):
                    word = "%s%s" % (word[:-len(ending)], replacement)
            if word not in numwords:
                # BUGFIX: raise the more specific ValueError (still caught by
                # callers handling Exception).
                raise ValueError("Illegal word: " + word)
            scale, increment = numwords[word]
        current = current * scale + increment
        # Completed a large group (e.g., "two thousand"): bank it.
        if scale > 100:
            result += current
            current = 0
    return result + current
from __future__ import print_function
from babble.parsing import Rule, sems0, sems1, sems_in_order, sems_reversed, star
def PrimitiveTemplate(seed):
    """Generate the generic list/boolean/membership rules for category `seed`.

    Given a base category X (e.g. '$ArgX'), produces grammar rules for building
    X lists (stub/and/or), applying X->Bool functions to single Xs and to
    lists, negation, membership, and all-equal checks.
    """
    X = seed
    XListStub = X + 'ListStub'  # partial list, no conjunction word yet
    XListAnd = X + 'ListAnd'    # "x and y" style list
    XListOr = X + 'ListOr'      # "x or y" style list
    XList = X + 'List'          # generalized list
    XToBool = X + 'ToBool' # f(X) = Bool
    XBinToBool = X + 'BinToBool' # f(X1, X2) = Bool
    rules = [
        # XToBool
        # "'a' is uppercase"
        Rule('$Bool', (X, XToBool), star(lambda x, func_: ('.call', func_, x))),

        # XBinToBool
        # Case 1: (X (f X))
        Rule(XToBool, (XBinToBool, X), sems_in_order),
        # Case 2: (XList (f X)) - handled naturally with XList XToBool rules
        # Case 3: X f XList
        Rule(XToBool, (XBinToBool, XListAnd),
            lambda sems: ('.composite_and', (sems[0],), sems[1])),
        Rule(XToBool, (XBinToBool, XListOr),
            lambda sems: ('.composite_or', (sems[0],), sems[1])),
        # Case 4: XList (f XList) - handled naturally right now

        # Not
        # "'a' is not uppercase"
        Rule('$Bool', (X, '$Not', XToBool),
            star(lambda x, not_, func_: (not_, ('.call', func_, x)))),

        # Building lists
        Rule(XListStub, (X, '?$Separator', X),
            lambda sems: ('.list', sems[0], sems[2])),
        Rule(XListStub, (XListStub, '?$Separator', X),
            lambda sems: tuple((list(sems[0]) + [sems[2]]))),
        Rule(XList, ('$OpenParen', XListStub, '$CloseParen'), sems1),
        Rule(XListOr, (X, '?$Separator', '$Or', X),
            lambda sems: ('.list', sems[0], sems[3])),
        Rule(XListOr, (XListStub, '?$Separator', '$Or', X),
            lambda sems: tuple(list(sems[0]) + [sems[3]])),
        Rule(XListAnd, (X, '?$Separator', '$And', X),
            lambda sems: ('.list', sems[0], sems[3])),
        Rule(XListAnd, (XListStub, '?$Separator', '$And', X),
            lambda sems: tuple(list(sems[0]) + [sems[3]])),

        # Generalizing Lists
        Rule(XList, XListStub, sems0),
        Rule(XList, XListAnd, sems0),
        Rule(XList, XListOr, sems0),
        Rule('$List', XList, sems0),

        # Applying functions to lists (normal and inverted order)
        # "'a' or 'b' is in the sentence"
        Rule('$Bool', (XListOr, XToBool), star(lambda list_, func_: ('.any', ('.map', func_, list_)))),
        Rule('$Bool', (XToBool, XListOr), star(lambda func_, list_: ('.any', ('.map', func_, list_)))),
        # "'a' and 'b' are in the sentence"
        Rule('$Bool', (XListAnd, XToBool), star(lambda list_, func_: ('.all', ('.map', func_, list_)))),
        Rule('$Bool', (XToBool, XListAnd), star(lambda func_, list_: ('.all', ('.map', func_, list_)))),
        # "[at least two of] ('a','b','c') are in the sentence"
        Rule('$BoolList', (XList, XToBool), star(lambda list_, func_: ('.map', func_, list_))),
        Rule('$BoolList', (XToBool, XList), star(lambda func_, list_: ('.map', func_, list_))),
        # "there is a spouse word in the sentence"
        Rule('$Bool', ('$Exists', XList, XToBool), star(lambda exists_, list_, func_: ('.any', ('.map', func_, list_)))),

        # Membership in lists
        Rule(XToBool, ('$In', XList), sems_in_order),
        Rule(XToBool, ('$In', '$UserList'), sems_in_order),
        # NOTE: $Contains is still somewhat limited in its functionality
        Rule('$Bool', ('$List', '$Contains', X), star(lambda list_, contains_, x: ('.call', ('.in', list_), x))),
        Rule('$Bool', ('$List', '$Contains', XListAnd), star(lambda list_, contains_, andlist_: ('.all', ('.map', ('.in', list_), andlist_)))),
        Rule('$Bool', ('$List', '$Contains', XListOr), star(lambda list_, contains_, orlist_: ('.any', ('.map', ('.in', list_), orlist_)))),

        # All (not) equal
        Rule('$Bool', (XListAnd, '$Equals'), star(lambda list_, eq_: ('.all_equal', list_))),
        Rule('$Bool', (XListAnd, '$NotEquals'), star(lambda list_, eq_: ('.not', ('.all_equal', list_)))),
    ]
    return rules
| babble-master | babble/core/core_templates.py |
from __future__ import print_function
from babble.parsing import (
GrammarMixin,
Rule,
sems0,
sems1,
sems_in_order,
sems_reversed,
flip_dir,
star,
)
from babble.core.core_templates import PrimitiveTemplate
from babble.core.core_annotators import annotators
# Rules ======================================================================
# Rules ======================================================================
# Terminal (word -> category) rules of the core grammar.
lexical_rules = (
    [Rule('$Start', w) for w in ['<START>']] +
    [Rule('$Stop', w) for w in ['<STOP>']] +
    [Rule('$Label', w, '.label') for w in ['label ?it']] +
    [Rule('$Arg', w, '.arg') for w in ['arg', 'argument']] +
    [Rule('$True', w, ('.bool', True)) for w in ['true', 'correct']] +
    [Rule('$False', w, ('.bool', False)) for w in ['false', 'incorrect', 'wrong']] +
    [Rule('$And', w, '.and') for w in ['and', 'but']] +
    [Rule('$Or', w, '.or') for w in ['or', 'nor']] +
    [Rule('$Not', w, '.not') for w in ['not', "n't"]] +
    [Rule('$All', w, '.all') for w in ['all', 'both']] +
    [Rule('$Any', w, '.any') for w in ['any', 'a', 'one of']] +
    [Rule('$None', w, '.none') for w in ['none', 'not any', 'neither', 'no']] +
    [Rule('$Is', w) for w in ['is', 'are', 'be', 'comes', 'appears', 'occurs']] +
    [Rule('$Exists', w) for w in ['exist', 'exists']] +
    [Rule('$Int', w, ('.int', 0)) for w in ['no']] +
    [Rule('$Int', w, ('.int', 1)) for w in ['immediately', 'right']] +
    [Rule('$Int', w, ('.int', -1)) for w in ['last', 'final', 'ending']] +
    [Rule('$AtLeastOne', w, ('.geq', ('.int', 1))) for w in ['a', 'another']] +
    [Rule('$Because', w) for w in ['because', 'since', 'if']] +
    [Rule('$Equals', w, '.eq') for w in ['equal', 'equals', '=', '==', 'same ?as', 'identical', 'exactly']] +
    [Rule('$NotEquals', w, '.neq') for w in ['different ?than']] +
    [Rule('$LessThan', w, '.lt') for w in ['less than', 'smaller than', '<']] +
    [Rule('$AtMost', w, '.leq') for w in ['at most', 'no larger than', 'less than or equal', 'within', 'no more than', '<=']] +
    [Rule('$AtLeast', w, '.geq') for w in ['at least', 'no less than', 'no smaller than', 'greater than or equal', '>=']] +
    [Rule('$MoreThan', w, '.gt') for w in ['more than', 'greater than', 'larger than', '>']] +
    [Rule('$In', w, '.in') for w in ['?is in']] +
    [Rule('$Contains', w, '.contains') for w in ['contains', 'contain', 'containing', 'include', 'includes', 'says', 'states', 'mentions', 'mentioned', 'referred', 'refers']] +
    [Rule('$Separator', w) for w in [',', ';', '/']] +
    [Rule('$Possessive', w) for w in ["'s"]] +
    [Rule('$Count', w, '.count') for w in ['number', 'length', 'count']] +
    [Rule('$Punctuation', w) for w in ['.', ',', ';', '!', '?']] +
    [Rule('$Tuple', w, '.tuple') for w in ['pair', 'tuple']] +
    [Rule('$CID', w, '.cid') for w in ['cid', 'cids', 'canonical id', 'canonical ids']] +
    [Rule('$ArgNum', w, ('.int', 1)) for w in ['one', '1']] +
    [Rule('$ArgNum', w, ('.int', 2)) for w in ['two', '2']] +
    [Rule('$ArgXListAnd', w, ('.list', ('.arg', ('.int', 1)), ('.arg', ('.int', 2)))) for w in ['they', 'them', 'entities']] +
    [Rule('$EachOther', w) for w in ['eachother', 'each other']]
)

# Category-to-category promotions (mostly pass semantics through unchanged).
unary_rules = [
    Rule('$Bool', '$BoolLit', sems0),
    Rule('$BoolLit', '$True', sems0),
    Rule('$BoolLit', '$False', sems0),
    Rule('$Num', '$Int', sems0),
    Rule('$Num', '$Float', sems0),
    Rule('$Conj', '$And', sems0),
    Rule('$Conj', '$Or', sems0),
    Rule('$Exists', '$Is'),
    Rule('$Equals', '$Is ?$Equals', '.eq'),
    Rule('$NotEquals', '$Equals $Not', '.neq'),
    Rule('$NotEquals', '$Is $NotEquals', '.neq'),
    Rule('$Compare', '$Equals', sems0),
    Rule('$Compare', '$NotEquals', sems0),
    Rule('$Compare', '$LessThan', sems0),
    Rule('$Compare', '$AtMost', sems0),
    Rule('$Compare', '$MoreThan', sems0),
    Rule('$Compare', '$AtLeast', sems0),
    Rule('$NumBinToBool', '$Compare', sems0),
    Rule('$NumToBool', '$AtLeastOne', sems0),
]

# Multi-category rules that combine sub-parses into larger semantics.
compositional_rules = [
    ### Top Level ###
    Rule('$ROOT', '$Start $LF $Stop', lambda sems: ('.root', sems[1])),
    Rule('$LF', '$Label $Int $Because $Bool ?$Punctuation', lambda sems: (sems[0], sems[1], sems[3])),

    ### Logicals ###
    Rule('$Bool', '$Bool $Conj $Bool', lambda sems: (sems[1], sems[0], sems[2])),
    Rule('$Bool', '$Not $Bool', sems_in_order),
    Rule('$Bool', '$All $BoolList', sems_in_order),
    Rule('$Bool', '$Any $BoolList', sems_in_order),
    Rule('$Bool', '$None $BoolList', sems_in_order),

    ### Grouping ###
    Rule('$Bool', '$OpenParen $Bool $CloseParen', star(lambda open_, bool_, close_: bool_)),

    ### BoolLists ###
    # "more than five of X words are upper"
    Rule('$Bool', '$NumToBool $BoolList', star(lambda func_, boollist_: ('.call', func_, ('.sum', boollist_)))),

    ### Context ###
    Rule('$ArgX', '$Arg $ArgNum', sems_in_order),
    # NOTE(review): these two use constant semantics (always args 1 and 2)
    # rather than a function of the parsed sems — confirm intended.
    Rule('$ArgXListAnd', '$ArgX $And $ArgX', ('.list', ('.arg', ('.int', 1)), ('.arg', ('.int', 2)))),
    Rule('$ArgXListOr', '$ArgX $Or $ArgX', ('.list', ('.arg', ('.int', 1)), ('.arg', ('.int', 2)))),
]

# Generic list/boolean machinery instantiated for arguments and numbers.
template_rules = (
    PrimitiveTemplate('$ArgX') +
    PrimitiveTemplate('$Num')
)

rules = lexical_rules + unary_rules + compositional_rules + template_rules
# Interpreter for the semantic forms: each op maps its (already-built) child
# interpretations to a function of the context dict c (which holds e.g.
# c['candidate'] and c['aliases']).
ops = {
    # root
    '.root': lambda x: lambda c: x(c),
    '.label': lambda x, y: lambda c: x(c) if y(c)==True else 0,

    # primitives
    '.bool': lambda x: lambda c: x,
    '.string': lambda x: lambda c: x,
    '.int': lambda x: lambda c: x,

    # lists
    '.tuple': lambda x: lambda c: tuple(x(c)),
    '.list': lambda *x: lambda c: [z(c) for z in x],
    '.alias': lambda x: lambda c: c['aliases'][x(c)],

    # apply a function x to elements in list y
    '.map': lambda func_, list_: lambda cxy: [func_(cxy)(lambda c: yi)(cxy) for yi in list_(cxy)],

    # call a 'hungry' evaluated function on one or more arguments
    '.call': lambda *x: lambda c: x[0](c)(x[1])(c),

    # apply a list of hungry functions to an element, then call 'any' or 'all' to convert to boolean
    '.composite_and': lambda x, y: lambda cxy: lambda z: lambda cz: all(x(lambda c: yi)(cxy)(z)(cz)==True for yi in y(cxy)),
    '.composite_or': lambda x, y: lambda cxy: lambda z: lambda cz: any(x(lambda c: yi)(cxy)(z)(cz)==True for yi in y(cxy)),

    # apply a list of full functions to an element, then call 'any' or 'all' to convert to boolean
    '.composite_and_func': lambda funclist: lambda cx: lambda z: lambda cz: all(func(z)(cz)==True for func in funclist(cx)),
    '.composite_or_func': lambda funclist: lambda cx: lambda z: lambda cz: any(func(z)(cz)==True for func in funclist(cx)),

    # logic
    # NOTE: and/or expect individual inputs, not/all/any/none expect a single iterable of inputs
    '.and': lambda x, y: lambda c: x(c)==True and y(c)==True,
    '.or': lambda x, y: lambda c: x(c)==True or y(c)==True,
    '.not': lambda x: lambda c: not x(c)==True,
    '.all': lambda x: lambda c: all(xi==True for xi in x(c)),
    '.any': lambda x: lambda c: any(xi==True for xi in x(c)),
    '.none': lambda x: lambda c: not any(xi==True for xi in x(c)),

    # comparisons (curried: the right-hand side is bound first)
    '.eq': lambda x: lambda cx: lambda y: lambda cy: y(cy) == x(cx),
    '.neq': lambda x: lambda cx: lambda y: lambda cy: y(cy) != x(cx),
    '.lt': lambda x: lambda cx: lambda y: lambda cy: y(cy) < x(cx),
    '.leq': lambda x: lambda cx: lambda y: lambda cy: y(cy) <= x(cx),
    '.geq': lambda x: lambda cx: lambda y: lambda cy: y(cy) >= x(cx),
    '.gt': lambda x: lambda cx: lambda y: lambda cy: y(cy) > x(cx),

    # lists
    '.in': lambda x: lambda cx: lambda y: lambda cy: y(cy) in x(cx),
    '.contains': lambda x: lambda cx: lambda y: lambda cy: x(cx) in y(cy),
    '.count': lambda x: lambda c: len(x(c)),
    '.sum': lambda x: lambda c: sum(x(c)),
    '.intersection': lambda x, y: lambda c: list(set(x(c)).intersection(y(c))),
    '.all_equal': lambda list_: lambda c: (lambda mylist: all(mylist[0] == elem for elem in mylist))(list_(c)),

    # context
    '.arg': lambda x: lambda c: c['candidate'][x(c) - 1],
}
# Pretty-printers used to translate semantic forms back into pseudo-code
# strings (for displaying parses to users); mirrors the keys of `ops`.
translate_ops = {
    '.root': lambda LF: LF,
    '.label': lambda label, cond: "return {} if {} else 0".format(1 if label else -1, cond),
    '.bool': lambda bool_: bool_=='True',
    '.string': lambda str_: "'{}'".format(str_),
    '.int': lambda int_: int(int_),

    '.tuple': lambda list_: "tuple({})".format(list_),
    '.list': lambda *elements: "[{}]".format(','.join(x for x in elements)),
    '.alias': lambda name: "alias({})".format(name),

    '.map': lambda func_, list_: "[s.{} for s in {}]".format(
        func_[1:] if func_.startswith('.') else func_, list_),
    '.call': lambda func_, args_: "{}.{}".format(args_, func_),
    '.composite_and': lambda func_, args_: "({}(z) for all z in {})".format(func_, args_),
    '.composite_or': lambda func_, args_: "({}(u) for at least one u in {})".format(func_, args_),
    '.composite_and_func': lambda func_list: "(all({}))".format(func_list),
    '.composite_or_func': lambda func_list: "(any({}))".format(func_list),

    '.and': lambda x, y: "({} and {})".format(x, y),
    '.or': lambda x, y: "({} or {})".format(x, y),
    '.not': lambda x: "not ({})".format(x),
    '.all': lambda x: "all({})".format(x),
    '.any': lambda x: "any({})".format(x),
    '.none': lambda x: "not any({})".format(x),

    '.eq': lambda x: "(= {})".format(x),
    '.neq': lambda x: "(!= {})".format(x),
    '.lt': lambda x: "(< {})".format(x),
    '.leq': lambda x: "(<= {})".format(x),
    '.geq': lambda x: "(>= {})".format(x),
    '.gt': lambda x: "(> {})".format(x),

    '.in': lambda rhs: "in({})".format(rhs),
    '.contains': lambda rhs: "contains({})".format(rhs),
    '.count': lambda list_: "count({})".format(list_),
    '.sum': lambda arg_: "sum({})".format(arg_),
    '.intersection': lambda arg_: "intersection({})".format(arg_),
    '.all_equal': lambda list_: "all_equal({})".format(list_),

    '.arg': lambda int_: ["X", "Y"][int_ - 1],
}

# Bundle of rules/ops/annotators that other domain grammars compose with.
core_grammar = GrammarMixin(
    rules=rules,
    ops=ops,
    helpers={},
    annotators=annotators,
    translate_ops=translate_ops
)
from collections import namedtuple
import re
# Comparator registry keyed by the grammar's comparison operator names.
inequalities = {
    '.lt': lambda a, b: a < b,
    '.leq': lambda a, b: a <= b,
    '.eq': lambda a, b: a == b,
    '.geq': lambda a, b: a >= b,
    '.gt': lambda a, b: a > b,
}
class Phrase(object):
    """A lightweight, sliceable view over a sentence's parallel token fields."""

    # Parallel, token-aligned fields copied from the source sentence.
    fields = ['text', 'words', 'char_offsets', 'pos_tags', 'ner_tags', 'entity_types']

    def __init__(self, sentence=None):
        for field in self.fields:
            setattr(self, field, getattr(sentence, field) if sentence else None)

    def __getitem__(self, key):
        """Return a new Phrase covering token index or slice `key`."""
        if isinstance(key, slice):
            # BUGFIX: normalize None/negative slice bounds (e.g. p[1:], p[:-1])
            # via slice.indices; these previously crashed on char_offsets[None]
            # or the `stop < len(...)` comparison with None.
            start, stop, _ = key.indices(len(self.words))
        else:
            assert isinstance(key, int)
            start = key
            stop = key + 1
        p = Phrase()
        text_start = self.char_offsets[start] if start < len(self.char_offsets) else len(self.text)
        text_stop = self.char_offsets[stop] if stop < len(self.char_offsets) else None
        p.text = self.text[text_start:text_stop]
        p.words = self.words[start:stop]
        p.char_offsets = self.char_offsets[start:stop]
        p.pos_tags = self.pos_tags[start:stop]
        p.ner_tags = self.ner_tags[start:stop]
        p.entity_types = self.entity_types[start:stop]
        return p

    def __len__(self):
        return len(self.words)

    def __repr__(self):
        return 'Phrase("{}" : {} tokens)'.format(self.text.strip(), len(self.words))
def index_word(string, index):
words = string.split()
return _index_wordlist(words, index)
def index_phrase(phrase, index):
words = phrase.words
return _index_wordlist(words, index)
def _index_wordlist(wordlist, index):
if len(wordlist) == 0:
return ''
if index > 0:
index = index - 1
elif index < 0:
index = len(wordlist) + index
return wordlist[max(0, min(index, len(wordlist) - 1))]
def phrase_filter(phr, field, val):
    """Return elements of `phr` whose `field` entries match regex `val`.

    field='words' -> the matching words; field='chars' -> all characters of
    the stripped text (val is ignored); otherwise `field` names a tag list
    (e.g. 'ner_tags') and consecutive matching tokens are merged into one
    text span.
    """
    if field == 'words':
        return [key for key in getattr(phr, field) if re.match(val, key)]
    elif field == 'chars':
        return [c for c in phr.text.strip()]
    else: # NER
        # Don't count a two-token person (John Smith) as two people
        results = []
        on = False  # True while inside a run of matching tags
        for i, key in enumerate(getattr(phr, field)):
            if re.match(val, key):
                if not on:
                    # Start a new span at this token.
                    text_start = phr.char_offsets[i]
                    results.append(phr.words[i])
                    on = True
                else:
                    # Extend the current span's text to cover this token too.
                    text_stop = phr.char_offsets[i + 1] if i + 1 < len(phr.char_offsets) else None
                    results[-1] = phr.text[text_start:text_stop]
            else:
                on = False
        return results
def get_left_phrase(entity, cmp='.gt', num=0, unit='words'):
    """Return the Phrase of tokens left of `entity` satisfying cmp/num/unit.

    Distances are compared as negative offsets so the inequality operators
    keep their intuitive direction (e.g. cmp='.leq', num=3 keeps tokens
    within 3 words to the left).
    """
    phrase = Phrase(entity)
    k = entity.word_start  # first token index of the entity
    indices = []
    for i in range(k):
        if unit == 'words':
            if inequalities[cmp](-i, -k + num):
                indices.append(i)
        elif unit == 'chars':
            # Compare character offsets instead of word indices.
            # assumes word_to_char_idx maps word index -> char offset — TODO confirm
            I = entity.word_to_char_idx(i)
            K = entity.word_to_char_idx(k)
            if inequalities[cmp](-I, -K + num):
                indices.append(i)
        else:
            raise Exception("Expected unit in ('words', 'chars'), got '{}'".format(unit))
    if indices:
        return phrase[min(indices):max(indices) + 1]
    else:
        # Empty phrase when nothing qualifies.
        return phrase[0:0]
def get_right_phrase(entity, cmp='.gt', num=0, unit='words'):
    """Return the Phrase of tokens right of `entity` satisfying cmp/num/unit.

    Mirror image of get_left_phrase: positive offsets past the entity's last
    token are compared with the requested inequality.
    """
    phrase = Phrase(entity)
    k = entity.word_end  # last token index of the entity
    indices = []
    for i in range(k + 1, len(phrase)):
        if unit == 'words':
            if inequalities[cmp](i, k + num):
                indices.append(i)
        elif unit == 'chars':
            # Compare character offsets instead of word indices.
            # assumes word_to_char_idx maps word index -> char offset — TODO confirm
            I = entity.word_to_char_idx(i)
            K = entity.word_to_char_idx(k)
            if inequalities[cmp](I, K + num):
                indices.append(i)
        else:
            raise Exception("Expected unit in ('words', 'chars'), got '{}'".format(unit))
    if indices:
        return phrase[min(indices):max(indices) + 1]
    else:
        # Empty phrase when nothing qualifies.
        return phrase[0:0]
def get_within_phrase(entity, num=0, unit='words'):
    """Return the Phrase within `num` words/chars on either side of `entity`
    (including the entity itself)."""
    phrase = Phrase(entity)
    if unit == 'words':
        j = entity.word_start
        k = entity.word_end
        return phrase[max(0, j - num):min(k + num + 1, len(phrase))]
    elif unit == 'chars':
        # Get the indices of the words at right distance, then index with those
        j = max(0, entity.char_to_word_index(entity.char_start - num))
        k = min(len(phrase), entity.char_to_word_index(entity.char_end + num))
        # NOTE(review): unlike the 'words' branch, the end index is not +1
        # here, so the final word may be excluded — confirm intended.
        return phrase[j:k]
    else:
        raise Exception("Expected unit in ('words', 'chars'), got '{}'".format(unit))
def get_between_phrase(entity1, entity2):
    """Return the Phrase of tokens strictly between two entities (order-insensitive)."""
    phrase = Phrase(entity1)
    # Ensure entity1 is the leftmost of the two.
    if entity1.char_start > entity2.char_start:
        entity1, entity2 = entity2, entity1
    lo = entity1.word_end
    hi = entity2.word_start
    return phrase[lo + 1:hi]
def get_sentence_phrase(entity):
    """Return the entire sentence containing `entity` as a Phrase."""
    return Phrase(entity)
# Helper functions exposed to the semantic grammar by name.
helpers = {
    'index_word': index_word,
    'index_phrase': index_phrase,
    'phrase_filter': phrase_filter,
    'get_left_phrase': get_left_phrase,
    'get_right_phrase': get_right_phrase,
    'get_within_phrase': get_within_phrase,
    'get_between_phrase': get_between_phrase,
    'get_sentence_phrase': get_sentence_phrase,
}
from babble.parsing import Annotator
class TokenAnnotator(Annotator):
    # Tags any single non-quote token as a $QueryToken terminal.
    def annotate(self, tokens):
        # Quotation marks are hard stops to prevent merging of multiple strings
        if len(tokens) == 1 and tokens[0]['pos'] not in ["``", "\'\'"]:
            return [('$QueryToken', tokens[0]['word'])]
        else:
            return []


# Annotator instances used by the text-domain grammar.
annotators = [TokenAnnotator()]
from .text_base import text_grammar
from .text_helpers import * | babble-master | babble/text/__init__.py |
from babble.parsing import GrammarMixin, Rule, sems0, sems1, sems_in_order, sems_reversed, flip_dir, star
from babble.core import PrimitiveTemplate
from babble.text.text_helpers import helpers
from babble.text.text_annotators import annotators
# Lexical rules: map surface word(s) directly to a grammar category plus an
# optional semantic value (e.g. the name of the op that implements them).
lexical_rules = (
    [Rule('$Token', w, 'token') for w in ['token']] +
    [Rule('$Word', w, 'words') for w in ['word', 'words', 'term', 'terms', 'token', 'tokens', 'phrase', 'phrases']] +
    [Rule('$Char', w, 'chars') for w in ['character', 'characters', 'letter', 'letters']] +
    [Rule('$Upper', w, '.upper') for w in ['upper', 'uppercase', 'upper case', 'all caps', 'all capitalized']] +
    [Rule('$Lower', w, '.lower') for w in ['lower', 'lowercase', 'lower case']] +
    [Rule('$Capital', w, '.capital') for w in ['capital', 'capitals', 'capitalized']] +
    [Rule('$StartsWith', w, '.startswith') for w in ['starts with', 'start with', 'starting with']] +
    [Rule('$EndsWith', w, '.endswith') for w in ['ends with', 'end with', 'ending with']] +
    [Rule('$Left', w, '.left') for w in ['?to ?the left ?of', 'in front of', 'before', 'precedes', 'preceding', 'followed by']] +
    [Rule('$Right', w, '.right') for w in ['?to ?the right ?of', 'behind', 'after', 'preceded by', 'follows', 'following']] +
    [Rule('$Within', w, '.within') for w in ['within', 'next']] +
    [Rule('$Apart', w) for w in ['apart', 'away']] +
    [Rule('$Sentence', w, '.sentence') for w in ['sentence', 'text', 'it']] +
    [Rule('$Between', w, '.between') for w in ['between', 'inbetween', 'sandwiched', 'enclosed']] +
    # NER categories map to the spaCy-style tag(s) used by '.filter' on ner_tags.
    [Rule('$PersonNER', w, ('PERSON')) for w in ['person', 'people']] +
    [Rule('$LocationNER', w, ('LOC')) for w in ['location', 'locations', 'place', 'places']] +
    [Rule('$DateNER', w, ('DATE')) for w in ['date', 'dates']] +
    [Rule('$NumberNER', w, ('ORDINAL|CARDINAL')) for w in ['number', 'numbers']] +
    [Rule('$OrganizationNER', w, ('ORG')) for w in ['organization', 'organizations', 'company', 'companies', 'agency', 'agencies', 'institution', 'institutions']] +
    [Rule('$NorpNER', w, ('NORP')) for w in ['political', 'politician', 'religious']] +
    # Allow references to X and Y instead of candidate spans' titles
    [Rule('$ArgX', w, ('.arg', ('.int', 1))) for w in ['x', 'X']] +
    [Rule('$ArgX', w, ('.arg', ('.int', 2))) for w in ['y', 'Y']] +
    [Rule('$ArgXListAnd', w, ('.list', ('.arg', ('.int', 1)), ('.arg', ('.int', 2)))) for w in ['they', 'them']]
)
# Unary rules: rewrite one category as another, usually forwarding the child's
# semantics unchanged (sems0 = first child's semantics).
unary_rules = [
    Rule('$Direction', '$Left', sems0),
    Rule('$Direction', '$Right', sems0),
    Rule('$NER', '$DateNER', sems0),
    Rule('$NER', '$PersonNER', sems0),
    Rule('$NER', '$LocationNER', sems0),
    Rule('$NER', '$OrganizationNER', sems0),
    Rule('$Unit', '$Word', sems0),
    Rule('$Unit', '$Char', sems0),
    # ArgX may be treated as an object or a string (referring to its textual contents)
    Rule('$String', '$ArgX', lambda sems: ('.arg_to_string', sems[0])),
    Rule('$ArgToString', '$CID', lambda sems: (sems[0],)),
    Rule('$StringListOr', '$UserList', sems0),
    Rule('$UnaryStringToBool', '$Lower', sems0),
    Rule('$UnaryStringToBool', '$Upper', sems0),
    Rule('$UnaryStringToBool', '$Capital', sems0),
    Rule('$StringBinToBool', '$Equals', sems0),
    Rule('$StringBinToBool', '$NotEquals', sems0),
    Rule('$StringBinToBool', '$StartsWith', sems0),
    Rule('$StringBinToBool', '$EndsWith', sems0),
    # These represent string comparisons (like the letter 'a' in 'cat'),
    # not set comparisons (like 'cat' in ['dog', 'cat', 'bird'])
    Rule('$StringBinToBool', '$In', sems0),
    Rule('$StringBinToBool', '$Contains', sems0),
]
# Compositional rules: combine multiple categories into larger constituents.
# Each semantics function (wrapped with `star`) builds a nested tuple program
# that Grammar.evaluate later compiles into an executable labeling function.
compositional_rules = [
    # Direction
    # "is left of Y"
    Rule('$StringToBool', '$Direction $ArgX', star(lambda dir_, arg_:
        ('.in', ('.extract_text', (dir_, arg_))))),
    Rule('$StringToBool', '$Direction $ArgXListAnd', star(lambda dir_, arglist_:
        ('.composite_and_func', ('.list',
            ('.in', ('.extract_text', (dir_, arglist_[1]))),
            ('.in', ('.extract_text', (dir_, arglist_[2]))))))),
    Rule('$StringToBool', '$Direction $ArgXListOr', star(lambda dir_, arglist_:
        ('.composite_or_func', ('.list',
            ('.in', ('.extract_text', (dir_, arglist_[1]))),
            ('.in', ('.extract_text', (dir_, arglist_[2]))))))),
    # "is two words left of Y"
    Rule('$StringToBool', '$Int ?$Unit $Direction $ArgX',
        star(lambda int_, unit_, dir_, arg_:
            ('.in', ('.extract_text', (dir_, arg_, ('.string', '.eq'), int_,
                ('.string', (unit_ if unit_ else 'words'))))))),
    Rule('$StringToBool', '$Int ?$Unit $Direction $ArgXListAnd',
        star(lambda int_, unit_, dir_, arglist_:
            ('.composite_and_func', ('.list',
                ('.in', ('.extract_text', (dir_, arglist_[1], ('.string', '.eq'), int_,
                    ('.string', (unit_ if unit_ else 'words'))))),
                ('.in', ('.extract_text', (dir_, arglist_[2], ('.string', '.eq'), int_,
                    ('.string', (unit_ if unit_ else 'words'))))))))),
    Rule('$StringToBool', '$Int ?$Unit $Direction $ArgXListOr',
        star(lambda int_, unit_, dir_, arglist_:
            ('.composite_or_func', ('.list',
                ('.in', ('.extract_text', (dir_, arglist_[1], ('.string', '.eq'), int_,
                    ('.string', (unit_ if unit_ else 'words'))))),
                ('.in', ('.extract_text', (dir_, arglist_[2], ('.string', '.eq'), int_,
                    ('.string', (unit_ if unit_ else 'words'))))))))),
    # NOTE: String + StringToBool -> Bool, StringToBoolForward + String -> Bool
    Rule('$StringToBoolForward', '$ArgX $Int ?$Unit $Direction',
        star(lambda arg_, int_, unit_, dir_:
            ('.in', ('.extract_text', (flip_dir(dir_), arg_, ('.string', '.eq'), int_,
                ('.string', (unit_ if unit_ else 'words'))))))),
    Rule('$StringToBoolForward', '$ArgXListAnd $Int ?$Unit $Direction',
        star(lambda arglist_, int_, unit_, dir_:
            ('.composite_and_func', ('.list',
                ('.in', ('.extract_text', (flip_dir(dir_), arglist_[1], ('.string', '.eq'), int_,
                    ('.string', (unit_ if unit_ else 'words'))))),
                ('.in', ('.extract_text', (flip_dir(dir_), arglist_[2], ('.string', '.eq'), int_,
                    ('.string', (unit_ if unit_ else 'words'))))))))),
    Rule('$StringToBoolForward', '$ArgXListOr $Int ?$Unit $Direction',
        star(lambda arglist_, int_, unit_, dir_:
            ('.composite_or_func', ('.list',
                ('.in', ('.extract_text', (flip_dir(dir_), arglist_[1], ('.string', '.eq'), int_,
                    ('.string', (unit_ if unit_ else 'words'))))),
                ('.in', ('.extract_text', (flip_dir(dir_), arglist_[2], ('.string', '.eq'), int_,
                    ('.string', (unit_ if unit_ else 'words'))))))))),
    # "is at least five words to the left of Y"
    Rule('$StringToBool', '$Compare $Int ?$Unit $Direction $ArgX',
        star(lambda cmp_, int_, unit_, dir_, arg_:
            ('.in', ('.extract_text', (dir_, arg_, ('.string', cmp_), int_,
                ('.string', (unit_ if unit_ else 'words'))))))),
    Rule('$StringToBool', '$Compare $Int ?$Unit $Direction $ArgXListAnd',
        star(lambda cmp_, int_, unit_, dir_, arglist_:
            ('.composite_and_func', ('.list',
                ('.in', ('.extract_text', (dir_, arglist_[1], ('.string', cmp_), int_,
                    ('.string', (unit_ if unit_ else 'words'))))),
                ('.in', ('.extract_text', (dir_, arglist_[2], ('.string', cmp_), int_,
                    ('.string', (unit_ if unit_ else 'words'))))))))),
    Rule('$StringToBool', '$Compare $Int ?$Unit $Direction $ArgXListOr',
        star(lambda cmp_, int_, unit_, dir_, arglist_:
            ('.composite_or_func', ('.list',
                ('.in', ('.extract_text', (dir_, arglist_[1], ('.string', cmp_), int_,
                    ('.string', (unit_ if unit_ else 'words'))))),
                ('.in', ('.extract_text', (dir_, arglist_[2], ('.string', cmp_), int_,
                    ('.string', (unit_ if unit_ else 'words'))))))))),
    # "is to the left of Y by at least five words"
    Rule('$StringToBool', '$Direction $ArgX $Compare $Int ?$Unit',
        star(lambda dir_, arg_, cmp_, int_, unit_:
            ('.in', ('.extract_text', (dir_, arg_, ('.string', cmp_), int_,
                ('.string', (unit_ if unit_ else 'words'))))))),
    Rule('$StringToBool', '$Direction $ArgXListAnd $Compare $Int ?$Unit',
        star(lambda dir_, arglist_, cmp_, int_, unit_:
            ('.composite_and_func', ('.list',
                ('.in', ('.extract_text', (dir_, arglist_[1], ('.string', cmp_), int_,
                    ('.string', (unit_ if unit_ else 'words'))))),
                ('.in', ('.extract_text', (dir_, arglist_[2], ('.string', cmp_), int_,
                    ('.string', (unit_ if unit_ else 'words'))))))))),
    Rule('$StringToBool', '$Direction $ArgXListOr $Compare $Int ?$Unit',
        star(lambda dir_, arglist_, cmp_, int_, unit_:
            ('.composite_or_func', ('.list',
                ('.in', ('.extract_text', (dir_, arglist_[1], ('.string', cmp_), int_,
                    ('.string', (unit_ if unit_ else 'words'))))),
                ('.in', ('.extract_text', (dir_, arglist_[2], ('.string', cmp_), int_,
                    ('.string', (unit_ if unit_ else 'words'))))))))),
    # Others
    # "'foo' is within 5 words of X"
    Rule('$StringToBool', '$Within $Int ?$Unit $ArgX',
        star(lambda win_, int_, unit_, arg_:
            ('.in', ('.extract_text', (win_, arg_, int_,
                ('.string', (unit_ if unit_ else 'words'))))))),
    Rule('$StringToBool', '$Within $Int ?$Unit $ArgXListAnd',
        star(lambda win_, int_, unit_, arglist_:
            ('.composite_and_func', ('.list',
                ('.in', ('.extract_text', (win_, arglist_[1], int_,
                    ('.string', (unit_ if unit_ else 'words'))))),
                ('.in', ('.extract_text', (win_, arglist_[2], int_,
                    ('.string', (unit_ if unit_ else 'words'))))))))),
    Rule('$StringToBool', '$Within $Int ?$Unit $ArgXListOr',
        star(lambda win_, int_, unit_, arglist_:
            ('.composite_or_func', ('.list',
                ('.in', ('.extract_text', (win_, arglist_[1], int_,
                    ('.string', (unit_ if unit_ else 'words'))))),
                ('.in', ('.extract_text', (win_, arglist_[2], int_,
                    ('.string', (unit_ if unit_ else 'words'))))))))),
    # "X is within 5 words of ['foo']"
    Rule('$StringToBoolForward', '$ArgX $Within $Int ?$Unit',
        star(lambda arg_, win_, int_, unit_:
            ('.in', ('.extract_text', (win_, arg_, int_,
                ('.string', (unit_ if unit_ else 'words'))))))),
    Rule('$StringToBoolForward', '$ArgXListAnd $Within $Int ?$Unit',
        star(lambda arglist_, win_, int_, unit_:
            ('.composite_and_func', ('.list',
                ('.in', ('.extract_text', (win_, arglist_[1], int_,
                    ('.string', (unit_ if unit_ else 'words'))))),
                ('.in', ('.extract_text', (win_, arglist_[2], int_,
                    ('.string', (unit_ if unit_ else 'words'))))))))),
    Rule('$StringToBoolForward', '$ArgXListOr $Within $Int ?$Unit',
        star(lambda arglist_, win_, int_, unit_:
            ('.composite_or_func', ('.list',
                ('.in', ('.extract_text', (win_, arglist_[1], int_,
                    ('.string', (unit_ if unit_ else 'words'))))),
                ('.in', ('.extract_text', (win_, arglist_[2], int_,
                    ('.string', (unit_ if unit_ else 'words'))))))))),
    # "X and Y are within 3 words of each other"
    Rule('$Bool', '$ArgXListAnd $Within $Int ?$Unit $EachOther',
        star(lambda arglist_, win_, int_, unit_, eachother_:
            ('.call',
                ('.in', ('.extract_text', (win_, arglist_[1], int_,
                    ('.string', (unit_ if unit_ else 'words'))))),
                ('.arg_to_string', arglist_[2])))),
    # "X and Y are more than 10 words apart" ->
    # "Y is in the words that are more than 10 to the right of Y"
    Rule('$Bool', '$ArgXListAnd $Compare $Int ?$Unit $Apart',
        star(lambda arglist_, cmp_, int_, unit_, apart_:
            ('.call',
                ('.in', ('.extract_text', ('.right', arglist_[1],
                    ('.string', cmp_), int_, ('.string', (unit_ if unit_ else 'words'))))),
                ('.arg_to_string', arglist_[2])))),
    # "between X and Y"
    Rule('$StringToBool', '$Between $ArgXListAnd',
        star(lambda btw_, arglist_: ('.in', ('.extract_text', (btw_, arglist_))))),
    # "in the sentence"
    Rule('$StringToBool', '$In $Sentence',
        star(lambda in_, sent_: ('.in', ('.extract_text', (sent_,))))),
    # "sentence contains 'foo'"
    Rule('$Bool', '$Sentence $Contains $String',
        star(lambda sent_, cont_, str_: ('.call', (cont_, str_), ('.extract_text', (sent_,))))),
    # "sentence contains a bar word"
    Rule('$Bool', '$Sentence $Contains $StringListOr',
        star(lambda sent_, cont_, strlist_: ('.any', ('.map', ('.in', ('.extract_text', (sent_,))), strlist_)))),
    # Phrases
    # standard directions: "to the left of arg 1"
    Rule('$Phrase', '$Direction $ArgX', star(lambda dir_, arg_: (dir_, arg_))),
    # [a word] "within 7 words to the left of arg 1" [is capitalized]
    Rule('$Phrase', '$Compare $Int ?$Unit $Direction $ArgX',
        star(lambda cmp_, int_, unit_, dir_, arg_:
            (dir_, arg_, ('.string', cmp_), int_, ('.string', (unit_ if unit_ else 'words'))))),
    Rule('$Phrase', '$Between $ArgXListAnd', star(lambda btw_, arglist_: (btw_, arglist_))),
    Rule('$Phrase', '$Sentence', star(lambda sent,: (sent,))),
    # inverted directions: "arg 1 is right of"
    Rule('$Phrase', '$ArgX $Direction', star(lambda arg_, dir_: (flip_dir(dir_), arg_))),
    # "there are three [nouns in the sentence]"
    Rule('$TokenList', '$Word $Phrase', star(lambda word_, phr_: ('.filter', phr_, 'words', r'\w+\S*'))),
    Rule('$TokenList', '$Char $Phrase', star(lambda char_, phr_: ('.filter', phr_, 'chars', None))),
    Rule('$TokenList', '$NER $Phrase', star(lambda ner_, phr_: ('.filter', phr_, 'ner_tags', ner_))),
    Rule('$StringList', '$TokenList', sems0),
    # Count
    # "the [number of (words left of arg 1)] is larger than five"
    Rule('$Int', '$Count $Phrase', sems_in_order),
    # "There is at least one [person between X and Y]"
    Rule('$Bool', '?$Exists $NumToBool $TokenList',
        star(lambda exists_, func_, list_: ('.call', func_, ('.count', list_)))),
    # UserList simplification (e.g., a family word if family is an alias)
    Rule('$UserList', '$UserList $Word', sems0),
    # Arg lists
    Rule('$String', '$ArgToString $ArgX', star(lambda func_, arg_: ('.call', func_, arg_))),
    Rule('$String', '$ArgX $ArgToString', star(lambda arg_, func_: ('.call', func_, arg_))),
    Rule('$StringListAnd', '$ArgToString $ArgXListAnd', star(lambda func_, args_: ('.map', func_, args_))),
    Rule('$StringListAnd', '$ArgXListAnd $ArgToString', star(lambda args_, func_: ('.map', func_, args_))),
    # Tuples
    Rule('$StringTuple', '$Tuple $StringList', sems_in_order),
    Rule('$StringTupleToBool', '$Equals $StringTuple', sems_in_order),
    ### Strings ###
    # building strings of arbitrary length
    # Rule('$StringStub', '$Quote $QueryToken', lambda sems: [sems[1]]),
    # Rule('$StringStub', '$StringStub $QueryToken', lambda sems: sems[0] + [sems[1]]),
    # Rule('$String', '$StringStub $Quote', lambda sems: ('.string', ' '.join(sems[0]))),
    # building strings of max length 5 (allows us to reduce beam width)
    Rule('$String', '$Quote $QueryToken $Quote', lambda sems: ('.string', ' '.join(sems[1:2]))),
    Rule('$String', '$Quote $QueryToken $QueryToken $Quote', lambda sems: ('.string', ' '.join(sems[1:3]))),
    Rule('$String', '$Quote $QueryToken $QueryToken $QueryToken $Quote', lambda sems: ('.string', ' '.join(sems[1:4]))),
    Rule('$String', '$Quote $QueryToken $QueryToken $QueryToken $QueryToken $Quote', lambda sems: ('.string', ' '.join(sems[1:5]))),
    Rule('$String', '$Quote $QueryToken $QueryToken $QueryToken $QueryToken $QueryToken $Quote', lambda sems: ('.string', ' '.join(sems[1:6]))),
    # defining $StringToBool functions
    Rule('$StringToBool', '$UnaryStringToBool', lambda sems: (sems[0],)),
    Rule('$Bool', '$StringToBoolForward $String', star(lambda func_, str_: ('.call', func_, str_))),
    # Indexing Strings #
    Rule('$String', '$Int $Word $String', star(lambda idx_, word_, str_: ('.index_word', str_, idx_))),
]
# template_rules = []
# Auto-generated equality/membership rules for the primitive types below.
template_rules = (
    PrimitiveTemplate('$String') +
    PrimitiveTemplate('$StringTuple')
)
# Complete rule set contributed by the text domain to the combined grammar.
rules = lexical_rules + unary_rules + compositional_rules + template_rules
# Executable implementations of the text-domain operators. Each op is curried:
# it consumes its semantic arguments and finally an evaluation-context dict
# (carrying 'helpers', 'aliases', and the 'candidate' being labeled).
ops = {
    # string functions
    '.upper': lambda c: lambda x: lambda cx: x(cx).isupper(),
    '.lower': lambda c: lambda x: lambda cx: x(cx).islower(),
    '.capital': lambda c: lambda x: lambda cx: len(x(cx)) and x(cx)[0].isupper(),
    '.startswith': lambda x: lambda cx: lambda y: lambda cy: y(cy).startswith(x(cx)),
    '.endswith': lambda x: lambda cx: lambda y: lambda cy: y(cy).endswith(x(cx)),
    '.index_word': lambda str_, idx_: lambda c: c['helpers']['index_word'](str_(c), idx_(c)),
    # context functions
    '.arg_to_string': lambda x: lambda c: x(c).strip() if isinstance(x(c), str) else x(c).entity.strip(),
    '.cid': lambda c: lambda arg: lambda cx: arg(cx).get_entity_attrib('entity_cids')[0], # take the first token's CID
    '.left': lambda *x: lambda cx: cx['helpers']['get_left_phrase'](*[xi(cx) for xi in x]),
    '.right': lambda *x: lambda cx: cx['helpers']['get_right_phrase'](*[xi(cx) for xi in x]),
    '.within': lambda *x: lambda cx: cx['helpers']['get_within_phrase'](*[xi(cx) for xi in x]),
    '.between': lambda x: lambda c: c['helpers']['get_between_phrase'](*[xi for xi in x(c)]),
    '.sentence': lambda c: c['helpers']['get_sentence_phrase'](c['candidate'][0]),
    '.extract_text': lambda phr: lambda c: getattr(phr(c), 'text').strip(),
    '.filter': lambda phr, field, val: lambda c: c['helpers']['phrase_filter'](phr(c), field, val),
}
# English glosses for comparison operators, used when translating a parse's
# semantics back into a pseudo-natural-language description.
cmp_converter = {
    '.eq' : 'exactly',
    '.neq' : 'not',
    '.lt' : 'less than',
    '.leq' : 'no more than',
    '.gt' : 'greater than',
    '.geq' : 'at least',
}
def dir_defaults(x):
    """Unpack a .left/.right translate-arg tuple into (cmp, count, unit, arg).

    Missing elements default to '.gt', 0, and 'words' respectively.
    """
    arg_ = x[0]
    # Comparator and unit arrive quoted (e.g. "'.eq'"); strip the quote chars.
    cmp_key = x[1][1:-1] if len(x) > 1 else '.gt'
    count = x[2] if len(x) > 2 else 0
    unit_ = x[3][1:-1] if len(x) > 3 else 'words'
    if unit_ == 'words':
        unit_ = 'word(s)'
    return cmp_converter[cmp_key], count, unit_, arg_
def within_defaults(x):
    """Unpack a .within translate-arg tuple into ('within', count, unit, arg).

    Missing elements default to 0 and 'words' ('word(s)' in the output).
    """
    arg_ = x[0]
    count = x[1] if len(x) > 1 else 0
    # The unit arrives quoted (e.g. "'chars'"); strip the quote characters.
    unit_ = x[2][1:-1] if len(x) > 2 else 'words'
    return 'within', count, ('word(s)' if unit_ == 'words' else unit_), arg_
def filter_defaults(x):
    """Render a pseudo-Python description of a .filter semantics tuple."""
    phr, field, val = x
    # 'chars' and 'words' are unconditional filters; anything else filters on
    # a token attribute (e.g. ner_tags).
    if field == 'chars':
        return "[char(s) {}]".format(phr)
    if field == 'words':
        return "[w for w in the word(s) {}]".format(phr)
    return "[w for w in the word(s) {} if w.{} == {}]".format(phr, field, val)
# Renderers used by Grammar.translate to turn a parse's semantics into a
# human-readable pseudo-code string (one entry per semantic operator).
translate_ops = {
    '.upper': "isupper()",
    '.lower': "islower()",
    '.capital': "iscapitalized()",
    '.startswith': lambda prefix: "startswith({})".format(prefix),
    '.endswith': lambda suffix: "endswith({})".format(suffix),
    '.index_word': lambda str_, idx_: "{}[{}]".format(str_, idx_ - 1 if idx_ > 0 else idx_),
    '.index_phrase': lambda str_, idx_: "{}[{}]".format(str_, idx_ - 1 if idx_ > 0 else idx_),
    '.arg_to_string': lambda arg_: "text({})".format(arg_),
    '.cid': lambda arg_: "cid({})".format(arg_),
    '.left': lambda *x: "{} {} {} to the left of {}".format(*dir_defaults(x)),
    '.right': lambda *x: "{} {} {} to the right of {}".format(*dir_defaults(x)),
    '.within': lambda *x: "{} {} {} of {}".format(*within_defaults(x)),
    '.between': lambda list_: "between({})".format(list_),
    '.sentence': "the sentence",
    '.extract_text': lambda phr: phr if phr in ["X", "Y"] else "text({})".format(phr),
    '.filter': lambda *x: "{}".format(filter_defaults(x)),
}
# The text-domain grammar bundle, combined with the core grammar by
# SemanticParser when building the full Grammar.
text_grammar = GrammarMixin(
    rules=rules,
    ops=ops,
    helpers=helpers,
    annotators=annotators,
    translate_ops=translate_ops,
) | babble-master | babble/text/text_base.py
from .annotator import Annotator
from .stopwords import stopword_list
from .rule import Rule, sems0, sems1, sems_in_order, sems_reversed, flip_dir, star
from .parse import Parse
from .grammar import Grammar, GrammarMixin
from .parser import SemanticParser | babble-master | babble/parsing/__init__.py |
class Annotator:
    """A base class for annotators."""
    def annotate(self, tokens):
        """Returns a list of pairs, each a category and a semantic representation."""
        # Base implementation annotates nothing; subclasses override.
        return [] | babble-master | babble/parsing/annotator.py
from __future__ import print_function
from collections import defaultdict, namedtuple
from itertools import product
import re
from types import FunctionType
from babble.parsing.spacy.spacy_parser import Spacy
from babble.parsing.rule import Rule, is_cat, is_optional
from babble.parsing.parse import Parse
class GrammarMixin(object):
    """Container bundling one domain's grammar components.

    Holds the rules, executable ops, helper functions, annotators, and
    translation ops that a Grammar merges from each of its bases.
    """
    def __init__(self, rules, ops, helpers, annotators, translate_ops):
        self.translate_ops = translate_ops
        self.annotators = annotators
        self.helpers = helpers
        self.ops = ops
        self.rules = rules
class Grammar(object):
    """Bottom-up CKY-style chart parser over a collection of Rules.

    One or more GrammarMixin bases contribute rules, executable ops, helpers,
    annotators, and translation ops. Parsing fills a chart span-by-span and
    supports beam pruning plus 'absorption' (skipping unparseable sub-spans
    at a per-token cost tracked on each Parse).
    """
    def __init__(self, bases, entity_names=[], aliases={},
                 beam_width=10, top_k=-1, start_symbol='$ROOT'):
        # NOTE(review): mutable default arguments are shared across calls;
        # safe here only as long as they are never mutated — confirm.
        # Extract from bases
        bases = bases if isinstance(bases, list) else [bases]
        rules = []
        self.ops = {}
        self.helpers = {}
        self.annotators = []
        self.translate_ops = {}
        for base in bases:
            rules += base.rules
            self.ops.update(base.ops)
            self.helpers.update(base.helpers)
            self.annotators += base.annotators
            self.translate_ops.update(base.translate_ops)
        # Add aliases and candidate-specific rules
        self.aliases = aliases
        for i, arg in enumerate(entity_names):
            rules.append(Rule('$ArgX', arg, ('.arg', ('.int', i + 1))))
        # Set parameters
        self.beam_width = beam_width
        self.top_k = top_k
        # Initialize
        self.categories = set()
        self.lexical_rules = defaultdict(list)
        self.unary_rules = defaultdict(list)
        self.binary_rules = defaultdict(list)
        self.start_symbol = start_symbol
        self.parser = Spacy()
        for rule in rules:
            self.add_rule(rule)
        print('Grammar construction complete.')
    def parse_string(self, string):
        """
        Returns the list of parses for the given string which can be derived
        using this grammar.
        :param string:
        """
        # Tokenize input string
        assert(isinstance(string, str))
        if string.endswith('.'):
            string = string[:-1]
        string = re.sub(r'\s+', ' ', string)
        output = self.parser.parse(None, string).__next__()
        tokens = list(map(lambda x: dict(zip(['word', 'pos', 'ner'], x)),
                    zip(output['words'], output['pos_tags'], output['ner_tags'])))
        # Lowercase all non-quoted words; doesn't handle nested quotes
        quoting = False
        for token in tokens:
            if not quoting:
                token['word'] = token['word'].lower()
            if token['pos'] in ["``", "\'\'"]:
                quoting = not quoting
        # Add start and stop _after_ parsing to not confuse the CoreNLP parser
        start = {'word': '<START>', 'pos': '<START>', 'ner': '<START>'}
        stop = {'word': '<STOP>', 'pos': '<STOP>', 'ner': '<STOP>'}
        tokens = [start] + tokens + [stop]
        words = [t['word'] for t in tokens]
        self.words = words  # (for print_chart)
        chart = defaultdict(list)
        # Standard CKY fill order: increasing span end, decreasing span start.
        for j in range(1, len(tokens) + 1):
            for i in range(j - 1, -1, -1):
                self.apply_annotators(chart, tokens, i, j)  # tokens[i:j] should be tagged?
                self.apply_aliases(chart, words, i, j)      # words[i:j] is the name of a UserList?
                self.apply_lexical_rules(chart, words, i, j)  # words[i:j] matches lexical rule?
                self.apply_binary_rules(chart, i, j)  # any split of words[i:j] matches binary rule?
                self.apply_absorb_rules(chart, i, j)
                self.apply_unary_rules(chart, i, j)  # add additional tags if chart[(i,j)] matches unary rule
                if self.beam_width:
                    self.apply_beam(chart, i, j)
        parses = chart[(0, len(tokens))]
        if self.start_symbol:
            parses = [parse for parse in parses if parse.rule.lhs == self.start_symbol]
        self.chart = chart
        if self.top_k:
            # If top_k is negative, accept all parses that are tied for the
            # fewest absorptions, then second fewest absorptions, ..., then k-fewest absorptions
            if self.top_k < 0:
                k = abs(self.top_k)
                levels = sorted(list(set(p.absorbed for p in parses)))
                parses = [p for p in parses if p.absorbed in levels[:k]]
            else:
                parses = sorted(parses, key=lambda x: x.absorbed)[:self.top_k]
        return parses
    def add_rule(self, rule):
        """Index a rule by its shape, expanding optional and n-ary RHS forms."""
        if rule.contains_optionals():
            self.add_rule_containing_optional(rule)
        elif rule.is_lexical():
            self.lexical_rules[rule.rhs].append(rule)
        elif rule.is_unary():
            self.unary_rules[rule.rhs].append(rule)
        elif rule.is_binary():
            self.binary_rules[rule.rhs].append(rule)
        elif all([is_cat(rhsi) for rhsi in rule.rhs]):
            self.add_n_ary_rule(rule)
        else:
            raise Exception('RHS mixes terminals and non-terminals: %s' % rule)
    def add_rule_containing_optional(self, rule):
        """
        Handles adding a rule which contains an optional element on the RHS.
        We find the leftmost optional element on the RHS, and then generate
        two variants of the rule: one in which that element is required, and
        one in which it is removed. We add these variants in place of the
        original rule. (If there are more optional elements further to the
        right, we'll wind up recursing.)
        For example, if the original rule is:
            Rule('$Z', '$A ?$B ?$C $D')
        then we add these rules instead:
            Rule('$Z', '$A $B ?$C $D')
            Rule('$Z', '$A ?$C $D')
        """
        # Find index of the first optional element on the RHS.
        first = next((idx for idx, elt in enumerate(rule.rhs) if is_optional(elt)), -1)
        assert first >= 0
        assert len(rule.rhs) > 1, 'Entire RHS is optional: %s' % rule
        prefix = rule.rhs[:first]
        suffix = rule.rhs[(first + 1):]
        # First variant: the first optional element gets deoptionalized.
        deoptionalized = (rule.rhs[first][1:],)
        self.add_rule(Rule(rule.lhs, prefix + deoptionalized + suffix, rule.sem))
        # Second variant: the first optional element gets removed.
        # If the semantics is a value, just keep it as is.
        sem = rule.sem
        # But if it's a function, we need to supply a dummy argument for the removed element.
        if isinstance(rule.sem, FunctionType):
            sem = lambda sems: rule.sem(sems[:first] + [None] + sems[first:])
        self.add_rule(Rule(rule.lhs, prefix + suffix, sem))
    def add_n_ary_rule(self, rule):
        """
        Handles adding a rule with three or more non-terminals on the RHS.
        We introduce a new category which covers all elements on the RHS except
        the first, and then generate two variants of the rule: one which
        consumes those elements to produce the new category, and another which
        combines the new category which the first element to produce the
        original LHS category. We add these variants in place of the
        original rule. (If the new rules still contain more than two elements
        on the RHS, we'll wind up recursing.)
        For example, if the original rule is:
            Rule('$Z', '$A $B $C $D')
        then we create a new category '$Z_$A' (roughly, "$Z missing $A to the left"),
        and add these rules instead:
            Rule('$Z_$A', '$B $C $D')
            Rule('$Z', '$A $Z_$A')
        """
        def add_category(base_name):
            # Disambiguate repeated synthetic categories with trailing '_'.
            assert is_cat(base_name)
            name = base_name
            while name in self.categories:
                name = name + '_'
            self.categories.add(name)
            return name
        category = add_category('%s_%s' % (rule.lhs, rule.rhs[0]))
        self.add_rule(Rule(category, rule.rhs[1:], lambda sems: sems))
        self.add_rule(Rule(rule.lhs, (rule.rhs[0], category),
            lambda sems: rule.apply_semantics([sems[0]] + sems[1])))
    def apply_aliases(self, chart, words, i, j):
        """Add parses to chart cell (i, j) by applying user lists."""
        if self.aliases:
            key = ' '.join(words[i:j])
            if key in self.aliases:
                lhs = '$UserList'
                rhs = tuple(key.split())
                semantics = ('.alias', ('.string', key))
                rule = Rule(lhs, rhs, semantics)
                chart[(i, j)].append(Parse(rule, words[i:j]))
    def apply_annotators(self, chart, tokens, i, j):
        """Add parses to chart cell (i, j) by applying annotators."""
        if self.annotators:
            words = [t['word'] for t in tokens]
            for annotator in self.annotators:
                for category, semantics in annotator.annotate(tokens[i:j]):
                    rule = Rule(category, tuple(words[i:j]), semantics)
                    chart[(i, j)].append(Parse(rule, words[i:j]))
    def apply_lexical_rules(self, chart, words, i, j):
        """Add parses to chart cell (i, j) by applying lexical rules."""
        for rule in self.lexical_rules[tuple(words[i:j])]:
            chart[(i, j)].append(Parse(rule, words[i:j]))
    def apply_binary_rules(self, chart, i, j):
        """Add parses to chart cell (i, j) by applying binary rules."""
        for k in range(i + 1, j):
            for parse_1, parse_2 in product(chart[(i, k)], chart[(k, j)]):
                for rule in self.binary_rules[(parse_1.rule.lhs, parse_2.rule.lhs)]:
                    chart[(i, j)].append(Parse(rule, [parse_1, parse_2]))
    def apply_absorb_rules(self, chart, i, j):
        """Add parses to chart cell (i, j) that require absorbing."""
        if j - i > 2:  # Otherwise, there's no chance for absorption
            # (m, n) is the absorbed (skipped) sub-span between the two parses.
            for m in range(i + 1, j - 1):
                for n in range(m + 1, j):
                    for parse_1, parse_2 in product(chart[(i, m)], chart[(n, j)]):
                        # Don't absorb unmatched quote marks
                        if sum(parse.rule.lhs=='$Quote' for p in range(m, n) for parse in chart[(p, p+1)]) % 2 != 0:
                            break
                        for rule in self.binary_rules[(parse_1.rule.lhs, parse_2.rule.lhs)]:
                            # Don't allow $StringStub to absorb (to control growth)
                            if rule.lhs=='$StringStub':
                                continue
                            absorbed = n - m
                            chart[(i, j)].append(Parse(rule, [parse_1, parse_2], absorbed))
    def apply_unary_rules(self, chart, i, j):
        """Add parses to chart cell (i, j) by applying unary rules."""
        for parse in chart[(i, j)]:
            for rule in self.unary_rules[(parse.rule.lhs,)]:
                chart[(i, j)].append(Parse(rule, [parse]))
    def apply_beam(self, chart, i, j):
        """Prune cell (i, j) to the beam_width parses with fewest absorptions."""
        chart[(i,j)] = sorted(chart[(i,j)], key=lambda x: x.absorbed)[:self.beam_width]
    def evaluate(self, parse):
        """Compile a parse's semantics into a labeling function of one candidate."""
        def recurse(sem):
            # Tuples are (op, *args); anything else is a literal value.
            if isinstance(sem, tuple):
                op = self.ops[sem[0]]
                args = [recurse(arg) for arg in sem[1:]]
                return op(*args) if args else op
            else:
                return sem
        LF = recurse(parse.semantics)
        return lambda candidate: LF({'helpers': self.helpers, 'aliases': self.aliases, 'candidate': candidate})
    def translate(self, sem):
        """Render a semantic representation as a pseudo-natural-language string."""
        def recurse(sem):
            if isinstance(sem, tuple):
                if sem[0] in self.translate_ops:
                    op = self.translate_ops[sem[0]]  # op is a lambda function
                    # For these types, leave func as the name of the function
                    if sem[0] in ['.composite_or', '.composite_and']:
                        args_ = [sem[1][0]]
                        args_.extend([recurse(arg) for arg in sem[2:]])
                    elif sem[0] in ['.map']:
                        func = sem[1]
                        if len(func) == 1:
                            args_ = ['{}'.format(func[0])]
                        else:
                            args_ = [recurse(func)]
                        args_.extend([recurse(arg) for arg in sem[2:]])
                    else:
                        args_ = [recurse(arg) for arg in sem[1:]]
                    return op(*args_) if args_ else op
                else:
                    return str(sem)
            else:
                return str(sem)
        return recurse(sem)
    def print_grammar(self):
        """Print all lexical, unary, and binary rules, sorted. For debugging."""
        def all_rules(rule_index):
            return [rule for rules in list(rule_index.values()) for rule in rules]
        def print_rules_sorted(rules):
            for s in sorted([str(rule) for rule in rules]):
                print('  ' + s)
        print('Lexical rules:')
        print_rules_sorted(all_rules(self.lexical_rules))
        print('Unary rules:')
        print_rules_sorted(all_rules(self.unary_rules))
        print('Binary rules:')
        print_rules_sorted(all_rules(self.binary_rules))
    def print_chart(self, nested=False):
        """Print the chart. Useful for debugging."""
        spans = sorted(list(self.chart.keys()), key=(lambda span: span[0]))
        spans = sorted(spans, key=(lambda span: span[1] - span[0]))
        for span in spans:
            if len(self.chart[span]) > 0:
                print('%-12s' % str(span))
                if nested:
                    for entry in self.chart[span]:
                        print('%-12s' % ' ', entry)
                else:
                    print(' '.join(self.words[span[0]:span[1]]))
                    for entry in self.chart[span]:
                        print('%-12s' % ' ', entry.rule.lhs)
from pandas import DataFrame, Series
from metal.contrib.info_extraction.mentions import RelationMention
from babble.core import core_grammar, text2int
from babble.text import text_grammar
from babble.parsing import Grammar, stopword_list
from babble.explanation import Explanation
class SemanticParser(object):
    def __init__(self, string_format='implicit', **kwargs):
        """Build the combined core+text Grammar; kwargs pass through to Grammar."""
        grammar_mixins = [core_grammar, text_grammar]
        self.grammar = Grammar(grammar_mixins, **kwargs)
        self.string_format = string_format
        if string_format == 'implicit':
            # Phrases that must NOT be treated as implicit quoted strings:
            # anything already in the grammar's lexicon, plus stopwords.
            self.unquotable = [' '.join(key) for key in
                self.grammar.lexical_rules] + stopword_list
        self.explanation_counter = 0
def name_explanations(self, explanations, names):
if names:
if len(names) != len(explanations):
raise Exception("If argument _names_ is provided, _names_ and "
"_explanations_ must have same length.")
else:
for exp, name in zip(explanations, names):
exp.name = name
else:
for i, exp in enumerate(explanations):
if not exp.name:
exp.name = "Explanation{}".format(i)
    def parse(self, explanations, names=None, verbose=False, return_parses=False):
        """
        Converts Explanation objects into labeling functions.
        :param explanations: An instance or list of Explanation objects
        :param names: optional name (or list of names) for the explanations
        :param verbose: if True, print a summary of parsing success
        :param return_parses: if True, return Parse objects (with .function)
            instead of the bare labeling functions
        """
        LFs = []
        parses = []
        num_parses_by_exp = []
        # Normalize singleton arguments to lists.
        explanations = explanations if isinstance(explanations, list) else [explanations]
        names = names if isinstance(names, list) or names is None else [names]
        self.name_explanations(explanations, names)
        for i, exp in enumerate(explanations):
            # Canonical form expected by the grammar's $ROOT rules.
            exp_normalized = u'label {} if {}'.format(exp.label, exp.condition)
            if self.string_format == 'implicit':
                exp_normalized = self.mark_implicit_strings(exp_normalized, exp.candidate)
            exp_parses = self.grammar.parse_string(exp_normalized)
            num_parses_by_exp.append(len(exp_parses))
            for j, parse in enumerate(exp_parses):
                parse.explanation = exp
                lf = self.grammar.evaluate(parse)
                if return_parses:
                    parse.function = lf
                    parses.append(parse)
                lf.__name__ = "{}_{}".format(exp.name, j)
                LFs.append(lf)
            self.explanation_counter += 1
        if verbose:
            return_object = 'parse(s)' if return_parses else "LF(s)"
            print("{} explanation(s) out of {} were parseable.".format(
                len(explanations) - num_parses_by_exp.count(0),
                len(explanations)))
            print("{} {} generated from {} explanation(s).".format(
                len(LFs), return_object, len(explanations)))
        if return_parses:
            return parses
        else:
            return LFs
    def parse_and_evaluate(self,
                           explanations,
                           show_everything=False,
                           show_nothing=False,
                           show_explanation=False,
                           show_candidate=False,
                           show_sentence=False,
                           show_parse=False,
                           show_semantics=False,
                           show_correct=False,
                           show_passing=False,
                           show_failing=False,
                           show_redundant=False,
                           show_erroring=False,
                           show_unknown=False,
                           pseudo_python=False,
                           paraphrases=False,
                           only=[]):  # NOTE: mutable default is only read below, never mutated
        """
        Calls SemanticParser.parse and evaluates the accuracy of resulting LFs.
        Results are stored in self.results, which contains a pandas DataFrame.

        Each parse of each explanation is classified into exactly one bucket:
        redundant (duplicate semantics), erroring (LF raised), correct
        (semantics match the explanation's gold semantics), passing (LF is
        truthy on the explanation's own candidate), or failing. Returns a
        dict mapping bucket name -> list of Parse objects.
        """
        assert(not (show_everything and show_nothing))
        if show_everything:
            if any([show_explanation, show_candidate, show_sentence, show_parse, show_semantics]):
                print("Note: show_everything = True. This overrides all other show_x commands.")
            show_explanation = show_candidate = show_sentence = show_parse = show_semantics = True
        if show_semantics:
            show_correct = show_passing = show_failing = True
            show_redundant = show_erroring = show_unknown = True
        if show_nothing:
            if any([show_explanation, show_candidate, show_sentence, show_parse, show_semantics,
                show_correct, show_passing, show_failing, show_redundant, show_erroring, show_unknown]):
                print("Note: show_nothing = True. This will override all other show_ commands.")
            show_explanation = show_candidate = show_sentence = show_parse = show_semantics = False
            show_correct = show_passing = show_failing = show_redundant = show_erroring = show_unknown = False
        self.explanation_counter = 0
        explanations = explanations if isinstance(explanations, list) else [explanations]
        col_names = ['Correct', 'Passing', 'Failing', 'Redundant', 'Erroring', 'Unknown','Index']
        dataframe = {}
        indices = []
        # Per-explanation bucket counters, indexed by explanation position.
        nCorrect = [0] * len(explanations)
        nPassing = [0] * len(explanations)
        nFailing = [0] * len(explanations)
        nRedundant = [0] * len(explanations)
        nErroring = [0] * len(explanations)
        nUnknown = [0] * len(explanations)
        parse_dict = {
            'correct' : [],
            'passing' : [],
            'failing' : [],
            'redundant': [],
            'erroring' : [],
            'unknown' : [],
        }
        for i, explanation in enumerate(explanations):
            if only and i not in only:
                continue
            indices.append(i)
            if paraphrases and not explanation.paraphrase:
                raise Exception('Keyword argument paraphrases == True '
                    'but explanation has no paraphrase.')
            if show_explanation:
                print("Explanation {}: {}\n".format(i, explanation))
            if show_candidate:
                print("CANDIDATE: {}\n".format(explanation.candidate))
            if show_sentence and not isinstance(explanation.candidate[0], str):
                print("SENTENCE: {}\n".format(explanation.candidate.text))
            # Semantics already seen for this explanation (for redundancy checks).
            semantics = set()
            parses = self.parse(
                explanation,
                explanation.name,
                return_parses=True)
            for parse in parses:
                if show_parse:
                    print("PARSE: {}\n".format(parse))
                semantics_ = self.translate(parse.semantics) if pseudo_python else parse.semantics
                # REDUNDANT
                if parse.semantics in semantics:
                    if show_redundant: print("R: {}\n".format(semantics_))
                    nRedundant[i] += 1
                    parse_dict['redundant'].append(parse)
                    continue
                semantics.add(parse.semantics)
                # ERRORING
                try:
                    condition_passes = parse.function(explanation.candidate)
                except:  # NOTE(review): deliberately broad -- any LF failure lands here
                    if show_erroring:
                        print("E: {}\n".format(semantics_))
                        print(parse.semantics)
                        print(parse.function(explanation.candidate)) # to display traceback
                        import pdb; pdb.set_trace()
                    nErroring[i] += 1
                    parse_dict['erroring'].append(parse)
                    continue
                # CORRECT
                if explanation.semantics and parse.semantics == explanation.semantics:
                    if show_correct: print("C: {}\n".format(semantics_))
                    nCorrect[i] += 1
                    LF = parse.function
                    # Replace the "_<parse index>" suffix with '*' to mark the gold LF.
                    LF.__name__ = LF.__name__[:(LF.__name__).rindex('_')] + '*'
                    parse_dict['correct'].append(parse)
                    continue
                # PASSING
                if condition_passes:
                    if show_passing: print("P: {}\n".format(semantics_))
                    nPassing[i] += 1
                    parse_dict['passing'].append(parse)
                    continue
                else:
                    # FAILING
                    if show_failing: print("F: {}\n".format(semantics_))
                    nFailing[i] += 1
                    parse_dict['failing'].append(parse)
                    continue
                # UNKNOWN
                # NOTE(review): unreachable -- both branches above `continue`,
                # so nUnknown never increments and the raise below cannot fire.
                if explanation.candidate is None:
                    nUnknown[i] += 1
                    parse_dict['unknown'].append(parse)
                    continue
                raise Exception('This should not be reached.')
            if nCorrect[i] + nPassing[i] == 0:
                print("WARNING: No correct or passing parses found for the following explanation:")
                print("EXPLANATION {}: {}\n".format(i, explanation))
        explanation_names = [exp.name for exp in explanations]
        # NOTE(review): when `only` filters explanations, data (len(indices))
        # and index (len(explanations)) lengths differ -- presumably `only`
        # is used without relying on self.results; verify with callers.
        dataframe['Correct'] = Series(data=[nCorrect[i] for i in indices], index=explanation_names)
        dataframe['Passing'] = Series(data=[nPassing[i] for i in indices], index=explanation_names)
        dataframe['Failing'] = Series(data=[nFailing[i] for i in indices], index=explanation_names)
        dataframe['Redundant'] = Series(data=[nRedundant[i] for i in indices], index=explanation_names)
        dataframe['Erroring'] = Series(data=[nErroring[i] for i in indices], index=explanation_names)
        dataframe['Unknown'] = Series(data=[nUnknown[i] for i in indices], index=explanation_names)
        dataframe['Index'] = Series(data=indices, index=explanation_names)
        self.results = DataFrame(data=dataframe, index=explanation_names)[col_names]
        return parse_dict
    def mark_implicit_strings(self, condition, candidate):
        """
        Puts quotation marks around words that are likely quotes from candidate.
        To be quoted, a phrase must:
            a) not already be in quotes
            b) occur in the candidate's sentence
            c) not be a part of any existing lexical rule in the grammar
            d) not be a number (e.g., "three")
        If a phrase is a component span of the candidate, it is replaced with
        _arg 1_ or _arg 2_ instead (without underscores).

        RelationMention and text2int come from the surrounding project
        (not visible in this chunk).
        """
        if not isinstance(candidate, RelationMention):
            return condition
        # First, replace direct mentions of candidate components with _arg x_
        candidate_words = set(candidate.tokens)
        candidate_text = candidate.text
        for argnum in [1, 2]:
            if candidate[argnum - 1].entity in condition:
                # Replace name with _arg x_
                condition = condition.replace(candidate[argnum - 1].entity, 'arg {}'.format(argnum))
        # Identify potential quoted words
        condition_words = condition.split()
        # quote_list holds inclusive (start, end) word-index spans to quote;
        # adjacent candidate words are merged into one span when the joined
        # phrase also appears in the candidate text.
        quote_list = []
        # `quoting` tracks whether we are inside an existing "..." region.
        quoting = False
        for i, word in enumerate(condition_words):
            if word.startswith('"'):
                quoting = True
            if word in candidate_words and not quoting:
                if (quote_list and # There is something to compare to
                    quote_list[-1][1] == i - 1 and # The previous word was also added
                    ' '.join(condition_words[quote_list[-1][0]:i + 1]) in candidate_text): # The complete phrase appears in candidate
                    quote_list[-1] = (quote_list[-1][0], i)
                else:
                    # Confirm it's not a number
                    try:
                        val = text2int(word)
                    except:
                        val = None
                    if val is None:
                        quote_list.append((i, i))
            if word.endswith('"'):
                quoting = False
        if not quote_list:
            return condition
        # Quote the quotable words
        new_condition_words = []
        i = 0
        j = 0
        while i < len(condition_words):
            if j < len(quote_list) and i == quote_list[j][0]:
                text_to_quote = ' '.join(condition_words[quote_list[j][0]:quote_list[j][1] + 1])
                # Skip spans consisting entirely of grammar keywords/stopwords.
                if text_to_quote.lower() in self.unquotable or all(w in self.unquotable for w in text_to_quote.lower().split()):
                    j += 1
                else:
                    new_condition_words.append('"{}"'.format(text_to_quote))
                    i = quote_list[j][1] + 1
                    j += 1
                    continue
            new_condition_words.append(condition_words[i])
            i += 1
        new_condition = ' '.join(new_condition_words)
        return new_condition
    def translate(self, sem):
        """Converts a parse's semantics into a pseudocode string."""
        # (The trailing "| babble-master | ..." text below is dataset-row
        # residue from the dump, not part of the original source line.)
        return self.grammar.translate(sem) | babble-master | babble/parsing/parser.py
from types import FunctionType
class Rule(object):
    """A CFG production (lhs -> rhs) with an optional semantic attachment.

    Equality and hashing consider only (lhs, rhs); the semantic attachment
    is ignored, so two rules with the same shape compare equal.
    """

    def __init__(self, lhs, rhs, sem=None):
        self.lhs = lhs
        # Accept either a pre-split sequence or a whitespace-delimited string.
        self.rhs = tuple(rhs.split()) if isinstance(rhs, str) else rhs
        self.sem = sem
        self.validate_rule()

    def __str__(self):
        """Returns a string representation of this Rule."""
        return 'Rule' + str((self.lhs, ' '.join(self.rhs), self.sem))

    def __eq__(self, other):
        return (self.lhs, self.rhs) == (other.lhs, other.rhs)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash((self.lhs, self.rhs))

    def apply_semantics(self, sems):
        # A plain-function attachment is applied to the child semantics;
        # anything else is treated as a constant.
        if isinstance(self.sem, FunctionType):
            return self.sem(sems)
        return self.sem

    def is_lexical(self):
        """True iff this is a lexical rule: only words (terminals) on the RHS."""
        return not any(is_cat(item) for item in self.rhs)

    def is_unary(self):
        """True iff this is a unary compositional rule: one category on the RHS."""
        return len(self.rhs) == 1 and is_cat(self.rhs[0])

    def is_binary(self):
        """True iff this is a binary compositional rule: two categories on the RHS."""
        return len(self.rhs) == 2 and is_cat(self.rhs[0]) and is_cat(self.rhs[1])

    def validate_rule(self):
        """Asserts that this Rule is well-formed (raises AssertionError if not)."""
        assert is_cat(self.lhs), 'Not a category: %s' % self.lhs
        assert isinstance(self.rhs, tuple), 'Not a tuple: %s' % self.rhs
        for item in self.rhs:
            assert isinstance(item, str), 'Not a string: %s' % item

    def contains_optionals(self):
        """True iff any RHS item is optional."""
        return any(is_optional(item) for item in self.rhs)
def is_cat(label):
    """True iff `label` names a category (non-terminal), i.e. it carries the
    leading '$' marker."""
    prefix = '$'
    return label.startswith(prefix)
def is_optional(label):
    """True iff `label` marks an optional RHS item: a leading '?' followed by
    at least one more character (a bare '?' does not count)."""
    return len(label) > 1 and label.startswith('?')
def sems0(child_sems):
    """Semantic helper: select the first child's semantics."""
    return child_sems[0]
def sems1(child_sems):
    """Semantic helper: select the second child's semantics."""
    return child_sems[1]
def sems_in_order(child_sems):
    """Semantic helper: all child semantics as a tuple, left to right."""
    return tuple(child_sems)
def sems_reversed(child_sems):
    """Semantic helper: all child semantics as a tuple, right to left."""
    return tuple(reversed(child_sems))
def flip_dir(dir_):
    """Swap '.right' and '.left'; any other value raises ValueError."""
    flipped = {'.right': '.left', '.left': '.right'}.get(dir_)
    if flipped is None:
        raise ValueError
    return flipped
def star(f):
    # Adapt an n-ary function `f` to accept a single argument tuple/sequence.
    # (The trailing "| adaptive_scheduling-main | ..." text below is
    # dataset-row residue from the dump, not part of the original source.)
    return lambda args: f(*args) | adaptive_scheduling-main | solve_gradient_seq.py
from __future__ import print_function
from collections import Iterable
from six import StringIO
from babble.parsing.rule import Rule, is_cat
class Parse(object):
    """A node in a parse tree produced by applying a Rule to child nodes.

    Children are Parse objects for non-terminal RHS items and raw token
    strings for terminals. Semantics are computed bottom-up at construction
    time, and the node is validated against its Rule.
    """

    def __init__(self, rule, children, absorbed=0):
        self.rule = rule
        self.children = tuple(children[:])
        self.semantics = self.compute_semantics()
        self.function = None     # labeling function, attached later by the parser
        self.explanation = None  # source Explanation, attached later by the parser
        # Total absorbed-token count for this subtree.
        self.absorbed = absorbed + sum(child.absorbed for child in self.children if isinstance(child, Parse))
        self.validate_parse()

    def __eq__(self, other):
        return hash(self) == hash(other)

    def __ne__(self, other):
        return (not self.__eq__(other))

    def __hash__(self):
        return hash(self.__repr__())

    def __repr__(self):
        if self.function:
            return "Parse({})".format(self.function.__name__)
        else:
            # BUGFIX: hash() returns an int, which is not subscriptable;
            # the original `hash(self.semantics)[:8]` raised TypeError.
            # Stringify first, then truncate to 8 characters.
            return "Parse(hash={})".format(str(hash(self.semantics))[:8])

    def validate_parse(self):
        """Assert that children line up one-to-one with the rule's RHS."""
        assert isinstance(self.rule, Rule), 'Not a Rule: %s' % self.rule
        assert isinstance(self.children, Iterable)
        assert len(self.children) == len(self.rule.rhs)
        for i in range(len(self.rule.rhs)):
            if is_cat(self.rule.rhs[i]):
                # Non-terminal: child must be a Parse rooted at that category.
                assert self.rule.rhs[i] == self.children[i].rule.lhs
            else:
                # Terminal: child must be the literal token.
                assert self.rule.rhs[i] == self.children[i]

    def compute_semantics(self):
        """Return this node's semantics: the rule's constant for lexical
        rules, otherwise the rule's semantic function applied to the
        children's semantics."""
        if self.rule.is_lexical():
            return self.rule.sem
        else:
            child_semantics = [child.semantics for child in self.children]
            return self.rule.apply_semantics(child_semantics)

    def display(self, indent=0, show_sem=False):
        """Return a pretty-printed string for the tree; subtrees whose
        one-line rendering exceeds 100 characters are wrapped."""
        def indent_string(level):
            return '  ' * level
        def label(parse):
            if show_sem:
                return '(%s %s)' % (parse.rule.lhs, parse.semantics)
            else:
                return parse.rule.lhs
        def to_oneline_string(parse):
            if isinstance(parse, Parse):
                child_strings = [to_oneline_string(child) for child in parse.children]
                return '[%s %s]' % (label(parse), ' '.join(child_strings))
            else:
                return str(parse)
        def helper(parse, level, output):
            line = indent_string(level) + to_oneline_string(parse)
            if len(line) <= 100:
                print(line, file=output)
            elif isinstance(parse, Parse):
                print(indent_string(level) + '[' + label(parse), file=output)
                for child in parse.children:
                    helper(child, level + 1, output)
                print(indent_string(level) + ']', file=output)
            else:
                # Terminal children are plain strings.
                print(indent_string(level) + parse, file=output)
        output = StringIO()
        helper(self, indent, output)
        return output.getvalue()[:-1] # trim final newline | babble-master | babble/parsing/parse.py
# Standard English stopword list (NLTK-style). Consumed by
# SemanticParser.__init__ to extend `unquotable`: phrases made up solely of
# these words are never auto-quoted by mark_implicit_strings.
stopword_list = [
    'i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', 'your',
    'yours', 'yourself', 'yourselves', 'he', 'him', 'his', 'himself', 'she',
    'her', 'hers', 'herself', 'it', 'its', 'itself', 'they', 'them', 'their',
    'theirs', 'themselves', 'what', 'which', 'who', 'whom', 'this', 'that',
    'these', 'those', 'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being',
    'have', 'has', 'had', 'having', 'do', 'does', 'did', 'doing', 'a', 'an',
    'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of',
    'at', 'by', 'for', 'with', 'about', 'against', 'between', 'into', 'through',
    'during', 'before', 'after', 'above', 'below', 'to', 'from', 'up', 'down',
    'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further', 'then',
    'once', 'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any',
    'both', 'each', 'few', 'more', 'most', 'other', 'some', 'such', 'no',
    'nor', 'not', 'only', 'own', 'same', 'so', 'than', 'too', 'very', 's',
    't', 'can', 'will', 'just', 'don', 'should', 'now'
] | babble-master | babble/parsing/stopwords.py
import sys
class Parser(object):
    """Abstract base class for text parsers.

    Subclasses must implement connect() (returning a ParserConnection) and
    close(); to_unicode() provides shared Python 2/3 text normalization.
    """

    def __init__(self, name, encoding='utf-8'):
        self.name = name
        self.encoding = encoding

    def to_unicode(self, text):
        '''
        Convert char encoding to unicode
        :param text: input string
        :return: unicode string (returned unchanged on Python 3)
        '''
        if sys.version_info[0] < 3:
            # Python 2 only: round-trip through string_escape to drop
            # problematic escape sequences before decoding to unicode.
            text_alt = text.encode('utf-8', 'error')
            text_alt = text_alt.decode('string_escape', errors='ignore')
            text_alt = text_alt.decode('utf-8')
            return text_alt
        else:
            return text

    def connect(self):
        '''
        Return connection object for this parser type
        :return:
        '''
        # BUGFIX: `raise NotImplemented` raises TypeError on Python 3
        # (NotImplemented is a comparison sentinel, not an exception).
        raise NotImplementedError

    def close(self):
        '''
        Kill this parser
        :return:
        '''
        raise NotImplementedError
class ParserConnection(object):
    '''
    Default connection object assumes local parser object
    '''
    def __init__(self, parser):
        self.parser = parser

    def _connection(self):
        # BUGFIX: raise the exception class, not the NotImplemented
        # singleton (which raises TypeError on Python 3).
        raise NotImplementedError

    def parse(self, document, text):
        # Delegate directly to the wrapped (local) parser.
        return self.parser.parse(document, text)
| babble-master | babble/parsing/spacy/parser.py |
from collections import defaultdict
from .parser import Parser, ParserConnection
try:
import spacy
from spacy.cli import download
from spacy import util
try:
spacy_version=int(spacy.__version__[0])
except:
spacy_version=1
except:
raise Exception("spaCy not installed. Use `pip install spacy`.")
class Spacy(Parser):
    '''
    spaCy wrapper (https://spacy.io/) that emits CoreNLP-shaped sentence
    dicts. Models for each target language need to be downloaded first:
        python -m spacy download en
    Default named entity types include PERSON, NORP, FACILITY, ORG, GPE,
    LOC, PRODUCT, EVENT, WORK_OF_ART, LANGUAGE, DATE, TIME, PERCENT, MONEY,
    QUANTITY, ORDINAL, CARDINAL.
    '''
    # NOTE: the mutable default `annotators` list is only read (never
    # mutated in place), so sharing it across calls is safe here.
    def __init__(self, annotators=['tagger', 'parser', 'entity'],
                 lang='en', num_threads=1, verbose=False):
        super(Spacy, self).__init__(name="spacy")
        self.model = Spacy.load_lang_model(lang)
        self.num_threads = num_threads
        self.pipeline = []
        if spacy_version==1:
            # spaCy v1 exposes pipeline components as model attributes.
            for proc in annotators:
                self.pipeline += [self.model.__dict__[proc]]
        else:
            # spaCy v2+: 'entity' was renamed 'ner'. NOTE(review): this
            # branch selects pipeline components positionally (index i),
            # ignoring `proc` -- assumes the model's default pipeline order
            # matches `annotators`; verify against the loaded model.
            annotators=[i if i!='entity' else 'ner' for i in annotators]
            for i,proc in enumerate(annotators):
                self.pipeline += [self.model.pipeline[i][1]]
    @staticmethod
    def model_installed(name):
        '''
        Check if spaCy language model is installed locally
        :param name: model/language name, e.g. 'en'
        :return: True iff the model directory exists under spaCy's data path
        '''
        data_path = util.get_data_path()
        model_path = data_path / name
        return model_path.exists()
    @staticmethod
    def load_lang_model(lang):
        '''
        Load spaCy language model, downloading it first if it is not
        installed. Currently supported spaCy languages:
            en English (50MB), de German (645MB),
            fr French (1.33GB), es Spanish (377MB)
        :param lang: language code
        :return: loaded spaCy model
        '''
        if not Spacy.model_installed(lang):
            download(lang)
        return spacy.load(lang)
    def connect(self):
        # Local parser: the default ParserConnection just delegates to self.
        return ParserConnection(self)
    def parse(self, document, text):
        '''
        Transform spaCy output to match CoreNLP's default format.
        Generator: yields one dict of parallel token arrays per sentence.
        :param document: parent document object, stored in each sentence dict
        :param text: raw text to tokenize and annotate
        :return: iterator of per-sentence dicts
        '''
        text = self.to_unicode(text)
        doc = self.model.tokenizer(text)
        # Run each selected pipeline component over the tokenized doc.
        for proc in self.pipeline:
            proc(doc)
        assert doc.is_parsed
        position = 0
        for sent in doc.sents:
            parts = defaultdict(list)
            text = sent.text
            for i,token in enumerate(sent):
                parts['words'].append(str(token))
                parts['lemmas'].append(token.lemma_)
                parts['pos_tags'].append(token.tag_)
                parts['ner_tags'].append(token.ent_type_ if token.ent_type_ else 'O')
                parts['char_offsets'].append(token.idx)
                parts['abs_char_offsets'].append(token.idx)
                # Dependency heads are 1-based within the sentence; the
                # sentence root points at 0 (CoreNLP convention).
                head_idx = 0 if token.head is token else token.head.i - sent[0].i + 1
                parts['dep_parents'].append(head_idx)
                parts['dep_labels'].append(token.dep_)
            # Add null entity array (matching null for CoreNLP)
            parts['entity_cids'] = ['O' for _ in parts['words']]
            parts['entity_types'] = ['O' for _ in parts['words']]
            # make char_offsets relative to start of sentence
            parts['char_offsets'] = [
                p - parts['char_offsets'][0] for p in parts['char_offsets']
            ]
            parts['position'] = position
            # Link the sentence to its parent document object
            parts['document'] = document
            parts['text'] = text
            # Add null entity array (matching null for CoreNLP)
            # NOTE(review): duplicate of the assignment above -- harmless
            # (idempotent) but redundant.
            parts['entity_cids'] = ['O' for _ in parts['words']]
            parts['entity_types'] = ['O' for _ in parts['words']]
            position += 1
            yield parts
| babble-master | babble/parsing/spacy/spacy_parser.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import scipy
from scipy.optimize import Bounds, LinearConstraint, minimize, SR1
import pdb
import math
import numpy.random
import time
from scipy.interpolate import UnivariateSpline, splrep, BSpline, splev
import torch
n = 500
#G = torch.tensor([1-i/(n+1) for i in range(n)])
G = torch.tensor([1.0 for i in range(n)])
# CIFAR10 approx pattern
#G = torch.concatenate((1.0*torch.ones(7*n//8), 0.5*torch.ones(n//8)))
# Imagenet like
#G = torch.tensor([1.0 + 1.0*i/n for i in range(n)])
#G = torch.tensor([1.0 - 0.5*i/n for i in range(n)])
#G = torch.tensor([min(0.1, 1.0/math.sqrt(i+1)) for i in range(n)])
#G = torch.concatenate((10.0*torch.tensor([1-i/(n+1) for i in range(n//4)]), 1.0*torch.tensor([1-i/(n+1) for i in range(n//4)]), 0.1*torch.ones(n//2)))
G = torch.concatenate((
torch.tensor([max(1, 10*(1-i/(n//10+1))) for i in range(n//10)]),
torch.tensor([1.0 for i in range(9*n//10)])))
# This one gives very promising shapes!
# It gives a learning rate warmup at the begining,
# with a fall-off thats more gradual and cosine like.
# G = torch.concatenate((
# torch.tensor([max(1, 10*(1-i/(n//10+1))) for i in range(n//10)]),
# torch.tensor([1.0 + (i/(9*n//10)) for i in range(9*n//10)])))
# No warmup version
#G = torch.tensor([1.0 + 1.0*i/n for i in range(n)])
# G = torch.concatenate((
# torch.tensor([((i+1)/(n//100+1)) for i in range(n//100)]),
# torch.tensor([1.0 + (i/((99*n)//100)) for i in range((99*n)//100)])))
# G = torch.concatenate((
# torch.tensor([max(1, 2*(1-i/(n//10+1))) for i in range(n//10)]),
# torch.tensor([1.0 - 0.3*(i/(9*n//10)) for i in range(9*n//10)])))
# spl = splrep(x=[0, n//10, n], y=[10, 1, 2], k=2)
# spl(range(n))
G = torch.tensor(scipy.ndimage.gaussian_filter1d(G, sigma=30))
constrain_decreasing = False
D = 1.0
Dsq = D**2
Gsq = G**2
numpy.random.seed(42)
mask = np.zeros(n)
mask[0] = 1
mask = torch.tensor(mask)
def lamb_from_increments_torch(x):
    # Decode the increment parameterization into a positive step-size
    # sequence: lamb[0] = x[0] and lamb[i] = lamb[i-1] * exp(-x[i]) for i>0.
    # Module-level `mask` (1 at index 0, 0 elsewhere) zeroes the first
    # increment so the cumulative product starts at 1 before scaling by x[0].
    xmod = x.sub(x*mask) # Set first entry to 0
    v = torch.exp(-xmod)
    cexp = torch.cumprod(v, dim=0)
    cexp_shift = cexp * x[0]
    #pdb.set_trace()
    return cexp_shift
def lamb_from_increments(xraw):
    """Type-agnostic wrapper around lamb_from_increments_torch.

    Tensor in -> tensor out; any other sequence is converted to a float64
    tensor and the result is returned as a numpy array.
    """
    was_tensor = torch.is_tensor(xraw)
    x = xraw if was_tensor else torch.tensor(xraw, dtype=torch.float64)
    result = lamb_from_increments_torch(x)
    return result if was_tensor else result.numpy()
def lamb_to_increments(yraw):
    """Inverse of lamb_from_increments: recover the increment parameterization.

    result[0] = y[0]; result[i] = -log(y[i]/y[i-1]) for i > 0.
    Tensor in -> tensor out; other sequences -> numpy array out.
    """
    was_tensor = torch.is_tensor(yraw)
    y = yraw if was_tensor else torch.tensor(yraw, dtype=torch.float64)
    # Consecutive ratios in log space, negated to give the increments.
    log_incs = -torch.diff(torch.log(y))
    result = torch.concatenate(
        (torch.tensor([y[0]]), log_incs))
    return result if was_tensor else result.numpy()
y0 = np.flip(np.cumsum(np.abs(numpy.random.normal(size=n))))/n
x0 = lamb_to_increments(y0)
assert np.all(np.isclose(lamb_from_increments(x0), y0))
def func(x_raw):
    # Objective + gradient for the step-size sequence optimization.
    # The value is the sum of three terms:
    #   t1: distance term D^2 / (2 * sum(lamb))
    #   t2: gradient term  sum(Gsq * lamb^2) / (2 * sum(lamb))
    #   t3: cross term over the reversed sequence (cumulative sums below).
    # Relies on module-level Gsq (per-step squared gradient norms) and Dsq.
    # Non-tensor input -> returns (fval, grad list) via autograd; tensor
    # input -> returns fval only (backward() still runs on it).
    if torch.is_tensor(x_raw):
        x = x_raw
    else:
        x = torch.tensor(x_raw,
                         dtype=torch.float64,
                         requires_grad=True)
    # Convert to cumulative value
    lamb = lamb_from_increments_torch(x)
    lamb_sq = lamb*lamb
    lamb_flip = lamb.flip(dims=(0,))
    lamb_sum = torch.sum(lamb)
    lamb_sq_flip = lamb_flip*lamb_flip
    Gsq_flip = Gsq.flip(dims=(0,))
    t1 = 0.5*Dsq/lamb_sum # Distance error term
    t2 = 0.5/lamb_sum # Gradient error term
    t2 *= torch.sum(Gsq*lamb_sq)
    inner_cumsum = torch.cumsum(Gsq_flip*lamb_sq_flip, dim=0)
    denom_cumsum = torch.cumsum(lamb_flip, dim=0)
    # Skip index 0: its denominator (denom_cumsum - lamb_flip) would be zero.
    eval = lamb_flip[1:]*inner_cumsum[1:]/(denom_cumsum[1:]*(denom_cumsum[1:]-lamb_flip[1:]))
    t3 = 0.5*torch.sum(eval)
    fval = (t1+t2+t3) #/max(G/D,D/G)
    fval.backward()
    if torch.is_tensor(x_raw):
        return fval.item()
    else:
        g = list(np.copy(x.grad.numpy()))
        return (fval.item(), g)
# Test
fx0, fgx0 = func(x0)
start = time.time()
if constrain_decreasing:
bounds = [(1e-12, np.inf)] + [(0, 10) for _ in range(n-1)]
else:
bounds = [(1e-12, np.inf)] + [(-10, 10) for _ in range(n-1)]
print(f"Starting solve...")
xopt_inc, fopt, dopt = scipy.optimize.fmin_l_bfgs_b(
func, x0,
bounds = bounds,
iprint = 0,
factr = 10.0, # High accuracy
maxls = 100000,
maxfun = 100000,
pgtol=1e-10,
m=20,
)
end = time.time()
xopt = lamb_from_increments(xopt_inc)
assert dopt['warnflag'] == 0
print(f"Time taken: {end - start}")
print(f"Steps to convergence: {dopt['funcalls']}")
#print(f"grad: {dopt['grad']}")
#print(xopt)
print(f"xopt[0]: {xopt[0]}")
print(f"xopt[-1]: {xopt[-1]}")
print(f"xopt[0]/xopt[-1]: {xopt[0]/xopt[-1]}")
print(f"fval: {fopt}")
print(f"fval * sqrt(n): {fopt * math.sqrt(n)} ")
cosine_curve = [D/(math.sqrt(n)) * 0.5 * (1 + math.cos((i/n) * math.pi)) for i in range(n)]
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['axes.titlesize'] = 5
mpl.rcParams['axes.labelsize'] = 5
mpl.rcParams['font.size'] = 4.2
mpl.rcParams['legend.fontsize'] = 4.2
linewidth = '0.2'
mpl.rcParams['lines.markersize'] = 1.0
mpl.rcParams['lines.linewidth'] = linewidth
mpl.rcParams['axes.linewidth'] = linewidth
mpl.rcParams['xtick.major.width'] = linewidth
mpl.rcParams['ytick.major.width'] = linewidth
fig = plt.figure(figsize=(4, 5))
ax = fig.add_subplot(3, 1, 1)
plt.tight_layout()
ax.set_xlabel('k')
ax.set_ylabel('lamb')
ax.set_title(f"Optimal step size sequence (final={xopt[-1]})")
ax.plot(range(1, n+1), xopt, 'k')
ax.plot(range(1, n+1), [(1-i/(n+1))*D/(math.sqrt(n)) for i in range(n)], color='purple')
ax.plot(range(1, n+1), cosine_curve, color='r')
ax.hlines(y=D/(math.sqrt(n)), xmin=1, xmax=n, color='b')
ax.hlines(y=(1-n/(n+1))*D/(math.sqrt(n)), xmin=1, xmax=n, color='y')
ax.plot(range(1, n+1), [((1-i/(n+1))**0.5)*D/(math.sqrt(n)) for i in range(n)], color='pink')
plt.tight_layout()
ax = fig.add_subplot(3, 1, 2)
plt.tight_layout()
ax.set_xlabel('k')
ax.set_ylabel('lamb')
ax.set_title(f"Optimal step size sequence")
ax.plot(range(1, n+1), xopt, 'k')
ax.plot(range(1, n+1), [(1-i/(n+1))*D/(math.sqrt(n)) for i in range(n)], color='purple')
ax.plot(range(1, n+1), cosine_curve, color='r')
ax.plot(range(1, n+1), [((1-i/(n+1))**0.5)*D/(math.sqrt(n)) for i in range(n)], color='pink')
ax.hlines(y=D/(math.sqrt(n)), xmin=1, xmax=n, color='b')
ax.hlines(y=(1-n/(n+1))*D/(math.sqrt(n)), xmin=1, xmax=n, color='y')
ax.set_yscale('log')
plt.tight_layout()
ax = fig.add_subplot(3, 1, 3)
plt.tight_layout()
ax.set_xlabel('k')
ax.set_ylabel('G')
ax.set_title(f"Gradient norm sequence")
ax.plot(range(1, n+1), G, 'k')
plt.tight_layout()
fname = "lamb_lbfgs_seq.png"
plt.savefig(fname, bbox_inches='tight', pad_inches=0, dpi=300)
print(f"Saved {fname}")
plt.close()
plt.close('all') | adaptive_scheduling-main | solve_gradient_seq.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import scipy
from scipy.optimize import Bounds, LinearConstraint, minimize, SR1
import pdb
import math
import numpy.random
import time
from scipy.interpolate import UnivariateSpline, splrep, BSpline, splev
import torch
n = 500
#G = torch.tensor([1-i/(n+1) for i in range(n)])
G = torch.tensor([1.0 for i in range(n)])
# CIFAR10 approx pattern
#G = torch.concatenate((1.0*torch.ones(7*n//8), 0.5*torch.ones(n//8)))
# Imagenet like
#G = torch.tensor([1.0 + 1.0*i/n for i in range(n)])
#G = torch.tensor([1.0 - 0.5*i/n for i in range(n)])
#G = torch.tensor([min(0.1, 1.0/math.sqrt(i+1)) for i in range(n)])
#G = torch.concatenate((10.0*torch.tensor([1-i/(n+1) for i in range(n//4)]), 1.0*torch.tensor([1-i/(n+1) for i in range(n//4)]), 0.1*torch.ones(n//2)))
G = torch.concatenate((
torch.tensor([max(1, 10*(1-i/(n//10+1))) for i in range(n//10)]),
torch.tensor([1.0 for i in range(9*n//10)])))
# This one gives very promising shapes!
# It gives a learning rate warmup at the begining,
# with a fall-off thats more gradual and cosine like.
# G = torch.concatenate((
# torch.tensor([max(1, 10*(1-i/(n//10+1))) for i in range(n//10)]),
# torch.tensor([1.0 + (i/(9*n//10)) for i in range(9*n//10)])))
# No warmup version
#G = torch.tensor([1.0 + 1.0*i/n for i in range(n)])
# G = torch.concatenate((
# torch.tensor([((i+1)/(n//100+1)) for i in range(n//100)]),
# torch.tensor([1.0 + (i/((99*n)//100)) for i in range((99*n)//100)])))
# G = torch.concatenate((
# torch.tensor([max(1, 2*(1-i/(n//10+1))) for i in range(n//10)]),
# torch.tensor([1.0 - 0.3*(i/(9*n//10)) for i in range(9*n//10)])))
# spl = splrep(x=[0, n//10, n], y=[10, 1, 2], k=2)
# spl(range(n))
#G = torch.tensor(scipy.ndimage.gaussian_filter1d(G, sigma=30))
D = 1.0
Dsq = D**2
Gsq = G**2
numpy.random.seed(42)
mask = np.zeros(n)
mask[0] = 1
mask = torch.tensor(mask)
x0 = np.array([D/(math.sqrt(n)) for _ in range(n)])
def func(x_raw):
    # Objective + gradient, same bound as solve_gradient_seq but optimizing
    # the step sizes directly (lamb = x) rather than via the increment
    # parameterization. Relies on module-level Gsq and Dsq.
    # Non-tensor input -> (fval, grad list); tensor input -> fval only.
    if torch.is_tensor(x_raw):
        x = x_raw
    else:
        x = torch.tensor(x_raw,
                         dtype=torch.float64,
                         requires_grad=True)
    # Convert to cumulative value
    lamb = x
    lamb_sq = lamb*lamb
    lamb_flip = lamb.flip(dims=(0,))
    lamb_sum = torch.sum(lamb)
    lamb_sq_flip = lamb_flip*lamb_flip
    Gsq_flip = Gsq.flip(dims=(0,))
    t1 = 0.5*Dsq/lamb_sum # Distance error term
    t2 = 0.5/lamb_sum # Gradient error term
    t2 *= torch.sum(Gsq*lamb_sq)
    inner_cumsum = torch.cumsum(Gsq_flip*lamb_sq_flip, dim=0)
    denom_cumsum = torch.cumsum(lamb_flip, dim=0)
    # Skip index 0: its denominator (denom_cumsum - lamb_flip) would be zero.
    eval = lamb_flip[1:]*inner_cumsum[1:]/(denom_cumsum[1:]*(denom_cumsum[1:]-lamb_flip[1:]))
    t3 = 0.5*torch.sum(eval)
    fval = (t1+t2+t3) #/max(G/D,D/G)
    fval.backward()
    if torch.is_tensor(x_raw):
        return fval.item()
    else:
        g = list(np.copy(x.grad.numpy()))
        return (fval.item(), g)
# Test
fx0, fgx0 = func(x0)
start = time.time()
bounds = [(1e-12, np.inf) for _ in range(n)]
print(f"Starting solve...")
xopt_inc, fopt, dopt = scipy.optimize.fmin_l_bfgs_b(
func, x0,
bounds = bounds,
iprint = 0,
factr = 10.0, # High accuracy
maxls = 100000,
maxfun = 100000,
pgtol=1e-10,
m=20,
)
end = time.time()
xopt = xopt_inc
assert dopt['warnflag'] == 0
print(f"Time taken: {end - start}")
print(f"Steps to convergence: {dopt['funcalls']}")
#print(f"grad: {dopt['grad']}")
#print(xopt)
print(f"xopt[0]: {xopt[0]}")
print(f"xopt[-1]: {xopt[-1]}")
print(f"xopt[0]/xopt[-1]: {xopt[0]/xopt[-1]}")
print(f"fval: {fopt}")
print(f"fval * sqrt(n): {fopt * math.sqrt(n)} ")
cosine_curve = [D/(math.sqrt(n)) * 0.5 * (1 + math.cos((i/n) * math.pi)) for i in range(n)]
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['axes.titlesize'] = 5
mpl.rcParams['axes.labelsize'] = 5
mpl.rcParams['font.size'] = 4.2
mpl.rcParams['legend.fontsize'] = 4.2
linewidth = '0.2'
mpl.rcParams['lines.markersize'] = 1.0
mpl.rcParams['lines.linewidth'] = linewidth
mpl.rcParams['axes.linewidth'] = linewidth
mpl.rcParams['xtick.major.width'] = linewidth
mpl.rcParams['ytick.major.width'] = linewidth
fig = plt.figure(figsize=(4, 5))
ax = fig.add_subplot(3, 1, 1)
plt.tight_layout()
ax.set_xlabel('k')
ax.set_ylabel('lamb')
ax.set_title(f"Optimal step size sequence (final={xopt[-1]})")
ax.plot(range(1, n+1), xopt, 'k')
ax.plot(range(1, n+1), [(1-i/(n+1))*D/(math.sqrt(n)) for i in range(n)], color='purple')
ax.plot(range(1, n+1), cosine_curve, color='r')
ax.hlines(y=D/(math.sqrt(n)), xmin=1, xmax=n, color='b')
ax.hlines(y=(1-n/(n+1))*D/(math.sqrt(n)), xmin=1, xmax=n, color='y')
ax.plot(range(1, n+1), [((1-i/(n+1))**0.5)*D/(math.sqrt(n)) for i in range(n)], color='pink')
plt.tight_layout()
ax = fig.add_subplot(3, 1, 2)
plt.tight_layout()
ax.set_xlabel('k')
ax.set_ylabel('lamb')
ax.set_title(f"Optimal step size sequence")
ax.plot(range(1, n+1), xopt, 'k')
ax.plot(range(1, n+1), [(1-i/(n+1))*D/(math.sqrt(n)) for i in range(n)], color='purple')
ax.plot(range(1, n+1), cosine_curve, color='r')
ax.plot(range(1, n+1), [((1-i/(n+1))**0.5)*D/(math.sqrt(n)) for i in range(n)], color='pink')
ax.hlines(y=D/(math.sqrt(n)), xmin=1, xmax=n, color='b')
ax.hlines(y=(1-n/(n+1))*D/(math.sqrt(n)), xmin=1, xmax=n, color='y')
ax.set_yscale('log')
plt.tight_layout()
ax = fig.add_subplot(3, 1, 3)
plt.tight_layout()
ax.set_xlabel('k')
ax.set_ylabel('G')
ax.set_title(f"Gradient norm sequence")
ax.plot(range(1, n+1), G, 'k')
plt.tight_layout()
fname = "lamb_lbfgs_seq.png"
plt.savefig(fname, bbox_inches='tight', pad_inches=0, dpi=300)
print(f"Saved {fname}")
plt.close()
plt.close('all') | adaptive_scheduling-main | solve_gradient_simple.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import scipy
from scipy.optimize import Bounds, LinearConstraint, minimize, SR1
import pdb
import math
import numpy.random
import time
import torch
n = 1000
G = 1.0
D = 1.0
Gsq = G**2
Dsq = D**2
numpy.random.seed(42)
mask = np.zeros(n)
mask[0] = 1
mask = torch.tensor(mask)
def lamb_from_increments_torch(x):
    # Decode the increment parameterization into a positive step-size
    # sequence: lamb[0] = x[0] and lamb[i] = lamb[i-1] * exp(-x[i]) for i>0.
    # Module-level `mask` (1 at index 0, 0 elsewhere) zeroes the first
    # increment so the cumulative product starts at 1 before scaling by x[0].
    xmod = x.sub(x*mask) # Set first entry to 0
    v = torch.exp(-xmod)
    cexp = torch.cumprod(v, dim=0)
    cexp_shift = cexp * x[0]
    #pdb.set_trace()
    return cexp_shift
def lamb_from_increments(xraw):
    """Type-agnostic wrapper around lamb_from_increments_torch.

    Tensor in -> tensor out; any other sequence is converted to a float64
    tensor and the result is returned as a numpy array.
    """
    was_tensor = torch.is_tensor(xraw)
    x = xraw if was_tensor else torch.tensor(xraw, dtype=torch.float64)
    result = lamb_from_increments_torch(x)
    return result if was_tensor else result.numpy()
def lamb_to_increments(yraw):
    """Inverse of lamb_from_increments: recover the increment parameterization.

    result[0] = y[0]; result[i] = -log(y[i]/y[i-1]) for i > 0.
    Tensor in -> tensor out; other sequences -> numpy array out.
    """
    was_tensor = torch.is_tensor(yraw)
    y = yraw if was_tensor else torch.tensor(yraw, dtype=torch.float64)
    # Consecutive ratios in log space, negated to give the increments.
    log_incs = -torch.diff(torch.log(y))
    result = torch.concatenate(
        (torch.tensor([y[0]]), log_incs))
    return result if was_tensor else result.numpy()
y0 = np.flip(np.cumsum(np.abs(numpy.random.normal(size=n))))/n
x0 = lamb_to_increments(y0)
assert np.all(np.isclose(lamb_from_increments(x0), y0))
def func(x_raw):
    """Full objective (and gradient via autograd) for the step-size bound.

    Accepts a torch tensor (returns the scalar value) or an array-like
    (returns ``(fval, grad)`` as expected by ``fmin_l_bfgs_b``). Relies on
    the module-level constants ``Dsq``/``Gsq`` and on
    ``lamb_from_increments_torch``.
    """
    if torch.is_tensor(x_raw):
        x = x_raw
    else:
        x = torch.tensor(x_raw,
                         dtype=torch.float64,
                         requires_grad=True)
    lamb = lamb_from_increments_torch(x)
    lamb_flip = lamb.flip(dims=(0,))
    lamb_sum = torch.sum(lamb)
    lamb_sq_flip = lamb_flip*lamb_flip
    t1 = 0.5*Dsq/lamb_sum # Distance error term
    t2 = 0.5*Gsq/lamb_sum # Gradient error term
    t2 *= torch.sum(lamb_sq_flip)
    inner_cumsum = torch.cumsum(lamb_sq_flip, dim=0)
    denom_cumsum = torch.cumsum(lamb_flip, dim=0)
    # renamed from `eval` to avoid shadowing the builtin
    term_vals = lamb_flip[1:]*inner_cumsum[1:]/(denom_cumsum[1:]*(denom_cumsum[1:]-lamb_flip[1:]))
    t3 = 0.5*Gsq*torch.sum(term_vals)
    fval = (t1+t2+t3) #/max(G/D,D/G)
    fval.backward()
    if torch.is_tensor(x_raw):
        return fval.item()
    else:
        # scipy expects a plain python list for the gradient
        g = list(np.copy(x.grad.numpy()))
        return (fval.item(), g)
# Test
fx0, fgx0 = func(x0)
start = time.time()
# The base step (first increment) must stay positive; later log-increments
# are bounded to keep the search stable.
bounds = [(1e-12, np.inf)] + [(0, 10) for _ in range(n-1)]
print(f"Starting solve...")
xopt_inc, fopt, dopt = scipy.optimize.fmin_l_bfgs_b(
    func, x0,
    bounds = bounds,
    iprint = 0,
    factr = 10.0, # High accuracy
    maxls = 100000,
    maxfun = 100000,
    pgtol=1e-10,
    m=20,
)
end = time.time()
# Map the optimized increments back to the actual step-size sequence.
xopt = lamb_from_increments(xopt_inc)
assert dopt['warnflag'] == 0
print(f"Time taken: {end - start}")
print(f"Steps to convergence: {dopt['funcalls']}")
#print(f"grad: {dopt['grad']}")
#print(xopt)
print(f"xopt[0]: {xopt[0]}")
print(f"xopt[-1]: {xopt[-1]}")
print(f"xopt[0]/xopt[-1]: {xopt[0]/xopt[-1]}")
print(f"fval: {fopt}")
print(f"fval * sqrt(n): {fopt * math.sqrt(n)} ")
def func1d(x_raw):
    """1-D objective: bound for a single constant step size eta.

    Returns (fval, grad) for scipy's fmin_l_bfgs_b (or just fval for a
    tensor input). Uses the module-level constants Dsq, Gsq and n.
    """
    # A fresh leaf tensor is always built here, so gradients never flow
    # back to a tensor argument.
    eta = torch.tensor(x_raw,
                       dtype=torch.float64,
                       requires_grad=True)
    t1 = Dsq/(2*n*eta)
    t2 = Gsq*eta/2
    # harmonic-sum term over steps 1..n-1
    t3 = (Gsq*eta/2)*torch.sum(1/torch.arange(1, n))
    fval = (t1+t2+t3)#/max(G/D,D/G)
    fval.backward()
    if torch.is_tensor(x_raw):
        return fval.item()
    else:
        g = list(np.copy(eta.grad.numpy()))
        return (fval.item(), g)
# Solve the 1-D (constant step size) problem for comparison.
xopt_1d, fopt_1d, dopt_1d = scipy.optimize.fmin_l_bfgs_b(
    func1d, np.array([y0[0]]), bounds = [(1e-8, 100)],
    iprint = 0
)
assert dopt_1d['warnflag'] == 0
xopt_1d = xopt_1d[0]
print(f"1D grad: {dopt_1d['grad']}")
print(f"1D Steps to convergence: {dopt_1d['funcalls']}")
#print(f"grad: {dopt_1d['grad']}")
print(f"eta 1d: {xopt_1d}")
print(f"1D fval: {fopt_1d}")
# Closed-form optimum of the 1-D objective for reference.
theory_eta = D/(G*math.sqrt(n*(2+math.log(n-1))))
theory1d = (D*G*math.sqrt(2+math.log(n-1))/math.sqrt(n))#/max(G/D,D/G)
print(f"Theory eta: {theory_eta}")
print(f"theory 1d fval: {theory1d}")
print(f"1d/full ratio: {fopt_1d/fopt}")
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
# Global styling for the two-panel comparison figure.
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['axes.titlesize'] = 5
mpl.rcParams['axes.labelsize'] = 5
mpl.rcParams['font.size'] = 4.2
mpl.rcParams['legend.fontsize'] = 4.2
linewidth = '0.2'
mpl.rcParams['lines.markersize'] = 1.0
mpl.rcParams['lines.linewidth'] = linewidth
mpl.rcParams['axes.linewidth'] = linewidth
mpl.rcParams['xtick.major.width'] = linewidth
mpl.rcParams['ytick.major.width'] = linewidth
fig = plt.figure(figsize=(4, 3))
# Top panel: optimal sequence (black) vs optimal flat step (red)
# vs D/(G*sqrt(n)) (blue), linear y axis.
ax = fig.add_subplot(2, 1, 1)
plt.tight_layout()
ax.set_xlabel('k')
ax.set_ylabel('lamb')
ax.set_title(f"Optimal step size sequence v.s. optimal flat Dsq={D} Gsq={G}")
ax.plot(range(1, n+1), xopt, 'k')
ax.hlines(y=xopt_1d, xmin=1, xmax=n, color='r')
ax.hlines(y=D/(G*math.sqrt(n)), xmin=1, xmax=n, color='b')
#ax.set_yscale('log')
plt.tight_layout()
# Bottom panel: same curves on a log y axis.
ax = fig.add_subplot(2, 1, 2)
plt.tight_layout()
ax.set_xlabel('k')
ax.set_ylabel('lamb')
ax.set_title(f"Optimal step size sequence v.s. optimal flat D={D} G={G}")
ax.plot(range(1, n+1), xopt, 'k')
ax.hlines(y=xopt_1d, xmin=1, xmax=n, color='r')
ax.hlines(y=D/(G*math.sqrt(n)), xmin=1, xmax=n, color='b')
ax.set_yscale('log')
plt.tight_layout()
fname = "lamb_lbfgs.png"
plt.savefig(fname, bbox_inches='tight', pad_inches=0, dpi=300)
print(f"Saved {fname}")
plt.close()
plt.close('all') | adaptive_scheduling-main | solve_bound.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
import logging
from typing import TYPE_CHECKING, Any, Callable, Optional
import torch
import torch.optim
import pdb
# At type-check time use the real optimizer params type; at runtime fall
# back to Any so the private torch import is never needed.
if TYPE_CHECKING:
    from torch.optim.optimizer import _params_t
else:
    _params_t = Any
from fairseq.optim import FairseqOptimizer, register_optimizer
logger = logging.getLogger(__name__)
def gmean(input_x):
    """Geometric mean of all elements of *input_x* (flattened)."""
    return torch.exp(torch.mean(torch.log(input_x.flatten())))
class AdaGradFlex(torch.optim.Optimizer):
    """
    Adagrad with coordinate-wise flex statistics.

    Per coordinate, the adaptive denominator is sqrt(gmax^2 + alphak) + eps,
    where gmax tracks the running max of |grad| and alphak tracks the running
    max of the "flex" accumulator sum(grad^2) - sum(grad * sk) (sk being the
    gradient sum before the current step). With momentum > 0 the parameters
    are an exponential average of an auxiliary z-sequence.
    """
    def __init__(
        self, params: _params_t,
        lr: float = 1.0,
        momentum: float = 0,
        log_every: int = 0,
        weight_decay: float = 0.0,
        eps: float = 1e-20,
        decouple: bool = True,
    ):
        # lr: base step size applied to the normalized gradient.
        # momentum: weight of the iterate averaging (0 disables the z-sequence).
        # log_every: period of the diagnostics below (currently commented out).
        # weight_decay: decay factor; decouple=True applies it AdamW-style
        # (to the parameters) instead of folding it into the gradient.
        if lr <= 0:
            raise ValueError(f"Learning rate {lr} must be positive")
        if momentum < 0:
            raise ValueError(f"Momentum {momentum} must be non-negative")
        print(f"Weight decay: {weight_decay}")
        defaults = dict(lr=lr,
                        momentum=momentum,
                        eps=eps,
                        weight_decay=weight_decay,
                        log_every=log_every,
                        k=0,
                        numerator_weighted=0.0,  # NOTE(review): never read below
                        decouple=decouple)
        super().__init__(params, defaults)

    @property
    def supports_memory_efficient_fp16(self):
        # fairseq capability flag: memory-efficient fp16 is not supported.
        return False

    @property
    def supports_flat_params(self):
        # fairseq capability flag: flat (fused) parameter tensors are fine.
        return True

    def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]:
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        # momentum is read from the first group and shared by all groups
        group = self.param_groups[0]
        momentum = group['momentum']
        ck = 1 - momentum
        log_every = group['log_every']
        for group in self.param_groups:
            eps = group["eps"]
            k = group['k']
            decay = group['weight_decay']
            decouple = group['decouple']
            lr = group['lr']
            # below_one/total only feed the commented-out diagnostics
            below_one = 0
            total = 0
            ######
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                state = self.state[p]
                # lazy per-parameter state initialization on the first step
                if "alphak" not in state:
                    state["alphak"] = torch.zeros_like(p.data).detach()
                    #state["gsq"] = torch.zeros_like(p.data).detach()
                    state["gmax"] = torch.zeros_like(p.data).detach()
                    state['sk'] = torch.zeros_like(p.data).detach()
                    if momentum > 0:
                        state["z"] = torch.clone(p.data).detach()
                    state['flex'] = torch.zeros_like(p.data).detach()
                sk = state['sk']
                #gsq = state['gsq']
                alphak = state['alphak']
                gmax = state['gmax']
                flex = state['flex']
                if grad.is_sparse:
                    grad = grad.to_dense()
                # classic (coupled) weight decay folds decay into the gradient
                if decay != 0 and not decouple:
                    grad.add_(p.data, alpha=decay)
                # flex += grad^2 - grad * sk  (sk excludes the current grad);
                # alphak and gmax keep running coordinate-wise maxima
                flex.add_(grad*grad).sub_(grad * sk)
                alphak.copy_(alphak.fmax(flex))
                gmax.copy_(gmax.fmax(grad.abs()))
                sk.add_(grad)
                if decay != 0 and decouple:
                    p_old = p.data.clone()
                if momentum > 0:
                    # z takes the adaptive step; p is an exponential average of z
                    z = state['z']
                    z.sub_(grad.div(torch.sqrt(gmax*gmax + alphak) + eps), alpha=lr)
                    p.data.mul_(1-ck).add_(z, alpha=ck)
                    if decay != 0 and decouple:
                        z.add_(p_old, alpha=-decay * lr)
                else:
                    p.data.sub_(grad.div(torch.sqrt(gmax*gmax + alphak) + eps), alpha=lr)
                    if decay != 0 and decouple:
                        p.data.add_(p_old, alpha=-decay * lr)
                ### Logging
                # below_one += ((alphak+eps)/(gmax*gmax + eps) < 1).sum().item()
                # total += grad.numel()
                # if k % 50 == 0 and k > 0:
                #     print(f"fraction below 1: {below_one/total}")
                #     ratio = (alphak+eps)/(gmax*gmax + eps)
                #     print(f"mean: {ratio.mean()} gmean: {gmean(ratio)} std: {ratio.std()}")
                #     qs = [0.0, 0.05, 0.10, 0.25, 0.50, 0.75, 0.90, 0.95, 1.0]
                #     quantiles = torch.quantile(ratio, q=torch.tensor(qs).cuda())
                #     print(f"quantiles: {list(zip(qs, quantiles))}")
            group['k'] = k + 1
        return loss
| adaptive_scheduling-main | adagrad_flex.py |
from glob import glob
import sys, os
import pathlib
def print_folder_containing_files(contains=False, root_folder='./', file_regex="*final.json"):
    """Print immediate subpaths of *root_folder* filtered by *file_regex*.

    With contains=False (default), prints subpaths that do NOT contain a
    file matching file_regex; with contains=True, prints those that do.
    Also prints the total number of subpaths scanned.
    """
    paths = glob(root_folder + "/*")
    print("total subdir ", len(paths))
    for subdir in paths:  # renamed from `dir` to avoid shadowing the builtin
        regex = str(pathlib.PurePath(subdir, file_regex))
        # evaluate the glob once instead of twice per directory
        has_match = len(glob(regex)) != 0
        # print when the match status agrees with the requested mode
        if has_match == contains:
            print(subdir)
if __name__ == "__main__":
    # CLI: gc_utils.py missing-files <root_folder> <file_regex>
    # With the default contains=False this prints folders *missing* the file.
    option = sys.argv[1]
    if option == 'missing-files':
        root = sys.argv[2]
        regex = sys.argv[3]
        print_folder_containing_files(root_folder=root, file_regex=regex)
| bert-pretraining-master | src/bert-pretraining/gc_utils.py |
import sys, os
import glob
import json
import csv
import numpy as np
import utils
from plot_utils import save_csv_with_error_bar
def get_wiki17_wiki18_pred_disagreement_vs_dim(results, dims=[192, 384, 768, 1536, 3072], seeds=[1,2,3]):
    """Per-seed wiki17-vs-wiki18 test-prediction disagreement at each dim.

    results: list of result dicts (must contain exactly one run per
    corpus/dim/seed combination). Returns [[label, dims, per-seed rows]]
    in the shape expected by save_csv_with_error_bar.
    """
    disagreements_all_dim = []
    for dim in dims:
        disagreements = []
        for seed in seeds:
            # get wiki 17 results
            keys = {"corpus": ["wiki17"], "feat_dim": [dim], "model_seed": [seed]}
            subset = utils.extract_result_subset(results, keys)
            print(subset[0]["test_err"])
            assert len(subset) == 1
            wiki17_pred = subset[0]["test_pred"]
            # get wiki 18 results
            keys = {"corpus": ["wiki18"], "feat_dim": [dim], "model_seed": [seed]}
            subset = utils.extract_result_subset(results, keys)
            print(subset[0]["test_err"])
            assert len(subset) == 1
            wiki18_pred = subset[0]["test_pred"]
            disagreements.append(utils.get_classification_disagreement(wiki17_pred, wiki18_pred))
        disagreements_all_dim.append(disagreements)
        print("dim ", dim, "disagr. ave / std: ", np.mean(disagreements), np.std(disagreements))
    # transpose so each row is one seed across all dims
    disagr = np.array(disagreements_all_dim).T
    data_list = [['Disagreement', dims, [disagr[i, :] for i in range(len(seeds))]]]
    return data_list
def print_all_stab_vs_dim_for_linear_bert_sentiment():
    """Compute wiki17-vs-wiki18 disagreement across dimensions for each
    sentiment dataset and training setting, and save percentages as CSV.

    Fix: removed the unused local `nbit` (the glob below hard-codes nbit_32).
    """
    datasets = ['mr', 'subj', 'mpqa', 'sst']
    exp_names = ['default', 'opt']
    for exp_name in exp_names:
        print("\n\n", exp_name)
        for dataset in datasets:
            # the two settings were dumped on different dates
            if exp_name == "default":
                date_str = "2019-07-07"
            else:
                date_str = "2019-07-09"
            json_regex = "/home/zjian/bert-pretraining/results/predictions/dimensionality_{}_lr_3_seeds_{}/{}/nbit_32/*wiki17*/final_results.json".format(exp_name, date_str, dataset)
            # filter by dataset and setting
            results = utils.clean_json_results(utils.gather_json_results(json_regex))
            assert len(results) == 15, json_regex
            json_regex = "/home/zjian/bert-pretraining/results/predictions/dimensionality_{}_lr_3_seeds_{}/{}/nbit_32/*wiki18_aligned*/final_results.json".format(exp_name, date_str, dataset)
            # filter by dataset and setting
            results += utils.clean_json_results(utils.gather_json_results(json_regex))
            assert len(results) == 30, json_regex
            print("\n\n", dataset)
            data_list = get_wiki17_wiki18_pred_disagreement_vs_dim(results)
            print(data_list)
            # convert disagreement fractions to percentages
            data_list[0][2] = [x * 100.0 for x in data_list[0][2]]
            csv_name = utils.get_csv_folder() + "/stab_vs_dim_{}_lr_dataset_{}.csv".format(exp_name, dataset)
            save_csv_with_error_bar(data_list, csv_name)
def get_wiki17_wiki18_pred_disagreement_generic(results, xlabel, xvalues, seeds=[1,2,3], subset_dict=None, single_xvalue_for_wiki17=False, results_ref=None):
    """Per-seed wiki17-vs-wiki18 test-prediction disagreement over *xvalues*.

    xlabel/xvalues: the swept result field and its values;
    subset_dict: extra exact-match filters (optional);
    single_xvalue_for_wiki17: use one reference wiki17 run (from results_ref)
    for every x instead of a per-x wiki17 run.
    Returns [[label, xvalues, per-seed disagreement rows]].
    """
    if subset_dict is None:
        # Fix: calling with the default used to crash on dict.update(None).
        subset_dict = {}
    disagrs_all = []
    for seed in seeds:
        disagrs = []
        for x in xvalues:
            # get wiki 17 results
            if single_xvalue_for_wiki17:
                keys = {"corpus": ["wiki17"], "model_seed": [seed]}
            else:
                keys = {"corpus": ["wiki17"], "model_seed": [seed], xlabel: [x]}
            keys.update(subset_dict)
            if single_xvalue_for_wiki17:
                subset = utils.extract_result_subset(results_ref, keys)
            else:
                subset = utils.extract_result_subset(results, keys)
            #print("keys ",keys, len(results), len(subset))
            print(keys, subset[0]["test_err"])
            assert len(subset) == 1
            wiki17_pred = subset[0]["test_pred"]
            # get wiki 18 results
            keys = {"corpus": ["wiki18"], "model_seed": [seed], xlabel: [x]}
            keys.update(subset_dict)
            subset = utils.extract_result_subset(results, keys)
            print(keys, subset[0]["test_err"])
            assert len(subset) == 1
            wiki18_pred = subset[0]["test_pred"]
            disagrs.append(utils.get_classification_disagreement(wiki17_pred, wiki18_pred))
        disagrs_all.append(disagrs)
    disagr = np.array(disagrs_all)
    data_list = [['Disagreement', xvalues, [disagr[i, :] for i in range(len(seeds))]]]
    return data_list
def print_all_stab_vs_compression_for_linear_bert_sentiment():
    """Compute wiki17-vs-wiki18 disagreement across precisions (bits) for
    each sentiment dataset and setting, and write percentages to CSV.
    """
    for setting in ['default', 'opt']:
        print("\n\n", setting)
        for ds in ['mr', 'subj', 'mpqa', 'sst']:
            # the two settings were dumped on different dates
            stamp = "2019-07-08" if setting == "default" else "2019-07-09"
            pattern = "/home/zjian/bert-pretraining/results/predictions/compression_{}_lr_3_seeds_{}/{}/nbit_*/*wiki17*/final_results.json".format(setting, stamp, ds)
            runs = utils.clean_json_results(utils.gather_json_results(pattern))
            # 1 corpus x 3 seeds x 6 precisions
            assert len(runs) == 18, pattern
            print("\n\n", ds)
            pattern = "/home/zjian/bert-pretraining/results/predictions/compression_{}_lr_3_seeds_{}/{}/nbit_*/*wiki18_aligned*/final_results.json".format(setting, stamp, ds)
            runs += utils.clean_json_results(utils.gather_json_results(pattern))
            # 1 corpus x 3 seeds x 6 precisions
            assert len(runs) == 36, pattern
            table = get_wiki17_wiki18_pred_disagreement_generic(
                runs, xlabel="nbit", xvalues=[1,2,4,8,16,32],
                subset_dict={"feat_dim": [768]})
            print(table)
            # scale disagreement fractions to percentages
            table[0][2] = [x * 100.0 for x in table[0][2]]
            out_csv = utils.get_csv_folder() + "/stab_vs_comp_{}_lr_dataset_{}.csv".format(setting, ds)
            save_csv_with_error_bar(table, out_csv)
def get_wiki18_metric_generic(results, xlabel, xvalues, seeds=[1,2,3], subset_dict=None):
    """Per-seed wiki18 test error over *xvalues*.

    Returns [[label, xvalues, per-seed test-error rows]] in the shape
    expected by save_csv_with_error_bar.
    """
    if subset_dict is None:
        # Fix: calling with the default used to crash on dict.update(None).
        subset_dict = {}
    quals_all = []
    for seed in seeds:
        quals = []
        for x in xvalues:
            # get wiki 18 results
            keys = {"corpus": ["wiki18"], "model_seed": [seed], xlabel: [x]}
            keys.update(subset_dict)
            subset = utils.extract_result_subset(results, keys)
            print(keys, subset[0]["test_err"])
            assert len(subset) == 1
            quals.append(subset[0]["test_err"])
        quals_all.append(quals)
    quals = np.array(quals_all)
    data_list = [['Test err.', xvalues, [quals[i, :] for i in range(len(seeds))]]]
    return data_list
def print_all_stab_vs_ensemble_for_linear_bert_sentiment():
    """For each setting and dataset: disagreement and wiki18 test error
    across ensembling weights (eps), saved as percentage/raw CSVs.

    Fix: removed the unused local `feat_dim` (the subset filter below
    hard-codes feat_dim=768).
    """
    datasets = ['mr', 'subj', 'mpqa', 'sst']
    exp_names = ['default', 'opt']
    for exp_name in exp_names:
        print("\n\n", exp_name)
        for dataset in datasets:
            date_str = "2019-07-10"
            json_regex = "/home/zjian/bert-pretraining/results/predictions/ensemble_{}_lr_3_seeds_{}/{}/nbit_*/*eps*/final_results.json".format(exp_name, date_str, dataset)
            # filter by dataset and setting
            results = utils.clean_json_results(utils.gather_json_results(json_regex))
            assert len(results) == 27, json_regex # 1 corpus x 3 seeds x 9 eps values
            # print("\n\n", dataset)
            # reference wiki17 runs come from the dimensionality sweep dumps
            if exp_name == "default":
                date_str = "2019-07-07"
            else:
                date_str = "2019-07-09"
            json_regex = "/home/zjian/bert-pretraining/results/predictions/dimensionality_{}_lr_3_seeds_{}/{}/nbit_*/*dim_768*wiki17*/final_results.json".format(exp_name, date_str, dataset)
            # filter by dataset and setting
            results_ref = utils.clean_json_results(utils.gather_json_results(json_regex))
            assert len(results_ref) == 3, json_regex # 1 corpus x 3 seeds
            data_list = get_wiki17_wiki18_pred_disagreement_generic(results,
                xlabel="ensemble_eps", xvalues=[0.0, 0.001, 0.01, 0.1, 0.2, 0.4, 0.6, 0.8, 1.0],
                subset_dict={"feat_dim": [768]}, single_xvalue_for_wiki17=True, results_ref=results_ref)
            print(data_list)
            # convert disagreement fractions to percentages
            data_list[0][2] = [x * 100.0 for x in data_list[0][2]]
            csv_name = utils.get_csv_folder() + "/stab_vs_ensemble_{}_lr_dataset_{}.csv".format(exp_name, dataset)
            save_csv_with_error_bar(data_list, csv_name)
            data_list = get_wiki18_metric_generic(results,
                xlabel="ensemble_eps", xvalues=[0.0, 0.001, 0.01, 0.1, 0.2, 0.4, 0.6, 0.8, 1.0],
                subset_dict={"feat_dim": [768]})
            print(data_list)
            csv_name = utils.get_csv_folder() + "/qual_vs_ensemble_{}_lr_dataset_{}.csv".format(exp_name, dataset)
            save_csv_with_error_bar(data_list, csv_name)
if __name__ == "__main__":
    # Regenerate every stability/quality CSV from the result dumps on disk.
    print_all_stab_vs_dim_for_linear_bert_sentiment()
    print_all_stab_vs_compression_for_linear_bert_sentiment()
    print_all_stab_vs_ensemble_for_linear_bert_sentiment()
| bert-pretraining-master | src/bert-pretraining/analysis.py |
import glob
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
# rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
## for Palatino and other serif fonts use:
#rc('font',**{'family':'serif','serif':['Palatino']})
rc('text', usetex=True)
import plot_utils
LABEL_FONT_SIZE = 25
TITLE_FONT_SIZE = 25
TICKER_FONT_SIZE = 22
MARKER_SIZE = 17
def get_dataset_name(csv_name):
    """Extract the display name of a dataset from a CSV filename.

    Takes the token after 'dataset_' and before the extension, upper-cases
    it, and maps SST to its conventional 'SST-2' display name.
    """
    token = csv_name.split("dataset_")[1]
    name = token.split(".")[0].upper()
    return "SST-2" if name == "SST" else name
def _plot_csv_family(pattern, xlabel, ylabel, log_x, pretty_title, marker_size=None):
    """Render one figure (pdf + png) per CSV matching *pattern*.

    pattern: glob for the input CSVs; xlabel/ylabel: axis labels;
    log_x: use a log-scaled x axis; pretty_title: use get_dataset_name()
    for the title instead of the raw dataset token from the filename;
    marker_size: forwarded to plot_utils when given.

    This consolidates four previously copy-pasted plotting loops.
    """
    for csv_name in glob.glob(pattern):
        groups, names, data = plot_utils.csv_to_table(csv_name)
        plt.figure()
        if marker_size is None:
            plot_utils.plot_figure_with_error_bar(names, data, color_list="b")
        else:
            plot_utils.plot_figure_with_error_bar(names, data, color_list="b", marker_size=marker_size)
        # output path: same basename, csv/ folder swapped for figure/, new suffix
        print(csv_name.replace(".csv", ".pdf").replace("csv", "figure"))
        if pretty_title:
            title = get_dataset_name(csv_name)
        else:
            title = csv_name.split("dataset_")[1].split(".")[0]
        plt.title(title, fontsize=TITLE_FONT_SIZE)
        plt.ylabel(ylabel, fontsize=LABEL_FONT_SIZE)
        plt.xlabel(xlabel, fontsize=LABEL_FONT_SIZE)
        if log_x:
            plt.xscale("log")
        plt.tick_params(axis='both', which='major', labelsize=TICKER_FONT_SIZE)
        plt.tick_params(axis='both', which='minor', labelsize=TICKER_FONT_SIZE)
        plt.savefig(csv_name.replace(".csv", ".pdf").replace("csv", "figure"), bbox_inches="tight")
        plt.savefig(csv_name.replace(".csv", ".png").replace("csv", "figure"), bbox_inches="tight")
        plt.close()

def plot_all_bert_stab_vs_dim():
    """Disagreement vs. embedding dimension (log-x)."""
    # "Dimension" fixes the original "Dimention" typo in the axis label.
    _plot_csv_family("./output/csv/stab_vs_dim*.csv", "Dimension",
                     "\% Disagreement", log_x=True, pretty_title=True,
                     marker_size=MARKER_SIZE)

def plot_all_bert_stab_vs_comp():
    """Disagreement vs. precision in bits (log-x)."""
    _plot_csv_family("./output/csv/stab_vs_comp*.csv", "Bits",
                     "\% Disagreement", log_x=True, pretty_title=True,
                     marker_size=MARKER_SIZE)

def plot_all_bert_stab_vs_ensemble():
    """Disagreement vs. ensembling weight epsilon (linear x)."""
    _plot_csv_family("./output/csv/stab_vs_ensemble*.csv", "Epsilon",
                     "\% Disagreement", log_x=False, pretty_title=True,
                     marker_size=MARKER_SIZE)

def plot_all_bert_qual_vs_ensemble():
    """Test error vs. ensembling weight epsilon (linear x)."""
    _plot_csv_family("./output/csv/qual_vs_ensemble*.csv", r"$\epsilon$",
                     "Test err.", log_x=False, pretty_title=False)
# cp /Users/Jian/Data/research/remote_repo/dawn8/bert-pretraining/src/bert-pretraining/output/csv/*.csv figures/bert_figures/csv/
# cp /Users/Jian/Data/research/remote_repo/dawn8/bert-pretraining/src/bert-pretraining/output/figure/*.pdf figures/bert_figures/
if __name__ == "__main__":
    # Regenerate every figure family from the CSVs on disk.
    plot_all_bert_stab_vs_dim()
    plot_all_bert_stab_vs_comp()
    plot_all_bert_stab_vs_ensemble()
plot_all_bert_qual_vs_ensemble() | bert-pretraining-master | src/bert-pretraining/plotting.py |
import torch
import numpy
import numpy as np
import _pickle as cp
from run_compress import load_dataset_feat
def test_ensembling():
    """Smoke-check that the on-disk ensembled features were blended correctly.

    Requires the precomputed feature dumps under the hard-coded scratch path:
    eps=0.0 must equal the old (wiki17) features, eps=1.0 the new
    (wiki18-aligned) ones, and eps=0.5 must differ from both.
    """
    # test whether ensemble is doing correctly
    path = "/dfs/scratch0/zjian/bert-pretraining/src/bert-pretraining/tmp"
    old_folder = path + "/pretrain_seed_2_dim_192_wiki17"
    new_folder = path + "/pretrain_seed_2_dim_192_wiki18_aligned"
    folder_0 = path + "/pretrain_seed_2_dim_192_eps_0.0"
    folder_1 = path + "/pretrain_seed_2_dim_192_eps_1.0"
    folder_half = path + "/pretrain_seed_2_dim_192_eps_0.5"
    def check_ensemble_results(folder1, folder2, equal=True):
        # Labels must always match; spot-checked features match iff `equal`.
        feats_train1, feats_heldout1, feats_test1, \
            labels_train1, labels_heldout1, labels_test1 = load_dataset_feat(folder1, "mpqa")
        feats_train2, feats_heldout2, feats_test2, \
            labels_train2, labels_heldout2, labels_test2 = load_dataset_feat(folder2, "mpqa")
        np.testing.assert_array_equal(labels_train1, labels_train2)
        np.testing.assert_array_equal(labels_heldout1, labels_heldout2)
        np.testing.assert_array_equal(labels_test1, labels_test2)
        if equal == True:
            # print("double test ", np.sum(feats_train1[10]**2), np.sum(feats_train2[10]**2))
            np.testing.assert_array_equal(feats_train1[10], feats_train2[10])
            np.testing.assert_array_equal(feats_test1[0], feats_test2[0])
            np.testing.assert_array_equal(feats_heldout1[0], feats_heldout2[0])
        else:
            # every element of the spot-checked arrays must differ
            assert np.all(feats_train1[10] - feats_train2[10])
            assert np.all(feats_test1[0] - feats_test2[0])
            assert np.all(feats_heldout1[0] - feats_heldout2[0])
    check_ensemble_results(old_folder, folder_0)
    check_ensemble_results(new_folder, folder_1)
    check_ensemble_results(folder_half, old_folder, equal=False)
    check_ensemble_results(folder_half, new_folder, equal=False)
if __name__ == "__main__":
    # NOTE: the commented blocks below are scratch LSTM/packing experiments
    # kept for reference; only the ensembling check actually runs.
    # # feature = torch.Tensor([[, [], []])
    # example1 = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]
    # example2 = [[1, 2, 3, 4], [5, 6, 7, 8], [0,0,0,0]]
    # examples = [example1, example2]
    # input = torch.Tensor(examples).permute([1, 0 , 2])
    # length = torch.LongTensor([3, 2])
    # print(input, input.size())
    # lstm_model = torch.nn.LSTM(input_size=4, hidden_size=2, num_layers=2)
    # input1 = torch.Tensor([example1]).permute([1,0,2])
    # output1, _ = lstm_model(input1)
    # # print("example 1 output ", output1.shape)
    # print("example 1 output ", output1[2, 0])
    # input2 = torch.Tensor([example2]).permute([1,0,2])
    # output2, _ = lstm_model(input2)
    # print("example 2 output ", output2[1, 0])
    # input = torch.nn.utils.rnn.pack_padded_sequence(input, length, batch_first=False, enforce_sorted=False)
    # output, _ = lstm_model(input)
    # print("combined example output ", output)
    # output, _ = torch.nn.utils.rnn.pad_packed_sequence(output)
    # print("extract example 1 output ", output[2, 0])
    # print("extract example 2 output ", output[1, 1])
    # print("inversed packed seq ", output.size(), output[-1])
    # # save list of torch tensor
    # torch.save([input1, input2], "output/test.pt")
    # tensors = torch.load("output/test.pt")
    # print("test pickles ", tensors)
    # on how to generate the
    test_ensembling()
| bert-pretraining-master | src/bert-pretraining/scratch.py |
import numpy as np
import torch
import scipy
import argparse
import sys, os
import logging
from smallfry import compress
import utils
# Numerical tolerance for the post-quantization clipping assertions.
NUM_TOL = 1e-6
# Fraction of examples sampled when estimating ranges / Procrustes maps.
FEAT_SUBSAMPLE_RATE = 0.1
def read_npy_feature(file):
    """Load the list of per-example feature arrays from an .npz archive.

    Returns the arrays in archive order. Fix: the NpzFile (and its
    underlying zip handle) is now closed via a context manager instead of
    being leaked.
    """
    with np.load(file) as feats:
        return [feats[name] for name in feats.files]
def read_npy_label(file):
    """Load a label array from a .npy file."""
    return np.load(file)
def get_feature_file(dataset, folder, datapart="train"):
    """Path of the serialized .npz feature archive for one split.

    datapart must be one of "train", "heldout" or "test".
    """
    if datapart not in ("train", "heldout", "test"):
        raise Exception("Datapart", datapart, " not supported!")
    return folder + "/{}.{}.feature.npz".format(dataset, datapart)
def get_label_file(dataset, folder, datapart="train"):
    """Path of the serialized .npy label file for one split.

    datapart must be one of "train", "heldout" or "test".
    """
    if datapart not in ("train", "heldout", "test"):
        raise Exception("Datapart", datapart, " not supported!")
    return folder + "/{}.{}.label.npy".format(dataset, datapart)
def load_dataset_feat(folder, dataset):
    """Load features (.npz) and labels (.npy) for all three splits.

    Returns (feats_train, feats_heldout, feats_test,
             labels_train, labels_heldout, labels_test).
    """
    parts = ("train", "heldout", "test")
    feats = [read_npy_feature(get_feature_file(dataset, folder, part)) for part in parts]
    labels = [read_npy_label(get_label_file(dataset, folder, part)) for part in parts]
    return feats[0], feats[1], feats[2], labels[0], labels[1], labels[2]
def load_procrustes_data(args):
    """Load all splits of args.dataset from the folder holding args.input_file.

    args.input_file must be the train feature archive of that folder.
    """
    assert "train.feature.npz" in args.input_file
    folder = os.path.dirname(args.input_file)
    return load_dataset_feat(folder, args.dataset)
def load_ensemble_data(args):
    """Load the (new, old) feature sets used for ensembling.

    "new" comes from args.new_input_folder (e.g. wiki18-aligned) and
    "old" from args.old_input_folder (e.g. wiki17).
    """
    new_dataset = load_dataset_feat(args.new_input_folder, args.dataset)
    logging.info("new feature from " + args.new_input_folder)
    old_dataset = load_dataset_feat(args.old_input_folder, args.dataset)
    logging.info("old feature from " + args.old_input_folder)
    return new_dataset, old_dataset
def save_data_feat(args, feats_train, feats_heldout, feats_test,
                   labels_train, labels_heldout, labels_test):
    """Persist features (.npz) and labels (.npy) for all three splits
    under args.out_folder for args.dataset."""
    folder = args.out_folder
    dataset = args.dataset
    for part, feats in (("train", feats_train), ("heldout", feats_heldout), ("test", feats_test)):
        np.savez(get_feature_file(dataset, folder, part), *feats)
    for part, labels in (("train", labels_train), ("heldout", labels_heldout), ("test", labels_test)):
        np.save(get_label_file(dataset, folder, part), labels)
def save_final_results_compress(args, range_limit):
    """Dump run arguments plus the estimated quantization range to JSON."""
    payload = args.__dict__
    payload["results"] = {"range_limit": range_limit}
    utils.save_to_json(payload, args.out_folder + "/final_results.json")
def save_final_results_procrutes(args):
    """Dump run arguments to JSON.

    (The name keeps the historical 'procrutes' spelling used by callers.)
    """
    utils.save_to_json(args.__dict__, args.out_folder + "/final_results.json")
def save_final_results_ensemble(args):
    """Dump run arguments to JSON for an ensembling run."""
    utils.save_to_json(args.__dict__, args.out_folder + "/final_results.json")
def compression(feats, labels, args):
    """Uniformly quantize every feature array in-place to args.nbit bits.

    The clipping range is estimated on a random subsample of examples via
    golden-section search (smallfry); 32-bit inputs are returned untouched.
    Returns (feats, labels, range_limit); range_limit is None for 32 bit.
    """
    # we will directly save if we need 32 bit embeddings
    range_limit = None
    if args.nbit != 32:
        # subsample and concatenate
        subset_id = np.random.choice(np.arange(len(feats)), size=int(len(feats) * FEAT_SUBSAMPLE_RATE))
        feats_subset = [feats[i].copy() for i in subset_id]
        X = np.concatenate(feats_subset, axis=0)
        # assert the last axis is feature dimension
        assert feats_subset[0].shape[-1] == feats_subset[-1].shape[-1]
        assert feats_subset[1].shape[-1] == feats_subset[-1].shape[-1]
        # run the compression to each of the things in the list
        logging.info("Estimate range limit using sample shape " + str(X.shape))
        range_limit = compress.find_optimal_range(X, args.nbit, stochastic_round=False, tol=args.golden_sec_tol)
        logging.info("Range limit {}, max/min {}/{}, std {} ".format(
            range_limit, np.max(X), np.min(X), np.std(X)))
        compressed_feats = []  # NOTE(review): unused; quantization happens in-place below
        for i, feat in enumerate(feats):
            # quantize, then copy back into the original buffer in-place
            comp_feat = compress._compress_uniform(feat, args.nbit, range_limit,
                                                   stochastic_round=False, skip_quantize=False)
            np.copyto(dst=feat, src=comp_feat)
            # sanity: values clipped to [-range_limit, range_limit] with
            # at most 2^nbit distinct levels
            assert np.max(feats[i]) - range_limit < NUM_TOL, "not clipped right max/limit {}/{}".format(np.max(feats[i]), range_limit)
            assert -range_limit - np.min(feats[i]) < NUM_TOL, "not clipped right max/limit {}/{}".format(np.min(feats[i]), -range_limit)
            assert np.unique(feats[i]).size <= 2**args.nbit, "more unique values than expected"
    return feats, labels, range_limit
def procrustes(feats_train, feats_heldout, feats_test, train_labels, args):
    """Align wiki18 features to their wiki17 counterparts.

    Fits an orthogonal map R (scipy orthogonal_procrustes) on a random
    subsample of the train features against the reference (wiki17) features,
    then rotates all three splits by R. Returns the rotated split lists.
    """
    # sanity check the reference and main feature feature are indeed a pair
    # which are only different in terms of corpus they are trained on
    # we process both the train heldout and test feature in this function
    assert "wiki18" in args.input_file
    check_name = args.input_file.replace("wiki18", "wiki17")
    assert check_name == args.procrustes_ref_input_file
    assert ".train.feature.npz" in args.input_file
    # load reference feature
    logging.info("procrustes ref feature " + args.procrustes_ref_input_file)
    feats_ref = read_npy_feature(args.procrustes_ref_input_file)
    labels_ref = read_npy_label(args.procrustes_ref_input_file.replace(".feature.npz", ".label.npy"))
    np.testing.assert_array_equal(train_labels, labels_ref)
    # subsample and concatenate (same example indices on both sides)
    subset_id = np.random.choice(np.arange(len(feats_train)), size=int(len(feats_train) * FEAT_SUBSAMPLE_RATE))
    feats_subset = [feats_train[i].copy() for i in subset_id]
    feats_subset_ref = [feats_ref[i].copy() for i in subset_id]
    X = np.concatenate(feats_subset, axis=0)
    X_ref = np.concatenate(feats_subset_ref, axis=0)
    # assert the last axis is feature dimension
    assert feats_subset[0].shape[-1] == feats_subset[-1].shape[-1]
    assert feats_subset[1].shape[-1] == feats_subset[-1].shape[-1]
    # assert the subsampled features has matched shape
    assert X.shape == X_ref.shape
    logging.info("Performing procrustes using sample matrix of size " + str(X.shape))
    R, _ = scipy.linalg.orthogonal_procrustes(A=X, B=X_ref)
    # rotate every array of every split into the reference space
    feats_train_rot = []
    feats_heldout_rot = []
    feats_test_rot = []
    for feat in feats_train:
        feats_train_rot.append(feat @ R)
    for feat in feats_heldout:
        feats_heldout_rot.append(feat @ R)
    for feat in feats_test:
        feats_test_rot.append(feat @ R)
    return feats_train_rot, feats_heldout_rot, feats_test_rot
def ensemble(data_new, data_old, eps):
    """Blend two feature sets: out = eps * new + (1 - eps) * old.

    Both inputs are 6-tuples (train/heldout/test features, then
    train/heldout/test labels). Labels must agree exactly between the two
    sets and are passed through unchanged; features must differ. Returns
    the blended 6-tuple.
    """
    assert 0.0 <= eps <= 1.0
    (feats_train_new, feats_heldout_new, feats_test_new,
     labels_train_new, labels_heldout_new, labels_test_new) = data_new
    (feats_train_old, feats_heldout_old, feats_test_old,
     labels_train_old, labels_heldout_old, labels_test_old) = data_old
    # the two corpora must share identical labels but distinct features
    np.testing.assert_array_equal(labels_train_old, labels_train_new)
    np.testing.assert_array_equal(labels_heldout_old, labels_heldout_new)
    np.testing.assert_array_equal(labels_test_old, labels_test_new)
    assert not np.array_equal(feats_train_old, feats_train_new)
    assert not np.array_equal(feats_heldout_old, feats_heldout_new)
    assert not np.array_equal(feats_test_old, feats_test_new)
    logging.info("ensembling with weights {} for new feature/embeddings".format(eps))

    def blend_split(old_split, new_split):
        # elementwise convex combination, one array per example
        blended = []
        for feat_old, feat_new in zip(old_split, new_split):
            assert feat_old.shape == feat_new.shape
            blended.append(feat_new * eps + feat_old * (1 - eps))
        return blended

    feats_train = blend_split(feats_train_old, feats_train_new)
    feats_heldout = blend_split(feats_heldout_old, feats_heldout_new)
    feats_test = blend_split(feats_test_old, feats_test_new)
    return (feats_train, feats_heldout, feats_test,
            labels_train_new, labels_heldout_new, labels_test_new)
def main():
    """Entry point for feature post-processing jobs.

    Dispatches on --job_type:
      * compression: quantize a feature file to --nbit bits and save it.
      * procrustes: rotate features onto a reference embedding via
        orthogonal procrustes alignment.
      * ensemble: interpolate old and new embeddings with weight
        --ensemble_eps.
    Outputs (features, labels, results summary) go under --out_folder.
    """
    # add arguments
    argparser = argparse.ArgumentParser(sys.argv[0], conflict_handler='resolve')
    argparser.add_argument("--job_type", type=str, default="compression", choices=["compression", "procrustes", "ensemble"])
    argparser.add_argument("--input_file", type=str, help="The feature file to be compressed.")
    argparser.add_argument("--procrustes_ref_input_file", type=str, help="For procrustes, this specifies the reference we rotate the input file feature to.")
    argparser.add_argument("--out_folder", type=str, help="The folder to contain the output")
    argparser.add_argument("--nbit", type=int, help="Number of bits for compressed features.")
    argparser.add_argument("--dataset", type=str, help="The dataset for asserting on the filenames. Only for on the fly check")
    argparser.add_argument("--seed", type=int, help="Random seeds for the sampleing process.")
    argparser.add_argument("--golden_sec_tol", type=float, default=1e-3,
        help="termination criterion for golden section search")
    argparser.add_argument("--old_input_folder", type=str, help="old embedding for ensembling, e.g. wiki17 enbeddings")
    argparser.add_argument("--new_input_folder", type=str, help="new embedding for ensembling, e.g. wiki18 aligned")
    argparser.add_argument("--ensemble_eps", type=float, help="the ensembling weights for new embedding, e.g. wiki18 aligned")
    args = argparser.parse_args()
    # assert args.dataset in args.input_file
    # assert "seed_{}".format(args.seed) in args.input_file
    utils.ensure_dir(args.out_folder)
    utils.init_logging(args.out_folder)
    # set random seeds
    utils.set_random_seed(args.seed)
    if args.job_type == "compression":
        # load the dataset
        feats = read_npy_feature(args.input_file)
        labels = read_npy_label(args.input_file.replace(".feature.npz", ".label.npy"))
        feats, labels, range_limit = compression(feats, labels, args)
        save_final_results_compress(args, range_limit)
        # save the results back to the format
        # (output keeps the input's basename so downstream loaders find it)
        out_file_name = os.path.basename(args.input_file)
        out_file_name = args.out_folder + "/" + out_file_name
        np.savez(out_file_name, *feats)
        np.save(out_file_name.replace(".feature.npz", ".label.npy"), labels)
    elif args.job_type == "procrustes":
        feats_train, feats_heldout, feats_test, \
            labels_train, labels_heldout, labels_test = load_procrustes_data(args)
        feats_train, feats_heldout, feats_test = procrustes(feats_train, feats_heldout, feats_test, labels_train, args)
        save_data_feat(args, feats_train, feats_heldout, feats_test,
            labels_train, labels_heldout, labels_test)
        save_final_results_procrutes(args)
    elif args.job_type == "ensemble":
        new_dataset, old_dataset = load_ensemble_data(args)
        feats_train, feats_heldout, feats_test, \
            labels_train, labels_heldout, labels_test = ensemble(new_dataset, old_dataset, args.ensemble_eps)
        save_data_feat(args, feats_train, feats_heldout, feats_test,
            labels_train, labels_heldout, labels_test)
        save_final_results_ensemble(args)
# TODO: the range limit is correct, compression is indeed carried out and it is properly copyed inplace
# TODO: check a direct through saving case, check a compressed case to see the similarity
# sanity check on if the tol is reasoanble
if __name__ == "__main__":
    main()
| bert-pretraining-master | src/bert-pretraining/run_compress.py |
# Standard library
import ast
import json
import math
import os, sys
import random
import re
from glob import glob
from multiprocessing import Process

# Third-party
import spacy
def get_raw_data_content(f_name):
    """Read a WikiExtractor output file and parse one article dict per line.

    Args:
        f_name: path to a file whose lines are Python-literal dicts,
            e.g. {'id': ..., 'title': ..., 'text': ...}.

    Returns:
        List of parsed article dicts, one per input line.
    """
    with open(f_name, "r") as f:
        # ast.literal_eval only accepts literals, unlike the previous eval()
        # which would execute arbitrary code embedded in the data file.
        return [ast.literal_eval(line) for line in f]
def extract_article_id_name(path):
    """Scan WikiExtractor output under ``path`` and index articles by id.

    Returns two dicts keyed by integer article id:
    id -> article title, and id -> whitespace token count of the text.
    """
    id_to_name, id_to_len = {}, {}
    # Only raw extractor output; skip previously generated "_sent" files.
    raw_files = [p for p in glob(path + "/*/wiki*") if "_sent" not in p]
    for raw_file in raw_files:
        print("processing id and name from ", raw_file)
        for article in get_raw_data_content(raw_file):
            article['id'] = int(article['id'])
            art_id = article['id']
            id_to_name[art_id] = article['title']
            id_to_len[art_id] = len(article['text'].split(' '))
    return id_to_name, id_to_len
def process_article_ids(path_wiki17, path_wiki18):
    """Index both wiki dumps, persist id/title/length maps, and report overlap.

    Writes id_to_name_* and id_to_len_* JSON files for each dump under
    ./output, plus the list of article ids common to both dumps and the ids
    present only in wiki18, printing token/article counts along the way.
    """
    id_to_name_wiki17, id_to_len_wiki17 = extract_article_id_name(path_wiki17)
    id_to_name_wiki18, id_to_len_wiki18 = extract_article_id_name(path_wiki18)
    with open("./output/id_to_name_wiki17", "w") as f:
        json.dump(id_to_name_wiki17, f)
    with open("./output/id_to_name_wiki18", "w") as f:
        json.dump(id_to_name_wiki18, f)
    with open("./output/id_to_len_wiki17", "w") as f:
        json.dump(id_to_len_wiki17, f)
    with open("./output/id_to_len_wiki18", "w") as f:
        json.dump(id_to_len_wiki18, f)
    print('start comparing ')
    # Total whitespace-token counts per dump.
    token_cnt_wiki17 = 0
    token_cnt_wiki18 = 0
    # common_article_id = set(id_to_name_wiki17.keys()).intersection(id_to_name_wiki18.keys())
    # wiki18_only_article_id = set(id_to_name_wiki18.keys()) - set(id_to_name_wiki17.keys())
    for article_id in id_to_name_wiki17.keys():
        # if article_id in id_to_name_wiki18.keys():
        token_cnt_wiki17 += id_to_len_wiki17[article_id]
    for article_id in id_to_name_wiki18.keys():
        token_cnt_wiki18 += id_to_len_wiki18[article_id]
    print("# of tokens for 17 and 18 ", token_cnt_wiki17, token_cnt_wiki18)
    wiki17_id_list_sorted = sorted(list(id_to_name_wiki17.keys()))
    wiki18_id_list_sorted = sorted(list(id_to_name_wiki18.keys()))
    wiki17_id = set(wiki17_id_list_sorted)
    wiki18_id = set(wiki18_id_list_sorted)
    common_article_id = list(wiki17_id.intersection(wiki18_id))
    wiki18_only_article_id = list(wiki18_id.difference(wiki17_id))
    with open("./output/common_article_id", "w") as f:
        json.dump(common_article_id, f)
    with open("./output/wiki18_only_article_id", "w") as f:
        json.dump(wiki18_only_article_id, f)
    print("# of articles for 17 and 18, intersection / diff ",
          len(id_to_name_wiki17.keys()), len(id_to_name_wiki18.keys()),
          len(common_article_id), len(wiki18_only_article_id))
    # Token counts for the overlap (sized by wiki17's copy) and wiki18-only set.
    common_article_tokens = 0
    for article_id in common_article_id:
        common_article_tokens += id_to_len_wiki17[article_id]
    wiki18_only_article_tokens = 0
    for article_id in wiki18_only_article_id:
        wiki18_only_article_tokens += id_to_len_wiki18[article_id]
    print("17 18 # token intersection / diff ", common_article_tokens, wiki18_only_article_tokens)
def seg_json_sentences(spacy_proc, article, subset_ids):
    """Sentence-segment one article dict if its id is in subset_ids.

    Args:
        spacy_proc: callable (e.g. a spaCy pipeline) mapping text to a doc
            exposing ``.sents``.
        article: dict with at least 'id', 'title' and 'text' keys.
        subset_ids: container of int article ids to keep.

    Returns:
        List of cleaned sentence strings; empty list if the article id is
        not in subset_ids.
    """
    text = article['text']
    title = article['title']
    if int(article['id']) not in subset_ids:
        return []
    doc = spacy_proc(text)
    sentences = []
    for i, sent in enumerate(doc.sents):
        x = str(sent)
        # Strip any mix of trailing whitespace.  The previous chained
        # rstrip(' ').rstrip('\n').rstrip('\r').rstrip('\t') stripped in a
        # fixed order, so e.g. "x \n" was left as "x ".
        x = x.rstrip(' \n\r\t')
        if i == 0:
            # remove title from the begining of text
            x = x.replace(title + '\n\n', '')
        # remove in sentence \n
        x = x.replace('\n', '')
        sentences.append(x)
    return sentences
def proc_raw_data_file(f_in_name, f_out_name, subset_ids):
    """Convert one WikiExtractor-generated subfile to one-sentence-per-line.

    Articles whose id is not in ``subset_ids`` are dropped; kept articles
    are separated by a blank line in the output file.
    """
    spacy_proc = spacy.load("en")
    # NOTE: the original wrapped everything in `with open(f_in_name, 'r')`
    # but never read from that handle -- get_raw_data_content reopens the
    # file itself -- so the redundant open is removed.
    content = get_raw_data_content(f_in_name)
    with open(f_out_name, 'w') as f_out:
        for x in content:
            # each iteration process an article
            sentences = seg_json_sentences(spacy_proc, x, subset_ids)
            for sent in sentences:
                f_out.write(sent + '\n')
            # separate articles
            if len(sentences) != 0:
                f_out.write('\n')
    # print("raw file processed ", f_in_name)
def subsample_wiki_id(article_id_src="./output/common_article_id", subset_prop=0.1, seed=123):
    """Deterministically subsample a proportion of article ids.

    Args:
        article_id_src: path to a JSON list of ids, or an id list/tuple.
        subset_prop: proportion in [0, 1] of ids to keep.
        seed: RNG seed so the same inputs always yield the same sample.

    Returns:
        List of floor(len(ids) * subset_prop) ids, in shuffled order.

    Raises:
        Exception: if article_id_src is neither a path nor a list/tuple.
    """
    random.seed(seed)
    if isinstance(article_id_src, str):
        with open(article_id_src, 'r') as f:
            article_ids = json.load(f)
    elif isinstance(article_id_src, (list, tuple)):
        # Copy so the caller's sequence is not reordered as a side effect
        # (the original shuffled the input list in place, and would have
        # raised TypeError on the advertised tuple input).
        article_ids = list(article_id_src)
    else:
        raise Exception("article_id_src type not supported!")
    random.shuffle(article_ids)
    n_sample = math.floor(len(article_ids) * subset_prop)
    return article_ids[:n_sample]
def get_stat_on_final_res(path):
    """Print sentence and whitespace-token counts for a merged text file.

    Blank lines (article separators) are not counted as sentences.
    """
    print("stats on ", path)
    sent_count, token_count = 0, 0
    with open(path, 'r') as handle:
        for row in handle.readlines():
            if row == "\n":
                continue
            sent_count += 1
            token_count += len(row.split(" "))
    print("# sent: ", sent_count, " # token ", token_count)
def get_txt_per_sentence_file(path, wiki_subset_ids, n_subfiles, do_eval=False):
    """Sentence-segment all raw wiki files under ``path`` and merge the output.

    Phase 1 spawns one worker process per raw file (joined in batches of
    100) to write one-sentence-per-line "_sent" (or "_sent_eval") files.
    Phase 2 concatenates those files, in sorted order, into ``n_subfiles``
    wiki_bert*.txt files under the sibling wiki_txt folder.  Only articles
    whose id appears in ``wiki_subset_ids`` are kept.
    """
    raw_files = glob(path+'/*/wiki_*')
    # prevent old _sent files being processed
    raw_files = [x for x in raw_files if '_sent' not in x]
    if do_eval:
        proc_files = [x + "_sent_eval" for x in raw_files]
    else:
        proc_files = [x + "_sent" for x in raw_files]
    mprocs = []
    for i, (raw_file, proc_file) in enumerate(zip(raw_files, proc_files)):
        mproc = Process(target=proc_raw_data_file,
            args=(raw_file, proc_file, wiki_subset_ids))
        mprocs.append(mproc)
        mproc.start()
        # launch 100 process every time
        if i % 100 == 99 or i == len(raw_files) - 1:
            for mproc in mprocs:
                mproc.join()
            mprocs = []
            print("raw file block processing done for ", path, " at ", i, " th sample out of ", len(raw_files))
    # merge all the files into a smaller number of files
    # for path in [path_wiki17, path_wiki18]:
    if do_eval:
        # we only generate 1 file for evaluation file
        assert n_subfiles == 1, "we only generate 1 file for evaluation"
    if do_eval:
        proc_files = glob(path+'/*/wiki_*_sent_eval')
    else:
        proc_files = glob(path+'/*/wiki_*_sent')
    proc_files = sorted(proc_files)
    n_proc_file_per_subfiles = math.ceil(len(proc_files) / n_subfiles)
    # f_out is the current merged subfile; a new one is opened every
    # n_proc_file_per_subfiles inputs and the previous one closed.
    f_out = None
    approx_num_articles_total = 0
    for i, proc_file in enumerate(proc_files):
        if i % n_proc_file_per_subfiles == 0:
            if n_subfiles == 1:
                if do_eval:
                    full_file = path.replace("wiki_json", 'wiki_txt/wiki_bert_eval.txt')
                else:
                    full_file = path.replace("wiki_json", 'wiki_txt/wiki_bert.txt')
            else:
                full_file = path.replace("wiki_json", 'wiki_txt/wiki_bert_{}.txt'.format(i // n_proc_file_per_subfiles))
            if f_out is not None:
                f_out.close()
            f_out = open(full_file, 'w')
        f_in = open(proc_file, 'r')
        data = f_in.read()
        f_in.close()
        f_out.write(data)
        f_in = open(proc_file, 'r')
        # blank lines separate articles, so counting them approximates
        # the number of articles merged so far
        approx_num_articles = len([x for x in f_in.readlines() if x == '\n'])
        approx_num_articles_total += approx_num_articles
        f_in.close()
        print("merged ", i, "files", len(proc_files), n_proc_file_per_subfiles, full_file, approx_num_articles_total, approx_num_articles)
if __name__ == "__main__":
# path_wiki17 = "/dfs/scratch0/zjian/bert-pretraining/data/wiki/wiki17/wiki_json"
# path_wiki18 = "/dfs/scratch0/zjian/bert-pretraining/data/wiki/wiki18/wiki_json"
# path_wiki17 = "../../data/wiki/wiki17/wiki_json"
# path_wiki18 = "../../data/wiki/wiki18/wiki_json"
path_wiki17 = sys.argv[1]
path_wiki18 = sys.argv[2]
n_subfiles = int(sys.argv[3]) # split into n subfiles tf record for efficient processing
print("wiki 17 path ", path_wiki17)
print("wiki 18 path ", path_wiki18)
# generate wiki dump article id related meta data
# process_article_ids(path_wiki17, path_wiki18)
# subsampling and get text file for tensorflow bert
common_subset_ids = subsample_wiki_id("./output/common_article_id")
wiki18_only_article_ids = subsample_wiki_id("./output/wiki18_only_article_id")
wiki17_subset_ids = sorted(common_subset_ids)
wiki18_subset_ids = sorted(common_subset_ids + wiki18_only_article_ids)
# also sample the eval set
all_common_subset_ids = subsample_wiki_id("./output/common_article_id", subset_prop=1.0)
# The input is already 90% of the original article list, we sample here the 1% of the original
wiki17_eval_subset_ids = subsample_wiki_id(list(set(all_common_subset_ids) - set(wiki17_subset_ids)),
subset_prop=0.01/0.9)
wiki17_eval_subset_ids = sorted(wiki17_eval_subset_ids)
assert len(set(wiki17_eval_subset_ids).intersection(set(wiki17_subset_ids))) == 0
assert len(set(wiki17_eval_subset_ids).intersection(set(wiki18_subset_ids))) == 0
print("wiki 17 train / wiki 17 eval / wiki 18 train subsampled size ",
len(wiki17_subset_ids), len(wiki17_eval_subset_ids), len(wiki18_subset_ids))
# generate training set
# for path, subset_ids in zip([path_wiki17, ],
# [wiki17_subset_ids, ]):
for path, subset_ids in zip([path_wiki17, path_wiki18],
[wiki17_subset_ids, wiki18_subset_ids]):
get_txt_per_sentence_file(path, subset_ids, n_subfiles, do_eval=False)
# # generate test set for wiki 17
get_txt_per_sentence_file(path_wiki17, wiki17_eval_subset_ids, 1, do_eval=True)
# get final stats
get_stat_on_final_res(path_wiki17.replace('wiki_json', 'wiki_txt/wiki_bert.txt'))
get_stat_on_final_res(path_wiki18.replace('wiki_json', 'wiki_txt/wiki_bert.txt'))
| bert-pretraining-master | src/bert-pretraining/wiki_preprocessing.py |
import json
import glob
import utils
import os
SCRIPT_FOLDER="../../script"
def bert_pretraining_lr_tuning_training():
    """Write the shell script that runs the BERT pretraining LR grid on TPUs.

    One run_pretraining.py command per learning rate; the TPU-creation
    template is kept (commented out) for reference.  NOTE: the backslash
    continuations inside run_tmp are part of the string literal.
    """
    file_name = SCRIPT_FOLDER + "/0701_bert_pretraining_lr_tuning_training"
    lrs = [0.000001, 0.00001, 0.0001, 0.001, 0.01]
    BERT_BASE_DIR = "../../data/bert"
    tpu_tmp = 'gcloud compute tpus create tpu-{} --range=10.240.{}.0 --version=1.13 --accelerator-type=v2-8 --network=default &'
    run_tmp = ('python ./third_party/bert/run_pretraining.py \
    --input_file=gs://embeddings-data2/bert-wiki/wiki17/wiki_tf_rec/part_tf_examples_*.tfrecord \
    --output_dir=gs://embeddings-ckpt/bert_pretraining_lr_tuning/pretrain_tuning_lr_{} \
    --do_train=True \
    --do_eval=True \
    --bert_config_file=../../data/bert/3_layer_bert_config.json \
    --train_batch_size=256 \
    --max_seq_length=128 \
    --max_predictions_per_seq=20 \
    --num_train_steps=250000 \
    --num_warmup_steps=2500 \
    --learning_rate={} \
    --use_tpu=True \
    --tpu_name=tpu-{} 2>&1 | tee output/pretrain_tuning_lr_{}.log &')
    with open(file_name, 'w') as f:
        #for i, lr in enumerate(lrs):
        #    cmd_str = tpu_tmp.format(i, i)
        #    f.write(cmd_str + "\n")
        for i, lr in enumerate(lrs):
            cmd_str = run_tmp.format(lr, lr, i, lr)
            f.write(cmd_str + "\n")
def bert_pretraining_lr_tuning_evaluation():
    """Write the shell script that evaluates each LR-grid checkpoint on TPU.

    One eval-only run_pretraining.py command per learning rate, each loading
    the matching 250k-step checkpoint.
    """
    file_name = SCRIPT_FOLDER + "/0701_bert_pretraining_lr_tuning_eval"
    print("cmd in ", file_name)
    lrs = [0.000001, 0.00001, 0.0001, 0.001, 0.01]
    BERT_BASE_DIR = "../../data/bert"
    tpu_tmp = 'gcloud compute tpus create tpu-{} --range=10.240.{}.0 --version=1.13 --accelerator-type=v2-8 --network=default &'
    run_tmp = ('python ./third_party/bert/run_pretraining.py \
    --input_file=gs://embeddings-data2/bert-wiki/wiki17/wiki_tf_rec/eval_full_tf_examples.tfrecord \
    --output_dir=gs://embeddings-ckpt/bert_pretraining_lr_tuning/pretrain_tuning_lr_{}_eval \
    --do_eval=True \
    --bert_config_file=../../data/bert/3_layer_bert_config.json \
    --eval_batch_size=256 \
    --init_checkpoint=gs://embeddings-ckpt/bert_pretraining_lr_tuning/pretrain_tuning_lr_{}/model.ckpt-250000 \
    --max_seq_length=128 \
    --max_predictions_per_seq=20 \
    --use_tpu=True \
    --tpu_name=tpu-0 2>&1 | tee output/pretrain_tuning_lr_{}_eval.log')
    with open(file_name, 'w') as f:
        #for i, lr in enumerate(lrs):
        #    cmd_str = tpu_tmp.format(i, i)
        #    f.write(cmd_str + "\n")
        for i, lr in enumerate(lrs):
            cmd_str = run_tmp.format(lr, lr, i, lr)
            f.write(cmd_str + "\n")
def bert_pretraining_3_seeds_different_size():
    """Emit TPU scripts to pretrain BERT over 5 hidden sizes x 3 seeds x 2 corpora.

    Also writes the per-dimension bert_config.json variants and helper
    scripts for launching TPUs, copying logs/configs to GCS, and converting
    the TF checkpoints to PyTorch.
    """
    # the optimial learning rate from grid search using 768 dimension is 0.0001
    # generate script to launch tpu
    dims = [192, 384, 768, 1536, 3072]
    # Derive a config per hidden size by rewriting the 768 in the base config.
    for dim in dims:
        with open("../../data/bert/3_layer_dim_{}_bert_config.json".format(dim), "w") as f_out:
            with open("../../data/bert/3_layer_bert_config.json", "r") as f_in:
                for line in f_in.readlines():
                    if "hidden_size" in line:
                        line = line.replace("768", str(dim))
                    f_out.write(line)
    run_tmp = ('python ./third_party/bert/run_pretraining.py --rand_seed={} \
    --input_file=gs://embeddings-data2/bert-wiki/{}/wiki_tf_rec/part_tf_examples_*.tfrecord \
    --output_dir=gs://embeddings-ckpt/bert_pretraining_3_seeds/pretrain_seed_{}_dim_{}_{} \
    --do_train=True \
    --do_eval=True \
    --bert_config_file=../../data/bert/3_layer_dim_{}_bert_config.json \
    --train_batch_size=256 \
    --max_seq_length=128 \
    --max_predictions_per_seq=20 \
    --num_train_steps=250000 \
    --num_warmup_steps=2500 \
    --learning_rate=0.0001 \
    --use_tpu=True \
    --tpu_name=tpu-{} 2>&1 | tee output/pretrain_seed_{}_dim_{}_{}.log \n')
    # Small dims (192/384/768) share one TPU script; 1536 and 3072 each get
    # their own TPU.
    tpu_id = 0
    for name in ['wiki17', 'wiki18']:
        for seed in [1,2,3]:
            file_name = SCRIPT_FOLDER + "/0701_bert_pretraining_all_seed_tpu_{}".format(tpu_id)
            print("cmd saved in ", file_name)
            with open(file_name, "w") as f:
                dim = 192
                cmd = run_tmp.format(seed, name, seed, dim, name, dim, tpu_id, seed, dim, name)
                f.write(cmd)
                dim = 384
                cmd = run_tmp.format(seed, name, seed, dim, name, dim, tpu_id, seed, dim, name)
                f.write(cmd)
                dim = 768
                cmd = run_tmp.format(seed, name, seed, dim, name, dim, tpu_id, seed, dim, name)
                f.write(cmd)
            tpu_id += 1
            file_name = SCRIPT_FOLDER + "/0701_bert_pretraining_all_seed_tpu_{}".format(tpu_id)
            print("cmd saved in ", file_name)
            with open(file_name, "w") as f:
                dim = 1536
                cmd = run_tmp.format(seed, name, seed, dim, name, dim, tpu_id, seed, dim, name)
                f.write(cmd)
            tpu_id += 1
            file_name = SCRIPT_FOLDER + "/0701_bert_pretraining_all_seed_tpu_{}".format(tpu_id)
            print("cmd saved in ", file_name)
            with open(file_name, "w") as f:
                dim = 3072
                cmd = run_tmp.format(seed, name, seed, dim, name, dim, tpu_id, seed, dim, name)
                f.write(cmd)
            tpu_id += 1
    # for launch tpu machines
    file_name = SCRIPT_FOLDER + "/0701_bert_pretraining_all_seed_tpu_launch"
    with open(file_name, "w") as f:
        for i in range(tpu_id):
            cmd = "gcloud compute tpus create tpu-{} --range=10.240.{}.0 --version=1.13 --accelerator-type=v2-8 --network=default & \n".format(i, i)
            f.write(cmd)
    print("cmd saved in ", file_name)
    # for copy logs into folders
    file_name = SCRIPT_FOLDER + "/0701_bert_pretraining_all_seed_copy_logs"
    with open(file_name, "w") as f:
        for name in ['wiki17', 'wiki18']:
            for seed in [1,2,3]:
                for dim in dims:
                    cmd = "gsutil cp output/pretrain_seed_{}_dim_{}_{}.log gs://embeddings-ckpt/bert_pretraining_3_seeds/pretrain_seed_{}_dim_{}_{} \n".format(seed, dim, name, seed, dim, name)
                    f.write(cmd)
    print("cmd saved in ", file_name)
    file_name = SCRIPT_FOLDER + "/0701_bert_pretraining_all_seed_copy_configs"
    cmd_tmp = ("gsutil cp ../../data/bert/3_layer_dim_{}_bert_config.json \
    gs://embeddings-ckpt/bert_pretraining_3_seeds/pretrain_seed_{}_dim_{}_{}/bert_config.json")
    with open(file_name, "w") as f:
        for name in ['wiki17', 'wiki18']:
            for seed in [1,2,3]:
                for dim in dims:
                    cmd = cmd_tmp.format(dim, seed, dim, name)
                    f.write(cmd + "\n")
    print("cmd saved in ", file_name)
    file_name = SCRIPT_FOLDER + "/0701_bert_pretraining_all_seed_download_bucket"
    # for downloading to local disk
    # NOTE: this cmd_tmp is built but never written to file_name here.
    cmd_tmp = ("gsutil cp -r gs://embeddings-ckpt/bert_pretraining_3_seeds/* \
    ../../results/bert_ckpt/")
    # generate bert pytorch ckpt
    file_name = SCRIPT_FOLDER + "/0701_bert_pretraining_all_seed_trans_to_pytorch"
    folders = glob.glob("../../results/bert_ckpt/*")
    cmd_tmp = ('pytorch_pretrained_bert convert_tf_checkpoint_to_pytorch '
               '{}/model.ckpt-250000 '
               '{}/bert_config.json '
               '{}/pytorch_model.bin')
    with open(file_name, "w") as f:
        for folder in folders:
            cmd = cmd_tmp.format(folder, folder, folder)
            f.write(cmd + "\n")
    print("cmd saved in ", file_name)
def get_feature_path(exp_name, dataset, ckpt_folder, nbit=32, date_str=None):
    """Build the output folder path for extracted features.

    Layout: ../../results/features/<exp>_<date>/<dataset>/nbit_<nbit>/<ckpt name>.
    When date_str is None, today's date (utils.get_date_str) is used.
    """
    date_part = utils.get_date_str() if date_str is None else date_str
    exp_path = "../../results/features/{}_{}".format(exp_name, date_part)
    ckpt_name = ckpt_folder.split("/")[-1]
    return exp_path + "/{}/nbit_{}/{}".format(dataset, nbit, ckpt_name)
def get_sentiment_data_path():
    """Return the folder holding the sentiment classification datasets."""
    data_root = "./third_party/sentence_classification/data"
    return data_root
def generate_all_sentiment_features_dimensionality():
    """Write scripts that extract BERT features for the dimensionality study.

    First script copies vocab.txt into every checkpoint folder; second
    script runs extract_features.py per (checkpoint, dataset, nbit, split)
    writing .npy feature files.
    """
    ckpt_folders = glob.glob("../../results/bert_ckpt/*")
    # datasets = ['mr', 'sst', 'subj', 'mpqa']
    datasets = ['sst',]
    nbits = [32]
    exp_name = "dimensionality"
    data_path = get_sentiment_data_path()
    script_name = SCRIPT_FOLDER + "/0703_generate_features_for_dimensionality_copy_vocab_file"
    # copy the vocab files to the ckpt folders
    with open(script_name, "w") as f:
        cmd_tmp = "cp ../../data/bert/vocab.txt {}/"
        for ckpt_path in ckpt_folders:
            cmd = cmd_tmp.format(ckpt_path)
            f.write(cmd + "\n")
    print("cmd saved in ", script_name)
    # generate the cmd to generate features
    script_name = SCRIPT_FOLDER + "/0703_generate_features_for_dimensionality"
    with open(script_name, "w") as f:
        cmd_tmp = ('python ./third_party/pytorch-pretrained-BERT/examples/extract_features.py '
                   '--input_file {} '
                   '--output_file {} '
                   '--bert_model {} '
                   '--do_lower_case '
                   '--layer 2 '
                   '--max_seq_length 128 '
                   '--for_sentiment')
        for ckpt_path in ckpt_folders:
            for dataset in datasets:
                for nbit in nbits:
                    output_path = get_feature_path(exp_name,
                        dataset=dataset, ckpt_folder=ckpt_path, nbit=nbit)
                    for part in ['train', 'test', 'heldout']:
                        input_file = data_path + "/{}.{}.txt".format(dataset, part)
                        output_file = output_path + "/{}.{}.feature.npy".format(dataset, part)
                        cmd = cmd_tmp.format(input_file, output_file, ckpt_path)
                        f.write(cmd + "\n")
    print("cmd saved in ", script_name)
def generate_all_sentiment_features_pytorch_file():
    """Write the script that extracts features into .npz files for all datasets.

    Same extract_features.py invocation as the dimensionality variant, but
    covers all four sentiment datasets and writes .feature.npz outputs.
    """
    #for corpus in ['wiki17', 'wiki18']:
    #    ckpt_folders = glob.glob("../../results/bert_ckpt/*{}".format(corpus))
    # generate the cmd to generate features
    ckpt_folders = glob.glob("../../results/bert_ckpt/*")
    datasets = ['mr', 'subj', 'mpqa', 'sst']
    nbits = [32]
    exp_name = "dimensionality"
    data_path = get_sentiment_data_path()
    # script_name = SCRIPT_FOLDER + "/0706_generate_features_for_dimensionality_pytorch_{}".format(corpus)
    script_name = SCRIPT_FOLDER + "/0706_generate_features_for_dimensionality_pytorch"
    with open(script_name, "w") as f:
        cmd_tmp = ('python ./third_party/pytorch-pretrained-BERT/examples/extract_features.py '
                   '--input_file {} '
                   '--output_file {} '
                   '--bert_model {} '
                   '--do_lower_case '
                   '--layer 2 '
                   '--max_seq_length 128 '
                   '--for_sentiment')
        for ckpt_path in ckpt_folders:
            for dataset in datasets:
                for nbit in nbits:
                    output_path = get_feature_path(exp_name,
                        dataset=dataset, ckpt_folder=ckpt_path, nbit=nbit)
                    for part in ['train', 'test', 'heldout']:
                        input_file = data_path + "/{}.{}.txt".format(dataset, part)
                        output_file = output_path + "/{}.{}.feature.npz".format(dataset, part)
                        cmd = cmd_tmp.format(input_file, output_file, ckpt_path)
                        f.write(cmd + "\n")
    print("cmd saved in ", script_name)
def get_pred_path_from_feature_path(exp_name, dataset, feat_folder, nbit=32, date_str=None):
    """Map a feature folder to the matching predictions output folder.

    Layout: ../../results/predictions/<exp>_<date>/<dataset>/nbit_<nbit>/<feat name>,
    returned as an absolute path.  date_str=None uses today's date.
    """
    date_part = utils.get_date_str() if date_str is None else date_str
    exp_path = "../../results/predictions/{}_{}".format(exp_name, date_part)
    feat_name = feat_folder.split("/")[-1]
    return os.path.abspath(exp_path + "/{}/nbit_{}/{}".format(dataset, nbit, feat_name))
def tune_lr_bert_sentiment_with_wiki17_768_dim_linear_model():
    """Write qsub commands for the linear-classifier LR grid on wiki17 768-dim features.

    One job per (dataset, lr, seed); seed and feature dim are parsed out of
    the feature folder names.  Exactly 3 seed folders per dataset expected.
    """
    script_name = SCRIPT_FOLDER + "/0706_generate_prediction_for_dimensionality_lr_tuning"
    datasets = ['mr', 'subj', 'mpqa', 'sst']
    nbit = 32
    #lrs = [0.1, 0.01, 0.001, 0.0001, 0.00001]
    lrs = [0.000001, 0.0000001]
    exp_name = "dimensionality"
    cmd_tmp = ('qsub -V -b y -wd /home/zjian/bert-pretraining/wd '
               '/home/zjian/bert-pretraining/src/bert-pretraining/gc_env.sh '
               '\\"python /home/zjian/bert-pretraining/src/bert-pretraining/third_party/sentence_classification/train_classifier_feat_input.py '
               '--la --feat_input --feat_input_folder {} --feat_dim {} '
               '--dataset {} --out {} --model_seed {} --lr {}\\"')
    with open(script_name, "w") as f:
        for dataset in datasets:
            for lr in lrs:
                feature_folders = glob.glob("../../results/features/dimensionality_2019-07-06/{}/nbit_32/*dim_768*wiki17".format(dataset))
                assert len(feature_folders) == 3
                for feature_folder in feature_folders:
                    feature_folder = os.path.abspath(feature_folder)
                    seed = int(feature_folder.split("seed_")[1].split("_")[0])
                    pred_path = get_pred_path_from_feature_path(exp_name, dataset, feature_folder, nbit)
                    pred_path += "_lr_{}".format(str(lr))
                    feat_dim = int(feature_folder.split("dim_")[1].split("_")[0])
                    cmd = cmd_tmp.format(feature_folder, feat_dim, dataset, pred_path, str(seed), str(lr))
                    f.write(cmd + "\n")
    print("cmd saved in ", script_name)
def get_best_lr_for_linear_bert_sentiment():
    """Pick, per dataset, the learning rate with the lowest mean validation error.

    Aggregates final_results.json over the 3 seeds for each lr and returns
    a dict: dataset -> best lr.
    """
    datasets = ['mr', 'subj', 'mpqa', 'sst']
    lrs = [0.1, 0.01, 0.001, 0.0001, 0.00001, 0.000001, 0.0000001]
    metric = "best_valid_err"
    best_lr = {}
    for dataset in datasets:
        all_json_regex = "../../results/predictions/dimensionality_2019-07-07/{}/nbit_32/*/final_results.json".format(dataset)
        results = utils.gather_results(all_json_regex)
        results = [utils.flatten_dict(result) for result in results]
        best_err = 1.0
        for lr in lrs:
            keys = {"lr": [lr]}
            subset_results = utils.extract_result_subset(results, keys)
            # 3 seeds per (dataset, lr) combination
            assert len(subset_results) == 3
            ave, std = utils.stats_on_subset_json(subset_results, metric)
            print(dataset, " lr ", lr, " val err ave/std", ave, std)
            if ave < best_err:
                best_err = ave
                best_lr[dataset] = lr
    print(best_lr)
    return best_lr
def generate_all_predictions_for_linear_bert_sentiment_dimensionality():
    """Write qsub commands to train linear classifiers on all dimensionality features.

    Runs each feature folder twice: once with the validation-selected lr
    ("opt_lr" experiment) and once with the default lr 0.001.
    """
    best_lr = get_best_lr_for_linear_bert_sentiment()
    # script_name = SCRIPT_FOLDER + "/0707_generate_prediction_for_dimensionality_3_seeds"
    # datasets = ['mr', 'subj', 'mpqa', 'sst']
    # script_name = SCRIPT_FOLDER + "/0707_generate_prediction_for_dimensionality_3_seeds_sst_rerun"
    # datasets = ['sst']
    # rerun with the best validation err based lr for optimal
    script_name = SCRIPT_FOLDER + "/0709_generate_prediction_for_dimensionality_3_seeds_sst_rerun"
    datasets = ['sst']
    nbit = 32
    exp_names = ["dimensionality_opt_lr_3_seeds", "dimensionality_default_lr_3_seeds"]
    cmd_tmp = ('qsub -V -b y -wd /home/zjian/bert-pretraining/wd '
               '/home/zjian/bert-pretraining/src/bert-pretraining/gc_env.sh '
               '\\"python /home/zjian/bert-pretraining/src/bert-pretraining/third_party/sentence_classification/train_classifier_feat_input.py '
               '--la --feat_input --feat_input_folder {} --feat_dim {} '
               '--dataset {} --out {} --model_seed {} --lr {}\\"')
    with open(script_name, "w") as f:
        for exp_name in exp_names:
            for dataset in datasets:
                if "default" not in exp_name:
                    lr = best_lr[dataset]
                else:
                    lr = 0.001
                feature_folders = glob.glob("../../results/features/dimensionality_2019-07-06/{}/nbit_32/*".format(dataset))
                #assert len(feature_folders) == 3
                for feature_folder in feature_folders:
                    feature_folder = os.path.abspath(feature_folder)
                    seed = int(feature_folder.split("seed_")[1].split("_")[0])
                    pred_path = get_pred_path_from_feature_path(exp_name, dataset, feature_folder, nbit)
                    pred_path += "_lr_{}".format(str(lr))
                    feat_dim = int(feature_folder.split("dim_")[1].split("_")[0])
                    cmd = cmd_tmp.format(feature_folder, feat_dim, dataset, pred_path, str(seed), str(lr))
                    f.write(cmd + "\n")
    print("cmd saved in ", script_name)
# bert dimensionality experiments using lstm
def tune_lstm_bert_sentiment_with_wiki17_768_dim():
    """Write qsub commands for the LSTM-classifier LR grid on wiki17 768-dim features.

    Same structure as the linear tuning script but with --lstm and a fixed
    hidden size of 768 (--d 768).
    """
    # script_name = SCRIPT_FOLDER + "/0707_generate_prediction_for_dimensionality_lr_tuning_lstm"
    # datasets = ['mr', 'subj', 'mpqa', 'sst']
    script_name = SCRIPT_FOLDER + "/0708_generate_prediction_for_dimensionality_lr_tuning_lstm_sst_only"
    datasets = ['sst']
    nbit = 32
    lrs = [0.01, 0.001, 0.0001, 0.00001, 0.000001,]
    exp_name = "dimensionality_lstm_lr_tuning"
    cmd_tmp = ('qsub -V -b y -wd /home/zjian/bert-pretraining/wd '
               '/home/zjian/bert-pretraining/src/bert-pretraining/gc_env.sh '
               '\\"python /home/zjian/bert-pretraining/src/bert-pretraining/third_party/sentence_classification/train_classifier_feat_input.py '
               '--lstm --feat_input --feat_input_folder {} --feat_dim {} '
               '--dataset {} --out {} --model_seed {} --lr {} --d 768 \\"')
    with open(script_name, "w") as f:
        for dataset in datasets:
            for lr in lrs:
                feature_folders = glob.glob("../../results/features/dimensionality_2019-07-06/{}/nbit_32/*dim_768*wiki17".format(dataset))
                assert len(feature_folders) == 3
                for feature_folder in feature_folders:
                    feature_folder = os.path.abspath(feature_folder)
                    seed = int(feature_folder.split("seed_")[1].split("_")[0])
                    pred_path = get_pred_path_from_feature_path(exp_name, dataset, feature_folder, nbit)
                    pred_path += "_lr_{}".format(str(lr))
                    feat_dim = int(feature_folder.split("dim_")[1].split("_")[0])
                    cmd = cmd_tmp.format(feature_folder, feat_dim, dataset, pred_path, str(seed), str(lr))
                    f.write(cmd + "\n")
    print("cmd saved in ", script_name)
def get_seed_from_folder_name(folder):
    """Parse the integer seed out of a folder name like '..._seed_<n>_...'."""
    after_marker = folder.split("seed_")[1]
    return int(after_marker.split("_")[0])
def get_dataset_from_folder_name(folder):
    """Extract the dataset name: the path component preceding '/nbit_...'."""
    prefix = folder.split("/nbit_")[0]
    return prefix.split("/")[-1]
def get_feature_bit(folder):
    """Read the 'nbit' field from <folder>/final_results.json."""
    result_file = folder + "/final_results.json"
    with open(result_file, 'r') as f:
        return json.load(f)["nbit"]
# compress all 768 dimensional features
def compress_768_dim_features():
    """Write qsub commands that quantize every 768-dim feature file to each nbit.

    18 full-precision files expected (3 seeds x 2 corpora x 3 splits); each
    is compressed at every bit width in nbits via run_compress.py.
    """
    script_name = SCRIPT_FOLDER + "/0708_generate_compressed_768_dim_features"
    datasets = ['mr', 'subj', 'mpqa', 'sst']
    nbits = [32, 16, 8, 4, 2, 1]
    exp_name = "compression_768_dim"
    cmd_tmp = ('qsub -V -b y -wd /home/zjian/bert-pretraining/wd '
               '/home/zjian/bert-pretraining/src/bert-pretraining/gc_env.sh '
               '\\"python /home/zjian/bert-pretraining/src/bert-pretraining/run_compress.py '
               '--input_file {} --out_folder {} '
               '--nbit {} --dataset {} --seed {} \\"')
    with open(script_name, "w") as f:
        for dataset in datasets:
            for nbit in nbits:
                full_prec_files = glob.glob("/home/zjian/bert-pretraining/results/features/dimensionality_2019-07-06/{}/nbit_32/*dim_768*/*.feature.npz".format(dataset))
                #print(full_prec_files)
                assert len(full_prec_files) == 18 # 3 seeds for wiki 17 and wiki 18 corpora, each of these settings have 3 feature files
                for feat_file in full_prec_files:
                    # we hack the ckpt_folder argument to reuse the get_feature path function
                    dummy_ckpt_folder = feat_file.split("/")[-2]
                    feat_path_comp = os.path.abspath(get_feature_path(exp_name, dataset,
                        ckpt_folder=dummy_ckpt_folder, nbit=nbit, date_str=None))
                    seed = get_seed_from_folder_name(feat_file)
                    cmd = cmd_tmp.format(feat_file, feat_path_comp, nbit, dataset, seed)
                    f.write(cmd + "\n")
    print("cmd saved in ", script_name)
def generate_all_predictions_for_linear_bert_sentiment_compression():
    """Write qsub commands to train linear classifiers on the compressed features.

    Covers every (dataset, nbit) feature folder under the compression
    experiment, once with the validation-selected lr and once with the
    default 0.001; nbit is read back from each folder's final_results.json
    and cross-checked against the folder path.
    """
    best_lr = get_best_lr_for_linear_bert_sentiment()
    script_name = SCRIPT_FOLDER + "/0707_generate_prediction_for_compression_3_seeds"
    datasets = ['mr', 'subj', 'mpqa', 'sst']
    nbits = [32, 16, 8, 4, 2, 1]
    exp_names = ["compression_opt_lr_3_seeds", "compression_default_lr_3_seeds"]
    cmd_tmp = ('qsub -V -b y -wd /home/zjian/bert-pretraining/wd '
               '/home/zjian/bert-pretraining/src/bert-pretraining/gc_env.sh '
               '\\"python /home/zjian/bert-pretraining/src/bert-pretraining/third_party/sentence_classification/train_classifier_feat_input.py '
               '--la --feat_input --feat_input_folder {} --feat_dim {} '
               '--dataset {} --out {} --model_seed {} --lr {}\\"')
    with open(script_name, "w") as f:
        for exp_name in exp_names:
            for dataset in datasets:
                if "default" not in exp_name:
                    lr = best_lr[dataset]
                else:
                    lr = 0.001
                feature_folders = glob.glob("/home/zjian/bert-pretraining/results/features/compression_768_dim_2019-07-08/{}/nbit_*/*".format(dataset))
                #assert len(feature_folders) == 3
                for feature_folder in feature_folders:
                    feature_folder = os.path.abspath(feature_folder)
                    nbit = get_feature_bit(feature_folder)
                    assert "nbit_{}".format(nbit) in feature_folder
                    seed = get_seed_from_folder_name(feature_folder)
                    pred_path = get_pred_path_from_feature_path(exp_name, dataset, feature_folder, nbit)
                    pred_path += "_lr_{}".format(str(lr))
                    print(feature_folder)
                    # dim_ appears twice in the path; the second occurrence is the feature dim
                    feat_dim = int(feature_folder.split("dim_")[2].split("_")[0])
                    assert feat_dim == 768
                    cmd = cmd_tmp.format(feature_folder, feat_dim, dataset, pred_path, str(seed), str(lr))
                    f.write(cmd + "\n")
    print("cmd saved in ", script_name)
def generate_aligned_wiki18_full_prec_features():
    """Write qsub commands that procrustes-align wiki18 features to wiki17.

    For every wiki18 train feature file the matching wiki17 file is used as
    the rotation reference; outputs land in a sibling "_aligned" folder.
    """
    train_feat_files = glob.glob("/home/zjian/bert-pretraining/results/features/dimensionality_2019-07-06/*/*/*wiki18/*.train.feature.npz")
    assert len(train_feat_files) == 4 * 3 * 5 # 4 dataset, 3 seeds, 5 dim
    script_name = SCRIPT_FOLDER + "/0708_generate_aligned_wiki18_full_prec_features"
    cmd_tmp = ('qsub -V -b y -wd /home/zjian/bert-pretraining/wd '
               '/home/zjian/bert-pretraining/src/bert-pretraining/gc_env.sh '
               '\\"python /home/zjian/bert-pretraining/src/bert-pretraining/run_compress.py '
               '--job_type procrustes --input_file {} --procrustes_ref_input_file {} --out_folder {} '
               '--dataset {} --seed {} \\"')
    with open(script_name, "w") as f:
        for train_feat_file in train_feat_files:
            train_feat_ref_file = train_feat_file.replace("wiki18", "wiki17")
            out_folder = os.path.dirname(train_feat_file) + "_aligned"
            seed = int(get_seed_from_folder_name(os.path.dirname(train_feat_file)))
            dataset = get_dataset_from_folder_name(os.path.dirname(train_feat_file))
            cmd = cmd_tmp.format(train_feat_file, train_feat_ref_file, out_folder, dataset, seed)
            f.write(cmd + "\n")
    print("cmd saved in ", script_name)
# compress all 768 dimensional features for aligned wiki18 features
def compress_768_dim_features_aligned_wiki18():
    """Write a qsub script quantizing the aligned-wiki18 768-dim features.

    For each dataset and bit width, globs the full-precision aligned wiki18
    feature files and queues run_compress.py jobs whose outputs go under the
    "compression_768_dim" experiment folder.
    """
    script_name = SCRIPT_FOLDER + "/0708_generate_compressed_768_dim_features_aligned_wiki18"
    datasets = ['mr', 'subj', 'mpqa', 'sst']
    nbits = [32, 16, 8, 4, 2, 1]
    exp_name = "compression_768_dim"
    cmd_tmp = ('qsub -V -b y -wd /home/zjian/bert-pretraining/wd '
        '/home/zjian/bert-pretraining/src/bert-pretraining/gc_env.sh '
        '\\"python /home/zjian/bert-pretraining/src/bert-pretraining/run_compress.py '
        '--input_file {} --out_folder {} '
        '--nbit {} --dataset {} --seed {} \\"')
    with open(script_name, "w") as f:
        for dataset in datasets:
            for nbit in nbits:
                full_prec_files = glob.glob("/home/zjian/bert-pretraining/results/features/dimensionality_2019-07-06/{}/nbit_32/*dim_768*_wiki18_aligned/*.feature.npz".format(dataset))
                # print(len(full_prec_files), full_prec_files)
                assert len(full_prec_files) == 9 # 3 seeds for wiki 18 corpora, each of these settings have 3 feature files
                for feat_file in full_prec_files:
                    # we hack the ckpt_folder argument to reuse the get_feature path function
                    dummy_ckpt_folder = feat_file.split("/")[-2]
                    feat_path_comp = os.path.abspath(get_feature_path(exp_name, dataset,
                        ckpt_folder=dummy_ckpt_folder, nbit=nbit, date_str="2019-07-08"))
                    seed = get_seed_from_folder_name(feat_file)
                    cmd = cmd_tmp.format(feat_file, feat_path_comp, nbit, dataset, seed)
                    f.write(cmd + "\n")
    print("cmd saved in ", script_name)
def generate_all_predictions_for_linear_bert_sentiment_compression_aligned_wiki18():
    """Write a qsub script training linear classifiers on compressed
    aligned-wiki18 768-dim features, for both opt-lr and default-lr runs.
    """
    best_lr = get_best_lr_for_linear_bert_sentiment()
    script_name = SCRIPT_FOLDER + "/0708_generate_prediction_for_compression_3_seeds_aligned_wiki18"
    datasets = ['mr', 'subj', 'mpqa', 'sst']
    # NOTE(review): nbits is unused below; the bit width is recovered from
    # each feature folder name via get_feature_bit instead.
    nbits = [32, 16, 8, 4, 2, 1]
    exp_names = ["compression_opt_lr_3_seeds", "compression_default_lr_3_seeds"]
    cmd_tmp = ('qsub -V -b y -wd /home/zjian/bert-pretraining/wd '
        '/home/zjian/bert-pretraining/src/bert-pretraining/gc_env.sh '
        '\\"python /home/zjian/bert-pretraining/src/bert-pretraining/third_party/sentence_classification/train_classifier_feat_input.py '
        '--la --feat_input --feat_input_folder {} --feat_dim {} '
        '--dataset {} --out {} --model_seed {} --lr {}\\"')
    with open(script_name, "w") as f:
        for exp_name in exp_names:
            for dataset in datasets:
                # "default" experiments use lr=0.001; otherwise the tuned lr
                if "default" not in exp_name:
                    lr = best_lr[dataset]
                else:
                    lr = 0.001
                feature_folders = glob.glob("/home/zjian/bert-pretraining/results/features/compression_768_dim_2019-07-08/{}/nbit_*/*wiki18_aligned".format(dataset))
                #assert len(feature_folders) == 3
                for feature_folder in feature_folders:
                    feature_folder = os.path.abspath(feature_folder)
                    nbit = get_feature_bit(feature_folder)
                    assert "nbit_{}".format(nbit) in feature_folder
                    seed = get_seed_from_folder_name(feature_folder)
                    pred_path = get_pred_path_from_feature_path(exp_name, dataset, feature_folder, nbit, date_str="2019-07-08")
                    pred_path += "_lr_{}".format(str(lr))
                    print(feature_folder)
                    # second "dim_" component holds the feature dimension
                    feat_dim = int(feature_folder.split("dim_")[2].split("_")[0])
                    assert feat_dim == 768
                    cmd = cmd_tmp.format(feature_folder, feat_dim, dataset, pred_path, str(seed), str(lr))
                    f.write(cmd + "\n")
    print("cmd saved in ", script_name)
def generate_all_predictions_for_linear_bert_sentiment_dimensionality_wiki18_aligned():
    """Write a qsub script training linear classifiers on full-precision
    aligned-wiki18 features across the dimensionality sweep.
    """
    best_lr = get_best_lr_for_linear_bert_sentiment()
    script_name = SCRIPT_FOLDER + "/0708_generate_prediction_for_dimensionality_3_seeds_wiki18_aligned"
    datasets = ['mr', 'subj', 'mpqa', 'sst']
    # full precision features only in this sweep
    nbit = 32
    exp_names = ["dimensionality_opt_lr_3_seeds", "dimensionality_default_lr_3_seeds"]
    cmd_tmp = ('qsub -V -b y -wd /home/zjian/bert-pretraining/wd '
        '/home/zjian/bert-pretraining/src/bert-pretraining/gc_env.sh '
        '\\"python /home/zjian/bert-pretraining/src/bert-pretraining/third_party/sentence_classification/train_classifier_feat_input.py '
        '--la --feat_input --feat_input_folder {} --feat_dim {} '
        '--dataset {} --out {} --model_seed {} --lr {}\\"')
    with open(script_name, "w") as f:
        for exp_name in exp_names:
            for dataset in datasets:
                # "default" experiments use lr=0.001; otherwise the tuned lr
                if "default" not in exp_name:
                    lr = best_lr[dataset]
                else:
                    lr = 0.001
                feature_folders = glob.glob("../../results/features/dimensionality_2019-07-06/{}/nbit_32/*wiki18_aligned".format(dataset))
                #assert len(feature_folders) == 3
                for feature_folder in feature_folders:
                    feature_folder = os.path.abspath(feature_folder)
                    # pretrain seed and feature dim are parsed from the
                    # "seed_<s>" / "dim_<d>" folder-name components
                    seed = int(feature_folder.split("seed_")[1].split("_")[0])
                    pred_path = get_pred_path_from_feature_path(exp_name, dataset, feature_folder, nbit, date_str="2019-07-07")
                    pred_path += "_lr_{}".format(str(lr))
                    feat_dim = int(feature_folder.split("dim_")[1].split("_")[0])
                    cmd = cmd_tmp.format(feature_folder, feat_dim, dataset, pred_path, str(seed), str(lr))
                    f.write(cmd + "\n")
    print("cmd saved in ", script_name)
def rerun_failed_3072_dim_procrustes_and_training():
    """Record of the manually re-submitted commands for the failed 3072-dim
    sst runs.

    NOTE(review): the body consists of bare string literals, which are
    no-op expression statements — calling this function executes nothing.
    It appears to be intentional documentation of commands that were run by
    hand (Procrustes alignment followed by classifier training).
    """
    # generate the procrustes wiki 18 features
    'qsub -V -b y -wd /home/zjian/bert-pretraining/wd /home/zjian/bert-pretraining/src/bert-pretraining/gc_env.sh \"python /home/zjian/bert-pretraining/src/bert-pretraining/run_compress.py --job_type procrustes --input_file /home/zjian/bert-pretraining/results/features/dimensionality_2019-07-06/sst/nbit_32/pretrain_seed_1_dim_3072_wiki18/sst.train.feature.npz --procrustes_ref_input_file /home/zjian/bert-pretraining/results/features/dimensionality_2019-07-06/sst/nbit_32/pretrain_seed_1_dim_3072_wiki17/sst.train.feature.npz --out_folder /home/zjian/bert-pretraining/results/features/dimensionality_2019-07-06/sst/nbit_32/pretrain_seed_1_dim_3072_wiki18_aligned --dataset sst --seed 1 \"'
    'qsub -V -b y -wd /home/zjian/bert-pretraining/wd /home/zjian/bert-pretraining/src/bert-pretraining/gc_env.sh \"python /home/zjian/bert-pretraining/src/bert-pretraining/run_compress.py --job_type procrustes --input_file /home/zjian/bert-pretraining/results/features/dimensionality_2019-07-06/sst/nbit_32/pretrain_seed_2_dim_3072_wiki18/sst.train.feature.npz --procrustes_ref_input_file /home/zjian/bert-pretraining/results/features/dimensionality_2019-07-06/sst/nbit_32/pretrain_seed_2_dim_3072_wiki17/sst.train.feature.npz --out_folder /home/zjian/bert-pretraining/results/features/dimensionality_2019-07-06/sst/nbit_32/pretrain_seed_2_dim_3072_wiki18_aligned --dataset sst --seed 2 \"'
    'qsub -V -b y -wd /home/zjian/bert-pretraining/wd /home/zjian/bert-pretraining/src/bert-pretraining/gc_env.sh \"python /home/zjian/bert-pretraining/src/bert-pretraining/run_compress.py --job_type procrustes --input_file /home/zjian/bert-pretraining/results/features/dimensionality_2019-07-06/sst/nbit_32/pretrain_seed_3_dim_3072_wiki18/sst.train.feature.npz --procrustes_ref_input_file /home/zjian/bert-pretraining/results/features/dimensionality_2019-07-06/sst/nbit_32/pretrain_seed_3_dim_3072_wiki17/sst.train.feature.npz --out_folder /home/zjian/bert-pretraining/results/features/dimensionality_2019-07-06/sst/nbit_32/pretrain_seed_3_dim_3072_wiki18_aligned --dataset sst --seed 3 \"'
    # rerun 3072 dimensional training
    'qsub -V -b y -wd /home/zjian/bert-pretraining/wd /home/zjian/bert-pretraining/src/bert-pretraining/gc_env.sh \"python /home/zjian/bert-pretraining/src/bert-pretraining/third_party/sentence_classification/train_classifier_feat_input.py --la --feat_input --feat_input_folder /home/zjian/bert-pretraining/results/features/dimensionality_2019-07-06/sst/nbit_32/pretrain_seed_1_dim_3072_wiki18_aligned --feat_dim 3072 --dataset sst --out /home/zjian/bert-pretraining/results/predictions/dimensionality_opt_lr_3_seeds_2019-07-07/sst/nbit_32/pretrain_seed_1_dim_3072_wiki18_aligned_lr_1e-05 --model_seed 1 --lr 1e-05\"'
    'qsub -V -b y -wd /home/zjian/bert-pretraining/wd /home/zjian/bert-pretraining/src/bert-pretraining/gc_env.sh \"python /home/zjian/bert-pretraining/src/bert-pretraining/third_party/sentence_classification/train_classifier_feat_input.py --la --feat_input --feat_input_folder /home/zjian/bert-pretraining/results/features/dimensionality_2019-07-06/sst/nbit_32/pretrain_seed_1_dim_3072_wiki18_aligned --feat_dim 3072 --dataset sst --out /home/zjian/bert-pretraining/results/predictions/dimensionality_default_lr_3_seeds_2019-07-07/sst/nbit_32/pretrain_seed_1_dim_3072_wiki18_aligned_lr_0.001 --model_seed 1 --lr 0.001\"'
    'qsub -V -b y -wd /home/zjian/bert-pretraining/wd /home/zjian/bert-pretraining/src/bert-pretraining/gc_env.sh \"python /home/zjian/bert-pretraining/src/bert-pretraining/third_party/sentence_classification/train_classifier_feat_input.py --la --feat_input --feat_input_folder /home/zjian/bert-pretraining/results/features/dimensionality_2019-07-06/sst/nbit_32/pretrain_seed_2_dim_3072_wiki18_aligned --feat_dim 3072 --dataset sst --out /home/zjian/bert-pretraining/results/predictions/dimensionality_opt_lr_3_seeds_2019-07-07/sst/nbit_32/pretrain_seed_2_dim_3072_wiki18_aligned_lr_1e-05 --model_seed 2 --lr 1e-05\"'
    'qsub -V -b y -wd /home/zjian/bert-pretraining/wd /home/zjian/bert-pretraining/src/bert-pretraining/gc_env.sh \"python /home/zjian/bert-pretraining/src/bert-pretraining/third_party/sentence_classification/train_classifier_feat_input.py --la --feat_input --feat_input_folder /home/zjian/bert-pretraining/results/features/dimensionality_2019-07-06/sst/nbit_32/pretrain_seed_2_dim_3072_wiki18_aligned --feat_dim 3072 --dataset sst --out /home/zjian/bert-pretraining/results/predictions/dimensionality_default_lr_3_seeds_2019-07-07/sst/nbit_32/pretrain_seed_2_dim_3072_wiki18_aligned_lr_0.001 --model_seed 2 --lr 0.001\"'
    'qsub -V -b y -wd /home/zjian/bert-pretraining/wd /home/zjian/bert-pretraining/src/bert-pretraining/gc_env.sh \"python /home/zjian/bert-pretraining/src/bert-pretraining/third_party/sentence_classification/train_classifier_feat_input.py --la --feat_input --feat_input_folder /home/zjian/bert-pretraining/results/features/dimensionality_2019-07-06/sst/nbit_32/pretrain_seed_3_dim_3072_wiki18_aligned --feat_dim 3072 --dataset sst --out /home/zjian/bert-pretraining/results/predictions/dimensionality_opt_lr_3_seeds_2019-07-07/sst/nbit_32/pretrain_seed_3_dim_3072_wiki18_aligned_lr_1e-05 --model_seed 3 --lr 1e-05\"'
    'qsub -V -b y -wd /home/zjian/bert-pretraining/wd /home/zjian/bert-pretraining/src/bert-pretraining/gc_env.sh \"python /home/zjian/bert-pretraining/src/bert-pretraining/third_party/sentence_classification/train_classifier_feat_input.py --la --feat_input --feat_input_folder /home/zjian/bert-pretraining/results/features/dimensionality_2019-07-06/sst/nbit_32/pretrain_seed_3_dim_3072_wiki18_aligned --feat_dim 3072 --dataset sst --out /home/zjian/bert-pretraining/results/predictions/dimensionality_default_lr_3_seeds_2019-07-07/sst/nbit_32/pretrain_seed_3_dim_3072_wiki18_aligned_lr_0.001 --model_seed 3 --lr 0.001\"'
def generate_all_predictions_for_linear_bert_sentiment_compression_sst_only_for_new_opt_lr():
    """Write a qsub script re-running the sst compression predictions with
    the newly tuned (validation-set based) optimal learning rate.

    Covers both wiki17 and aligned wiki18 compressed feature folders.
    """
    best_lr = get_best_lr_for_linear_bert_sentiment()
    script_name = SCRIPT_FOLDER + "/0709_generate_prediction_for_compression_3_seeds_sst_only_for_new_opt_lr"
    datasets = ['sst']
    # NOTE(review): nbits is unused below; nbit comes from each folder name.
    nbits = [32, 16, 8, 4, 2, 1]
    exp_names = ["compression_opt_lr_3_seeds"]
    cmd_tmp = ('qsub -V -b y -wd /home/zjian/bert-pretraining/wd '
        '/home/zjian/bert-pretraining/src/bert-pretraining/gc_env.sh '
        '\\"python /home/zjian/bert-pretraining/src/bert-pretraining/third_party/sentence_classification/train_classifier_feat_input.py '
        '--la --feat_input --feat_input_folder {} --feat_dim {} '
        '--dataset {} --out {} --model_seed {} --lr {}\\"')
    with open(script_name, "w") as f:
        for exp_name in exp_names:
            for dataset in datasets:
                if "default" not in exp_name:
                    lr = best_lr[dataset]
                else:
                    lr = 0.001
                feature_folders = glob.glob("/home/zjian/bert-pretraining/results/features/compression_768_dim_2019-07-08/{}/nbit_*/*wiki17*".format(dataset))
                feature_folders += glob.glob("/home/zjian/bert-pretraining/results/features/compression_768_dim_2019-07-08/{}/nbit_*/*wiki18_aligned*".format(dataset))
                #assert len(feature_folders) == 3
                for feature_folder in feature_folders:
                    feature_folder = os.path.abspath(feature_folder)
                    nbit = get_feature_bit(feature_folder)
                    assert "nbit_{}".format(nbit) in feature_folder
                    seed = get_seed_from_folder_name(feature_folder)
                    pred_path = get_pred_path_from_feature_path(exp_name, dataset, feature_folder, nbit)
                    pred_path += "_lr_{}".format(str(lr))
                    print(feature_folder)
                    # second "dim_" component holds the feature dimension
                    feat_dim = int(feature_folder.split("dim_")[2].split("_")[0])
                    assert feat_dim == 768
                    cmd = cmd_tmp.format(feature_folder, feat_dim, dataset, pred_path, str(seed), str(lr))
                    f.write(cmd + "\n")
    print("cmd saved in ", script_name)
def generate_all_predictions_for_linear_bert_sentiment_dimensionality_sst_rerun_new_opt_lr():
    """Write a qsub script re-running the sst dimensionality-sweep
    predictions with the newly tuned optimal learning rate.

    Covers both wiki17 and aligned wiki18 full-precision feature folders.
    """
    best_lr = get_best_lr_for_linear_bert_sentiment()
    script_name = SCRIPT_FOLDER + "/0709_generate_prediction_for_dimensionality_3_seeds_sst_rerun_new_opt_lr"
    datasets = ['sst']
    # full precision features only in this sweep
    nbit = 32
    exp_names = ["dimensionality_opt_lr_3_seeds", ]
    cmd_tmp = ('qsub -V -b y -wd /home/zjian/bert-pretraining/wd '
        '/home/zjian/bert-pretraining/src/bert-pretraining/gc_env.sh '
        '\\"python /home/zjian/bert-pretraining/src/bert-pretraining/third_party/sentence_classification/train_classifier_feat_input.py '
        '--la --feat_input --feat_input_folder {} --feat_dim {} '
        '--dataset {} --out {} --model_seed {} --lr {}\\"')
    with open(script_name, "w") as f:
        for exp_name in exp_names:
            for dataset in datasets:
                if "default" not in exp_name:
                    lr = best_lr[dataset]
                else:
                    lr = 0.001
                feature_folders = glob.glob("../../results/features/dimensionality_2019-07-06/{}/nbit_32/*wiki17*".format(dataset))
                feature_folders += glob.glob("../../results/features/dimensionality_2019-07-06/{}/nbit_32/*wiki18_aligned*".format(dataset))
                #assert len(feature_folders) == 3
                for feature_folder in feature_folders:
                    feature_folder = os.path.abspath(feature_folder)
                    # pretrain seed and feature dim come from the folder name
                    seed = int(feature_folder.split("seed_")[1].split("_")[0])
                    pred_path = get_pred_path_from_feature_path(exp_name, dataset, feature_folder, nbit)
                    pred_path += "_lr_{}".format(str(lr))
                    feat_dim = int(feature_folder.split("dim_")[1].split("_")[0])
                    cmd = cmd_tmp.format(feature_folder, feat_dim, dataset, pred_path, str(seed), str(lr))
                    f.write(cmd + "\n")
    print("cmd saved in ", script_name)
def generate_ensembled_full_prec_features():
    """Write a qsub script building ensembled features from wiki17 and the
    aligned wiki18 768-dim features, sweeping the ensemble epsilon.
    """
    feat_folders_old = glob.glob("/home/zjian/bert-pretraining/results/features/dimensionality_2019-07-06/*/*/*dim_768*wiki17")
    assert len(feat_folders_old) == 4 * 3 # 4 dataset, 3 seeds
    script_name = SCRIPT_FOLDER + "/0709_generate_ensembled_full_prec_features"
    cmd_tmp = ('qsub -V -b y -wd /home/zjian/bert-pretraining/wd '
        '/home/zjian/bert-pretraining/src/bert-pretraining/gc_env.sh '
        '\\"python /home/zjian/bert-pretraining/src/bert-pretraining/run_compress.py '
        '--job_type ensemble --old_input_folder {} --new_input_folder {} --out_folder {} '
        '--dataset {} --seed {} --ensemble_eps {} \\"')
    eps_list = [0.0, 0.001, 0.01, 0.1, 0.2, 0.4, 0.6, 0.8, 1.0]
    exp_name = "ensemble"
    with open(script_name, "w") as f:
        for feat_folder_old in feat_folders_old:
            for eps in eps_list:
                assert "wiki17" in feat_folder_old
                # the "new" member of the ensemble is the aligned wiki18 copy
                feat_folder_new = feat_folder_old.replace("wiki17", "wiki18_aligned")
                # out_folder = os.path.dirname(train_feat_file) + "_aligned"
                seed = int(get_seed_from_folder_name(feat_folder_old))
                dataset = get_dataset_from_folder_name(feat_folder_old)
                # reuse the prediction-path helper to name the output folder,
                # then rewrite it to live under results/features instead
                out_folder = os.path.abspath(get_pred_path_from_feature_path(exp_name, dataset, feat_folder_old, nbit=32, date_str=None)).replace("wiki17", "").replace("predictions", "features") + "_eps_{}".format(eps)
                cmd = cmd_tmp.format(feat_folder_old, feat_folder_new, out_folder, dataset, seed, eps)
                f.write(cmd + "\n")
    print("cmd saved in ", script_name)
# the results folders above in this function are manually moved to the features folder from prediction folder
def generate_all_predictions_for_linear_bert_sentiment_ensemble():
    """Write a qsub script training linear classifiers on the ensembled
    768-dim features, for both opt-lr and default-lr runs.
    """
    best_lr = get_best_lr_for_linear_bert_sentiment()
    script_name = SCRIPT_FOLDER + "/0709_generate_prediction_for_ensemble_3_seeds"
    datasets = ['mr', 'subj', 'mpqa', 'sst']
    # ensembled features are full precision
    nbit = 32
    exp_names = ["ensemble_opt_lr_3_seeds", "ensemble_default_lr_3_seeds"]
    cmd_tmp = ('qsub -V -b y -wd /home/zjian/bert-pretraining/wd '
        '/home/zjian/bert-pretraining/src/bert-pretraining/gc_env.sh '
        '\\"python /home/zjian/bert-pretraining/src/bert-pretraining/third_party/sentence_classification/train_classifier_feat_input.py '
        '--la --feat_input --feat_input_folder {} --feat_dim {} '
        '--dataset {} --out {} --model_seed {} --lr {}\\"')
    feat_dim = 768
    with open(script_name, "w") as f:
        for exp_name in exp_names:
            for dataset in datasets:
                if "default" not in exp_name:
                    lr = best_lr[dataset]
                else:
                    lr = 0.001
                feature_folders = glob.glob("/home/zjian/bert-pretraining/results/features/ensemble_2019-07-10/{}/nbit_*/*".format(dataset))
                #assert len(feature_folders) == 3
                for feature_folder in feature_folders:
                    feature_folder = os.path.abspath(feature_folder)
                    assert "nbit_{}".format(nbit) in feature_folder
                    seed = get_seed_from_folder_name(feature_folder)
                    pred_path = get_pred_path_from_feature_path(exp_name, dataset, feature_folder, nbit)
                    pred_path += "_lr_{}".format(str(lr))
                    print(feature_folder)
                    assert "dim_{}".format(feat_dim) in feature_folder
                    cmd = cmd_tmp.format(feature_folder, feat_dim, dataset, pred_path, str(seed), str(lr))
                    f.write(cmd + "\n")
    print("cmd saved in ", script_name)
if __name__ == "__main__":
    # Experiment driver. Earlier pipeline stages are kept commented out as a
    # record of the order they were run in; only the active stage(s) execute.
    # bert_pretraining_lr_tuning_training()
    # bert_pretraining_lr_tuning_evaluation()
    # bert_pretraining_3_seeds_different_size()
    # generate_all_sentiment_features_dimensionality()
    # generate_all_sentiment_features_pytorch_file()
    # tune_lr_bert_sentiment_with_wiki17_768_dim_linear_model()
    # get_best_lr_for_linear_bert_sentiment()
    # generate_all_predictions_for_linear_bert_sentiment_dimensionality()
    # tune_lstm_bert_sentiment_with_wiki17_768_dim()
    # # compress_768_dim_features()
    # generate_all_predictions_for_linear_bert_sentiment_compression()
    # generate_aligned_wiki18_full_prec_features()
    # compress_768_dim_features_aligned_wiki18()
    # generate_all_predictions_for_linear_bert_sentiment_compression_aligned_wiki18()
    # generate_all_predictions_for_linear_bert_sentiment_dimensionality_wiki18_aligned()
    # The below is for running with new validation set based opt lr for sst
    # generate_all_predictions_for_linear_bert_sentiment_compression_sst_only_for_new_opt_lr()
    # generate_all_predictions_for_linear_bert_sentiment_dimensionality_sst_rerun_new_opt_lr()
    generate_ensembled_full_prec_features()
    generate_all_predictions_for_linear_bert_sentiment_ensemble()
| bert-pretraining-master | src/bert-pretraining/experiments.py |
import tensorflow as tf
import torch
import numpy as np
import sys, os
import datetime
import logging
import pathlib
import json
import glob
import random
def set_tensorflow_random_seed(rand_seed):
    """Seed the python, numpy and tensorflow RNGs for reproducible TF runs."""
    for seeder in (random.seed, np.random.seed, tf.set_random_seed):
        seeder(rand_seed)
def set_random_seed(rand_seed):
    """Seed every RNG in use: python, numpy, torch (CPU + CUDA), tensorflow."""
    random.seed(rand_seed)
    np.random.seed(seed=rand_seed)
    # torch keeps separate generators for CPU and CUDA; seed both
    for torch_seeder in (torch.manual_seed, torch.cuda.manual_seed):
        torch_seeder(rand_seed)
    tf.set_random_seed(rand_seed)
def get_date_str():
    """Return today's date as a 'YYYY-MM-DD' string."""
    return datetime.date.today().strftime('%Y-%m-%d')
def ensure_dir(dir):
    """Create directory *dir* (including parents) if it does not exist."""
    target = pathlib.Path(dir)
    target.mkdir(parents=True, exist_ok=True)
def save_to_json(dict_to_write, path):
    """Serialize *dict_to_write* to *path* as indented JSON."""
    with open(path, 'w') as out_file:
        json.dump(dict_to_write, out_file, indent=2)
def init_logging(path):
    """Initialize root-logger handlers for an experiment run.

    Attaches an INFO-level console handler and an INFO-level file handler
    writing to ``<path>/run.log`` to the root logger.

    Args:
        path: directory in which the ``run.log`` file is created.
    """
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter("%(levelname)s - %(message)s")
    # console handler
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.INFO)
    console_handler.setFormatter(formatter)
    logger.addHandler(console_handler)
    # file handler; delay=True defers opening run.log until the first record
    # is emitted (the original passed the string "true", which only worked
    # because any non-empty string is truthy)
    file_handler = logging.FileHandler(
        os.path.join(path, "run.log"), "w", encoding=None, delay=True)
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
def non_default_args(parser, args):
    """Return (dest, value) pairs from *args* that differ from the parser
    defaults.

    Args:
        parser: argparse.ArgumentParser whose actions define the defaults.
        args: dict mapping argument dest names to values.

    Raises:
        AssertionError: if no argument differs from its default.
    """
    changed = [
        (action.dest, args[action.dest])
        for action in parser._actions
        if action.dest in args and args[action.dest] != action.default
    ]
    assert len(changed) > 0, 'There must be a non-default arg.'
    return changed
def get_arg_str(parser, args, to_skip=()):
    """Build a run-name string from the non-default arguments.

    The original body referenced undefined names (``runname``, ``to_skip``)
    and returned nothing; this version initializes the accumulator, accepts
    an optional ``to_skip`` collection (backward-compatible default) and
    returns the built string.

    Args:
        parser: argparse.ArgumentParser whose actions define the defaults.
        args: dict mapping argument dest names to values.
        to_skip: optional collection of keys to exclude from the name.

    Returns:
        A string of '{key},{value}_' segments, one per non-default arg.
    """
    runname = ''
    for key, val in non_default_args(parser, args):
        if key not in to_skip:
            runname += '{},{}_'.format(key, val)
    return runname
def load_from_json(path):
    """Deserialize and return the JSON content stored at *path*."""
    with open(path) as in_file:
        return json.load(in_file)
def gather_results(path_regex):
    """Load every JSON file matching the glob pattern *path_regex*."""
    results = []
    for match in glob.glob(path_regex):
        with open(match) as in_file:
            results.append(json.load(in_file))
    return results
def flatten_dict(to_flatten):
    """Flatten one level: lift the items of any dict-valued entry to the top.

    The key of a nested dict itself is dropped; only its items survive.
    """
    flattened = {}
    for key, value in to_flatten.items():
        if isinstance(value, dict):
            flattened.update(value)
        else:
            flattened[key] = value
    return flattened
# Returns a list of result dictionaries with the subset of results from
# all_results which exactly matched the 'key_values_to_match' dictionary.
def extract_result_subset(all_results, key_values_to_match):
    """Filter *all_results* down to entries matching *key_values_to_match*."""
    return [res for res in all_results
            if matches_all_key_values(res, key_values_to_match)]
# return True if result[key] in values for all key-value pairs in key_values_to_match
def matches_all_key_values(result, key_values_to_match):
    """True iff for every constraint key, result[key] is one of the allowed
    values (each constraint's values must be a list)."""
    for key, values in key_values_to_match.items():
        assert type(values) == list
        if key not in result:
            return False
        if result[key] not in values:
            return False
    return True
def stats_on_subset_json(results, key):
    """Return (mean, std) of ``result[key]`` across all result dicts."""
    values = [res[key] for res in results]
    return np.mean(values), np.std(values)
def clean_json_results(results):
    """Annotate feature-to-prediction results with metadata parsed from the
    feature folder path.

    For each result carrying a "feat_input_folder" key, derives the corpus
    name, the quantization bit width ("nbit_<k>" path component), the
    feature dimension ("dim_<k>" component) and, for ensemble runs, the
    ensemble epsilon ("_eps_<v>" suffix). Dicts are modified in place and
    returned in the original order.
    """
    results_clean = []
    for result in results:
        # for feature to prediction experiments
        if "feat_input_folder" in result.keys():
            folder = result["feat_input_folder"]
            if "wiki17" in folder:
                result["corpus"] = "wiki17"
            elif "wiki18" in folder:
                result["corpus"] = "wiki18"
            elif "ensemble" in folder:
                # ensemble features are built on top of wiki18
                result["corpus"] = "wiki18"
            result["nbit"] = int(folder.split("nbit_")[-1].split("/")[0])
            result["dim"] = int(folder.split("dim_")[-1].split("_")[0])
            if "ensemble" in folder:
                assert "_eps_" in folder
                result["ensemble_eps"] = float(
                    folder.split("_eps_")[-1].split("_")[0])
        results_clean.append(result)
    return results_clean
def gather_json_results(json_regex):
    """Load all JSON result files matching *json_regex*, flattened one level.

    Example pattern:
    "../../results/predictions/dimensionality_2019-07-07/{}/nbit_32/*/final_results.json"
    """
    return [flatten_dict(res) for res in gather_results(json_regex)]
def get_classification_disagreement(pred1, pred2):
    """Fraction of positions where the two prediction lists differ."""
    assert isinstance(pred1, list)
    assert isinstance(pred2, list)
    n_diff = np.sum(np.array(pred1) != np.array(pred2))
    return float(n_diff) / float(len(pred1))
def get_csv_folder(gc=True):
    """Return the csv output folder: the google-cloud absolute path when
    *gc* is truthy, otherwise the local relative path."""
    if not gc:
        return "./output/csv"
    # using google cloud
    return "/home/zjian/bert-pretraining/src/bert-pretraining/output/csv"
| bert-pretraining-master | src/bert-pretraining/utils.py |
import pandas as pd
from matplotlib import pyplot as plt
import numpy as np
import csv
import os, sys
def std_results_array(results_array):
    """Per-position std across a list of 1d (or flattenable) numpy arrays."""
    flattened = [np.reshape(arr, arr.size) for arr in results_array]
    return np.std(np.vstack(flattened), axis=0)
def average_results_array(results_array):
    """Per-position mean across a list of 1d (or flattenable) numpy arrays."""
    flattened = [np.reshape(arr, arr.size) for arr in results_array]
    return np.mean(np.vstack(flattened), axis=0)
def save_csv_with_error_bar(data_list, file_name="./test/test.csv", ave_x=False):
    """Dump per-series (x, mean_y, std_y) columns to a csv file.

    *data_list* is a list of (label, x_pt, y_pt) tuples; x_pt is a 1d list
    and y_pt is a list of lists, one inner list per random seed. For each
    series, columns "<label>|x", "<label>|y" (seed mean) and "<label>|y_std"
    (seed std) are written. When *ave_x* is set, x is averaged across seeds
    as well.
    """
    columns = []
    for label, x, y in data_list:
        mean_y = np.array(average_results_array(y))
        std_y = np.array(std_results_array(y))
        if ave_x:
            x = average_results_array(x)
        x = np.array(x)
        columns.append(pd.DataFrame(
            np.reshape(x, [x.size, 1]), columns=[label + "|x"]))
        columns.append(pd.DataFrame(
            np.reshape(mean_y, [mean_y.size, 1]), columns=[label + "|y"]))
        columns.append(pd.DataFrame(
            np.reshape(std_y, [std_y.size, 1]), columns=[label + "|y_std"]))
    pd.concat(columns, axis=1).to_csv(file_name)
def csv_to_table(file_name, delimiter=',', row_headers=True):
    """Parse a csv of floats into (column_groups, row_names, data).

    The first row is the header; its first cell is dropped and the rest
    become *column_groups*. When *row_headers* is True, the first cell of
    every data row is collected into *row_names* and empty cells become NaN;
    otherwise whole rows are parsed as floats (empty cells then raise).

    Returns:
        (groups, names, data) where data is a 2d numpy array.
    """
    with open(file_name, 'r') as csv_file:
        reader = csv.reader(csv_file, delimiter=delimiter)
        groups = next(reader)[1:]
        names = []
        data = []
        for row in reader:
            if row_headers:
                names.append(row[0])
                values = [float('nan') if cell == '' else float(cell)
                          for cell in row[1:]]
            else:
                values = [float(cell) for cell in row]
            data.append(values)
    return groups, names, np.array(data)
def plot_figure_without_error_bar(names, data, color_list, normalizer=1.0):
    '''
    each column of data is a line
    the name follows the pattern like ['fp Nystrom-x', 'fp Nystrom-y', 'fp Nystrom-y_std', 'fp RFF-x', 'fp RFF-y', 'fp RFF-y_std']
    '''
    # Columns come in triplets (x, mean_y, std_y) per series, as produced by
    # save_csv_with_error_bar / csv_to_table; the std column is read but not
    # drawn here. Labels containing "FP" get a solid line, others dashed.
    marker_list = ['v', 'd', 'o', 's', '+', '^', 'h', '*', 'x']
    for i in range(data.shape[1] // 3):
        idx = i * 3
        # the series label is the part of the column name before "|"
        label = names[idx].split("|")[0]
        print("label ", names[idx], label)
        x = data[:, idx]
        average_y = data[:, idx + 1]
        std_y = data[:, idx + 2]
        # print x, average_y, std_y
        if "FP" in label:
            print("FP mode plot", marker_list[i])
            plt.plot(x, average_y / normalizer, "-", label=label, marker=marker_list[i], markeredgecolor=color_list[i % len(color_list)], markerfacecolor="none", markersize=6, markeredgewidth=1, linewidth=1, color=color_list[i % len(color_list)])
            # plt.errorbar(x, average_y, yerr=std_y, label=label, marker=marker_list[i], markeredgecolor=color_list[i % len(color_list)], markerfacecolor="none", markersize=5, markeredgewidth=1, fmt="-", linewidth=1, capsize=5, capthick=1, color=color_list[i % len(color_list)])
        else:
            print("LP mode plot", marker_list[i])
            plt.plot(x, average_y / normalizer, "--", label=label, marker=marker_list[i], markeredgecolor=color_list[i % len(color_list)], markerfacecolor="none", markersize=6, markeredgewidth=1, linewidth=1, color=color_list[i % len(color_list)])
            # plt.errorbar(x, average_y, yerr=std_y, label=label, marker=marker_list[i], markeredgecolor=color_list[i % len(color_list)], markerfacecolor="none", markersize=5, markeredgewidth=1, fmt="--", linewidth=1, capsize=5, capthick=1, color=color_list[i % len(color_list)])
def plot_figure_with_error_bar(names, data, color_list, normalizer=1.0, marker_size=8.0):
    '''
    each column of data is a line
    the name follows the pattern like ['fp Nystrom-x', 'fp Nystrom-y', 'fp Nystrom-y_std', 'fp RFF-x', 'fp RFF-y', 'fp RFF-y_std']
    '''
    # Columns come in triplets (x, mean_y, std_y) per series; both mean and
    # std are divided by *normalizer*.
    # NOTE(review): the FP and LP branches below issue identical errorbar
    # calls — likely a leftover from a styled-per-branch version.
    marker_list = ['v', 'd', 'o', 's', '+', 'v', 'h']
    for i in range(data.shape[1] // 3):
        idx = i * 3
        label = names[idx].split("|")[0]
        print("label ", names[idx], label)
        x = data[:, idx]
        average_y = data[:, idx + 1]
        std_y = data[:, idx + 2]
        # print x, average_y, std_y
        if "FP" in label:
            print("FP mode plot")
            plt.errorbar(x, average_y / normalizer, yerr=std_y / normalizer, label=label, marker=marker_list[i % len(marker_list)], markeredgecolor=color_list[i % len(color_list)], markersize=marker_size, markeredgewidth=1.5, fmt="-", linewidth=2, capsize=5, capthick=1, color=color_list[i % len(color_list)])
        else:
            print("LP mode plot")
            plt.errorbar(x, average_y / normalizer, yerr=std_y / normalizer, label=label, marker=marker_list[i % len(marker_list)], markeredgecolor=color_list[i % len(color_list)], markersize=marker_size, markeredgewidth=1.5, fmt="-", linewidth=2, capsize=5, capthick=1, color=color_list[i % len(color_list)])
def plot_figure_with_error_bar2(names, data, color_list, normalizer=1.0):
    '''
    each column of data is a line
    the name follows the pattern like ['fp Nystrom-x', 'fp Nystrom-y', 'fp Nystrom-y_std', 'fp RFF-x', 'fp RFF-y', 'fp RFF-y_std']
    '''
    # Variant of plot_figure_with_error_bar with hollow markers
    # (markerfacecolor="none") and thinner lines.
    # NOTE(review): as in plot_figure_with_error_bar, the FP and LP branches
    # issue identical errorbar calls.
    marker_list = ['v', 'd', 'o', 's', '+', 'v', 'h']
    for i in range(data.shape[1] // 3):
        idx = i * 3
        label = names[idx].split("|")[0]
        print("label ", names[idx], label)
        x = data[:, idx]
        average_y = data[:, idx + 1]
        std_y = data[:, idx + 2]
        # print x, average_y, std_y
        if "FP" in label:
            print("FP mode plot")
            plt.errorbar(x, average_y / normalizer, yerr=std_y / normalizer, label=label, marker=marker_list[i % len(marker_list)], markeredgecolor=color_list[i % len(color_list)], markerfacecolor="none", markersize=8, markeredgewidth=1.5, fmt="-", linewidth=1, capsize=5, capthick=1, color=color_list[i % len(color_list)])
        else:
            print("LP mode plot")
            plt.errorbar(x, average_y / normalizer, yerr=std_y / normalizer, label=label, marker=marker_list[i % len(marker_list)], markeredgecolor=color_list[i % len(color_list)], markerfacecolor="none", markersize=8, markeredgewidth=1.5, fmt="-", linewidth=1, capsize=5, capthick=1, color=color_list[i % len(color_list)])
def set_fig_xtick(values, labels, fontsize):
    """Set x-axis tick positions (and optional labels) and the tick-label
    font size on the current axes."""
    ax = plt.gca()
    ax.set_xticks(values)
    if labels is not None:
        ax.set_xticklabels(labels)
    for which in ('major', 'minor'):
        ax.tick_params(axis='both', which=which, labelsize=fontsize)
def set_fig_ytick(values, labels, fontsize):
    """Set y-axis tick positions (and optional labels) and the tick-label
    font size on the current axes."""
    ax = plt.gca()
    ax.set_yticks(values)
    if labels is not None:
        ax.set_yticklabels(labels)
    for which in ('major', 'minor'):
        ax.tick_params(axis='both', which=which, labelsize=fontsize)
| bert-pretraining-master | src/bert-pretraining/plot_utils.py |
from setuptools import setup
# Minimal packaging metadata for the halp package; numpy and torch are the
# only runtime dependencies declared.
setup(name='halp',
      version='0.1',
      description='Code for floating point based halp.',
      url='https://github.com/HazyResearch/PyTorch_HALP',
      author='Jian Zhang',
      author_email='[email protected]',
      license='Apache Version 2',
      install_requires = ['numpy',
                          'torch']
      ) | halp-master | setup.py
halp-master | halp/__init__.py |
|
import torch
import torch.nn as nn
from torch.nn import ReLU
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch.autograd import Function
from torch.autograd import Variable
import numpy as np
from halp.utils.utils import void_cast_func, single_to_half_det, single_to_half_stoc
from halp.layers.bit_center_layer import BitCenterActivation
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger = logging.getLogger()
class BitCenterReLUFunction(Function):
    """Autograd function for the low-precision (delta) part of a
    bit-centering ReLU.

    The full activation is represented as ``input_lp`` (a cached offset)
    plus ``input_delta`` (the low-precision residual being trained); the
    forward pass returns only the delta of the ReLU output, i.e.
    ``relu(offset + delta) - relu(offset)``.
    """

    @staticmethod
    def forward(ctx, input_delta, input_lp, grad_output_lp):
        # reconstruct the full-precision input from offset + delta
        input_full = input_lp + input_delta
        # delta output: relu(full) - relu(offset)
        out = F.threshold(input_full, threshold=0, value=0) \
            - F.threshold(input_lp, threshold=0, value=0)
        ctx.save_for_backward(grad_output_lp, input_full, input_lp)
        return out

    @staticmethod
    def backward(ctx, grad_output):
        grad_output_lp, input_full, input_lp = ctx.saved_tensors
        # combine the delta and cached-offset output gradients, then apply
        # the ReLU gate of the full input (no gradient where full input < 0)
        grad_input_delta = grad_output + grad_output_lp
        grad_input_delta[input_full < 0] = 0.0
        # where the offset branch's ReLU was active, subtract its cached
        # gradient contribution so only the delta-path gradient remains
        grad_input_delta[input_lp >= 0] -= grad_output_lp[input_lp >= 0]
        # the offset input and cached output gradient receive no gradients
        grad_input_lp = None
        grad_grad_output_lp = None
        return grad_input_delta.clone(), grad_input_lp, grad_grad_output_lp


# functional-style alias used by the BitCenterReLU module
bit_center_relu = BitCenterReLUFunction.apply
class BitCenterReLU(BitCenterActivation, nn.ReLU):
    """Bit-centering ReLU module.

    The full-precision pass uses ``F.relu``; the low-precision pass uses
    ``bit_center_relu`` with activation/gradient offsets cached by the
    BitCenterActivation base class.
    """

    def __init__(self, cast_func=void_cast_func, n_train_sample=1):
        BitCenterActivation.__init__(
            self,
            fp_functional=F.relu,
            lp_functional=bit_center_relu,
            cast_func=cast_func,
            n_train_sample=n_train_sample)
        nn.ReLU.__init__(self)
        # cache the gradient w.r.t. the output during the fp backward pass
        # so the lp pass can reuse it as grad_output_lp
        self.register_backward_hook(self.update_grad_output_cache)
import torch
import numpy as np
import torch.nn.functional as F
from torch.nn import Parameter
from halp.layers.tanh_layer import BitCenterTanh, bit_center_tanh
from halp.utils.utils import void_cast_func, single_to_half_det, single_to_half_stoc
from unittest import TestCase
from halp.layers.bit_center_layer_test import TestBitCenterNoParamLayer
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger = logging.getLogger()
class TestBitCenterSigmoidLayer(TestBitCenterNoParamLayer, TestCase):
    """Tests for BitCenterTanh via the shared no-param-layer harness.

    NOTE(review): despite "Sigmoid" in the class name, the layer under test
    is BitCenterTanh — the name looks like a copy-paste leftover from the
    sigmoid test; renaming would change test discovery, so it is only
    flagged here.
    """

    def prepare_layer(self,
                      channel_in,
                      w_in,
                      h_in,
                      cast_func=void_cast_func,
                      bias=False,
                      do_double=True,
                      seed=0,
                      batch_size=1,
                      n_train_sample=1):
        # channel_in / w_in / h_in / bias / seed / batch_size are unused for
        # this parameter-free layer; they are part of the harness-wide
        # prepare_layer signature.
        layer = BitCenterTanh(
            cast_func=cast_func, n_train_sample=n_train_sample)
        # Note do_double = setup layer for gradient check, otherwise, it is for checking
        # the tensor properties, and layer behaviors
        if do_double:
            layer.double()
        layer.cuda()
        return layer
if __name__ == "__main__":
    # BUGFIX: the module only does `from unittest import TestCase`, so the
    # bare `unittest.main()` call raised NameError; import the module here.
    import unittest
    print(torch.__version__)
    unittest.main()
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from torch.nn.parameter import Parameter
from torch.autograd import Function
from torch.autograd import Variable
import numpy as np
from halp.utils.utils import void_cast_func, single_to_half_det, single_to_half_stoc
from halp.layers.bit_center_layer import BitCenterLayer
# the current implementation does not use chebyshev approximiation yet.
# The input grad during backward is the exact delta grad
class BitCenterCrossEntropyLPFunction(Function):
    """Low-precision (delta) step of the bit-centering cross-entropy loss.

    forward evaluates the loss on offset + delta logits; backward returns
    the *delta* input gradient: the full softmax CE gradient minus the
    cached offset gradient (``grad_offset``).
    """

    @staticmethod
    def forward(ctx, input_delta, input_lp, target, grad_offset):
        # suffix lp means the lp version of the offset tensors
        # suffix delta means the real low precision part of the model representation
        # make softmax more numerically stable, we substract the max
        input = input_lp + input_delta - torch.max(input_lp + input_delta)
        ctx.save_for_backward(input_lp, input_delta, target, grad_offset)
        output = F.nll_loss(F.log_softmax(input, dim=1), target)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        # grad_z_{i, j} = 1/m\sum_i 1(y_i = j) - P(y_i = j)
        input_lp, input_delta, target, grad_offset = ctx.saved_tensors
        assert input_lp.size(0) == target.numel()
        prob = F.softmax(input_lp + input_delta - torch.max(input_lp + input_delta), dim=1)
        sample_idx = torch.LongTensor(np.arange(input_lp.size(0)))
        minibatch_size = input_delta.size(0)
        grad_input_lp = None
        # Build (p_ij - 1{y_i = j}) / m in place, then subtract the cached
        # offset gradient so only the delta gradient flows back.
        grad_input_delta = torch.zeros_like(
            input_delta, dtype=input_delta.dtype)
        grad_input_delta[sample_idx, target] = 1.0
        grad_input_delta.add_(-prob)
        grad_input_delta.div_(-minibatch_size)
        grad_input_delta.add_(-grad_offset)
        grad_target = None
        grad_grad_offset = None
        return grad_input_delta, grad_input_lp, grad_target, grad_grad_offset
class BitCenterCrossEntropyFPFunction(Function):
    """Full-precision (offset) step of the bit-centering cross-entropy loss:
    standard softmax cross entropy with an explicit, closed-form backward.
    """

    @staticmethod
    def forward(ctx, input_fp, target):
        # suffix lp means the lp version of the offset tensors
        # suffix delta means the real low precision part of the model representation
        ctx.save_for_backward(input_fp, target)
        output = F.nll_loss(F.log_softmax(input_fp, dim=1), target)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        # grad_z_{i, j} = 1/m\sum_i 1(y_i = j) - P(y_i = j)
        input_fp, target = ctx.saved_tensors
        assert input_fp.size(0) == target.numel()
        prob = F.softmax(input_fp, dim=1)
        sample_idx = torch.LongTensor(np.arange(input_fp.size(0)))
        minibatch_size = input_fp.size(0)
        # (p_ij - 1{y_i = j}) / m, built in place.
        grad_input_fp = torch.zeros_like(input_fp, dtype=input_fp.dtype)
        grad_input_fp[sample_idx, target] = 1.0
        grad_input_fp.add_(-prob)
        grad_input_fp.div_(-minibatch_size)
        grad_target = None
        return grad_input_fp, grad_target


# Functional aliases in the style of torch.nn.functional.
bit_center_cross_entropy_lp = BitCenterCrossEntropyLPFunction.apply
bit_center_cross_entropy_fp = BitCenterCrossEntropyFPFunction.apply
class BitCenterCrossEntropy(BitCenterLayer):
    """Bit-centering cross-entropy loss layer.

    Unlike ordinary bit-center layers, this layer has no weight/bias and it
    caches the *input gradient* (reusing the ``grad_output_cache`` infra)
    rather than the output gradient.
    """

    def __init__(self, cast_func=void_cast_func, n_train_sample=1):
        BitCenterLayer.__init__(
            self,
            fp_functional=bit_center_cross_entropy_fp,
            lp_functional=bit_center_cross_entropy_lp,
            bias=False,
            cast_func=cast_func,
            n_train_sample=n_train_sample)
        self.setup_bit_center_vars()
        self.cuda()
        self.reset_parameters_bit_center()
        self.register_backward_hook(self.update_grad_output_cache)

    def setup_bit_center_vars(self):
        # there is no bc variables to be setup for this layer
        pass

    def reset_parameters_bit_center(self):
        # nothing to initialize: the layer is parameter-free
        pass

    def update_grad_output_cache(self, self1, input, output):
        """Backward hook: cache the gradient w.r.t. the *input* logits."""
        # pass
        # use duplicated self to adapt to the pytorch API requirement
        # as this is a class member function
        if self.do_offset:
            # note here grad_output_lp is actually the grad_input offset.
            # This is because we want to utilize the existing infra in bitCenterLayer
            if self.on_site_compute:
                self.grad_output_cache = \
                    self.update_single_cache_on_site_compute(
                        self.grad_output_cache, input[0])
                self.grad_cache_iter = 0
                self.output_size = input[0].size()
            else:
                self.grad_output_cache[self.grad_cache_iter:min(
                    self.grad_cache_iter +
                    input[0].size(0), self.n_train_sample)].data.copy_(
                        self.cast_func(input[0].cpu()))
                self.grad_cache_iter = (
                    self.grad_cache_iter + input[0].size(0)) % self.n_train_sample
        # we use the following variable only for test purpose, we want to be able to access
        # the gradeint value wrt input in the outside world. For lp mode, it is grad_input_delta
        # for fp mode, it is grad_input
        self.input_grad_for_test = input[0].clone()

    def forward_fp(self, input, target):
        """Full-precision loss; caches input and sets up the grad cache."""
        self.check_or_setup_input_cache(input)
        output = self.fp_func(input, target)
        if self.grad_output_cache is None:
            # in the cross entropy layer we need to cache the input gradient
            self.grad_output_cache = self.setup_cache(input)
            self.grad_cache_iter = 0
        self.update_input_cache(input)
        return output

    def forward_lp(self, input, target):
        """Low-precision loss on delta logits plus cached offsets."""
        # Need to test do_offset mode whether gradient is updated properly
        if self.on_site_compute:
            assert self.cache_iter == 0 and self.grad_cache_iter == 0
            input_lp = self.input_cache[0:input.size(0)].cuda()
        else:
            input_lp = self.input_cache[self.cache_iter:(
                self.cache_iter + input.size(0))].cuda()
        # give a handle to access input_lp from outside
        self.input_lp = input_lp
        # note here grad_output_lp is actually the grad_input offset.
        # This is because we want to utilize the existing infra in bitCenterLayer
        if self.on_site_compute:
            grad_output_lp = self.get_single_cache_on_site_compute(
                self.grad_output_cache, self.output_size).cuda()
        else:
            grad_output_lp = \
                self.grad_output_cache[self.grad_cache_iter:(self.grad_cache_iter + input.size(0))].cuda()
        input_delta = input
        output = self.lp_func(input_delta, input_lp, target, grad_output_lp)
        self.increment_cache_iter(input)
        return output

    def forward(self, input, target):
        """Dispatch to fp or lp forward depending on the current mode."""
        # Need to test do_offset mode whether gradient is updated properly
        if self.do_offset:
            return self.forward_fp(input, target)
        else:
            return self.forward_lp(input, target)
import torch
import torch.nn as nn
import torch.nn.init as init
from torch.nn import BatchNorm2d
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch.autograd import Function
from torch.autograd import Variable
import numpy as np
from halp.utils.utils import void_cast_func, single_to_half_det, single_to_half_stoc
from halp.layers.bit_center_layer import BitCenterLayer
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger = logging.getLogger("batch norm")
def expand_param_as_input(param, input):
    """Broadcast a per-channel parameter (length C) across an NCHW tensor."""
    n_channel = input.size(1)
    channel_shaped = param.view(1, n_channel, 1, 1)
    return channel_shaped.expand_as(input)
def sum_tensor_as_param(tensor):
    """Collapse an NCHW tensor to a per-channel vector by summing N, H, W."""
    per_sample_channel = tensor.sum(-1).sum(-1)  # sum over W then H -> (N, C)
    return per_sample_channel.sum(0)             # sum over batch -> (C,)
def get_bn_grads(grad_output, weight, input, sigma_sq, mu, eps, x_hat):
    """Manual batch-norm backward: returns (d_input, d_weight, d_bias).

    Implements the batch-norm chain rule. The d_sigma_sq term is evaluated
    in log space (log-abs plus explicit sign bookkeeping) to reduce
    under/overflow when run in low precision.
    """
    d_x_hat = grad_output * expand_param_as_input(weight, input)
    inv_std = torch.tensor([1.0], device=sigma_sq.device, dtype=sigma_sq.dtype)\
        / torch.sqrt(sigma_sq + eps)
    # m = number of elements per channel (N * H * W)
    m = input.size(0) * input.size(2) * input.size(3)
    input_center = input - expand_param_as_input(mu, input)
    # we found in fp16 mode, adding non-zero eps can results in very inaccurate gradient
    log_eps = 0.0
    log_d_x_hat = torch.log(torch.abs(d_x_hat) + log_eps)
    sign_d_x_hat = torch.sign(d_x_hat)
    log_input_center = torch.log(torch.abs(input_center) + log_eps)
    sign_input_center = torch.sign(input_center)
    log_inv_std = torch.log(torch.abs(inv_std) + log_eps)
    sign_inv_std = torch.sign(inv_std)
    sign_mult = sign_d_x_hat * sign_input_center * expand_param_as_input(
        sign_inv_std, input_center)
    # d_sigma_sq = -0.5 * sum(d_x_hat * (x - mu) * inv_std^3), evaluated as
    # sign * exp(sum of logs).
    d_sigma_sq = -torch.tensor(
        [0.5], dtype=sigma_sq.dtype, device=sigma_sq.device
    ) * sum_tensor_as_param(
        sign_mult *
        torch.exp(log_d_x_hat + log_input_center + expand_param_as_input(
            torch.tensor([3.0], dtype=sigma_sq.dtype, device=sigma_sq.device) *
            log_inv_std, input_center)))
    d_mu = sum_tensor_as_param(
        -d_x_hat * expand_param_as_input(inv_std, d_x_hat)) + torch.tensor(
            [-2.0 / m], dtype=sigma_sq.dtype, device=sigma_sq.
            device) * d_sigma_sq * sum_tensor_as_param(input_center)
    d_x = d_x_hat * expand_param_as_input(inv_std, d_x_hat) \
        + torch.tensor([2.0 / m], dtype=d_x_hat.dtype, device=d_x_hat.device) \
        * expand_param_as_input(d_sigma_sq, input_center) * input_center \
        + torch.tensor([1.0 / m], dtype=d_mu.dtype, device=d_mu.device) \
        * expand_param_as_input(d_mu, input)
    d_weight = sum_tensor_as_param(grad_output * x_hat)
    d_bias = sum_tensor_as_param(grad_output)
    return d_x, d_weight, d_bias
class BitCenterBatchNormFunction(Function):
    """
    Bit-centering batch norm autograd function: forward returns the delta
    of the BN output (full output minus offset output) and updates the
    delta running statistics in place. Notations align with Sergey Ioffe
    et al. (https://arxiv.org/pdf/1502.03167.pdf).
    """

    @staticmethod
    def forward(ctx, input_delta, input_lp, mu_delta, mu_lp, sigma_sq_delta,
                sigma_sq_lp, output_grad_lp, weight_delta, weight_lp,
                bias_delta, bias_lp, momentum, eps):
        input_full = input_delta + input_lp
        batch_size = input_delta.size(0)
        eps = torch.tensor([eps],
                           dtype=sigma_sq_delta.dtype,
                           device=sigma_sq_delta.device,
                           requires_grad=False)
        # we assume input is 4d tensor
        C = input_full.size(1)
        m = input_full.numel() / C
        batch_mean_full = input_full.mean(-1).mean(-1).mean(0).view(
            1, input_full.size(1), 1, 1)
        # var = E[x^2] - (E[x])^2, per channel
        batch_var_full = (input_full**2).mean(-1).mean(-1).mean(0).view(1, input_full.size(1), 1, 1) \
            - batch_mean_full * batch_mean_full
        batch_mean_lp = input_lp.mean(-1).mean(-1).mean(0).view(
            1, input_lp.size(1), 1, 1)
        batch_var_lp = (input_lp**2).mean(-1).mean(-1).mean(0).view(1, input_lp.size(1), 1, 1) \
            - batch_mean_lp * batch_mean_lp
        # Given O + d <--(O + d) (1 - rho) + rho * V where V is the new observed value.
        # the update rule to delta running statistics d is
        # d <-- d (1 - rho) + rho (V - O)
        mu_delta.mul_(
            torch.Tensor([
                1.0 - momentum,
            ]).type(mu_delta.dtype).item())
        mu_delta.add_(
            torch.Tensor([
                momentum,
            ]).type(mu_delta.dtype).item(),
            batch_mean_full.squeeze() - mu_lp)
        sigma_sq_delta.mul_(
            torch.Tensor([
                1.0 - momentum,
            ]).type(sigma_sq_delta.dtype).item())
        # note the running stat uses unbias estimate
        # while the batch stat does not for variance
        sigma_sq_delta.add_(
            torch.Tensor([
                momentum,
            ]).type(sigma_sq_delta.dtype).item(),
            batch_var_full.squeeze() * (m / float(m - 1.0)) - sigma_sq_lp)
        x_hat_lp = \
            (input_lp - expand_param_as_input(batch_mean_lp, input_lp)) \
            / expand_param_as_input(torch.sqrt(batch_var_lp + eps), input_lp)
        x_hat_full = \
            (input_full - expand_param_as_input(batch_mean_full, input_full)) \
            / expand_param_as_input(torch.sqrt(batch_var_full + eps), input_full)
        y_lp = expand_param_as_input(weight_lp, input_lp) * x_hat_lp \
            + expand_param_as_input(bias_lp, input_lp)
        y_full = expand_param_as_input(weight_lp + weight_delta, input_full) * x_hat_full \
            + expand_param_as_input(bias_lp + bias_delta, input_full)
        ctx.save_for_backward(input_delta, input_lp, x_hat_lp, x_hat_full,
                              mu_delta, mu_lp, sigma_sq_delta, sigma_sq_lp,
                              output_grad_lp, weight_lp, weight_delta, eps,
                              batch_mean_full, batch_mean_lp, batch_var_full,
                              batch_var_lp)
        # Return only the delta of the batch-norm output.
        return y_full - y_lp

    # BUGFIX: backward was missing @staticmethod while forward had it;
    # new-style autograd Functions (used via .apply) expect both forward
    # and backward to be static methods.
    @staticmethod
    def backward(ctx, grad_output):
        input_delta, input_lp, x_hat_lp, x_hat_full, \
            mu_delta, mu_lp, sigma_sq_delta, sigma_sq_lp, \
            output_grad_lp, weight_lp, weight_delta, eps, \
            batch_mean_full, batch_mean_lp, \
            batch_var_full, batch_var_lp = ctx.saved_tensors
        # Gradients of the full (offset + delta) path ...
        d_x_full, d_weight_full, d_bias_full = \
            get_bn_grads(grad_output + output_grad_lp,
                         weight_delta + weight_lp,
                         input_delta + input_lp,
                         batch_var_full,
                         batch_mean_full,
                         eps,
                         x_hat_full)
        # ... minus gradients of the offset-only path give the delta grads.
        d_x_lp, d_weight_lp, d_bias_lp = \
            get_bn_grads(output_grad_lp,
                         weight_lp,
                         input_lp,
                         batch_var_lp,
                         batch_mean_lp,
                         eps,
                         x_hat_lp)
        # Gradients are returned only for input_delta, weight_delta and
        # bias_delta; every other forward argument gets None.
        return d_x_full - d_x_lp, None, None, None, None, None, None, \
            d_weight_full - d_weight_lp, None, d_bias_full - d_bias_lp, None, None, None


bit_center_batch_norm2d = BitCenterBatchNormFunction.apply
class BitCenterBatchNorm2D(BitCenterLayer, BatchNorm2d):
    """
    This is an implementation of batch norm 2d. It currently
    only support batch norm layer with affine transformation
    """

    def __init__(self,
                 num_features,
                 cast_func=void_cast_func,
                 eps=1e-05,
                 momentum=0.1,
                 n_train_sample=1):
        BitCenterLayer.__init__(
            self,
            fp_functional=F.batch_norm,
            lp_functional=bit_center_batch_norm2d,
            bias=True,
            cast_func=cast_func,
            n_train_sample=n_train_sample)
        BatchNorm2d.__init__(
            self,
            num_features=num_features,
            eps=eps,
            momentum=momentum,
            affine=True,
            track_running_stats=True)
        # set up delta part of affine transform param
        self.setup_bit_center_vars()
        # set up delta part of the running statistics
        self.setup_bit_center_stat()
        self.cuda()
        # initialize bit center delta parameters (the offset part is initialized by the base BatchNorm2D class)
        self.reset_parameters_bit_center()
        # initialize bit center delta running statistics (the offset part is initialized by the base BatchNorm2D class)
        self.reset_stat_bit_center()
        # register backward hook to update grad cache
        self.register_backward_hook(self.update_grad_output_cache)

    def setup_bit_center_stat(self):
        """Create delta/lp copies of the running statistics as Parameters."""
        # to allow our optimizers to properly update the offset part of the
        # running statistics, we need to turn the running stat from buffer to
        # parameter without gradient
        self.running_mean = Parameter(
            self.running_mean.data.clone(), requires_grad=False)
        self.running_var = Parameter(
            self.running_var.data.clone(), requires_grad=False)
        self.running_mean_delta = \
            Parameter(self.cast_func(self.running_mean.data), requires_grad=False)
        # self.running_mean_delta.zero_()
        self.running_mean_lp = \
            Parameter(self.cast_func(self.running_mean.data), requires_grad=False)
        self.running_var_delta = \
            Parameter(self.cast_func(self.running_var.data), requires_grad=False)
        # self.running_var_delta.zero_()
        self.running_var_lp = \
            Parameter(self.cast_func(self.running_var.data), requires_grad=False)

    def reset_stat_bit_center(self):
        """Zero the delta running statistics (lp copies keep their values)."""
        # lp value should inheritate the original lp value
        init.zeros_(self.running_mean_delta)
        # init.zeros_(self.running_mean_lp)
        init.zeros_(self.running_var_delta)
        # init.zeros_(self.running_var_lp)

    def forward_fp(self, input):
        """Full-precision forward; never mutates the true running stats."""
        self.check_or_setup_input_cache(input)
        # as foward fp is used for test or fp steps
        # it should not update the running statistics
        # however, it needs to use batch stat for training
        # and running stat for evaluation
        if self.training:
            # we use a dummy running stat to make sure
            # the true running stat is not updated during
            # fp steps.
            output = self.fp_func(
                input,
                torch.zeros_like(self.running_mean),
                torch.zeros_like(self.running_var),
                self.weight,
                self.bias,
                training=True,
                momentum=self.momentum,
                eps=self.eps)
        else:
            output = self.fp_func(
                input,
                self.running_mean,
                self.running_var,
                self.weight,
                self.bias,
                training=False,
                momentum=self.momentum,
                eps=self.eps)
        self.check_or_setup_grad_cache(output)
        self.update_input_cache(input)
        return output

    def forward_lp(self, input):
        """Low-precision forward using cached offsets and delta stats."""
        input_lp, grad_output_lp = self.get_input_cache_grad_cache(input)
        # note fp func only has training mode
        output = self.lp_func(
            input, input_lp, self.running_mean_delta, self.running_mean_lp,
            self.running_var_delta, self.running_var_lp, grad_output_lp,
            self.weight_delta, self.weight_lp, self.bias_delta, self.bias_lp,
            self.momentum, self.eps)
        self.increment_cache_iter(input)
        return output
import torch
import numpy as np
import torch.nn.functional as F
from torch.nn import Parameter
from halp.layers.sigmoid_layer import BitCenterSigmoid, bit_center_sigmoid
from halp.utils.utils import void_cast_func, single_to_half_det, single_to_half_stoc
from unittest import TestCase
from halp.layers.bit_center_layer_test import TestBitCenterNoParamLayer
class TestBitCenterSigmoidLayer(TestBitCenterNoParamLayer, TestCase):
    """Harness tests for the bit-centering sigmoid layer."""

    def prepare_layer(self,
                      channel_in,
                      w_in,
                      h_in,
                      cast_func=void_cast_func,
                      bias=False,
                      do_double=True,
                      seed=0,
                      batch_size=1,
                      n_train_sample=1):
        """Build a CUDA BitCenterSigmoid layer for the shared harness."""
        layer = BitCenterSigmoid(
            cast_func=cast_func, n_train_sample=n_train_sample)
        # do_double=True sets the layer up for numerical gradient checks;
        # otherwise it is used to check tensor properties / layer behaviors.
        if do_double:
            layer.double()
        layer.cuda()
        return layer
if __name__ == "__main__":
    # BUGFIX: the module only does `from unittest import TestCase`, so the
    # bare `unittest.main()` call raised NameError; import the module here.
    import unittest
    print(torch.__version__)
    unittest.main()
import torch
import numpy as np
from torch.nn import Parameter
from halp.layers.conv_layer import BitCenterConv2D, bit_center_conv2d
from halp.utils.utils import void_cast_func, single_to_half_det, single_to_half_stoc
from unittest import TestCase
from halp.utils.utils import set_seed
from halp.utils.test_utils import HalpTest
from torch.autograd.gradcheck import get_numerical_jacobian, iter_tensors, make_jacobian
from halp.layers.bit_center_layer_test import TestBitCenterLayer
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger = logging.getLogger()
class TestBitCenterConv2DLayer(TestBitCenterLayer, TestCase):
    '''
    Test the functionality of bit centering conv2d layers
    '''

    def get_config(self, type="grad_check"):
        """Return a harness config dict.

        "grad_check": double precision + void cast for numerical gradient
        checks; "fw_bw_proc": half-precision cast for forward/backward
        processing behavior tests.
        """
        config = {}
        # this config can test for padding != 0 and stride > 1 cases
        config["input_w"] = 15
        config["input_h"] = 8
        config["kernel_size"] = (5, 5)
        config["stride"] = 3
        config["padding"] = 2
        if type == "grad_check":
            config["n_train_sample"] = 6
            config["dim_in"] = 4
            config["dim_out"] = 8
            config["bias"] = True
            config["cast_func"] = void_cast_func
            config["do_double"] = True
            config["seed"] = 0
            config["batch_size"] = 6
        elif type == "fw_bw_proc":
            config["n_train_sample"] = 98
            config["dim_in"] = 13
            config["dim_out"] = 31
            config["bias"] = True
            config["cast_func"] = single_to_half_det
            config["do_double"] = False
            config["seed"] = 0
            config["batch_size"] = 33
        else:
            raise Exception("Config type not supported!")
        return config

    def prepare_layer(self,
                      input_w,
                      input_h,
                      kernel_size,
                      stride,
                      padding,
                      n_train_sample,
                      dim_in,
                      dim_out,
                      bias,
                      cast_func=void_cast_func,
                      do_double=True,
                      seed=0,
                      batch_size=1):
        """Build a CUDA BitCenterConv2D; randomize weights when do_double."""
        layer = BitCenterConv2D(
            in_channels=dim_in,
            out_channels=dim_out,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=1,
            groups=1,
            bias=bias,
            cast_func=cast_func,
            n_train_sample=n_train_sample)
        # Note do_double = setup layer for gradient check, otherwise, it is for checking
        # the tensor properties
        self.target_dtype = None
        if do_double:
            layer.double()
            # randomize offset weights, mirror them into the lp copies,
            # and randomize the delta weights for the gradient check
            layer.weight.data.copy_(
                torch.randn(
                    dim_out,
                    dim_in,
                    *layer.kernel_size,
                    dtype=torch.double,
                    requires_grad=False).cuda())
            layer.weight_lp.data.copy_(layer.weight.data)
            layer.weight_delta.data.copy_(
                torch.randn(
                    dim_out,
                    dim_in,
                    *layer.kernel_size,
                    dtype=torch.double,
                    requires_grad=True).cuda())
            if bias:
                layer.bias.data.copy_(
                    torch.randn(
                        dim_out, dtype=torch.double,
                        requires_grad=True).cuda())
                layer.bias_lp.data.copy_(layer.bias.data)
                layer.bias_delta.data.copy_(
                    torch.randn(
                        dim_out, dtype=torch.double,
                        requires_grad=True).cuda())
        layer.cuda()
        return layer

    def get_input(self,
                  input_w,
                  input_h,
                  kernel_size,
                  stride,
                  padding,
                  n_train_sample,
                  dim_in,
                  dim_out,
                  bias,
                  cast_func=void_cast_func,
                  do_double=True,
                  seed=0,
                  batch_size=1):
        """Return ([input_fp], [input_delta]) test tensors on GPU."""
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        self.target_dtype = None
        if do_double:
            input_delta = Parameter(
                torch.randn(
                    n_train_sample,
                    dim_in,
                    input_w,
                    input_h,
                    dtype=torch.double).cuda(),
                requires_grad=True)
            input_fp = Parameter(
                torch.randn(
                    n_train_sample,
                    dim_in,
                    input_w,
                    input_h,
                    dtype=torch.double).cuda(),
                requires_grad=True)
        else:
            # half-precision delta, single-precision offset
            input_delta = Parameter(
                cast_func(
                    torch.randn(
                        n_train_sample,
                        dim_in,
                        input_w,
                        input_h,
                        dtype=torch.double).cuda()),
                requires_grad=True)
            input_fp = Parameter(
                torch.randn(
                    n_train_sample,
                    dim_in,
                    input_w,
                    input_h,
                    dtype=torch.float).cuda(),
                requires_grad=True)
        return [
            input_fp,
        ], [
            input_delta,
        ]
if __name__ == "__main__":
    # BUGFIX: the module never imports the `unittest` module itself, so the
    # bare `unittest.main()` call raised NameError; import the module here.
    import unittest
    print(torch.__version__)
    unittest.main()
import torch
import torch.nn as nn
import torch.nn.init as init
from torch.nn.parameter import Parameter
from torch.autograd import Function
from torch.autograd import Variable
import numpy as np
from halp.utils.utils import void_cast_func, single_to_half_det, single_to_half_stoc, void_func
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger = logging.getLogger()
class BitCenterModule(nn.Module):
    '''
    Every layer and module using bit centering operations
    should inheritate this base module class
    '''

    def __init__(self):
        nn.Module.__init__(self)
        # note the on_site_compute mode is to support the
        # fp compute without input data / output grad caching.
        # The on site compute mode can be turned on via
        # set_on_cite_compute. As the caching memory on each layer
        # is set up after the initialization of each layer (when fp forward is first called)
        # we can aall the set_on_cite_compute after the intialization of the entire model,
        # and before the first forward function call.
        self.on_site_compute = False

    def set_mode(self, do_offset, cache_iter=0):
        """Switch between fp (offset) and lp (delta) mode, recursively."""
        self.do_offset = do_offset
        self.cache_iter = cache_iter
        for name, child in self.named_children():
            if isinstance(child, BitCenterModule):
                child.set_mode(do_offset, cache_iter)
            else:
                logger.warning("None bit centering module can not change mode " \
                    + child.__class__.__name__)

    def set_on_site_compute(self, do_on_site_compute=False):
        """Toggle on-site-compute mode on this module and all children."""
        self.on_site_compute = do_on_site_compute
        for child in self.children():
            if isinstance(child, BitCenterModule):
                child.set_on_site_compute(do_on_site_compute)
            else:
                logger.warning("None bit centering module can not set on_site_compute mode " \
                    + child.__class__.__name__)

    def print_module_types(self):
        """Log the class name of this module and all children (debug aid)."""
        logger.info("module type: " + self.__class__.__name__)
        for child in self.children():
            if isinstance(child, BitCenterModule):
                child.print_module_types()
            else:
                logger.warning("None bit centering module " \
                    + child.__class__.__name__)

    def get_trainable_param_squared_norm(self):
        """Return ||offset + delta||^2 summed over all trainable params."""
        state_dict = self.state_dict()
        param_norm = 0.0
        for name, p in self.named_parameters():
            # skip the delta/lp companions and frozen params; they are
            # folded into their offset parameter below
            if (name.endswith("_delta")) \
                or (name.endswith("_lp")) \
                or (p.requires_grad == False):
                continue
            if name + "_delta" in state_dict.keys():
                p_delta = state_dict[name + "_delta"]
                if self.on_site_compute and p_delta.dtype != torch.float64:
                    # note to test on site compute mode, we use cpu norm
                    # when we use double precision for numerical comparison
                    param_norm += torch.sum(
                        (p.data.type(torch.cuda.FloatTensor) +
                         p_delta.data.type(torch.cuda.FloatTensor))**2).item()
                else:
                    param_norm += torch.sum(
                        (p.data.type(torch.FloatTensor) + p_delta.data.type(
                            torch.FloatTensor))**2).item()
            else:
                if self.on_site_compute:
                    param_norm += torch.sum(
                        p.data.type(torch.cuda.FloatTensor)**2).item()
                else:
                    param_norm += torch.sum(p.data.type(torch.FloatTensor)
                                            **2).item()
        return param_norm

    def get_named_offset_plus_delta_parameters(self):
        """Return [(delta_name, offset_lp + delta)] for all delta params."""
        state_dict = self.state_dict()
        param_list = []
        for name, p in self.named_parameters():
            if name.endswith("_delta"):
                p_lp = state_dict[name.split("_delta")[0] + "_lp"]
                p_full = p_lp + p
                param_list.append((name, p_full))
        return param_list
class BitCenterModuleList(BitCenterModule, nn.ModuleList):
    """nn.ModuleList that participates in bit-centering mode propagation."""

    def __init__(self, modules=None):
        # explicit base-class init order: BitCenterModule first so that
        # bit-center state exists before the list machinery is set up
        BitCenterModule.__init__(self)
        nn.ModuleList.__init__(self, modules)
class BitCenterSequential(BitCenterModule, nn.Sequential):
    """nn.Sequential that participates in bit-centering mode propagation."""

    def __init__(self, *modules):
        # explicit base-class init order: BitCenterModule first so that
        # bit-center state exists before the sequential machinery is set up
        BitCenterModule.__init__(self)
        nn.Sequential.__init__(self, *modules)
class BitCenterLayer(BitCenterModule):
    '''
    Every bit center style layer should inheritate this base
    class. It provides common behavior of forward and backward
    caching behavior. The current implementation directly
    support the construction of layers with a single weight and
    a single bias variable, like conv and linear layer. For
    other layers such as the cross entropy layer, they can be
    implemented by overwriting some of the member functions.
    For an exampla instantiation of bit center layers,
    please refer to the BitCenterLinear layer in bit_center_layer.py
    '''

    def __init__(self,
                 fp_functional=void_func,
                 lp_functional=void_func,
                 bias=True,
                 cast_func=void_cast_func,
                 n_train_sample=1):
        BitCenterModule.__init__(self)
        self.cast_func = cast_func
        # register the fp and lp forward function
        self.fp_func = fp_functional
        self.lp_func = lp_functional
        # input cache
        self.input_cache = None
        self.grad_output_cache = None
        self.cache_iter = 0
        self.grad_cache_iter = 0
        self.n_train_sample = n_train_sample
        # starting from fp mode
        self.set_mode(do_offset=True)

    def setup_bit_center_vars(self):
        """Create delta (trainable) and lp (frozen offset) parameter copies."""
        self.weight_delta = Parameter(
            self.cast_func(self.weight.data), requires_grad=True)
        self.weight_lp = Parameter(
            self.cast_func(self.weight.data), requires_grad=False)
        self.set_mode(do_offset=True)
        if self.bias is not None:
            self.bias_delta = Parameter(
                self.cast_func(self.bias.data), requires_grad=True)
            self.bias_lp = Parameter(
                self.cast_func(self.bias.data), requires_grad=False)
        else:
            self.register_parameter('bias_delta', None)
            self.register_parameter('bias_lp', None)

    def reset_parameters_bit_center(self):
        """Zero the delta parameters (offsets keep their fp init)."""
        init.zeros_(self.weight_delta)
        if self.bias is not None:
            init.zeros_(self.bias_delta)

    def setup_cache(self, input_all):
        """Allocate cache tensor(s) matching one input (or list of inputs)."""
        if isinstance(input_all, list) or isinstance(input_all, tuple):
            # in case there are multiple input to this layer
            return [self.setup_cache_single(x) for x in input_all]
        else:
            return self.setup_cache_single(input_all)

    def setup_cache_single(self, input):
        # the cache is set up when the first minibatch forward is done.
        # here we assume the first dimension of input blob indicates the size of minibatch
        if len(list(input.size())) == 0:
            # this is the scalar output case
            # loss layers need this to be consistent with the setup of bit center layers
            cache_shape = [1, 1]
        else:
            cache_shape = list(input.size())
        if not self.on_site_compute:
            # for on site compute we only need to set up the cache for one minibatch
            cache_shape[0] = self.n_train_sample
        if input.dtype == torch.long:
            # if the input is int type like for embedding, we do not cast it.
            cache = Variable(torch.zeros(cache_shape).type(input.dtype)).cpu()
        else:
            cache = self.cast_func(
                Variable(torch.zeros(cache_shape).type(input.dtype))).cpu()
        if self.on_site_compute:
            # on-site caches live on GPU; full caches stay on CPU
            cache = cache.cuda()
        return cache

    def get_input_grad_for_test(self, input):
        # input is the grad with respect to input here
        if isinstance(self.input_cache, list) or isinstance(self.input_cache, tuple):
            # if there are multiple input, we will need to test all the grad
            self.input_grad_for_test = input[:len(self.input_cache)]
        else:
            self.input_grad_for_test = input[0]

    def update_grad_output_cache(self, self1, input, output):
        """Backward hook: during fp mode, cache the output gradient."""
        # use duplicated self to adapt to the pytorch API requirement
        # as this is a class member function.
        # Specific layer might need to update this function. This is
        # because the returned gradient is not in the order as shown
        # in the Python API, e.g. the linear layer
        if self.do_offset:
            if self.on_site_compute:
                self.grad_output_cache = \
                    self.update_single_cache_on_site_compute(
                        self.grad_output_cache, output[0])
                self.grad_cache_iter = 0
                self.output_size = output[0].size()
            else:
                self.grad_output_cache[self.grad_cache_iter:min(
                    self.grad_cache_iter +
                    output[0].size()[0], self.n_train_sample)].data.copy_(
                        output[0].cpu())
                self.grad_cache_iter = (self.grad_cache_iter + output[0].size(
                    0)) % self.n_train_sample
        self.get_input_grad_for_test(input)

    def update_single_cache_on_site_compute(self, cache, input):
        """Refresh (and grow if needed) the single-minibatch on-site cache."""
        # For vaiable length input, such as for lstm models,
        # the cache might need to be larger than the already setup one.
        # currently we only support this for the on site compute mode.
        assert self.on_site_compute
        assert len(cache.size()) == len(input.size())
        if cache.size() < input.size():
            cache = self.setup_cache_single(input)
        # the cache can also be larger than what is currently need
        # so we need to get the section in cache to update
        cache_section = cache
        for i, dim in enumerate(input.size()):
            cache_section = torch.narrow(cache_section, dim=i, start=0, length=dim)
        cache_section.data.copy_(input)
        return cache

    def get_single_cache_on_site_compute(self, cache, input_size):
        """Return the contiguous section of the on-site cache of input_size."""
        assert self.on_site_compute
        assert len(cache.size()) == len(input_size)
        assert cache.size() >= input_size
        if cache.size() != input_size:
            for i, dim in enumerate(input_size):
                cache = torch.narrow(cache, dim=i, start=0, length=dim)
            cache = cache.contiguous()
        return cache

    def update_input_cache(self, input):
        """During fp mode, copy the current minibatch input into the cache."""
        if self.do_offset:
            if self.on_site_compute:
                if isinstance(input, list) or isinstance(input, tuple):
                    for i, (cache, val) in enumerate(zip(self.input_cache, input)):
                        self.input_cache[i] = \
                            self.update_single_cache_on_site_compute(cache, val)
                else:
                    self.input_cache = self.update_single_cache_on_site_compute(self.input_cache, input)
                self.cache_iter = 0
            else:
                if isinstance(input, list) or isinstance(input, tuple):
                    for cache, val in zip(self.input_cache, input):
                        cache[self.cache_iter:min(self.cache_iter +
                                                  val.size()[0], self.
                                                  n_train_sample)].data.copy_(val)
                    self.cache_iter = (
                        self.cache_iter + input[0].size(0)) % self.n_train_sample
                else:
                    self.input_cache[self.cache_iter:min(
                        self.cache_iter +
                        input.size()[0], self.n_train_sample)].data.copy_(input)
                    self.cache_iter = (
                        self.cache_iter + input.size(0)) % self.n_train_sample

    def check_or_setup_input_cache(self, input):
        # lazily allocate the input cache on the first fp forward
        if self.input_cache is None:
            self.input_cache = self.setup_cache(input)
            self.cache_iter = 0

    def check_or_setup_grad_cache(self, output):
        # lazily allocate the output-grad cache on the first fp forward
        if self.grad_output_cache is None:
            self.grad_output_cache = self.setup_cache(output)
            self.grad_cache_iter = 0

    def get_input_cache_grad_cache(self, input):
        """Fetch the cached offset input and offset output gradient (on GPU)."""
        if self.on_site_compute:
            assert self.grad_cache_iter == 0 or self.cache_iter == 0
        if isinstance(input, list) or isinstance(input, tuple):
            # if input is a list for this layer, the retrieved cache value should also be a list
            assert isinstance(self.input_cache, list) or isinstance(
                self.input_cache, tuple)
            if self.on_site_compute:
                input_lp = [
                    self.get_single_cache_on_site_compute(x, y.size()).cuda()
                    for x, y in zip(self.input_cache, input)
                ]
                grad_output_lp = self.get_single_cache_on_site_compute(
                    self.grad_output_cache, self.output_size).cuda()
            else:
                input_lp = [
                    x[self.cache_iter:(self.cache_iter + y.size(0))].cuda()
                    for x, y in zip(self.input_cache, input)
                ]
                grad_output_lp = \
                    self.grad_output_cache[self.grad_cache_iter:(self.grad_cache_iter + input[0].size(0))].cuda()
        else:
            if self.on_site_compute:
                input_lp = self.get_single_cache_on_site_compute(
                    self.input_cache, input.size()).cuda()
                # note the output_size member variable is setup when
                # gradient hook update_grad_output_cache is called
                grad_output_lp = self.get_single_cache_on_site_compute(
                    self.grad_output_cache, self.output_size).cuda()
            else:
                input_lp = self.input_cache[self.cache_iter:(
                    self.cache_iter + input.size(0))].cuda()
                grad_output_lp = \
                    self.grad_output_cache[self.grad_cache_iter:(self.grad_cache_iter + input.size(0))].cuda()
        return input_lp, grad_output_lp

    def increment_cache_iter(self, input):
        """Advance both cache iterators by one minibatch (no-op on-site)."""
        # note for on_site_compute mode, you should always keep cache iter at 0
        if not self.on_site_compute:
            if isinstance(input, list) or isinstance(input, tuple):
                n_input_sample = input[0].size(0)
            else:
                n_input_sample = input.size(0)
            self.cache_iter = (
                self.cache_iter + n_input_sample) % self.n_train_sample
            self.grad_cache_iter = (
                self.grad_cache_iter + n_input_sample) % self.n_train_sample

    def forward_fp(self, input):
        """Full-precision forward: run fp_func and refresh the caches."""
        self.check_or_setup_input_cache(input)
        output = self.fp_func(input, self.weight, self.bias)
        self.check_or_setup_grad_cache(output)
        self.update_input_cache(input)
        return output

    def forward_lp(self, input):
        """Low-precision forward: run lp_func on delta input plus caches."""
        # Need to test do_offset mode whether gradient is updated properly
        input_lp, grad_output_lp = self.get_input_cache_grad_cache(input)
        input_delta = input
        weight_lp = self.weight_lp
        weight_delta = self.weight_delta
        bias_lp = self.bias_lp
        bias_delta = self.bias_delta
        output = self.lp_func(input_delta, input_lp, grad_output_lp,
                              weight_delta, weight_lp, bias_delta, bias_lp)
        self.increment_cache_iter(input)
        return output

    def forward(self, input):
        """Dispatch to fp or lp forward depending on the current mode."""
        # Need to test do_offset mode whether gradient is updated properly
        if self.do_offset:
            return self.forward_fp(input)
        else:
            return self.forward_lp(input)
class BitCenterActivation(BitCenterLayer):
    '''
    Base class for activation layers like Relu and tanh.
    Activation layers carry no weights, so only the input / grad-output
    caches of BitCenterLayer are used (hence bias=False below) and the lp
    functional takes no parameter tensors.
    '''
    def __init__(self,
                 fp_functional,
                 lp_functional,
                 cast_func=void_cast_func,
                 n_train_sample=1):
        BitCenterLayer.__init__(
            self,
            fp_functional=fp_functional,
            lp_functional=lp_functional,
            cast_func=cast_func,
            bias=False,
            n_train_sample=n_train_sample)
    def forward_lp(self, input):
        """Delta pass: run lp functional on (delta, cached lp input, cached lp grad)."""
        # Need to test do_offset mode whether gradient is updated properly
        input_lp, grad_output_lp = self.get_input_cache_grad_cache(input)
        input_delta = input
        output = self.lp_func(input_delta, input_lp, grad_output_lp)
        self.increment_cache_iter(input)
        return output
    def forward_fp(self, input):
        """Offset pass: plain fp functional plus cache bookkeeping."""
        self.check_or_setup_input_cache(input)
        output = self.fp_func(input)
        self.check_or_setup_grad_cache(output)
        self.update_input_cache(input)
        return output
| halp-master | halp/layers/bit_center_layer.py |
import torch
import numpy as np
from torch.nn import Parameter
from halp.layers.linear_layer import BitCenterLinear, bit_center_linear
from halp.utils.utils import void_cast_func, single_to_half_det, single_to_half_stoc
from unittest import TestCase
from halp.utils.test_utils import HalpTest
from torch.autograd.gradcheck import get_numerical_jacobian, iter_tensors, make_jacobian
from halp.layers.bit_center_layer_test import TestBitCenterLayer
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger = logging.getLogger()
class TestBitCenterLinearLayer(TestBitCenterLayer, TestCase):
    '''
    Test the functionality of bit centering linear layers
    '''
    def get_config(self, type="grad_check"):
        """Return a config dict for the requested test flavor.

        "grad_check" uses double precision + void cast so finite-difference
        gradient checks are numerically meaningful; "fw_bw_proc" uses half
        precision casts to check tensor properties during fw/bw processing.
        """
        config = {}
        if type == "grad_check":
            config["n_train_sample"] = 35
            config["dim_in"] = 17
            config["dim_out"] = 24
            config["bias"] = True
            config["cast_func"] = void_cast_func
            config["do_double"] = True
            config["seed"] = 0
            config["batch_size"] = 35
        elif type == "fw_bw_proc":
            config["n_train_sample"] = 98
            config["dim_in"] = 13
            config["dim_out"] = 31
            config["bias"] = True
            config["cast_func"] = single_to_half_det
            config["do_double"] = False
            config["seed"] = 0
            config["batch_size"] = 33
        else:
            raise Exception("Config type not supported!")
        return config
    def prepare_layer(self,
                      n_train_sample,
                      dim_in,
                      dim_out,
                      bias=False,
                      cast_func=void_cast_func,
                      do_double=True,
                      seed=0,
                      batch_size=1):
        """Build a CUDA BitCenterLinear layer with randomized parameters."""
        layer = BitCenterLinear(
            in_features=dim_in,
            out_features=dim_out,
            bias=bias,
            cast_func=cast_func,
            n_train_sample=n_train_sample)
        # Note do_double = setup layer for gradient check, otherwise, it is for checking
        # the tensor properties
        self.target_dtype = None
        if do_double:
            layer.double()
            # weight_lp mirrors weight; weight_delta gets independent values
            layer.weight.data.copy_(
                torch.randn(
                    dim_out, dim_in, dtype=torch.double,
                    requires_grad=False).cuda())
            layer.weight_lp.data.copy_(layer.weight.data)
            layer.weight_delta.data.copy_(
                torch.randn(
                    dim_out, dim_in, dtype=torch.double,
                    requires_grad=True).cuda())
            if bias:
                layer.bias.data.copy_(
                    torch.randn(
                        dim_out, dtype=torch.double,
                        requires_grad=True).cuda())
                layer.bias_lp.data.copy_(layer.bias.data)
                layer.bias_delta.data.copy_(
                    torch.randn(
                        dim_out, dtype=torch.double,
                        requires_grad=True).cuda())
        layer.cuda()
        return layer
    def get_input(self,
                  n_train_sample,
                  dim_in,
                  dim_out,
                  bias,
                  cast_func=void_cast_func,
                  do_double=True,
                  seed=0,
                  batch_size=1):
        """Create ([input_fp], [input_delta]) with a fixed RNG seed."""
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        self.target_dtype = None
        if do_double:
            input_delta = Parameter(
                torch.randn(n_train_sample, dim_in, dtype=torch.double).cuda(),
                requires_grad=True)
            input_fp = Parameter(
                torch.randn(n_train_sample, dim_in, dtype=torch.double).cuda(),
                requires_grad=True)
        else:
            input_delta = Parameter(
                cast_func(
                    torch.randn(n_train_sample, dim_in,
                                dtype=torch.double).cuda()),
                requires_grad=True)
            input_fp = Parameter(
                torch.randn(n_train_sample, dim_in, dtype=torch.float).cuda(),
                requires_grad=True)
        return [
            input_fp,
        ], [
            input_delta,
        ]
if __name__ == "__main__":
    # Bug fix: only `from unittest import TestCase` is imported at module
    # scope, so the bare name `unittest` was unbound here (NameError).
    import unittest
    print(torch.__version__)
    unittest.main()
import torch
import torch.nn as nn
from torch.nn import ReLU
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch.nn.modules.utils import _pair
from torch.autograd import Function
from torch.autograd import Variable
import numpy as np
from halp.utils.utils import void_cast_func, single_to_half_det, single_to_half_stoc
from halp.layers.bit_center_layer import BitCenterLayer
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger = logging.getLogger()
class BitCenterMaxPool2DFunction(Function):
    """Autograd function for bit-centering 2D max pooling.

    forward returns pool(x_lp + x_delta) - pool(x_lp); backward routes the
    full and lp output gradients back through their respective argmax
    indices via max_unpool2d and returns the difference as the delta grad.
    """
    @staticmethod
    def forward(ctx,
                input_delta,
                input_lp,
                grad_output_lp,
                kernel_size,
                stride=None,
                padding=0):
        input_full = input_lp + input_delta
        output_full, indices_full = F.max_pool2d(
            input_full,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            return_indices=True)
        # The following line is just for simulation, the output and indices can be cached
        output_lp, indices_lp = F.max_pool2d(
            input_lp,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            return_indices=True)
        ctx.save_for_backward(grad_output_lp, indices_full, indices_lp)
        ctx.hyperparam = (kernel_size, stride, padding, input_full.shape)
        if type(padding) is list:
            raise Exception("tuple based padding ")
        return output_full - output_lp
    @staticmethod
    def backward(ctx, grad_output):
        kernel_size, stride, padding, input_shape = ctx.hyperparam
        grad_output_lp, indices_full, indices_lp = ctx.saved_tensors
        # note here pytorch max pool layer set stride = kernel size if not specified
        if (stride != kernel_size) or (padding != [0, 0] and padding != (0, 0)):
            raise Exception("stride and padding are not fully supported yet!")
        grad_output_full = grad_output + grad_output_lp
        # unpooling scatters each output gradient to the argmax location
        grad_input_full = F.max_unpool2d(
            grad_output_full,
            indices_full,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            output_size=input_shape)
        grad_input_lp = F.max_unpool2d(
            grad_output_lp,
            indices_lp,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            output_size=input_shape)
        grad_input_delta = grad_input_full - grad_input_lp
        # cached / non-tensor forward arguments receive no gradient
        grad_input_lp = None
        grad_grad_output_lp = None
        grad_kernel_size = None
        grad_stride = None
        grad_padding = None
        return grad_input_delta, grad_input_lp, grad_grad_output_lp, grad_kernel_size, grad_stride, grad_padding
class BitCenterAvgPool2DFunction(Function):
    """Autograd function for bit-centering 2D average pooling.

    forward returns avg_pool(x_lp + x_delta) - avg_pool(x_lp); backward
    expands each output gradient uniformly over its pooling window and
    divides by the window area (the gradient of a mean).
    """
    @staticmethod
    def forward(ctx,
                input_delta,
                input_lp,
                grad_output_lp,
                kernel_size,
                stride=None,
                padding=0):
        input_full = input_lp + input_delta
        output_full = F.avg_pool2d(
            input_full,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding)
        # The following line is just for simulation, the output and indices can be cached
        output_lp = F.avg_pool2d(
            input_lp, kernel_size=kernel_size, stride=stride, padding=padding)
        ctx.save_for_backward(grad_output_lp)
        ctx.hyperparam = (kernel_size, stride, padding, input_full.shape)
        if type(padding) is list:
            raise Exception("tuple based padding ")
        return output_full - output_lp
    @staticmethod
    def backward(ctx, grad_output):
        # note here pytorch avg pool layer set stride = kernel size if not specified
        kernel_size, stride, padding, input_shape = ctx.hyperparam
        if (stride != kernel_size):
            raise Exception(
                "stride is only supported when it is equal to kernel size!")
        grad_output_lp = ctx.saved_tensors[0]
        grad_output_full = grad_output + grad_output_lp
        def get_avg_pool2d_grad(grad_output, kernel_size, input_shape):
            # expand every grad element across its kernel window, then
            # normalize by the window area
            shape = list(grad_output.shape)
            # perform numpy style repeat to have repeated elements contiguously
            grad_input = grad_output.view(*shape, 1).contiguous()\
                .expand(*shape, kernel_size[1]).contiguous()\
                .view(*shape[:-1], shape[-1]*kernel_size[-1])
            shape = list(grad_input.shape)
            grad_input = grad_input.view(*shape[:-1], 1, shape[-1]).contiguous() \
                .expand(*shape[:-1], kernel_size[0], shape[-1]).contiguous()\
                .view(*shape[:-2], shape[-2] * kernel_size[0], shape[-1])
            padded_input_shape = list(input_shape).copy()
            if padding != (0, 0) and padding != [0, 0]:
                padded_input_shape[-2] += 2 * padding[-2]
                padded_input_shape[-1] += 2 * padding[-1]
            # recover to padded input shape
            if list(grad_input.shape) != list(padded_input_shape):
                grad_input_tmp = grad_input
                grad_input = torch.zeros(
                    padded_input_shape,
                    dtype=grad_output.dtype,
                    device=grad_output.device)
                grad_input[:, :, :grad_input_tmp.size(2), :grad_input_tmp.
                           size(3)] = grad_input_tmp
            grad_input /= torch.tensor([kernel_size[0] * kernel_size[1]],
                                       dtype=grad_input.dtype,
                                       device=grad_input.device)
            # recover back to unpadded shape
            if padding != (0, 0) and padding != [0, 0]:
                return grad_input[:, :, padding[0]:(-padding[0]), padding[1]:(
                    -padding[1])]
            else:
                return grad_input
        grad_input_full = get_avg_pool2d_grad(grad_output_full, kernel_size,
                                              input_shape)
        grad_input_lp = get_avg_pool2d_grad(grad_output_lp, kernel_size,
                                            input_shape)
        grad_input_delta = grad_input_full - grad_input_lp
        # cached / non-tensor forward arguments receive no gradient
        grad_input_lp = None
        grad_grad_output_lp = None
        grad_kernel_size = None
        grad_stride = None
        grad_padding = None
        return grad_input_delta, grad_input_lp, grad_grad_output_lp, grad_kernel_size, grad_stride, grad_padding
# Functional aliases (torch.autograd.Function.apply) used by the layer classes.
bit_center_max_pool2d = BitCenterMaxPool2DFunction.apply
bit_center_avg_pool2d = BitCenterAvgPool2DFunction.apply
class BitCenterPool2D(BitCenterLayer):
    """Shared fp/lp forward logic for bit-centering 2D pooling layers.

    Subclasses supply fp_func / lp_func and the pooling hyperparameters
    (kernel_size, stride, padding) via their nn.*Pool2d base class.
    """
    def forward_lp(self, input):
        # delta pass: combine delta input with cached lp input / lp grad
        input_lp, grad_output_lp = self.get_input_cache_grad_cache(input)
        input_delta = input
        output = self.lp_func(input_delta, input_lp, grad_output_lp,
                              self.kernel_size, self.stride, self.padding)
        self.increment_cache_iter(input)
        return output
    def forward_fp(self, input):
        # offset pass: plain pooling functional plus cache bookkeeping
        self.check_or_setup_input_cache(input)
        output = self.fp_func(
            input,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding)
        self.check_or_setup_grad_cache(output)
        self.update_input_cache(input)
        return output
class BitCenterMaxPool2D(BitCenterPool2D, nn.MaxPool2d):
    """Bit-centering counterpart of nn.MaxPool2d."""
    def __init__(self,
                 kernel_size,
                 stride=None,
                 padding=0,
                 cast_func=void_cast_func,
                 n_train_sample=1):
        BitCenterLayer.__init__(
            self,
            fp_functional=F.max_pool2d,
            lp_functional=bit_center_max_pool2d,
            cast_func=cast_func,
            n_train_sample=n_train_sample)
        # mirror nn.MaxPool2d's default: stride falls back to kernel_size
        if stride is None:
            stride = kernel_size
        nn.MaxPool2d.__init__(
            self,
            kernel_size=_pair(kernel_size),
            stride=_pair(stride),
            padding=_pair(padding))
        # cache the lp output gradient on every backward pass
        self.register_backward_hook(self.update_grad_output_cache)
class BitCenterAvgPool2D(BitCenterPool2D, nn.AvgPool2d):
    """Bit-centering counterpart of nn.AvgPool2d."""
    def __init__(self,
                 kernel_size,
                 stride=None,
                 padding=0,
                 cast_func=void_cast_func,
                 n_train_sample=1):
        BitCenterLayer.__init__(
            self,
            fp_functional=F.avg_pool2d,
            lp_functional=bit_center_avg_pool2d,
            cast_func=cast_func,
            n_train_sample=n_train_sample)
        # mirror nn.AvgPool2d's default: stride falls back to kernel_size
        if stride is None:
            stride = kernel_size
        nn.AvgPool2d.__init__(
            self,
            kernel_size=_pair(kernel_size),
            stride=_pair(stride),
            padding=_pair(padding))
        # cache the lp output gradient on every backward pass
        self.register_backward_hook(self.update_grad_output_cache)
| halp-master | halp/layers/pool_layer.py |
import torch
import torch.nn as nn
from torch.nn import Embedding
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch.autograd import Function
from torch.autograd import Variable
import numpy as np
from halp.utils.utils import void_cast_func, single_to_half_det, single_to_half_stoc
from halp.layers.bit_center_layer import BitCenterLayer
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger = logging.getLogger()
class BitCenterEmbeddingFunction(Function):
    """Autograd function for the delta pass of a bit-centering embedding.

    The embedding is assumed to sit at the bottom of the network, so no
    gradient with respect to the (integer) input indices is produced.
    """

    @staticmethod
    def forward(ctx, input, weight_delta):
        ctx.save_for_backward(input)
        # only the weight shape is needed by backward; stash it on ctx
        ctx.hyperparam = weight_delta.size()
        return weight_delta[input]

    @staticmethod
    def backward(ctx, grad_output):
        indices, = ctx.saved_tensors
        weight_shape = ctx.hyperparam
        # scatter-add each output-gradient row into its embedding row
        grad_weight = grad_output.new_zeros(weight_shape)
        flat_idx = indices.reshape(-1)
        flat_grad = grad_output.reshape(-1, grad_output.size(-1))
        grad_weight.index_add_(0, flat_idx, flat_grad)
        # no gradient w.r.t. the integer indices
        return None, grad_weight
# Functional alias following the torch.autograd.Function.apply convention.
bit_center_embedding = BitCenterEmbeddingFunction.apply
class BitCenterEmbedding(BitCenterLayer, Embedding):
    """Bit-centering drop-in replacement for nn.Embedding."""
    # the bit center embedding assumes same length for each
    # input sample.
    def __init__(self,
                 num_embeddings,
                 embedding_dim,
                 cast_func=void_cast_func,
                 n_train_sample=1):
        BitCenterLayer.__init__(
            self,
            fp_functional=F.embedding,
            lp_functional=bit_center_embedding,
            cast_func=cast_func,
            bias=False,  # embedding layer only has a embedding matrix
            n_train_sample=n_train_sample)
        Embedding.__init__(self, num_embeddings, embedding_dim)
        self.bias = None  # dummy variable to adapt to the interface of bit center layers
        # weight_delta is the delta tensor in the algorithm while weight_lp is the cached
        # lp version of weight offset, they are setup in setup_bit_center_vars function
        self.setup_bit_center_vars()
        # make sure the variables are on gpu as fp16 is only supported on gpu
        self.cuda()
        self.reset_parameters_bit_center()
        # register backward hook to update gradient caches for output grad
        self.register_backward_hook(self.update_grad_output_cache)
    def forward_fp(self, input):
        """Offset pass: standard F.embedding lookup with cache bookkeeping."""
        self.check_or_setup_input_cache(input)
        output = self.fp_func(input, self.weight)
        self.check_or_setup_grad_cache(output)
        self.update_input_cache(input)
        return output
    def forward_lp(self, input):
        """Delta pass: look up weight_delta at the cached integer indices."""
        # Need to test do_offset mode whether gradient is updated properly
        input_lp, _ = self.get_input_cache_grad_cache(input)
        input_delta = input_lp  # embedding assumes same index input at fp and lp steps
        weight_delta = self.weight_delta
        output = self.lp_func(input_delta, weight_delta)
        self.increment_cache_iter(input)
        return output
import torch
import torch.nn as nn
from torch.nn import Linear
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch.autograd import Function
from torch.autograd import Variable
import numpy as np
from halp.utils.utils import void_cast_func, single_to_half_det, single_to_half_stoc
from halp.layers.bit_center_layer import BitCenterLayer
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger = logging.getLogger()
class BitCenterLinearFunction(Function):
    """Autograd function implementing the bit-centering linear transform.

    Naming convention: ``*_lp`` tensors are cached low-precision copies of
    the offset model, ``*_delta`` tensors are the trainable low-precision
    residuals, and ``output_grad_lp`` is the cached gradient w.r.t. the
    layer output — it is only needed by backward, so it rides along in ctx.
    """

    @staticmethod
    def forward(ctx,
                input_delta,
                input_lp,
                output_grad_lp,
                weight_delta,
                weight_lp,
                bias_delta=None,
                bias_lp=None):
        # stash everything backward needs, including the cached output grad
        ctx.save_for_backward(input_lp, input_delta, output_grad_lp, weight_lp,
                              weight_delta, bias_lp, bias_delta)
        # delta-linear forward: x_d W_lp^T + (x_lp + x_d) W_delta^T [+ b_delta]
        result = input_delta.mm(weight_lp.t())
        result = result + (input_lp + input_delta).mm(weight_delta.t())
        if (bias_delta is not None) and (bias_lp is not None):
            # delta bias broadcasts over the minibatch dimension
            result = result + bias_delta
        return result

    @staticmethod
    def backward(ctx, grad_output):
        (input_lp, input_delta, output_grad_lp, weight_lp, weight_delta,
         bias_lp, bias_delta) = ctx.saved_tensors
        # gradient w.r.t. the delta input
        grad_input_delta = grad_output.mm(weight_lp + weight_delta) \
            + output_grad_lp.mm(weight_delta)
        # gradient w.r.t. the delta weight
        grad_weight_delta = grad_output.t().mm(input_lp + input_delta) \
            + output_grad_lp.t().mm(input_delta)
        if (bias_delta is not None) and (bias_lp is not None):
            grad_bias_delta = grad_output.sum(0)
        else:
            grad_bias_delta = None
        # lp/offset tensors and the cached grad receive no gradient updates
        return (grad_input_delta, None, None, grad_weight_delta, None,
                grad_bias_delta, None)
# Functional alias following the torch.autograd.Function.apply convention.
bit_center_linear = BitCenterLinearFunction.apply
class BitCenterLinear(BitCenterLayer, Linear):
    """Bit-centering drop-in replacement for nn.Linear."""
    def __init__(self,
                 in_features,
                 out_features,
                 bias=True,
                 cast_func=void_cast_func,
                 n_train_sample=1):
        BitCenterLayer.__init__(
            self,
            fp_functional=F.linear,
            lp_functional=bit_center_linear,
            bias=bias,
            cast_func=cast_func,
            n_train_sample=n_train_sample)
        Linear.__init__(
            self,
            in_features=in_features,
            out_features=out_features,
            bias=bias)
        # weight_delta is the delta tensor in the algorithm while weight_lp is the cached
        # lp version of weight offset
        self.setup_bit_center_vars()
        # make sure the variables are on gpu as fp16 is only supported on gpu
        self.cuda()
        self.reset_parameters_bit_center()
        # register backward hook to update gradient caches for output grad
        self.register_backward_hook(self.update_grad_output_cache)
    def update_grad_output_cache(self, self1, input, output):
        """Backward hook caching the lp output gradient during the fp pass."""
        # use duplicated self to adapt to the pytorch API requirement
        # as this is a class member function
        if self.do_offset:
            if self.on_site_compute:
                self.grad_output_cache = \
                    self.update_single_cache_on_site_compute(
                        self.grad_output_cache, output[0])
                self.grad_cache_iter = 0
                self.output_size = output[0].size()
            else:
                self.grad_output_cache[self.grad_cache_iter:min(
                    self.grad_cache_iter +
                    output[0].size()[0], self.n_train_sample)].data.copy_(
                        self.cast_func(output[0].cpu()))
                self.grad_cache_iter = (
                    self.grad_cache_iter + output[0].size(0)) % self.n_train_sample
            # we use the following variable only for test purpose, we want to be able to access
            # the gradeint value wrt input in the outside world. For lp mode, it is grad_input_delta
            # for fp mode, it is grad_input
            # TODO: update if pytorch stable version fixes this:
            # The following branch is due to the layer specific behavior of
            # input argument to the backward hook.
            # Here we hard code the order of tensor in the input list (this is layer specific)
            if self.bias is not None:
                self.input_grad_for_test = input[1]
            else:
                self.input_grad_for_test = input[0]
        else:
            self.input_grad_for_test = input[0]
import torch
import numpy as np
from torch.nn import Parameter
from torch.autograd import Variable
from halp.layers.cross_entropy import BitCenterCrossEntropy
from halp.utils.utils import void_cast_func, single_to_half_det, single_to_half_stoc
from unittest import TestCase
from halp.utils.test_utils import HalpTest
from torch.autograd.gradcheck import get_numerical_jacobian, iter_tensors, make_jacobian
from halp.layers.bit_center_layer_test import TestBitCenterLayer
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger = logging.getLogger()
class TestBitCenterCrossEntropyLayer(TestBitCenterLayer, TestCase):
    '''
    Test the functionality of bit centering cross entropy layers
    '''
    def get_config(self, type="grad_check"):
        """Return a config dict: double + void cast for gradient checks,
        half-precision casts for forward/backward processing checks."""
        config = {}
        if type == "grad_check":
            config["n_train_sample"] = 35
            config["dim_in"] = 17
            config["dim_out"] = 24
            config["bias"] = True
            config["cast_func"] = void_cast_func
            config["do_double"] = True
            config["seed"] = 0
            config["batch_size"] = 35
        elif type == "fw_bw_proc":
            config["n_train_sample"] = 98
            config["dim_in"] = 13
            config["dim_out"] = 31
            config["bias"] = True
            config["cast_func"] = single_to_half_det
            config["do_double"] = False
            config["seed"] = 0
            config["batch_size"] = 33
        else:
            raise Exception("Config type not supported!")
        return config
    def prepare_layer(self,
                      n_train_sample,
                      dim_in,
                      dim_out,
                      bias=None,
                      cast_func=void_cast_func,
                      do_double=True,
                      seed=0,
                      batch_size=1):
        # note the argument dim_in, dim_out, bias function are dummy, we use it to adapt to the test API interface
        layer = BitCenterCrossEntropy(
            cast_func=cast_func, n_train_sample=n_train_sample)
        if do_double:
            layer.double()
        layer.cuda()
        return layer
    def get_input(self,
                  n_train_sample,
                  dim_in,
                  dim_out,
                  bias=None,
                  cast_func=void_cast_func,
                  do_double=True,
                  seed=0,
                  batch_size=1):
        """Create ([input_fp, target], [input_delta, target]) with fixed seed."""
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        self.target_dtype = torch.long
        if do_double:
            input_delta = Parameter(
                torch.randn(n_train_sample, dim_in, dtype=torch.double).cuda(),
                requires_grad=True)
            input_fp = Parameter(
                torch.randn(n_train_sample, dim_in, dtype=torch.double).cuda(),
                requires_grad=True)
        else:
            input_delta = Parameter(
                cast_func(
                    torch.randn(n_train_sample, dim_in,
                                dtype=torch.double).cuda()),
                requires_grad=True)
            input_fp = Parameter(
                torch.randn(n_train_sample, dim_in, dtype=torch.float).cuda(),
                requires_grad=True)
        # if this layer need label input
        target = torch.LongTensor(n_train_sample).random_(dim_in).cuda()
        return [input_fp, target], [input_delta, target]
    def check_layer_param_and_cache(self, layer):
        """Cross entropy has no weights; only the caches are checked."""
        t_list = [(layer.input_cache, torch.half, False, False),
                  (layer.grad_output_cache, torch.half, False, False)]
        self.CheckLayerTensorProperty(t_list)
    def get_analytical_grad(self, layer, input_fp, input_delta):
        """Analytical grad = fp-pass grad + lp-pass grad at the same point."""
        layer.set_mode(do_offset=True)
        grad_list = []
        output = layer(*input_fp)
        output.backward()
        grad_input_fp = input_fp[0].grad.clone()
        layer.set_mode(do_offset=False)
        loss_lp = output_lp = layer(*input_delta)
        loss_lp.backward()
        grad_input_delta = input_delta[0].grad.clone()
        input_grad = grad_input_fp + grad_input_delta
        grad_list.append(input_grad)
        # note the output_lp is the full loss at the input_offset + input_delta
        return output_lp, grad_list
    def get_numerical_grad(self, layer, input_fp, input_delta, perturb_eps):
        """Finite-difference gradient at the combined point (fp + delta)."""
        # get numerical finite difference
        layer.set_mode(do_offset=True)
        def get_loss(input):
            loss = output = layer(*input)
            return loss
        grad_list = []
        layer.set_mode(do_offset=True)
        input = []
        for i, (x, y) in enumerate(zip(input_fp, input_delta)):
            if i != len(input_fp) - 1:
                input.append(x + y)
            else:
                # the last element is the integer target; fp and delta copies must match
                np.testing.assert_array_equal(x.data.cpu().numpy(),
                                              y.data.cpu().numpy())
                input.append(x)
        output_final = layer(*input)
        num_input_grad = get_numerical_jacobian(
            get_loss, input, target=input[0], eps=perturb_eps)
        grad_list.append(num_input_grad.clone())
        return output_final, grad_list
if __name__ == "__main__":
    # Bug fix: only `from unittest import TestCase` is imported at module
    # scope, so the bare name `unittest` was unbound here (NameError).
    import unittest
    print(torch.__version__)
    unittest.main()
import torch
import torch.nn as nn
from torch.nn import Tanh
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch.autograd import Function
import numpy as np
from halp.utils.utils import void_cast_func, single_to_half_det, single_to_half_stoc
from halp.layers.bit_center_layer import BitCenterLayer
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger = logging.getLogger()
class BitCenterEleMultFunction(Function):
    """Autograd function for bit-centering elementwise multiplication.

    Computes the delta of l * r around the cached low-precision offsets:
    (l_lp + l_d) * (r_lp + r_d) - l_lp * r_lp = (l_d + l_lp) * r_d + l_d * r_lp.
    """

    @staticmethod
    def forward(ctx, input_delta_l, input_delta_r, input_lp_l, input_lp_r,
                grad_output_lp):
        ctx.save_for_backward(input_delta_l, input_delta_r, input_lp_l,
                              input_lp_r, grad_output_lp)
        left_full = input_delta_l + input_lp_l
        return left_full * input_delta_r + input_delta_l * input_lp_r

    @staticmethod
    def backward(ctx, grad_output):
        delta_l, delta_r, lp_l, lp_r, grad_out_lp = ctx.saved_tensors
        # product rule around the full-precision point, minus the offset term
        grad_full = grad_output + grad_out_lp
        grad_delta_l = delta_r * grad_full + lp_r * grad_output
        grad_delta_r = delta_l * grad_full + lp_l * grad_output
        # cached lp inputs and cached output grad receive no gradient
        return grad_delta_l, grad_delta_r, None, None, None
# Functional alias following the torch.autograd.Function.apply convention.
bit_center_ele_mult = BitCenterEleMultFunction.apply


# A named def instead of a lambda bound to a name (PEP 8 E731) so that
# tracebacks and reprs show a meaningful function name; behavior unchanged.
def ele_mult(x, y):
    """Plain elementwise multiply used as the fp functional of BitCenterEleMult."""
    return x * y
class BitCenterEleMult(BitCenterLayer):
    """Bit-centering elementwise-multiply layer (has no weights)."""
    def __init__(self, cast_func=void_cast_func, n_train_sample=1):
        BitCenterLayer.__init__(
            self,
            fp_functional=ele_mult,
            lp_functional=bit_center_ele_mult,
            cast_func=cast_func,
            n_train_sample=n_train_sample)
        # cache the lp output gradient on every backward pass
        self.register_backward_hook(self.update_grad_output_cache)
    def forward_fp(self, input_l, input_r):
        """Offset pass: plain x * y with input/grad cache bookkeeping."""
        input = [input_l, input_r]
        self.check_or_setup_input_cache(input)
        output = self.fp_func(*input)
        self.check_or_setup_grad_cache(output)
        self.update_input_cache(input)
        return output
    def forward_lp(self, input_l, input_r):
        """Delta pass: bit-center multiply against the cached lp inputs."""
        input = [input_l, input_r]
        # Need to test do_offset mode whether gradient is updated properly
        input_lp, grad_output_lp = self.get_input_cache_grad_cache(input)
        input_delta_l = input_l
        input_delta_r = input_r
        input_lp_l = input_lp[0]
        input_lp_r = input_lp[1]
        output = self.lp_func(input_delta_l, input_delta_r, input_lp_l,
                              input_lp_r, grad_output_lp)
        self.increment_cache_iter(input)
        return output
    def forward(self, input_l, input_r):
        """Dispatch to the fp (offset) or lp (delta) forward pass."""
        # Need to test do_offset mode whether gradient is updated properly
        if self.do_offset:
            return self.forward_fp(input_l, input_r)
        else:
            return self.forward_lp(input_l, input_r)
halp-master | halp/layers/__init__.py |
|
import torch
import numpy as np
from torch.nn import Parameter
from halp.layers.embedding import BitCenterEmbedding, bit_center_embedding
from halp.utils.utils import void_cast_func, single_to_half_det, single_to_half_stoc
from unittest import TestCase
from halp.utils.test_utils import HalpTest
from torch.autograd.gradcheck import get_numerical_jacobian, iter_tensors, make_jacobian
from halp.layers.bit_center_layer_test import TestBitCenterLayer
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger = logging.getLogger()
class TestBitCenterEmbedding(TestBitCenterLayer, TestCase):
    '''
    Test the functionality of bit centering linear layers
    '''
    def get_config(self, type="grad_check"):
        """Return a config dict: double + void cast for gradient checks,
        half-precision casts for forward/backward processing checks."""
        config = {}
        if type == "grad_check":
            config["n_train_sample"] = 35
            config["num_embeddings"] = 17
            config["embedding_dim"] = 24
            config["bias"] = True
            config["cast_func"] = void_cast_func
            config["do_double"] = True
            config["seed"] = 0
            config["batch_size"] = 35
            config["seq_length"] = 11
        elif type == "fw_bw_proc":
            config["n_train_sample"] = 98
            config["num_embeddings"] = 13
            config["embedding_dim"] = 31
            config["bias"] = False
            config["cast_func"] = single_to_half_det
            config["do_double"] = False
            config["seed"] = 0
            config["batch_size"] = 33
            config["seq_length"] = 11
        else:
            raise Exception("Config type not supported!")
        return config
    def prepare_layer(self,
                      n_train_sample,
                      num_embeddings,
                      embedding_dim,
                      bias=False,  # bias is dummy to adapt to the test interface
                      cast_func=void_cast_func,
                      do_double=True,
                      seed=0,
                      batch_size=1,
                      seq_length=10):
        """Build a CUDA BitCenterEmbedding layer with randomized weights."""
        layer = BitCenterEmbedding(
            num_embeddings=num_embeddings,
            embedding_dim=embedding_dim,
            cast_func=cast_func,
            n_train_sample=n_train_sample)
        # Note do_double = setup layer for gradient check, otherwise, it is for checking
        # the tensor properties
        if do_double:
            layer.double()
            layer.weight.data.copy_(
                torch.randn(
                    num_embeddings, embedding_dim, dtype=torch.double,
                    requires_grad=False).cuda())
            layer.weight_lp.data.copy_(layer.weight.data)
            layer.weight_delta.data.copy_(
                torch.randn(
                    num_embeddings, embedding_dim, dtype=torch.double,
                    requires_grad=True).cuda())
        layer.cuda()
        return layer
    def get_input(self,
                  n_train_sample,
                  num_embeddings,
                  embedding_dim,
                  bias=False,
                  cast_func=void_cast_func,
                  do_double=True,
                  seed=0,
                  batch_size=1,
                  seq_length=10):
        """Create identical fp/delta integer index inputs with fixed seed."""
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        self.target_dtype = None
        input_delta = Parameter(
            torch.zeros(n_train_sample, seq_length, dtype=torch.long).cuda().random_(0, num_embeddings),
            requires_grad=False)
        input_fp = Parameter(
            torch.zeros(n_train_sample, seq_length, dtype=torch.long).cuda().data.copy_(input_delta.data),
            requires_grad=False)
        return [
            input_fp,
        ], [
            input_delta,
        ]
    def check_layer_param_and_cache(self, layer):
        """Embedding has a weight matrix; input cache stays integer-typed."""
        t_list = [(layer.weight, torch.float32, True, True),
                  (layer.weight_delta, torch.half, True, True),
                  (layer.weight_lp, torch.half, True, False),
                  (layer.input_cache, torch.long, False, False),
                  (layer.grad_output_cache, torch.half, False, False)]
        self.CheckLayerTensorProperty(t_list)
        self.CheckLayerTensorGradProperty(t_list)
    def get_numerical_grad(self,
                           layer,
                           input_fp,
                           input_delta,
                           perturb_eps,
                           target=None):
        """Numerical parameter grads at the combined point; integer indices
        get no input gradient (None placeholder)."""
        grad_list = []
        layer.set_mode(do_offset=True)
        param_dict = layer.state_dict()
        # update the offset variable
        for name, param in layer.named_parameters():
            if name.endswith("_delta"):
                p_offset = param_dict[name.split("_delta")[0]]
                p_offset.data.add_(param)
        input_final = [ # for embedding the input_fp and input_delta are the same index array
            Parameter(x, requires_grad=False)
            for x, y in zip(input_fp, input_delta)
        ]
        output_final = layer(*input_final)
        loss = 0.5 * torch.sum(output_final**2)
        loss.backward()
        # use the gradient from 0.5*sum(output**2), this case has output = gradient wrt output
        num_input_grad = None
        grad_list.append(num_input_grad)
        def get_loss(x):
            output = layer(*x)
            return torch.sum(0.5 * output * output)
        grad_list += self.get_numerical_param_grad(layer, input_final, get_loss,
                                                   perturb_eps)
        return output_final, grad_list
if __name__ == "__main__":
    # Bug fix: only `from unittest import TestCase` is imported at module
    # scope, so the bare name `unittest` was unbound here (NameError).
    import unittest
    print(torch.__version__)
    unittest.main()
import torch
import numpy as np
from torch.nn import Parameter
from halp.layers.batch_norm_layer import BitCenterBatchNorm2D, bit_center_batch_norm2d
from halp.utils.utils import void_cast_func, single_to_half_det, single_to_half_stoc
from unittest import TestCase
from halp.utils.utils import set_seed
from halp.utils.test_utils import HalpTest
from torch.autograd.gradcheck import get_numerical_jacobian, iter_tensors, make_jacobian
from halp.layers.bit_center_layer_test import TestBitCenterLayer
import logging
import sys
import copy
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger = logging.getLogger()
class TestBitCenterBatchNorm2DLayer(TestBitCenterLayer, TestCase):
'''
Test the functionality of bit centering conv2d layers
'''
def get_config(self, type="grad_check"):
config = {}
# this config can test for padding != 0 and stride > 1 cases
config["input_w"] = 15
config["input_h"] = 8
config["eps"] = 1e-5
config["momentum"] = 0.1
if type == "grad_check":
config["n_train_sample"] = 6
config["num_features"] = 3
config["bias"] = True # this is dummy to adapt test interface
config["cast_func"] = void_cast_func
config["do_double"] = True
config["seed"] = 0
config["batch_size"] = 6
elif type == "fw_bw_proc":
config["n_train_sample"] = 98
config["num_features"] = 13
config["bias"] = True # dummy
config["cast_func"] = single_to_half_det
config["do_double"] = False
config["seed"] = 0
config["batch_size"] = 33
else:
raise Exception("Config type not supported!")
return config
def prepare_layer(self, input_w, input_h, n_train_sample, num_features,
bias, cast_func, eps, momentum, do_double, seed,
batch_size):
layer = BitCenterBatchNorm2D(
num_features=num_features,
cast_func=cast_func,
eps=eps,
momentum=momentum,
n_train_sample=n_train_sample)
# Note do_double = setup layer for gradient check, otherwise, it is for checking
# the tensor properties
if do_double:
layer.double()
# properly setup value for weights
layer.weight.data.copy_(
torch.randn(
layer.num_features, dtype=torch.double,
requires_grad=True).cuda())
layer.weight_lp.data.copy_(layer.weight.data)
layer.weight_delta.data.copy_(
torch.randn(
layer.num_features, dtype=torch.double,
requires_grad=True).cuda())
layer.bias.data.copy_(
torch.randn(
layer.num_features, dtype=torch.double,
requires_grad=True).cuda())
layer.bias_lp.data.copy_(layer.bias.data)
layer.bias_delta.data.copy_(
torch.randn(
layer.num_features, dtype=torch.double,
requires_grad=True).cuda())
# properly setup running statistics
layer.running_mean.data.copy_(
torch.randn(
layer.num_features,
dtype=torch.double,
requires_grad=False).cuda())
layer.running_mean_lp.data.copy_(layer.running_mean.data)
layer.running_mean_delta.data.copy_(
torch.randn(
layer.num_features,
dtype=torch.double,
requires_grad=False).cuda())
layer.running_var.data.copy_(
torch.randn(
layer.num_features,
dtype=torch.double,
requires_grad=False).cuda()).abs_()
layer.running_var_lp.data.copy_(layer.running_var.data)
layer.running_var_delta.data.copy_(
torch.randn(
layer.num_features,
dtype=torch.double,
requires_grad=False).cuda()).abs_()
layer.cuda()
return layer
def get_input(self, input_w, input_h, n_train_sample, num_features, bias,
              cast_func, eps, momentum, do_double, seed, batch_size):
    """Generate a (full-precision, delta) input pair for the layer.

    Seeds numpy and torch (CPU + all CUDA devices) so repeated calls
    produce identical inputs. With ``do_double=True`` both tensors are
    double precision (gradient-check mode); otherwise the delta tensor is
    passed through ``cast_func`` and the fp tensor stays in single
    precision (property-check mode).

    Returns:
        ([input_fp], [input_delta]) — single-element lists of Parameters
        with ``requires_grad=True``.
    """
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # No classification target is used for this layer's tests.
    self.target_dtype = None
    shape = (n_train_sample, num_features, input_w, input_h)
    if do_double:
        # NOTE: the delta tensor is drawn first — keep this order so the
        # RNG stream matches the original harness.
        input_delta = Parameter(
            torch.randn(*shape, dtype=torch.double).cuda(),
            requires_grad=True)
        input_fp = Parameter(
            torch.randn(*shape, dtype=torch.double).cuda(),
            requires_grad=True)
    else:
        input_delta = Parameter(
            cast_func(torch.randn(*shape, dtype=torch.double).cuda()),
            requires_grad=True)
        input_fp = Parameter(
            torch.randn(*shape, dtype=torch.float).cuda(),
            requires_grad=True)
    return [input_fp], [input_delta]
def get_numerical_grad(self,
                       layer,
                       input_fp,
                       input_delta,
                       perturb_eps,
                       target=None):
    """Compute reference gradients for the bit-center batchnorm layer.

    As the running stats change in every forward call, the finite
    difference approach in bit_center_layer_test.py would not work
    properly. Instead, a stock torch.nn.BatchNorm2d is set up with the
    combined (offset + delta) parameters and statistics, and its autograd
    gradients are used as the reference to compare against the bit-center
    layer.

    Note: perturb_eps and target are unused in this implementation; they
    are kept so the signature matches the finite-difference variants.

    Returns:
        (output_final, grad_list) where grad_list holds
        [input grad, weight grad, bias grad,
         post-forward running_mean, post-forward running_var].
    """
    grad_list = []
    # Put the bit-center layer in offset mode — presumably so the offset
    # parameters are the ones exposed/updated; TODO confirm against
    # BitCenterBatchNorm2D.set_mode.
    layer.set_mode(do_offset=True)
    param_dict = layer.state_dict()
    # Combined running statistics before the forward pass; these seed the
    # reference layer below.
    init_running_mean = layer.running_mean.clone(
    ) + layer.running_mean_delta.clone()
    init_running_var = layer.running_var.clone(
    ) + layer.running_var_delta.clone()
    # Fold each *_delta parameter into its offset counterpart in place
    # (state_dict values alias the live tensors, so add_ mutates the layer).
    for name, param in layer.named_parameters():
        if name.endswith("_delta"):
            # print("copied name", name)
            p_offset = param_dict[name.split("_delta")[0]]
            p_offset.data.add_(param)
    # Re-read the state dict after the in-place updates above.
    param_dict = layer.state_dict()
    layer_orig = torch.nn.BatchNorm2d(
        num_features=layer.num_features,
        track_running_stats=True).cuda().double()
    # Copy the combined weight/bias into the reference layer.
    for name, param in layer_orig.named_parameters():
        param.data.copy_(param_dict[name])
    layer_orig.running_mean.data.copy_(init_running_mean.data)
    layer_orig.running_var.data.copy_(init_running_var.data)
    # turn off running stat update for this batch to sync with the bc layer
    # NOTE(review): train() actually *enables* running-stat updates in
    # torch.nn.BatchNorm2d, so the comment above contradicts the call —
    # presumably the intent is to update the stats the same way the
    # bit-center layer does; confirm.
    layer_orig.train()
    # The reference input is the elementwise sum fp + delta, wrapped as a
    # fresh leaf so its .grad is populated by backward().
    input = []
    for i, (x, y) in enumerate(zip(input_fp, input_delta)):
        input.append(Parameter(x + y, requires_grad=True))
    output_final = layer_orig(*input)
    # Quadratic loss gives d(loss)/d(output) == output, a convenient
    # non-trivial upstream gradient.
    loss = 0.5 * torch.sum(output_final**2)
    loss.backward()
    grad_list.append(input[0].grad.data.clone())
    grad_list.append(layer_orig.weight.grad.data.clone())
    grad_list.append(layer_orig.bias.grad.data.clone())
    # The updated running statistics are returned alongside the true
    # gradients so the caller can compare them too.
    grad_list.append(layer_orig.running_mean.clone())
    grad_list.append(layer_orig.running_var.clone())
    return output_final, grad_list
def get_analytical_param_grad(self, layer):
    """Collect the bit-center layer's analytical parameter gradients.

    For each learnable parameter the offset gradient and the delta
    gradient are summed; the combined running statistics are appended as
    well so they can be compared against the reference layer. This
    function can be very layer specific.

    Returns:
        [weight grad, bias grad, combined running_mean,
         combined running_var]
    """
    return [
        layer.weight.grad + layer.weight_delta.grad,
        layer.bias.grad + layer.bias_delta.grad,
        layer.running_mean.clone() + layer.running_mean_delta.clone(),
        layer.running_var.clone() + layer.running_var_delta.clone(),
    ]
if __name__ == "__main__":
    # Print the torch version so environment mismatches are visible in logs.
    print(torch.__version__)
    # Fix: dataset-extraction residue ("| halp-master | halp/layers/..." and
    # trailing web-page text) was fused onto/after this line, making the file
    # a syntax error; the clean entry point is restored here.
    unittest.main()